- Timestamp: Mar 4, 2011 12:49:02 PM (14 years ago)
- svn:sync-xref-src-repo-rev: 70361
- Location: trunk/src
- Files: 4 added, 54 edited

trunk/src/VBox/VMM/include/REMInternal.h (r35346 → r36170)
- Dropped the remR3CpuId() prototype from the REM_INCLUDE_CPU_H block; CPUID
  queries now go through the standard cpu_x86_cpuid() callback implemented in
  VBoxRecompiler.c (see below).

trunk/src/recompiler/COPYING.LIB (r1 → r36170)
- Updated the FSF postal address in the LGPL text, both in the copyright
  header and in the "how to apply" appendix: 59 Temple Place, Suite 330,
  Boston, MA 02111-1307 becomes 51 Franklin St, Fifth Floor, Boston, MA
  02110-1301.

trunk/src/recompiler/Sun/config-host.h (r28800 → r36170)
- Define HAVE_MACHINE_BSWAP_H on the BSDs (RT_OS_FREEBSD, RT_OS_NETBSD,
  RT_OS_OPENBSD) so that bswap.h picks up the system byte-swapping routines.

trunk/src/recompiler/VBoxRecompiler.c (r36148 → r36170)
- Reordered the top-of-file includes: "config.h" and "cpu.h" now come before
  "exec-all.h", and the direct "cpu-all.h" include is gone.
- Adapted to the new breakpoint API: cpu_breakpoint_insert() takes a flags
  argument (BP_GDB here) plus an optional CPUBreakpoint ** out parameter,
  and cpu_breakpoint_remove() takes the flags as well.
- Rewrote the EXCP_DEBUG handling in REMR3EmulateInstruction(),
  remR3RunLoggingStep() and REMR3Run(). Instead of scanning the old fixed
  breakpoints[]/nb_breakpoints array, the code first checks
  env->watchpoint_hit (watchpoint handling proper is still marked @todo and
  is reported as VINF_EM_DBG_BREAKPOINT), then walks the CPUBreakpoint list
  with TAILQ_FOREACH comparing each pBP->pc against GCPtrPC (eip plus the
  CS base), returning VINF_EM_DBG_BREAKPOINT on a match and
  VINF_EM_DBG_STEPPED otherwise. An old "#if 0 //def DEBUG_bird" experiment
  in REMR3Run() was deleted in the process.
- remR3CanExecuteRaw() now refuses raw mode when either the breakpoint or
  the watchpoint list is non-empty (TAILQ_EMPTY checks) rather than testing
  nb_breakpoints.
- Replaced remR3CpuId() with the standard cpu_x86_cpuid(CPUX86State *env,
  uint32_t idx, uint32_t idxSub, uint32_t *pEAX/pEBX/pECX/pEDX) callback,
  which forwards to CPUMGetGuestCpuId(); the sub-leaf argument is ignored
  for now (NOREF), and the stale comment about ASMCpuId and global register
  allocations is gone.
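
The breakpoint scan that now appears in all three EXCP_DEBUG handlers is
small enough to show in isolation. The sketch below is not VirtualBox code:
it uses the host's <sys/queue.h> in place of the bundled sys-queue.h and a
trimmed-down CPUBreakpoint, but the TAILQ_FOREACH walk and the
breakpoint-versus-single-step decision follow the same pattern.

    #include <stdio.h>
    #include <sys/queue.h>

    typedef unsigned long target_ulong;
    #define BP_GDB 0x10                 /* same value as in cpu-all.h */

    /* Trimmed stand-in for the CPUBreakpoint declared in cpu-defs.h. */
    typedef struct CPUBreakpoint {
        target_ulong pc;
        int flags;
        TAILQ_ENTRY(CPUBreakpoint) entry;
    } CPUBreakpoint;

    TAILQ_HEAD(breakpoints_head, CPUBreakpoint);

    /* The EXCP_DEBUG decision: a registered breakpoint at the current PC
       means "breakpoint hit", anything else means "single step done". */
    static const char *classify_debug_exit(struct breakpoints_head *bps,
                                           target_ulong pc)
    {
        CPUBreakpoint *bp;
        TAILQ_FOREACH(bp, bps, entry)
            if (bp->pc == pc)
                return "VINF_EM_DBG_BREAKPOINT";
        return "VINF_EM_DBG_STEPPED";
    }

    int main(void)
    {
        struct breakpoints_head bps = TAILQ_HEAD_INITIALIZER(bps);
        CPUBreakpoint bp = { 0x1000, BP_GDB, { NULL, NULL } };

        TAILQ_INSERT_HEAD(&bps, &bp, entry);
        printf("%s\n", classify_debug_exit(&bps, 0x1000)); /* breakpoint */
        printf("%s\n", classify_debug_exit(&bps, 0x2000)); /* stepped */
        return 0;
    }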

trunk/src/recompiler/bswap.h (r36140 → r36170)
- When HAVE_MACHINE_BSWAP_H is defined (the BSDs, per config-host.h above),
  include <sys/endian.h>, <sys/types.h> and <machine/bswap.h> and use the
  system-supplied swappers; the local bswap16/32/64 inline wrappers are now
  compiled only in the !HAVE_MACHINE_BSWAP_H case, and their old
  "#ifndef bswapNN /* BSD endian.h clash */" guards are gone.
- The unaligned-access fast path is selected with _ARCH_PPC instead of
  __powerpc__.
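
For orientation, whichever header ends up providing them, the three swappers
reduce to plain shift-and-mask operations. A minimal portable equivalent,
written here purely as an illustration (these *_generic names are not part
of the source tree):

    #include <stdint.h>
    #include <stdio.h>

    static inline uint16_t bswap16_generic(uint16_t x)
    {
        return (uint16_t)((x >> 8) | (x << 8));
    }

    static inline uint32_t bswap32_generic(uint32_t x)
    {
        return ((x >> 24) & 0x000000ffu) | ((x >>  8) & 0x0000ff00u) |
               ((x <<  8) & 0x00ff0000u) | ((x << 24) & 0xff000000u);
    }

    static inline uint64_t bswap64_generic(uint64_t x)
    {
        return ((uint64_t)bswap32_generic((uint32_t)x) << 32) |
               bswap32_generic((uint32_t)(x >> 32));
    }

    int main(void)
    {
        printf("%04x\n", bswap16_generic(0x1234));      /* 3412 */
        printf("%08x\n", bswap32_generic(0x12345678u)); /* 78563412 */
        printf("%016llx\n", (unsigned long long)
               bswap64_generic(0x0102030405060708ull)); /* 0807060504030201 */
        return 0;
    }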

trunk/src/recompiler/cpu-all.h (r36147 → r36170)
- Updated the FSF address in the license header and added an include of
  "qemu-common.h".
- Made the memory-load helpers const-correct: ldub_p(), ldsb_p(), and the
  lduw/ldsw/ldl/ldq and ldfl/ldfq little- and big-endian variants all take
  a const void * now, in the VBox REM_PHYS_ADDR_IN_TLB DECLINLINE versions
  as well as the generic inline ones.
- The PowerPC byte-reversing asm fast paths (lhbrx/lwbrx/sthbrx/stwbrx) are
  guarded by _ARCH_PPC instead of __powerpc__.
- CONFIG_USER_ONLY: h2g() now asserts that the translated address fits the
  target address space and yields an abi_ulong, and the new h2g_valid()
  performs the same range check non-fatally.
- Declared cpu_exec_init_all() and cpu_copy(), dropped
  page_unprotect_range(), and marked cpu_abort() QEMU_NORETURN.
- New breakpoint/watchpoint interface: BP_MEM_READ, BP_MEM_WRITE,
  BP_MEM_ACCESS, BP_STOP_BEFORE_ACCESS, BP_WATCHPOINT_HIT, BP_GDB and
  BP_CPU flags; cpu_breakpoint_insert()/cpu_breakpoint_remove() take flags
  (insert can return the CPUBreakpoint through an out parameter), joined by
  cpu_breakpoint_remove_by_ref() and cpu_breakpoint_remove_all(mask); the
  watchpoint functions get the same treatment plus an addr + len interface.
- Added the CPU_LOG_RESET log item.
- Physical memory registration gained a region offset:
  cpu_register_physical_memory_offset() is the primitive and
  cpu_register_physical_memory() becomes an inline wrapper passing 0. Also
  new: cpu_unregister_io_memory(), cpu_physical_memory_map()/unmap(),
  cpu_register_map_client()/cpu_unregister_map_client(),
  cpu_physical_sync_dirty_bitmap(), and
  qemu_register/unregister_coalesced_mmio() for MMIO regions that tolerate
  write reordering.
- Rewrote the PowerPC cpu_get_real_ticks(): on _ARCH_PPC64 a single mftb
  with the Cell workaround loop, on 32-bit the classic mftbu/mftb/mftbu
  retry sequence, replacing the old get_tbl()/get_tbu() pair.
- Several VBox #if/#endif comments were tidied (/* VBOX */).
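
The const-qualified loaders in the generic (non-_ARCH_PPC) branch assemble
values one byte at a time, which is what makes them safe for unaligned guest
pointers. A self-contained copy of the little-endian pair, runnable on any
host (the *_generic names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian loads from possibly unaligned memory, mirroring the
       generic branch of lduw_le_p()/ldl_le_p(), now with const pointers. */
    static inline int lduw_le_generic(const void *ptr)
    {
        const uint8_t *p = ptr;
        return p[0] | (p[1] << 8);
    }

    static inline int ldl_le_generic(const void *ptr)
    {
        const uint8_t *p = ptr;
        return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
    }

    int main(void)
    {
        const uint8_t buf[] = { 0x78, 0x56, 0x34, 0x12 };
        printf("%04x %08x\n", lduw_le_generic(buf), ldl_le_generic(buf));
        /* prints: 5678 12345678 */
        return 0;
    }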

trunk/src/recompiler/cpu-defs.h (r36140 → r36170)
- Updated the FSF address and added an include of "sys-queue.h" for the
  TAILQ macros.
- The VBox-specific EXCP_EXECUTE_RAW/EXCP_EXECUTE_HWACC/EXCP_SINGLE_INSTR/
  EXCP_RC defines keep their values but gained doxygen-style comments.
- Fixed the "corresponding iotlb value" typo in the TLB comment.
- Removed MAX_BREAKPOINTS/MAX_WATCHPOINTS in favor of list-based storage:
  new CPUBreakpoint { pc, flags, TAILQ_ENTRY } and CPUWatchpoint { vaddr,
  len_mask, flags, TAILQ_ENTRY } types (see the list-walk sketch under
  VBoxRecompiler.c above), with CPU_COMMON now carrying TAILQ_HEADs for
  both lists and a CPUWatchpoint *watchpoint_hit pointer instead of the old
  fixed arrays and counters.
- Dropped the user_mode_only field and added the KVM plumbing fields
  (struct KVMState *kvm_state, struct kvm_run *kvm_run, int kvm_fd) at the
  end of CPU_COMMON.

trunk/src/recompiler/cpu-exec.c (r36140 → r36170)
- Updated the FSF address and added an include of "kvm.h"; <sys/ucontext.h>
  is only pulled in on Linux now, with OpenBSD handled via struct
  sigcontext in the signal-mask restore of cpu_loop_exit() and in the SPARC
  signal handler.
- cpu_loop_exit() resets env->exception_index to -1 before the longjmp.
- tb_find_fast() no longer computes the per-target (pc, cs_base, flags)
  triple in a large #if ladder; it calls the new cpu_get_tb_cpu_state()
  helper. CPU_PC_FROM_TB() was renamed cpu_pc_from_tb().
- New debug-exception hook: cpu_set_debug_excp_handler() installs a
  CPUDebugExcpHandler and returns the previous one, and
  cpu_handle_debug_exception() clears pending BP_WATCHPOINT_HIT flags and
  invokes the handler whenever cpu_exec() is about to return EXCP_DEBUG.
- Single-stepping with SSTEP_NOIRQ now masks the external interrupt sources
  (HARD/FIQ/SMI/NMI) for the step instead of skipping interrupt processing
  wholesale.
- After a TB is chosen, env->current_tb is cleared again if
  CPU_INTERRUPT_EXIT is pending, closing a race where cpu_interrupt() fires
  while the TB is still being translated and the loop would otherwise run a
  stale block.
- x86 EFLAGS reconstruction switched from cc_table[CC_OP].compute_all() to
  helper_cc_compute_all(CC_OP), and the debug logging switched to
  qemu_log_mask()/log_cpu_state().
- When KVM is enabled, cpu_exec() hands the CPU to kvm_cpu_exec() and
  longjmps out instead of entering the TCG loop.
- Signal handlers: x86-64 gained NetBSD support through the
  QEMU_UC_MCONTEXT_GREGS/QEMU_UC_MACHINE_PC wrappers over the differing
  ucontext layouts, the PowerPC handler is guarded by _ARCH_PPC, SPARC
  gained an OpenBSD sigcontext variant, and the soft-MMU fault paths call
  cpu_loop_exit() instead of do_raise_exception_err().
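
cpu_set_debug_excp_handler() is a plain swap-and-return-old hook, which lets
a client chain to whatever handler was installed before it. A sketch of that
usage pattern with a stub CPUState (the real one is target-specific); only
the hook function itself mirrors the changeset, the rest is illustrative:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct CPUState { int exception_index; } CPUState;  /* stub */
    typedef void (CPUDebugExcpHandler)(CPUState *env);

    static CPUDebugExcpHandler *debug_excp_handler;

    /* Same shape as the new hook in cpu-exec.c: install, return previous. */
    CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
    {
        CPUDebugExcpHandler *old_handler = debug_excp_handler;

        debug_excp_handler = handler;
        return old_handler;
    }

    static CPUDebugExcpHandler *prev_handler;

    static void my_debug_handler(CPUState *env)
    {
        printf("debug exception, index=%#x\n", env->exception_index);
        if (prev_handler)        /* chain to the previously installed hook */
            prev_handler(env);
    }

    int main(void)
    {
        CPUState env = { 0x10002 };      /* EXCP_DEBUG */

        prev_handler = cpu_set_debug_excp_handler(my_debug_handler);
        if (debug_excp_handler)  /* as cpu_handle_debug_exception would */
            debug_excp_handler(&env);
        return 0;
    }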

trunk/src/recompiler/cutils.c (r36140 → r36170)
- Added an include of "host-utils.h"; the case-insensitive string helper
  folds characters with qemu_toupper() instead of the local toupper(), and
  a mislabeled "#endif /* VBOX */" became "#endif /* !VBOX */".
- Added qemu_fls(), implemented as 32 - clz32(i).
- Added the QEMUIOVector helpers for non-VBox builds: qemu_iovec_init(),
  qemu_iovec_add() (doubling the iovec array as needed and keeping a
  running byte count), qemu_iovec_destroy(), qemu_iovec_reset(),
  qemu_iovec_to_buffer() and qemu_iovec_from_buffer().
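
qemu_fls() is "find last set" expressed through a count-leading-zeros
primitive (clz32() from host-utils.h maps to the compiler builtin on GCC
hosts). A standalone version, with the explicit zero guard that the raw
builtin does not provide:

    #include <stdio.h>

    /* fls(i): 1-based index of the most significant set bit, 0 for i == 0.
       Mirrors qemu_fls() = 32 - clz32(i); qemu's clz32(0) returns 32. */
    static int fls_generic(unsigned int i)
    {
        return i ? 32 - __builtin_clz(i) : 0;
    }

    int main(void)
    {
        printf("%d %d %d %d\n", fls_generic(0), fls_generic(1),
               fls_generic(0x20), fls_generic(0x80000000u));
        /* prints: 0 1 6 32 */
        return 0;
    }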

trunk/src/recompiler/disas.h (r2422 → r36170)
- Reworked struct syminfo: instead of a bare void *disas_symtab it now
  carries a lookup_symbol_t callback plus a union of struct elf32_sym * /
  struct elf64_sym * tables, so each symbol table registered by elfload.c
  brings its own resolver; the syminfos list head is declared separately
  from the struct definition.
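
The reworked struct turns symbol resolution into a per-table callback,
letting 32-bit and 64-bit ELF tables coexist on the syminfos list. A toy
rendition of the scheme; the dummy resolver and the walking loop are
illustrative stand-ins, not the actual elfload.c/disas.c code:

    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned long target_ulong;

    struct syminfo;
    typedef const char *(*lookup_symbol_t)(struct syminfo *s,
                                           target_ulong orig_addr);

    struct syminfo {
        lookup_symbol_t lookup_symbol;
        struct syminfo *next;            /* symtab fields omitted here */
    };

    static struct syminfo *syminfos;

    /* Ask every registered table in turn until one knows the address. */
    static const char *lookup(target_ulong addr)
    {
        struct syminfo *s;

        for (s = syminfos; s; s = s->next) {
            const char *name = s->lookup_symbol(s, addr);
            if (name && name[0])
                return name;
        }
        return "";
    }

    /* Dummy resolver standing in for the real ELF32/ELF64 lookups. */
    static const char *dummy_lookup(struct syminfo *s, target_ulong addr)
    {
        (void)s;
        return addr == 0x400000 ? "_start" : "";
    }

    int main(void)
    {
        struct syminfo si = { dummy_lookup, NULL };

        syminfos = &si;
        printf("0x400000 -> %s\n", lookup(0x400000));
        return 0;
    }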

trunk/src/recompiler/dyngen-exec.h (r36140 → r36170)
- Updated the FSF address; the 64-bit type selection and the PowerPC AREG
  register assignments now test _ARCH_PPC64/_ARCH_PPC instead of
  __powerpc64__/__powerpc__.
- Removed the last of the dyngen-era machinery: FORCE_RET() and OPPROTO,
  the __hidden visibility macro, the __op_param/PARAM1..PARAM3 and __op_jmp
  symbols, ASM_NAME(), and the per-architecture EXIT_TB()/
  GOTO_LABEL_PARAM() asm stubs. Only the glue()/stringify() token helpers
  survive from that region.

trunk/src/recompiler/exec-all.h (r36140 → r36170)
- Updated the FSF address; the header gained a proper _EXEC_ALL_H_ include
  guard and an include of "qemu-common.h".
- Removed the GenOpFunc typedefs and declared cpu_loop_exit() with
  QEMU_NORETURN.
- USE_DIRECT_JUMP is keyed on _ARCH_PPC (alongside x86-64 and ARM). On ARM,
  the instruction-cache flush after patching a jump target uses GCC's
  __clear_cache() when building with GCC >= 4.1, falling back to the old
  swi 0x9f0002 sequence otherwise.
- Dropped the ASM_DATA_SECTION/ASM_PREVIOUS_SECTION/ASM_OP_LABEL_NAME
  leftovers.
- Declares the CPUDebugExcpHandler type and cpu_set_debug_excp_handler()
  (see the hook sketch under cpu-exec.c above).

trunk/src/recompiler/exec.c (r36140 → r36170)
- Updated the FSF address and added an include of "kvm.h"; phys_ram_dirty
  moved into the VBox-only block and the unused phys_ram_base global is
  gone.
- PhysPageDesc gained a region_offset field, initialized by
  phys_page_find_alloc() to the page's physical address, and subpage_t
  tracks a per-slot region_offset as well; the io_mem_nb counter was
  replaced by an io_mem_used[] occupancy array.
- page_find_alloc() (user-only builds) uses the new h2g_valid()/h2g() pair
  when mapping the host allocation back into guest space; an unused local
  was removed from the Win32 branch of page_init().
- Code-buffer setup: on ARM the translation buffer is mmap'ed below 32 MB
  (MAP_FIXED at 0x01000000, capped at 16 MB) so direct calls and branches
  can reach it, and the NULL checks after qemu_malloc()/qemu_mallocz()
  allocations were dropped.
- cpu_exec_init() initializes the new breakpoint and watchpoint TAILQs
  instead of zeroing nb_watchpoints.
- tb_invalidate_phys_page_range() and tb_invalidate_phys_page() obtain the
  precise-SMC retranslation state from cpu_get_tb_cpu_state() instead of
  open-coded TARGET_I386 flag math, with their locals reorganized to match;
  the debug printout in tb_invalidate_phys_page_fast() goes through
  qemu_log().
- Rewrote the watchpoint API: cpu_watchpoint_insert(env, addr, len, flags,
  &wp) validates that len is 1/2/4/8 and that addr is aligned to it (via
  len_mask = ~(len - 1), failing with -EINVAL, or VERR_INVALID_PARAMETER on
  VBox), allocates a CPUWatchpoint, keeps GDB-injected entries at the head
  of the list, and flushes the TLB page; cpu_watchpoint_remove() matches on
  addr/len/flags, cpu_watchpoint_remove_by_ref() unlinks and frees a single
  entry, and cpu_watchpoint_remove_all() filters by flag mask. The listing
  breaks off at the start of the matching breakpoint helper
  ("/* Add a breakpoint. ").
*/ 1601 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, 1602 CPUBreakpoint **breakpoint) 1603 { 1604 #if defined(TARGET_HAS_ICE) 1605 CPUBreakpoint *bp; 1606 1607 bp = qemu_malloc(sizeof(*bp)); 1608 1609 bp->pc = pc; 1610 bp->flags = flags; 1611 1612 /* keep all GDB-injected breakpoints in front */ 1613 if (flags & BP_GDB) 1614 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry); 1615 else 1616 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry); 1617 1618 breakpoint_invalidate(env, pc); 1619 1620 if (breakpoint) 1621 *breakpoint = bp; 1622 return 0; 1623 #else 1624 return -ENOSYS; 1625 #endif 1626 } 1627 1628 /* Remove a specific breakpoint. */ 1629 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags) 1630 { 1631 #if defined(TARGET_HAS_ICE) 1632 CPUBreakpoint *bp; 1633 1634 TAILQ_FOREACH(bp, &env->breakpoints, entry) { 1635 if (bp->pc == pc && bp->flags == flags) { 1636 cpu_breakpoint_remove_by_ref(env, bp); 1574 1637 return 0; 1575 1638 } 1576 1639 } 1577 return -1; 1578 } 1579 1580 /* Remove all watchpoints. */ 1581 void cpu_watchpoint_remove_all(CPUState *env) { 1582 int i; 1583 1584 for (i = 0; i < env->nb_watchpoints; i++) { 1585 tlb_flush_page(env, env->watchpoint[i].vaddr); 1586 } 1587 env->nb_watchpoints = 0; 1588 } 1589 1590 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a 1591 breakpoint is reached */ 1592 int cpu_breakpoint_insert(CPUState *env, target_ulong pc) 1640 # ifndef VBOX 1641 return -ENOENT; 1642 # else 1643 return VERR_NOT_FOUND; 1644 # endif 1645 #else 1646 return -ENOSYS; 1647 #endif 1648 } 1649 1650 /* Remove a specific breakpoint by reference. */ 1651 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) 1593 1652 { 1594 1653 #if defined(TARGET_HAS_ICE) 1595 int i; 1596 1597 for(i = 0; i < env->nb_breakpoints; i++) { 1598 if (env->breakpoints[i] == pc) 1599 return 0; 1600 } 1601 1602 if (env->nb_breakpoints >= MAX_BREAKPOINTS) 1603 return -1; 1604 env->breakpoints[env->nb_breakpoints++] = pc; 1605 1606 breakpoint_invalidate(env, pc); 1607 return 0; 1608 #else 1609 return -1; 1610 #endif 1611 } 1612 1613 /* remove all breakpoints */ 1614 void cpu_breakpoint_remove_all(CPUState *env) { 1654 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry); 1655 1656 breakpoint_invalidate(env, breakpoint->pc); 1657 1658 qemu_free(breakpoint); 1659 #endif 1660 } 1661 1662 /* Remove all matching breakpoints. 
*/ 1663 void cpu_breakpoint_remove_all(CPUState *env, int mask) 1664 { 1615 1665 #if defined(TARGET_HAS_ICE) 1616 int i; 1617 for(i = 0; i < env->nb_breakpoints; i++) { 1618 breakpoint_invalidate(env, env->breakpoints[i]); 1619 } 1620 env->nb_breakpoints = 0; 1621 #endif 1622 } 1623 1624 /* remove a breakpoint */ 1625 int cpu_breakpoint_remove(CPUState *env, target_ulong pc) 1626 { 1627 #if defined(TARGET_HAS_ICE) 1628 int i; 1629 for(i = 0; i < env->nb_breakpoints; i++) { 1630 if (env->breakpoints[i] == pc) 1631 goto found; 1632 } 1633 return -1; 1634 found: 1635 env->nb_breakpoints--; 1636 if (i < env->nb_breakpoints) 1637 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints]; 1638 1639 breakpoint_invalidate(env, pc); 1640 return 0; 1641 #else 1642 return -1; 1666 CPUBreakpoint *bp, *next; 1667 1668 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) { 1669 if (bp->flags & mask) 1670 cpu_breakpoint_remove_by_ref(env, bp); 1671 } 1643 1672 #endif 1644 1673 } … … 1758 1787 1759 1788 #ifndef VBOX 1760 CPULogItem cpu_log_items[] = {1789 const CPULogItem cpu_log_items[] = { 1761 1790 { CPU_LOG_TB_OUT_ASM, "out_asm", 1762 1791 "show generated host assembly code for each compiled TB" }, … … 1780 1809 { CPU_LOG_PCALL, "pcall", 1781 1810 "show protected mode far calls/returns/exceptions" }, 1811 { CPU_LOG_RESET, "cpu_reset", 1812 "show CPU state before CPU resets" }, 1782 1813 #endif 1783 1814 #ifdef DEBUG_IOPORT … … 1845 1876 cpu_dump_state(env, stderr, fprintf, 0); 1846 1877 #endif 1847 if ( logfile) {1848 fprintf(logfile,"qemu: fatal: ");1849 vfprintf(logfile,fmt, ap2);1850 fprintf(logfile,"\n");1878 if (qemu_log_enabled()) { 1879 qemu_log("qemu: fatal: "); 1880 qemu_log_vprintf(fmt, ap2); 1881 qemu_log("\n"); 1851 1882 #ifdef TARGET_I386 1852 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);1853 #else 1854 cpu_dump_state(env, logfile, fprintf, 0);1855 #endif 1856 fflush(logfile);1857 fclose(logfile);1883 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP); 1884 #else 1885 log_cpu_state(env, 0); 1886 #endif 1887 qemu_log_flush(); 1888 qemu_log_close(); 1858 1889 } 1859 1890 va_end(ap2); … … 1867 1898 { 1868 1899 CPUState *new_env = cpu_init(env->cpu_model_str); 1869 /* preserve chaining and index */1870 1900 CPUState *next_cpu = new_env->next_cpu; 1871 1901 int cpu_index = new_env->cpu_index; 1902 #if defined(TARGET_HAS_ICE) 1903 CPUBreakpoint *bp; 1904 CPUWatchpoint *wp; 1905 #endif 1906 1872 1907 memcpy(new_env, env, sizeof(CPUState)); 1908 1909 /* Preserve chaining and index. */ 1873 1910 new_env->next_cpu = next_cpu; 1874 1911 new_env->cpu_index = cpu_index; 1912 1913 /* Clone all break/watchpoints. 1914 Note: Once we support ptrace with hw-debug register access, make sure 1915 BP_CPU break/watchpoints are handled correctly on clone. 
*/ 1916 TAILQ_INIT(&env->breakpoints); 1917 TAILQ_INIT(&env->watchpoints); 1918 #if defined(TARGET_HAS_ICE) 1919 TAILQ_FOREACH(bp, &env->breakpoints, entry) { 1920 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL); 1921 } 1922 TAILQ_FOREACH(wp, &env->watchpoints, entry) { 1923 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1, 1924 wp->flags, NULL); 1925 } 1926 #endif 1927 1875 1928 return new_env; 1876 1929 } … … 2118 2171 return in_migration; 2119 2172 } 2120 #endif 2173 #endif /* !VBOX */ 2174 2175 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr) 2176 { 2177 if (kvm_enabled()) 2178 kvm_physical_sync_dirty_bitmap(start_addr, end_addr); 2179 } 2121 2180 2122 2181 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB) … … 2154 2213 for(i = 0; i < CPU_TLB_SIZE; i++) 2155 2214 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]); 2156 # if (NB_MMU_MODES >= 3)2215 # if (NB_MMU_MODES >= 3) 2157 2216 for(i = 0; i < CPU_TLB_SIZE; i++) 2158 2217 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]); 2159 # if (NB_MMU_MODES == 4)2218 # if (NB_MMU_MODES == 4) 2160 2219 for(i = 0; i < CPU_TLB_SIZE; i++) 2161 2220 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]); 2162 # endif2163 # endif2221 # endif 2222 # endif 2164 2223 #else /* VBOX */ 2165 2224 for(i = 0; i < CPU_TLB_SIZE; i++) … … 2218 2277 int ret; 2219 2278 CPUTLBEntry *te; 2220 int i;2279 CPUWatchpoint *wp; 2221 2280 target_phys_addr_t iotlb; 2222 2281 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB) … … 2267 2326 We can't use the high bits of pd for this because 2268 2327 IO_MEM_ROMD uses these as a ram address. */ 2269 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr; 2328 iotlb = (pd & ~TARGET_PAGE_MASK); 2329 #ifndef VBOX 2330 if (p) { 2331 #else 2332 if ( p->phys_offset 2333 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iMMIOMemType 2334 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iHandlerMemType) { 2335 #endif 2336 iotlb += p->region_offset; 2337 } else { 2338 iotlb += paddr; 2339 } 2270 2340 } 2271 2341 … … 2299 2369 /* Make accesses to pages with watchpoints go via the 2300 2370 watchpoint trap routines. */ 2301 for (i = 0; i < env->nb_watchpoints; i++) {2302 if (vaddr == ( env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {2371 TAILQ_FOREACH(wp, &env->watchpoints, entry) { 2372 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { 2303 2373 iotlb = io_mem_watch + paddr; 2304 2374 /* TODO: The memory case can be optimized by not trapping … … 2467 2537 target_ulong addr; 2468 2538 2539 if (start + len < start) 2540 /* we've wrapped around */ 2541 return -1; 2542 2469 2543 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */ 2470 2544 start = start & TARGET_PAGE_MASK; 2471 2545 2472 if( end < start )2473 /* we've wrapped around */2474 return -1;2475 2546 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { 2476 2547 p = page_find(addr >> TARGET_PAGE_BITS); … … 2553 2624 2554 2625 #if !defined(CONFIG_USER_ONLY) 2626 2555 2627 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 2556 ram_addr_t memory );2628 ram_addr_t memory, ram_addr_t region_offset); 2557 2629 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 2558 ram_addr_t orig_memory );2630 ram_addr_t orig_memory, ram_addr_t region_offset); 2559 2631 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \ 2560 2632 need_subpage) \ … … 2579 2651 /* register physical memory. 
'size' must be a multiple of the target 2580 2652 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an 2581 io memory page */ 2582 void cpu_register_physical_memory(target_phys_addr_t start_addr, 2583 ram_addr_t size, 2584 ram_addr_t phys_offset) 2653 io memory page. The address used when calling the IO function is 2654 the offset from the start of the region, plus region_offset. Both 2655 start_region and regon_offset are rounded down to a page boundary 2656 before calculating this offset. This should not be a problem unless 2657 the low bits of start_addr and region_offset differ. */ 2658 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr, 2659 ram_addr_t size, 2660 ram_addr_t phys_offset, 2661 ram_addr_t region_offset) 2585 2662 { 2586 2663 target_phys_addr_t addr, end_addr; … … 2597 2674 } 2598 2675 #endif 2676 if (kvm_enabled()) 2677 kvm_set_phys_mem(start_addr, size, phys_offset); 2678 2679 if (phys_offset == IO_MEM_UNASSIGNED) { 2680 region_offset = start_addr; 2681 } 2682 region_offset &= TARGET_PAGE_MASK; 2599 2683 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; 2600 2684 end_addr = start_addr + (target_phys_addr_t)size; … … 2611 2695 if (!(orig_memory & IO_MEM_SUBPAGE)) { 2612 2696 subpage = subpage_init((addr & TARGET_PAGE_MASK), 2613 &p->phys_offset, orig_memory); 2697 &p->phys_offset, orig_memory, 2698 p->region_offset); 2614 2699 } else { 2615 2700 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK) 2616 2701 >> IO_MEM_SHIFT]; 2617 2702 } 2618 subpage_register(subpage, start_addr2, end_addr2, phys_offset); 2703 subpage_register(subpage, start_addr2, end_addr2, phys_offset, 2704 region_offset); 2705 p->region_offset = 0; 2619 2706 } else { 2620 2707 p->phys_offset = phys_offset; … … 2626 2713 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 2627 2714 p->phys_offset = phys_offset; 2715 p->region_offset = region_offset; 2628 2716 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || 2629 (phys_offset & IO_MEM_ROMD)) 2717 (phys_offset & IO_MEM_ROMD)) { 2630 2718 phys_offset += TARGET_PAGE_SIZE; 2631 else {2719 } else { 2632 2720 target_phys_addr_t start_addr2, end_addr2; 2633 2721 int need_subpage = 0; … … 2638 2726 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) { 2639 2727 subpage = subpage_init((addr & TARGET_PAGE_MASK), 2640 &p->phys_offset, IO_MEM_UNASSIGNED); 2728 &p->phys_offset, IO_MEM_UNASSIGNED, 2729 addr & TARGET_PAGE_MASK); 2641 2730 subpage_register(subpage, start_addr2, end_addr2, 2642 phys_offset); 2731 phys_offset, region_offset); 2732 p->region_offset = 0; 2643 2733 } 2644 2734 } 2645 2735 } 2736 region_offset += TARGET_PAGE_SIZE; 2646 2737 } 2647 2738 … … 2666 2757 2667 2758 #ifndef VBOX 2759 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) 2760 { 2761 if (kvm_enabled()) 2762 kvm_coalesce_mmio_region(addr, size); 2763 } 2764 2765 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) 2766 { 2767 if (kvm_enabled()) 2768 kvm_uncoalesce_mmio_region(addr, size); 2769 } 2770 2668 2771 /* XXX: better than nothing */ 2669 2772 ram_addr_t qemu_ram_alloc(ram_addr_t size) … … 2690 2793 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 2691 2794 #endif 2692 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2795 #if defined(TARGET_SPARC) 2693 2796 do_unassigned_access(addr, 0, 0, 0, 1); 2694 2797 #endif … … 2701 2804 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 2702 2805 #endif 2703 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2806 #if 
defined(TARGET_SPARC) 2704 2807 do_unassigned_access(addr, 0, 0, 0, 2); 2705 2808 #endif … … 2712 2815 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 2713 2816 #endif 2714 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2817 #if defined(TARGET_SPARC) 2715 2818 do_unassigned_access(addr, 0, 0, 0, 4); 2716 2819 #endif … … 2723 2826 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 2724 2827 #endif 2725 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2828 #if defined(TARGET_SPARC) 2726 2829 do_unassigned_access(addr, 1, 0, 0, 1); 2727 2830 #endif … … 2733 2836 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 2734 2837 #endif 2735 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2838 #if defined(TARGET_SPARC) 2736 2839 do_unassigned_access(addr, 1, 0, 0, 2); 2737 2840 #endif … … 2743 2846 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 2744 2847 #endif 2745 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)2848 #if defined(TARGET_SPARC) 2746 2849 do_unassigned_access(addr, 1, 0, 0, 4); 2747 2850 #endif … … 2828 2931 stw_p(phys_ram_base + ram_addr, val); 2829 2932 #endif 2830 2831 2933 #ifdef USE_KQEMU 2832 2934 if (cpu_single_env->kqemu_enabled && … … 2900 3002 2901 3003 /* Generate a debug exception if a watchpoint has been hit. */ 2902 static void check_watchpoint(int offset, int flags)3004 static void check_watchpoint(int offset, int len_mask, int flags) 2903 3005 { 2904 3006 CPUState *env = cpu_single_env; 3007 target_ulong pc, cs_base; 3008 TranslationBlock *tb; 2905 3009 target_ulong vaddr; 2906 int i; 2907 3010 CPUWatchpoint *wp; 3011 int cpu_flags; 3012 3013 if (env->watchpoint_hit) { 3014 /* We re-entered the check after replacing the TB. Now raise 3015 * the debug interrupt so that is will trigger after the 3016 * current instruction. 
*/ 3017 cpu_interrupt(env, CPU_INTERRUPT_DEBUG); 3018 return; 3019 } 2908 3020 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; 2909 for (i = 0; i < env->nb_watchpoints; i++) { 2910 if (vaddr == env->watchpoint[i].vaddr 2911 && (env->watchpoint[i].type & flags)) { 2912 env->watchpoint_hit = i + 1; 2913 cpu_interrupt(env, CPU_INTERRUPT_DEBUG); 2914 break; 3021 TAILQ_FOREACH(wp, &env->watchpoints, entry) { 3022 if ((vaddr == (wp->vaddr & len_mask) || 3023 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) { 3024 wp->flags |= BP_WATCHPOINT_HIT; 3025 if (!env->watchpoint_hit) { 3026 env->watchpoint_hit = wp; 3027 tb = tb_find_pc(env->mem_io_pc); 3028 if (!tb) { 3029 cpu_abort(env, "check_watchpoint: could not find TB for " 3030 "pc=%p", (void *)env->mem_io_pc); 3031 } 3032 cpu_restore_state(tb, env, env->mem_io_pc, NULL); 3033 tb_phys_invalidate(tb, -1); 3034 if (wp->flags & BP_STOP_BEFORE_ACCESS) { 3035 env->exception_index = EXCP_DEBUG; 3036 } else { 3037 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); 3038 tb_gen_code(env, pc, cs_base, cpu_flags, 1); 3039 } 3040 cpu_resume_from_signal(env, NULL); 3041 } 3042 } else { 3043 wp->flags &= ~BP_WATCHPOINT_HIT; 2915 3044 } 2916 3045 } … … 2922 3051 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr) 2923 3052 { 2924 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);3053 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ); 2925 3054 return ldub_phys(addr); 2926 3055 } … … 2928 3057 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr) 2929 3058 { 2930 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);3059 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ); 2931 3060 return lduw_phys(addr); 2932 3061 } … … 2934 3063 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr) 2935 3064 { 2936 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);3065 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ); 2937 3066 return ldl_phys(addr); 2938 3067 } … … 2941 3070 uint32_t val) 2942 3071 { 2943 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);3072 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE); 2944 3073 stb_phys(addr, val); 2945 3074 } … … 2948 3077 uint32_t val) 2949 3078 { 2950 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);3079 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE); 2951 3080 stw_phys(addr, val); 2952 3081 } … … 2955 3084 uint32_t val) 2956 3085 { 2957 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);3086 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE); 2958 3087 stl_phys(addr, val); 2959 3088 } … … 2977 3106 unsigned int idx; 2978 3107 2979 idx = SUBPAGE_IDX(addr - mmio->base);3108 idx = SUBPAGE_IDX(addr); 2980 3109 #if defined(DEBUG_SUBPAGE) 2981 3110 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__, 2982 3111 mmio, len, addr, idx); 2983 3112 #endif 2984 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr); 3113 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], 3114 addr + mmio->region_offset[idx][0][len]); 2985 3115 2986 3116 return ret; … … 2992 3122 unsigned int idx; 2993 3123 2994 idx = SUBPAGE_IDX(addr - mmio->base);3124 idx = SUBPAGE_IDX(addr); 2995 3125 #if defined(DEBUG_SUBPAGE) 2996 3126 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__, 2997 3127 mmio, len, addr, idx, value); 2998 3128 #endif 2999 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value); 
3129 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], 3130 addr + mmio->region_offset[idx][1][len], 3131 value); 3000 3132 } 3001 3133 … … 3067 3199 3068 3200 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 3069 ram_addr_t memory )3201 ram_addr_t memory, ram_addr_t region_offset) 3070 3202 { 3071 3203 int idx, eidx; … … 3086 3218 mmio->mem_read[idx][i] = &io_mem_read[memory][i]; 3087 3219 mmio->opaque[idx][0][i] = io_mem_opaque[memory]; 3220 mmio->region_offset[idx][0][i] = region_offset; 3088 3221 } 3089 3222 if (io_mem_write[memory][i]) { 3090 3223 mmio->mem_write[idx][i] = &io_mem_write[memory][i]; 3091 3224 mmio->opaque[idx][1][i] = io_mem_opaque[memory]; 3225 mmio->region_offset[idx][1][i] = region_offset; 3092 3226 } 3093 3227 } … … 3098 3232 3099 3233 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 3100 ram_addr_t orig_memory )3234 ram_addr_t orig_memory, ram_addr_t region_offset) 3101 3235 { 3102 3236 subpage_t *mmio; … … 3104 3238 3105 3239 mmio = qemu_mallocz(sizeof(subpage_t)); 3106 if (mmio != NULL) { 3107 3108 3240 3241 mmio->base = base; 3242 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio); 3109 3243 #if defined(DEBUG_SUBPAGE) 3110 3111 3112 #endif 3113 3114 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);3115 }3244 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__, 3245 mmio, base, TARGET_PAGE_SIZE, subpage_memory); 3246 #endif 3247 *phys = subpage_memory | IO_MEM_SUBPAGE; 3248 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory, 3249 region_offset); 3116 3250 3117 3251 return mmio; 3118 3252 } 3119 3253 3254 static int get_free_io_mem_idx(void) 3255 { 3256 int i; 3257 3258 for (i = 0; i<IO_MEM_NB_ENTRIES; i++) 3259 if (!io_mem_used[i]) { 3260 io_mem_used[i] = 1; 3261 return i; 3262 } 3263 3264 return -1; 3265 } 3266 3120 3267 static void io_mem_init(void) 3121 3268 { 3269 int i; 3270 3122 3271 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL); 3123 3272 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); 3124 3273 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL); 3125 io_mem_nb = 5; 3274 for (i=0; i<5; i++) 3275 io_mem_used[i] = 1; 3126 3276 3127 3277 io_mem_watch = cpu_register_io_memory(0, watch_mem_read, … … 3151 3301 3152 3302 if (io_index <= 0) { 3153 i f (io_mem_nb >= IO_MEM_NB_ENTRIES)3154 return -1;3155 io_index = io_mem_nb++;3303 io_index = get_free_io_mem_idx(); 3304 if (io_index == -1) 3305 return io_index; 3156 3306 } else { 3157 3307 if (io_index >= IO_MEM_NB_ENTRIES) … … 3167 3317 io_mem_opaque[io_index] = opaque; 3168 3318 return (io_index << IO_MEM_SHIFT) | subwidth; 3319 } 3320 3321 void cpu_unregister_io_memory(int io_table_address) 3322 { 3323 int i; 3324 int io_index = io_table_address >> IO_MEM_SHIFT; 3325 3326 for (i=0;i < 3; i++) { 3327 io_mem_read[io_index][i] = unassigned_mem_read[i]; 3328 io_mem_write[io_index][i] = unassigned_mem_write[i]; 3329 } 3330 io_mem_opaque[io_index] = NULL; 3331 io_mem_used[io_index] = 0; 3169 3332 } 3170 3333 … … 3248 3411 if (is_write) { 3249 3412 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3413 target_phys_addr_t addr1 = addr; 3250 3414 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3415 if (p) 3416 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3251 3417 /* XXX: could force cpu_single_env to NULL to avoid 3252 3418 potential 
bugs */ 3253 if (l >= 4 && ((addr & 3) == 0)) {3419 if (l >= 4 && ((addr1 & 3) == 0)) { 3254 3420 /* 32 bit write access */ 3255 3421 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) … … 3258 3424 val = *(const uint32_t *)buf; 3259 3425 #endif 3260 io_mem_write[io_index][2](io_mem_opaque[io_index], addr , val);3426 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val); 3261 3427 l = 4; 3262 } else if (l >= 2 && ((addr & 1) == 0)) {3428 } else if (l >= 2 && ((addr1 & 1) == 0)) { 3263 3429 /* 16 bit write access */ 3264 3430 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) … … 3267 3433 val = *(const uint16_t *)buf; 3268 3434 #endif 3269 io_mem_write[io_index][1](io_mem_opaque[io_index], addr , val);3435 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val); 3270 3436 l = 2; 3271 3437 } else { … … 3276 3442 val = *(const uint8_t *)buf; 3277 3443 #endif 3278 io_mem_write[io_index][0](io_mem_opaque[io_index], addr , val);3444 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val); 3279 3445 l = 1; 3280 3446 } … … 3303 3469 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 3304 3470 !(pd & IO_MEM_ROMD)) { 3471 target_phys_addr_t addr1 = addr; 3305 3472 /* I/O case */ 3306 3473 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3307 if (l >= 4 && ((addr & 3) == 0)) { 3474 if (p) 3475 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3476 if (l >= 4 && ((addr1 & 3) == 0)) { 3308 3477 /* 32 bit read access */ 3309 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr );3478 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1); 3310 3479 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 3311 3480 stl_p(buf, val); … … 3314 3483 #endif 3315 3484 l = 4; 3316 } else if (l >= 2 && ((addr & 1) == 0)) {3485 } else if (l >= 2 && ((addr1 & 1) == 0)) { 3317 3486 /* 16 bit read access */ 3318 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr );3487 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1); 3319 3488 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 3320 3489 stw_p(buf, val); … … 3325 3494 } else { 3326 3495 /* 8 bit read access */ 3327 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr );3496 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1); 3328 3497 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 3329 3498 stb_p(buf, val); … … 3351 3520 3352 3521 #ifndef VBOX 3522 3353 3523 /* used for ROM loading : can write in RAM and ROM */ 3354 3524 void cpu_physical_memory_write_rom(target_phys_addr_t addr, … … 3389 3559 } 3390 3560 } 3561 3562 typedef struct { 3563 void *buffer; 3564 target_phys_addr_t addr; 3565 target_phys_addr_t len; 3566 } BounceBuffer; 3567 3568 static BounceBuffer bounce; 3569 3570 typedef struct MapClient { 3571 void *opaque; 3572 void (*callback)(void *opaque); 3573 LIST_ENTRY(MapClient) link; 3574 } MapClient; 3575 3576 static LIST_HEAD(map_client_list, MapClient) map_client_list 3577 = LIST_HEAD_INITIALIZER(map_client_list); 3578 3579 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) 3580 { 3581 MapClient *client = qemu_malloc(sizeof(*client)); 3582 3583 client->opaque = opaque; 3584 client->callback = callback; 3585 LIST_INSERT_HEAD(&map_client_list, client, link); 3586 return client; 3587 } 3588 3589 void cpu_unregister_map_client(void *_client) 3590 { 3591 MapClient *client = (MapClient *)_client; 3592 3593 LIST_REMOVE(client, link); 3594 } 3595 3596 static void cpu_notify_map_clients(void) 3597 { 3598 MapClient *client; 3599 3600 while 
(!LIST_EMPTY(&map_client_list)) { 3601 client = LIST_FIRST(&map_client_list); 3602 client->callback(client->opaque); 3603 LIST_REMOVE(client, link); 3604 } 3605 } 3606 3607 /* Map a physical memory region into a host virtual address. 3608 * May map a subset of the requested range, given by and returned in *plen. 3609 * May return NULL if resources needed to perform the mapping are exhausted. 3610 * Use only for reads OR writes - not for read-modify-write operations. 3611 * Use cpu_register_map_client() to know when retrying the map operation is 3612 * likely to succeed. 3613 */ 3614 void *cpu_physical_memory_map(target_phys_addr_t addr, 3615 target_phys_addr_t *plen, 3616 int is_write) 3617 { 3618 target_phys_addr_t len = *plen; 3619 target_phys_addr_t done = 0; 3620 int l; 3621 uint8_t *ret = NULL; 3622 uint8_t *ptr; 3623 target_phys_addr_t page; 3624 unsigned long pd; 3625 PhysPageDesc *p; 3626 unsigned long addr1; 3627 3628 while (len > 0) { 3629 page = addr & TARGET_PAGE_MASK; 3630 l = (page + TARGET_PAGE_SIZE) - addr; 3631 if (l > len) 3632 l = len; 3633 p = phys_page_find(page >> TARGET_PAGE_BITS); 3634 if (!p) { 3635 pd = IO_MEM_UNASSIGNED; 3636 } else { 3637 pd = p->phys_offset; 3638 } 3639 3640 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3641 if (done || bounce.buffer) { 3642 break; 3643 } 3644 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); 3645 bounce.addr = addr; 3646 bounce.len = l; 3647 if (!is_write) { 3648 cpu_physical_memory_rw(addr, bounce.buffer, l, 0); 3649 } 3650 ptr = bounce.buffer; 3651 } else { 3652 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3653 ptr = phys_ram_base + addr1; 3654 } 3655 if (!done) { 3656 ret = ptr; 3657 } else if (ret + done != ptr) { 3658 break; 3659 } 3660 3661 len -= l; 3662 addr += l; 3663 done += l; 3664 } 3665 *plen = done; 3666 return ret; 3667 } 3668 3669 /* Unmaps a memory region previously mapped by cpu_physical_memory_map(). 3670 * Will also mark the memory as dirty if is_write == 1. access_len gives 3671 * the amount of memory that was actually read or written by the caller. 
3672 */ 3673 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, 3674 int is_write, target_phys_addr_t access_len) 3675 { 3676 if (buffer != bounce.buffer) { 3677 if (is_write) { 3678 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base; 3679 while (access_len) { 3680 unsigned l; 3681 l = TARGET_PAGE_SIZE; 3682 if (l > access_len) 3683 l = access_len; 3684 if (!cpu_physical_memory_is_dirty(addr1)) { 3685 /* invalidate code */ 3686 tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 3687 /* set dirty bit */ 3688 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 3689 (0xff & ~CODE_DIRTY_FLAG); 3690 } 3691 addr1 += l; 3692 access_len -= l; 3693 } 3694 } 3695 return; 3696 } 3697 if (is_write) { 3698 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len); 3699 } 3700 qemu_free(bounce.buffer); 3701 bounce.buffer = NULL; 3702 cpu_notify_map_clients(); 3703 } 3704 3391 3705 #endif /* !VBOX */ 3392 3393 3706 3394 3707 /* warning: addr must be aligned */ … … 3412 3725 /* I/O case */ 3413 3726 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3727 if (p) 3728 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3414 3729 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); 3415 3730 } else { … … 3446 3761 /* I/O case */ 3447 3762 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3763 if (p) 3764 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3448 3765 #ifdef TARGET_WORDS_BIGENDIAN 3449 3766 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32; … … 3501 3818 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3502 3819 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3820 if (p) 3821 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3503 3822 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 3504 3823 } else { … … 3510 3829 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr); 3511 3830 #endif 3831 3512 3832 #ifndef VBOX 3513 3833 if (unlikely(in_migration)) { … … 3540 3860 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3541 3861 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3862 if (p) 3863 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3542 3864 #ifdef TARGET_WORDS_BIGENDIAN 3543 3865 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32); … … 3575 3897 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3576 3898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3899 if (p) 3900 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 3577 3901 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 3578 3902 } else { -
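The hunks above (QEMU's exec.c) replace the fixed-size breakpoint and watchpoint arrays with BSD-style tail queues, keyed by flag bits such as BP_GDB so GDB-injected entries stay at the head and bulk removal can match on a mask. A minimal, self-contained sketch of that list discipline; the types and flag values are simplified stand-ins, not the QEMU definitions:

```c
/* Sketch of the tail-queue discipline used by the new
 * cpu_breakpoint_* / cpu_watchpoint_* code; illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#ifndef TAILQ_FOREACH_SAFE  /* glibc's sys/queue.h lacks the _SAFE variant */
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)          \
    for ((var) = TAILQ_FIRST(head);                         \
         (var) && ((tvar) = TAILQ_NEXT(var, field), 1);     \
         (var) = (tvar))
#endif

#define BP_GDB 0x10   /* illustrative flag bits */
#define BP_CPU 0x20

typedef struct Breakpoint {
    unsigned long pc;
    int flags;
    TAILQ_ENTRY(Breakpoint) entry;
} Breakpoint;

typedef struct {
    TAILQ_HEAD(, Breakpoint) breakpoints;
} Env;

/* GDB-injected entries go to the head so lookups find them first. */
static void bp_insert(Env *env, unsigned long pc, int flags)
{
    Breakpoint *bp = malloc(sizeof(*bp));
    bp->pc = pc;
    bp->flags = flags;
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
}

/* Drop every breakpoint whose flags intersect 'mask'. */
static void bp_remove_all(Env *env, int mask)
{
    Breakpoint *bp, *next;
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            TAILQ_REMOVE(&env->breakpoints, bp, entry);
            free(bp);
        }
    }
}

int main(void)
{
    Env env;
    Breakpoint *bp;

    TAILQ_INIT(&env.breakpoints);
    bp_insert(&env, 0x1000, BP_CPU);
    bp_insert(&env, 0x2000, BP_GDB);   /* lands at the head */
    TAILQ_FOREACH(bp, &env.breakpoints, entry)
        printf("bp at %#lx, flags %#x\n", bp->pc, bp->flags);
    bp_remove_all(&env, BP_GDB | BP_CPU);
    return 0;
}
```

The _SAFE iterator is what lets the remove-all paths unlink the current element mid-walk; a plain TAILQ_FOREACH would step through freed memory.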
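The same file also threads a region_offset through PhysPageDesc and the subpage tables, so an I/O callback now receives the offset from the start of its registered region rather than a raw guest-physical address. The address arithmetic, reduced to a standalone example; the TARGET_PAGE_BITS value and the struct layout here are assumptions for illustration:

```c
/* How a guest-physical address becomes the offset handed to an MMIO
 * callback once each page records a region_offset.  Sketch only. */
#include <stdio.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

typedef struct {
    uint32_t phys_offset;    /* io_index in the low bits */
    uint32_t region_offset;  /* offset of this page inside its region */
} PhysPageDesc;

/* Callbacks used to receive the raw physical address; with
 * region_offset they receive "offset within the registered region". */
static uint32_t mmio_addr(const PhysPageDesc *p, uint32_t addr)
{
    return (addr & ~TARGET_PAGE_MASK) + p->region_offset;
}

int main(void)
{
    /* third page of a device region registered at 0xE0000000 */
    PhysPageDesc page2 = { 0 /* io index */, 2 * TARGET_PAGE_SIZE };
    printf("callback sees %#x\n", mmio_addr(&page2, 0xE0002004));
    /* -> 0x2004: page offset 0x004 plus region_offset 0x2000 */
    return 0;
}
```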
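Finally, the new cpu_physical_memory_map()/cpu_physical_memory_unmap() pair hands callers a direct host pointer for RAM-backed ranges and falls back to a single bounce buffer for everything else, with cpu_register_map_client() as the retry hook when that buffer is busy. A toy model of the fallback, not the QEMU implementation:

```c
/* Toy model: direct pointers for "RAM", one shared bounce buffer for
 * everything else, so only one non-RAM mapping can be live at a time. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RAM_SIZE 4096
static unsigned char ram[RAM_SIZE];
static struct { void *buffer; unsigned addr, len; } bounce;

static void *mem_map(unsigned addr, unsigned *plen, int is_write)
{
    if (addr < RAM_SIZE) {                    /* RAM: map directly */
        if (addr + *plen > RAM_SIZE)
            *plen = RAM_SIZE - addr;          /* may shorten the mapping */
        return ram + addr;
    }
    if (bounce.buffer)                        /* bounce buffer busy */
        return NULL;                          /* caller retries later */
    bounce.buffer = malloc(*plen);
    bounce.addr = addr;
    bounce.len = *plen;
    if (!is_write)
        memset(bounce.buffer, 0, *plen);      /* stand-in for a device read */
    return bounce.buffer;
}

static void mem_unmap(void *buf, unsigned len, int is_write)
{
    (void)len;
    if (buf == bounce.buffer) {
        if (is_write) {
            /* a real implementation would copy back to the device here */
        }
        free(bounce.buffer);
        bounce.buffer = NULL;                 /* wake any waiting clients */
    }
    /* direct RAM mappings need only dirty tracking, no copy-back */
}

int main(void)
{
    unsigned len = 16;
    void *p = mem_map(0x100, &len, 1);
    memcpy(p, "hello", 6);
    mem_unmap(p, len, 1);
    printf("ram[0x100..] = %s\n", (char *)ram + 0x100);
    return 0;
}
```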
trunk/src/recompiler/fpu/softfloat-macros.h
r36140 r36170 718 718 719 719 } 720 -
trunk/src/recompiler/fpu/softfloat-native.c
r36140 r36170 61 61 #endif 62 62 63 #if defined(_ _powerpc__)63 #if defined(_ARCH_PPC) 64 64 65 65 /* correct (but slow) PowerPC rint() (glibc version is incorrect) */ 66 double qemu_rint(double x)66 static double qemu_rint(double x) 67 67 { 68 68 double y = 4503599627370496.0; … … 230 230 { 231 231 if (a < b) { 232 return -1;232 return float_relation_less; 233 233 } else if (a == b) { 234 return 0;234 return float_relation_equal; 235 235 } else if (a > b) { 236 return 1;237 } else { 238 return 2;236 return float_relation_greater; 237 } else { 238 return float_relation_unordered; 239 239 } 240 240 } … … 242 242 { 243 243 if (isless(a, b)) { 244 return -1;244 return float_relation_less; 245 245 } else if (a == b) { 246 return 0;246 return float_relation_equal; 247 247 } else if (isgreater(a, b)) { 248 return 1;249 } else { 250 return 2;248 return float_relation_greater; 249 } else { 250 return float_relation_unordered; 251 251 } 252 252 } … … 258 258 a = u.i; 259 259 return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); 260 } 261 262 int float32_is_nan( float32 a1 ) 263 { 264 float32u u; 265 uint64_t a; 266 u.f = a1; 267 a = u.i; 268 return ( 0xFF800000 < ( a<<1 ) ); 260 269 } 261 270 … … 392 401 { 393 402 if (a < b) { 394 return -1;403 return float_relation_less; 395 404 } else if (a == b) { 396 return 0;405 return float_relation_equal; 397 406 } else if (a > b) { 398 return 1;399 } else { 400 return 2;407 return float_relation_greater; 408 } else { 409 return float_relation_unordered; 401 410 } 402 411 } … … 404 413 { 405 414 if (isless(a, b)) { 406 return -1;415 return float_relation_less; 407 416 } else if (a == b) { 408 return 0;417 return float_relation_equal; 409 418 } else if (isgreater(a, b)) { 410 return 1;411 } else { 412 return 2;419 return float_relation_greater; 420 } else { 421 return float_relation_unordered; 413 422 } 414 423 } … … 432 441 a = u.i; 433 442 434 return ( LIT64( 0xFF E0000000000000 ) < (bits64) ( a<<1 ) );443 return ( LIT64( 0xFFF0000000000000 ) < (bits64) ( a<<1 ) ); 435 444 436 445 } … … 484 493 { 485 494 if (a < b) { 486 return -1;495 return float_relation_less; 487 496 } else if (a == b) { 488 return 0;497 return float_relation_equal; 489 498 } else if (a > b) { 490 return 1;491 } else { 492 return 2;499 return float_relation_greater; 500 } else { 501 return float_relation_unordered; 493 502 } 494 503 } … … 496 505 { 497 506 if (isless(a, b)) { 498 return -1;507 return float_relation_less; 499 508 } else if (a == b) { 500 return 0;509 return float_relation_equal; 501 510 } else if (isgreater(a, b)) { 502 return 1;503 } else { 504 return 2;511 return float_relation_greater; 512 } else { 513 return float_relation_unordered; 505 514 } 506 515 } 507 516 int floatx80_is_signaling_nan( floatx80 a1) 517 { 518 floatx80u u; 519 uint64_t aLow; 520 u.f = a1; 521 522 aLow = u.i.low & ~ LIT64( 0x4000000000000000 ); 523 return 524 ( ( u.i.high & 0x7FFF ) == 0x7FFF ) 525 && (bits64) ( aLow<<1 ) 526 && ( u.i.low == aLow ); 527 } 528 529 int floatx80_is_nan( floatx80 a1 ) 508 530 { 509 531 floatx80u u; -
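In the softfloat-native.c hunks above, the comparison helpers stop returning bare -1/0/1/2 and use the named float_relation_* constants instead; the values are unchanged, only the call sites become self-documenting. A sketch of the quiet-compare pattern, assuming the softfloat numbering (less = -1, equal = 0, greater = 1, unordered = 2):

```c
/* Quiet comparison returning the named relations. */
#include <stdio.h>
#include <math.h>

enum {
    float_relation_less      = -1,
    float_relation_equal     =  0,
    float_relation_greater   =  1,
    float_relation_unordered =  2
};

/* isless()/isgreater() do not raise "invalid" on quiet NaNs, which is
 * why the _quiet variants use them instead of plain < and >. */
static int compare_quiet(double a, double b)
{
    if (isless(a, b))
        return float_relation_less;
    else if (a == b)
        return float_relation_equal;
    else if (isgreater(a, b))
        return float_relation_greater;
    else
        return float_relation_unordered;
}

int main(void)
{
    printf("%d %d %d %d\n",
           compare_quiet(1.0, 2.0),   /* -1 */
           compare_quiet(1.0, 1.0),   /*  0 */
           compare_quiet(2.0, 1.0),   /*  1 */
           compare_quiet(NAN, 1.0));  /*  2 */
    return 0;
}
```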
trunk/src/recompiler/fpu/softfloat-native.h
r36140 r36170 9 9 #endif 10 10 11 #ifdef __OpenBSD__ 12 /* Get OpenBSD version number */ 11 #if defined(__OpenBSD__) || defined(__NetBSD__) 13 12 #include <sys/param.h> 14 13 #endif … … 36 35 #endif 37 36 37 #ifdef __NetBSD__ 38 #ifndef isgreater 39 #define isgreater(x, y) __builtin_isgreater(x, y) 40 #endif 41 #ifndef isgreaterequal 42 #define isgreaterequal(x, y) __builtin_isgreaterequal(x, y) 43 #endif 44 #ifndef isless 45 #define isless(x, y) __builtin_isless(x, y) 46 #endif 47 #ifndef islessequal 48 #define islessequal(x, y) __builtin_islessequal(x, y) 49 #endif 50 #ifndef isunordered 51 #define isunordered(x, y) __builtin_isunordered(x, y) 52 #endif 53 #endif 54 55 38 56 #define isnormal(x) (fpclass(x) >= FP_NZERO) 39 57 #define isgreater(x, y) ((!unordered(x, y)) && ((x) > (y))) … … 123 141 124 142 typedef struct float_status { 125 signed charfloat_rounding_mode;126 #ifdef FLOATX80 127 signed charfloatx80_rounding_precision;143 int float_rounding_mode; 144 #ifdef FLOATX80 145 int floatx80_rounding_precision; 128 146 #endif 129 147 } float_status; … … 229 247 int float32_compare_quiet( float32, float32 STATUS_PARAM ); 230 248 int float32_is_signaling_nan( float32 ); 249 int float32_is_nan( float32 ); 231 250 232 251 INLINE float32 float32_abs(float32 a) … … 238 257 { 239 258 return -a; 259 } 260 261 INLINE float32 float32_is_infinity(float32 a) 262 { 263 return fpclassify(a) == FP_INFINITE; 264 } 265 266 INLINE float32 float32_is_neg(float32 a) 267 { 268 float32u u; 269 u.f = a; 270 return u.i >> 31; 271 } 272 273 INLINE float32 float32_is_zero(float32 a) 274 { 275 return fpclassify(a) == FP_ZERO; 240 276 } 241 277 … … 332 368 } 333 369 370 INLINE float64 float64_is_infinity(float64 a) 371 { 372 return fpclassify(a) == FP_INFINITE; 373 } 374 375 INLINE float64 float64_is_neg(float64 a) 376 { 377 float64u u; 378 u.f = a; 379 return u.i >> 63; 380 } 381 382 INLINE float64 float64_is_zero(float64 a) 383 { 384 return fpclassify(a) == FP_ZERO; 385 } 386 334 387 INLINE float64 float64_scalbn(float64 a, int n) 335 388 { … … 407 460 int floatx80_compare_quiet( floatx80, floatx80 STATUS_PARAM ); 408 461 int floatx80_is_signaling_nan( floatx80 ); 462 int floatx80_is_nan( floatx80 ); 409 463 410 464 INLINE floatx80 floatx80_abs(floatx80 a) … … 418 472 } 419 473 474 INLINE floatx80 floatx80_is_infinity(floatx80 a) 475 { 476 return fpclassify(a) == FP_INFINITE; 477 } 478 479 INLINE floatx80 floatx80_is_neg(floatx80 a) 480 { 481 floatx80u u; 482 u.f = a; 483 return u.i.high >> 15; 484 } 485 486 INLINE floatx80 floatx80_is_zero(floatx80 a) 487 { 488 return fpclassify(a) == FP_ZERO; 489 } 490 420 491 INLINE floatx80 floatx80_scalbn(floatx80 a, int n) 421 492 { -
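softfloat-native.h gains classification helpers built from fpclassify() and a sign-bit union. A standalone rendition (the my_ prefix is added here so the sketch compiles on its own; the diff's versions are the same in spirit):

```c
#include <stdio.h>
#include <stdint.h>
#include <math.h>

typedef union { float f; uint32_t i; } float32u;

static int my_float32_is_infinity(float a) { return fpclassify(a) == FP_INFINITE; }
static int my_float32_is_zero(float a)     { return fpclassify(a) == FP_ZERO; }

static int my_float32_is_neg(float a)
{
    float32u u = { a };
    return u.i >> 31;    /* raw sign bit: correct for -0.0 and NaNs too */
}

int main(void)
{
    printf("%d %d %d\n",
           my_float32_is_infinity(INFINITY),  /* 1 */
           my_float32_is_zero(-0.0f),         /* 1 */
           my_float32_is_neg(-0.0f));         /* 1, whereas (-0.0f < 0) is 0 */
    return 0;
}
```

Reading the sign bit from the raw representation, rather than comparing against zero, is what makes the negative test right for -0.0 and negative NaNs.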
trunk/src/recompiler/fpu/softfloat-specialize.h
r36140 r36170 38 38 39 39 /*---------------------------------------------------------------------------- 40 | Underflow tininess-detection mode, statically initialized to default value.41 | (The declaration in `softfloat.h' must match the `int8' type here.)42 *----------------------------------------------------------------------------*/43 int8 float_detect_tininess = float_tininess_after_rounding;44 45 /*----------------------------------------------------------------------------46 40 | Raises the exceptions specified by `flags'. Floating-point traps can be 47 41 | defined here if desired. It is currently not possible for such a trap … … 68 62 #if defined(TARGET_SPARC) 69 63 #define float32_default_nan make_float32(0x7FFFFFFF) 70 #elif defined(TARGET_POWERPC) 64 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM) 71 65 #define float32_default_nan make_float32(0x7FC00000) 72 66 #elif defined(TARGET_HPPA) … … 150 144 flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; 151 145 bits32 av, bv, res; 146 147 if ( STATUS(default_nan_mode) ) 148 return float32_default_nan; 152 149 153 150 aIsNaN = float32_is_nan( a ); … … 193 190 #if defined(TARGET_SPARC) 194 191 #define float64_default_nan make_float64(LIT64( 0x7FFFFFFFFFFFFFFF )) 195 #elif defined(TARGET_POWERPC) 192 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM) 196 193 #define float64_default_nan make_float64(LIT64( 0x7FF8000000000000 )) 197 194 #elif defined(TARGET_HPPA) … … 282 279 flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; 283 280 bits64 av, bv, res; 281 282 if ( STATUS(default_nan_mode) ) 283 return float64_default_nan; 284 284 285 285 aIsNaN = float64_is_nan( a ); … … 419 419 flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; 420 420 421 if ( STATUS(default_nan_mode) ) { 422 a.low = floatx80_default_nan_low; 423 a.high = floatx80_default_nan_high; 424 return a; 425 } 426 421 427 aIsNaN = floatx80_is_nan( a ); 422 428 aIsSignalingNaN = floatx80_is_signaling_nan( a ); … … 539 545 flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN; 540 546 547 if ( STATUS(default_nan_mode) ) { 548 a.low = float128_default_nan_low; 549 a.high = float128_default_nan_high; 550 return a; 551 } 552 541 553 aIsNaN = float128_is_nan( a ); 542 554 aIsSignalingNaN = float128_is_signaling_nan( a ); -
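The softfloat-specialize.h hunks insert an early default_nan_mode check into each propagate*NaN() helper: when the flag is set, any NaN operand produces the architecture's default NaN instead of a propagated payload. A deliberately simplified model; the real helpers also rank signaling against quiet operands, and 0x7FC00000 is the float32 default NaN the diff assigns to ARM and PowerPC:

```c
#include <stdio.h>
#include <stdint.h>

#define FLOAT32_DEFAULT_NAN 0x7FC00000u

struct float_status { int default_nan_mode; };

static uint32_t propagate_nan(uint32_t a, uint32_t b, struct float_status *st)
{
    if (st->default_nan_mode)           /* the check the diff inserts */
        return FLOAT32_DEFAULT_NAN;
    /* otherwise keep a's payload if a is a NaN, quieting it
     * (simplified choice rule for illustration) */
    return ((a << 1) > 0xFF000000u ? a : b) | 0x00400000u;
}

int main(void)
{
    struct float_status st = { 1 };
    uint32_t snan = 0x7F800001u;        /* signaling NaN, payload 1 */
    printf("%#x\n", propagate_nan(snan, 0, &st));  /* 0x7fc00000 */
    st.default_nan_mode = 0;
    printf("%#x\n", propagate_nan(snan, 0, &st));  /* 0x7fc00001 */
    return 0;
}
```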
trunk/src/recompiler/fpu/softfloat.c
r36140 r36170 31 31 =============================================================================*/ 32 32 33 /* FIXME: Flush-To-Zero only effects results. Denormal inputs should also 34 be flushed to zero. */ 33 35 #include "softfloat.h" 34 36 … … 295 297 } 296 298 if ( zExp < 0 ) { 299 if ( STATUS(flush_to_zero) ) return packFloat32( zSign, 0, 0 ); 297 300 isTiny = 298 301 ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) … … 458 461 } 459 462 if ( zExp < 0 ) { 463 if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 ); 460 464 isTiny = 461 465 ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) … … 636 640 } 637 641 if ( zExp <= 0 ) { 642 if ( STATUS(flush_to_zero) ) return packFloatx80( zSign, 0, 0 ); 638 643 isTiny = 639 644 ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) … … 966 971 } 967 972 if ( zExp < 0 ) { 973 if ( STATUS(flush_to_zero) ) return packFloat128( zSign, 0, 0, 0 ); 968 974 isTiny = 969 975 ( STATUS(float_detect_tininess) == float_tininess_before_rounding ) … … 1638 1644 return a; 1639 1645 } 1640 if ( aExp == 0 ) return packFloat32( zSign, 0, ( aSig + bSig )>>6 ); 1646 if ( aExp == 0 ) { 1647 if ( STATUS(flush_to_zero) ) return packFloat32( zSign, 0, 0 ); 1648 return packFloat32( zSign, 0, ( aSig + bSig )>>6 ); 1649 } 1641 1650 zSig = 0x40000000 + aSig + bSig; 1642 1651 zExp = aExp; … … 2046 2055 return roundAndPackFloat32( 0, zExp, zSig STATUS_VAR ); 2047 2056 2057 } 2058 2059 /*---------------------------------------------------------------------------- 2060 | Returns the binary log of the single-precision floating-point value `a'. 2061 | The operation is performed according to the IEC/IEEE Standard for Binary 2062 | Floating-Point Arithmetic. 2063 *----------------------------------------------------------------------------*/ 2064 float32 float32_log2( float32 a STATUS_PARAM ) 2065 { 2066 flag aSign, zSign; 2067 int16 aExp; 2068 bits32 aSig, zSig, i; 2069 2070 aSig = extractFloat32Frac( a ); 2071 aExp = extractFloat32Exp( a ); 2072 aSign = extractFloat32Sign( a ); 2073 2074 if ( aExp == 0 ) { 2075 if ( aSig == 0 ) return packFloat32( 1, 0xFF, 0 ); 2076 normalizeFloat32Subnormal( aSig, &aExp, &aSig ); 2077 } 2078 if ( aSign ) { 2079 float_raise( float_flag_invalid STATUS_VAR); 2080 return float32_default_nan; 2081 } 2082 if ( aExp == 0xFF ) { 2083 if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR ); 2084 return a; 2085 } 2086 2087 aExp -= 0x7F; 2088 aSig |= 0x00800000; 2089 zSign = aExp < 0; 2090 zSig = aExp << 23; 2091 2092 for (i = 1 << 22; i > 0; i >>= 1) { 2093 aSig = ( (bits64)aSig * aSig ) >> 23; 2094 if ( aSig & 0x01000000 ) { 2095 aSig >>= 1; 2096 zSig |= i; 2097 } 2098 } 2099 2100 if ( zSign ) 2101 zSig = -zSig; 2102 2103 return normalizeRoundAndPackFloat32( zSign, 0x85, zSig STATUS_VAR ); 2048 2104 } 2049 2105 … … 2596 2652 return a; 2597 2653 } 2598 if ( aExp == 0 ) return packFloat64( zSign, 0, ( aSig + bSig )>>9 ); 2654 if ( aExp == 0 ) { 2655 if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 ); 2656 return packFloat64( zSign, 0, ( aSig + bSig )>>9 ); 2657 } 2599 2658 zSig = LIT64( 0x4000000000000000 ) + aSig + bSig; 2600 2659 zExp = aExp; … … 2992 3051 return roundAndPackFloat64( 0, zExp, zSig STATUS_VAR ); 2993 3052 3053 } 3054 3055 /*---------------------------------------------------------------------------- 3056 | Returns the binary log of the double-precision floating-point value `a'. 
3057 | The operation is performed according to the IEC/IEEE Standard for Binary 3058 | Floating-Point Arithmetic. 3059 *----------------------------------------------------------------------------*/ 3060 float64 float64_log2( float64 a STATUS_PARAM ) 3061 { 3062 flag aSign, zSign; 3063 int16 aExp; 3064 bits64 aSig, aSig0, aSig1, zSig, i; 3065 3066 aSig = extractFloat64Frac( a ); 3067 aExp = extractFloat64Exp( a ); 3068 aSign = extractFloat64Sign( a ); 3069 3070 if ( aExp == 0 ) { 3071 if ( aSig == 0 ) return packFloat64( 1, 0x7FF, 0 ); 3072 normalizeFloat64Subnormal( aSig, &aExp, &aSig ); 3073 } 3074 if ( aSign ) { 3075 float_raise( float_flag_invalid STATUS_VAR); 3076 return float64_default_nan; 3077 } 3078 if ( aExp == 0x7FF ) { 3079 if ( aSig ) return propagateFloat64NaN( a, float64_zero STATUS_VAR ); 3080 return a; 3081 } 3082 3083 aExp -= 0x3FF; 3084 aSig |= LIT64( 0x0010000000000000 ); 3085 zSign = aExp < 0; 3086 zSig = (bits64)aExp << 52; 3087 for (i = 1LL << 51; i > 0; i >>= 1) { 3088 mul64To128( aSig, aSig, &aSig0, &aSig1 ); 3089 aSig = ( aSig0 << 12 ) | ( aSig1 >> 52 ); 3090 if ( aSig & LIT64( 0x0020000000000000 ) ) { 3091 aSig >>= 1; 3092 zSig |= i; 3093 } 3094 } 3095 3096 if ( zSign ) 3097 zSig = -zSig; 3098 return normalizeRoundAndPackFloat64( zSign, 0x408, zSig STATUS_VAR ); 2994 3099 } 2995 3100 … … 4598 4703 } 4599 4704 add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 ); 4600 if ( aExp == 0 ) return packFloat128( zSign, 0, zSig0, zSig1 ); 4705 if ( aExp == 0 ) { 4706 if ( STATUS(flush_to_zero) ) return packFloat128( zSign, 0, 0, 0 ); 4707 return packFloat128( zSign, 0, zSig0, zSig1 ); 4708 } 4601 4709 zSig2 = 0; 4602 4710 zSig0 |= LIT64( 0x0002000000000000 ); … … 5480 5588 return a; 5481 5589 } 5482 aExp += n; 5483 return roundAndPackFloat32( aSign, aExp, aSig STATUS_VAR ); 5590 if ( aExp != 0 ) 5591 aSig |= 0x00800000; 5592 else if ( aSig == 0 ) 5593 return a; 5594 5595 aExp += n - 1; 5596 aSig <<= 7; 5597 return normalizeRoundAndPackFloat32( aSign, aExp, aSig STATUS_VAR ); 5484 5598 } 5485 5599 … … 5497 5611 return a; 5498 5612 } 5499 aExp += n; 5500 return roundAndPackFloat64( aSign, aExp, aSig STATUS_VAR ); 5613 if ( aExp != 0 ) 5614 aSig |= LIT64( 0x0010000000000000 ); 5615 else if ( aSig == 0 ) 5616 return a; 5617 5618 aExp += n - 1; 5619 aSig <<= 10; 5620 return normalizeRoundAndPackFloat64( aSign, aExp, aSig STATUS_VAR ); 5501 5621 } 5502 5622 … … 5515 5635 return a; 5516 5636 } 5637 if (aExp == 0 && aSig == 0) 5638 return a; 5639 5517 5640 aExp += n; 5518 return roundAndPackFloatx80( STATUS(floatx80_rounding_precision),5519 aSign, aExp, aSig, 0 STATUS_VAR );5641 return normalizeRoundAndPackFloatx80( STATUS(floatx80_rounding_precision), 5642 aSign, aExp, aSig, 0 STATUS_VAR ); 5520 5643 } 5521 5644 #endif … … 5535 5658 return a; 5536 5659 } 5537 aExp += n; 5538 return roundAndPackFloat128( aSign, aExp, aSig0, aSig1, 0 STATUS_VAR ); 5660 if ( aExp != 0 ) 5661 aSig0 |= LIT64( 0x0001000000000000 ); 5662 else if ( aSig0 == 0 && aSig1 == 0 ) 5663 return a; 5664 5665 aExp += n - 1; 5666 return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1 5667 STATUS_VAR ); 5539 5668 5540 5669 } -
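The new float32_log2()/float64_log2() in softfloat.c extract fractional result bits by repeated squaring of the mantissa: squaring doubles the logarithm, so landing in [2,4) means the next fractional bit is 1. The same scheme on a Q23 fixed-point mantissa, as a standalone sketch (compile with -lm for the libm comparison):

```c
#include <stdio.h>
#include <stdint.h>
#include <math.h>

static double log2_by_squaring(double x)   /* requires 1.0 <= x < 2.0 */
{
    uint64_t sig = (uint64_t)(x * (1 << 23));   /* Q23 mantissa */
    uint32_t frac = 0;

    for (uint32_t bit = 1u << 22; bit != 0; bit >>= 1) {
        sig = (sig * sig) >> 23;      /* square, staying in Q23 */
        if (sig & (1u << 24)) {       /* result reached [2,4)? */
            sig >>= 1;                /* renormalize to [1,2) */
            frac |= bit;              /* emit a 1 bit */
        }
    }
    return frac / (double)(1 << 23);
}

int main(void)
{
    printf("log2(1.5) ~ %.6f (libm says %.6f)\n",
           log2_by_squaring(1.5), log2(1.5));
    return 0;
}
```

The neighboring scalbn changes are related bookkeeping: the helpers now OR in the implicit bit and go through the normalize-round-pack path, so subnormal inputs and overflowing results are handled instead of the exponent simply being bumped by n.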
trunk/src/recompiler/fpu/softfloat.h
r36140 r36170 55 55 typedef uint8_t uint8; 56 56 typedef int8_t int8; 57 #ifndef _AIX 57 58 typedef int uint16; 58 59 typedef int int16; 60 #endif 59 61 typedef unsigned int uint32; 60 62 typedef signed int int32; … … 197 199 signed char floatx80_rounding_precision; 198 200 #endif 201 flag flush_to_zero; 202 flag default_nan_mode; 199 203 } float_status; 200 204 201 205 void set_float_rounding_mode(int val STATUS_PARAM); 202 206 void set_float_exception_flags(int val STATUS_PARAM); 207 INLINE void set_flush_to_zero(flag val STATUS_PARAM) 208 { 209 STATUS(flush_to_zero) = val; 210 } 211 INLINE void set_default_nan_mode(flag val STATUS_PARAM) 212 { 213 STATUS(default_nan_mode) = val; 214 } 203 215 INLINE int get_float_exception_flags(float_status *status) 204 216 { … … 266 278 float32 float32_rem( float32, float32 STATUS_PARAM ); 267 279 float32 float32_sqrt( float32 STATUS_PARAM ); 280 float32 float32_log2( float32 STATUS_PARAM ); 268 281 int float32_eq( float32, float32 STATUS_PARAM ); 269 282 int float32_le( float32, float32 STATUS_PARAM ); … … 288 301 } 289 302 303 INLINE int float32_is_infinity(float32 a) 304 { 305 return (float32_val(a) & 0x7fffffff) == 0x7f800000; 306 } 307 308 INLINE int float32_is_neg(float32 a) 309 { 310 return float32_val(a) >> 31; 311 } 312 313 INLINE int float32_is_zero(float32 a) 314 { 315 return (float32_val(a) & 0x7fffffff) == 0; 316 } 317 290 318 #define float32_zero make_float32(0) 319 #define float32_one make_float32(0x3f800000) 291 320 292 321 /*---------------------------------------------------------------------------- … … 320 349 float64 float64_rem( float64, float64 STATUS_PARAM ); 321 350 float64 float64_sqrt( float64 STATUS_PARAM ); 351 float64 float64_log2( float64 STATUS_PARAM ); 322 352 int float64_eq( float64, float64 STATUS_PARAM ); 323 353 int float64_le( float64, float64 STATUS_PARAM ); … … 342 372 } 343 373 374 INLINE int float64_is_infinity(float64 a) 375 { 376 return (float64_val(a) & 0x7fffffffffffffffLL ) == 0x7ff0000000000000LL; 377 } 378 379 INLINE int float64_is_neg(float64 a) 380 { 381 return float64_val(a) >> 63; 382 } 383 384 INLINE int float64_is_zero(float64 a) 385 { 386 return (float64_val(a) & 0x7fffffffffffffffLL) == 0; 387 } 388 344 389 #define float64_zero make_float64(0) 390 #define float64_one make_float64(0x3ff0000000000000LL) 345 391 346 392 #ifdef FLOATX80 … … 389 435 a.high ^= 0x8000; 390 436 return a; 437 } 438 439 INLINE int floatx80_is_infinity(floatx80 a) 440 { 441 return (a.high & 0x7fff) == 0x7fff && a.low == 0; 442 } 443 444 INLINE int floatx80_is_neg(floatx80 a) 445 { 446 return a.high >> 15; 447 } 448 449 INLINE int floatx80_is_zero(floatx80 a) 450 { 451 return (a.high & 0x7fff) == 0 && a.low == 0; 391 452 } 392 453 … … 442 503 } 443 504 505 INLINE int float128_is_infinity(float128 a) 506 { 507 return (a.high & 0x7fffffffffffffffLL) == 0x7fff000000000000LL && a.low == 0; 508 } 509 510 INLINE int float128_is_neg(float128 a) 511 { 512 return a.high >> 63; 513 } 514 515 INLINE int float128_is_zero(float128 a) 516 { 517 return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0; 518 } 519 444 520 #endif 445 521 -
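softfloat.h adds the same is_infinity/is_neg/is_zero trio, but as pure bit-pattern tests on the boxed representation, since pure softfloat never trusts the host FPU (contrast the fpclassify()-based versions in softfloat-native.h above). The float32 tests restated as a standalone program:

```c
#include <stdio.h>
#include <stdint.h>

static int f32_is_infinity(uint32_t bits) { return (bits & 0x7fffffff) == 0x7f800000; }
static int f32_is_neg(uint32_t bits)      { return bits >> 31; }
static int f32_is_zero(uint32_t bits)     { return (bits & 0x7fffffff) == 0; }

int main(void)
{
    printf("%d %d %d\n",
           f32_is_infinity(0xff800000),   /* -inf -> 1 */
           f32_is_neg(0x80000000),        /* -0.0 -> 1 */
           f32_is_zero(0x80000000));      /* -0.0 -> 1 */
    return 0;
}
```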
trunk/src/recompiler/gen-icount.h
r36125 r36170 6 6 static inline void gen_icount_start(void) 7 7 { 8 TCGv count;8 TCGv_i32 count; 9 9 10 10 if (!use_icount) … … 16 16 we allow the target to supply a convenient register temporary. */ 17 17 #ifndef ICOUNT_TEMP 18 count = tcg_temp_local_new(TCG_TYPE_I32);18 count = tcg_temp_local_new_i32(); 19 19 #else 20 20 count = ICOUNT_TEMP; … … 28 28 tcg_gen_st16_i32(count, cpu_env, offsetof(CPUState, icount_decr.u16.low)); 29 29 #ifndef ICOUNT_TEMP 30 tcg_temp_free(count);30 tcg_temp_free_i32(count); 31 31 #endif 32 32 } … … 41 41 } 42 42 43 inline static void gen_io_start(void) 43 static inline void gen_io_start(void) 44 44 { 45 TCGv tmp = tcg_const_i32(1);45 TCGv_i32 tmp = tcg_const_i32(1); 46 46 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, can_do_io)); 47 tcg_temp_free(tmp);47 tcg_temp_free_i32(tmp); 48 48 } 49 49 50 50 static inline void gen_io_end(void) 51 51 { 52 TCGv tmp = tcg_const_i32(0);52 TCGv_i32 tmp = tcg_const_i32(0); 53 53 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, can_do_io)); 54 tcg_temp_free(tmp);54 tcg_temp_free_i32(tmp); 55 55 } 56 -
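The gen-icount.h changes track TCG's move from a single generic TCGv handle to width-specific TCGv_i32/TCGv_i64 types with matching _new/_free functions; QEMU implements those as one-field structs precisely so the compiler rejects width mix-ups. The idea boiled down, with stand-in names rather than the tcg.h definitions:

```c
#include <stdio.h>

typedef struct { int idx; } val_i32;
typedef struct { int idx; } val_i64;

static val_i32 temp_new_i32(void) { val_i32 v = { 1 }; return v; }
static void st_i32(val_i32 v)     { printf("store i32 temp %d\n", v.idx); }

int main(void)
{
    val_i32 t = temp_new_i32();
    st_i32(t);
    /* val_i64 u = t;   -- rejected by the compiler, which is the point;
       with one shared handle type this mix-up slipped through silently */
    return 0;
}
```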
trunk/src/recompiler/hostregs_helper.h
r36140 r36170 1 1 /* 2 * Save/restore host registers. 3 3 * 4 4 * Copyright (c) 2007 CodeSourcery … 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 28 28 */ 29 29 30 /* The GCC global register vairable extension is used to reserve some 31 host registers for use by dyngen. However only the core parts of the 32 translation engine are compiled with these settings. We must manually 30 /* The GCC global register variable extension is used to reserve some 31 host registers for use by generated code. However only the core parts of 32 the translation engine are compiled with these settings. We must manually 33 33 save/restore these registers when called from regular code. 34 34 It is not sufficient to save/restore T0 et. al. as these may be declared -
trunk/src/recompiler/osdep.h
r36140 r36170 87 87 #define likely(x) __builtin_expect(!!(x), 1) 88 88 #define unlikely(x) __builtin_expect(!!(x), 0) 89 #endif /* !likely */89 #endif 90 90 91 91 #ifndef offsetof … … 114 114 #else 115 115 #define always_inline __attribute__ (( always_inline )) __inline__ 116 #ifdef __OPTIMIZE__ 116 117 #define inline always_inline 118 #endif 117 119 #endif 118 120 #else … … 130 132 #endif 131 133 132 #if defined (__GNUC__) && defined (__GNUC_MINOR_ )134 #if defined (__GNUC__) && defined (__GNUC_MINOR__) 133 135 # define QEMU_GNUC_PREREQ(maj, min) \ 134 136 ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) … … 156 158 #define qemu_gettimeofday(tp) gettimeofday(tp, NULL); 157 159 #endif /* !_WIN32 */ 158 #endif /* !VBOX */ 159 160 #ifdef VBOX 161 /** @todo why don't we go with dyngen-exec.h here? */ 162 #define FORCE_RET() ; 160 #else /* VBOX */ 161 # define qemu_memalign(alignment, size) ( (alignment) <= PAGE_SIZE ? RTMemPageAlloc((size)) : NULL ) 163 162 #endif /* VBOX */ 164 163 -
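Among the osdep.h cleanups, note that the old guard tested the misspelled __GNUC_MINOR_ (one trailing underscore), so QEMU_GNUC_PREREQ silently expanded to a constant falsehood on every compiler; the hunk restores __GNUC_MINOR__. The macro itself packs (major, minor) into a single integer so one comparison suffices:

```c
#include <stdio.h>

#if defined(__GNUC__) && defined(__GNUC_MINOR__)
# define QEMU_GNUC_PREREQ(maj, min) \
    ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
#else
# define QEMU_GNUC_PREREQ(maj, min) 0
#endif

int main(void)
{
    /* e.g. gcc 4.3 evaluates (4 << 16) + 3 against (3 << 16) + 4 */
    printf("compiler >= gcc 3.4: %d\n", QEMU_GNUC_PREREQ(3, 4));
    return 0;
}
```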
trunk/src/recompiler/qemu-common.h
r36125 r36170 7 7 # include <string.h> 8 8 # include <inttypes.h> 9 # include <iprt/ctype.h> 10 11 #define QEMU_NORETURN __attribute__ ((__noreturn__)) 9 12 10 13 void pstrcpy(char *buf, int buf_size, const char *str); … … 12 15 # define snprintf RTStrPrintf 13 16 17 #define qemu_isalnum(c) RT_C_IS_ALNUM((unsigned char)(c)) 18 #define qemu_isalpha(c) RT_C_IS_ALPHA((unsigned char)(c)) 19 #define qemu_iscntrl(c) RT_C_IS_CNTRL((unsigned char)(c)) 20 #define qemu_isdigit(c) RT_C_IS_DIGIT((unsigned char)(c)) 21 #define qemu_isgraph(c) RT_C_IS_GRAPH((unsigned char)(c)) 22 #define qemu_islower(c) RT_C_IS_LOWER((unsigned char)(c)) 23 #define qemu_isprint(c) RT_C_IS_PRINT((unsigned char)(c)) 24 #define qemu_ispunct(c) RT_C_IS_PUNCT((unsigned char)(c)) 25 #define qemu_isspace(c) RT_C_IS_SPACE((unsigned char)(c)) 26 #define qemu_isupper(c) RT_C_IS_UPPER((unsigned char)(c)) 27 #define qemu_isxdigit(c) RT_C_IS_XDIGIT((unsigned char)(c)) 28 #define qemu_tolower(c) RT_C_TO_LOWER((unsigned char)(c)) 29 #define qemu_toupper(c) RT_C_TO_UPPER((unsigned char)(c)) 30 #define qemu_isascii(c) RT_C_IS_ASCII((unsigned char)(c)) 31 #define qemu_toascii(c) RT_C_TO_ASCII((unsigned char)(c)) 32 14 33 #else /* !VBOX */ 34 #ifdef _WIN32 35 #define WIN32_LEAN_AND_MEAN 36 #define WINVER 0x0501 /* needed for ipv6 bits */ 37 #include <windows.h> 38 #endif 39 40 #define QEMU_NORETURN __attribute__ ((__noreturn__)) 41 42 /* Hack around the mess dyngen-exec.h causes: We need QEMU_NORETURN in files that 43 cannot include the following headers without conflicts. This condition has 44 to be removed once dyngen is gone. */ 45 #ifndef __DYNGEN_EXEC_H__ 46 15 47 /* we put basic includes here to avoid repeating them in device drivers */ 16 48 #include <stdlib.h> … … 18 50 #include <stdarg.h> 19 51 #include <string.h> 52 #include <strings.h> 20 53 #include <inttypes.h> 21 54 #include <limits.h> … … 26 59 #include <fcntl.h> 27 60 #include <sys/stat.h> 61 #include "config-host.h" 28 62 29 63 #ifndef O_LARGEFILE … … 38 72 #endif 39 73 74 #ifndef HAVE_IOVEC 75 #define HAVE_IOVEC 76 struct iovec { 77 void *iov_base; 78 size_t iov_len; 79 }; 80 #else 81 #include <sys/uio.h> 82 #endif 83 40 84 #ifdef _WIN32 41 #define WIN32_LEAN_AND_MEAN42 #include <windows.h>43 85 #define fsync _commit 44 86 #define lseek _lseeki64 … … 63 105 #ifndef NEED_CPU_H 64 106 65 #include "config-host.h"66 107 #include <setjmp.h> 67 108 #include "osdep.h" … … 81 122 QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque); 82 123 void qemu_bh_schedule(QEMUBH *bh); 124 /* Bottom halfs that are scheduled from a bottom half handler are instantly 125 * invoked. This can create an infinite loop if a bottom half handler 126 * schedules itself. qemu_bh_schedule_idle() avoids this infinite loop by 127 * ensuring that the bottom half isn't executed until the next main loop 128 * iteration. 
129 */ 130 void qemu_bh_schedule_idle(QEMUBH *bh); 83 131 void qemu_bh_cancel(QEMUBH *bh); 84 132 void qemu_bh_delete(QEMUBH *bh); … … 96 144 int stristart(const char *str, const char *val, const char **ptr); 97 145 time_t mktimegm(struct tm *tm); 146 int qemu_fls(int i); 147 148 #define qemu_isalnum(c) isalnum((unsigned char)(c)) 149 #define qemu_isalpha(c) isalpha((unsigned char)(c)) 150 #define qemu_iscntrl(c) iscntrl((unsigned char)(c)) 151 #define qemu_isdigit(c) isdigit((unsigned char)(c)) 152 #define qemu_isgraph(c) isgraph((unsigned char)(c)) 153 #define qemu_islower(c) islower((unsigned char)(c)) 154 #define qemu_isprint(c) isprint((unsigned char)(c)) 155 #define qemu_ispunct(c) ispunct((unsigned char)(c)) 156 #define qemu_isspace(c) isspace((unsigned char)(c)) 157 #define qemu_isupper(c) isupper((unsigned char)(c)) 158 #define qemu_isxdigit(c) isxdigit((unsigned char)(c)) 159 #define qemu_tolower(c) tolower((unsigned char)(c)) 160 #define qemu_toupper(c) toupper((unsigned char)(c)) 161 #define qemu_isascii(c) isascii((unsigned char)(c)) 162 #define qemu_toascii(c) toascii((unsigned char)(c)) 98 163 99 164 void *qemu_malloc(size_t size); … … 102 167 void qemu_free(void *ptr); 103 168 char *qemu_strdup(const char *str); 169 char *qemu_strndup(const char *str, size_t size); 104 170 105 171 void *get_mmap_addr(unsigned long size); … … 108 174 /* Error handling. */ 109 175 110 void hw_error(const char *fmt, ...) 111 __attribute__ ((__format__ (__printf__, 1, 2))) 112 __attribute__ ((__noreturn__)); 176 void QEMU_NORETURN hw_error(const char *fmt, ...) 177 __attribute__ ((__format__ (__printf__, 1, 2))); 113 178 114 179 /* IO callbacks. */ … … 131 196 typedef struct BlockDriverState BlockDriverState; 132 197 typedef struct DisplayState DisplayState; 198 typedef struct DisplayChangeListener DisplayChangeListener; 199 typedef struct DisplaySurface DisplaySurface; 200 typedef struct PixelFormat PixelFormat; 133 201 typedef struct TextConsole TextConsole; 134 202 typedef TextConsole QEMUConsole; … … 152 220 /* Force QEMU to stop what it's doing and service IO */ 153 221 void qemu_service_io(void); 222 223 typedef struct QEMUIOVector { 224 struct iovec *iov; 225 int niov; 226 int nalloc; 227 size_t size; 228 } QEMUIOVector; 229 230 void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint); 231 void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len); 232 void qemu_iovec_destroy(QEMUIOVector *qiov); 233 void qemu_iovec_reset(QEMUIOVector *qiov); 234 void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf); 235 void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count); 236 237 #endif /* dyngen-exec.h hack */ 238 154 239 #endif /* !VBOX */ 155 240 -
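Both branches of the qemu_is* block above, the VBox RT_C_IS_* one and the upstream ctype one, cast their argument to unsigned char before classifying it. That cast is load-bearing: passing a plain char that happens to be negative to isalpha() and friends is undefined behaviour in C. A small check (the byte value is just an example):

    #include <ctype.h>
    #include <stdio.h>

    #define qemu_isalpha(c) isalpha((unsigned char)(c))

    int main(void)
    {
        /* 0xe9 is 'e'-acute in Latin-1.  Where plain char is signed this
           stores -23, and isalpha(-23) is undefined behaviour; the cast
           re-maps it to 233, a valid unsigned-char argument. */
        char c = (char)0xe9;
        printf("as unsigned char: %d\n", (int)(unsigned char)c);
        printf("qemu_isalpha: %d\n", qemu_isalpha(c) != 0);
        return 0;
    }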
trunk/src/recompiler/qemu-lock.h
r36140 r36170 14 14 * You should have received a copy of the GNU Lesser General Public 15 15 * License along with this library; if not, write to the Free Software 16 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 17 */ 18 18 … … 73 73 return ASMAtomicCmpXchgU32((volatile uint32_t *)p, 1, 0) ? 0 : 1; 74 74 } 75 #elif defined(_ _powerpc__)75 #elif defined(_ARCH_PPC) 76 76 static inline int testandset (int *p) 77 77 { 78 78 int ret; 79 79 __asm__ __volatile__ ( 80 " 0:lwarx %0,0,%1\n"80 " lwarx %0,0,%1\n" 81 81 " xor. %0,%3,%0\n" 82 " bne 1f\n"82 " bne $+12\n" 83 83 " stwcx. %2,0,%1\n" 84 " bne- 0b\n" 85 "1: " 84 " bne- $-16\n" 86 85 : "=&r" (ret) 87 86 : "r" (p), "r" (1), "r" (0) -
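Every architecture branch in this file implements the same testandset() contract: atomically store 1 into *p and return the previous value, so 0 means the caller took the lock. The rewritten PowerPC asm only swaps local labels for $+12/$-16 relative branches. A portable equivalent using GCC's __sync builtins, shown to pin down the contract rather than as a replacement for the per-architecture asm the file deliberately uses:

    #include <stdio.h>

    static int testandset(volatile int *p)
    {
        return __sync_lock_test_and_set(p, 1);  /* returns the old value */
    }

    static void spin_lock(volatile int *p)
    {
        while (testandset(p))
            ;                                   /* busy-wait until we saw 0 */
    }

    static void spin_unlock(volatile int *p)
    {
        __sync_lock_release(p);                 /* store 0, release semantics */
    }

    int main(void)
    {
        volatile int lock = 0;
        spin_lock(&lock);
        printf("held: lock=%d\n", lock);
        spin_unlock(&lock);
        printf("released: lock=%d\n", lock);
        return 0;
    }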
trunk/src/recompiler/qemu-log.h
r29520 r36170 2 2 #define QEMU_LOG_H 3 3 4 /* The deprecated global variables: */ 4 5 extern FILE *logfile; 5 6 extern int loglevel; 6 7 8 9 /* 10 * The new API: 11 * 12 */ 13 14 /* Log settings checking macros: */ 15 16 /* Returns true if qemu_log() will really write somewhere 17 */ 18 #define qemu_log_enabled() (logfile != NULL) 19 20 /* Returns true if a bit is set in the current loglevel mask 21 */ 22 #define qemu_loglevel_mask(b) ((loglevel & (b)) != 0) 23 24 25 /* Logging functions: */ 26 27 /* main logging function 28 */ 29 #define qemu_log(...) do { \ 30 if (logfile) \ 31 fprintf(logfile, ## __VA_ARGS__); \ 32 } while (0) 33 34 /* vfprintf-like logging function 35 */ 36 #define qemu_log_vprintf(fmt, va) do { \ 37 if (logfile) \ 38 vfprintf(logfile, fmt, va); \ 39 } while (0) 40 41 /* log only if a bit is set on the current loglevel mask 42 */ 43 #define qemu_log_mask(b, ...) do { \ 44 if (loglevel & (b)) \ 45 fprintf(logfile, ## __VA_ARGS__); \ 46 } while (0) 47 48 49 50 51 /* Special cases: */ 52 53 /* cpu_dump_state() logging functions: */ 54 #define log_cpu_state(env, f) cpu_dump_state((env), logfile, fprintf, (f)); 55 #define log_cpu_state_mask(b, env, f) do { \ 56 if (loglevel & (b)) log_cpu_state((env), (f)); \ 57 } while (0) 58 59 /* disas() and target_disas() to logfile: */ 60 #define log_target_disas(start, len, flags) \ 61 target_disas(logfile, (start), (len), (flags)) 62 #define log_disas(start, len) \ 63 disas(logfile, (start), (len)) 64 65 /* page_dump() output to the log file: */ 66 #define log_page_dump() page_dump(logfile) 67 68 69 70 /* Maintenance: */ 71 72 /* fflush() the log file */ 73 #define qemu_log_flush() fflush(logfile) 74 75 /* Close the log file */ 76 #define qemu_log_close() do { \ 77 fclose(logfile); \ 78 logfile = NULL; \ 79 } while (0) 80 81 /* Set up a new log file */ 82 #define qemu_log_set_file(f) do { \ 83 logfile = (f); \ 84 } while (0) 85 86 /* Set up a new log file, only if none is set */ 87 #define qemu_log_try_set_file(f) do { \ 88 if (!logfile) \ 89 logfile = (f); \ 90 } while (0) 91 92 7 93 #endif -
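The point of the "new API" block is that callers stop touching logfile/loglevel directly and go through the macros, which is what lets sites such as op_helper.c (later in this changeset) shrink from if (loglevel & CPU_LOG_INT) fprintf(logfile, ...) into one qemu_log_mask() line. A self-contained mock of the call pattern; the CPU_LOG_INT value here is illustrative, the real one lives in cpu-all.h:

    #include <stdio.h>

    static FILE *logfile;
    static int loglevel;

    #define CPU_LOG_INT (1 << 4)   /* illustrative value for this mock */

    #define qemu_log_enabled()    (logfile != NULL)
    #define qemu_loglevel_mask(b) ((loglevel & (b)) != 0)
    #define qemu_log(...)         do { if (logfile) fprintf(logfile, __VA_ARGS__); } while (0)
    /* Like the header, this trusts that a set loglevel implies an open logfile. */
    #define qemu_log_mask(b, ...) do { if (loglevel & (b)) fprintf(logfile, __VA_ARGS__); } while (0)

    int main(void)
    {
        logfile  = stderr;        /* i.e. qemu_log_set_file(stderr) */
        loglevel = CPU_LOG_INT;

        qemu_log("always written once a logfile is set, rc=%d\n", 0);
        qemu_log_mask(CPU_LOG_INT, "written only while CPU_LOG_INT is enabled\n");
        if (qemu_log_enabled() && qemu_loglevel_mask(CPU_LOG_INT))
            qemu_log("both checks pass\n");
        return 0;
    }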
trunk/src/recompiler/softmmu_exec.h
r36140 r36170 1 1 /* Common softmmu definitions and inline routines. */ 2 2 3 /* XXX: find something cleaner. 4 * Furthermore, this is false for 64 bits targets 5 */ 3 6 #define ldul_user ldl_user 4 7 #define ldul_kernel ldl_kernel -
trunk/src/recompiler/softmmu_header.h
r36140 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 -
trunk/src/recompiler/softmmu_template.h
r36125 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 85 85 } 86 86 87 env->mem_io_vaddr = addr; 87 88 #if SHIFT <= 2 88 89 res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr); -
trunk/src/recompiler/target-i386/cpu.h
r36140 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 179 179 #define HF_EM_MASK (1 << HF_EM_SHIFT) 180 180 #define HF_TS_MASK (1 << HF_TS_SHIFT) 181 #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) 181 182 #define HF_LMA_MASK (1 << HF_LMA_SHIFT) 182 183 #define HF_CS64_MASK (1 << HF_CS64_SHIFT) 183 184 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) 185 #define HF_VM_MASK (1 << HF_VM_SHIFT) 184 186 #define HF_SMM_MASK (1 << HF_SMM_SHIFT) 185 187 #define HF_SVME_MASK (1 << HF_SVME_SHIFT) … … 197 199 #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) 198 200 #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) 201 202 #define CR0_PE_SHIFT 0 203 #define CR0_MP_SHIFT 1 199 204 200 205 #define CR0_PE_MASK (1 << 0) … … 216 221 #define CR4_PGE_MASK (1 << 7) 217 222 #define CR4_PCE_MASK (1 << 8) 218 #define CR4_OSFXSR_MASK (1 << 9) 223 #define CR4_OSFXSR_SHIFT 9 224 #define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT) 219 225 #define CR4_OSXMMEXCPT_MASK (1 << 10) 226 227 #define DR6_BD (1 << 13) 228 #define DR6_BS (1 << 14) 229 #define DR6_BT (1 << 15) 230 #define DR6_FIXED_1 0xffff0ff0 231 232 #define DR7_GD (1 << 13) 233 #define DR7_TYPE_SHIFT 16 234 #define DR7_LEN_SHIFT 18 235 #define DR7_FIXED_1 0x00000400 220 236 221 237 #define PG_PRESENT_BIT 0 … … 249 265 #define PG_ERROR_I_D_MASK 0x10 250 266 267 #define MSR_IA32_TSC 0x10 251 268 #define MSR_IA32_APICBASE 0x1b 252 269 #define MSR_IA32_APICBASE_BSP (1<<8) … … 254 271 #define MSR_IA32_APICBASE_BASE (0xfffff<<12) 255 272 273 #define MSR_MTRRcap 0xfe 274 #define MSR_MTRRcap_VCNT 8 275 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) 276 #define MSR_MTRRcap_WC_SUPPORTED (1 << 10) 277 256 278 #define MSR_IA32_SYSENTER_CS 0x174 257 279 #define MSR_IA32_SYSENTER_ESP 0x175 … … 264 286 #define MSR_IA32_PERF_STATUS 0x198 265 287 288 #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) 289 #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) 290 291 #define MSR_MTRRfix64K_00000 0x250 292 #define MSR_MTRRfix16K_80000 0x258 293 #define MSR_MTRRfix16K_A0000 0x259 294 #define MSR_MTRRfix4K_C0000 0x268 295 #define MSR_MTRRfix4K_C8000 0x269 296 #define MSR_MTRRfix4K_D0000 0x26a 297 #define MSR_MTRRfix4K_D8000 0x26b 298 #define MSR_MTRRfix4K_E0000 0x26c 299 #define MSR_MTRRfix4K_E8000 0x26d 300 #define MSR_MTRRfix4K_F0000 0x26e 301 #define MSR_MTRRfix4K_F8000 0x26f 302 266 303 #define MSR_PAT 0x277 304 305 #define MSR_MTRRdefType 0x2ff 267 306 268 307 #define MSR_EFER 0xc0000080 … … 380 419 381 420 #define EXCP00_DIVZ 0 382 #define EXCP01_ SSTP1421 #define EXCP01_DB 1 383 422 #define EXCP02_NMI 2 384 423 #define EXCP03_INT3 3 … … 618 657 #endif 619 658 659 uint64_t tsc; 660 620 661 uint64_t pat; 621 662 … … 625 666 target_ulong exception_next_eip; 626 667 target_ulong dr[8]; /* debug registers */ 668 union { 669 CPUBreakpoint *cpu_breakpoint[4]; 670 CPUWatchpoint *cpu_watchpoint[4]; 671 }; /* break/watchpoints for dr[0..3] */ 627 672 uint32_t smbase; 628 673 int old_exception; /* exception in flight */ … … 662 707 663 708 #ifndef VBOX 709 /* MTRRs */ 710 uint64_t mtrr_fixed[11]; 711 uint64_t mtrr_deftype; 712 struct { 713 uint64_t base; 714 uint64_t mask; 715 } mtrr_var[8]; 716 664 717 #ifdef USE_KQEMU 665 718 int kqemu_enabled; 666 719 int last_io_time; 667 720 #endif 721 722 /* For KVM */ 723 uint64_t 
interrupt_bitmap[256 / 64]; 724 668 725 /* in order to simplify APIC support, we leave this pointer to the 669 726 user */ … … 874 931 } 875 932 933 /* op_helper.c */ 876 934 /* used for debug or cpu save/restore */ 877 935 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f); 878 936 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper); 879 937 938 /* cpu-exec.c */ 880 939 /* the following helpers are only usable in user mode simulation as 881 940 they can trigger unexpected exceptions */ … … 889 948 int cpu_x86_signal_handler(int host_signum, void *pinfo, 890 949 void *puc); 950 951 /* helper.c */ 952 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 953 int is_write, int mmu_idx, int is_softmmu); 891 954 void cpu_x86_set_a20(CPUX86State *env, int a20_state); 892 893 uint64_t cpu_get_tsc(CPUX86State *env); 894 955 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 956 uint32_t *eax, uint32_t *ebx, 957 uint32_t *ecx, uint32_t *edx); 958 959 static inline int hw_breakpoint_enabled(unsigned long dr7, int index) 960 { 961 return (dr7 >> (index * 2)) & 3; 962 } 963 964 static inline int hw_breakpoint_type(unsigned long dr7, int index) 965 { 966 return (dr7 >> (DR7_TYPE_SHIFT + (index * 2))) & 3; 967 } 968 969 static inline int hw_breakpoint_len(unsigned long dr7, int index) 970 { 971 int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 2))) & 3); 972 return (len == 2) ? 8 : len + 1; 973 } 974 975 void hw_breakpoint_insert(CPUX86State *env, int index); 976 void hw_breakpoint_remove(CPUX86State *env, int index); 977 int check_hw_breakpoints(CPUX86State *env, int force_dr6_update); 978 979 /* will be suppressed */ 980 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); 981 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); 982 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); 983 984 /* hw/apic.c */ 895 985 void cpu_set_apic_base(CPUX86State *env, uint64_t val); 896 986 uint64_t cpu_get_apic_base(CPUX86State *env); … … 899 989 uint8_t cpu_get_apic_tpr(CPUX86State *env); 900 990 #endif 901 #ifdef VBOX 902 int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue); 903 int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue); 904 #endif 991 992 /* hw/pc.c */ 905 993 void cpu_smm_update(CPUX86State *env); 906 907 /* will be suppressed */ 908 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); 994 uint64_t cpu_get_tsc(CPUX86State *env); 909 995 910 996 /* used to debug */ … … 922 1008 923 1009 #ifdef VBOX 1010 int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue); 1011 int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue); 924 1012 void cpu_trap_raw(CPUX86State *env1); 925 1013 … … 948 1036 #define cpu_list x86_cpu_list 949 1037 950 #define CPU_SAVE_VERSION 71038 #define CPU_SAVE_VERSION 8 951 1039 952 1040 /* MMU modes definitions */ … … 959 1047 } 960 1048 1049 /* translate.c */ 961 1050 void optimize_flags_init(void); 962 1051 … … 965 1054 int (*compute_c)(void); /* return the C flag */ 966 1055 } CCTable; 967 968 extern CCTable cc_table[];969 1056 970 1057 #if defined(CONFIG_USER_ONLY) … … 977 1064 #endif 978 1065 979 #define CPU_PC_FROM_TB(env, tb) env->eip = tb->pc - tb->cs_base980 981 1066 #include "cpu-all.h" 1067 #include "exec-all.h" 982 1068 983 1069 #include "svm.h" 984 1070 1071 static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb) 1072 { 1073 env->eip = tb->pc - tb->cs_base; 1074 } 1075 1076 static inline void cpu_get_tb_cpu_state(CPUState 
*env, target_ulong *pc, 1077 target_ulong *cs_base, int *flags) 1078 { 1079 *cs_base = env->segs[R_CS].base; 1080 *pc = *cs_base + env->eip; 1081 *flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 1082 } 1083 985 1084 #endif /* CPU_I386_H */ -
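The hw_breakpoint_enabled/type/len helpers added above decode one debug-register slot out of DR7: two L/G enable bits per slot at the bottom of the register, then 2-bit R/W and LEN fields starting at bits 16 and 18. The LEN encoding is the non-obvious part: 00/01/11 mean 1/2/4 bytes and 10 means 8, hence the (len == 2) ? 8 : len + 1 expression. A worked decode for slot 0, where the field positions are unambiguous:

    #include <stdio.h>

    #define DR7_TYPE_SHIFT 16
    #define DR7_LEN_SHIFT  18

    int main(void)
    {
        /* Enable a global 4-byte data-write breakpoint in slot 0:
           G0 = bit 1, TYPE0 = 01 (write), LEN0 = 11 (4 bytes). */
        unsigned long dr7 = (1UL << 1) | (1UL << DR7_TYPE_SHIFT)
                          | (3UL << DR7_LEN_SHIFT);

        int enabled = (dr7 >> (0 * 2)) & 3;                  /* hw_breakpoint_enabled */
        int type    = (dr7 >> (DR7_TYPE_SHIFT + 0 * 2)) & 3; /* hw_breakpoint_type    */
        int len     = (dr7 >> (DR7_LEN_SHIFT  + 0 * 2)) & 3;
        len = (len == 2) ? 8 : len + 1;                      /* hw_breakpoint_len     */

        printf("slot0: enabled=%d type=%d len=%d bytes\n", enabled, type, len);
        /* Prints: slot0: enabled=2 type=1 len=4 bytes */
        return 0;
    }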
trunk/src/recompiler/target-i386/exec.h
r36140 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 42 42 register struct CPUX86State *env asm(AREG0); 43 43 44 #include "qemu-common.h" 44 45 #include "qemu-log.h" 45 46 … … 68 69 #include "exec-all.h" 69 70 70 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); 71 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); 72 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 73 int is_write, int mmu_idx, int is_softmmu); 74 void __hidden cpu_lock(void); 75 void __hidden cpu_unlock(void); 71 /* op_helper.c */ 76 72 void do_interrupt(int intno, int is_int, int error_code, 77 73 target_ulong next_eip, int is_hw); 78 74 void do_interrupt_user(int intno, int is_int, int error_code, 79 75 target_ulong next_eip); 80 void raise_interrupt(int intno, int is_int, int error_code, 81 int next_eip_addend); 82 void raise_exception_err(int exception_index, int error_code); 83 void raise_exception(int exception_index); 76 void QEMU_NORETURN raise_exception_err(int exception_index, int error_code); 77 void QEMU_NORETURN raise_exception(int exception_index); 84 78 void do_smm_enter(void); 85 void __hidden cpu_loop_exit(void);86 87 void OPPROTO op_movl_eflags_T0(void);88 void OPPROTO op_movl_T0_eflags(void);89 #ifdef VBOX90 void OPPROTO op_movl_T0_eflags_vme(void);91 void OPPROTO op_movw_eflags_T0_vme(void);92 void OPPROTO op_cli_vme(void);93 void OPPROTO op_sti_vme(void);94 #endif95 79 96 80 /* n must be a constant to be efficient */ … … 331 315 #define FPUC_EM 0x3f 332 316 333 extern const CPU86_LDouble f15rk[7];334 335 void fpu_raise_exception(void);336 void restore_native_fp_state(CPUState *env);337 void save_native_fp_state(CPUState *env);338 339 extern const uint8_t parity_table[256];340 extern const uint8_t rclw_table[32];341 extern const uint8_t rclb_table[32];342 343 317 static inline uint32_t compute_eflags(void) 344 318 { 345 return env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);319 return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK); 346 320 } 347 321 -
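compute_eflags() now calls helper_cc_compute_all(CC_OP) directly instead of indirecting through the removed cc_table[].compute_all function pointer; either way the underlying idea is lazy flag evaluation: record the last ALU operation and its operands, and only reconstruct EFLAGS when something actually reads them. A toy version of the scheme, computing just ZF for a single op kind, where the real helper derives all arithmetic flags for dozens of CC_OP values:

    #include <stdint.h>
    #include <stdio.h>

    enum { CC_OP_ADDL };
    static int cc_op;
    static uint32_t cc_src, cc_dst;   /* operand and result of the last op */

    #define CC_Z 0x0040               /* ZF is bit 6 of EFLAGS */

    static uint32_t cc_compute_all(int op)
    {
        switch (op) {
        case CC_OP_ADDL:
            return (cc_dst == 0) ? CC_Z : 0;  /* sketch: ZF only */
        default:
            return 0;
        }
    }

    int main(void)
    {
        uint32_t a = 5, b = (uint32_t)-5;
        cc_dst = a + b;        /* emulated ADD records its result...   */
        cc_src = b;            /* ...and one source operand, no flags  */
        cc_op  = CC_OP_ADDL;
        printf("ZF set: %d\n", (cc_compute_all(cc_op) & CC_Z) != 0);
        return 0;
    }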
trunk/src/recompiler/target-i386/helper.c
r36140 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 33 33 #include <string.h> 34 34 #ifndef VBOX 35 # 36 # 37 # 38 #endif 35 #include <inttypes.h> 36 #include <signal.h> 37 #include <assert.h> 38 #endif /* !VBOX */ 39 39 40 40 #include "cpu.h" 41 41 #include "exec-all.h" 42 #include "svm.h"43 42 #include "qemu-common.h" 43 #include "kvm.h" 44 44 45 45 //#define DEBUG_MMU 46 47 static int cpu_x86_register (CPUX86State *env, const char *cpu_model);48 46 49 47 #ifndef VBOX … … 71 69 static const char *ext2_feature_name[] = { 72 70 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 73 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mt tr", "pge", "mca", "cmov",71 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov", 74 72 "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx", 75 73 "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow", … … 105 103 } 106 104 #endif /* !VBOX */ 107 108 #ifndef VBOX109 CPUX86State *cpu_x86_init(const char *cpu_model)110 #else111 CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)112 #endif113 {114 #ifndef VBOX115 CPUX86State *env;116 #endif117 static int inited;118 119 #ifndef VBOX120 env = qemu_mallocz(sizeof(CPUX86State));121 if (!env)122 return NULL;123 #endif124 cpu_exec_init(env);125 env->cpu_model_str = cpu_model;126 127 /* init various static tables */128 if (!inited) {129 inited = 1;130 optimize_flags_init();131 }132 if (cpu_x86_register(env, cpu_model) < 0) {133 cpu_x86_close(env);134 return NULL;135 }136 cpu_reset(env);137 #ifdef USE_KQEMU138 kqemu_init(env);139 #endif140 return env;141 }142 105 143 106 typedef struct x86_def_t { … … 188 151 .xlevel = 0x8000000A, 189 152 .model_id = "QEMU Virtual CPU version " QEMU_VERSION, 153 }, 154 { 155 .name = "phenom", 156 .level = 5, 157 .vendor1 = CPUID_VENDOR_AMD_1, 158 .vendor2 = CPUID_VENDOR_AMD_2, 159 .vendor3 = CPUID_VENDOR_AMD_3, 160 .family = 16, 161 .model = 2, 162 .stepping = 3, 163 /* Missing: CPUID_VME, CPUID_HT */ 164 .features = PPRO_FEATURES | 165 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 166 CPUID_PSE36, 167 /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */ 168 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 169 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 170 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 171 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 172 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 173 CPUID_EXT2_FFXSR, 174 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 175 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 176 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 177 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 178 .ext3_features = CPUID_EXT3_SVM, 179 .xlevel = 0x8000001A, 180 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 190 181 }, 191 182 { … … 305 296 /* Some CPUs got no CPUID_SEP */ 306 297 .ext_features = CPUID_EXT_MONITOR | 307 CPUID_EXT_SSE3 /* PNI */ ,CPUID_EXT_SSSE3,298 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3, 308 299 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST | 309 300 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */ … … 327 318 328 319 def = NULL; 329 for (i = 0; i < sizeof(x86_defs) / 
sizeof(x86_def_t); i++) {320 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) { 330 321 if (strcmp(name, x86_defs[i].name) == 0) { 331 322 def = &x86_defs[i]; … … 358 349 char *err; 359 350 model = strtol(val, &err, 10); 360 if (!*val || *err || model < 0 || model > 0xf ) {351 if (!*val || *err || model < 0 || model > 0xff) { 361 352 fprintf(stderr, "bad numerical value %s\n", val); 362 353 goto error; … … 417 408 unsigned int i; 418 409 419 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)410 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) 420 411 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name); 421 412 } … … 439 430 } 440 431 env->cpuid_level = def->level; 441 env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping; 432 if (def->family > 0x0f) 433 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20); 434 else 435 env->cpuid_version = def->family << 8; 436 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16); 437 env->cpuid_version |= def->stepping; 442 438 env->cpuid_features = def->features; 443 439 env->pat = 0x0007040600070406ULL; … … 469 465 int i; 470 466 467 if (qemu_loglevel_mask(CPU_LOG_RESET)) { 468 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index); 469 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP); 470 } 471 471 472 memset(env, 0, offsetof(CPUX86State, breakpoints)); 472 473 … … 522 523 523 524 env->mxcsr = 0x1f80; 525 526 memset(env->dr, 0, sizeof(env->dr)); 527 env->dr[6] = DR6_FIXED_1; 528 env->dr[7] = DR7_FIXED_1; 529 cpu_breakpoint_remove_all(env, BP_CPU); 530 cpu_watchpoint_remove_all(env, BP_CPU); 524 531 } 525 532 … … 596 603 char cc_op_name[32]; 597 604 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; 605 606 if (kvm_enabled()) 607 kvm_arch_get_registers(env); 598 608 599 609 eflags = env->eflags; … … 694 704 env->cr[3], 695 705 (uint32_t)env->cr[4]); 706 for(i = 0; i < 4; i++) 707 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]); 708 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n", 709 env->dr[6], env->dr[7]); 696 710 } else 697 711 #endif … … 725 739 (uint32_t)env->cr[3], 726 740 (uint32_t)env->cr[4]); 741 for(i = 0; i < 4; i++) 742 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]); 743 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]); 727 744 } 728 745 if (flags & X86_DUMP_CCOP) { … … 899 916 } 900 917 901 /* XXX: also flush 4MB pages */902 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)903 {904 tlb_flush_page(env, addr);905 }906 907 918 #if defined(CONFIG_USER_ONLY) 908 919 … … 1327 1338 return paddr; 1328 1339 } 1340 1341 void hw_breakpoint_insert(CPUState *env, int index) 1342 { 1343 int type, err = 0; 1344 1345 switch (hw_breakpoint_type(env->dr[7], index)) { 1346 case 0: 1347 if (hw_breakpoint_enabled(env->dr[7], index)) 1348 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU, 1349 &env->cpu_breakpoint[index]); 1350 break; 1351 case 1: 1352 type = BP_CPU | BP_MEM_WRITE; 1353 goto insert_wp; 1354 case 2: 1355 /* No support for I/O watchpoints yet */ 1356 break; 1357 case 3: 1358 type = BP_CPU | BP_MEM_ACCESS; 1359 insert_wp: 1360 err = cpu_watchpoint_insert(env, env->dr[index], 1361 hw_breakpoint_len(env->dr[7], index), 1362 type, &env->cpu_watchpoint[index]); 1363 break; 1364 } 1365 if (err) 1366 env->cpu_breakpoint[index] = NULL; 1367 } 1368 1369 void hw_breakpoint_remove(CPUState *env, int index) 1370 { 1371 if (!env->cpu_breakpoint[index]) 1372 return; 1373 switch (hw_breakpoint_type(env->dr[7], index)) { 1374 case 0: 1375 if 
(hw_breakpoint_enabled(env->dr[7], index)) 1376 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]); 1377 break; 1378 case 1: 1379 case 3: 1380 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]); 1381 break; 1382 case 2: 1383 /* No support for I/O watchpoints yet */ 1384 break; 1385 } 1386 } 1387 1388 int check_hw_breakpoints(CPUState *env, int force_dr6_update) 1389 { 1390 target_ulong dr6; 1391 int reg, type; 1392 int hit_enabled = 0; 1393 1394 dr6 = env->dr[6] & ~0xf; 1395 for (reg = 0; reg < 4; reg++) { 1396 type = hw_breakpoint_type(env->dr[7], reg); 1397 if ((type == 0 && env->dr[reg] == env->eip) || 1398 ((type & 1) && env->cpu_watchpoint[reg] && 1399 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) { 1400 dr6 |= 1 << reg; 1401 if (hw_breakpoint_enabled(env->dr[7], reg)) 1402 hit_enabled = 1; 1403 } 1404 } 1405 if (hit_enabled || force_dr6_update) 1406 env->dr[6] = dr6; 1407 return hit_enabled; 1408 } 1409 1410 static CPUDebugExcpHandler *prev_debug_excp_handler; 1411 1412 void raise_exception(int exception_index); 1413 1414 static void breakpoint_handler(CPUState *env) 1415 { 1416 CPUBreakpoint *bp; 1417 1418 if (env->watchpoint_hit) { 1419 if (env->watchpoint_hit->flags & BP_CPU) { 1420 env->watchpoint_hit = NULL; 1421 if (check_hw_breakpoints(env, 0)) 1422 raise_exception(EXCP01_DB); 1423 else 1424 cpu_resume_from_signal(env, NULL); 1425 } 1426 } else { 1427 TAILQ_FOREACH(bp, &env->breakpoints, entry) 1428 if (bp->pc == env->eip) { 1429 if (bp->flags & BP_CPU) { 1430 check_hw_breakpoints(env, 1); 1431 raise_exception(EXCP01_DB); 1432 } 1433 break; 1434 } 1435 } 1436 if (prev_debug_excp_handler) 1437 prev_debug_excp_handler(env); 1438 } 1329 1439 #endif /* !CONFIG_USER_ONLY */ 1440 1441 #ifndef VBOX 1442 static void host_cpuid(uint32_t function, uint32_t count, 1443 uint32_t *eax, uint32_t *ebx, 1444 uint32_t *ecx, uint32_t *edx) 1445 { 1446 #if defined(CONFIG_KVM) 1447 uint32_t vec[4]; 1448 1449 #ifdef __x86_64__ 1450 asm volatile("cpuid" 1451 : "=a"(vec[0]), "=b"(vec[1]), 1452 "=c"(vec[2]), "=d"(vec[3]) 1453 : "0"(function), "c"(count) : "cc"); 1454 #else 1455 asm volatile("pusha \n\t" 1456 "cpuid \n\t" 1457 "mov %%eax, 0(%1) \n\t" 1458 "mov %%ebx, 4(%1) \n\t" 1459 "mov %%ecx, 8(%1) \n\t" 1460 "mov %%edx, 12(%1) \n\t" 1461 "popa" 1462 : : "a"(function), "c"(count), "S"(vec) 1463 : "memory", "cc"); 1464 #endif 1465 1466 if (eax) 1467 *eax = vec[0]; 1468 if (ebx) 1469 *ebx = vec[1]; 1470 if (ecx) 1471 *ecx = vec[2]; 1472 if (edx) 1473 *edx = vec[3]; 1474 #endif 1475 } 1476 1477 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 1478 uint32_t *eax, uint32_t *ebx, 1479 uint32_t *ecx, uint32_t *edx) 1480 { 1481 /* test if maximum index reached */ 1482 if (index & 0x80000000) { 1483 if (index > env->cpuid_xlevel) 1484 index = env->cpuid_level; 1485 } else { 1486 if (index > env->cpuid_level) 1487 index = env->cpuid_level; 1488 } 1489 1490 switch(index) { 1491 case 0: 1492 *eax = env->cpuid_level; 1493 *ebx = env->cpuid_vendor1; 1494 *edx = env->cpuid_vendor2; 1495 *ecx = env->cpuid_vendor3; 1496 1497 /* sysenter isn't supported on compatibility mode on AMD. and syscall 1498 * isn't supported in compatibility mode on Intel. so advertise the 1499 * actuall cpu, and say goodbye to migration between different vendors 1500 * is you use compatibility mode. 
*/ 1501 if (kvm_enabled()) 1502 host_cpuid(0, 0, NULL, ebx, ecx, edx); 1503 break; 1504 case 1: 1505 *eax = env->cpuid_version; 1506 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 1507 *ecx = env->cpuid_ext_features; 1508 *edx = env->cpuid_features; 1509 1510 /* "Hypervisor present" bit required for Microsoft SVVP */ 1511 if (kvm_enabled()) 1512 *ecx |= (1 << 31); 1513 break; 1514 case 2: 1515 /* cache info: needed for Pentium Pro compatibility */ 1516 *eax = 1; 1517 *ebx = 0; 1518 *ecx = 0; 1519 *edx = 0x2c307d; 1520 break; 1521 case 4: 1522 /* cache info: needed for Core compatibility */ 1523 switch (count) { 1524 case 0: /* L1 dcache info */ 1525 *eax = 0x0000121; 1526 *ebx = 0x1c0003f; 1527 *ecx = 0x000003f; 1528 *edx = 0x0000001; 1529 break; 1530 case 1: /* L1 icache info */ 1531 *eax = 0x0000122; 1532 *ebx = 0x1c0003f; 1533 *ecx = 0x000003f; 1534 *edx = 0x0000001; 1535 break; 1536 case 2: /* L2 cache info */ 1537 *eax = 0x0000143; 1538 *ebx = 0x3c0003f; 1539 *ecx = 0x0000fff; 1540 *edx = 0x0000001; 1541 break; 1542 default: /* end of info */ 1543 *eax = 0; 1544 *ebx = 0; 1545 *ecx = 0; 1546 *edx = 0; 1547 break; 1548 } 1549 break; 1550 case 5: 1551 /* mwait info: needed for Core compatibility */ 1552 *eax = 0; /* Smallest monitor-line size in bytes */ 1553 *ebx = 0; /* Largest monitor-line size in bytes */ 1554 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 1555 *edx = 0; 1556 break; 1557 case 6: 1558 /* Thermal and Power Leaf */ 1559 *eax = 0; 1560 *ebx = 0; 1561 *ecx = 0; 1562 *edx = 0; 1563 break; 1564 case 9: 1565 /* Direct Cache Access Information Leaf */ 1566 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 1567 *ebx = 0; 1568 *ecx = 0; 1569 *edx = 0; 1570 break; 1571 case 0xA: 1572 /* Architectural Performance Monitoring Leaf */ 1573 *eax = 0; 1574 *ebx = 0; 1575 *ecx = 0; 1576 *edx = 0; 1577 break; 1578 case 0x80000000: 1579 *eax = env->cpuid_xlevel; 1580 *ebx = env->cpuid_vendor1; 1581 *edx = env->cpuid_vendor2; 1582 *ecx = env->cpuid_vendor3; 1583 break; 1584 case 0x80000001: 1585 *eax = env->cpuid_features; 1586 *ebx = 0; 1587 *ecx = env->cpuid_ext3_features; 1588 *edx = env->cpuid_ext2_features; 1589 1590 if (kvm_enabled()) { 1591 uint32_t h_eax, h_edx; 1592 1593 host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx); 1594 1595 /* disable CPU features that the host does not support */ 1596 1597 /* long mode */ 1598 if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */) 1599 *edx &= ~0x20000000; 1600 /* syscall */ 1601 if ((h_edx & 0x00000800) == 0) 1602 *edx &= ~0x00000800; 1603 /* nx */ 1604 if ((h_edx & 0x00100000) == 0) 1605 *edx &= ~0x00100000; 1606 1607 /* disable CPU features that KVM cannot support */ 1608 1609 /* svm */ 1610 *ecx &= ~4UL; 1611 /* 3dnow */ 1612 *edx &= ~0xc0000000; 1613 } 1614 break; 1615 case 0x80000002: 1616 case 0x80000003: 1617 case 0x80000004: 1618 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 1619 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 1620 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 1621 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 1622 break; 1623 case 0x80000005: 1624 /* cache info (L1 cache) */ 1625 *eax = 0x01ff01ff; 1626 *ebx = 0x01ff01ff; 1627 *ecx = 0x40020140; 1628 *edx = 0x40020140; 1629 break; 1630 case 0x80000006: 1631 /* cache info (L2 cache) */ 1632 *eax = 0; 1633 *ebx = 0x42004200; 1634 *ecx = 0x02008140; 1635 *edx = 0; 1636 break; 1637 case 0x80000008: 1638 /* virtual & phys address size in low 2 bytes. 
*/ 1639 /* XXX: This value must match the one used in the MMU code. */ 1640 if (env->cpuid_ext2_features & CPUID_EXT2_LM) { 1641 /* 64 bit processor */ 1642 #if defined(USE_KQEMU) 1643 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */ 1644 #else 1645 /* XXX: The physical address space is limited to 42 bits in exec.c. */ 1646 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */ 1647 #endif 1648 } else { 1649 #if defined(USE_KQEMU) 1650 *eax = 0x00000020; /* 32 bits physical */ 1651 #else 1652 if (env->cpuid_features & CPUID_PSE36) 1653 *eax = 0x00000024; /* 36 bits physical */ 1654 else 1655 *eax = 0x00000020; /* 32 bits physical */ 1656 #endif 1657 } 1658 *ebx = 0; 1659 *ecx = 0; 1660 *edx = 0; 1661 break; 1662 case 0x8000000A: 1663 *eax = 0x00000001; /* SVM Revision */ 1664 *ebx = 0x00000010; /* nr of ASIDs */ 1665 *ecx = 0; 1666 *edx = 0; /* optional features */ 1667 break; 1668 default: 1669 /* reserved values: zero */ 1670 *eax = 0; 1671 *ebx = 0; 1672 *ecx = 0; 1673 *edx = 0; 1674 break; 1675 } 1676 } 1677 #endif /* !VBOX */ 1678 1679 #ifndef VBOX 1680 CPUX86State *cpu_x86_init(const char *cpu_model) 1681 #else 1682 CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model) 1683 #endif 1684 { 1685 #ifndef VBOX 1686 CPUX86State *env; 1687 #endif 1688 static int inited; 1689 1690 #ifndef VBOX 1691 env = qemu_mallocz(sizeof(CPUX86State)); 1692 #endif 1693 cpu_exec_init(env); 1694 env->cpu_model_str = cpu_model; 1695 1696 /* init various static tables */ 1697 if (!inited) { 1698 inited = 1; 1699 optimize_flags_init(); 1700 #ifndef CONFIG_USER_ONLY 1701 prev_debug_excp_handler = 1702 cpu_set_debug_excp_handler(breakpoint_handler); 1703 #endif 1704 } 1705 if (cpu_x86_register(env, cpu_model) < 0) { 1706 cpu_x86_close(env); 1707 return NULL; 1708 } 1709 cpu_reset(env); 1710 #ifdef USE_KQEMU 1711 kqemu_init(env); 1712 #endif 1713 if (kvm_enabled()) 1714 kvm_init_vcpu(env); 1715 return env; 1716 } -
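The new family/model packing in cpu_x86_register() follows the CPUID leaf-1 EAX layout: stepping in bits 3:0, base model in 7:4, base family in 11:8 (saturated at 0xf), extended model in 19:16, extended family in 27:20. Plugging in the new "phenom" definition (family 16, model 2, stepping 3) shows why the extended fields matter. A sketch of just that computation:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_cpuid_version(int family, int model, int stepping)
    {
        uint32_t v;
        if (family > 0x0f)
            v = 0xf00 | ((family - 0x0f) << 20); /* base family pegged at 0xf */
        else
            v = family << 8;
        v |= ((model & 0xf) << 4) | ((model >> 4) << 16);
        v |= stepping;
        return v;
    }

    int main(void)
    {
        printf("family 16, model 2, stepping 3: 0x%06x\n",
               pack_cpuid_version(16, 2, 3));   /* 0x100f23 */
        printf("family 6,  model 2, stepping 3: 0x%06x\n",
               pack_cpuid_version(6, 2, 3));    /* 0x000623 */
        return 0;
    }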
trunk/src/recompiler/target-i386/helper.h
r36140 r36170 1 #ifndef DEF_HELPER 2 #define DEF_HELPER(ret, name, params) ret name params; 3 #endif 4 5 DEF_HELPER(void, helper_lock, (void)) 6 DEF_HELPER(void, helper_unlock, (void)) 7 DEF_HELPER(void, helper_write_eflags, (target_ulong t0, uint32_t update_mask)) 8 DEF_HELPER(target_ulong, helper_read_eflags, (void)) 9 #ifdef VBOX 10 DEF_HELPER(void, helper_write_eflags_vme, (target_ulong t0)) 11 DEF_HELPER(target_ulong, helper_read_eflags_vme, (void)) 12 #endif 13 DEF_HELPER(void, helper_divb_AL, (target_ulong t0)) 14 DEF_HELPER(void, helper_idivb_AL, (target_ulong t0)) 15 DEF_HELPER(void, helper_divw_AX, (target_ulong t0)) 16 DEF_HELPER(void, helper_idivw_AX, (target_ulong t0)) 17 DEF_HELPER(void, helper_divl_EAX, (target_ulong t0)) 18 DEF_HELPER(void, helper_idivl_EAX, (target_ulong t0)) 19 #ifdef TARGET_X86_64 20 DEF_HELPER(void, helper_mulq_EAX_T0, (target_ulong t0)) 21 DEF_HELPER(void, helper_imulq_EAX_T0, (target_ulong t0)) 22 DEF_HELPER(target_ulong, helper_imulq_T0_T1, (target_ulong t0, target_ulong t1)) 23 DEF_HELPER(void, helper_divq_EAX, (target_ulong t0)) 24 DEF_HELPER(void, helper_idivq_EAX, (target_ulong t0)) 25 #endif 26 27 DEF_HELPER(void, helper_aam, (int base)) 28 DEF_HELPER(void, helper_aad, (int base)) 29 DEF_HELPER(void, helper_aaa, (void)) 30 DEF_HELPER(void, helper_aas, (void)) 31 DEF_HELPER(void, helper_daa, (void)) 32 DEF_HELPER(void, helper_das, (void)) 33 34 DEF_HELPER(target_ulong, helper_lsl, (target_ulong selector1)) 35 DEF_HELPER(target_ulong, helper_lar, (target_ulong selector1)) 36 DEF_HELPER(void, helper_verr, (target_ulong selector1)) 37 DEF_HELPER(void, helper_verw, (target_ulong selector1)) 38 DEF_HELPER(void, helper_lldt, (int selector)) 39 DEF_HELPER(void, helper_ltr, (int selector)) 40 DEF_HELPER(void, helper_load_seg, (int seg_reg, int selector)) 41 DEF_HELPER(void, helper_ljmp_protected, (int new_cs, target_ulong new_eip, 42 int next_eip_addend)) 43 DEF_HELPER(void, helper_lcall_real, (int new_cs, target_ulong new_eip1, 44 int shift, int next_eip)) 45 DEF_HELPER(void, helper_lcall_protected, (int new_cs, target_ulong new_eip, 46 int shift, int next_eip_addend)) 47 DEF_HELPER(void, helper_iret_real, (int shift)) 48 DEF_HELPER(void, helper_iret_protected, (int shift, int next_eip)) 49 DEF_HELPER(void, helper_lret_protected, (int shift, int addend)) 50 DEF_HELPER(target_ulong, helper_read_crN, (int reg)) 51 DEF_HELPER(void, helper_write_crN, (int reg, target_ulong t0)) 52 DEF_HELPER(void, helper_lmsw, (target_ulong t0)) 53 DEF_HELPER(void, helper_clts, (void)) 54 DEF_HELPER(void, helper_movl_drN_T0, (int reg, target_ulong t0)) 55 DEF_HELPER(void, helper_invlpg, (target_ulong addr)) 56 57 DEF_HELPER(void, helper_enter_level, (int level, int data32, target_ulong t1)) 58 #ifdef TARGET_X86_64 59 DEF_HELPER(void, helper_enter64_level, (int level, int data64, target_ulong t1)) 60 #endif 61 DEF_HELPER(void, helper_sysenter, (void)) 62 DEF_HELPER(void, helper_sysexit, (int dflag)) 63 #ifdef TARGET_X86_64 64 DEF_HELPER(void, helper_syscall, (int next_eip_addend)) 65 DEF_HELPER(void, helper_sysret, (int dflag)) 66 #endif 67 DEF_HELPER(void, helper_hlt, (int next_eip_addend)) 68 DEF_HELPER(void, helper_monitor, (target_ulong ptr)) 69 DEF_HELPER(void, helper_mwait, (int next_eip_addend)) 70 DEF_HELPER(void, helper_debug, (void)) 71 DEF_HELPER(void, helper_raise_interrupt, (int intno, int next_eip_addend)) 72 DEF_HELPER(void, helper_raise_exception, (int exception_index)) 73 DEF_HELPER(void, helper_cli, (void)) 74 DEF_HELPER(void, helper_sti, (void)) 75 #ifdef 
VBOX 76 DEF_HELPER(void, helper_cli_vme, (void)) 77 DEF_HELPER(void, helper_sti_vme, (void)) 78 #endif 79 DEF_HELPER(void, helper_set_inhibit_irq, (void)) 80 DEF_HELPER(void, helper_reset_inhibit_irq, (void)) 81 DEF_HELPER(void, helper_boundw, (target_ulong a0, int v)) 82 DEF_HELPER(void, helper_boundl, (target_ulong a0, int v)) 83 DEF_HELPER(void, helper_rsm, (void)) 84 DEF_HELPER(void, helper_into, (int next_eip_addend)) 85 DEF_HELPER(void, helper_cmpxchg8b, (target_ulong a0)) 86 #ifdef TARGET_X86_64 87 DEF_HELPER(void, helper_cmpxchg16b, (target_ulong a0)) 88 #endif 89 DEF_HELPER(void, helper_single_step, (void)) 90 DEF_HELPER(void, helper_cpuid, (void)) 91 DEF_HELPER(void, helper_rdtsc, (void)) 92 DEF_HELPER(void, helper_rdpmc, (void)) 93 DEF_HELPER(void, helper_rdmsr, (void)) 94 DEF_HELPER(void, helper_wrmsr, (void)) 95 #ifdef VBOX 96 DEF_HELPER(void, helper_rdtscp, (void)) 97 #endif 98 99 DEF_HELPER(void, helper_check_iob, (uint32_t t0)) 100 DEF_HELPER(void, helper_check_iow, (uint32_t t0)) 101 DEF_HELPER(void, helper_check_iol, (uint32_t t0)) 102 #ifdef VBOX 103 DEF_HELPER(void, helper_check_external_event, (void)) 104 DEF_HELPER(void, helper_dump_state, (void)) 105 DEF_HELPER(void, helper_sync_seg, (uint32_t t0)) 106 #endif 107 DEF_HELPER(void, helper_outb, (uint32_t port, uint32_t data)) 108 DEF_HELPER(target_ulong, helper_inb, (uint32_t port)) 109 DEF_HELPER(void, helper_outw, (uint32_t port, uint32_t data)) 110 DEF_HELPER(target_ulong, helper_inw, (uint32_t port)) 111 DEF_HELPER(void, helper_outl, (uint32_t port, uint32_t data)) 112 DEF_HELPER(target_ulong, helper_inl, (uint32_t port)) 113 114 DEF_HELPER(void, helper_svm_check_intercept_param, (uint32_t type, uint64_t param)) 115 DEF_HELPER(void, helper_vmexit, (uint32_t exit_code, uint64_t exit_info_1)) 116 DEF_HELPER(void, helper_svm_check_io, (uint32_t port, uint32_t param, 117 uint32_t next_eip_addend)) 118 DEF_HELPER(void, helper_vmrun, (int aflag, int next_eip_addend)) 119 DEF_HELPER(void, helper_vmmcall, (void)) 120 DEF_HELPER(void, helper_vmload, (int aflag)) 121 DEF_HELPER(void, helper_vmsave, (int aflag)) 122 DEF_HELPER(void, helper_stgi, (void)) 123 DEF_HELPER(void, helper_clgi, (void)) 124 DEF_HELPER(void, helper_skinit, (void)) 125 DEF_HELPER(void, helper_invlpga, (int aflag)) 1 #include "def-helper.h" 2 3 DEF_HELPER_FLAGS_1(cc_compute_all, TCG_CALL_PURE, i32, int) 4 DEF_HELPER_FLAGS_1(cc_compute_c, TCG_CALL_PURE, i32, int) 5 6 DEF_HELPER_0(lock, void) 7 DEF_HELPER_0(unlock, void) 8 DEF_HELPER_2(write_eflags, void, tl, i32) 9 DEF_HELPER_0(read_eflags, tl) 10 DEF_HELPER_1(divb_AL, void, tl) 11 DEF_HELPER_1(idivb_AL, void, tl) 12 DEF_HELPER_1(divw_AX, void, tl) 13 DEF_HELPER_1(idivw_AX, void, tl) 14 DEF_HELPER_1(divl_EAX, void, tl) 15 DEF_HELPER_1(idivl_EAX, void, tl) 16 #ifdef TARGET_X86_64 17 DEF_HELPER_1(mulq_EAX_T0, void, tl) 18 DEF_HELPER_1(imulq_EAX_T0, void, tl) 19 DEF_HELPER_2(imulq_T0_T1, tl, tl, tl) 20 DEF_HELPER_1(divq_EAX, void, tl) 21 DEF_HELPER_1(idivq_EAX, void, tl) 22 #endif 23 24 DEF_HELPER_1(aam, void, int) 25 DEF_HELPER_1(aad, void, int) 26 DEF_HELPER_0(aaa, void) 27 DEF_HELPER_0(aas, void) 28 DEF_HELPER_0(daa, void) 29 DEF_HELPER_0(das, void) 30 31 DEF_HELPER_1(lsl, tl, tl) 32 DEF_HELPER_1(lar, tl, tl) 33 DEF_HELPER_1(verr, void, tl) 34 DEF_HELPER_1(verw, void, tl) 35 DEF_HELPER_1(lldt, void, int) 36 DEF_HELPER_1(ltr, void, int) 37 DEF_HELPER_2(load_seg, void, int, int) 38 DEF_HELPER_3(ljmp_protected, void, int, tl, int) 39 DEF_HELPER_4(lcall_real, void, int, tl, int, int) 40 
DEF_HELPER_4(lcall_protected, void, int, tl, int, int) 41 DEF_HELPER_1(iret_real, void, int) 42 DEF_HELPER_2(iret_protected, void, int, int) 43 DEF_HELPER_2(lret_protected, void, int, int) 44 DEF_HELPER_1(read_crN, tl, int) 45 DEF_HELPER_2(write_crN, void, int, tl) 46 DEF_HELPER_1(lmsw, void, tl) 47 DEF_HELPER_0(clts, void) 48 DEF_HELPER_2(movl_drN_T0, void, int, tl) 49 DEF_HELPER_1(invlpg, void, tl) 50 51 DEF_HELPER_3(enter_level, void, int, int, tl) 52 #ifdef TARGET_X86_64 53 DEF_HELPER_3(enter64_level, void, int, int, tl) 54 #endif 55 DEF_HELPER_0(sysenter, void) 56 DEF_HELPER_1(sysexit, void, int) 57 #ifdef TARGET_X86_64 58 DEF_HELPER_1(syscall, void, int) 59 DEF_HELPER_1(sysret, void, int) 60 #endif 61 DEF_HELPER_1(hlt, void, int) 62 DEF_HELPER_1(monitor, void, tl) 63 DEF_HELPER_1(mwait, void, int) 64 DEF_HELPER_0(debug, void) 65 DEF_HELPER_2(raise_interrupt, void, int, int) 66 DEF_HELPER_1(raise_exception, void, int) 67 DEF_HELPER_0(cli, void) 68 DEF_HELPER_0(sti, void) 69 DEF_HELPER_0(set_inhibit_irq, void) 70 DEF_HELPER_0(reset_inhibit_irq, void) 71 DEF_HELPER_2(boundw, void, tl, int) 72 DEF_HELPER_2(boundl, void, tl, int) 73 DEF_HELPER_0(rsm, void) 74 DEF_HELPER_1(into, void, int) 75 DEF_HELPER_1(cmpxchg8b, void, tl) 76 #ifdef TARGET_X86_64 77 DEF_HELPER_1(cmpxchg16b, void, tl) 78 #endif 79 DEF_HELPER_0(single_step, void) 80 DEF_HELPER_0(cpuid, void) 81 DEF_HELPER_0(rdtsc, void) 82 DEF_HELPER_0(rdpmc, void) 83 DEF_HELPER_0(rdmsr, void) 84 DEF_HELPER_0(wrmsr, void) 85 86 DEF_HELPER_1(check_iob, void, i32) 87 DEF_HELPER_1(check_iow, void, i32) 88 DEF_HELPER_1(check_iol, void, i32) 89 DEF_HELPER_2(outb, void, i32, i32) 90 DEF_HELPER_1(inb, tl, i32) 91 DEF_HELPER_2(outw, void, i32, i32) 92 DEF_HELPER_1(inw, tl, i32) 93 DEF_HELPER_2(outl, void, i32, i32) 94 DEF_HELPER_1(inl, tl, i32) 95 96 DEF_HELPER_2(svm_check_intercept_param, void, i32, i64) 97 DEF_HELPER_2(vmexit, void, i32, i64) 98 DEF_HELPER_3(svm_check_io, void, i32, i32, i32) 99 DEF_HELPER_2(vmrun, void, int, int) 100 DEF_HELPER_0(vmmcall, void) 101 DEF_HELPER_1(vmload, void, int) 102 DEF_HELPER_1(vmsave, void, int) 103 DEF_HELPER_0(stgi, void) 104 DEF_HELPER_0(clgi, void) 105 DEF_HELPER_0(skinit, void) 106 DEF_HELPER_1(invlpga, void, int) 126 107 127 108 /* x86 FPU */ 128 109 129 DEF_HELPER (void, helper_flds_FT0, (uint32_t val))130 DEF_HELPER (void, helper_fldl_FT0, (uint64_t val))131 DEF_HELPER (void, helper_fildl_FT0, (int32_t val))132 DEF_HELPER (void, helper_flds_ST0, (uint32_t val))133 DEF_HELPER (void, helper_fldl_ST0, (uint64_t val))134 DEF_HELPER (void, helper_fildl_ST0, (int32_t val))135 DEF_HELPER (void, helper_fildll_ST0, (int64_t val))110 DEF_HELPER_1(flds_FT0, void, i32) 111 DEF_HELPER_1(fldl_FT0, void, i64) 112 DEF_HELPER_1(fildl_FT0, void, s32) 113 DEF_HELPER_1(flds_ST0, void, i32) 114 DEF_HELPER_1(fldl_ST0, void, i64) 115 DEF_HELPER_1(fildl_ST0, void, s32) 116 DEF_HELPER_1(fildll_ST0, void, s64) 136 117 #ifndef VBOX 137 DEF_HELPER (uint32_t, helper_fsts_ST0, (void))138 DEF_HELPER (uint64_t, helper_fstl_ST0, (void))139 DEF_HELPER (int32_t, helper_fist_ST0, (void))140 DEF_HELPER (int32_t, helper_fistl_ST0, (void))141 DEF_HELPER (int64_t, helper_fistll_ST0, (void))142 DEF_HELPER (int32_t, helper_fistt_ST0, (void))143 DEF_HELPER (int32_t, helper_fisttl_ST0, (void))144 DEF_HELPER (int64_t, helper_fisttll_ST0, (void))118 DEF_HELPER_0(fsts_ST0, i32) 119 DEF_HELPER_0(fstl_ST0, i64) 120 DEF_HELPER_0(fist_ST0, s32) 121 DEF_HELPER_0(fistl_ST0, s32) 122 DEF_HELPER_0(fistll_ST0, s64) 123 DEF_HELPER_0(fistt_ST0, s32) 124 
DEF_HELPER_0(fisttl_ST0, s32) 125 DEF_HELPER_0(fisttll_ST0, s64) 145 126 #else /* VBOX */ 146 DEF_HELPER (RTCCUINTREG, helper_fsts_ST0, (void))147 DEF_HELPER (uint64_t, helper_fstl_ST0, (void))148 DEF_HELPER (RTCCINTREG, helper_fist_ST0, (void))149 DEF_HELPER (RTCCINTREG, helper_fistl_ST0, (void))150 DEF_HELPER (int64_t, helper_fistll_ST0, (void))151 DEF_HELPER (RTCCINTREG, helper_fistt_ST0, (void))152 DEF_HELPER (RTCCINTREG, helper_fisttl_ST0, (void))153 DEF_HELPER (int64_t, helper_fisttll_ST0, (void))127 DEF_HELPER_0(fsts_ST0, RTCCUINTREG) 128 DEF_HELPER_0(fstl_ST0, i64) 129 DEF_HELPER_0(fist_ST0, RTCCINTREG) 130 DEF_HELPER_0(fistl_ST0, RTCCINTREG) 131 DEF_HELPER_0(fistll_ST0, s64) 132 DEF_HELPER_0(fistt_ST0, RTCCINTREG) 133 DEF_HELPER_0(fisttl_ST0, RTCCINTREG) 134 DEF_HELPER_0(fisttll_ST0, s64) 154 135 #endif /* VBOX */ 155 DEF_HELPER (void, helper_fldt_ST0, (target_ulong ptr))156 DEF_HELPER (void, helper_fstt_ST0, (target_ulong ptr))157 DEF_HELPER (void, helper_fpush, (void))158 DEF_HELPER (void, helper_fpop, (void))159 DEF_HELPER (void, helper_fdecstp, (void))160 DEF_HELPER (void, helper_fincstp, (void))161 DEF_HELPER (void, helper_ffree_STN, (int st_index))162 DEF_HELPER (void, helper_fmov_ST0_FT0, (void))163 DEF_HELPER (void, helper_fmov_FT0_STN, (int st_index))164 DEF_HELPER (void, helper_fmov_ST0_STN, (int st_index))165 DEF_HELPER (void, helper_fmov_STN_ST0, (int st_index))166 DEF_HELPER (void, helper_fxchg_ST0_STN, (int st_index))167 DEF_HELPER (void, helper_fcom_ST0_FT0, (void))168 DEF_HELPER (void, helper_fucom_ST0_FT0, (void))169 DEF_HELPER (void, helper_fcomi_ST0_FT0, (void))170 DEF_HELPER (void, helper_fucomi_ST0_FT0, (void))171 DEF_HELPER (void, helper_fadd_ST0_FT0, (void))172 DEF_HELPER (void, helper_fmul_ST0_FT0, (void))173 DEF_HELPER (void, helper_fsub_ST0_FT0, (void))174 DEF_HELPER (void, helper_fsubr_ST0_FT0, (void))175 DEF_HELPER (void, helper_fdiv_ST0_FT0, (void))176 DEF_HELPER (void, helper_fdivr_ST0_FT0, (void))177 DEF_HELPER (void, helper_fadd_STN_ST0, (int st_index))178 DEF_HELPER (void, helper_fmul_STN_ST0, (int st_index))179 DEF_HELPER (void, helper_fsub_STN_ST0, (int st_index))180 DEF_HELPER (void, helper_fsubr_STN_ST0, (int st_index))181 DEF_HELPER (void, helper_fdiv_STN_ST0, (int st_index))182 DEF_HELPER (void, helper_fdivr_STN_ST0, (int st_index))183 DEF_HELPER (void, helper_fchs_ST0, (void))184 DEF_HELPER (void, helper_fabs_ST0, (void))185 DEF_HELPER (void, helper_fxam_ST0, (void))186 DEF_HELPER (void, helper_fld1_ST0, (void))187 DEF_HELPER (void, helper_fldl2t_ST0, (void))188 DEF_HELPER (void, helper_fldl2e_ST0, (void))189 DEF_HELPER (void, helper_fldpi_ST0, (void))190 DEF_HELPER (void, helper_fldlg2_ST0, (void))191 DEF_HELPER (void, helper_fldln2_ST0, (void))192 DEF_HELPER (void, helper_fldz_ST0, (void))193 DEF_HELPER (void, helper_fldz_FT0, (void))136 DEF_HELPER_1(fldt_ST0, void, tl) 137 DEF_HELPER_1(fstt_ST0, void, tl) 138 DEF_HELPER_0(fpush, void) 139 DEF_HELPER_0(fpop, void) 140 DEF_HELPER_0(fdecstp, void) 141 DEF_HELPER_0(fincstp, void) 142 DEF_HELPER_1(ffree_STN, void, int) 143 DEF_HELPER_0(fmov_ST0_FT0, void) 144 DEF_HELPER_1(fmov_FT0_STN, void, int) 145 DEF_HELPER_1(fmov_ST0_STN, void, int) 146 DEF_HELPER_1(fmov_STN_ST0, void, int) 147 DEF_HELPER_1(fxchg_ST0_STN, void, int) 148 DEF_HELPER_0(fcom_ST0_FT0, void) 149 DEF_HELPER_0(fucom_ST0_FT0, void) 150 DEF_HELPER_0(fcomi_ST0_FT0, void) 151 DEF_HELPER_0(fucomi_ST0_FT0, void) 152 DEF_HELPER_0(fadd_ST0_FT0, void) 153 DEF_HELPER_0(fmul_ST0_FT0, void) 154 DEF_HELPER_0(fsub_ST0_FT0, void) 155 
DEF_HELPER_0(fsubr_ST0_FT0, void) 156 DEF_HELPER_0(fdiv_ST0_FT0, void) 157 DEF_HELPER_0(fdivr_ST0_FT0, void) 158 DEF_HELPER_1(fadd_STN_ST0, void, int) 159 DEF_HELPER_1(fmul_STN_ST0, void, int) 160 DEF_HELPER_1(fsub_STN_ST0, void, int) 161 DEF_HELPER_1(fsubr_STN_ST0, void, int) 162 DEF_HELPER_1(fdiv_STN_ST0, void, int) 163 DEF_HELPER_1(fdivr_STN_ST0, void, int) 164 DEF_HELPER_0(fchs_ST0, void) 165 DEF_HELPER_0(fabs_ST0, void) 166 DEF_HELPER_0(fxam_ST0, void) 167 DEF_HELPER_0(fld1_ST0, void) 168 DEF_HELPER_0(fldl2t_ST0, void) 169 DEF_HELPER_0(fldl2e_ST0, void) 170 DEF_HELPER_0(fldpi_ST0, void) 171 DEF_HELPER_0(fldlg2_ST0, void) 172 DEF_HELPER_0(fldln2_ST0, void) 173 DEF_HELPER_0(fldz_ST0, void) 174 DEF_HELPER_0(fldz_FT0, void) 194 175 #ifndef VBOX 195 DEF_HELPER (uint32_t, helper_fnstsw, (void))196 DEF_HELPER (uint32_t, helper_fnstcw, (void))176 DEF_HELPER_0(fnstsw, i32) 177 DEF_HELPER_0(fnstcw, i32) 197 178 #else /* VBOX */ 198 DEF_HELPER (RTCCUINTREG, helper_fnstsw, (void))199 DEF_HELPER (RTCCUINTREG, helper_fnstcw, (void))179 DEF_HELPER_0(fnstsw, RTCCUINTREG) 180 DEF_HELPER_0(fnstcw, RTCCUINTREG) 200 181 #endif /* VBOX */ 201 DEF_HELPER (void, helper_fldcw, (uint32_t val))202 DEF_HELPER (void, helper_fclex, (void))203 DEF_HELPER (void, helper_fwait, (void))204 DEF_HELPER (void, helper_fninit, (void))205 DEF_HELPER (void, helper_fbld_ST0, (target_ulong ptr))206 DEF_HELPER (void, helper_fbst_ST0, (target_ulong ptr))207 DEF_HELPER (void, helper_f2xm1, (void))208 DEF_HELPER (void, helper_fyl2x, (void))209 DEF_HELPER (void, helper_fptan, (void))210 DEF_HELPER (void, helper_fpatan, (void))211 DEF_HELPER (void, helper_fxtract, (void))212 DEF_HELPER (void, helper_fprem1, (void))213 DEF_HELPER (void, helper_fprem, (void))214 DEF_HELPER (void, helper_fyl2xp1, (void))215 DEF_HELPER (void, helper_fsqrt, (void))216 DEF_HELPER (void, helper_fsincos, (void))217 DEF_HELPER (void, helper_frndint, (void))218 DEF_HELPER (void, helper_fscale, (void))219 DEF_HELPER (void, helper_fsin, (void))220 DEF_HELPER (void, helper_fcos, (void))221 DEF_HELPER (void, helper_fstenv, (target_ulong ptr, int data32))222 DEF_HELPER (void, helper_fldenv, (target_ulong ptr, int data32))223 DEF_HELPER (void, helper_fsave, (target_ulong ptr, int data32))224 DEF_HELPER (void, helper_frstor, (target_ulong ptr, int data32))225 DEF_HELPER (void, helper_fxsave, (target_ulong ptr, int data64))226 DEF_HELPER (void, helper_fxrstor, (target_ulong ptr, int data64))227 DEF_HELPER (target_ulong, helper_bsf, (target_ulong t0))228 DEF_HELPER (target_ulong, helper_bsr, (target_ulong t0))182 DEF_HELPER_1(fldcw, void, i32) 183 DEF_HELPER_0(fclex, void) 184 DEF_HELPER_0(fwait, void) 185 DEF_HELPER_0(fninit, void) 186 DEF_HELPER_1(fbld_ST0, void, tl) 187 DEF_HELPER_1(fbst_ST0, void, tl) 188 DEF_HELPER_0(f2xm1, void) 189 DEF_HELPER_0(fyl2x, void) 190 DEF_HELPER_0(fptan, void) 191 DEF_HELPER_0(fpatan, void) 192 DEF_HELPER_0(fxtract, void) 193 DEF_HELPER_0(fprem1, void) 194 DEF_HELPER_0(fprem, void) 195 DEF_HELPER_0(fyl2xp1, void) 196 DEF_HELPER_0(fsqrt, void) 197 DEF_HELPER_0(fsincos, void) 198 DEF_HELPER_0(frndint, void) 199 DEF_HELPER_0(fscale, void) 200 DEF_HELPER_0(fsin, void) 201 DEF_HELPER_0(fcos, void) 202 DEF_HELPER_2(fstenv, void, tl, int) 203 DEF_HELPER_2(fldenv, void, tl, int) 204 DEF_HELPER_2(fsave, void, tl, int) 205 DEF_HELPER_2(frstor, void, tl, int) 206 DEF_HELPER_2(fxsave, void, tl, int) 207 DEF_HELPER_2(fxrstor, void, tl, int) 208 DEF_HELPER_1(bsf, tl, tl) 209 DEF_HELPER_1(bsr, tl, tl) 229 210 230 211 /* MMX/SSE */ 231 212 232 
DEF_HELPER (void, helper_enter_mmx, (void))233 DEF_HELPER (void, helper_emms, (void))234 DEF_HELPER (void, helper_movq, (uint64_t *d, uint64_t *s))213 DEF_HELPER_0(enter_mmx, void) 214 DEF_HELPER_0(emms, void) 215 DEF_HELPER_2(movq, void, ptr, ptr) 235 216 236 217 #define SHIFT 0 … … 239 220 #include "ops_sse_header.h" 240 221 241 DEF_HELPER (target_ulong, helper_rclb, (target_ulong t0, target_ulong t1))242 DEF_HELPER (target_ulong, helper_rclw, (target_ulong t0, target_ulong t1))243 DEF_HELPER (target_ulong, helper_rcll, (target_ulong t0, target_ulong t1))244 DEF_HELPER (target_ulong, helper_rcrb, (target_ulong t0, target_ulong t1))245 DEF_HELPER (target_ulong, helper_rcrw, (target_ulong t0, target_ulong t1))246 DEF_HELPER (target_ulong, helper_rcrl, (target_ulong t0, target_ulong t1))247 #ifdef TARGET_X86_64 248 DEF_HELPER (target_ulong, helper_rclq, (target_ulong t0, target_ulong t1))249 DEF_HELPER (target_ulong, helper_rcrq, (target_ulong t0, target_ulong t1))222 DEF_HELPER_2(rclb, tl, tl, tl) 223 DEF_HELPER_2(rclw, tl, tl, tl) 224 DEF_HELPER_2(rcll, tl, tl, tl) 225 DEF_HELPER_2(rcrb, tl, tl, tl) 226 DEF_HELPER_2(rcrw, tl, tl, tl) 227 DEF_HELPER_2(rcrl, tl, tl, tl) 228 #ifdef TARGET_X86_64 229 DEF_HELPER_2(rclq, tl, tl, tl) 230 DEF_HELPER_2(rcrq, tl, tl, tl) 250 231 #endif 251 232 252 233 #ifdef VBOX 234 DEF_HELPER_1(write_eflags_vme, void, tl) 235 DEF_HELPER_0(read_eflags_vme, tl) 236 DEF_HELPER_0(cli_vme, void) 237 DEF_HELPER_0(sti_vme, void) 238 DEF_HELPER_0(rdtscp, void) 239 DEF_HELPER_0(check_external_event, void) 240 DEF_HELPER_0(dump_state, void) 241 DEF_HELPER_1(sync_seg, void, i32) 242 253 243 void helper_external_event(void); 254 244 void helper_record_call(void); … … 259 249 #endif /* VBOX */ 260 250 261 # undef DEF_HELPER251 #include "def-helper.h" -
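The whole file changes shape here: the old DEF_HELPER(ret, name, (args)) lines, which only ever expanded to prototypes, become DEF_HELPER_n(name, ret, argtypes...) records with abstract type codes (tl, i32, s64, ptr, ...). Wrapping the list in def-helper.h includes lets the build expand the very same lines differently on each pass. A toy two-pass expansion of that idiom; the type mappings and the second pass are simplified stand-ins for what def-helper.h really emits:

    #include <stdio.h>

    #define dh_ctype_void void
    #define dh_ctype_i32  unsigned int
    #define dh_ctype_tl   unsigned long

    /* Pass 1: expand each record into a C prototype. */
    #define DEF_HELPER_2(name, ret, t1, t2) \
        dh_ctype_##ret helper_##name(dh_ctype_##t1, dh_ctype_##t2);
    DEF_HELPER_2(write_eflags, void, tl, i32)
    #undef DEF_HELPER_2

    /* Pass 2: expand the same record into a registration table entry
       (the real second pass builds TCG helper metadata instead). */
    #define DEF_HELPER_2(name, ret, t1, t2) "helper_" #name,
    static const char *helper_names[] = {
        DEF_HELPER_2(write_eflags, void, tl, i32)
    };
    #undef DEF_HELPER_2

    void helper_write_eflags(unsigned long t0, unsigned int update_mask)
    {
        printf("write_eflags(%#lx, %#x)\n", t0, update_mask);
    }

    int main(void)
    {
        printf("pass 2 registered: %s\n", helper_names[0]);
        helper_write_eflags(0x246, 0xffffu);
        return 0;
    }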
trunk/src/recompiler/target-i386/helper_template.h
r33656 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 291 291 #endif 292 292 if (count) { 293 eflags = cc_table[CC_OP].compute_all();293 eflags = helper_cc_compute_all(CC_OP); 294 294 t0 &= DATA_MASK; 295 295 src = t0; … … 320 320 #endif 321 321 if (count) { 322 eflags = cc_table[CC_OP].compute_all();322 eflags = helper_cc_compute_all(CC_OP); 323 323 t0 &= DATA_MASK; 324 324 src = t0; -
trunk/src/recompiler/target-i386/op_helper.c
r36140 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 30 30 #define CPU_NO_GLOBAL_REGS 31 31 #include "exec.h" 32 #include "exec-all.h" 32 33 #include "host-utils.h" 33 34 … … 39 40 //#define DEBUG_PCALL 40 41 42 43 #ifdef DEBUG_PCALL 44 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__) 45 # define LOG_PCALL_STATE(env) \ 46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP) 47 #else 48 # define LOG_PCALL(...) do { } while (0) 49 # define LOG_PCALL_STATE(env) do { } while (0) 50 #endif 51 52 41 53 #if 0 42 54 #define raise_exception_err(a, b)\ 43 55 do {\ 44 if (logfile)\ 45 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\ 56 qemu_log("raise_exception line=%d\n", __LINE__);\ 46 57 (raise_exception_err)(a, b);\ 47 58 } while (0) 48 59 #endif 49 60 50 const uint8_t parity_table[256] = {61 static const uint8_t parity_table[256] = { 51 62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 52 63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, … … 84 95 85 96 /* modulo 17 table */ 86 const uint8_t rclw_table[32] = {97 static const uint8_t rclw_table[32] = { 87 98 0, 1, 2, 3, 4, 5, 6, 7, 88 99 8, 9,10,11,12,13,14,15, … … 92 103 93 104 /* modulo 9 table */ 94 const uint8_t rclb_table[32] = {105 static const uint8_t rclb_table[32] = { 95 106 0, 1, 2, 3, 4, 5, 6, 7, 96 107 8, 0, 1, 2, 3, 4, 5, 6, … … 99 110 }; 100 111 101 const CPU86_LDouble f15rk[7] =112 static const CPU86_LDouble f15rk[7] = 102 113 { 103 114 0.00000000000000000000L, … … 112 123 /* broken thread support */ 113 124 114 s pinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;125 static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED; 115 126 116 127 void helper_lock(void) … … 132 143 { 133 144 uint32_t eflags; 134 eflags = cc_table[CC_OP].compute_all();145 eflags = helper_cc_compute_all(CC_OP); 135 146 eflags |= (DF & DF_MASK); 136 147 eflags |= env->eflags & ~(VM_MASK | RF_MASK); … … 166 177 { 167 178 uint32_t eflags; 168 eflags = cc_table[CC_OP].compute_all();179 eflags = helper_cc_compute_all(CC_OP); 169 180 eflags |= (DF & DF_MASK); 170 181 eflags |= env->eflags & ~(VM_MASK | RF_MASK); … … 308 319 309 320 #ifdef VBOX 310 e1 = e2 = 0; 321 e1 = e2 = 0; /* gcc warning? */ 311 322 cpl = env->hflags & HF_CPL_MASK; 312 323 /* Trying to load a selector with CPL=1? 
*/ … … 392 403 393 404 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 394 #ifdef DEBUG_PCALL 395 if (loglevel & CPU_LOG_PCALL) 396 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source); 397 #endif 398 399 #if defined(VBOX) && defined(DEBUG) 400 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip); 401 #endif 405 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source); 402 406 403 407 /* if task gate, we read the TSS segment and we load it */ … … 628 632 raise_exception_err(EXCP0D_GPF, 0); 629 633 } 634 635 #ifndef CONFIG_USER_ONLY 636 /* reset local breakpoints */ 637 if (env->dr[7] & 0x55) { 638 for (i = 0; i < 4; i++) { 639 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) 640 hw_breakpoint_remove(env, i); 641 } 642 env->dr[7] &= ~0x55; 643 } 644 #endif 630 645 } 631 646 … … 787 802 int type, dpl, selector, ss_dpl, cpl; 788 803 int has_error_code, new_stack, shift; 789 uint32_t e1, e2, offset, ss , esp, ss_e1, ss_e2;804 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0; 790 805 uint32_t old_eip, sp_mask; 791 806 792 807 #ifdef VBOX 793 ss = ss_e1 = ss_e2 = 0;794 808 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS) 795 809 cpu_loop_exit(); … … 1271 1285 env->eflags &= ~IF_MASK; 1272 1286 } 1273 1274 1287 #ifndef VBOX 1275 1288 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); … … 1285 1298 #endif 1286 1299 1300 #ifdef TARGET_X86_64 1287 1301 #if defined(CONFIG_USER_ONLY) 1288 1302 void helper_syscall(int next_eip_addend) … … 1301 1315 } 1302 1316 selector = (env->star >> 32) & 0xffff; 1303 #ifdef TARGET_X86_641304 1317 if (env->hflags & HF_LMA_MASK) { 1305 1318 int code64; … … 1327 1340 else 1328 1341 env->eip = env->cstar; 1329 } else 1330 #endif 1331 { 1342 } else { 1332 1343 ECX = (uint32_t)(env->eip + next_eip_addend); 1333 1344 … … 1348 1359 } 1349 1360 #endif 1350 1361 #endif 1362 1363 #ifdef TARGET_X86_64 1351 1364 void helper_sysret(int dflag) 1352 1365 { … … 1361 1374 } 1362 1375 selector = (env->star >> 48) & 0xffff; 1363 #ifdef TARGET_X86_641364 1376 if (env->hflags & HF_LMA_MASK) { 1365 1377 if (dflag == 2) { … … 1387 1399 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK); 1388 1400 cpu_x86_set_cpl(env, 3); 1389 } else 1390 #endif 1391 { 1401 } else { 1392 1402 cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1393 1403 0, 0xffffffff, … … 1413 1423 #endif 1414 1424 } 1425 #endif 1415 1426 1416 1427 #ifdef VBOX … … 1421 1432 void helper_external_event(void) 1422 1433 { 1423 # if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)1434 # if defined(RT_OS_DARWIN) && defined(VBOX_STRICT) 1424 1435 uintptr_t uSP; 1425 # ifdef RT_ARCH_AMD641436 # ifdef RT_ARCH_AMD64 1426 1437 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP)); 1427 # else1438 # else 1428 1439 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP)); 1440 # endif 1441 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP)); 1429 1442 # endif 1430 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));1431 #endif1432 1443 /* Keep in sync with flags checked by gen_check_external_event() */ 1433 1444 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) … … 1546 1557 target_ulong next_eip, int is_hw) 1547 1558 { 1559 if (qemu_loglevel_mask(CPU_LOG_INT)) { 1560 if ((env->cr[0] & CR0_PE_MASK)) { 1561 static int count; 1562 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, 1563 count, intno, error_code, is_int, 1564 env->hflags & HF_CPL_MASK, 1565 env->segs[R_CS].selector, EIP, 
1566 (int)env->segs[R_CS].base + EIP, 1567 env->segs[R_SS].selector, ESP); 1568 if (intno == 0x0e) { 1569 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); 1570 } else { 1571 qemu_log(" EAX=" TARGET_FMT_lx, EAX); 1572 } 1573 qemu_log("\n"); 1574 log_cpu_state(env, X86_DUMP_CCOP); 1575 #if 0 1576 { 1577 int i; 1578 uint8_t *ptr; 1579 qemu_log(" code="); 1580 ptr = env->segs[R_CS].base + env->eip; 1581 for(i = 0; i < 16; i++) { 1582 qemu_log(" %02x", ldub(ptr + i)); 1583 } 1584 qemu_log("\n"); 1585 } 1586 #endif 1587 count++; 1588 } 1589 } 1548 1590 #ifdef VBOX 1549 1591 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) { … … 1557 1599 } 1558 1600 #endif 1559 1560 if (loglevel & CPU_LOG_INT) {1561 if ((env->cr[0] & CR0_PE_MASK)) {1562 static int count;1563 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,1564 count, intno, error_code, is_int,1565 env->hflags & HF_CPL_MASK,1566 env->segs[R_CS].selector, EIP,1567 (int)env->segs[R_CS].base + EIP,1568 env->segs[R_SS].selector, ESP);1569 if (intno == 0x0e) {1570 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);1571 } else {1572 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);1573 }1574 fprintf(logfile, "\n");1575 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);1576 #if 01577 {1578 int i;1579 uint8_t *ptr;1580 fprintf(logfile, " code=");1581 ptr = env->segs[R_CS].base + env->eip;1582 for(i = 0; i < 16; i++) {1583 fprintf(logfile, " %02x", ldub(ptr + i));1584 }1585 fprintf(logfile, "\n");1586 }1587 #endif1588 count++;1589 }1590 }1591 1601 if (env->cr[0] & CR0_PE_MASK) { 1592 1602 #ifdef TARGET_X86_64 … … 1614 1624 } 1615 1625 1626 /* This should come from sysemu.h - if we could include it here... */ 1627 void qemu_system_reset_request(void); 1628 1616 1629 /* 1617 1630 * Check nested exceptions and change to double or triple fault if … … 1627 1640 (intno >= 10 && intno <= 13); 1628 1641 1629 if (loglevel & CPU_LOG_INT) 1630 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n", 1642 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n", 1631 1643 env->old_exception, intno); 1632 1644 1633 if (env->old_exception == EXCP08_DBLE) 1634 cpu_abort(env, "triple fault"); 1645 #if !defined(CONFIG_USER_ONLY) 1646 if (env->old_exception == EXCP08_DBLE) { 1647 if (env->hflags & HF_SVMI_MASK) 1648 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */ 1649 1650 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); 1651 1652 # ifndef VBOX 1653 qemu_system_reset_request(); 1654 # else 1655 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve tripple fault handling. */ 1656 # endif 1657 return EXCP_HLT; 1658 } 1659 #endif 1635 1660 1636 1661 if ((first_contributory && second_contributory) … … 1654 1679 * is_int is TRUE. 
1655 1680 */ 1656 voidraise_interrupt(int intno, int is_int, int error_code,1657 int next_eip_addend)1681 static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code, 1682 int next_eip_addend) 1658 1683 { 1659 1684 #if defined(VBOX) && defined(DEBUG) … … 1676 1701 /* shortcuts to generate exceptions */ 1677 1702 1678 void (raise_exception_err)(int exception_index, int error_code)1703 void raise_exception_err(int exception_index, int error_code) 1679 1704 { 1680 1705 raise_interrupt(exception_index, 0, error_code, 0); … … 1712 1737 int i, offset; 1713 1738 1714 if (loglevel & CPU_LOG_INT) { 1715 fprintf(logfile, "SMM: enter\n"); 1716 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 1717 } 1739 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n"); 1740 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP); 1718 1741 1719 1742 env->hflags |= HF_SMM_MASK; … … 1846 1869 cpu_abort(env, "helper_rsm"); 1847 1870 #else /* !VBOX */ 1848 target_ulong sm_1849 1850 1871 target_ulong sm_state; 1851 1872 int i, offset; … … 1960 1981 cpu_smm_update(env); 1961 1982 1962 if (loglevel & CPU_LOG_INT) { 1963 fprintf(logfile, "SMM: after RSM\n"); 1964 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 1965 } 1983 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n"); 1984 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP); 1966 1985 #endif /* !VBOX */ 1967 1986 } … … 2107 2126 int eflags; 2108 2127 2109 eflags = cc_table[CC_OP].compute_all();2128 eflags = helper_cc_compute_all(CC_OP); 2110 2129 af = eflags & CC_A; 2111 2130 al = EAX & 0xff; … … 2123 2142 EAX = (EAX & ~0xffff) | al | (ah << 8); 2124 2143 CC_SRC = eflags; 2125 FORCE_RET();2126 2144 } 2127 2145 … … 2132 2150 int eflags; 2133 2151 2134 eflags = cc_table[CC_OP].compute_all();2152 eflags = helper_cc_compute_all(CC_OP); 2135 2153 af = eflags & CC_A; 2136 2154 al = EAX & 0xff; … … 2148 2166 EAX = (EAX & ~0xffff) | al | (ah << 8); 2149 2167 CC_SRC = eflags; 2150 FORCE_RET();2151 2168 } 2152 2169 … … 2156 2173 int eflags; 2157 2174 2158 eflags = cc_table[CC_OP].compute_all();2175 eflags = helper_cc_compute_all(CC_OP); 2159 2176 cf = eflags & CC_C; 2160 2177 af = eflags & CC_A; … … 2176 2193 eflags |= (al & 0x80); /* sf */ 2177 2194 CC_SRC = eflags; 2178 FORCE_RET();2179 2195 } 2180 2196 … … 2184 2200 int eflags; 2185 2201 2186 eflags = cc_table[CC_OP].compute_all();2202 eflags = helper_cc_compute_all(CC_OP); 2187 2203 cf = eflags & CC_C; 2188 2204 af = eflags & CC_A; … … 2207 2223 eflags |= (al & 0x80); /* sf */ 2208 2224 CC_SRC = eflags; 2209 FORCE_RET();2210 2225 } 2211 2226 … … 2213 2228 { 2214 2229 int eflags; 2215 eflags = cc_table[CC_OP].compute_all();2230 eflags = helper_cc_compute_all(CC_OP); 2216 2231 if (eflags & CC_O) { 2217 2232 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend); … … 2224 2239 int eflags; 2225 2240 2226 eflags = cc_table[CC_OP].compute_all();2241 eflags = helper_cc_compute_all(CC_OP); 2227 2242 d = ldq(a0); 2228 2243 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) { … … 2247 2262 if ((a0 & 0xf) != 0) 2248 2263 raise_exception(EXCP0D_GPF); 2249 eflags = cc_table[CC_OP].compute_all();2264 eflags = helper_cc_compute_all(CC_OP); 2250 2265 d0 = ldq(a0); 2251 2266 d1 = ldq(a0 + 8); … … 2268 2283 void helper_single_step(void) 2269 2284 { 2270 env->dr[6] |= 0x4000; 2271 raise_exception(EXCP01_SSTP); 2285 #ifndef CONFIG_USER_ONLY 2286 check_hw_breakpoints(env, 1); 2287 env->dr[6] |= DR6_BS; 2288 #endif 2289 raise_exception(EXCP01_DB); 2272 2290 } 2273 2291 2274 2292 void helper_cpuid(void) 2275 2293 { 2276 #ifndef VBOX 
2277 uint32_t index; 2294 uint32_t eax, ebx, ecx, edx; 2278 2295 2279 2296 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0); 2280 2297 2281 index = (uint32_t)EAX; 2282 /* test if maximum index reached */ 2283 if (index & 0x80000000) { 2284 if (index > env->cpuid_xlevel) 2285 index = env->cpuid_level; 2286 } else { 2287 if (index > env->cpuid_level) 2288 index = env->cpuid_level; 2289 } 2290 2291 switch(index) { 2292 case 0: 2293 EAX = env->cpuid_level; 2294 EBX = env->cpuid_vendor1; 2295 EDX = env->cpuid_vendor2; 2296 ECX = env->cpuid_vendor3; 2297 break; 2298 case 1: 2299 EAX = env->cpuid_version; 2300 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 2301 ECX = env->cpuid_ext_features; 2302 EDX = env->cpuid_features; 2303 break; 2304 case 2: 2305 /* cache info: needed for Pentium Pro compatibility */ 2306 EAX = 1; 2307 EBX = 0; 2308 ECX = 0; 2309 EDX = 0x2c307d; 2310 break; 2311 case 4: 2312 /* cache info: needed for Core compatibility */ 2313 switch (ECX) { 2314 case 0: /* L1 dcache info */ 2315 EAX = 0x0000121; 2316 EBX = 0x1c0003f; 2317 ECX = 0x000003f; 2318 EDX = 0x0000001; 2319 break; 2320 case 1: /* L1 icache info */ 2321 EAX = 0x0000122; 2322 EBX = 0x1c0003f; 2323 ECX = 0x000003f; 2324 EDX = 0x0000001; 2325 break; 2326 case 2: /* L2 cache info */ 2327 EAX = 0x0000143; 2328 EBX = 0x3c0003f; 2329 ECX = 0x0000fff; 2330 EDX = 0x0000001; 2331 break; 2332 default: /* end of info */ 2333 EAX = 0; 2334 EBX = 0; 2335 ECX = 0; 2336 EDX = 0; 2337 break; 2338 } 2339 2340 break; 2341 case 5: 2342 /* mwait info: needed for Core compatibility */ 2343 EAX = 0; /* Smallest monitor-line size in bytes */ 2344 EBX = 0; /* Largest monitor-line size in bytes */ 2345 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 2346 EDX = 0; 2347 break; 2348 case 6: 2349 /* Thermal and Power Leaf */ 2350 EAX = 0; 2351 EBX = 0; 2352 ECX = 0; 2353 EDX = 0; 2354 break; 2355 case 9: 2356 /* Direct Cache Access Information Leaf */ 2357 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */ 2358 EBX = 0; 2359 ECX = 0; 2360 EDX = 0; 2361 break; 2362 case 0xA: 2363 /* Architectural Performance Monitoring Leaf */ 2364 EAX = 0; 2365 EBX = 0; 2366 ECX = 0; 2367 EDX = 0; 2368 break; 2369 case 0x80000000: 2370 EAX = env->cpuid_xlevel; 2371 EBX = env->cpuid_vendor1; 2372 EDX = env->cpuid_vendor2; 2373 ECX = env->cpuid_vendor3; 2374 break; 2375 case 0x80000001: 2376 EAX = env->cpuid_features; 2377 EBX = 0; 2378 ECX = env->cpuid_ext3_features; 2379 EDX = env->cpuid_ext2_features; 2380 break; 2381 case 0x80000002: 2382 case 0x80000003: 2383 case 0x80000004: 2384 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 2385 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 2386 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 2387 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 2388 break; 2389 case 0x80000005: 2390 /* cache info (L1 cache) */ 2391 EAX = 0x01ff01ff; 2392 EBX = 0x01ff01ff; 2393 ECX = 0x40020140; 2394 EDX = 0x40020140; 2395 break; 2396 case 0x80000006: 2397 /* cache info (L2 cache) */ 2398 EAX = 0; 2399 EBX = 0x42004200; 2400 ECX = 0x02008140; 2401 EDX = 0; 2402 break; 2403 case 0x80000008: 2404 /* virtual & phys address size in low 2 bytes. */ 2405 /* XXX: This value must match the one used in the MMU code. */ 2406 if (env->cpuid_ext2_features & CPUID_EXT2_LM) { 2407 /* 64 bit processor */ 2408 #if defined(USE_KQEMU) 2409 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */ 2410 #else 2411 /* XXX: The physical address space is limited to 42 bits in exec.c. 
*/ 2412 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */ 2413 #endif 2414 } else { 2415 #if defined(USE_KQEMU) 2416 EAX = 0x00000020; /* 32 bits physical */ 2417 #else 2418 if (env->cpuid_features & CPUID_PSE36) 2419 EAX = 0x00000024; /* 36 bits physical */ 2420 else 2421 EAX = 0x00000020; /* 32 bits physical */ 2422 #endif 2423 } 2424 EBX = 0; 2425 ECX = 0; 2426 EDX = 0; 2427 break; 2428 case 0x8000000A: 2429 EAX = 0x00000001; 2430 EBX = 0; 2431 ECX = 0; 2432 EDX = 0; 2433 break; 2434 default: 2435 /* reserved values: zero */ 2436 EAX = 0; 2437 EBX = 0; 2438 ECX = 0; 2439 EDX = 0; 2440 break; 2441 } 2442 #else /* VBOX */ 2443 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX); 2444 #endif /* VBOX */ 2298 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx); 2299 EAX = eax; 2300 EBX = ebx; 2301 ECX = ecx; 2302 EDX = edx; 2445 2303 } 2446 2304 … … 2724 2582 e2); 2725 2583 #if 0 2726 fprintf(logfile,"load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",2584 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 2727 2585 selector, (unsigned long)sc->base, sc->limit, sc->flags); 2728 2586 #endif … … 2738 2596 target_ulong next_eip; 2739 2597 2740 #ifdef VBOX 2598 #ifdef VBOX /** @todo Why do we do this? */ 2741 2599 e1 = e2 = 0; 2742 2600 #endif … … 2859 2717 int new_stack, i; 2860 2718 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; 2861 uint32_t ss , ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;2719 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask; 2862 2720 uint32_t val, limit, old_sp_mask; 2863 2721 target_ulong ssp, old_ssp, next_eip; 2864 2722 2865 #ifdef VBOX 2866 ss = ss_e1 = ss_e2 =e1 = e2 = 0;2723 #ifdef VBOX /** @todo Why do we do this? */ 2724 e1 = e2 = 0; 2867 2725 #endif 2868 2726 next_eip = env->eip + next_eip_addend; 2869 #ifdef DEBUG_PCALL 2870 if (loglevel & CPU_LOG_PCALL) { 2871 fprintf(logfile, "lcall %04x:%08x s=%d\n", 2872 new_cs, (uint32_t)new_eip, shift); 2873 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 2874 } 2875 #endif 2727 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift); 2728 LOG_PCALL_STATE(env); 2876 2729 if ((new_cs & 0xfffc) == 0) 2877 2730 raise_exception_err(EXCP0D_GPF, 0); … … 2879 2732 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2880 2733 cpl = env->hflags & HF_CPL_MASK; 2881 #ifdef DEBUG_PCALL 2882 if (loglevel & CPU_LOG_PCALL) { 2883 fprintf(logfile, "desc=%08x:%08x\n", e1, e2); 2884 } 2885 #endif 2734 LOG_PCALL("desc=%08x:%08x\n", e1, e2); 2886 2735 if (e2 & DESC_S_MASK) { 2887 2736 if (!(e2 & DESC_CS_MASK)) … … 2987 2836 /* to inner privilege */ 2988 2837 get_ss_esp_from_tss(&ss, &sp, dpl); 2989 #ifdef DEBUG_PCALL 2990 if (loglevel & CPU_LOG_PCALL) 2991 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n", 2838 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n", 2992 2839 ss, sp, param_count, ESP); 2993 #endif2994 2840 if ((ss & 0xfffc) == 0) 2995 2841 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); … … 3178 3024 target_ulong ssp, sp, new_eip, new_esp, sp_mask; 3179 3025 3180 #ifdef VBOX 3026 #ifdef VBOX /** @todo Why do we do this? 
*/ 3181 3027 ss_e1 = ss_e2 = e1 = e2 = 0; 3182 3028 #endif … … 3233 3079 POPW(ssp, sp, sp_mask, new_eflags); 3234 3080 } 3235 #ifdef DEBUG_PCALL 3236 if (loglevel & CPU_LOG_PCALL) { 3237 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 3238 new_cs, new_eip, shift, addend); 3239 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 3240 } 3241 #endif 3081 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 3082 new_cs, new_eip, shift, addend); 3083 LOG_PCALL_STATE(env); 3242 3084 if ((new_cs & 0xfffc) == 0) 3243 3085 { … … 3324 3166 POPW(ssp, sp, sp_mask, new_ss); 3325 3167 } 3326 #ifdef DEBUG_PCALL 3327 if (loglevel & CPU_LOG_PCALL) { 3328 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n", 3168 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", 3329 3169 new_ss, new_esp); 3330 }3331 #endif3332 3170 if ((new_ss & 0xfffc) == 0) { 3333 3171 #ifdef TARGET_X86_64 … … 3436 3274 3437 3275 #ifdef VBOX 3438 e1 = e2 = 0; 3276 e1 = e2 = 0; /** @todo Why do we do this? */ 3439 3277 remR3TrapClear(env->pVM); 3440 3278 #endif … … 3567 3405 { 3568 3406 } 3407 3408 void helper_movl_drN_T0(int reg, target_ulong t0) 3409 { 3410 } 3569 3411 #else 3570 3412 target_ulong helper_read_crN(int reg) … … 3612 3454 } 3613 3455 } 3456 3457 void helper_movl_drN_T0(int reg, target_ulong t0) 3458 { 3459 int i; 3460 3461 if (reg < 4) { 3462 hw_breakpoint_remove(env, reg); 3463 env->dr[reg] = t0; 3464 hw_breakpoint_insert(env, reg); 3465 } else if (reg == 7) { 3466 for (i = 0; i < 4; i++) 3467 hw_breakpoint_remove(env, i); 3468 env->dr[7] = t0; 3469 for (i = 0; i < 4; i++) 3470 hw_breakpoint_insert(env, i); 3471 } else 3472 env->dr[reg] = t0; 3473 } 3614 3474 #endif 3615 3475 … … 3626 3486 env->cr[0] &= ~CR0_TS_MASK; 3627 3487 env->hflags &= ~HF_TS_MASK; 3628 }3629 3630 /* XXX: do more */3631 void helper_movl_drN_T0(int reg, target_ulong t0)3632 {3633 env->dr[reg] = t0;3634 3488 } 3635 3489 … … 3721 3575 break; 3722 3576 case MSR_IA32_APICBASE: 3723 # ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */3577 # ifndef VBOX /* The CPUMSetGuestMsr call below does this now. 
*/ 3724 3578 cpu_set_apic_base(env, val); 3725 # endif3579 # endif 3726 3580 break; 3727 3581 case MSR_EFER: … … 3739 3593 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) 3740 3594 update_mask |= MSR_EFER_SVME; 3595 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) 3596 update_mask |= MSR_EFER_FFXSR; 3741 3597 cpu_load_efer(env, (env->efer & ~update_mask) | 3742 3598 (val & update_mask)); … … 3772 3628 break; 3773 3629 #endif 3630 # ifndef VBOX 3631 case MSR_MTRRphysBase(0): 3632 case MSR_MTRRphysBase(1): 3633 case MSR_MTRRphysBase(2): 3634 case MSR_MTRRphysBase(3): 3635 case MSR_MTRRphysBase(4): 3636 case MSR_MTRRphysBase(5): 3637 case MSR_MTRRphysBase(6): 3638 case MSR_MTRRphysBase(7): 3639 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val; 3640 break; 3641 case MSR_MTRRphysMask(0): 3642 case MSR_MTRRphysMask(1): 3643 case MSR_MTRRphysMask(2): 3644 case MSR_MTRRphysMask(3): 3645 case MSR_MTRRphysMask(4): 3646 case MSR_MTRRphysMask(5): 3647 case MSR_MTRRphysMask(6): 3648 case MSR_MTRRphysMask(7): 3649 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val; 3650 break; 3651 case MSR_MTRRfix64K_00000: 3652 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val; 3653 break; 3654 case MSR_MTRRfix16K_80000: 3655 case MSR_MTRRfix16K_A0000: 3656 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val; 3657 break; 3658 case MSR_MTRRfix4K_C0000: 3659 case MSR_MTRRfix4K_C8000: 3660 case MSR_MTRRfix4K_D0000: 3661 case MSR_MTRRfix4K_D8000: 3662 case MSR_MTRRfix4K_E0000: 3663 case MSR_MTRRfix4K_E8000: 3664 case MSR_MTRRfix4K_F0000: 3665 case MSR_MTRRfix4K_F8000: 3666 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val; 3667 break; 3668 case MSR_MTRRdefType: 3669 env->mtrr_deftype = val; 3670 break; 3671 # endif /* !VBOX */ 3774 3672 default: 3775 # ifndef VBOX3673 # ifndef VBOX 3776 3674 /* XXX: exception ? */ 3777 # endif3778 break; 3779 } 3780 3781 # ifdef VBOX3675 # endif 3676 break; 3677 } 3678 3679 # ifdef VBOX 3782 3680 /* call CPUM. */ 3783 3681 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0) … … 3785 3683 /** @todo be a brave man and raise a \#GP(0) here as we should... */ 3786 3684 } 3787 # endif3685 # endif 3788 3686 } 3789 3687 … … 3791 3689 { 3792 3690 uint64_t val; 3691 3793 3692 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0); 3794 3693 … … 3818 3717 val = env->vm_hsave; 3819 3718 break; 3820 # ifndef VBOX /* forward to CPUMQueryGuestMsr. */3719 # ifndef VBOX /* forward to CPUMQueryGuestMsr. 
*/ 3821 3720 case MSR_IA32_PERF_STATUS: 3822 3721 /* tsc_increment_by_tick */ … … 3825 3724 val |= (((uint64_t)4ULL) << 40); 3826 3725 break; 3827 # endif /* !VBOX */3726 # endif /* !VBOX */ 3828 3727 #ifdef TARGET_X86_64 3829 3728 case MSR_LSTAR: … … 3855 3754 break; 3856 3755 #endif 3756 # ifndef VBOX 3757 case MSR_MTRRphysBase(0): 3758 case MSR_MTRRphysBase(1): 3759 case MSR_MTRRphysBase(2): 3760 case MSR_MTRRphysBase(3): 3761 case MSR_MTRRphysBase(4): 3762 case MSR_MTRRphysBase(5): 3763 case MSR_MTRRphysBase(6): 3764 case MSR_MTRRphysBase(7): 3765 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base; 3766 break; 3767 case MSR_MTRRphysMask(0): 3768 case MSR_MTRRphysMask(1): 3769 case MSR_MTRRphysMask(2): 3770 case MSR_MTRRphysMask(3): 3771 case MSR_MTRRphysMask(4): 3772 case MSR_MTRRphysMask(5): 3773 case MSR_MTRRphysMask(6): 3774 case MSR_MTRRphysMask(7): 3775 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask; 3776 break; 3777 case MSR_MTRRfix64K_00000: 3778 val = env->mtrr_fixed[0]; 3779 break; 3780 case MSR_MTRRfix16K_80000: 3781 case MSR_MTRRfix16K_A0000: 3782 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1]; 3783 break; 3784 case MSR_MTRRfix4K_C0000: 3785 case MSR_MTRRfix4K_C8000: 3786 case MSR_MTRRfix4K_D0000: 3787 case MSR_MTRRfix4K_D8000: 3788 case MSR_MTRRfix4K_E0000: 3789 case MSR_MTRRfix4K_E8000: 3790 case MSR_MTRRfix4K_F0000: 3791 case MSR_MTRRfix4K_F8000: 3792 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3]; 3793 break; 3794 case MSR_MTRRdefType: 3795 val = env->mtrr_deftype; 3796 break; 3797 case MSR_MTRRcap: 3798 if (env->cpuid_features & CPUID_MTRR) 3799 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED; 3800 else 3801 /* XXX: exception ? */ 3802 val = 0; 3803 break; 3804 # endif /* !VBOX */ 3857 3805 default: 3858 # ifndef VBOX3806 # ifndef VBOX 3859 3807 /* XXX: exception ? 
*/ 3860 3808 val = 0; 3861 # else /* VBOX */3809 # else /* VBOX */ 3862 3810 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0) 3863 3811 { … … 3865 3813 val = 0; 3866 3814 } 3867 # endif /* VBOX */3815 # endif /* VBOX */ 3868 3816 break; 3869 3817 } … … 3871 3819 EDX = (uint32_t)(val >> 32); 3872 3820 3873 # ifdef VBOX_STRICT3821 # ifdef VBOX_STRICT 3874 3822 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0) 3875 3823 val = 0; 3876 3824 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX))); 3877 # endif3825 # endif 3878 3826 } 3879 3827 #endif … … 3886 3834 3887 3835 selector = selector1 & 0xffff; 3888 eflags = cc_table[CC_OP].compute_all();3836 eflags = helper_cc_compute_all(CC_OP); 3889 3837 if (load_segment(&e1, &e2, selector) != 0) 3890 3838 goto fail; … … 3928 3876 3929 3877 selector = selector1 & 0xffff; 3930 eflags = cc_table[CC_OP].compute_all();3878 eflags = helper_cc_compute_all(CC_OP); 3931 3879 if ((selector & 0xfffc) == 0) 3932 3880 goto fail; … … 3974 3922 3975 3923 selector = selector1 & 0xffff; 3976 eflags = cc_table[CC_OP].compute_all();3924 eflags = helper_cc_compute_all(CC_OP); 3977 3925 if ((selector & 0xfffc) == 0) 3978 3926 goto fail; … … 4007 3955 4008 3956 selector = selector1 & 0xffff; 4009 eflags = cc_table[CC_OP].compute_all();3957 eflags = helper_cc_compute_all(CC_OP); 4010 3958 if ((selector & 0xfffc) == 0) 4011 3959 goto fail; … … 4047 3995 } 4048 3996 4049 void fpu_raise_exception(void)3997 static void fpu_raise_exception(void) 4050 3998 { 4051 3999 if (env->cr[0] & CR0_NE_MASK) { … … 4297 4245 ret = floatx_compare(ST0, FT0, &env->fp_status); 4298 4246 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; 4299 FORCE_RET();4300 4247 } 4301 4248 … … 4306 4253 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status); 4307 4254 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1]; 4308 FORCE_RET();4309 4255 } 4310 4256 … … 4317 4263 4318 4264 ret = floatx_compare(ST0, FT0, &env->fp_status); 4319 eflags = cc_table[CC_OP].compute_all();4265 eflags = helper_cc_compute_all(CC_OP); 4320 4266 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; 4321 4267 CC_SRC = eflags; 4322 FORCE_RET();4323 4268 } 4324 4269 … … 4329 4274 4330 4275 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status); 4331 eflags = cc_table[CC_OP].compute_all();4276 eflags = helper_cc_compute_all(CC_OP); 4332 4277 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; 4333 4278 CC_SRC = eflags; 4334 FORCE_RET();4335 4279 } 4336 4280 … … 4525 4469 if (env->fpus & FPUS_SE) 4526 4470 fpu_raise_exception(); 4527 FORCE_RET();4528 4471 } 4529 4472 … … 5040 4983 nb_xmm_regs = 8; 5041 4984 addr = ptr + 0xa0; 5042 for(i = 0; i < nb_xmm_regs; i++) { 5043 stq(addr, env->xmm_regs[i].XMM_Q(0)); 5044 stq(addr + 8, env->xmm_regs[i].XMM_Q(1)); 5045 addr += 16; 4985 /* Fast FXSAVE leaves out the XMM registers */ 4986 if (!(env->efer & MSR_EFER_FFXSR) 4987 || (env->hflags & HF_CPL_MASK) 4988 || !(env->hflags & HF_LMA_MASK)) { 4989 for(i = 0; i < nb_xmm_regs; i++) { 4990 stq(addr, env->xmm_regs[i].XMM_Q(0)); 4991 stq(addr + 8, env->xmm_regs[i].XMM_Q(1)); 4992 addr += 16; 4993 } 5046 4994 } 5047 4995 } … … 5080 5028 nb_xmm_regs = 8; 5081 5029 addr = ptr + 0xa0; 5082 for(i = 0; i < nb_xmm_regs; i++) { 5030 /* Fast FXRESTORE leaves out the XMM registers */ 5031 if (!(env->efer & MSR_EFER_FFXSR) 5032 || (env->hflags & HF_CPL_MASK) 5033 || !(env->hflags & HF_LMA_MASK)) { 5034 for(i = 0; i < nb_xmm_regs; i++) { 5083 5035 #if 
!defined(VBOX) || __GNUC__ < 4 5084 env->xmm_regs[i].XMM_Q(0) = ldq(addr);5085 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);5036 env->xmm_regs[i].XMM_Q(0) = ldq(addr); 5037 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8); 5086 5038 #else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */ 5087 5039 # if 1 5088 env->xmm_regs[i].XMM_L(0) = ldl(addr);5089 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);5090 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);5091 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);5040 env->xmm_regs[i].XMM_L(0) = ldl(addr); 5041 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4); 5042 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8); 5043 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12); 5092 5044 # else 5093 /* this works fine on Mac OS X, gcc 4.0.1 */5094 uint64_t u64 = ldq(addr);5095 env->xmm_regs[i].XMM_Q(0);5096 u64 = ldq(addr + 4);5097 env->xmm_regs[i].XMM_Q(1) = u64;5045 /* this works fine on Mac OS X, gcc 4.0.1 */ 5046 uint64_t u64 = ldq(addr); 5047 env->xmm_regs[i].XMM_Q(0); 5048 u64 = ldq(addr + 4); 5049 env->xmm_regs[i].XMM_Q(1) = u64; 5098 5050 # endif 5099 5051 #endif 5100 addr += 16; 5052 addr += 16; 5053 } 5101 5054 } 5102 5055 } … … 5429 5382 raise_exception(EXCP05_BOUND); 5430 5383 } 5431 FORCE_RET();5432 5384 } 5433 5385 … … 5440 5392 raise_exception(EXCP05_BOUND); 5441 5393 } 5442 FORCE_RET();5443 5394 } 5444 5395 … … 5520 5471 #endif /* VBOX */ 5521 5472 5473 #if !defined(CONFIG_USER_ONLY) 5522 5474 /* try to fill the TLB and return an exception if error. If retaddr is 5523 5475 NULL, it means that the function was called in C code (i.e. not … … 5552 5504 env = saved_env; 5553 5505 } 5506 #endif 5554 5507 5555 5508 #ifdef VBOX … … 6148 6101 addr = (uint32_t)EAX; 6149 6102 6150 if (loglevel & CPU_LOG_TB_IN_ASM) 6151 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr); 6103 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr); 6152 6104 6153 6105 env->vm_vmcb = addr; … … 6269 6221 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID); 6270 6222 6271 if (loglevel & CPU_LOG_TB_IN_ASM) 6272 fprintf(logfile, "Injecting(%#hx): ", valid_err); 6223 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err); 6273 6224 /* FIXME: need to implement valid_err */ 6274 6225 switch (event_inj & SVM_EVTINJ_TYPE_MASK) { … … 6278 6229 env->exception_is_int = 0; 6279 6230 env->exception_next_eip = -1; 6280 if (loglevel & CPU_LOG_TB_IN_ASM) 6281 fprintf(logfile, "INTR"); 6231 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR"); 6282 6232 /* XXX: is it always correct ? 
*/ 6283 6233 do_interrupt(vector, 0, 0, 0, 1); … … 6288 6238 env->exception_is_int = 0; 6289 6239 env->exception_next_eip = EIP; 6290 if (loglevel & CPU_LOG_TB_IN_ASM) 6291 fprintf(logfile, "NMI"); 6240 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); 6292 6241 cpu_loop_exit(); 6293 6242 break; … … 6297 6246 env->exception_is_int = 0; 6298 6247 env->exception_next_eip = -1; 6299 if (loglevel & CPU_LOG_TB_IN_ASM) 6300 fprintf(logfile, "EXEPT"); 6248 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT"); 6301 6249 cpu_loop_exit(); 6302 6250 break; … … 6306 6254 env->exception_is_int = 1; 6307 6255 env->exception_next_eip = EIP; 6308 if (loglevel & CPU_LOG_TB_IN_ASM) 6309 fprintf(logfile, "SOFT"); 6256 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); 6310 6257 cpu_loop_exit(); 6311 6258 break; 6312 6259 } 6313 if (loglevel & CPU_LOG_TB_IN_ASM) 6314 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code); 6260 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code); 6315 6261 } 6316 6262 } … … 6332 6278 addr = (uint32_t)EAX; 6333 6279 6334 if (loglevel & CPU_LOG_TB_IN_ASM) 6335 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", 6280 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", 6336 6281 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)), 6337 6282 env->segs[R_FS].base); … … 6368 6313 addr = (uint32_t)EAX; 6369 6314 6370 if (loglevel & CPU_LOG_TB_IN_ASM) 6371 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", 6315 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", 6372 6316 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)), 6373 6317 env->segs[R_FS].base); … … 6521 6465 uint32_t int_ctl; 6522 6466 6523 if (loglevel & CPU_LOG_TB_IN_ASM) 6524 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n", 6467 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n", 6525 6468 exit_code, exit_info_1, 6526 6469 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)), … … 6668 6611 6669 6612 /* XXX: suppress */ 6670 void helper_movq( uint64_t *d, uint64_t*s)6671 { 6672 * d = *s;6613 void helper_movq(void *d, void *s) 6614 { 6615 *(uint64_t *)d = *(uint64_t *)s; 6673 6616 } 6674 6617 … … 6740 6683 } 6741 6684 6742 CCTable cc_table[CC_OP_NB] = { 6743 [CC_OP_DYNAMIC] = { /* should never happen */ }, 6744 6745 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags }, 6746 6747 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull }, 6748 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull }, 6749 [CC_OP_MULL] = { compute_all_mull, compute_c_mull }, 6750 6751 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb }, 6752 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw }, 6753 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl }, 6754 6755 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb }, 6756 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw }, 6757 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl }, 6758 6759 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb }, 6760 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw }, 6761 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl }, 6762 6763 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb }, 6764 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw }, 6765 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl }, 6766 6767 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb 
}, 6768 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw }, 6769 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl }, 6770 6771 [CC_OP_INCB] = { compute_all_incb, compute_c_incl }, 6772 [CC_OP_INCW] = { compute_all_incw, compute_c_incl }, 6773 [CC_OP_INCL] = { compute_all_incl, compute_c_incl }, 6774 6775 [CC_OP_DECB] = { compute_all_decb, compute_c_incl }, 6776 [CC_OP_DECW] = { compute_all_decw, compute_c_incl }, 6777 [CC_OP_DECL] = { compute_all_decl, compute_c_incl }, 6778 6779 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb }, 6780 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw }, 6781 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll }, 6782 6783 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl }, 6784 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl }, 6785 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl }, 6685 uint32_t helper_cc_compute_all(int op) 6686 { 6687 switch (op) { 6688 default: /* should never happen */ return 0; 6689 6690 case CC_OP_EFLAGS: return compute_all_eflags(); 6691 6692 case CC_OP_MULB: return compute_all_mulb(); 6693 case CC_OP_MULW: return compute_all_mulw(); 6694 case CC_OP_MULL: return compute_all_mull(); 6695 6696 case CC_OP_ADDB: return compute_all_addb(); 6697 case CC_OP_ADDW: return compute_all_addw(); 6698 case CC_OP_ADDL: return compute_all_addl(); 6699 6700 case CC_OP_ADCB: return compute_all_adcb(); 6701 case CC_OP_ADCW: return compute_all_adcw(); 6702 case CC_OP_ADCL: return compute_all_adcl(); 6703 6704 case CC_OP_SUBB: return compute_all_subb(); 6705 case CC_OP_SUBW: return compute_all_subw(); 6706 case CC_OP_SUBL: return compute_all_subl(); 6707 6708 case CC_OP_SBBB: return compute_all_sbbb(); 6709 case CC_OP_SBBW: return compute_all_sbbw(); 6710 case CC_OP_SBBL: return compute_all_sbbl(); 6711 6712 case CC_OP_LOGICB: return compute_all_logicb(); 6713 case CC_OP_LOGICW: return compute_all_logicw(); 6714 case CC_OP_LOGICL: return compute_all_logicl(); 6715 6716 case CC_OP_INCB: return compute_all_incb(); 6717 case CC_OP_INCW: return compute_all_incw(); 6718 case CC_OP_INCL: return compute_all_incl(); 6719 6720 case CC_OP_DECB: return compute_all_decb(); 6721 case CC_OP_DECW: return compute_all_decw(); 6722 case CC_OP_DECL: return compute_all_decl(); 6723 6724 case CC_OP_SHLB: return compute_all_shlb(); 6725 case CC_OP_SHLW: return compute_all_shlw(); 6726 case CC_OP_SHLL: return compute_all_shll(); 6727 6728 case CC_OP_SARB: return compute_all_sarb(); 6729 case CC_OP_SARW: return compute_all_sarw(); 6730 case CC_OP_SARL: return compute_all_sarl(); 6786 6731 6787 6732 #ifdef TARGET_X86_64 6788 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull }, 6789 6790 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq }, 6791 6792 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq }, 6793 6794 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq }, 6795 6796 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq }, 6797 6798 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq }, 6799 6800 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl }, 6801 6802 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl }, 6803 6804 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq }, 6805 6806 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl }, 6807 #endif 6808 }; 6809 6733 case CC_OP_MULQ: return compute_all_mulq(); 6734 6735 case CC_OP_ADDQ: return compute_all_addq(); 6736 6737 case CC_OP_ADCQ: return compute_all_adcq(); 6738 6739 case CC_OP_SUBQ: return compute_all_subq(); 6740 6741 case CC_OP_SBBQ: return compute_all_sbbq(); 6742 6743 case CC_OP_LOGICQ: return 
compute_all_logicq(); 6744 6745 case CC_OP_INCQ: return compute_all_incq(); 6746 6747 case CC_OP_DECQ: return compute_all_decq(); 6748 6749 case CC_OP_SHLQ: return compute_all_shlq(); 6750 6751 case CC_OP_SARQ: return compute_all_sarq(); 6752 #endif 6753 } 6754 } 6755 6756 uint32_t helper_cc_compute_c(int op) 6757 { 6758 switch (op) { 6759 default: /* should never happen */ return 0; 6760 6761 case CC_OP_EFLAGS: return compute_c_eflags(); 6762 6763 case CC_OP_MULB: return compute_c_mull(); 6764 case CC_OP_MULW: return compute_c_mull(); 6765 case CC_OP_MULL: return compute_c_mull(); 6766 6767 case CC_OP_ADDB: return compute_c_addb(); 6768 case CC_OP_ADDW: return compute_c_addw(); 6769 case CC_OP_ADDL: return compute_c_addl(); 6770 6771 case CC_OP_ADCB: return compute_c_adcb(); 6772 case CC_OP_ADCW: return compute_c_adcw(); 6773 case CC_OP_ADCL: return compute_c_adcl(); 6774 6775 case CC_OP_SUBB: return compute_c_subb(); 6776 case CC_OP_SUBW: return compute_c_subw(); 6777 case CC_OP_SUBL: return compute_c_subl(); 6778 6779 case CC_OP_SBBB: return compute_c_sbbb(); 6780 case CC_OP_SBBW: return compute_c_sbbw(); 6781 case CC_OP_SBBL: return compute_c_sbbl(); 6782 6783 case CC_OP_LOGICB: return compute_c_logicb(); 6784 case CC_OP_LOGICW: return compute_c_logicw(); 6785 case CC_OP_LOGICL: return compute_c_logicl(); 6786 6787 case CC_OP_INCB: return compute_c_incl(); 6788 case CC_OP_INCW: return compute_c_incl(); 6789 case CC_OP_INCL: return compute_c_incl(); 6790 6791 case CC_OP_DECB: return compute_c_incl(); 6792 case CC_OP_DECW: return compute_c_incl(); 6793 case CC_OP_DECL: return compute_c_incl(); 6794 6795 case CC_OP_SHLB: return compute_c_shlb(); 6796 case CC_OP_SHLW: return compute_c_shlw(); 6797 case CC_OP_SHLL: return compute_c_shll(); 6798 6799 case CC_OP_SARB: return compute_c_sarl(); 6800 case CC_OP_SARW: return compute_c_sarl(); 6801 case CC_OP_SARL: return compute_c_sarl(); 6802 6803 #ifdef TARGET_X86_64 6804 case CC_OP_MULQ: return compute_c_mull(); 6805 6806 case CC_OP_ADDQ: return compute_c_addq(); 6807 6808 case CC_OP_ADCQ: return compute_c_adcq(); 6809 6810 case CC_OP_SUBQ: return compute_c_subq(); 6811 6812 case CC_OP_SBBQ: return compute_c_sbbq(); 6813 6814 case CC_OP_LOGICQ: return compute_c_logicq(); 6815 6816 case CC_OP_INCQ: return compute_c_incl(); 6817 6818 case CC_OP_DECQ: return compute_c_incl(); 6819 6820 case CC_OP_SHLQ: return compute_c_shlq(); 6821 6822 case CC_OP_SARQ: return compute_c_sarl(); 6823 #endif 6824 } 6825 } -
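One behavioural note on the op_helper.c changes above: on the !VBOX path, writes to the debug registers no longer just store a value. helper_movl_drN_T0() now unregisters and re-registers QEMU-level hardware breakpoints so the emulator's breakpoint list tracks DR0-DR3 and DR7. A sketch of how a guest MOV to DR7 flows through it; the surrounding call is illustrative, and the commented body paraphrases the hunk:

    /* Guest executes: mov dr7, eax   (t0 holds the new DR7 value) */
    helper_movl_drN_T0(7, t0);
        /* inside (reg == 7 case):
         *     for (i = 0; i < 4; i++) hw_breakpoint_remove(env, i);
         *     env->dr[7] = t0;
         *     for (i = 0; i < 4; i++) hw_breakpoint_insert(env, i);
         * so toggling an enable bit in DR7 arms or disarms the
         * corresponding breakpoint immediately. */

The companion change in switch_tss() drops the local-enable bits (DR7 & 0x55) and their registered breakpoints on a task switch, which matches what real hardware does with per-task breakpoints.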
trunk/src/recompiler/target-i386/ops_sse.h
r36140 r36170 17 17 * You should have received a copy of the GNU Lesser General Public 18 18 * License along with this library; if not, write to the Free Software 19 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 20 20 */ 21 21 … … 69 69 #endif 70 70 } 71 FORCE_RET();72 71 } 73 72 … … 115 114 #endif 116 115 } 117 FORCE_RET();118 116 } 119 117 … … 136 134 #endif 137 135 } 138 FORCE_RET();139 136 } 140 137 … … 174 171 #endif 175 172 } 176 FORCE_RET();177 173 } 178 174 … … 193 189 #endif 194 190 } 195 FORCE_RET();196 191 } 197 192 … … 212 207 #endif 213 208 } 214 FORCE_RET();215 209 } 216 210 … … 227 221 for(i = 16 - shift; i < 16; i++) 228 222 d->B(i) = 0; 229 FORCE_RET();230 223 } 231 224 … … 241 234 for(i = 0; i < shift; i++) 242 235 d->B(i) = 0; 243 FORCE_RET();244 236 } 245 237 #endif … … 443 435 (int16_t)s->W(2*i+1) * (int16_t)d->W(2*i+1); 444 436 } 445 FORCE_RET();446 437 } 447 438 … … 490 481 stb(a0 + i, d->B(i)); 491 482 } 492 FORCE_RET();493 483 } 494 484 … … 928 918 ret = float32_compare_quiet(s0, s1, &env->sse_status); 929 919 CC_SRC = comis_eflags[ret + 1]; 930 FORCE_RET();931 920 } 932 921 … … 940 929 ret = float32_compare(s0, s1, &env->sse_status); 941 930 CC_SRC = comis_eflags[ret + 1]; 942 FORCE_RET();943 931 } 944 932 … … 952 940 ret = float64_compare_quiet(d0, d1, &env->sse_status); 953 941 CC_SRC = comis_eflags[ret + 1]; 954 FORCE_RET();955 942 } 956 943 … … 964 951 ret = float64_compare(d0, d1, &env->sse_status); 965 952 CC_SRC = comis_eflags[ret + 1]; 966 FORCE_RET();967 953 } 968 954 … … 991 977 uint32_t val; 992 978 val = 0; 993 val |= (s-> XMM_B(0) >> 7);994 val |= (s-> XMM_B(1) >> 6) & 0x02;995 val |= (s-> XMM_B(2) >> 5) & 0x04;996 val |= (s-> XMM_B(3) >> 4) & 0x08;997 val |= (s-> XMM_B(4) >> 3) & 0x10;998 val |= (s-> XMM_B(5) >> 2) & 0x20;999 val |= (s-> XMM_B(6) >> 1) & 0x40;1000 val |= (s-> XMM_B(7)) & 0x80;1001 #if SHIFT == 1 1002 val |= (s-> XMM_B(8) << 1) & 0x0100;1003 val |= (s-> XMM_B(9) << 2) & 0x0200;1004 val |= (s-> XMM_B(10) << 3) & 0x0400;1005 val |= (s-> XMM_B(11) << 4) & 0x0800;1006 val |= (s-> XMM_B(12) << 5) & 0x1000;1007 val |= (s-> XMM_B(13) << 6) & 0x2000;1008 val |= (s-> XMM_B(14) << 7) & 0x4000;1009 val |= (s-> XMM_B(15) << 8) & 0x8000;979 val |= (s->B(0) >> 7); 980 val |= (s->B(1) >> 6) & 0x02; 981 val |= (s->B(2) >> 5) & 0x04; 982 val |= (s->B(3) >> 4) & 0x08; 983 val |= (s->B(4) >> 3) & 0x10; 984 val |= (s->B(5) >> 2) & 0x20; 985 val |= (s->B(6) >> 1) & 0x40; 986 val |= (s->B(7)) & 0x80; 987 #if SHIFT == 1 988 val |= (s->B(8) << 1) & 0x0100; 989 val |= (s->B(9) << 2) & 0x0200; 990 val |= (s->B(10) << 3) & 0x0400; 991 val |= (s->B(11) << 4) & 0x0800; 992 val |= (s->B(12) << 5) & 0x1000; 993 val |= (s->B(13) << 6) & 0x2000; 994 val |= (s->B(14) << 7) & 0x4000; 995 val |= (s->B(15) << 8) & 0x8000; 1010 996 #endif 1011 997 return val; … … 1510 1496 d->elem(0) = F(0);\ 1511 1497 d->elem(1) = F(1);\ 1512 d->elem(2) = F(2);\1513 d->elem(3) = F(3);\1514 if (num > 3) {\1515 d->elem(4) = F(4);\1516 d->elem(5) = F(5);\1517 if (num > 5) {\1498 if (num > 2) {\ 1499 d->elem(2) = F(2);\ 1500 d->elem(3) = F(3);\ 1501 if (num > 4) {\ 1502 d->elem(4) = F(4);\ 1503 d->elem(5) = F(5);\ 1518 1504 d->elem(6) = F(6);\ 1519 1505 d->elem(7) = F(7);\ -
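Two patterns recur in the ops_sse.h hunk above. First, the FORCE_RET() calls disappear; they were needed by the old dyngen code generator and are no-ops under TCG. Second, pmovmskb switches from the XMM-only XMM_B() accessor to the width-generic B(), so one body can serve both template instantiations of this file. The following reduced sketch folds the diff's unrolled shift-and-mask chain into a loop for brevity; the loop form is a rewrite, not the committed code, but it computes the same mask under the file's SHIFT convention (8 bytes for MMX, 16 for SSE):

    uint32_t glue(helper_pmovmskb, SUFFIX)(Reg *s)
    {
        uint32_t val = 0;
        int i;
        for (i = 0; i < (8 << SHIFT); i++)
            val |= (uint32_t)(s->B(i) >> 7) << i;   /* sign bit of byte i */
        return val;
    }

With the generic accessor the separate #if SHIFT == 1 extension for bytes 8-15 is only needed because the committed code stays unrolled.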
trunk/src/recompiler/target-i386/ops_sse_header.h
r33656 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 36 36 #endif 37 37 38 DEF_HELPER(void, glue(helper_psrlw, SUFFIX), (Reg *d, Reg *s)) 39 DEF_HELPER(void, glue(helper_psraw, SUFFIX), (Reg *d, Reg *s)) 40 DEF_HELPER(void, glue(helper_psllw, SUFFIX), (Reg *d, Reg *s)) 41 DEF_HELPER(void, glue(helper_psrld, SUFFIX), (Reg *d, Reg *s)) 42 DEF_HELPER(void, glue(helper_psrad, SUFFIX), (Reg *d, Reg *s)) 43 DEF_HELPER(void, glue(helper_pslld, SUFFIX), (Reg *d, Reg *s)) 44 DEF_HELPER(void, glue(helper_psrlq, SUFFIX), (Reg *d, Reg *s)) 45 DEF_HELPER(void, glue(helper_psllq, SUFFIX), (Reg *d, Reg *s)) 46 47 #if SHIFT == 1 48 DEF_HELPER(void, glue(helper_psrldq, SUFFIX), (Reg *d, Reg *s)) 49 DEF_HELPER(void, glue(helper_pslldq, SUFFIX), (Reg *d, Reg *s)) 38 #define dh_alias_Reg ptr 39 #define dh_alias_XMMReg ptr 40 #define dh_alias_MMXReg ptr 41 #define dh_ctype_Reg Reg * 42 #define dh_ctype_XMMReg XMMReg * 43 #define dh_ctype_MMXReg MMXReg * 44 45 DEF_HELPER_2(glue(psrlw, SUFFIX), void, Reg, Reg) 46 DEF_HELPER_2(glue(psraw, SUFFIX), void, Reg, Reg) 47 DEF_HELPER_2(glue(psllw, SUFFIX), void, Reg, Reg) 48 DEF_HELPER_2(glue(psrld, SUFFIX), void, Reg, Reg) 49 DEF_HELPER_2(glue(psrad, SUFFIX), void, Reg, Reg) 50 DEF_HELPER_2(glue(pslld, SUFFIX), void, Reg, Reg) 51 DEF_HELPER_2(glue(psrlq, SUFFIX), void, Reg, Reg) 52 DEF_HELPER_2(glue(psllq, SUFFIX), void, Reg, Reg) 53 54 #if SHIFT == 1 55 DEF_HELPER_2(glue(psrldq, SUFFIX), void, Reg, Reg) 56 DEF_HELPER_2(glue(pslldq, SUFFIX), void, Reg, Reg) 50 57 #endif 51 58 52 59 #define SSE_HELPER_B(name, F)\ 53 DEF_HELPER (void, glue(name, SUFFIX), (Reg *d, Reg *s))60 DEF_HELPER_2(glue(name, SUFFIX), void, Reg, Reg) 54 61 55 62 #define SSE_HELPER_W(name, F)\ 56 DEF_HELPER (void, glue(name, SUFFIX), (Reg *d, Reg *s))63 DEF_HELPER_2(glue(name, SUFFIX), void, Reg, Reg) 57 64 58 65 #define SSE_HELPER_L(name, F)\ 59 DEF_HELPER (void, glue(name, SUFFIX), (Reg *d, Reg *s))66 DEF_HELPER_2(glue(name, SUFFIX), void, Reg, Reg) 60 67 61 68 #define SSE_HELPER_Q(name, F)\ 62 DEF_HELPER (void, glue(name, SUFFIX), (Reg *d, Reg *s))63 64 SSE_HELPER_B( helper_paddb, FADD)65 SSE_HELPER_W( helper_paddw, FADD)66 SSE_HELPER_L( helper_paddl, FADD)67 SSE_HELPER_Q( helper_paddq, FADD)68 69 SSE_HELPER_B( helper_psubb, FSUB)70 SSE_HELPER_W( helper_psubw, FSUB)71 SSE_HELPER_L( helper_psubl, FSUB)72 SSE_HELPER_Q( helper_psubq, FSUB)73 74 SSE_HELPER_B( helper_paddusb, FADDUB)75 SSE_HELPER_B( helper_paddsb, FADDSB)76 SSE_HELPER_B( helper_psubusb, FSUBUB)77 SSE_HELPER_B( helper_psubsb, FSUBSB)78 79 SSE_HELPER_W( helper_paddusw, FADDUW)80 SSE_HELPER_W( helper_paddsw, FADDSW)81 SSE_HELPER_W( helper_psubusw, FSUBUW)82 SSE_HELPER_W( helper_psubsw, FSUBSW)83 84 SSE_HELPER_B( helper_pminub, FMINUB)85 SSE_HELPER_B( helper_pmaxub, FMAXUB)86 87 SSE_HELPER_W( helper_pminsw, FMINSW)88 SSE_HELPER_W( helper_pmaxsw, FMAXSW)89 90 SSE_HELPER_Q( helper_pand, FAND)91 SSE_HELPER_Q( helper_pandn, FANDN)92 SSE_HELPER_Q( helper_por, FOR)93 SSE_HELPER_Q( helper_pxor, FXOR)94 95 SSE_HELPER_B( helper_pcmpgtb, FCMPGTB)96 SSE_HELPER_W( helper_pcmpgtw, FCMPGTW)97 SSE_HELPER_L( helper_pcmpgtl, FCMPGTL)98 99 SSE_HELPER_B( helper_pcmpeqb, FCMPEQ)100 SSE_HELPER_W( helper_pcmpeqw, FCMPEQ)101 SSE_HELPER_L( helper_pcmpeql, FCMPEQ)102 103 
SSE_HELPER_W( helper_pmullw, FMULLW)69 DEF_HELPER_2(glue(name, SUFFIX), void, Reg, Reg) 70 71 SSE_HELPER_B(paddb, FADD) 72 SSE_HELPER_W(paddw, FADD) 73 SSE_HELPER_L(paddl, FADD) 74 SSE_HELPER_Q(paddq, FADD) 75 76 SSE_HELPER_B(psubb, FSUB) 77 SSE_HELPER_W(psubw, FSUB) 78 SSE_HELPER_L(psubl, FSUB) 79 SSE_HELPER_Q(psubq, FSUB) 80 81 SSE_HELPER_B(paddusb, FADDUB) 82 SSE_HELPER_B(paddsb, FADDSB) 83 SSE_HELPER_B(psubusb, FSUBUB) 84 SSE_HELPER_B(psubsb, FSUBSB) 85 86 SSE_HELPER_W(paddusw, FADDUW) 87 SSE_HELPER_W(paddsw, FADDSW) 88 SSE_HELPER_W(psubusw, FSUBUW) 89 SSE_HELPER_W(psubsw, FSUBSW) 90 91 SSE_HELPER_B(pminub, FMINUB) 92 SSE_HELPER_B(pmaxub, FMAXUB) 93 94 SSE_HELPER_W(pminsw, FMINSW) 95 SSE_HELPER_W(pmaxsw, FMAXSW) 96 97 SSE_HELPER_Q(pand, FAND) 98 SSE_HELPER_Q(pandn, FANDN) 99 SSE_HELPER_Q(por, FOR) 100 SSE_HELPER_Q(pxor, FXOR) 101 102 SSE_HELPER_B(pcmpgtb, FCMPGTB) 103 SSE_HELPER_W(pcmpgtw, FCMPGTW) 104 SSE_HELPER_L(pcmpgtl, FCMPGTL) 105 106 SSE_HELPER_B(pcmpeqb, FCMPEQ) 107 SSE_HELPER_W(pcmpeqw, FCMPEQ) 108 SSE_HELPER_L(pcmpeql, FCMPEQ) 109 110 SSE_HELPER_W(pmullw, FMULLW) 104 111 #if SHIFT == 0 105 SSE_HELPER_W( helper_pmulhrw, FMULHRW)106 #endif 107 SSE_HELPER_W( helper_pmulhuw, FMULHUW)108 SSE_HELPER_W( helper_pmulhw, FMULHW)109 110 SSE_HELPER_B( helper_pavgb, FAVG)111 SSE_HELPER_W( helper_pavgw, FAVG)112 113 DEF_HELPER (void, glue(helper_pmuludq, SUFFIX) , (Reg *d, Reg *s))114 DEF_HELPER (void, glue(helper_pmaddwd, SUFFIX) , (Reg *d, Reg *s))115 116 DEF_HELPER (void, glue(helper_psadbw, SUFFIX) , (Reg *d, Reg *s))117 DEF_HELPER (void, glue(helper_maskmov, SUFFIX) , (Reg *d, Reg *s, target_ulong a0))118 DEF_HELPER (void, glue(helper_movl_mm_T0, SUFFIX) , (Reg *d, uint32_t val))112 SSE_HELPER_W(pmulhrw, FMULHRW) 113 #endif 114 SSE_HELPER_W(pmulhuw, FMULHUW) 115 SSE_HELPER_W(pmulhw, FMULHW) 116 117 SSE_HELPER_B(pavgb, FAVG) 118 SSE_HELPER_W(pavgw, FAVG) 119 120 DEF_HELPER_2(glue(pmuludq, SUFFIX), void, Reg, Reg) 121 DEF_HELPER_2(glue(pmaddwd, SUFFIX), void, Reg, Reg) 122 123 DEF_HELPER_2(glue(psadbw, SUFFIX), void, Reg, Reg) 124 DEF_HELPER_3(glue(maskmov, SUFFIX), void, Reg, Reg, tl) 125 DEF_HELPER_2(glue(movl_mm_T0, SUFFIX), void, Reg, i32) 119 126 #ifdef TARGET_X86_64 120 DEF_HELPER (void, glue(helper_movq_mm_T0, SUFFIX) , (Reg *d, uint64_t val))127 DEF_HELPER_2(glue(movq_mm_T0, SUFFIX), void, Reg, i64) 121 128 #endif 122 129 123 130 #if SHIFT == 0 124 DEF_HELPER (void, glue(helper_pshufw, SUFFIX) , (Reg *d, Reg *s, int order))131 DEF_HELPER_3(glue(pshufw, SUFFIX), void, Reg, Reg, int) 125 132 #else 126 DEF_HELPER (void, helper_shufps, (Reg *d, Reg *s, int order))127 DEF_HELPER (void, helper_shufpd, (Reg *d, Reg *s, int order))128 DEF_HELPER (void, glue(helper_pshufd, SUFFIX) , (Reg *d, Reg *s, int order))129 DEF_HELPER (void, glue(helper_pshuflw, SUFFIX) , (Reg *d, Reg *s, int order))130 DEF_HELPER (void, glue(helper_pshufhw, SUFFIX) , (Reg *d, Reg *s, int order))133 DEF_HELPER_3(shufps, void, Reg, Reg, int) 134 DEF_HELPER_3(shufpd, void, Reg, Reg, int) 135 DEF_HELPER_3(glue(pshufd, SUFFIX), void, Reg, Reg, int) 136 DEF_HELPER_3(glue(pshuflw, SUFFIX), void, Reg, Reg, int) 137 DEF_HELPER_3(glue(pshufhw, SUFFIX), void, Reg, Reg, int) 131 138 #endif 132 139 … … 136 143 137 144 #define SSE_HELPER_S(name, F)\ 138 DEF_HELPER (void, helper_ ## name ## ps , (Reg *d, Reg *s)) \139 DEF_HELPER (void, helper_ ## name ## ss , (Reg *d, Reg *s)) \140 DEF_HELPER (void, helper_ ## name ## pd , (Reg *d, Reg *s)) \141 DEF_HELPER (void, helper_ ## name ## sd , (Reg *d, Reg *s))145 DEF_HELPER_2(name ## 
ps , void, Reg, Reg) \ 146 DEF_HELPER_2(name ## ss , void, Reg, Reg) \ 147 DEF_HELPER_2(name ## pd , void, Reg, Reg) \ 148 DEF_HELPER_2(name ## sd , void, Reg, Reg) 142 149 143 150 SSE_HELPER_S(add, FPU_ADD) … … 150 157 151 158 152 DEF_HELPER (void, helper_cvtps2pd, (Reg *d, Reg *s))153 DEF_HELPER (void, helper_cvtpd2ps, (Reg *d, Reg *s))154 DEF_HELPER (void, helper_cvtss2sd, (Reg *d, Reg *s))155 DEF_HELPER (void, helper_cvtsd2ss, (Reg *d, Reg *s))156 DEF_HELPER (void, helper_cvtdq2ps, (Reg *d, Reg *s))157 DEF_HELPER (void, helper_cvtdq2pd, (Reg *d, Reg *s))158 DEF_HELPER (void, helper_cvtpi2ps, (XMMReg *d, MMXReg *s))159 DEF_HELPER (void, helper_cvtpi2pd, (XMMReg *d, MMXReg *s))160 DEF_HELPER (void, helper_cvtsi2ss, (XMMReg *d, uint32_t val))161 DEF_HELPER (void, helper_cvtsi2sd, (XMMReg *d, uint32_t val))159 DEF_HELPER_2(cvtps2pd, void, Reg, Reg) 160 DEF_HELPER_2(cvtpd2ps, void, Reg, Reg) 161 DEF_HELPER_2(cvtss2sd, void, Reg, Reg) 162 DEF_HELPER_2(cvtsd2ss, void, Reg, Reg) 163 DEF_HELPER_2(cvtdq2ps, void, Reg, Reg) 164 DEF_HELPER_2(cvtdq2pd, void, Reg, Reg) 165 DEF_HELPER_2(cvtpi2ps, void, XMMReg, MMXReg) 166 DEF_HELPER_2(cvtpi2pd, void, XMMReg, MMXReg) 167 DEF_HELPER_2(cvtsi2ss, void, XMMReg, i32) 168 DEF_HELPER_2(cvtsi2sd, void, XMMReg, i32) 162 169 163 170 #ifdef TARGET_X86_64 164 DEF_HELPER (void, helper_cvtsq2ss, (XMMReg *d, uint64_t val))165 DEF_HELPER (void, helper_cvtsq2sd, (XMMReg *d, uint64_t val))166 #endif 167 168 DEF_HELPER (void, helper_cvtps2dq, (XMMReg *d, XMMReg *s))169 DEF_HELPER (void, helper_cvtpd2dq, (XMMReg *d, XMMReg *s))170 DEF_HELPER (void, helper_cvtps2pi, (MMXReg *d, XMMReg *s))171 DEF_HELPER (void, helper_cvtpd2pi, (MMXReg *d, XMMReg *s))172 DEF_HELPER (int32_t, helper_cvtss2si, (XMMReg *s))173 DEF_HELPER (int32_t, helper_cvtsd2si, (XMMReg *s))171 DEF_HELPER_2(cvtsq2ss, void, XMMReg, i64) 172 DEF_HELPER_2(cvtsq2sd, void, XMMReg, i64) 173 #endif 174 175 DEF_HELPER_2(cvtps2dq, void, XMMReg, XMMReg) 176 DEF_HELPER_2(cvtpd2dq, void, XMMReg, XMMReg) 177 DEF_HELPER_2(cvtps2pi, void, MMXReg, XMMReg) 178 DEF_HELPER_2(cvtpd2pi, void, MMXReg, XMMReg) 179 DEF_HELPER_1(cvtss2si, s32, XMMReg) 180 DEF_HELPER_1(cvtsd2si, s32, XMMReg) 174 181 #ifdef TARGET_X86_64 175 DEF_HELPER (int64_t, helper_cvtss2sq, (XMMReg *s))176 DEF_HELPER (int64_t, helper_cvtsd2sq, (XMMReg *s))177 #endif 178 179 DEF_HELPER (void, helper_cvttps2dq, (XMMReg *d, XMMReg *s))180 DEF_HELPER (void, helper_cvttpd2dq, (XMMReg *d, XMMReg *s))181 DEF_HELPER (void, helper_cvttps2pi, (MMXReg *d, XMMReg *s))182 DEF_HELPER (void, helper_cvttpd2pi, (MMXReg *d, XMMReg *s))183 DEF_HELPER (int32_t, helper_cvttss2si, (XMMReg *s))184 DEF_HELPER (int32_t, helper_cvttsd2si, (XMMReg *s))182 DEF_HELPER_1(cvtss2sq, s64, XMMReg) 183 DEF_HELPER_1(cvtsd2sq, s64, XMMReg) 184 #endif 185 186 DEF_HELPER_2(cvttps2dq, void, XMMReg, XMMReg) 187 DEF_HELPER_2(cvttpd2dq, void, XMMReg, XMMReg) 188 DEF_HELPER_2(cvttps2pi, void, MMXReg, XMMReg) 189 DEF_HELPER_2(cvttpd2pi, void, MMXReg, XMMReg) 190 DEF_HELPER_1(cvttss2si, s32, XMMReg) 191 DEF_HELPER_1(cvttsd2si, s32, XMMReg) 185 192 #ifdef TARGET_X86_64 186 DEF_HELPER (int64_t, helper_cvttss2sq, (XMMReg *s))187 DEF_HELPER (int64_t, helper_cvttsd2sq, (XMMReg *s))188 #endif 189 190 DEF_HELPER (void, helper_rsqrtps, (XMMReg *d, XMMReg *s))191 DEF_HELPER (void, helper_rsqrtss, (XMMReg *d, XMMReg *s))192 DEF_HELPER (void, helper_rcpps, (XMMReg *d, XMMReg *s))193 DEF_HELPER (void, helper_rcpss, (XMMReg *d, XMMReg *s))194 DEF_HELPER (void, helper_haddps, (XMMReg *d, XMMReg *s))195 DEF_HELPER (void, 
helper_haddpd, (XMMReg *d, XMMReg *s))196 DEF_HELPER (void, helper_hsubps, (XMMReg *d, XMMReg *s))197 DEF_HELPER (void, helper_hsubpd, (XMMReg *d, XMMReg *s))198 DEF_HELPER (void, helper_addsubps, (XMMReg *d, XMMReg *s))199 DEF_HELPER (void, helper_addsubpd, (XMMReg *d, XMMReg *s))193 DEF_HELPER_1(cvttss2sq, s64, XMMReg) 194 DEF_HELPER_1(cvttsd2sq, s64, XMMReg) 195 #endif 196 197 DEF_HELPER_2(rsqrtps, void, XMMReg, XMMReg) 198 DEF_HELPER_2(rsqrtss, void, XMMReg, XMMReg) 199 DEF_HELPER_2(rcpps, void, XMMReg, XMMReg) 200 DEF_HELPER_2(rcpss, void, XMMReg, XMMReg) 201 DEF_HELPER_2(haddps, void, XMMReg, XMMReg) 202 DEF_HELPER_2(haddpd, void, XMMReg, XMMReg) 203 DEF_HELPER_2(hsubps, void, XMMReg, XMMReg) 204 DEF_HELPER_2(hsubpd, void, XMMReg, XMMReg) 205 DEF_HELPER_2(addsubps, void, XMMReg, XMMReg) 206 DEF_HELPER_2(addsubpd, void, XMMReg, XMMReg) 200 207 201 208 #define SSE_HELPER_CMP(name, F)\ 202 DEF_HELPER (void, helper_ ## name ## ps , (Reg *d, Reg *s)) \203 DEF_HELPER (void, helper_ ## name ## ss , (Reg *d, Reg *s)) \204 DEF_HELPER (void, helper_ ## name ## pd , (Reg *d, Reg *s)) \205 DEF_HELPER (void, helper_ ## name ## sd , (Reg *d, Reg *s))209 DEF_HELPER_2( name ## ps , void, Reg, Reg) \ 210 DEF_HELPER_2( name ## ss , void, Reg, Reg) \ 211 DEF_HELPER_2( name ## pd , void, Reg, Reg) \ 212 DEF_HELPER_2( name ## sd , void, Reg, Reg) 206 213 207 214 SSE_HELPER_CMP(cmpeq, FPU_CMPEQ) … … 214 221 SSE_HELPER_CMP(cmpord, FPU_CMPORD) 215 222 216 DEF_HELPER (void, helper_ucomiss, (Reg *d, Reg *s))217 DEF_HELPER (void, helper_comiss, (Reg *d, Reg *s))218 DEF_HELPER (void, helper_ucomisd, (Reg *d, Reg *s))219 DEF_HELPER (void, helper_comisd, (Reg *d, Reg *s))220 DEF_HELPER (uint32_t, helper_movmskps, (Reg *s))221 DEF_HELPER (uint32_t, helper_movmskpd, (Reg *s))222 #endif 223 224 DEF_HELPER (uint32_t, glue(helper_pmovmskb, SUFFIX), (Reg *s))225 DEF_HELPER (void, glue(helper_packsswb, SUFFIX) , (Reg *d, Reg *s))226 DEF_HELPER (void, glue(helper_packuswb, SUFFIX) , (Reg *d, Reg *s))227 DEF_HELPER (void, glue(helper_packssdw, SUFFIX) , (Reg *d, Reg *s))223 DEF_HELPER_2(ucomiss, void, Reg, Reg) 224 DEF_HELPER_2(comiss, void, Reg, Reg) 225 DEF_HELPER_2(ucomisd, void, Reg, Reg) 226 DEF_HELPER_2(comisd, void, Reg, Reg) 227 DEF_HELPER_1(movmskps, i32, Reg) 228 DEF_HELPER_1(movmskpd, i32, Reg) 229 #endif 230 231 DEF_HELPER_1(glue(pmovmskb, SUFFIX), i32, Reg) 232 DEF_HELPER_2(glue(packsswb, SUFFIX), void, Reg, Reg) 233 DEF_HELPER_2(glue(packuswb, SUFFIX), void, Reg, Reg) 234 DEF_HELPER_2(glue(packssdw, SUFFIX), void, Reg, Reg) 228 235 #define UNPCK_OP(base_name, base) \ 229 DEF_HELPER (void, glue(helper_punpck ## base_name ## bw, SUFFIX) , (Reg *d, Reg *s)) \230 DEF_HELPER (void, glue(helper_punpck ## base_name ## wd, SUFFIX) , (Reg *d, Reg *s)) \231 DEF_HELPER (void, glue(helper_punpck ## base_name ## dq, SUFFIX) , (Reg *d, Reg *s))236 DEF_HELPER_2(glue(punpck ## base_name ## bw, SUFFIX) , void, Reg, Reg) \ 237 DEF_HELPER_2(glue(punpck ## base_name ## wd, SUFFIX) , void, Reg, Reg) \ 238 DEF_HELPER_2(glue(punpck ## base_name ## dq, SUFFIX) , void, Reg, Reg) 232 239 233 240 UNPCK_OP(l, 0) … … 235 242 236 243 #if SHIFT == 1 237 DEF_HELPER (void, glue(helper_punpcklqdq, SUFFIX) , (Reg *d, Reg *s))238 DEF_HELPER (void, glue(helper_punpckhqdq, SUFFIX) , (Reg *d, Reg *s))244 DEF_HELPER_2(glue(punpcklqdq, SUFFIX), void, Reg, Reg) 245 DEF_HELPER_2(glue(punpckhqdq, SUFFIX), void, Reg, Reg) 239 246 #endif 240 247 241 248 /* 3DNow! 
float ops */ 242 249 #if SHIFT == 0 243 DEF_HELPER (void, helper_pi2fd, (MMXReg *d, MMXReg *s))244 DEF_HELPER (void, helper_pi2fw, (MMXReg *d, MMXReg *s))245 DEF_HELPER (void, helper_pf2id, (MMXReg *d, MMXReg *s))246 DEF_HELPER (void, helper_pf2iw, (MMXReg *d, MMXReg *s))247 DEF_HELPER (void, helper_pfacc, (MMXReg *d, MMXReg *s))248 DEF_HELPER (void, helper_pfadd, (MMXReg *d, MMXReg *s))249 DEF_HELPER (void, helper_pfcmpeq, (MMXReg *d, MMXReg *s))250 DEF_HELPER (void, helper_pfcmpge, (MMXReg *d, MMXReg *s))251 DEF_HELPER (void, helper_pfcmpgt, (MMXReg *d, MMXReg *s))252 DEF_HELPER (void, helper_pfmax, (MMXReg *d, MMXReg *s))253 DEF_HELPER (void, helper_pfmin, (MMXReg *d, MMXReg *s))254 DEF_HELPER (void, helper_pfmul, (MMXReg *d, MMXReg *s))255 DEF_HELPER (void, helper_pfnacc, (MMXReg *d, MMXReg *s))256 DEF_HELPER (void, helper_pfpnacc, (MMXReg *d, MMXReg *s))257 DEF_HELPER (void, helper_pfrcp, (MMXReg *d, MMXReg *s))258 DEF_HELPER (void, helper_pfrsqrt, (MMXReg *d, MMXReg *s))259 DEF_HELPER (void, helper_pfsub, (MMXReg *d, MMXReg *s))260 DEF_HELPER (void, helper_pfsubr, (MMXReg *d, MMXReg *s))261 DEF_HELPER (void, helper_pswapd, (MMXReg *d, MMXReg *s))250 DEF_HELPER_2(pi2fd, void, MMXReg, MMXReg) 251 DEF_HELPER_2(pi2fw, void, MMXReg, MMXReg) 252 DEF_HELPER_2(pf2id, void, MMXReg, MMXReg) 253 DEF_HELPER_2(pf2iw, void, MMXReg, MMXReg) 254 DEF_HELPER_2(pfacc, void, MMXReg, MMXReg) 255 DEF_HELPER_2(pfadd, void, MMXReg, MMXReg) 256 DEF_HELPER_2(pfcmpeq, void, MMXReg, MMXReg) 257 DEF_HELPER_2(pfcmpge, void, MMXReg, MMXReg) 258 DEF_HELPER_2(pfcmpgt, void, MMXReg, MMXReg) 259 DEF_HELPER_2(pfmax, void, MMXReg, MMXReg) 260 DEF_HELPER_2(pfmin, void, MMXReg, MMXReg) 261 DEF_HELPER_2(pfmul, void, MMXReg, MMXReg) 262 DEF_HELPER_2(pfnacc, void, MMXReg, MMXReg) 263 DEF_HELPER_2(pfpnacc, void, MMXReg, MMXReg) 264 DEF_HELPER_2(pfrcp, void, MMXReg, MMXReg) 265 DEF_HELPER_2(pfrsqrt, void, MMXReg, MMXReg) 266 DEF_HELPER_2(pfsub, void, MMXReg, MMXReg) 267 DEF_HELPER_2(pfsubr, void, MMXReg, MMXReg) 268 DEF_HELPER_2(pswapd, void, MMXReg, MMXReg) 262 269 #endif 263 270 264 271 /* SSSE3 op helpers */ 265 DEF_HELPER (void, glue(helper_phaddw, SUFFIX), (Reg *d, Reg *s))266 DEF_HELPER (void, glue(helper_phaddd, SUFFIX), (Reg *d, Reg *s))267 DEF_HELPER (void, glue(helper_phaddsw, SUFFIX), (Reg *d, Reg *s))268 DEF_HELPER (void, glue(helper_phsubw, SUFFIX), (Reg *d, Reg *s))269 DEF_HELPER (void, glue(helper_phsubd, SUFFIX), (Reg *d, Reg *s))270 DEF_HELPER (void, glue(helper_phsubsw, SUFFIX), (Reg *d, Reg *s))271 DEF_HELPER (void, glue(helper_pabsb, SUFFIX), (Reg *d, Reg *s))272 DEF_HELPER (void, glue(helper_pabsw, SUFFIX), (Reg *d, Reg *s))273 DEF_HELPER (void, glue(helper_pabsd, SUFFIX), (Reg *d, Reg *s))274 DEF_HELPER (void, glue(helper_pmaddubsw, SUFFIX), (Reg *d, Reg *s))275 DEF_HELPER (void, glue(helper_pmulhrsw, SUFFIX), (Reg *d, Reg *s))276 DEF_HELPER (void, glue(helper_pshufb, SUFFIX), (Reg *d, Reg *s))277 DEF_HELPER (void, glue(helper_psignb, SUFFIX), (Reg *d, Reg *s))278 DEF_HELPER (void, glue(helper_psignw, SUFFIX), (Reg *d, Reg *s))279 DEF_HELPER (void, glue(helper_psignd, SUFFIX), (Reg *d, Reg *s))280 DEF_HELPER (void, glue(helper_palignr, SUFFIX), (Reg *d, Reg *s, int32_t shift))272 DEF_HELPER_2(glue(phaddw, SUFFIX), void, Reg, Reg) 273 DEF_HELPER_2(glue(phaddd, SUFFIX), void, Reg, Reg) 274 DEF_HELPER_2(glue(phaddsw, SUFFIX), void, Reg, Reg) 275 DEF_HELPER_2(glue(phsubw, SUFFIX), void, Reg, Reg) 276 DEF_HELPER_2(glue(phsubd, SUFFIX), void, Reg, Reg) 277 DEF_HELPER_2(glue(phsubsw, SUFFIX), void, Reg, Reg) 278 
DEF_HELPER_2(glue(pabsb, SUFFIX), void, Reg, Reg) 279 DEF_HELPER_2(glue(pabsw, SUFFIX), void, Reg, Reg) 280 DEF_HELPER_2(glue(pabsd, SUFFIX), void, Reg, Reg) 281 DEF_HELPER_2(glue(pmaddubsw, SUFFIX), void, Reg, Reg) 282 DEF_HELPER_2(glue(pmulhrsw, SUFFIX), void, Reg, Reg) 283 DEF_HELPER_2(glue(pshufb, SUFFIX), void, Reg, Reg) 284 DEF_HELPER_2(glue(psignb, SUFFIX), void, Reg, Reg) 285 DEF_HELPER_2(glue(psignw, SUFFIX), void, Reg, Reg) 286 DEF_HELPER_2(glue(psignd, SUFFIX), void, Reg, Reg) 287 DEF_HELPER_3(glue(palignr, SUFFIX), void, Reg, Reg, s32) 281 288 282 289 /* SSE4.1 op helpers */ 283 290 #if SHIFT == 1 284 DEF_HELPER (void, glue(helper_pblendvb, SUFFIX), (Reg *d, Reg *s))285 DEF_HELPER (void, glue(helper_blendvps, SUFFIX), (Reg *d, Reg *s))286 DEF_HELPER (void, glue(helper_blendvpd, SUFFIX), (Reg *d, Reg *s))287 DEF_HELPER (void, glue(helper_ptest, SUFFIX), (Reg *d, Reg *s))288 DEF_HELPER (void, glue(helper_pmovsxbw, SUFFIX), (Reg *d, Reg *s))289 DEF_HELPER (void, glue(helper_pmovsxbd, SUFFIX), (Reg *d, Reg *s))290 DEF_HELPER (void, glue(helper_pmovsxbq, SUFFIX), (Reg *d, Reg *s))291 DEF_HELPER (void, glue(helper_pmovsxwd, SUFFIX), (Reg *d, Reg *s))292 DEF_HELPER (void, glue(helper_pmovsxwq, SUFFIX), (Reg *d, Reg *s))293 DEF_HELPER (void, glue(helper_pmovsxdq, SUFFIX), (Reg *d, Reg *s))294 DEF_HELPER (void, glue(helper_pmovzxbw, SUFFIX), (Reg *d, Reg *s))295 DEF_HELPER (void, glue(helper_pmovzxbd, SUFFIX), (Reg *d, Reg *s))296 DEF_HELPER (void, glue(helper_pmovzxbq, SUFFIX), (Reg *d, Reg *s))297 DEF_HELPER (void, glue(helper_pmovzxwd, SUFFIX), (Reg *d, Reg *s))298 DEF_HELPER (void, glue(helper_pmovzxwq, SUFFIX), (Reg *d, Reg *s))299 DEF_HELPER (void, glue(helper_pmovzxdq, SUFFIX), (Reg *d, Reg *s))300 DEF_HELPER (void, glue(helper_pmuldq, SUFFIX), (Reg *d, Reg *s))301 DEF_HELPER (void, glue(helper_pcmpeqq, SUFFIX), (Reg *d, Reg *s))302 DEF_HELPER (void, glue(helper_packusdw, SUFFIX), (Reg *d, Reg *s))303 DEF_HELPER (void, glue(helper_pminsb, SUFFIX), (Reg *d, Reg *s))304 DEF_HELPER (void, glue(helper_pminsd, SUFFIX), (Reg *d, Reg *s))305 DEF_HELPER (void, glue(helper_pminuw, SUFFIX), (Reg *d, Reg *s))306 DEF_HELPER (void, glue(helper_pminud, SUFFIX), (Reg *d, Reg *s))307 DEF_HELPER (void, glue(helper_pmaxsb, SUFFIX), (Reg *d, Reg *s))308 DEF_HELPER (void, glue(helper_pmaxsd, SUFFIX), (Reg *d, Reg *s))309 DEF_HELPER (void, glue(helper_pmaxuw, SUFFIX), (Reg *d, Reg *s))310 DEF_HELPER (void, glue(helper_pmaxud, SUFFIX), (Reg *d, Reg *s))311 DEF_HELPER (void, glue(helper_pmulld, SUFFIX), (Reg *d, Reg *s))312 DEF_HELPER (void, glue(helper_phminposuw, SUFFIX), (Reg *d, Reg *s))313 DEF_HELPER (void, glue(helper_roundps, SUFFIX), (Reg *d, Reg *s, uint32_t mode))314 DEF_HELPER (void, glue(helper_roundpd, SUFFIX), (Reg *d, Reg *s, uint32_t mode))315 DEF_HELPER (void, glue(helper_roundss, SUFFIX), (Reg *d, Reg *s, uint32_t mode))316 DEF_HELPER (void, glue(helper_roundsd, SUFFIX), (Reg *d, Reg *s, uint32_t mode))317 DEF_HELPER (void, glue(helper_blendps, SUFFIX), (Reg *d, Reg *s, uint32_t imm))318 DEF_HELPER (void, glue(helper_blendpd, SUFFIX), (Reg *d, Reg *s, uint32_t imm))319 DEF_HELPER (void, glue(helper_pblendw, SUFFIX), (Reg *d, Reg *s, uint32_t imm))320 DEF_HELPER (void, glue(helper_dpps, SUFFIX), (Reg *d, Reg *s, uint32_t mask))321 DEF_HELPER (void, glue(helper_dppd, SUFFIX), (Reg *d, Reg *s, uint32_t mask))322 DEF_HELPER (void, glue(helper_mpsadbw, SUFFIX), (Reg *d, Reg *s, uint32_t off))291 DEF_HELPER_2(glue(pblendvb, SUFFIX), void, Reg, Reg) 292 DEF_HELPER_2(glue(blendvps, SUFFIX), 
void, Reg, Reg) 293 DEF_HELPER_2(glue(blendvpd, SUFFIX), void, Reg, Reg) 294 DEF_HELPER_2(glue(ptest, SUFFIX), void, Reg, Reg) 295 DEF_HELPER_2(glue(pmovsxbw, SUFFIX), void, Reg, Reg) 296 DEF_HELPER_2(glue(pmovsxbd, SUFFIX), void, Reg, Reg) 297 DEF_HELPER_2(glue(pmovsxbq, SUFFIX), void, Reg, Reg) 298 DEF_HELPER_2(glue(pmovsxwd, SUFFIX), void, Reg, Reg) 299 DEF_HELPER_2(glue(pmovsxwq, SUFFIX), void, Reg, Reg) 300 DEF_HELPER_2(glue(pmovsxdq, SUFFIX), void, Reg, Reg) 301 DEF_HELPER_2(glue(pmovzxbw, SUFFIX), void, Reg, Reg) 302 DEF_HELPER_2(glue(pmovzxbd, SUFFIX), void, Reg, Reg) 303 DEF_HELPER_2(glue(pmovzxbq, SUFFIX), void, Reg, Reg) 304 DEF_HELPER_2(glue(pmovzxwd, SUFFIX), void, Reg, Reg) 305 DEF_HELPER_2(glue(pmovzxwq, SUFFIX), void, Reg, Reg) 306 DEF_HELPER_2(glue(pmovzxdq, SUFFIX), void, Reg, Reg) 307 DEF_HELPER_2(glue(pmuldq, SUFFIX), void, Reg, Reg) 308 DEF_HELPER_2(glue(pcmpeqq, SUFFIX), void, Reg, Reg) 309 DEF_HELPER_2(glue(packusdw, SUFFIX), void, Reg, Reg) 310 DEF_HELPER_2(glue(pminsb, SUFFIX), void, Reg, Reg) 311 DEF_HELPER_2(glue(pminsd, SUFFIX), void, Reg, Reg) 312 DEF_HELPER_2(glue(pminuw, SUFFIX), void, Reg, Reg) 313 DEF_HELPER_2(glue(pminud, SUFFIX), void, Reg, Reg) 314 DEF_HELPER_2(glue(pmaxsb, SUFFIX), void, Reg, Reg) 315 DEF_HELPER_2(glue(pmaxsd, SUFFIX), void, Reg, Reg) 316 DEF_HELPER_2(glue(pmaxuw, SUFFIX), void, Reg, Reg) 317 DEF_HELPER_2(glue(pmaxud, SUFFIX), void, Reg, Reg) 318 DEF_HELPER_2(glue(pmulld, SUFFIX), void, Reg, Reg) 319 DEF_HELPER_2(glue(phminposuw, SUFFIX), void, Reg, Reg) 320 DEF_HELPER_3(glue(roundps, SUFFIX), void, Reg, Reg, i32) 321 DEF_HELPER_3(glue(roundpd, SUFFIX), void, Reg, Reg, i32) 322 DEF_HELPER_3(glue(roundss, SUFFIX), void, Reg, Reg, i32) 323 DEF_HELPER_3(glue(roundsd, SUFFIX), void, Reg, Reg, i32) 324 DEF_HELPER_3(glue(blendps, SUFFIX), void, Reg, Reg, i32) 325 DEF_HELPER_3(glue(blendpd, SUFFIX), void, Reg, Reg, i32) 326 DEF_HELPER_3(glue(pblendw, SUFFIX), void, Reg, Reg, i32) 327 DEF_HELPER_3(glue(dpps, SUFFIX), void, Reg, Reg, i32) 328 DEF_HELPER_3(glue(dppd, SUFFIX), void, Reg, Reg, i32) 329 DEF_HELPER_3(glue(mpsadbw, SUFFIX), void, Reg, Reg, i32) 323 330 #endif 324 331 325 332 /* SSE4.2 op helpers */ 326 333 #if SHIFT == 1 327 DEF_HELPER(void, glue(helper_pcmpgtq, SUFFIX), (Reg *d, Reg *s)) 328 DEF_HELPER(void, glue(helper_pcmpestri, SUFFIX), (Reg *d, Reg *s, uint32_t ctl)) 329 DEF_HELPER(void, glue(helper_pcmpestrm, SUFFIX), (Reg *d, Reg *s, uint32_t ctl)) 330 DEF_HELPER(void, glue(helper_pcmpistri, SUFFIX), (Reg *d, Reg *s, uint32_t ctl)) 331 DEF_HELPER(void, glue(helper_pcmpistrm, SUFFIX), (Reg *d, Reg *s, uint32_t ctl)) 332 DEF_HELPER(target_ulong, helper_crc32, 333 (uint32_t crc1, target_ulong msg, uint32_t len)) 334 DEF_HELPER(target_ulong, helper_popcnt, (target_ulong n, uint32_t type)) 334 DEF_HELPER_2(glue(pcmpgtq, SUFFIX), void, Reg, Reg) 335 DEF_HELPER_3(glue(pcmpestri, SUFFIX), void, Reg, Reg, i32) 336 DEF_HELPER_3(glue(pcmpestrm, SUFFIX), void, Reg, Reg, i32) 337 DEF_HELPER_3(glue(pcmpistri, SUFFIX), void, Reg, Reg, i32) 338 DEF_HELPER_3(glue(pcmpistrm, SUFFIX), void, Reg, Reg, i32) 339 DEF_HELPER_3(crc32, tl, i32, tl, i32) 340 DEF_HELPER_2(popcnt, tl, tl, i32) 335 341 #endif 336 342 -
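[Annotation] The ops_sse_header.h hunk above converts every helper declaration from the old untyped form, DEF_HELPER(void, helper_pi2fd, (MMXReg *d, MMXReg *s)), to the arity-encoded form, DEF_HELPER_2(pi2fd, void, MMXReg, MMXReg), in which the return and argument types are macro arguments. The payoff is that one declaration line can be expanded twice: once into a plain C prototype, and once (with GEN_HELPER defined, as the translate.c hunk below does before its second #include "helper.h") into a typed gen_helper_* call emitter. A minimal, self-contained sketch of that two-pass idea follows; the macro bodies and the paddq helper are invented for illustration and are not QEMU's actual def-helper.h:

    #include <stdio.h>

    typedef struct { long q; } MMXReg;

    /* Pass 1: expand each DEF_HELPER_2 line into a C prototype. */
    #define DEF_HELPER_2(name, ret, t1, t2) ret helper_##name(t1 *d, t2 *s);
    DEF_HELPER_2(paddq, void, MMXReg, MMXReg)
    #undef DEF_HELPER_2

    /* Pass 2: expand the same line into a typed gen_helper_* wrapper
     * (a stand-in for the TCG call-emission code the real macro generates;
     * return-type handling is omitted for brevity). */
    #define DEF_HELPER_2(name, ret, t1, t2) \
        static void gen_helper_##name(t1 *d, t2 *s) { helper_##name(d, s); }
    DEF_HELPER_2(paddq, void, MMXReg, MMXReg)
    #undef DEF_HELPER_2

    void helper_paddq(MMXReg *d, MMXReg *s) { d->q += s->q; }

    int main(void)
    {
        MMXReg a = { 40 }, b = { 2 };
        gen_helper_paddq(&a, &b);   /* typed call, no void * indirection */
        printf("%ld\n", a.q);       /* prints 42 */
        return 0;
    }

Because each gen_helper_* wrapper is a real typed function, passing the wrong register kind to a helper now fails at compile time, which the untyped helper-pointer tables removed below could not guarantee.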
trunk/src/recompiler/target-i386/translate.c
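[Annotation] The translate.c diff below makes the matching call-site change: tables of untyped helper pointers (helper_in_func[3], gen_check_io_func[3], helper_fp_arith_ST0_FT0[8], helper_rotc[8], ...) become small switch-based gen_helper_* dispatchers, and direct tcg_gen_helper_0_N(...) calls become gen_helper_name(...) calls. Where a helper address still has to live in a table (the sse_op_table* arrays), the call site now casts the entry back to its proper function type before calling, e.g. ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1). A compilable sketch of the switch-dispatch pattern, using stand-in helpers with invented signatures rather than the real TCG wrappers:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the typed gen_helper_in{b,w,l} wrappers; here they just
     * return fixed values instead of emitting TCG ops. */
    static uint32_t gen_helper_inb(uint32_t port) { (void)port; return 0xab; }
    static uint32_t gen_helper_inw(uint32_t port) { (void)port; return 0xabcd; }
    static uint32_t gen_helper_inl(uint32_t port) { (void)port; return 0xabcdef01u; }

    /* ot encodes the operand size as in translate.c: 0 = byte, 1 = word, 2 = long. */
    static uint32_t gen_helper_in_func(int ot, uint32_t port)
    {
        switch (ot) {
        case 0: return gen_helper_inb(port);
        case 1: return gen_helper_inw(port);
        case 2: return gen_helper_inl(port);
        default: return 0;
        }
    }

    int main(void)
    {
        printf("0x%x\n", gen_helper_in_func(1, 0x60));  /* prints 0xabcd */
        return 0;
    }

The switch is trivially cheap at translation time and lets the compiler check every helper call; only the sse_op_table dispatchers, where the helper set is data-driven, keep void * entries plus an explicit cast.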
r36140 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 33 33 #include <string.h> 34 34 #ifndef VBOX 35 # 36 # 37 # 35 #include <inttypes.h> 36 #include <signal.h> 37 #include <assert.h> 38 38 #endif /* !VBOX */ 39 39 … … 41 41 #include "exec-all.h" 42 42 #include "disas.h" 43 #include "tcg-op.h" 44 43 45 #include "helper.h" 44 #include "tcg-op.h" 46 #define GEN_HELPER 1 47 #include "helper.h" 45 48 46 49 #define PREFIX_REPZ 0x01 … … 71 74 72 75 /* global register indexes */ 73 static TCGv cpu_env, cpu_A0, cpu_cc_op, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp; 76 static TCGv_ptr cpu_env; 77 static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp; 78 static TCGv_i32 cpu_cc_op; 74 79 /* local temps */ 75 80 static TCGv cpu_T[2], cpu_T3; 76 81 /* local register indexes (only used inside old micro ops) */ 77 static TCGv cpu_tmp0, cpu_tmp1_i64, cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp4, cpu_ptr0, cpu_ptr1; 82 static TCGv cpu_tmp0, cpu_tmp4; 83 static TCGv_ptr cpu_ptr0, cpu_ptr1; 84 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; 85 static TCGv_i64 cpu_tmp1_i64; 78 86 static TCGv cpu_tmp5, cpu_tmp6; 79 87 … … 658 666 default: 659 667 case 3: 668 /* Should never happen on 32-bit targets. */ 669 #ifdef TARGET_X86_64 660 670 tcg_gen_qemu_ld64(t0, a0, mem_index); 671 #endif 661 672 break; 662 673 } … … 694 705 default: 695 706 case 3: 707 /* Should never happen on 32-bit targets. */ 708 #ifdef TARGET_X86_64 696 709 tcg_gen_qemu_st64(t0, a0, mem_index); 710 #endif 697 711 break; 698 712 } … … 716 730 /** @todo: once TCG codegen improves, we may want to use version 717 731 from else version */ 718 tcg_gen_helper_0_0(helper_check_external_event);732 gen_helper_check_external_event(); 719 733 # else 720 734 int skip_label; … … 736 750 tcg_temp_free(t0); 737 751 738 tcg_gen_helper_0_0(helper_check_external_event);752 gen_helper_check_external_event(); 739 753 740 754 gen_set_label(skip_label); … … 745 759 static void gen_check_external_event2() 746 760 { 747 tcg_gen_helper_0_0(helper_check_external_event);761 gen_helper_check_external_event(); 748 762 } 749 763 # endif … … 762 776 gen_jmp_im(pc); 763 777 # ifdef VBOX_DUMP_STATE 764 tcg_gen_helper_0_0(helper_dump_state);778 gen_helper_dump_state(); 765 779 # endif 766 780 } … … 877 891 } 878 892 879 static void *helper_in_func[3] = { 880 helper_inb, 881 helper_inw, 882 helper_inl, 883 }; 884 885 static void *helper_out_func[3] = { 886 helper_outb, 887 helper_outw, 888 helper_outl, 889 }; 890 891 static void *gen_check_io_func[3] = { 892 helper_check_iob, 893 helper_check_iow, 894 helper_check_iol, 895 }; 893 static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n) 894 { 895 switch (ot) { 896 case 0: gen_helper_inb(v, n); break; 897 case 1: gen_helper_inw(v, n); break; 898 case 2: gen_helper_inl(v, n); break; 899 } 900 901 } 902 903 static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n) 904 { 905 switch (ot) { 906 case 0: gen_helper_outb(v, n); break; 907 case 1: gen_helper_outw(v, n); break; 908 case 2: gen_helper_outl(v, n); break; 909 } 910 911 } 896 912 897 913 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip, … … 908 924 state_saved = 1; 909 925 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 910 tcg_gen_helper_0_1(gen_check_io_func[ot], 911 cpu_tmp2_i32); 926 
switch (ot) { 927 case 0: gen_helper_check_iob(cpu_tmp2_i32); break; 928 case 1: gen_helper_check_iow(cpu_tmp2_i32); break; 929 case 2: gen_helper_check_iol(cpu_tmp2_i32); break; 930 } 912 931 } 913 932 if(s->flags & HF_SVMI_MASK) { … … 921 940 next_eip = s->pc - s->cs_base; 922 941 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 923 tcg_gen_helper_0_3(helper_svm_check_io, 924 cpu_tmp2_i32, 925 tcg_const_i32(svm_flags), 926 tcg_const_i32(next_eip - cur_eip)); 942 gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags), 943 tcg_const_i32(next_eip - cur_eip)); 927 944 } 928 945 } … … 980 997 static void gen_compute_eflags_c(TCGv reg) 981 998 { 982 #if TCG_TARGET_REG_BITS == 32 983 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3); 984 tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 985 (long)cc_table + offsetof(CCTable, compute_c)); 986 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0); 987 tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE, 988 1, &cpu_tmp2_i32, 0, NULL); 989 #else 990 tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op); 991 tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4); 992 tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64, 993 (long)cc_table + offsetof(CCTable, compute_c)); 994 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0); 995 tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE, 996 1, &cpu_tmp2_i32, 0, NULL); 997 #endif 999 gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op); 998 1000 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32); 999 1001 } … … 1002 1004 static void gen_compute_eflags(TCGv reg) 1003 1005 { 1004 #if TCG_TARGET_REG_BITS == 32 1005 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3); 1006 tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 1007 (long)cc_table + offsetof(CCTable, compute_all)); 1008 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0); 1009 tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE, 1010 1, &cpu_tmp2_i32, 0, NULL); 1011 #else 1012 tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op); 1013 tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4); 1014 tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64, 1015 (long)cc_table + offsetof(CCTable, compute_all)); 1016 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0); 1017 tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE, 1018 1, &cpu_tmp2_i32, 0, NULL); 1019 #endif 1006 gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op); 1020 1007 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32); 1021 1008 } … … 1391 1378 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]); 1392 1379 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); 1393 tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[0], cpu_tmp2_i32);1380 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32); 1394 1381 gen_op_st_T0_A0(ot + s->mem_index); 1395 1382 gen_op_movl_T0_Dshift(ot); … … 1410 1397 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); 1411 1398 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]); 1412 tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);1399 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); 1413 1400 1414 1401 gen_op_movl_T0_Dshift(ot); … … 1462 1449 GEN_REPZ2(cmps) 1463 1450 1464 static void *helper_fp_arith_ST0_FT0[8] = { 1465 helper_fadd_ST0_FT0, 1466 helper_fmul_ST0_FT0, 1467 helper_fcom_ST0_FT0, 1468 helper_fcom_ST0_FT0, 1469 helper_fsub_ST0_FT0, 1470 helper_fsubr_ST0_FT0, 1471 helper_fdiv_ST0_FT0, 1472 helper_fdivr_ST0_FT0, 1473 }; 1451 static void gen_helper_fp_arith_ST0_FT0(int op) 1452 { 1453 switch (op) { 1454 case 0: gen_helper_fadd_ST0_FT0(); break; 1455 case 1: gen_helper_fmul_ST0_FT0(); break; 1456 case 2: gen_helper_fcom_ST0_FT0(); break; 1457 case 3: gen_helper_fcom_ST0_FT0(); break; 
1458 case 4: gen_helper_fsub_ST0_FT0(); break; 1459 case 5: gen_helper_fsubr_ST0_FT0(); break; 1460 case 6: gen_helper_fdiv_ST0_FT0(); break; 1461 case 7: gen_helper_fdivr_ST0_FT0(); break; 1462 } 1463 } 1474 1464 1475 1465 /* NOTE the exception in "r" op ordering */ 1476 static void *helper_fp_arith_STN_ST0[8] = { 1477 helper_fadd_STN_ST0, 1478 helper_fmul_STN_ST0, 1479 NULL, 1480 NULL, 1481 helper_fsubr_STN_ST0, 1482 helper_fsub_STN_ST0, 1483 helper_fdivr_STN_ST0, 1484 helper_fdiv_STN_ST0, 1485 }; 1466 static void gen_helper_fp_arith_STN_ST0(int op, int opreg) 1467 { 1468 TCGv_i32 tmp = tcg_const_i32(opreg); 1469 switch (op) { 1470 case 0: gen_helper_fadd_STN_ST0(tmp); break; 1471 case 1: gen_helper_fmul_STN_ST0(tmp); break; 1472 case 4: gen_helper_fsubr_STN_ST0(tmp); break; 1473 case 5: gen_helper_fsub_STN_ST0(tmp); break; 1474 case 6: gen_helper_fdivr_STN_ST0(tmp); break; 1475 case 7: gen_helper_fdiv_STN_ST0(tmp); break; 1476 } 1477 } 1486 1478 1487 1479 /* if d == OR_TMP0, it means memory operand (address in A0) */ … … 1653 1645 1654 1646 /* XXX: inefficient */ 1655 t0 = tcg_temp_local_new( TCG_TYPE_TL);1656 t1 = tcg_temp_local_new( TCG_TYPE_TL);1647 t0 = tcg_temp_local_new(); 1648 t1 = tcg_temp_local_new(); 1657 1649 1658 1650 tcg_gen_mov_tl(t0, cpu_T[0]); … … 1744 1736 1745 1737 /* XXX: inefficient, but we must use local temps */ 1746 t0 = tcg_temp_local_new( TCG_TYPE_TL);1747 t1 = tcg_temp_local_new( TCG_TYPE_TL);1748 t2 = tcg_temp_local_new( TCG_TYPE_TL);1749 a0 = tcg_temp_local_new( TCG_TYPE_TL);1738 t0 = tcg_temp_local_new(); 1739 t1 = tcg_temp_local_new(); 1740 t2 = tcg_temp_local_new(); 1741 a0 = tcg_temp_local_new(); 1750 1742 1751 1743 if (ot == OT_QUAD) … … 1832 1824 } 1833 1825 1834 static void *helper_rotc[8] = {1835 helper_rclb,1836 helper_rclw,1837 helper_rcll,1838 X86_64_ONLY(helper_rclq),1839 helper_rcrb,1840 helper_rcrw,1841 helper_rcrl,1842 X86_64_ONLY(helper_rcrq),1843 };1844 1845 1826 /* XXX: add faster immediate = 1 case */ 1846 1827 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1, … … 1858 1839 gen_op_mov_TN_reg(ot, 0, op1); 1859 1840 1860 tcg_gen_helper_1_2(helper_rotc[ot + (is_right * 4)], 1861 cpu_T[0], cpu_T[0], cpu_T[1]); 1841 if (is_right) { 1842 switch (ot) { 1843 case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break; 1844 case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break; 1845 case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break; 1846 #ifdef TARGET_X86_64 1847 case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break; 1848 #endif 1849 } 1850 } else { 1851 switch (ot) { 1852 case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break; 1853 case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break; 1854 case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break; 1855 #ifdef TARGET_X86_64 1856 case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break; 1857 #endif 1858 } 1859 } 1862 1860 /* store */ 1863 1861 if (op1 == OR_TMP0) … … 1886 1884 TCGv t0, t1, t2, a0; 1887 1885 1888 t0 = tcg_temp_local_new( TCG_TYPE_TL);1889 t1 = tcg_temp_local_new( TCG_TYPE_TL);1890 t2 = tcg_temp_local_new( TCG_TYPE_TL);1891 a0 = tcg_temp_local_new( TCG_TYPE_TL);1886 t0 = tcg_temp_local_new(); 1887 t1 = tcg_temp_local_new(); 1888 t2 = tcg_temp_local_new(); 1889 a0 = tcg_temp_local_new(); 1892 1890 1893 1891 if (ot == OT_QUAD) … … 2449 2447 /* nominal case: we use a jump */ 2450 2448 /* XXX: make it faster by adding new instructions in TCG */ 2451 t0 = tcg_temp_local_new( TCG_TYPE_TL);2449 t0 = tcg_temp_local_new(); 2452 2450 
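/* [Annotation, added by the editor -- not part of the changeset] The hunks
 * in this region also migrate the temporary-allocation API:
 * tcg_temp_local_new(TCG_TYPE_TL) and tcg_temp_new(TCG_TYPE_I64) become
 * tcg_temp_local_new() and tcg_temp_new_i64(), because the value width is
 * now carried statically by the handle type (TCGv vs. TCGv_i32 vs.
 * TCGv_i64 vs. TCGv_ptr, as in the cpu_tmpN and cpu_ptrN declarations
 * earlier in this diff) rather than by a runtime TCG_TYPE_* argument.
 * Mixing a 32-bit temp into a 64-bit operation is thereby turned from a
 * latent code-generation bug into a compile-time type error. */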
tcg_gen_movi_tl(t0, 0); 2453 2451 l1 = gen_new_label(); … … 2507 2505 gen_jmp_im(cur_eip); 2508 2506 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 2509 tcg_gen_helper_0_2(helper_load_seg,tcg_const_i32(seg_reg), cpu_tmp2_i32);2507 gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32); 2510 2508 /* abort translation because the addseg value may change or 2511 2509 because ss32 may change. For R_SS, translation must always … … 2536 2534 gen_op_set_cc_op(s->cc_op); 2537 2535 gen_jmp_im(pc_start - s->cs_base); 2538 tcg_gen_helper_0_2(helper_svm_check_intercept_param,2539 tcg_const_i32(type),tcg_const_i64(param));2536 gen_helper_svm_check_intercept_param(tcg_const_i32(type), 2537 tcg_const_i64(param)); 2540 2538 } 2541 2539 … … 2744 2742 if (level) { 2745 2743 /* XXX: must save state */ 2746 tcg_gen_helper_0_3(helper_enter64_level, 2747 tcg_const_i32(level), 2748 tcg_const_i32((ot == OT_QUAD)), 2749 cpu_T[1]); 2744 gen_helper_enter64_level(tcg_const_i32(level), 2745 tcg_const_i32((ot == OT_QUAD)), 2746 cpu_T[1]); 2750 2747 } 2751 2748 gen_op_mov_reg_T1(ot, R_EBP); … … 2770 2767 if (level) { 2771 2768 /* XXX: must save state */ 2772 tcg_gen_helper_0_3(helper_enter_level, 2773 tcg_const_i32(level), 2774 tcg_const_i32(s->dflag), 2775 cpu_T[1]); 2769 gen_helper_enter_level(tcg_const_i32(level), 2770 tcg_const_i32(s->dflag), 2771 cpu_T[1]); 2776 2772 } 2777 2773 gen_op_mov_reg_T1(ot, R_EBP); … … 2786 2782 gen_op_set_cc_op(s->cc_op); 2787 2783 gen_jmp_im(cur_eip); 2788 tcg_gen_helper_0_1(helper_raise_exception,tcg_const_i32(trapno));2784 gen_helper_raise_exception(tcg_const_i32(trapno)); 2789 2785 s->is_jmp = 3; 2790 2786 } … … 2798 2794 gen_op_set_cc_op(s->cc_op); 2799 2795 gen_jmp_im(cur_eip); 2800 tcg_gen_helper_0_2(helper_raise_interrupt, 2801 tcg_const_i32(intno), 2802 tcg_const_i32(next_eip - cur_eip)); 2796 gen_helper_raise_interrupt(tcg_const_i32(intno), 2797 tcg_const_i32(next_eip - cur_eip)); 2803 2798 s->is_jmp = 3; 2804 2799 } … … 2809 2804 gen_op_set_cc_op(s->cc_op); 2810 2805 gen_jmp_im(cur_eip); 2811 tcg_gen_helper_0_0(helper_debug);2806 gen_helper_debug(); 2812 2807 s->is_jmp = 3; 2813 2808 } … … 2820 2815 gen_op_set_cc_op(s->cc_op); 2821 2816 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) { 2822 tcg_gen_helper_0_0(helper_reset_inhibit_irq);2817 gen_helper_reset_inhibit_irq(); 2823 2818 } 2824 2825 #ifdef VBOX2826 gen_check_external_event();2827 #endif /* VBOX */2828 2829 2819 if ( s->singlestep_enabled 2830 2820 #ifdef VBOX … … 2833 2823 #endif 2834 2824 ) { 2835 tcg_gen_helper_0_0(helper_debug);2825 gen_helper_debug(); 2836 2826 } else if (s->tf) { 2837 tcg_gen_helper_0_0(helper_single_step);2827 gen_helper_single_step(); 2838 2828 } else { 2839 2829 tcg_gen_exit_tb(0); … … 2927 2917 #define SSE_DUMMY ((void *)2) 2928 2918 2929 #define MMX_OP2(x) { helper_ ## x ## _mmx,helper_ ## x ## _xmm }2930 #define SSE_FOP(x) { helper_ ## x ## ps,helper_ ## x ## pd, \2931 helper_ ## x ## ss,helper_ ## x ## sd, }2919 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm } 2920 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \ 2921 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, } 2932 2922 2933 2923 static void *sse_op_table1[256][4] = { … … 2940 2930 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */ 2941 2931 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */ 2942 [0x14] = { helper_punpckldq_xmm,helper_punpcklqdq_xmm },2943 [0x15] = { helper_punpckhdq_xmm,helper_punpckhqdq_xmm },2932 [0x14] = { 
gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm }, 2933 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm }, 2944 2934 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */ 2945 2935 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */ … … 2951 2941 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ 2952 2942 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */ 2953 [0x2e] = { helper_ucomiss,helper_ucomisd },2954 [0x2f] = { helper_comiss,helper_comisd },2943 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd }, 2944 [0x2f] = { gen_helper_comiss, gen_helper_comisd }, 2955 2945 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */ 2956 2946 [0x51] = SSE_FOP(sqrt), 2957 [0x52] = { helper_rsqrtps, NULL,helper_rsqrtss, NULL },2958 [0x53] = { helper_rcpps, NULL,helper_rcpss, NULL },2959 [0x54] = { helper_pand_xmm,helper_pand_xmm }, /* andps, andpd */2960 [0x55] = { helper_pandn_xmm,helper_pandn_xmm }, /* andnps, andnpd */2961 [0x56] = { helper_por_xmm,helper_por_xmm }, /* orps, orpd */2962 [0x57] = { helper_pxor_xmm,helper_pxor_xmm }, /* xorps, xorpd */2947 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL }, 2948 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL }, 2949 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */ 2950 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */ 2951 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */ 2952 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */ 2963 2953 [0x58] = SSE_FOP(add), 2964 2954 [0x59] = SSE_FOP(mul), 2965 [0x5a] = { helper_cvtps2pd,helper_cvtpd2ps,2966 helper_cvtss2sd,helper_cvtsd2ss },2967 [0x5b] = { helper_cvtdq2ps, helper_cvtps2dq,helper_cvttps2dq },2955 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps, 2956 gen_helper_cvtss2sd, gen_helper_cvtsd2ss }, 2957 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq }, 2968 2958 [0x5c] = SSE_FOP(sub), 2969 2959 [0x5d] = SSE_FOP(min), … … 2972 2962 2973 2963 [0xc2] = SSE_FOP(cmpeq), 2974 [0xc6] = { helper_shufps,helper_shufpd },2964 [0xc6] = { gen_helper_shufps, gen_helper_shufpd }, 2975 2965 2976 2966 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */ … … 2990 2980 [0x6a] = MMX_OP2(punpckhdq), 2991 2981 [0x6b] = MMX_OP2(packssdw), 2992 [0x6c] = { NULL, helper_punpcklqdq_xmm },2993 [0x6d] = { NULL, helper_punpckhqdq_xmm },2982 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm }, 2983 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm }, 2994 2984 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */ 2995 2985 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */ 2996 [0x70] = { helper_pshufw_mmx,2997 helper_pshufd_xmm,2998 helper_pshufhw_xmm,2999 helper_pshuflw_xmm },2986 [0x70] = { gen_helper_pshufw_mmx, 2987 gen_helper_pshufd_xmm, 2988 gen_helper_pshufhw_xmm, 2989 gen_helper_pshuflw_xmm }, 3000 2990 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */ 3001 2991 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */ … … 3005 2995 [0x76] = MMX_OP2(pcmpeql), 3006 2996 [0x77] = { SSE_DUMMY }, /* emms */ 3007 [0x7c] = { NULL, helper_haddpd, NULL,helper_haddps },3008 [0x7d] = { NULL, helper_hsubpd, NULL,helper_hsubps },2997 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps }, 2998 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps }, 3009 
2999 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */ 3010 3000 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */ 3011 3001 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */ 3012 3002 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */ 3013 [0xd0] = { NULL, helper_addsubpd, NULL,helper_addsubps },3003 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps }, 3014 3004 [0xd1] = MMX_OP2(psrlw), 3015 3005 [0xd2] = MMX_OP2(psrld), … … 3033 3023 [0xe4] = MMX_OP2(pmulhuw), 3034 3024 [0xe5] = MMX_OP2(pmulhw), 3035 [0xe6] = { NULL, helper_cvttpd2dq, helper_cvtdq2pd,helper_cvtpd2dq },3025 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq }, 3036 3026 [0xe7] = { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntq */ 3037 3027 [0xe8] = MMX_OP2(psubsb), … … 3068 3058 [8 + 6] = MMX_OP2(pslld), 3069 3059 [16 + 2] = MMX_OP2(psrlq), 3070 [16 + 3] = { NULL, helper_psrldq_xmm },3060 [16 + 3] = { NULL, gen_helper_psrldq_xmm }, 3071 3061 [16 + 6] = MMX_OP2(psllq), 3072 [16 + 7] = { NULL, helper_pslldq_xmm },3062 [16 + 7] = { NULL, gen_helper_pslldq_xmm }, 3073 3063 }; 3074 3064 3075 3065 static void *sse_op_table3[4 * 3] = { 3076 helper_cvtsi2ss,3077 helper_cvtsi2sd,3078 X86_64_ONLY( helper_cvtsq2ss),3079 X86_64_ONLY( helper_cvtsq2sd),3080 3081 helper_cvttss2si,3082 helper_cvttsd2si,3083 X86_64_ONLY( helper_cvttss2sq),3084 X86_64_ONLY( helper_cvttsd2sq),3085 3086 helper_cvtss2si,3087 helper_cvtsd2si,3088 X86_64_ONLY( helper_cvtss2sq),3089 X86_64_ONLY( helper_cvtsd2sq),3066 gen_helper_cvtsi2ss, 3067 gen_helper_cvtsi2sd, 3068 X86_64_ONLY(gen_helper_cvtsq2ss), 3069 X86_64_ONLY(gen_helper_cvtsq2sd), 3070 3071 gen_helper_cvttss2si, 3072 gen_helper_cvttsd2si, 3073 X86_64_ONLY(gen_helper_cvttss2sq), 3074 X86_64_ONLY(gen_helper_cvttsd2sq), 3075 3076 gen_helper_cvtss2si, 3077 gen_helper_cvtsd2si, 3078 X86_64_ONLY(gen_helper_cvtss2sq), 3079 X86_64_ONLY(gen_helper_cvtsd2sq), 3090 3080 }; 3091 3081 … … 3102 3092 3103 3093 static void *sse_op_table5[256] = { 3104 [0x0c] = helper_pi2fw,3105 [0x0d] = helper_pi2fd,3106 [0x1c] = helper_pf2iw,3107 [0x1d] = helper_pf2id,3108 [0x8a] = helper_pfnacc,3109 [0x8e] = helper_pfpnacc,3110 [0x90] = helper_pfcmpge,3111 [0x94] = helper_pfmin,3112 [0x96] = helper_pfrcp,3113 [0x97] = helper_pfrsqrt,3114 [0x9a] = helper_pfsub,3115 [0x9e] = helper_pfadd,3116 [0xa0] = helper_pfcmpgt,3117 [0xa4] = helper_pfmax,3118 [0xa6] = helper_movq, /* pfrcpit1; no need to actually increase precision */3119 [0xa7] = helper_movq, /* pfrsqit1 */3120 [0xaa] = helper_pfsubr,3121 [0xae] = helper_pfacc,3122 [0xb0] = helper_pfcmpeq,3123 [0xb4] = helper_pfmul,3124 [0xb6] = helper_movq, /* pfrcpit2 */3125 [0xb7] = helper_pmulhrw_mmx,3126 [0xbb] = helper_pswapd,3127 [0xbf] = helper_pavgb_mmx /* pavgusb */3094 [0x0c] = gen_helper_pi2fw, 3095 [0x0d] = gen_helper_pi2fd, 3096 [0x1c] = gen_helper_pf2iw, 3097 [0x1d] = gen_helper_pf2id, 3098 [0x8a] = gen_helper_pfnacc, 3099 [0x8e] = gen_helper_pfpnacc, 3100 [0x90] = gen_helper_pfcmpge, 3101 [0x94] = gen_helper_pfmin, 3102 [0x96] = gen_helper_pfrcp, 3103 [0x97] = gen_helper_pfrsqrt, 3104 [0x9a] = gen_helper_pfsub, 3105 [0x9e] = gen_helper_pfadd, 3106 [0xa0] = gen_helper_pfcmpgt, 3107 [0xa4] = gen_helper_pfmax, 3108 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */ 3109 [0xa7] = gen_helper_movq, /* pfrsqit1 */ 3110 [0xaa] = gen_helper_pfsubr, 3111 [0xae] = gen_helper_pfacc, 3112 [0xb0] = gen_helper_pfcmpeq, 3113 [0xb4] = gen_helper_pfmul, 3114 [0xb6] = 
gen_helper_movq, /* pfrcpit2 */ 3115 [0xb7] = gen_helper_pmulhrw_mmx, 3116 [0xbb] = gen_helper_pswapd, 3117 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */ 3128 3118 }; 3129 3119 … … 3132 3122 }; 3133 3123 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 } 3134 #define SSE41_OP(x) { { NULL, helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }3135 #define SSE42_OP(x) { { NULL, helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }3124 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 } 3125 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 } 3136 3126 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 } 3137 3127 static struct sse_op_helper_s sse_op_table6[256] = { … … 3254 3244 goto illegal_op; 3255 3245 /* femms */ 3256 tcg_gen_helper_0_0(helper_emms);3246 gen_helper_emms(); 3257 3247 return; 3258 3248 } 3259 3249 if (b == 0x77) { 3260 3250 /* emms */ 3261 tcg_gen_helper_0_0(helper_emms);3251 gen_helper_emms(); 3262 3252 return; 3263 3253 } … … 3265 3255 the static cpu state) */ 3266 3256 if (!is_xmm) { 3267 tcg_gen_helper_0_0(helper_enter_mmx);3257 gen_helper_enter_mmx(); 3268 3258 } 3269 3259 … … 3302 3292 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3303 3293 offsetof(CPUX86State,fpregs[reg].mmx)); 3304 tcg_gen_helper_0_2(helper_movl_mm_T0_mmx, cpu_ptr0, cpu_T[0]); 3294 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 3295 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32); 3305 3296 } 3306 3297 break; … … 3311 3302 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3312 3303 offsetof(CPUX86State,xmm_regs[reg])); 3313 tcg_gen_helper_0_2(helper_movq_mm_T0_xmm,cpu_ptr0, cpu_T[0]);3304 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]); 3314 3305 } else 3315 3306 #endif … … 3319 3310 offsetof(CPUX86State,xmm_regs[reg])); 3320 3311 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 3321 tcg_gen_helper_0_2(helper_movl_mm_T0_xmm,cpu_ptr0, cpu_tmp2_i32);3312 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32); 3322 3313 } 3323 3314 break; … … 3580 3571 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); 3581 3572 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset); 3582 tcg_gen_helper_0_2(sse_op2,cpu_ptr0, cpu_ptr1);3573 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); 3583 3574 break; 3584 3575 case 0x050: /* movmskps */ … … 3586 3577 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3587 3578 offsetof(CPUX86State,xmm_regs[rm])); 3588 tcg_gen_helper_1_1(helper_movmskps,cpu_tmp2_i32, cpu_ptr0);3579 gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0); 3589 3580 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 3590 3581 gen_op_mov_reg_T0(OT_LONG, reg); … … 3594 3585 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3595 3586 offsetof(CPUX86State,xmm_regs[rm])); 3596 tcg_gen_helper_1_1(helper_movmskpd,cpu_tmp2_i32, cpu_ptr0);3587 gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0); 3597 3588 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 3598 3589 gen_op_mov_reg_T0(OT_LONG, reg); … … 3600 3591 case 0x02a: /* cvtpi2ps */ 3601 3592 case 0x12a: /* cvtpi2pd */ 3602 tcg_gen_helper_0_0(helper_enter_mmx);3593 gen_helper_enter_mmx(); 3603 3594 if (mod != 3) { 3604 3595 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); … … 3614 3605 switch(b >> 8) { 3615 3606 case 0x0: 3616 tcg_gen_helper_0_2(helper_cvtpi2ps,cpu_ptr0, cpu_ptr1);3607 gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1); 3617 3608 break; 3618 3609 default: 3619 3610 case 0x1: 3620 tcg_gen_helper_0_2(helper_cvtpi2pd,cpu_ptr0, cpu_ptr1);3611 gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1); 3621 3612 break; 3622 3613 } … … 3631 3622 if (ot == OT_LONG) { 3632 3623 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 3633 
tcg_gen_helper_0_2(sse_op2,cpu_ptr0, cpu_tmp2_i32);3624 ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32); 3634 3625 } else { 3635 tcg_gen_helper_0_2(sse_op2,cpu_ptr0, cpu_T[0]);3626 ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]); 3636 3627 } 3637 3628 break; … … 3640 3631 case 0x02d: /* cvtps2pi */ 3641 3632 case 0x12d: /* cvtpd2pi */ 3642 tcg_gen_helper_0_0(helper_enter_mmx);3633 gen_helper_enter_mmx(); 3643 3634 if (mod != 3) { 3644 3635 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); … … 3654 3645 switch(b) { 3655 3646 case 0x02c: 3656 tcg_gen_helper_0_2(helper_cvttps2pi,cpu_ptr0, cpu_ptr1);3647 gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1); 3657 3648 break; 3658 3649 case 0x12c: 3659 tcg_gen_helper_0_2(helper_cvttpd2pi,cpu_ptr0, cpu_ptr1);3650 gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1); 3660 3651 break; 3661 3652 case 0x02d: 3662 tcg_gen_helper_0_2(helper_cvtps2pi,cpu_ptr0, cpu_ptr1);3653 gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1); 3663 3654 break; 3664 3655 case 0x12d: 3665 tcg_gen_helper_0_2(helper_cvtpd2pi,cpu_ptr0, cpu_ptr1);3656 gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1); 3666 3657 break; 3667 3658 } … … 3689 3680 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); 3690 3681 if (ot == OT_LONG) { 3691 tcg_gen_helper_1_1(sse_op2,cpu_tmp2_i32, cpu_ptr0);3682 ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0); 3692 3683 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 3693 3684 } else { 3694 tcg_gen_helper_1_1(sse_op2,cpu_T[0], cpu_ptr0);3685 ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0); 3695 3686 } 3696 3687 gen_op_mov_reg_T0(ot, reg); … … 3743 3734 break; 3744 3735 case 0x2d6: /* movq2dq */ 3745 tcg_gen_helper_0_0(helper_enter_mmx);3736 gen_helper_enter_mmx(); 3746 3737 rm = (modrm & 7); 3747 3738 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), … … 3750 3741 break; 3751 3742 case 0x3d6: /* movdq2q */ 3752 tcg_gen_helper_0_0(helper_enter_mmx);3743 gen_helper_enter_mmx(); 3753 3744 rm = (modrm & 7) | REX_B(s); 3754 3745 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx), … … 3762 3753 rm = (modrm & 7) | REX_B(s); 3763 3754 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); 3764 tcg_gen_helper_1_1(helper_pmovmskb_xmm,cpu_tmp2_i32, cpu_ptr0);3755 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0); 3765 3756 } else { 3766 3757 rm = (modrm & 7); 3767 3758 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); 3768 tcg_gen_helper_1_1(helper_pmovmskb_mmx,cpu_tmp2_i32, cpu_ptr0);3759 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0); 3769 3760 } 3770 3761 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); … … 3804 3795 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ 3805 3796 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ 3806 tcg_gen_qemu_ld32u(cpu_tmp 2_i32, cpu_A0,3797 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0, 3807 3798 (s->mem_index >> 2) - 1); 3799 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0); 3808 3800 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset + 3809 3801 offsetof(XMMReg, XMM_L(0))); … … 3837 3829 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 3838 3830 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 3839 tcg_gen_helper_0_2(sse_op2,cpu_ptr0, cpu_ptr1);3831 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); 3840 3832 3841 3833 if (b == 0x17) … … 3866 3858 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 3867 3859 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); 3868 tcg_gen_helper_1_3(helper_crc32,cpu_T[0], cpu_tmp2_i32,3869 cpu_T[0], tcg_const_i32(8 << ot));3860 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32, 3861 
cpu_T[0], tcg_const_i32(8 << ot)); 3870 3862 3871 3863 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG; … … 3917 3909 offsetof(CPUX86State, 3918 3910 xmm_regs[reg].XMM_L(val & 3))); 3911 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 3919 3912 if (mod == 3) 3920 gen_op_mov_reg_v(ot, rm, cpu_ tmp2_i32);3913 gen_op_mov_reg_v(ot, rm, cpu_T[0]); 3921 3914 else 3922 tcg_gen_qemu_st32(cpu_ tmp2_i32, cpu_A0,3915 tcg_gen_qemu_st32(cpu_T[0], cpu_A0, 3923 3916 (s->mem_index >> 2) - 1); 3924 3917 } else { /* pextrq */ 3918 #ifdef TARGET_X86_64 3925 3919 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, 3926 3920 offsetof(CPUX86State, … … 3931 3925 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, 3932 3926 (s->mem_index >> 2) - 1); 3927 #else 3928 goto illegal_op; 3929 #endif 3933 3930 } 3934 3931 break; … … 3946 3943 gen_op_mov_TN_reg(OT_LONG, 0, rm); 3947 3944 else 3948 tcg_gen_qemu_ld8u(cpu_ T[0], cpu_A0,3945 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0, 3949 3946 (s->mem_index >> 2) - 1); 3950 tcg_gen_st8_tl(cpu_ T[0], cpu_env, offsetof(CPUX86State,3947 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, 3951 3948 xmm_regs[reg].XMM_B(val & 15))); 3952 3949 break; 3953 3950 case 0x21: /* insertps */ 3954 if (mod == 3) 3951 if (mod == 3) { 3955 3952 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, 3956 3953 offsetof(CPUX86State,xmm_regs[rm] 3957 3954 .XMM_L((val >> 6) & 3))); 3958 else3959 tcg_gen_qemu_ld32u(cpu_tmp 2_i32, cpu_A0,3955 } else { 3956 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0, 3960 3957 (s->mem_index >> 2) - 1); 3958 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0); 3959 } 3961 3960 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, 3962 3961 offsetof(CPUX86State,xmm_regs[reg] … … 3982 3981 if (ot == OT_LONG) { /* pinsrd */ 3983 3982 if (mod == 3) 3984 gen_op_mov_v_reg(ot, cpu_tmp 2_i32, rm);3983 gen_op_mov_v_reg(ot, cpu_tmp0, rm); 3985 3984 else 3986 tcg_gen_qemu_ld32u(cpu_tmp 2_i32, cpu_A0,3985 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0, 3987 3986 (s->mem_index >> 2) - 1); 3987 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0); 3988 3988 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, 3989 3989 offsetof(CPUX86State, 3990 3990 xmm_regs[reg].XMM_L(val & 3))); 3991 3991 } else { /* pinsrq */ 3992 #ifdef TARGET_X86_64 3992 3993 if (mod == 3) 3993 3994 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm); … … 3998 3999 offsetof(CPUX86State, 3999 4000 xmm_regs[reg].XMM_Q(val & 1))); 4001 #else 4002 goto illegal_op; 4003 #endif 4000 4004 } 4001 4005 break; … … 4035 4039 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 4036 4040 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 4037 tcg_gen_helper_0_3(sse_op2,cpu_ptr0, cpu_ptr1, tcg_const_i32(val));4041 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); 4038 4042 break; 4039 4043 default: … … 4095 4099 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 4096 4100 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 4097 tcg_gen_helper_0_2(sse_op2,cpu_ptr0, cpu_ptr1);4101 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); 4098 4102 break; 4099 4103 case 0x70: /* pshufx insn */ … … 4102 4106 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 4103 4107 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 4104 tcg_gen_helper_0_3(sse_op2,cpu_ptr0, cpu_ptr1, tcg_const_i32(val));4108 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); 4105 4109 break; 4106 4110 case 0xc2: … … 4112 4116 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 4113 4117 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 4114 tcg_gen_helper_0_2(sse_op2,cpu_ptr0, cpu_ptr1);4118 ((void (*)(TCGv_ptr, 
TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); 4115 4119 break; 4116 4120 case 0xf7: … … 4132 4136 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 4133 4137 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 4134 tcg_gen_helper_0_3(sse_op2,cpu_ptr0, cpu_ptr1, cpu_A0);4138 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0); 4135 4139 break; 4136 4140 default: 4137 4141 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 4138 4142 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 4139 tcg_gen_helper_0_2(sse_op2,cpu_ptr0, cpu_ptr1);4143 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1); 4140 4144 break; 4141 4145 } … … 4298 4302 int rex_w, rex_r; 4299 4303 4300 if (unlikely( loglevel & CPU_LOG_TB_OP))4304 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) 4301 4305 tcg_gen_debug_insn_start(pc_start); 4302 4306 s->pc = pc_start; … … 4431 4435 #ifndef VBOX 4432 4436 if (prefixes & PREFIX_LOCK) 4433 tcg_gen_helper_0_0(helper_lock);4437 gen_helper_lock(); 4434 4438 #else /* VBOX */ 4435 4439 if (prefixes & PREFIX_LOCK) { … … 4438 4442 return s->pc; 4439 4443 } 4440 tcg_gen_helper_0_0(helper_lock);4444 gen_helper_lock(); 4441 4445 } 4442 4446 #endif /* VBOX */ … … 4659 4663 #else 4660 4664 { 4661 TCGv t0, t1;4662 t0 = tcg_temp_new (TCG_TYPE_I64);4663 t1 = tcg_temp_new (TCG_TYPE_I64);4665 TCGv_i64 t0, t1; 4666 t0 = tcg_temp_new_i64(); 4667 t1 = tcg_temp_new_i64(); 4664 4668 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); 4665 4669 tcg_gen_extu_i32_i64(t0, cpu_T[0]); … … 4679 4683 #ifdef TARGET_X86_64 4680 4684 case OT_QUAD: 4681 tcg_gen_helper_0_1(helper_mulq_EAX_T0,cpu_T[0]);4685 gen_helper_mulq_EAX_T0(cpu_T[0]); 4682 4686 s->cc_op = CC_OP_MULQ; 4683 4687 break; … … 4728 4732 #else 4729 4733 { 4730 TCGv t0, t1;4731 t0 = tcg_temp_new (TCG_TYPE_I64);4732 t1 = tcg_temp_new (TCG_TYPE_I64);4734 TCGv_i64 t0, t1; 4735 t0 = tcg_temp_new_i64(); 4736 t1 = tcg_temp_new_i64(); 4733 4737 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); 4734 4738 tcg_gen_ext_i32_i64(t0, cpu_T[0]); … … 4749 4753 #ifdef TARGET_X86_64 4750 4754 case OT_QUAD: 4751 tcg_gen_helper_0_1(helper_imulq_EAX_T0,cpu_T[0]);4755 gen_helper_imulq_EAX_T0(cpu_T[0]); 4752 4756 s->cc_op = CC_OP_MULQ; 4753 4757 break; … … 4759 4763 case OT_BYTE: 4760 4764 gen_jmp_im(pc_start - s->cs_base); 4761 tcg_gen_helper_0_1(helper_divb_AL,cpu_T[0]);4765 gen_helper_divb_AL(cpu_T[0]); 4762 4766 break; 4763 4767 case OT_WORD: 4764 4768 gen_jmp_im(pc_start - s->cs_base); 4765 tcg_gen_helper_0_1(helper_divw_AX,cpu_T[0]);4769 gen_helper_divw_AX(cpu_T[0]); 4766 4770 break; 4767 4771 default: 4768 4772 case OT_LONG: 4769 4773 gen_jmp_im(pc_start - s->cs_base); 4770 tcg_gen_helper_0_1(helper_divl_EAX,cpu_T[0]);4774 gen_helper_divl_EAX(cpu_T[0]); 4771 4775 break; 4772 4776 #ifdef TARGET_X86_64 4773 4777 case OT_QUAD: 4774 4778 gen_jmp_im(pc_start - s->cs_base); 4775 tcg_gen_helper_0_1(helper_divq_EAX,cpu_T[0]);4779 gen_helper_divq_EAX(cpu_T[0]); 4776 4780 break; 4777 4781 #endif … … 4782 4786 case OT_BYTE: 4783 4787 gen_jmp_im(pc_start - s->cs_base); 4784 tcg_gen_helper_0_1(helper_idivb_AL,cpu_T[0]);4788 gen_helper_idivb_AL(cpu_T[0]); 4785 4789 break; 4786 4790 case OT_WORD: 4787 4791 gen_jmp_im(pc_start - s->cs_base); 4788 tcg_gen_helper_0_1(helper_idivw_AX,cpu_T[0]);4792 gen_helper_idivw_AX(cpu_T[0]); 4789 4793 break; 4790 4794 default: 4791 4795 case OT_LONG: 4792 4796 gen_jmp_im(pc_start - s->cs_base); 4793 tcg_gen_helper_0_1(helper_idivl_EAX,cpu_T[0]);4797 gen_helper_idivl_EAX(cpu_T[0]); 4794 4798 break; 4795 4799 #ifdef TARGET_X86_64 4796 4800 case OT_QUAD: 4797 
4801 gen_jmp_im(pc_start - s->cs_base); 4798 tcg_gen_helper_0_1(helper_idivq_EAX,cpu_T[0]);4802 gen_helper_idivq_EAX(cpu_T[0]); 4799 4803 break; 4800 4804 #endif … … 4880 4884 gen_jmp_im(pc_start - s->cs_base); 4881 4885 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 4882 tcg_gen_helper_0_4(helper_lcall_protected, 4883 cpu_tmp2_i32, cpu_T[1], 4884 tcg_const_i32(dflag), 4885 tcg_const_i32(s->pc - pc_start)); 4886 gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1], 4887 tcg_const_i32(dflag), 4888 tcg_const_i32(s->pc - pc_start)); 4886 4889 } else { 4887 4890 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 4888 tcg_gen_helper_0_4(helper_lcall_real, 4889 cpu_tmp2_i32, cpu_T[1], 4890 tcg_const_i32(dflag), 4891 tcg_const_i32(s->pc - s->cs_base)); 4891 gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1], 4892 tcg_const_i32(dflag), 4893 tcg_const_i32(s->pc - s->cs_base)); 4892 4894 } 4893 4895 gen_eob(s); … … 4909 4911 gen_jmp_im(pc_start - s->cs_base); 4910 4912 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 4911 tcg_gen_helper_0_3(helper_ljmp_protected, 4912 cpu_tmp2_i32, 4913 cpu_T[1], 4914 tcg_const_i32(s->pc - pc_start)); 4913 gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1], 4914 tcg_const_i32(s->pc - pc_start)); 4915 4915 } else { 4916 4916 gen_op_movl_seg_T0_vm(R_CS); … … 5021 5021 #ifdef TARGET_X86_64 5022 5022 if (ot == OT_QUAD) { 5023 tcg_gen_helper_1_2(helper_imulq_T0_T1,cpu_T[0], cpu_T[0], cpu_T[1]);5023 gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]); 5024 5024 } else 5025 5025 #endif … … 5034 5034 #else 5035 5035 { 5036 TCGv t0, t1;5037 t0 = tcg_temp_new (TCG_TYPE_I64);5038 t1 = tcg_temp_new (TCG_TYPE_I64);5036 TCGv_i64 t0, t1; 5037 t0 = tcg_temp_new_i64(); 5038 t1 = tcg_temp_new_i64(); 5039 5039 tcg_gen_ext_i32_i64(t0, cpu_T[0]); 5040 5040 tcg_gen_ext_i32_i64(t1, cpu_T[1]); … … 5100 5100 reg = ((modrm >> 3) & 7) | rex_r; 5101 5101 mod = (modrm >> 6) & 3; 5102 t0 = tcg_temp_local_new( TCG_TYPE_TL);5103 t1 = tcg_temp_local_new( TCG_TYPE_TL);5104 t2 = tcg_temp_local_new( TCG_TYPE_TL);5105 a0 = tcg_temp_local_new( TCG_TYPE_TL);5102 t0 = tcg_temp_local_new(); 5103 t1 = tcg_temp_local_new(); 5104 t2 = tcg_temp_local_new(); 5105 a0 = tcg_temp_local_new(); 5106 5106 gen_op_mov_v_reg(ot, t1, reg); 5107 5107 if (mod == 3) { … … 5155 5155 gen_op_set_cc_op(s->cc_op); 5156 5156 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5157 tcg_gen_helper_0_1(helper_cmpxchg16b,cpu_A0);5157 gen_helper_cmpxchg16b(cpu_A0); 5158 5158 } else 5159 5159 #endif … … 5165 5165 gen_op_set_cc_op(s->cc_op); 5166 5166 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5167 tcg_gen_helper_0_1(helper_cmpxchg8b,cpu_A0);5167 gen_helper_cmpxchg8b(cpu_A0); 5168 5168 } 5169 5169 s->cc_op = CC_OP_EFLAGS; … … 5291 5291 _first_ does it */ 5292 5292 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) 5293 tcg_gen_helper_0_0(helper_set_inhibit_irq);5293 gen_helper_set_inhibit_irq(); 5294 5294 s->tf = 0; 5295 5295 } … … 5371 5371 _first_ does it */ 5372 5372 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) 5373 tcg_gen_helper_0_0(helper_set_inhibit_irq);5373 gen_helper_set_inhibit_irq(); 5374 5374 s->tf = 0; 5375 5375 } … … 5565 5565 /* for xchg, lock is implicit */ 5566 5566 if (!(prefixes & PREFIX_LOCK)) 5567 tcg_gen_helper_0_0(helper_lock);5567 gen_helper_lock(); 5568 5568 gen_op_ld_T1_A0(ot + s->mem_index); 5569 5569 gen_op_st_T0_A0(ot + s->mem_index); 5570 5570 if (!(prefixes & PREFIX_LOCK)) 5571 tcg_gen_helper_0_0(helper_unlock);5571 gen_helper_unlock(); 5572 5572 gen_op_mov_reg_T1(ot, reg); 5573 5573 } … … 5728 5728 gen_op_ld_T0_A0(OT_LONG + 
s->mem_index); 5729 5729 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5730 tcg_gen_helper_0_1(helper_flds_FT0,cpu_tmp2_i32);5730 gen_helper_flds_FT0(cpu_tmp2_i32); 5731 5731 break; 5732 5732 case 1: 5733 5733 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 5734 5734 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5735 tcg_gen_helper_0_1(helper_fildl_FT0,cpu_tmp2_i32);5735 gen_helper_fildl_FT0(cpu_tmp2_i32); 5736 5736 break; 5737 5737 case 2: 5738 5738 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, 5739 5739 (s->mem_index >> 2) - 1); 5740 tcg_gen_helper_0_1(helper_fldl_FT0,cpu_tmp1_i64);5740 gen_helper_fldl_FT0(cpu_tmp1_i64); 5741 5741 break; 5742 5742 case 3: … … 5744 5744 gen_op_lds_T0_A0(OT_WORD + s->mem_index); 5745 5745 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5746 tcg_gen_helper_0_1(helper_fildl_FT0,cpu_tmp2_i32);5746 gen_helper_fildl_FT0(cpu_tmp2_i32); 5747 5747 break; 5748 5748 } 5749 5749 5750 tcg_gen_helper_0_0(helper_fp_arith_ST0_FT0[op1]);5750 gen_helper_fp_arith_ST0_FT0(op1); 5751 5751 if (op1 == 3) { 5752 5752 /* fcomp needs pop */ 5753 tcg_gen_helper_0_0(helper_fpop);5753 gen_helper_fpop(); 5754 5754 } 5755 5755 } … … 5767 5767 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 5768 5768 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5769 tcg_gen_helper_0_1(helper_flds_ST0,cpu_tmp2_i32);5769 gen_helper_flds_ST0(cpu_tmp2_i32); 5770 5770 break; 5771 5771 case 1: 5772 5772 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 5773 5773 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5774 tcg_gen_helper_0_1(helper_fildl_ST0,cpu_tmp2_i32);5774 gen_helper_fildl_ST0(cpu_tmp2_i32); 5775 5775 break; 5776 5776 case 2: 5777 5777 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, 5778 5778 (s->mem_index >> 2) - 1); 5779 tcg_gen_helper_0_1(helper_fldl_ST0,cpu_tmp1_i64);5779 gen_helper_fldl_ST0(cpu_tmp1_i64); 5780 5780 break; 5781 5781 case 3: … … 5783 5783 gen_op_lds_T0_A0(OT_WORD + s->mem_index); 5784 5784 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5785 tcg_gen_helper_0_1(helper_fildl_ST0,cpu_tmp2_i32);5785 gen_helper_fildl_ST0(cpu_tmp2_i32); 5786 5786 break; 5787 5787 } … … 5791 5791 switch(op >> 4) { 5792 5792 case 1: 5793 tcg_gen_helper_1_0(helper_fisttl_ST0,cpu_tmp2_i32);5793 gen_helper_fisttl_ST0(cpu_tmp2_i32); 5794 5794 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5795 5795 gen_op_st_T0_A0(OT_LONG + s->mem_index); 5796 5796 break; 5797 5797 case 2: 5798 tcg_gen_helper_1_0(helper_fisttll_ST0,cpu_tmp1_i64);5798 gen_helper_fisttll_ST0(cpu_tmp1_i64); 5799 5799 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, 5800 5800 (s->mem_index >> 2) - 1); … … 5802 5802 case 3: 5803 5803 default: 5804 tcg_gen_helper_1_0(helper_fistt_ST0,cpu_tmp2_i32);5804 gen_helper_fistt_ST0(cpu_tmp2_i32); 5805 5805 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5806 5806 gen_op_st_T0_A0(OT_WORD + s->mem_index); 5807 5807 break; 5808 5808 } 5809 tcg_gen_helper_0_0(helper_fpop);5809 gen_helper_fpop(); 5810 5810 break; 5811 5811 default: 5812 5812 switch(op >> 4) { 5813 5813 case 0: 5814 tcg_gen_helper_1_0(helper_fsts_ST0,cpu_tmp2_i32);5814 gen_helper_fsts_ST0(cpu_tmp2_i32); 5815 5815 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5816 5816 gen_op_st_T0_A0(OT_LONG + s->mem_index); 5817 5817 break; 5818 5818 case 1: 5819 tcg_gen_helper_1_0(helper_fistl_ST0,cpu_tmp2_i32);5819 gen_helper_fistl_ST0(cpu_tmp2_i32); 5820 5820 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5821 5821 gen_op_st_T0_A0(OT_LONG + s->mem_index); 5822 5822 break; 5823 5823 case 2: 5824 tcg_gen_helper_1_0(helper_fstl_ST0,cpu_tmp1_i64);5824 gen_helper_fstl_ST0(cpu_tmp1_i64); 5825 5825 
tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, 5826 5826 (s->mem_index >> 2) - 1); … … 5828 5828 case 3: 5829 5829 default: 5830 tcg_gen_helper_1_0(helper_fist_ST0,cpu_tmp2_i32);5830 gen_helper_fist_ST0(cpu_tmp2_i32); 5831 5831 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5832 5832 gen_op_st_T0_A0(OT_WORD + s->mem_index); … … 5834 5834 } 5835 5835 if ((op & 7) == 3) 5836 tcg_gen_helper_0_0(helper_fpop);5836 gen_helper_fpop(); 5837 5837 break; 5838 5838 } … … 5842 5842 gen_op_set_cc_op(s->cc_op); 5843 5843 gen_jmp_im(pc_start - s->cs_base); 5844 tcg_gen_helper_0_2(helper_fldenv,5844 gen_helper_fldenv( 5845 5845 cpu_A0, tcg_const_i32(s->dflag)); 5846 5846 break; … … 5848 5848 gen_op_ld_T0_A0(OT_WORD + s->mem_index); 5849 5849 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5850 tcg_gen_helper_0_1(helper_fldcw,cpu_tmp2_i32);5850 gen_helper_fldcw(cpu_tmp2_i32); 5851 5851 break; 5852 5852 case 0x0e: /* fnstenv mem */ … … 5854 5854 gen_op_set_cc_op(s->cc_op); 5855 5855 gen_jmp_im(pc_start - s->cs_base); 5856 tcg_gen_helper_0_2(helper_fstenv, 5857 cpu_A0, tcg_const_i32(s->dflag)); 5856 gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag)); 5858 5857 break; 5859 5858 case 0x0f: /* fnstcw mem */ 5860 tcg_gen_helper_1_0(helper_fnstcw,cpu_tmp2_i32);5859 gen_helper_fnstcw(cpu_tmp2_i32); 5861 5860 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5862 5861 gen_op_st_T0_A0(OT_WORD + s->mem_index); … … 5866 5865 gen_op_set_cc_op(s->cc_op); 5867 5866 gen_jmp_im(pc_start - s->cs_base); 5868 tcg_gen_helper_0_1(helper_fldt_ST0,cpu_A0);5867 gen_helper_fldt_ST0(cpu_A0); 5869 5868 break; 5870 5869 case 0x1f: /* fstpt mem */ … … 5872 5871 gen_op_set_cc_op(s->cc_op); 5873 5872 gen_jmp_im(pc_start - s->cs_base); 5874 tcg_gen_helper_0_1(helper_fstt_ST0,cpu_A0);5875 tcg_gen_helper_0_0(helper_fpop);5873 gen_helper_fstt_ST0(cpu_A0); 5874 gen_helper_fpop(); 5876 5875 break; 5877 5876 case 0x2c: /* frstor mem */ … … 5879 5878 gen_op_set_cc_op(s->cc_op); 5880 5879 gen_jmp_im(pc_start - s->cs_base); 5881 tcg_gen_helper_0_2(helper_frstor, 5882 cpu_A0, tcg_const_i32(s->dflag)); 5880 gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag)); 5883 5881 break; 5884 5882 case 0x2e: /* fnsave mem */ … … 5886 5884 gen_op_set_cc_op(s->cc_op); 5887 5885 gen_jmp_im(pc_start - s->cs_base); 5888 tcg_gen_helper_0_2(helper_fsave, 5889 cpu_A0, tcg_const_i32(s->dflag)); 5886 gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag)); 5890 5887 break; 5891 5888 case 0x2f: /* fnstsw mem */ 5892 tcg_gen_helper_1_0(helper_fnstsw,cpu_tmp2_i32);5889 gen_helper_fnstsw(cpu_tmp2_i32); 5893 5890 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5894 5891 gen_op_st_T0_A0(OT_WORD + s->mem_index); … … 5898 5895 gen_op_set_cc_op(s->cc_op); 5899 5896 gen_jmp_im(pc_start - s->cs_base); 5900 tcg_gen_helper_0_1(helper_fbld_ST0,cpu_A0);5897 gen_helper_fbld_ST0(cpu_A0); 5901 5898 break; 5902 5899 case 0x3e: /* fbstp */ … … 5904 5901 gen_op_set_cc_op(s->cc_op); 5905 5902 gen_jmp_im(pc_start - s->cs_base); 5906 tcg_gen_helper_0_1(helper_fbst_ST0,cpu_A0);5907 tcg_gen_helper_0_0(helper_fpop);5903 gen_helper_fbst_ST0(cpu_A0); 5904 gen_helper_fpop(); 5908 5905 break; 5909 5906 case 0x3d: /* fildll */ 5910 5907 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, 5911 5908 (s->mem_index >> 2) - 1); 5912 tcg_gen_helper_0_1(helper_fildll_ST0,cpu_tmp1_i64);5909 gen_helper_fildll_ST0(cpu_tmp1_i64); 5913 5910 break; 5914 5911 case 0x3f: /* fistpll */ 5915 tcg_gen_helper_1_0(helper_fistll_ST0,cpu_tmp1_i64);5912 gen_helper_fistll_ST0(cpu_tmp1_i64); 5916 5913 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, 5917 5914 (s->mem_index 
>> 2) - 1); 5918 tcg_gen_helper_0_0(helper_fpop);5915 gen_helper_fpop(); 5919 5916 break; 5920 5917 default: … … 5927 5924 switch(op) { 5928 5925 case 0x08: /* fld sti */ 5929 tcg_gen_helper_0_0(helper_fpush);5930 tcg_gen_helper_0_1(helper_fmov_ST0_STN,tcg_const_i32((opreg + 1) & 7));5926 gen_helper_fpush(); 5927 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7)); 5931 5928 break; 5932 5929 case 0x09: /* fxchg sti */ 5933 5930 case 0x29: /* fxchg4 sti, undocumented op */ 5934 5931 case 0x39: /* fxchg7 sti, undocumented op */ 5935 tcg_gen_helper_0_1(helper_fxchg_ST0_STN,tcg_const_i32(opreg));5932 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg)); 5936 5933 break; 5937 5934 case 0x0a: /* grp d9/2 */ … … 5942 5939 gen_op_set_cc_op(s->cc_op); 5943 5940 gen_jmp_im(pc_start - s->cs_base); 5944 tcg_gen_helper_0_0(helper_fwait);5941 gen_helper_fwait(); 5945 5942 break; 5946 5943 default: … … 5951 5948 switch(rm) { 5952 5949 case 0: /* fchs */ 5953 tcg_gen_helper_0_0(helper_fchs_ST0);5950 gen_helper_fchs_ST0(); 5954 5951 break; 5955 5952 case 1: /* fabs */ 5956 tcg_gen_helper_0_0(helper_fabs_ST0);5953 gen_helper_fabs_ST0(); 5957 5954 break; 5958 5955 case 4: /* ftst */ 5959 tcg_gen_helper_0_0(helper_fldz_FT0);5960 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);5956 gen_helper_fldz_FT0(); 5957 gen_helper_fcom_ST0_FT0(); 5961 5958 break; 5962 5959 case 5: /* fxam */ 5963 tcg_gen_helper_0_0(helper_fxam_ST0);5960 gen_helper_fxam_ST0(); 5964 5961 break; 5965 5962 default: … … 5971 5968 switch(rm) { 5972 5969 case 0: 5973 tcg_gen_helper_0_0(helper_fpush);5974 tcg_gen_helper_0_0(helper_fld1_ST0);5970 gen_helper_fpush(); 5971 gen_helper_fld1_ST0(); 5975 5972 break; 5976 5973 case 1: 5977 tcg_gen_helper_0_0(helper_fpush);5978 tcg_gen_helper_0_0(helper_fldl2t_ST0);5974 gen_helper_fpush(); 5975 gen_helper_fldl2t_ST0(); 5979 5976 break; 5980 5977 case 2: 5981 tcg_gen_helper_0_0(helper_fpush);5982 tcg_gen_helper_0_0(helper_fldl2e_ST0);5978 gen_helper_fpush(); 5979 gen_helper_fldl2e_ST0(); 5983 5980 break; 5984 5981 case 3: 5985 tcg_gen_helper_0_0(helper_fpush);5986 tcg_gen_helper_0_0(helper_fldpi_ST0);5982 gen_helper_fpush(); 5983 gen_helper_fldpi_ST0(); 5987 5984 break; 5988 5985 case 4: 5989 tcg_gen_helper_0_0(helper_fpush);5990 tcg_gen_helper_0_0(helper_fldlg2_ST0);5986 gen_helper_fpush(); 5987 gen_helper_fldlg2_ST0(); 5991 5988 break; 5992 5989 case 5: 5993 tcg_gen_helper_0_0(helper_fpush);5994 tcg_gen_helper_0_0(helper_fldln2_ST0);5990 gen_helper_fpush(); 5991 gen_helper_fldln2_ST0(); 5995 5992 break; 5996 5993 case 6: 5997 tcg_gen_helper_0_0(helper_fpush);5998 tcg_gen_helper_0_0(helper_fldz_ST0);5994 gen_helper_fpush(); 5995 gen_helper_fldz_ST0(); 5999 5996 break; 6000 5997 default: … … 6006 6003 switch(rm) { 6007 6004 case 0: /* f2xm1 */ 6008 tcg_gen_helper_0_0(helper_f2xm1);6005 gen_helper_f2xm1(); 6009 6006 break; 6010 6007 case 1: /* fyl2x */ 6011 tcg_gen_helper_0_0(helper_fyl2x);6008 gen_helper_fyl2x(); 6012 6009 break; 6013 6010 case 2: /* fptan */ 6014 tcg_gen_helper_0_0(helper_fptan);6011 gen_helper_fptan(); 6015 6012 break; 6016 6013 case 3: /* fpatan */ 6017 tcg_gen_helper_0_0(helper_fpatan);6014 gen_helper_fpatan(); 6018 6015 break; 6019 6016 case 4: /* fxtract */ 6020 tcg_gen_helper_0_0(helper_fxtract);6017 gen_helper_fxtract(); 6021 6018 break; 6022 6019 case 5: /* fprem1 */ 6023 tcg_gen_helper_0_0(helper_fprem1);6020 gen_helper_fprem1(); 6024 6021 break; 6025 6022 case 6: /* fdecstp */ 6026 tcg_gen_helper_0_0(helper_fdecstp);6023 gen_helper_fdecstp(); 6027 6024 break; 6028 6025 default: 6029 6026 
case 7: /* fincstp */ 6030 tcg_gen_helper_0_0(helper_fincstp);6027 gen_helper_fincstp(); 6031 6028 break; 6032 6029 } … … 6035 6032 switch(rm) { 6036 6033 case 0: /* fprem */ 6037 tcg_gen_helper_0_0(helper_fprem);6034 gen_helper_fprem(); 6038 6035 break; 6039 6036 case 1: /* fyl2xp1 */ 6040 tcg_gen_helper_0_0(helper_fyl2xp1);6037 gen_helper_fyl2xp1(); 6041 6038 break; 6042 6039 case 2: /* fsqrt */ 6043 tcg_gen_helper_0_0(helper_fsqrt);6040 gen_helper_fsqrt(); 6044 6041 break; 6045 6042 case 3: /* fsincos */ 6046 tcg_gen_helper_0_0(helper_fsincos);6043 gen_helper_fsincos(); 6047 6044 break; 6048 6045 case 5: /* fscale */ 6049 tcg_gen_helper_0_0(helper_fscale);6046 gen_helper_fscale(); 6050 6047 break; 6051 6048 case 4: /* frndint */ 6052 tcg_gen_helper_0_0(helper_frndint);6049 gen_helper_frndint(); 6053 6050 break; 6054 6051 case 6: /* fsin */ 6055 tcg_gen_helper_0_0(helper_fsin);6052 gen_helper_fsin(); 6056 6053 break; 6057 6054 default: 6058 6055 case 7: /* fcos */ 6059 tcg_gen_helper_0_0(helper_fcos);6056 gen_helper_fcos(); 6060 6057 break; 6061 6058 } … … 6069 6066 op1 = op & 7; 6070 6067 if (op >= 0x20) { 6071 tcg_gen_helper_0_1(helper_fp_arith_STN_ST0[op1], tcg_const_i32(opreg));6068 gen_helper_fp_arith_STN_ST0(op1, opreg); 6072 6069 if (op >= 0x30) 6073 tcg_gen_helper_0_0(helper_fpop);6070 gen_helper_fpop(); 6074 6071 } else { 6075 tcg_gen_helper_0_1(helper_fmov_FT0_STN,tcg_const_i32(opreg));6076 tcg_gen_helper_0_0(helper_fp_arith_ST0_FT0[op1]);6072 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); 6073 gen_helper_fp_arith_ST0_FT0(op1); 6077 6074 } 6078 6075 } … … 6080 6077 case 0x02: /* fcom */ 6081 6078 case 0x22: /* fcom2, undocumented op */ 6082 tcg_gen_helper_0_1(helper_fmov_FT0_STN,tcg_const_i32(opreg));6083 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);6079 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); 6080 gen_helper_fcom_ST0_FT0(); 6084 6081 break; 6085 6082 case 0x03: /* fcomp */ 6086 6083 case 0x23: /* fcomp3, undocumented op */ 6087 6084 case 0x32: /* fcomp5, undocumented op */ 6088 tcg_gen_helper_0_1(helper_fmov_FT0_STN,tcg_const_i32(opreg));6089 tcg_gen_helper_0_0(helper_fcom_ST0_FT0);6090 tcg_gen_helper_0_0(helper_fpop);6085 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); 6086 gen_helper_fcom_ST0_FT0(); 6087 gen_helper_fpop(); 6091 6088 break; 6092 6089 case 0x15: /* da/5 */ 6093 6090 switch(rm) { 6094 6091 case 1: /* fucompp */ 6095 tcg_gen_helper_0_1(helper_fmov_FT0_STN,tcg_const_i32(1));6096 tcg_gen_helper_0_0(helper_fucom_ST0_FT0);6097 tcg_gen_helper_0_0(helper_fpop);6098 tcg_gen_helper_0_0(helper_fpop);6092 gen_helper_fmov_FT0_STN(tcg_const_i32(1)); 6093 gen_helper_fucom_ST0_FT0(); 6094 gen_helper_fpop(); 6095 gen_helper_fpop(); 6099 6096 break; 6100 6097 default: … … 6109 6106 break; 6110 6107 case 2: /* fclex */ 6111 tcg_gen_helper_0_0(helper_fclex);6108 gen_helper_fclex(); 6112 6109 break; 6113 6110 case 3: /* fninit */ 6114 tcg_gen_helper_0_0(helper_fninit);6111 gen_helper_fninit(); 6115 6112 break; 6116 6113 case 4: /* fsetpm (287 only, just do nop here) */ … … 6123 6120 if (s->cc_op != CC_OP_DYNAMIC) 6124 6121 gen_op_set_cc_op(s->cc_op); 6125 tcg_gen_helper_0_1(helper_fmov_FT0_STN,tcg_const_i32(opreg));6126 tcg_gen_helper_0_0(helper_fucomi_ST0_FT0);6122 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); 6123 gen_helper_fucomi_ST0_FT0(); 6127 6124 s->cc_op = CC_OP_EFLAGS; 6128 6125 break; … … 6130 6127 if (s->cc_op != CC_OP_DYNAMIC) 6131 6128 gen_op_set_cc_op(s->cc_op); 6132 tcg_gen_helper_0_1(helper_fmov_FT0_STN,tcg_const_i32(opreg));6133 
-            tcg_gen_helper_0_0(helper_fcomi_ST0_FT0);
+            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
+            gen_helper_fcomi_ST0_FT0();
             s->cc_op = CC_OP_EFLAGS;
             break;
         case 0x28: /* ffree sti */
-            tcg_gen_helper_0_1(helper_ffree_STN, tcg_const_i32(opreg));
+            gen_helper_ffree_STN(tcg_const_i32(opreg));
             break;
         case 0x2a: /* fst sti */
-            tcg_gen_helper_0_1(helper_fmov_STN_ST0, tcg_const_i32(opreg));
+            gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
             break;
         case 0x2b: /* fstp sti */
…
         case 0x3a: /* fstp8 sti, undocumented op */
         case 0x3b: /* fstp9 sti, undocumented op */
-            tcg_gen_helper_0_1(helper_fmov_STN_ST0, tcg_const_i32(opreg));
-            tcg_gen_helper_0_0(helper_fpop);
+            gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
+            gen_helper_fpop();
             break;
         case 0x2c: /* fucom st(i) */
-            tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
-            tcg_gen_helper_0_0(helper_fucom_ST0_FT0);
+            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
+            gen_helper_fucom_ST0_FT0();
             break;
         case 0x2d: /* fucomp st(i) */
-            tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
-            tcg_gen_helper_0_0(helper_fucom_ST0_FT0);
-            tcg_gen_helper_0_0(helper_fpop);
+            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
+            gen_helper_fucom_ST0_FT0();
+            gen_helper_fpop();
             break;
         case 0x33: /* de/3 */
             switch(rm) {
             case 1: /* fcompp */
-                tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(1));
-                tcg_gen_helper_0_0(helper_fcom_ST0_FT0);
-                tcg_gen_helper_0_0(helper_fpop);
-                tcg_gen_helper_0_0(helper_fpop);
+                gen_helper_fmov_FT0_STN(tcg_const_i32(1));
+                gen_helper_fcom_ST0_FT0();
+                gen_helper_fpop();
+                gen_helper_fpop();
                 break;
             default:
…
             break;
         case 0x38: /* ffreep sti, undocumented op */
-            tcg_gen_helper_0_1(helper_ffree_STN, tcg_const_i32(opreg));
-            tcg_gen_helper_0_0(helper_fpop);
+            gen_helper_ffree_STN(tcg_const_i32(opreg));
+            gen_helper_fpop();
             break;
         case 0x3c: /* df/4 */
             switch(rm) {
             case 0:
-                tcg_gen_helper_1_0(helper_fnstsw, cpu_tmp2_i32);
+                gen_helper_fnstsw(cpu_tmp2_i32);
                 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
                 gen_op_mov_reg_T0(OT_WORD, R_EAX);
…
             if (s->cc_op != CC_OP_DYNAMIC)
                 gen_op_set_cc_op(s->cc_op);
-            tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
-            tcg_gen_helper_0_0(helper_fucomi_ST0_FT0);
-            tcg_gen_helper_0_0(helper_fpop);
+            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
+            gen_helper_fucomi_ST0_FT0();
+            gen_helper_fpop();
             s->cc_op = CC_OP_EFLAGS;
             break;
…
             if (s->cc_op != CC_OP_DYNAMIC)
                 gen_op_set_cc_op(s->cc_op);
-            tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg));
-            tcg_gen_helper_0_0(helper_fcomi_ST0_FT0);
-            tcg_gen_helper_0_0(helper_fpop);
+            gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
+            gen_helper_fcomi_ST0_FT0();
+            gen_helper_fpop();
             s->cc_op = CC_OP_EFLAGS;
             break;
…
             l1 = gen_new_label();
             gen_jcc1(s, s->cc_op, op1, l1);
-            tcg_gen_helper_0_1(helper_fmov_ST0_STN, tcg_const_i32(opreg));
+            gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
             gen_set_label(l1);
         }
…
             gen_io_start();
         tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
-        tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1],
-                           cpu_tmp2_i32);
+        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
         gen_op_mov_reg_T1(ot, R_EAX);
         if (use_icount) {
…
         gen_check_io(s, ot, pc_start - s->cs_base,
                      svm_is_rep(prefixes));
-#ifdef VBOX /* bird: linux is writing to this port for delaying I/O. */ /** @todo this breaks AIX, remove. */
+#ifdef VBOX /* bird: linux is writing to this port for delaying I/O. */ /** @todo YYY: this breaks AIX, remove. */
         if (val == 0x80)
             break;
…
         tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
         tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
-        tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
+        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
         if (use_icount) {
…
             gen_io_start();
         tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
-        tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32);
+        gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
         gen_op_mov_reg_T1(ot, R_EAX);
         if (use_icount) {
…
         tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
         tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
-        tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
+        gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
         if (use_icount) {
             gen_io_end();
…
                 gen_op_set_cc_op(s->cc_op);
             gen_jmp_im(pc_start - s->cs_base);
-            tcg_gen_helper_0_2(helper_lret_protected,
-                               tcg_const_i32(s->dflag),
-                               tcg_const_i32(val));
+            gen_helper_lret_protected(tcg_const_i32(s->dflag),
+                                      tcg_const_i32(val));
         } else {
             gen_stack_A0(s);
…
         if (!s->pe) {
             /* real mode */
-            tcg_gen_helper_0_1(helper_iret_real, tcg_const_i32(s->dflag));
+            gen_helper_iret_real(tcg_const_i32(s->dflag));
             s->cc_op = CC_OP_EFLAGS;
         } else if (s->vm86) {
…
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
             } else {
-                tcg_gen_helper_0_1(helper_iret_real, tcg_const_i32(s->dflag));
+                gen_helper_iret_real(tcg_const_i32(s->dflag));
                 s->cc_op = CC_OP_EFLAGS;
             }
…
                 gen_op_set_cc_op(s->cc_op);
             gen_jmp_im(pc_start - s->cs_base);
-            tcg_gen_helper_0_2(helper_iret_protected,
-                               tcg_const_i32(s->dflag),
-                               tcg_const_i32(s->pc - s->cs_base));
+            gen_helper_iret_protected(tcg_const_i32(s->dflag),
+                                      tcg_const_i32(s->pc - s->cs_base));
             s->cc_op = CC_OP_EFLAGS;
         }
…
             if (s->dflag == 0)
                 tval &= 0xffff;
+#ifdef VBOX /* upstream fix */
             else if (!CODE64(s))
                 tval &= 0xffffffff;
+#endif
             gen_movtl_T0_im(next_eip);
             gen_push_T0(s);
…
         reg = ((modrm >> 3) & 7) | rex_r;
         mod = (modrm >> 6) & 3;
-        t0 = tcg_temp_local_new(TCG_TYPE_TL);
+        t0 = tcg_temp_local_new();
         if (mod != 3) {
             gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
…
#ifdef VBOX
         if (s->vm86 && s->vme && s->iopl != 3)
-            tcg_gen_helper_1_0(helper_read_eflags_vme, cpu_T[0]);
+            gen_helper_read_eflags_vme(cpu_T[0]);
         else
#endif
-            tcg_gen_helper_1_0(helper_read_eflags, cpu_T[0]);
+            gen_helper_read_eflags(cpu_T[0]);
         gen_push_T0(s);
     }
…
         if (s->cpl == 0) {
             if (s->dflag) {
-                tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+                gen_helper_write_eflags(cpu_T[0],
                     tcg_const_i32((TF_MASK | AC_MASK |
                                    ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
             } else {
-                tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+                gen_helper_write_eflags(cpu_T[0],
                     tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
             }
…
             if (s->cpl <= s->iopl) {
                 if (s->dflag) {
-                    tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+                    gen_helper_write_eflags(cpu_T[0],
                         tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
                 } else {
-                    tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+                    gen_helper_write_eflags(cpu_T[0],
                         tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
                 }
             } else {
                 if (s->dflag) {
-                    tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+                    gen_helper_write_eflags(cpu_T[0],
                         tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
                 } else {
#ifdef VBOX
                     if (s->vm86 && s->vme)
-                        tcg_gen_helper_0_1(helper_write_eflags_vme, cpu_T[0]);
+                        gen_helper_write_eflags_vme(cpu_T[0]);
                     else
#endif
-                        tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0],
+                        gen_helper_write_eflags(cpu_T[0],
                             tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
                 }
…
         label1 = gen_new_label();
         tcg_gen_movi_tl(cpu_cc_dst, 0);
-        t0 = tcg_temp_local_new(TCG_TYPE_TL);
+        t0 = tcg_temp_local_new();
         tcg_gen_mov_tl(t0, cpu_T[0]);
         tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
         if (b & 1) {
-            tcg_gen_helper_1_1(helper_bsr, cpu_T[0], t0);
+            gen_helper_bsr(cpu_T[0], t0);
         } else {
-            tcg_gen_helper_1_1(helper_bsf, cpu_T[0], t0);
+            gen_helper_bsf(cpu_T[0], t0);
         }
         gen_op_mov_reg_T0(ot, reg);
…
         if (s->cc_op != CC_OP_DYNAMIC)
             gen_op_set_cc_op(s->cc_op);
-        tcg_gen_helper_0_0(helper_daa);
+        gen_helper_daa();
         s->cc_op = CC_OP_EFLAGS;
         break;
…
         if (s->cc_op != CC_OP_DYNAMIC)
             gen_op_set_cc_op(s->cc_op);
-        tcg_gen_helper_0_0(helper_das);
+        gen_helper_das();
         s->cc_op = CC_OP_EFLAGS;
         break;
…
         if (s->cc_op != CC_OP_DYNAMIC)
             gen_op_set_cc_op(s->cc_op);
-        tcg_gen_helper_0_0(helper_aaa);
+        gen_helper_aaa();
         s->cc_op = CC_OP_EFLAGS;
         break;
…
         if (s->cc_op != CC_OP_DYNAMIC)
             gen_op_set_cc_op(s->cc_op);
-        tcg_gen_helper_0_0(helper_aas);
+        gen_helper_aas();
         s->cc_op = CC_OP_EFLAGS;
         break;
…
             gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
         } else {
-            tcg_gen_helper_0_1(helper_aam, tcg_const_i32(val));
+            gen_helper_aam(tcg_const_i32(val));
             s->cc_op = CC_OP_LOGICB;
         }
…
             goto illegal_op;
         val = ldub_code(s->pc++);
-        tcg_gen_helper_0_1(helper_aad, tcg_const_i32(val));
+        gen_helper_aad(tcg_const_i32(val));
         s->cc_op = CC_OP_LOGICB;
         break;
…
                 gen_op_set_cc_op(s->cc_op);
             gen_jmp_im(pc_start - s->cs_base);
-            tcg_gen_helper_0_0(helper_fwait);
+            gen_helper_fwait();
         }
         break;
…
             gen_op_set_cc_op(s->cc_op);
         gen_jmp_im(pc_start - s->cs_base);
-        tcg_gen_helper_0_1(helper_into, tcg_const_i32(s->pc - pc_start));
-        break;
+        gen_helper_into(tcg_const_i32(s->pc - pc_start));
+        break;
+#ifdef WANT_ICEBP
     case 0xf1: /* icebp (undocumented, exits to external debugger) */
         gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
…
#endif
         break;
+#endif
     case 0xfa: /* cli */
         if (!s->vm86) {
             if (s->cpl <= s->iopl) {
-                tcg_gen_helper_0_0(helper_cli);
+                gen_helper_cli();
             } else {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
…
         } else {
             if (s->iopl == 3) {
-                tcg_gen_helper_0_0(helper_cli);
+                gen_helper_cli();
#ifdef VBOX
             } else if (s->iopl != 3 && s->vme) {
-                tcg_gen_helper_0_0(helper_cli_vme);
+                gen_helper_cli_vme();
#endif
             } else {
…
             if (s->cpl <= s->iopl) {
             gen_sti:
-                tcg_gen_helper_0_0(helper_sti);
+                gen_helper_sti();
                 /* interruptions are enabled only the first insn after sti */
                 /* If several instructions disable interrupts, only the
                    _first_ does it */
                 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
-                    tcg_gen_helper_0_0(helper_set_inhibit_irq);
+                    gen_helper_set_inhibit_irq();
                 /* give a chance to handle pending irqs */
                 gen_jmp_im(s->pc - s->cs_base);
…
#ifdef VBOX
             } else if (s->iopl != 3 && s->vme) {
-                tcg_gen_helper_0_0(helper_sti_vme);
+                gen_helper_sti_vme();
                 /* give a chance to handle pending irqs */
                 gen_jmp_im(s->pc - s->cs_base);
…
         tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
         if (ot == OT_WORD)
-            tcg_gen_helper_0_2(helper_boundw, cpu_A0, cpu_tmp2_i32);
+            gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
         else
-            tcg_gen_helper_0_2(helper_boundl, cpu_A0, cpu_tmp2_i32);
+            gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
         break;
     case 0x1c8 ... 0x1cf: /* bswap reg */
…
         } else
         {
-            TCGv tmp0;
+            TCGv_i32 tmp0;
             gen_op_mov_TN_reg(OT_LONG, 0, reg);

-            tmp0 = tcg_temp_new(TCG_TYPE_I32);
+            tmp0 = tcg_temp_new_i32();
             tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
             tcg_gen_bswap_i32(tmp0, tmp0);
…
             gen_jmp_im(pc_start - s->cs_base);
             if (b & 2) {
-                tcg_gen_helper_0_0(helper_rdmsr);
+                gen_helper_rdmsr();
             } else {
-                tcg_gen_helper_0_0(helper_wrmsr);
+                gen_helper_wrmsr();
             }
         }
…
         if (use_icount)
             gen_io_start();
-        tcg_gen_helper_0_0(helper_rdtsc);
+        gen_helper_rdtsc();
         if (use_icount) {
             gen_io_end();
…
             gen_op_set_cc_op(s->cc_op);
         gen_jmp_im(pc_start - s->cs_base);
-        tcg_gen_helper_0_0(helper_rdpmc);
+        gen_helper_rdpmc();
         break;
     case 0x134: /* sysenter */
…
             }
             gen_jmp_im(pc_start - s->cs_base);
-            tcg_gen_helper_0_0(helper_sysenter);
+            gen_helper_sysenter();
             gen_eob(s);
         }
…
             }
             gen_jmp_im(pc_start - s->cs_base);
-            tcg_gen_helper_0_1(helper_sysexit, tcg_const_i32(dflag));
+            gen_helper_sysexit(tcg_const_i32(dflag));
             gen_eob(s);
         }
…
         }
         gen_jmp_im(pc_start - s->cs_base);
-        tcg_gen_helper_0_1(helper_syscall, tcg_const_i32(s->pc - pc_start));
+        gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
         gen_eob(s);
         break;
…
         }
         gen_jmp_im(pc_start - s->cs_base);
-        tcg_gen_helper_0_1(helper_sysret, tcg_const_i32(s->dflag));
+        gen_helper_sysret(tcg_const_i32(s->dflag));
         /* condition codes are modified only in long mode */
         if (s->lma)
…
             gen_op_set_cc_op(s->cc_op);
         gen_jmp_im(pc_start - s->cs_base);
-        tcg_gen_helper_0_0(helper_cpuid);
+        gen_helper_cpuid();
         break;
     case 0xf4: /* hlt */
…
                 gen_op_set_cc_op(s->cc_op);
             gen_jmp_im(pc_start - s->cs_base);
-            tcg_gen_helper_0_1(helper_hlt, tcg_const_i32(s->pc - pc_start));
+            gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
             s->is_jmp = 3;
         }
…
                 gen_jmp_im(pc_start - s->cs_base);
                 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
-                tcg_gen_helper_0_1(helper_lldt, cpu_tmp2_i32);
+                gen_helper_lldt(cpu_tmp2_i32);
             }
             break;
…
                 gen_jmp_im(pc_start - s->cs_base);
                 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
-                tcg_gen_helper_0_1(helper_ltr, cpu_tmp2_i32);
+                gen_helper_ltr(cpu_tmp2_i32);
             }
             break;
…
                 gen_op_set_cc_op(s->cc_op);
             if (op == 4)
-                tcg_gen_helper_0_1(helper_verr, cpu_T[0]);
+                gen_helper_verr(cpu_T[0]);
             else
-                tcg_gen_helper_0_1(helper_verw, cpu_T[0]);
+                gen_helper_verw(cpu_T[0]);
             s->cc_op = CC_OP_EFLAGS;
             break;
…
                     goto illegal_op;
                 gen_jmp_im(pc_start - s->cs_base);
-                tcg_gen_helper_0_0(helper_rdtscp);
+                gen_helper_rdtscp();
                 break;
             }
…
                 }
                 gen_add_A0_ds_seg(s);
-                tcg_gen_helper_0_1(helper_monitor, cpu_A0);
+                gen_helper_monitor(cpu_A0);
                 break;
             case 1: /* mwait */
…
                 }
                 gen_jmp_im(pc_start - s->cs_base);
-                tcg_gen_helper_0_1(helper_mwait, tcg_const_i32(s->pc - pc_start));
+                gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
                 gen_eob(s);
                 break;
…
                     break;
                 } else {
-                    tcg_gen_helper_0_2(helper_vmrun,
-                                       tcg_const_i32(s->aflag),
-                                       tcg_const_i32(s->pc - pc_start));
+                    gen_helper_vmrun(tcg_const_i32(s->aflag),
+                                     tcg_const_i32(s->pc - pc_start));
                     tcg_gen_exit_tb(0);
                     s->is_jmp = 3;
…
                 if (!(s->flags & HF_SVME_MASK))
                     goto illegal_op;
-                tcg_gen_helper_0_0(helper_vmmcall);
+                gen_helper_vmmcall();
                 break;
             case 2: /* VMLOAD */
…
                     break;
                 } else {
-                    tcg_gen_helper_0_1(helper_vmload,
-                                       tcg_const_i32(s->aflag));
+                    gen_helper_vmload(tcg_const_i32(s->aflag));
                 }
                 break;
…
                     break;
                 } else {
-                    tcg_gen_helper_0_1(helper_vmsave,
-                                       tcg_const_i32(s->aflag));
+                    gen_helper_vmsave(tcg_const_i32(s->aflag));
                 }
                 break;
…
                     break;
                 } else {
-                    tcg_gen_helper_0_0(helper_stgi);
+                    gen_helper_stgi();
                 }
                 break;
…
                     break;
                 } else {
-                    tcg_gen_helper_0_0(helper_clgi);
+                    gen_helper_clgi();
                 }
                 break;
…
                     !s->pe)
                     goto illegal_op;
-                tcg_gen_helper_0_0(helper_skinit);
+                gen_helper_skinit();
                 break;
             case 7: /* INVLPGA */
…
                     break;
                 } else {
-                    tcg_gen_helper_0_1(helper_invlpga,
-                                       tcg_const_i32(s->aflag));
+                    gen_helper_invlpga(tcg_const_i32(s->aflag));
                 }
                 break;
…
         case 4: /* smsw */
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
+#if defined TARGET_X86_64 && defined WORDS_BIGENDIAN
+            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
+#else
             tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
+#endif
             gen_ldst_modrm(s,
                            modrm, OT_WORD, OR_TMP0, 1);
             break;
…
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
             gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
-            tcg_gen_helper_0_1(helper_lmsw, cpu_T[0]);
+            gen_helper_lmsw(cpu_T[0]);
             gen_jmp_im(s->pc - s->cs_base);
             gen_eob(s);
…
                 gen_jmp_im(pc_start - s->cs_base);
                 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
-                tcg_gen_helper_0_1(helper_invlpg, cpu_A0);
+                gen_helper_invlpg(cpu_A0);
                 gen_jmp_im(s->pc - s->cs_base);
                 gen_eob(s);
…
         if (!s->pe || s->vm86)
             goto illegal_op;
-        t0 = tcg_temp_local_new(TCG_TYPE_TL);
-        t1 = tcg_temp_local_new(TCG_TYPE_TL);
-        t2 = tcg_temp_local_new(TCG_TYPE_TL);
+        t0 = tcg_temp_local_new();
+        t1 = tcg_temp_local_new();
+        t2 = tcg_temp_local_new();
#ifdef VBOX
-        a0 = tcg_temp_local_new(TCG_TYPE_TL);
+        a0 = tcg_temp_local_new();
#endif
         ot = OT_WORD;
…
         reg = ((modrm >> 3) & 7) | rex_r;
         gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
-        t0 = tcg_temp_local_new(TCG_TYPE_TL);
+        t0 = tcg_temp_local_new();
         if (s->cc_op != CC_OP_DYNAMIC)
             gen_op_set_cc_op(s->cc_op);
         if (b == 0x102)
-            tcg_gen_helper_1_1(helper_lar, t0, cpu_T[0]);
+            gen_helper_lar(t0, cpu_T[0]);
         else
-            tcg_gen_helper_1_1(helper_lsl, t0, cpu_T[0]);
+            gen_helper_lsl(t0, cpu_T[0]);
         tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
         label1 = gen_new_label();
…
             if (b & 2) {
                 gen_op_mov_TN_reg(ot, 0, rm);
-                tcg_gen_helper_0_2(helper_write_crN,
-                                   tcg_const_i32(reg), cpu_T[0]);
+                gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
                 gen_jmp_im(s->pc - s->cs_base);
                 gen_eob(s);
             } else {
-                tcg_gen_helper_1_1(helper_read_crN,
-                                   cpu_T[0], tcg_const_i32(reg));
+                gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
                 gen_op_mov_reg_T0(ot, rm);
             }
…
                 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                 gen_op_mov_TN_reg(ot, 0, rm);
-                tcg_gen_helper_0_2(helper_movl_drN_T0,
-                                   tcg_const_i32(reg), cpu_T[0]);
+                gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
                 gen_jmp_im(s->pc - s->cs_base);
                 gen_eob(s);
…
         } else {
             gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
-            tcg_gen_helper_0_0(helper_clts);
+            gen_helper_clts();
             /* abort block because static cpu state changed */
             gen_jmp_im(s->pc - s->cs_base);
…
                 gen_op_set_cc_op(s->cc_op);
             gen_jmp_im(pc_start - s->cs_base);
-            tcg_gen_helper_0_2(helper_fxsave,
-                               cpu_A0, tcg_const_i32((s->dflag == 2)));
+            gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2)));
             break;
         case 1: /* fxrstor */
…
                 gen_op_set_cc_op(s->cc_op);
             gen_jmp_im(pc_start - s->cs_base);
-            tcg_gen_helper_0_2(helper_fxrstor,
-                               cpu_A0, tcg_const_i32((s->dflag == 2)));
+            gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2)));
             break;
         case 2: /* ldmxcsr */
…
         }
         gen_jmp_im(s->pc - s->cs_base);
-        tcg_gen_helper_0_0(helper_rsm);
+        gen_helper_rsm();
         gen_eob(s);
         break;
…
         gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
-        tcg_gen_helper_1_2(helper_popcnt,
-                           cpu_T[0], cpu_T[0], tcg_const_i32(ot));
+        gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
         gen_op_mov_reg_T0(ot, reg);
…
     /* lock generation */
     if (s->prefix & PREFIX_LOCK)
-        tcg_gen_helper_0_0(helper_unlock);
+        gen_helper_unlock();
     return s->pc;
 illegal_op:
     if (s->prefix & PREFIX_LOCK)
-        tcg_gen_helper_0_0(helper_unlock);
+        gen_helper_unlock();
     /* XXX: ensure that no lock was generated */
     gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
…
     assert(sizeof(CCTable) == (1 << 4));
#endif
-    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
-    cpu_cc_op = tcg_global_mem_new(TCG_TYPE_I32,
-                                   TCG_AREG0, offsetof(CPUState, cc_op), "cc_op");
-    cpu_cc_src = tcg_global_mem_new(TCG_TYPE_TL,
-                                    TCG_AREG0, offsetof(CPUState, cc_src), "cc_src");
-    cpu_cc_dst = tcg_global_mem_new(TCG_TYPE_TL,
-                                    TCG_AREG0, offsetof(CPUState, cc_dst), "cc_dst");
-    cpu_cc_tmp = tcg_global_mem_new(TCG_TYPE_TL,
-                                    TCG_AREG0, offsetof(CPUState, cc_tmp), "cc_tmp");
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
+                                       offsetof(CPUState, cc_op), "cc_op");
+    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
+                                    "cc_src");
+    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
+                                    "cc_dst");
+    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
+                                    "cc_tmp");

     /* register helpers */
-
-#define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
+#define GEN_HELPER 2
#include "helper.h"
 }
…
     target_ulong pc_ptr;
     uint16_t *gen_opc_end;
+    CPUBreakpoint *bp;
     int j, lj, cflags;
     uint64_t flags;
…
     dc->record_call = 0;
# endif
-#endif
+#endif /* VBOX */
     dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
     dc->iopl = (flags >> IOPL_SHIFT) & 3;
…
#endif

-    cpu_T[0] = tcg_temp_new(TCG_TYPE_TL);
-    cpu_T[1] = tcg_temp_new(TCG_TYPE_TL);
-    cpu_A0 = tcg_temp_new(TCG_TYPE_TL);
-    cpu_T3 = tcg_temp_new(TCG_TYPE_TL);
-
-    cpu_tmp0 = tcg_temp_new(TCG_TYPE_TL);
-    cpu_tmp1_i64 = tcg_temp_new(TCG_TYPE_I64);
-    cpu_tmp2_i32 = tcg_temp_new(TCG_TYPE_I32);
-    cpu_tmp3_i32 = tcg_temp_new(TCG_TYPE_I32);
-    cpu_tmp4 = tcg_temp_new(TCG_TYPE_TL);
-    cpu_tmp5 = tcg_temp_new(TCG_TYPE_TL);
-    cpu_tmp6 = tcg_temp_new(TCG_TYPE_TL);
-    cpu_ptr0 = tcg_temp_new(TCG_TYPE_PTR);
-    cpu_ptr1 = tcg_temp_new(TCG_TYPE_PTR);
+    cpu_T[0] = tcg_temp_new();
+    cpu_T[1] = tcg_temp_new();
+    cpu_A0 = tcg_temp_new();
+    cpu_T3 = tcg_temp_new();
+
+    cpu_tmp0 = tcg_temp_new();
+    cpu_tmp1_i64 = tcg_temp_new_i64();
+    cpu_tmp2_i32 = tcg_temp_new_i32();
+    cpu_tmp3_i32 = tcg_temp_new_i32();
+    cpu_tmp4 = tcg_temp_new();
+    cpu_tmp5 = tcg_temp_new();
+    cpu_tmp6 = tcg_temp_new();
+    cpu_ptr0 = tcg_temp_new_ptr();
+    cpu_ptr1 = tcg_temp_new_ptr();

     gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
…
     gen_icount_start();
     for(;;) {
-        if (env->nb_breakpoints > 0) {
-            for(j = 0; j < env->nb_breakpoints; j++) {
-                if (env->breakpoints[j] == pc_ptr) {
+        if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+            TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+                if (bp->pc == pc_ptr) {
                     gen_debug(dc, pc_ptr - dc->cs_base);
                     break;
…

#ifdef DEBUG_DISAS
-    if (loglevel & CPU_LOG_TB_CPU) {
-        cpu_dump_state(env, logfile,
-                       fprintf, X86_DUMP_CCOP);
-    }
-    if (loglevel & CPU_LOG_TB_IN_ASM) {
+    log_cpu_state_mask(CPU_LOG_TB_CPU, env, X86_DUMP_CCOP);
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
         int disas_flags;
-        fprintf(logfile, "----------------\n");
-        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
+        qemu_log("----------------\n");
+        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
         if (dc->code64)
…
#endif
         disas_flags = !dc->code32;
-        target_disas(logfile, pc_start, pc_ptr - pc_start, disas_flags);
-        fprintf(logfile, "\n");
+        log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
+        qemu_log("\n");
     }
#endif
…
     int cc_op;
#ifdef DEBUG_DISAS
-    if (loglevel & CPU_LOG_TB_OP) {
+    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
         int i;
-        fprintf(logfile, "RESTORE:\n");
+        qemu_log("RESTORE:\n");
         for(i = 0;i <= pc_pos; i++) {
             if (gen_opc_instr_start[i]) {
-                fprintf(logfile, "0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
-            }
-        }
-        fprintf(logfile, "spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
+                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
+            }
+        }
+        qemu_log("spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                  searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
                  (uint32_t)tb->cs_base);
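Note on the pattern applied throughout the hunks above: every untyped dispatcher call (tcg_gen_helper_0_0 through tcg_gen_helper_1_4) is replaced by a per-helper gen_helper_*() wrapper. Those wrappers are not hand-written; they are expanded from the DEF_HELPER entries in helper.h by the def-helper machinery, with the GEN_HELPER define selecting what gets emitted (the value 2 used in the tcg init code above emits the registration table). As a rough, illustrative sketch only -- the exact macro expansion lives in def-helper.h and is not part of this changeset -- a generated wrapper for a one-argument void helper has this shape, built on the tcg_gen_helperN() primitive added to tcg-op.h further down:

    /* Illustrative only: approximate shape of the wrapper that a
     * "#define GEN_HELPER 1 / #include "helper.h"" expansion of an entry
     * like DEF_HELPER_1(fmov_ST0_STN, void, i32) would produce. */
    static inline void gen_helper_fmov_ST0_STN(TCGv_i32 arg1)
    {
        TCGArg args[1];
        args[0] = GET_TCGV_I32(arg1);   /* unwrap the typed handle */
        /* void return: TCG_CALL_DUMMY_ARG marks the absent return slot */
        tcg_gen_helperN(helper_fmov_ST0_STN, 0, 0, TCG_CALL_DUMMY_ARG,
                        1, args);
    }

The payoff is type safety: TCGv_i32 and TCGv_i64 become distinct C types in this changeset, so passing a 64-bit temporary to a 32-bit helper now fails at compile time instead of silently misbehaving.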
trunk/src/recompiler/tcg/README
r36140 r36170

 - Basic blocks end after branches (e.g. brcond_i32 instruction),
   goto_tb and exit_tb instructions.
-- Basic blocks end before legacy dyngen operations.
-- Basic blocks start after the end of a previous basic block, at a
-  set_label instruction or after a legacy dyngen operation.
+- Basic blocks start after the end of a previous basic block, or at a
+  set_label instruction.

 After the end of a basic block, the content of temporaries is
…
 t0=~t1

+* andc_i32/i64 t0, t1, t2
+
+t0=t1&~t2
+
+* eqv_i32/i64 t0, t1, t2
+
+t0=~(t1^t2)
+
+* nand_i32/i64 t0, t1, t2
+
+t0=~(t1&t2)
+
+* nor_i32/i64 t0, t1, t2
+
+t0=~(t1|t2)
+
+* orc_i32/i64 t0, t1, t2
+
+t0=t1|~t2
+
-********* Shifts
+********* Shifts/Rotates

 * shl_i32/i64 t0, t1, t2
…
 t0=t1 >> t2 (signed). Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+
+* rotl_i32/i64 t0, t1, t2
+
+Rotation of t2 bits to the left. Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+
+* rotr_i32/i64 t0, t1, t2
+
+Rotation of t2 bits to the right. Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)

 ********* Misc
…
 instructions.

-* qemu_ld_i32/i64 t0, t1, flags
-  qemu_ld8u_i32/i64 t0, t1, flags
-  qemu_ld8s_i32/i64 t0, t1, flags
-  qemu_ld16u_i32/i64 t0, t1, flags
-  qemu_ld16s_i32/i64 t0, t1, flags
-  qemu_ld32u_i64 t0, t1, flags
-  qemu_ld32s_i64 t0, t1, flags
+* qemu_ld8u t0, t1, flags
+  qemu_ld8s t0, t1, flags
+  qemu_ld16u t0, t1, flags
+  qemu_ld16s t0, t1, flags
+  qemu_ld32u t0, t1, flags
+  qemu_ld32s t0, t1, flags
+  qemu_ld64 t0, t1, flags

 Load data at the QEMU CPU address t1 into t0. t1 has the QEMU CPU
…
 kernel access) for example.

-* qemu_st_i32/i64 t0, t1, flags
-  qemu_st8_i32/i64 t0, t1, flags
-  qemu_st16_i32/i64 t0, t1, flags
-  qemu_st32_i64 t0, t1, flags
+* qemu_st8 t0, t1, flags
+  qemu_st16 t0, t1, flags
+  qemu_st32 t0, t1, flags
+  qemu_st64 t0, t1, flags

 Store the data t0 at the QEMU CPU Address t1. t1 has the QEMU CPU
…
 instruction. Memory constraints are not supported in this
 version. Aliases are specified in the input operands as for GCC.
+
+The same register may be used for both an input and an output, even when
+they are not explicitly aliased. If an op expands to multiple target
+instructions then care must be taken to avoid clobbering input values.
+GCC style "early clobber" outputs are not currently supported.

 A target can define specific register or constant constraints. If an
…
 64 bit return type.

-5) Migration from dyngen to TCG
-
-TCG is backward compatible with QEMU "dyngen" operations. It means
-that TCG instructions can be freely mixed with dyngen operations. It
-is expected that QEMU targets will be progressively fully converted to
-TCG. Once a target is fully converted to TCG, it will be possible
-to apply more optimizations because more registers will be free for
-the generated code.
-
-The exception model is the same as the dyngen one.
-
-6) Recommended coding rules for best performance
+5) Recommended coding rules for best performance

 - Use globals to represent the parts of the QEMU CPU state which are
…
 - Avoid globals stored in fixed registers.
   They must be used only to
   store the pointer to the CPU state and possibly to store a pointer
-  to a register window. The other uses are to ensure backward
-  compatibility with dyngen during the porting a new target to TCG.
+  to a register window.

 - Use temporaries. Use local temporaries only when really needed,
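The README hunk above specifies the new derived logic ops (andc, eqv, nand, nor, orc) only by their semantics; backends may implement them natively, and front ends can fall back to existing ops. As a minimal sketch, assuming a host without a native andc, the documented identity t0 = t1 & ~t2 can be open-coded with ops every backend already provides (the function name here is hypothetical, not part of the changeset):

    /* Hypothetical front-end fallback for andc_i32: ret = arg1 & ~arg2. */
    static void gen_andc_i32_fallback(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
    {
        TCGv_i32 t0 = tcg_temp_new_i32();
        tcg_gen_not_i32(t0, arg2);       /* t0 = ~arg2 */
        tcg_gen_and_i32(ret, arg1, t0);  /* ret = arg1 & t0 */
        tcg_temp_free_i32(t0);
    }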
trunk/src/recompiler/tcg/i386/tcg-target.c
r36140 r36170

 }

-void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
 {
     if (val != 0)
…

 #ifdef VBOX
-void tcg_out_subi(TCGContext *s, int reg, tcg_target_long val)
+static void tcg_out_subi(TCGContext *s, int reg, tcg_target_long val)
 {
     if (val != 0)
…
         break;
     case 0:
+        /* movzbl */
+        tcg_out_modrm(s, 0xb6 | P_EXT, data_reg, TCG_REG_EAX);
+        break;
     case 1:
+        /* movzwl */
+        tcg_out_modrm(s, 0xb7 | P_EXT, data_reg, TCG_REG_EAX);
+        break;
     case 2:
     default:
…
     tcg_gen_stack_alignment_check(s);
 # endif
-
     tcg_out8(s, 0xe8);
     tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
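The interesting hunk here is the qemu_ld slow path: cases 0 and 1 previously fell through to the plain 32-bit move, so a byte or word load serviced by the out-of-line helper could leave stale high bits in data_reg. The added instructions zero-extend the helper's return value (in EAX) into data_reg; P_EXT supplies the 0x0f escape byte, so the emitted opcodes are movzbl (0f b6 /r) and movzwl (0f b7 /r). An annotated reading of the fixed switch (comments assumed from the hunk, not new behaviour):

    switch(opc) {
    case 0:
        /* movzbl %al -> data_reg: zero-extend the 8-bit result */
        tcg_out_modrm(s, 0xb6 | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 1:
        /* movzwl %ax -> data_reg: zero-extend the 16-bit result */
        tcg_out_modrm(s, 0xb7 | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 2:
    default:
        /* wider results: handled as before (elided in the hunk above) */
        break;
    }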
trunk/src/recompiler/tcg/i386/tcg-target.h
r36140 r36170

  * THE SOFTWARE.
  */
-
 #define TCG_TARGET_I386 1
trunk/src/recompiler/tcg/tcg-op.h
r36125 r36170

  * THE SOFTWARE.
  */
-
 #include "tcg.h"

-#ifdef CONFIG_DYNGEN_OP
-/* legacy dyngen operations */
-#include "gen-op.h"
-#endif
-
 int gen_new_label(void);

-static inline void tcg_gen_op1(int opc, TCGv arg1)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
+static inline void tcg_gen_op1_i32(int opc, TCGv_i32 arg1)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+}
+
+static inline void tcg_gen_op1_i64(int opc, TCGv_i64 arg1)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
 }

…
 }

-static inline void tcg_gen_op2(int opc, TCGv arg1, TCGv arg2)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
-}
-
-static inline void tcg_gen_op2i(int opc, TCGv arg1, TCGArg arg2)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
+static inline void tcg_gen_op2_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
+}
+
+static inline void tcg_gen_op2_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+}
+
+static inline void tcg_gen_op2i_i32(int opc, TCGv_i32 arg1, TCGArg arg2)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = arg2;
+}
+
+static inline void tcg_gen_op2i_i64(int opc, TCGv_i64 arg1, TCGArg arg2)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
     *gen_opparam_ptr++ = arg2;
 }

…
 }

-static inline void tcg_gen_op3(int opc, TCGv arg1, TCGv arg2, TCGv arg3)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
-    *gen_opparam_ptr++ = GET_TCGV(arg3);
-}
-
-static inline void tcg_gen_op3i(int opc, TCGv arg1, TCGv arg2, TCGArg arg3)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
+static inline void tcg_gen_op3_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2,
+                                   TCGv_i32 arg3)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg3);
+}
+
+static inline void tcg_gen_op3_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2,
+                                   TCGv_i64 arg3)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg3);
+}
+
+static inline void tcg_gen_op3i_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2,
+                                    TCGArg arg3)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
     *gen_opparam_ptr++ = arg3;
 }

-static inline void tcg_gen_op4(int opc, TCGv arg1, TCGv arg2, TCGv arg3,
-                               TCGv arg4)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
-    *gen_opparam_ptr++ = GET_TCGV(arg3);
-    *gen_opparam_ptr++ = GET_TCGV(arg4);
-}
-
-static inline void tcg_gen_op4i(int opc, TCGv arg1, TCGv arg2, TCGv arg3,
-                                TCGArg arg4)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
-    *gen_opparam_ptr++ = GET_TCGV(arg3);
+static inline void tcg_gen_op3i_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2,
+                                    TCGArg arg3)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+    *gen_opparam_ptr++ = arg3;
+}
+
+static inline void tcg_gen_ldst_op_i32(int opc, TCGv_i32 val, TCGv_ptr base,
+                                       TCGArg offset)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(val);
+    *gen_opparam_ptr++ = GET_TCGV_PTR(base);
+    *gen_opparam_ptr++ = offset;
+}
+
+static inline void tcg_gen_ldst_op_i64(int opc, TCGv_i64 val, TCGv_ptr base,
+                                       TCGArg offset)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(val);
+    *gen_opparam_ptr++ = GET_TCGV_PTR(base);
+    *gen_opparam_ptr++ = offset;
+}
+
+static inline void tcg_gen_qemu_ldst_op_i64_i32(int opc, TCGv_i64 val, TCGv_i32 addr,
+                                                TCGArg mem_index)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(val);
+    *gen_opparam_ptr++ = GET_TCGV_I32(addr);
+    *gen_opparam_ptr++ = mem_index;
+}
+
+static inline void tcg_gen_qemu_ldst_op_i64_i64(int opc, TCGv_i64 val, TCGv_i64 addr,
+                                                TCGArg mem_index)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(val);
+    *gen_opparam_ptr++ = GET_TCGV_I64(addr);
+    *gen_opparam_ptr++ = mem_index;
+}
+
+static inline void tcg_gen_op4_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2,
+                                   TCGv_i32 arg3, TCGv_i32 arg4)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg4);
+}
+
+static inline void tcg_gen_op4_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2,
+                                   TCGv_i64 arg3, TCGv_i64 arg4)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg4);
+}
+
+static inline void tcg_gen_op4i_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2,
+                                    TCGv_i32 arg3, TCGArg arg4)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg3);
     *gen_opparam_ptr++ = arg4;
 }

-static inline void tcg_gen_op4ii(int opc, TCGv arg1, TCGv arg2, TCGArg arg3,
-                                 TCGArg arg4)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
+static inline void tcg_gen_op4i_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2,
+                                    TCGv_i64 arg3, TCGArg arg4)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg3);
+    *gen_opparam_ptr++ = arg4;
+}
+
+static inline void tcg_gen_op4ii_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2,
+                                     TCGArg arg3, TCGArg arg4)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
     *gen_opparam_ptr++ = arg3;
     *gen_opparam_ptr++ = arg4;
 }

-static inline void tcg_gen_op5(int opc, TCGv arg1, TCGv arg2,
-                               TCGv arg3, TCGv arg4,
-                               TCGv arg5)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
-    *gen_opparam_ptr++ = GET_TCGV(arg3);
-    *gen_opparam_ptr++ = GET_TCGV(arg4);
-    *gen_opparam_ptr++ = GET_TCGV(arg5);
-}
-
-static inline void
-tcg_gen_op5i(int opc, TCGv arg1, TCGv arg2,
-             TCGv arg3, TCGv arg4,
-             TCGArg arg5)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
-    *gen_opparam_ptr++ = GET_TCGV(arg3);
-    *gen_opparam_ptr++ = GET_TCGV(arg4);
+static inline void tcg_gen_op4ii_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2,
+                                     TCGArg arg3, TCGArg arg4)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+    *gen_opparam_ptr++ = arg3;
+    *gen_opparam_ptr++ = arg4;
+}
+
+static inline void tcg_gen_op5_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2,
+                                   TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg4);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg5);
+}
+
+static inline void tcg_gen_op5_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2,
+                                   TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg4);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg5);
+}
+
+static inline void tcg_gen_op5i_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2,
+                                    TCGv_i32 arg3, TCGv_i32 arg4, TCGArg arg5)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg4);
     *gen_opparam_ptr++ = arg5;
 }

-static inline void tcg_gen_op6(int opc, TCGv arg1, TCGv arg2,
-                               TCGv arg3, TCGv arg4,
-                               TCGv arg5, TCGv arg6)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
-    *gen_opparam_ptr++ = GET_TCGV(arg3);
-    *gen_opparam_ptr++ = GET_TCGV(arg4);
-    *gen_opparam_ptr++ = GET_TCGV(arg5);
-    *gen_opparam_ptr++ = GET_TCGV(arg6);
-}
-
-static inline void tcg_gen_op6ii(int opc, TCGv arg1, TCGv arg2,
-                                 TCGv arg3, TCGv arg4,
-                                 TCGArg arg5, TCGArg arg6)
-{
-    *gen_opc_ptr++ = opc;
-    *gen_opparam_ptr++ = GET_TCGV(arg1);
-    *gen_opparam_ptr++ = GET_TCGV(arg2);
-    *gen_opparam_ptr++ = GET_TCGV(arg3);
-    *gen_opparam_ptr++ = GET_TCGV(arg4);
+static inline void tcg_gen_op5i_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2,
+                                    TCGv_i64 arg3, TCGv_i64 arg4, TCGArg arg5)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg4);
+    *gen_opparam_ptr++ = arg5;
+}
+
+static inline void tcg_gen_op6_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2,
+                                   TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5,
+                                   TCGv_i32 arg6)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg4);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg5);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg6);
+}
+
+static inline void tcg_gen_op6_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2,
+                                   TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5,
+                                   TCGv_i64 arg6)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg4);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg5);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg6);
+}
+
+static inline void tcg_gen_op6ii_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2,
+                                     TCGv_i32 arg3, TCGv_i32 arg4, TCGArg arg5,
+                                     TCGArg arg6)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I32(arg4);
     *gen_opparam_ptr++ = arg5;
     *gen_opparam_ptr++ = arg6;
 }

+static inline void tcg_gen_op6ii_i64(int opc, TCGv_i64 arg1, TCGv_i64 arg2,
+                                     TCGv_i64 arg3, TCGv_i64 arg4, TCGArg arg5,
+                                     TCGArg arg6)
+{
+    *gen_opc_ptr++ = opc;
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg1);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg2);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg3);
+    *gen_opparam_ptr++ = GET_TCGV_I64(arg4);
+    *gen_opparam_ptr++ = arg5;
+    *gen_opparam_ptr++ = arg6;
+}
+
 static inline void gen_set_label(int n)
 {
…
 }

-static inline void tcg_gen_mov_i32(TCGv ret, TCGv arg)
-{
-    if (GET_TCGV(ret) != GET_TCGV(arg))
-        tcg_gen_op2(INDEX_op_mov_i32, ret, arg);
-}
-
-static inline void tcg_gen_movi_i32(TCGv ret, int32_t arg)
-{
-    tcg_gen_op2i(INDEX_op_movi_i32, ret, arg);
+static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
+{
+    if (GET_TCGV_I32(ret) != GET_TCGV_I32(arg))
+        tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg);
+}
+
+static inline void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)
+{
+    tcg_gen_op2i_i32(INDEX_op_movi_i32, ret, arg);
 }

 /* helper calls */
-#define TCG_HELPER_CALL_FLAGS 0
-
-static inline void tcg_gen_helper_0_0(void *func)
-{
-    TCGv t0;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0, TCG_HELPER_CALL_FLAGS,
-                 0, NULL, 0, NULL);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_helper_0_1(void *func, TCGv arg)
-{
-    TCGv t0;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0, TCG_HELPER_CALL_FLAGS,
-                 0, NULL, 1, &arg);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_helper_0_2(void *func, TCGv arg1, TCGv arg2)
-{
-    TCGv args[2];
-    TCGv t0;
-    args[0] = arg1;
-    args[1] = arg2;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0, TCG_HELPER_CALL_FLAGS,
-                 0, NULL, 2, args);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_helper_0_3(void *func,
-                                      TCGv arg1, TCGv arg2, TCGv arg3)
-{
-    TCGv args[3];
-    TCGv t0;
-    args[0] = arg1;
-    args[1] = arg2;
-    args[2] = arg3;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0, TCG_HELPER_CALL_FLAGS,
-                 0, NULL, 3, args);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_helper_0_4(void *func, TCGv arg1, TCGv arg2,
-                                      TCGv arg3, TCGv arg4)
-{
-    TCGv args[4];
-    TCGv t0;
-    args[0] = arg1;
-    args[1] = arg2;
-    args[2] = arg3;
-    args[3] = arg4;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0, TCG_HELPER_CALL_FLAGS,
-                 0, NULL, 4, args);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_helper_1_0(void *func, TCGv ret)
-{
-    TCGv t0;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0,
-                 TCG_HELPER_CALL_FLAGS,
-                 1, &ret, 0, NULL);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_helper_1_1(void *func, TCGv ret, TCGv arg1)
-{
-    TCGv t0;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0, TCG_HELPER_CALL_FLAGS,
-                 1, &ret, 1, &arg1);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_helper_1_2(void *func, TCGv ret,
-                                      TCGv arg1, TCGv arg2)
-{
-    TCGv args[2];
-    TCGv t0;
-    args[0] = arg1;
-    args[1] = arg2;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0, TCG_HELPER_CALL_FLAGS,
-                 1, &ret, 2, args);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_helper_1_3(void *func, TCGv ret,
-                                      TCGv arg1, TCGv arg2, TCGv arg3)
-{
-    TCGv args[3];
-    TCGv t0;
-    args[0] = arg1;
-    args[1] = arg2;
-    args[2] = arg3;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0, TCG_HELPER_CALL_FLAGS,
-                 1, &ret, 3, args);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_helper_1_4(void *func, TCGv ret,
-                                      TCGv arg1, TCGv arg2, TCGv arg3,
-                                      TCGv arg4)
-{
-    TCGv args[4];
-    TCGv t0;
-    args[0] = arg1;
-    args[1] = arg2;
-    args[2] = arg3;
-    args[3] = arg4;
-    t0 = tcg_const_ptr((tcg_target_long)func);
-    tcg_gen_call(&tcg_ctx,
-                 t0, TCG_HELPER_CALL_FLAGS,
-                 1, &ret, 4, args);
-    tcg_temp_free(t0);
+static inline void tcg_gen_helperN(void *func, int flags, int sizemask,
+                                   TCGArg ret, int nargs, TCGArg *args)
+{
+    TCGv_ptr fn;
+    fn = tcg_const_ptr((tcg_target_long)func);
+    tcg_gen_callN(&tcg_ctx, fn, flags, sizemask, ret,
+                  nargs, args);
+    tcg_temp_free_ptr(fn);
+}
+
+/* FIXME: Should this be pure? */
+static inline void tcg_gen_helper64(void *func, TCGv_i64 ret,
+                                    TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_ptr fn;
+    TCGArg args[2];
+    fn = tcg_const_ptr((tcg_target_long)func);
+    args[0] = GET_TCGV_I64(a);
+    args[1] = GET_TCGV_I64(b);
+    tcg_gen_callN(&tcg_ctx, fn, 0, 7, GET_TCGV_I64(ret), 2, args);
+    tcg_temp_free_ptr(fn);
 }

 /* 32 bit ops */

-static inline void tcg_gen_ld8u_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
-{
-    tcg_gen_op3i(INDEX_op_ld8u_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld8s_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
-{
-    tcg_gen_op3i(INDEX_op_ld8s_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld16u_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
-{
-    tcg_gen_op3i(INDEX_op_ld16u_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld16s_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
-{
-    tcg_gen_op3i(INDEX_op_ld16s_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_ld_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
-{
-    tcg_gen_op3i(INDEX_op_ld_i32, ret, arg2, offset);
-}
-
-static inline void tcg_gen_st8_i32(TCGv arg1, TCGv arg2, tcg_target_long offset)
-{
-    tcg_gen_op3i(INDEX_op_st8_i32, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_st16_i32(TCGv arg1, TCGv arg2, tcg_target_long offset)
-{
-    tcg_gen_op3i(INDEX_op_st16_i32, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_st_i32(TCGv arg1, TCGv arg2, tcg_target_long offset)
-{
-    tcg_gen_op3i(INDEX_op_st_i32, arg1, arg2, offset);
-}
-
-static inline void tcg_gen_add_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_add_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_addi_i32(TCGv
-                                    ret, TCGv arg1, int32_t arg2)
+static inline void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_i32(INDEX_op_st8_i32, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_i32(INDEX_op_st16_i32, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_i32(INDEX_op_st_i32, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     /* some cases can be optimized here */
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv t0 = tcg_const_i32(arg2);
+        TCGv_i32 t0 = tcg_const_i32(arg2);
         tcg_gen_add_i32(ret, arg1, t0);
-        tcg_temp_free(t0);
+        tcg_temp_free_i32(t0);
     }
 }

-static inline void tcg_gen_sub_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_sub_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_subi_i32(TCGv ret, TCGv arg1, int32_t arg2)
+static inline void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0 = tcg_const_i32(arg1);
+    tcg_gen_sub_i32(ret, t0, arg2);
+    tcg_temp_free_i32(t0);
+}
+
+static inline void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     /* some cases can be optimized here */
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv t0 = tcg_const_i32(arg2);
+        TCGv_i32 t0 = tcg_const_i32(arg2);
         tcg_gen_sub_i32(ret, arg1, t0);
-        tcg_temp_free(t0);
+        tcg_temp_free_i32(t0);
     }
 }

-static inline void tcg_gen_and_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_and_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_andi_i32(TCGv ret, TCGv arg1, int32_t arg2)
+static inline void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     /* some cases can be optimized here */
…
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv t0 = tcg_const_i32(arg2);
+        TCGv_i32 t0 = tcg_const_i32(arg2);
         tcg_gen_and_i32(ret, arg1, t0);
-        tcg_temp_free(t0);
+        tcg_temp_free_i32(t0);
     }
 }

-static inline void tcg_gen_or_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_or_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_ori_i32(TCGv ret, TCGv arg1, int32_t arg2)
+static inline void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     /* some cases can be optimized here */
…
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv t0 = tcg_const_i32(arg2);
+        TCGv_i32 t0 = tcg_const_i32(arg2);
         tcg_gen_or_i32(ret, arg1, t0);
-        tcg_temp_free(t0);
+        tcg_temp_free_i32(t0);
     }
 }

-static inline void tcg_gen_xor_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_xor_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_xori_i32(TCGv ret, TCGv arg1, int32_t arg2)
+static inline void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     /* some cases can be optimized here */
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv t0 = tcg_const_i32(arg2);
+        TCGv_i32 t0 = tcg_const_i32(arg2);
         tcg_gen_xor_i32(ret, arg1, t0);
-        tcg_temp_free(t0);
+        tcg_temp_free_i32(t0);
     }
 }

-static inline void tcg_gen_shl_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_shl_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_shli_i32(TCGv ret, TCGv arg1, int32_t arg2)
+static inline void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_shl_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv t0 = tcg_const_i32(arg2);
+        TCGv_i32 t0 = tcg_const_i32(arg2);
         tcg_gen_shl_i32(ret, arg1, t0);
-        tcg_temp_free(t0);
+        tcg_temp_free_i32(t0);
     }
 }

-static inline void tcg_gen_shr_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_shr_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_shri_i32(TCGv ret, TCGv arg1, int32_t arg2)
+static inline void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv t0 = tcg_const_i32(arg2);
+        TCGv_i32 t0 = tcg_const_i32(arg2);
         tcg_gen_shr_i32(ret, arg1, t0);
-        tcg_temp_free(t0);
+        tcg_temp_free_i32(t0);
     }
 }

-static inline void tcg_gen_sar_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_sar_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_sari_i32(TCGv ret, TCGv arg1, int32_t arg2)
+static inline void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_sar_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
     } else {
-        TCGv t0 = tcg_const_i32(arg2);
+        TCGv_i32 t0 = tcg_const_i32(arg2);
         tcg_gen_sar_i32(ret, arg1, t0);
-        tcg_temp_free(t0);
+        tcg_temp_free_i32(t0);
     }
 }

-static inline void tcg_gen_brcond_i32(int cond, TCGv arg1, TCGv arg2,
+static inline void tcg_gen_brcond_i32(int cond, TCGv_i32 arg1, TCGv_i32 arg2,
                                       int label_index)
 {
-    tcg_gen_op4ii(INDEX_op_brcond_i32, arg1, arg2, cond, label_index);
-}
-
-static inline void tcg_gen_brcondi_i32(int cond, TCGv arg1, int32_t arg2,
+    tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_index);
+}
+
+static inline void tcg_gen_brcondi_i32(int cond, TCGv_i32 arg1, int32_t arg2,
                                        int label_index)
 {
-    TCGv t0 = tcg_const_i32(arg2);
+    TCGv_i32 t0 = tcg_const_i32(arg2);
     tcg_gen_brcond_i32(cond, arg1, t0, label_index);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_mul_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_mul_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_muli_i32(TCGv ret, TCGv arg1, int32_t arg2)
-{
-    TCGv t0 = tcg_const_i32(arg2);
+    tcg_temp_free_i32(t0);
+}
+
+static inline void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_mul_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
+{
+    TCGv_i32 t0 = tcg_const_i32(arg2);
     tcg_gen_mul_i32(ret, arg1, t0);
-    tcg_temp_free(t0);
+    tcg_temp_free_i32(t0);
 }

 #ifdef TCG_TARGET_HAS_div_i32
-static inline void tcg_gen_div_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_div_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_rem_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_rem_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_divu_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_divu_i32, ret, arg1, arg2);
-}
-
-static inline void tcg_gen_remu_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    tcg_gen_op3(INDEX_op_remu_i32, ret, arg1, arg2);
-}
-#else
-static inline void tcg_gen_div_i32(TCGv ret, TCGv arg1, TCGv arg2)
-{
-    TCGv t0;
-    t0 = tcg_temp_new(TCG_TYPE_I32);
+static inline void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
+}
+#else
+static inline void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0;
+    t0 = tcg_temp_new_i32();
     tcg_gen_sari_i32(t0, arg1, 31);
-    tcg_gen_op5(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_rem_i32(TCGv ret, TCGv arg1, TCGv arg2)
+    tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
+    tcg_temp_free_i32(t0);
+}
+
+static inline void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
#ifdef TCG_TARGET_HAS_div_i32
static inline void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
}

(rem, divu and remu take the same one-line form with INDEX_op_rem_i32, INDEX_op_divu_i32 and INDEX_op_remu_i32.)
#else
static inline void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_sari_i32(t0, arg1, 31);
    tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
    tcg_temp_free_i32(t0);
}

static inline void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_sari_i32(t0, arg1, 31);
    tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
    tcg_temp_free_i32(t0);
}

(divu and remu mirror these two, but seed the high word with tcg_gen_movi_i32(t0, 0) and emit INDEX_op_divu2_i32.)
#endif

…

#if TCG_TARGET_REG_BITS == 32

static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (GET_TCGV_I64(ret) != GET_TCGV_I64(arg)) {
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
    }
}

static inline void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
{
    tcg_gen_movi_i32(TCGV_LOW(ret), arg);
    tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
}

static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2,
                                    tcg_target_long offset)
{
    tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

static inline void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2,
                                    tcg_target_long offset)
{
    tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), 31);
}

static inline void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2,
                                     tcg_target_long offset)
{
    tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

static inline void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2,
                                     tcg_target_long offset)
{
    tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}
static inline void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2,
                                     tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

static inline void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2,
                                     tcg_target_long offset)
{
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2,
                                  tcg_target_long offset)
{
    /* since arg2 and ret have different types, they cannot be the
       same temporary */
#ifdef TCG_TARGET_WORDS_BIGENDIAN
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
#else
    tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
    tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
#endif
}

static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2,
                                   tcg_target_long offset)
{
    tcg_gen_st8_i32(TCGV_LOW(arg1), arg2, offset);
}

(st16_i64 and st32_i64 store TCGV_LOW(arg1) the same way via tcg_gen_st16_i32 and tcg_gen_st_i32.)

static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2,
                                  tcg_target_long offset)
{
#ifdef TCG_TARGET_WORDS_BIGENDIAN
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
#else
    tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
    tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
#endif
}

static inline void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op6_i32(INDEX_op_add2_i32, TCGV_LOW(ret), TCGV_HIGH(ret),
                    TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2),
                    TCGV_HIGH(arg2));
}

static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op6_i32(INDEX_op_sub2_i32, TCGV_LOW(ret), TCGV_HIGH(ret),
                    TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2),
                    TCGV_HIGH(arg2));
}

-static inline void tcg_gen_addi_i64(TCGv ret, TCGv arg1, int64_t arg2)
-{
-    TCGv t0 = tcg_const_i64(arg2);
-    tcg_gen_add_i64(ret, arg1, t0);
-    tcg_temp_free(t0);
-}
-
-static inline void tcg_gen_subi_i64(TCGv ret, TCGv arg1, int64_t arg2)
-{
-    TCGv t0 = tcg_const_i64(arg2);
-    tcg_gen_sub_i64(ret, arg1, t0);
-    tcg_temp_free(t0);
-}

(The immediate add/sub variants are deleted here; they reappear once, in a common section after both host-width branches.)
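On 32-bit hosts a TCGv_i64 is simply a pair of TCGv_i32 halves, which is why every access above splits into TCGV_LOW/TCGV_HIGH operations at offset and offset + 4. A plain C model of the little-endian load case (a reference sketch, not code from the patch):

    #include <stdint.h>
    /* Host-side model of the little-endian branch of tcg_gen_ld_i64:
     * two adjacent 32-bit loads (offset and offset + 4) form one value. */
    static uint64_t ld_i64_as_pair(const uint32_t *p)
    {
        uint32_t low  = p[0];   /* TCGV_LOW(ret)  */
        uint32_t high = p[1];   /* TCGV_HIGH(ret) */
        return ((uint64_t)high << 32) | low;
    }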
static inline void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
    tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
}

static inline void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
    tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
}

(or/ori and xor/xori apply exactly the same low/high split with their respective 32-bit ops.)

/* XXX: use generic code when basic block handling is OK or CPU
   specific code (x86) */
static inline void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_helper64(tcg_helper_shl_i64, ret, arg1, arg2);
}

static inline void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
}

static inline void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_helper64(tcg_helper_shr_i64, ret, arg1, arg2);
}
static inline void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
}

static inline void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_helper64(tcg_helper_sar_i64, ret, arg1, arg2);
}

static inline void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
}

static inline void tcg_gen_brcond_i64(int cond, TCGv_i64 arg1, TCGv_i64 arg2,
                                      int label_index)
{
    tcg_gen_op6ii_i32(INDEX_op_brcond2_i32,
                      TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2),
                      TCGV_HIGH(arg2), cond, label_index);
}

static inline void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0;
    TCGv_i32 t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i32();

    tcg_gen_op4_i32(INDEX_op_mulu2_i32, TCGV_LOW(t0), TCGV_HIGH(t0),
                    TCGV_LOW(arg1), TCGV_LOW(arg2));

    tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
    tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2));
    tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);

    tcg_gen_mov_i64(ret, t0);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i32(t1);
}

(The old tcg_gen_muli_i64 is removed from this spot; like addi/subi it is re-added once in the common section below.)
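tcg_gen_mul_i64 keeps only the low 64 bits of the product, so a single 32x32->64 mulu2 plus two truncating 32x32 multiplies suffice; the high*high cross term can never reach the low half. The same arithmetic in portable C, as a reference model:

    #include <stdint.h>
    /* Reference model of the 64x64 -> 64 multiply built from 32-bit parts. */
    static uint64_t mul64_from_halves(uint64_t a, uint64_t b)
    {
        uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
        uint32_t bl = (uint32_t)b, bh = (uint32_t)(b >> 32);
        uint64_t t0 = (uint64_t)al * bl;      /* INDEX_op_mulu2_i32 */
        uint32_t hi = (uint32_t)(t0 >> 32);
        hi += al * bh;                        /* first cross term, truncating */
        hi += ah * bl;                        /* second cross term, truncating */
        return ((uint64_t)hi << 32) | (uint32_t)t0;
    }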
static inline void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_helper64(tcg_helper_div_i64, ret, arg1, arg2);
}

(rem, divu and remu likewise move from tcg_gen_helper_1_2 to tcg_gen_helper64 with tcg_helper_rem_i64, tcg_helper_divu_i64 and tcg_helper_remu_i64.)

#else

static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    if (GET_TCGV_I64(ret) != GET_TCGV_I64(arg))
        tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg);
}

static inline void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
{
    tcg_gen_op2i_i64(INDEX_op_movi_i64, ret, arg);
}

static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_i64 arg2,
                                    tcg_target_long offset)
{
    tcg_gen_ldst_op_i64(INDEX_op_ld8u_i64, ret, arg2, offset);
}

(ld8s, ld16u, ld16s, ld32u, ld32s and ld_i64 take the same form, each switching from tcg_gen_op3i to tcg_gen_ldst_op_i64 with its own opcode.)

static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_i64 arg2,
                                   tcg_target_long offset)
{
    tcg_gen_ldst_op_i64(INDEX_op_st8_i64, arg1, arg2, offset);
}
static inline void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_i64 arg2,
                                    tcg_target_long offset)
{
    tcg_gen_ldst_op_i64(INDEX_op_st16_i64, arg1, arg2, offset);
}

(st32_i64 and st_i64 follow suit with INDEX_op_st32_i64 and INDEX_op_st_i64.)

static inline void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2);
}

static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_sub_i64, ret, arg1, arg2);
}

(Here, too, the old addi_i64/subi_i64 wrappers are dropped in favour of the shared versions further down.)

static inline void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
}

static inline void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_and_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

static inline void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
}

static inline void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_or_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}
static inline void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
}

static inline void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    TCGv_i64 t0 = tcg_const_i64(arg2);
    tcg_gen_xor_i64(ret, arg1, t0);
    tcg_temp_free_i64(t0);
}

static inline void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_shl_i64, ret, arg1, arg2);
}

static inline void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    if (arg2 == 0) {
        tcg_gen_mov_i64(ret, arg1);
    } else {
        TCGv_i64 t0 = tcg_const_i64(arg2);
        tcg_gen_shl_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
    }
}

(shr/shri and sar/sari are identical apart from INDEX_op_shr_i64 and INDEX_op_sar_i64.)

static inline void tcg_gen_brcond_i64(int cond, TCGv_i64 arg1, TCGv_i64 arg2,
                                      int label_index)
{
    tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, label_index);
}

static inline void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_mul_i64, ret, arg1, arg2);
}

#ifdef TCG_TARGET_HAS_div_i64
static inline void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
}

static inline void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
}
static inline void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
}

static inline void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
}
#else
static inline void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_sari_i64(t0, arg1, 63);
    tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
    tcg_temp_free_i64(t0);
}

static inline void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_sari_i64(t0, arg1, 63);
    tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
    tcg_temp_free_i64(t0);
}

(divu and remu again use tcg_gen_movi_i64(t0, 0) and INDEX_op_divu2_i64.)
#endif

#endif

+static inline void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+{
+    /* some cases can be optimized here */
+    if (arg2 == 0) {
+        tcg_gen_mov_i64(ret, arg1);
+    } else {
+        TCGv_i64 t0 = tcg_const_i64(arg2);
+        tcg_gen_add_i64(ret, arg1, t0);
+        tcg_temp_free_i64(t0);
+    }
+}
+
+static inline void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2)
+{
+    TCGv_i64 t0 = tcg_const_i64(arg1);
+    tcg_gen_sub_i64(ret, t0, arg2);
+    tcg_temp_free_i64(t0);
+}
+
+static inline void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+{
+    /* some cases can be optimized here */
+    if (arg2 == 0) {
+        tcg_gen_mov_i64(ret, arg1);
+    } else {
+        TCGv_i64 t0 = tcg_const_i64(arg2);
+        tcg_gen_sub_i64(ret, arg1, t0);
+        tcg_temp_free_i64(t0);
+    }
+}
+
+static inline void tcg_gen_brcondi_i64(int cond, TCGv_i64 arg1, int64_t arg2,
+                                       int label_index)
+{
+    TCGv_i64 t0 = tcg_const_i64(arg2);
+    tcg_gen_brcond_i64(cond, arg1, t0, label_index);
+    tcg_temp_free_i64(t0);
+}
+
+static inline void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
+{
+    TCGv_i64 t0 = tcg_const_i64(arg2);
+    tcg_gen_mul_i64(ret, arg1, t0);
+    tcg_temp_free_i64(t0);
+}
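tcg_gen_subfi_i64 (subtract from immediate) is new in this import; it exists mainly so the rotate helpers further down can compute "width minus shift" in one call. A hedged usage sketch (the wrapper name is invented for the example):

    /* Invented helper name, shown only to illustrate the new subfi op:
     * computes dst = 64 - amount, the complementary shift count the
     * rotate helpers need. */
    static void gen_width_minus_shift(TCGv_i64 dst, TCGv_i64 amount)
    {
        tcg_gen_subfi_i64(dst, 64, amount);
    }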
-static inline void tcg_gen_brcondi_i64(int cond, TCGv arg1, int64_t arg2,
-                                       int label_index)
-{
-    TCGv t0 = tcg_const_i64(arg2);
-    tcg_gen_brcond_i64(cond, arg1, t0, label_index);
-    tcg_temp_free(t0);
-}

(The untyped div2_i64 fallbacks and brcondi_i64 that used to sit here are deleted; their typed replacements appear above.)

/***************************************/
/* optional operations */

static inline void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
#ifdef TCG_TARGET_HAS_ext8s_i32
    tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
#else
    tcg_gen_shli_i32(ret, arg, 24);
    tcg_gen_sari_i32(ret, ret, 24);
#endif
}

static inline void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
{
#ifdef TCG_TARGET_HAS_ext16s_i32
    tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
#else
    tcg_gen_shli_i32(ret, arg, 16);
    tcg_gen_sari_i32(ret, ret, 16);
#endif
}
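Where the backend lacks a native ext8s/ext16s op, sign extension is synthesized as a left shift that parks the sign bit in bit 31 followed by an arithmetic right shift. The C equivalent of the 8-bit fallback (relying, like the generated code, on arithmetic right shifts of signed values):

    #include <stdint.h>
    /* C equivalent of the shl/sar fallback used when
     * TCG_TARGET_HAS_ext8s_i32 is absent. */
    static int32_t ext8s_by_shifts(int32_t v)
    {
        return (int32_t)((uint32_t)v << 24) >> 24;  /* shift up, then back down */
    }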
/* These are currently just for convenience.
   We assume a target will recognise these automatically. */
static inline void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    tcg_gen_andi_i32(ret, arg, 0xffu);
}

static inline void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    tcg_gen_andi_i32(ret, arg, 0xffffu);
}

/* Note: we assume the two high bytes are set to zero */
static inline void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg)
{
#ifdef TCG_TARGET_HAS_bswap16_i32
    tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg);
#else
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_shri_i32(t0, arg, 8);
    …
    tcg_gen_shli_i32(t1, t1, 8);
    tcg_gen_or_i32(ret, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
#endif
}

static inline void tcg_gen_bswap_i32(TCGv_i32 ret, TCGv_i32 arg)
{
#ifdef TCG_TARGET_HAS_bswap_i32
    tcg_gen_op2_i32(INDEX_op_bswap_i32, ret, arg);
#else
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_shli_i32(t0, arg, 24);
    …
    tcg_gen_shri_i32(t1, arg, 24);
    tcg_gen_or_i32(ret, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
#endif
}
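The bswap fallback is built purely from shifts, masks and ORs. A C reference model of the 32-bit swap (the elided middle steps reconstructed from the visible shape; treat it as an illustration rather than the literal VBox code):

    #include <stdint.h>
    /* Reference model of the shift/mask byte swap emitted when the
     * backend has no INDEX_op_bswap_i32. */
    static uint32_t bswap32_by_shifts(uint32_t x)
    {
        uint32_t t0, t1;
        t0 = x << 24;                    /* byte 0 -> byte 3 */
        t1 = (x & 0x0000ff00u) << 8;     /* byte 1 -> byte 2 */
        t0 |= t1;
        t1 = (x >> 8) & 0x0000ff00u;     /* byte 2 -> byte 1 */
        t0 |= t1;
        t0 |= x >> 24;                   /* byte 3 -> byte 0 */
        return t0;
    }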
#if TCG_TARGET_REG_BITS == 32
static inline void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

static inline void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

static inline void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

static inline void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

(ext16u_i64 and ext32u_i64 do the same with tcg_gen_ext16u_i32 and tcg_gen_mov_i32 on the low half.)

static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
    tcg_gen_mov_i32(ret, TCGV_LOW(arg));
}

static inline void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    tcg_gen_mov_i32(TCGV_LOW(ret), arg);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}

static inline void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    tcg_gen_mov_i32(TCGV_LOW(ret), arg);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
}

static inline void tcg_gen_bswap_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_bswap_i32(t0, TCGV_LOW(arg));
    tcg_gen_bswap_i32(t1, TCGV_HIGH(arg));
    tcg_gen_mov_i32(TCGV_LOW(ret), t1);
    tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}
#else

static inline void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
{
#ifdef TCG_TARGET_HAS_ext8s_i64
    tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
#else
    tcg_gen_shli_i64(ret, arg, 56);
    tcg_gen_sari_i64(ret, ret, 56);
#endif
}

(ext16s_i64 and ext32s_i64 are identical but shift by 48 and 32, guarded by their own TCG_TARGET_HAS_* macros; ext8u_i64 masks with 0xffu via tcg_gen_andi_i64.)
static inline void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_andi_i64(ret, arg, 0xffffu);
}

static inline void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_andi_i64(ret, arg, 0xffffffffu);
}

/* Note: we assume the target supports move between 32 and 64 bit
   registers. This will probably break MIPS64 targets. */
static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
{
    tcg_gen_mov_i32(ret, MAKE_TCGV_I32(GET_TCGV_I64(arg)));
}

/* Note: we assume the target supports move between 32 and 64 bit
   registers */
static inline void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    tcg_gen_andi_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)), 0xffffffffu);
}

/* Note: we assume the target supports move between 32 and 64 bit
   registers */
static inline void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
{
    tcg_gen_ext32s_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)));
}
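On 64-bit hosts a 32-bit and a 64-bit temporary can name the same register, so the conversions above are just MAKE_TCGV_* reinterpretations plus a mask or sign-extension. In C terms, tcg_gen_extu_i32_i64 amounts to:

    #include <stdint.h>
    /* What tcg_gen_extu_i32_i64 boils down to on a 64-bit host:
     * reinterpret the register index, then clear the upper half. */
    static uint64_t extu_i32_i64_model(uint32_t v)
    {
        return (uint64_t)v & 0xffffffffu;
    }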
static inline void tcg_gen_bswap_i64(TCGv_i64 ret, TCGv_i64 arg)
{
#ifdef TCG_TARGET_HAS_bswap_i64
    tcg_gen_op2_i64(INDEX_op_bswap_i64, ret, arg);
#else
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();

    tcg_gen_shli_i64(t0, arg, 56);
    …
    tcg_gen_shri_i64(t1, arg, 56);
    tcg_gen_or_i64(ret, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
#endif
}

#endif

static inline void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
{
#ifdef TCG_TARGET_HAS_neg_i32
    tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg);
#else
    TCGv_i32 t0 = tcg_const_i32(0);
    tcg_gen_sub_i32(ret, t0, arg);
    tcg_temp_free_i32(t0);
#endif
}

static inline void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg)
{
#ifdef TCG_TARGET_HAS_neg_i64
    tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg);
#else
    TCGv_i64 t0 = tcg_const_i64(0);
    tcg_gen_sub_i64(ret, t0, arg);
    tcg_temp_free_i64(t0);
#endif
}

static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
{
    tcg_gen_xori_i32(ret, arg, -1);
}

static inline void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
{
    tcg_gen_xori_i64(ret, arg, -1);
}

static inline void tcg_gen_discard_i32(TCGv_i32 arg)
{
    tcg_gen_op1_i32(INDEX_op_discard, arg);
}

#if TCG_TARGET_REG_BITS == 32
static inline void tcg_gen_discard_i64(TCGv_i64 arg)
{
    tcg_gen_discard_i32(TCGV_LOW(arg));
    tcg_gen_discard_i32(TCGV_HIGH(arg));
}
#else
static inline void tcg_gen_discard_i64(TCGv_i64 arg)
{
    tcg_gen_op1_i64(INDEX_op_discard, arg);
}
#endif

static inline void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
{
#if TCG_TARGET_REG_BITS == 32
    tcg_gen_mov_i32(TCGV_LOW(dest), low);
    tcg_gen_mov_i32(TCGV_HIGH(dest), high);
#else
    TCGv_i64 tmp = tcg_temp_new_i64();
    /* This extension is only needed for type correctness.
       We may be able to do better given target specific information. */
    …
    tcg_gen_extu_i32_i64(dest, low);
    tcg_gen_or_i64(dest, dest, tmp);
    tcg_temp_free_i64(tmp);
#endif
}

static inline void tcg_gen_concat32_i64(TCGv_i64 dest, TCGv_i64 low, TCGv_i64 high)
{
#if TCG_TARGET_REG_BITS == 32
    tcg_gen_concat_i32_i64(dest, TCGV_LOW(low), TCGV_LOW(high));
#else
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(dest, low);
    tcg_gen_shli_i64(tmp, high, 32);
    tcg_gen_or_i64(dest, dest, tmp);
    tcg_temp_free_i64(tmp);
#endif
}

+static inline void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0;
+    t0 = tcg_temp_new_i32();
+    tcg_gen_not_i32(t0, arg2);
+    tcg_gen_and_i32(ret, arg1, t0);
+    tcg_temp_free_i32(t0);
+}
+
+static inline void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0;
+    t0 = tcg_temp_new_i32();
+    tcg_gen_xor_i32(t0, arg1, arg2);
+    tcg_gen_not_i32(ret, t0);
+    tcg_temp_free_i32(t0);
+}
+
+(andc_i64 and eqv_i64 are the same compositions in _i64 form.)
+static inline void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0;
+    t0 = tcg_temp_new_i32();
+    tcg_gen_and_i32(t0, arg1, arg2);
+    tcg_gen_not_i32(ret, t0);
+    tcg_temp_free_i32(t0);
+}
+
+(nor composes or-then-not and orc composes not-arg2-then-or; each comes in _i32 and _i64 form.)
+
+static inline void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0, t1;
+
+    t0 = tcg_temp_new_i32();
+    t1 = tcg_temp_new_i32();
+    tcg_gen_shl_i32(t0, arg1, arg2);
+    tcg_gen_subfi_i32(t1, 32, arg2);
+    tcg_gen_shr_i32(t1, arg1, t1);
+    tcg_gen_or_i32(ret, t0, t1);
+    tcg_temp_free_i32(t0);
+    tcg_temp_free_i32(t1);
+}
+
+(rotl_i64 is identical with 64-bit temps and tcg_gen_subfi_i64(t1, 64, arg2).)
+
+static inline void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
+{
+    /* some cases can be optimized here */
+    if (arg2 == 0) {
+        tcg_gen_mov_i32(ret, arg1);
+    } else {
+        TCGv_i32 t0, t1;
+        t0 = tcg_temp_new_i32();
+        t1 = tcg_temp_new_i32();
+        tcg_gen_shli_i32(t0, arg1, arg2);
+        tcg_gen_shri_i32(t1, arg1, 32 - arg2);
+        tcg_gen_or_i32(ret, t0, t1);
+        tcg_temp_free_i32(t0);
+        tcg_temp_free_i32(t1);
+    }
+}
+
+(rotli_i64 shifts by arg2 and 64 - arg2.)
+
+static inline void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    TCGv_i32 t0, t1;
+
+    t0 = tcg_temp_new_i32();
+    t1 = tcg_temp_new_i32();
+    tcg_gen_shr_i32(t0, arg1, arg2);
+    tcg_gen_subfi_i32(t1, 32, arg2);
+    tcg_gen_shl_i32(t1, arg1, t1);
+    tcg_gen_or_i32(ret, t0, t1);
+    tcg_temp_free_i32(t0);
+    tcg_temp_free_i32(t1);
+}
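Each rotate is two shifts plus an OR, with subfi supplying the complementary count. Note that tcg_gen_rotr_i64, as imported just below, emits tcg_gen_shl_i64 twice where the i32 variant uses shr-then-shl; that reads like an upstream slip. For reference, the intended rotate-right in C:

    #include <stdint.h>
    /* Reference rotate-right; n is assumed to be in 1..63 here, mirroring
     * the arg2 == 0 special case the inline helpers handle separately. */
    static uint64_t rotr64(uint64_t x, unsigned n)
    {
        return (x >> n) | (x << (64 - n));
    }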
+static inline void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+    TCGv_i64 t0, t1;
+
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
+    tcg_gen_shl_i64(t0, arg1, arg2);
+    tcg_gen_subfi_i64(t1, 64, arg2);
+    tcg_gen_shl_i64(t1, arg1, t1);
+    tcg_gen_or_i64(ret, t0, t1);
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+}
+
+static inline void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
+{
+    /* some cases can be optimized here */
+    if (arg2 == 0) {
+        tcg_gen_mov_i32(ret, arg1);
+    } else {
+        tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
+    }
+}
+
+(rotri_i64 likewise forwards to tcg_gen_rotli_i64 with 64 - arg2.)

…
#endif

+#if TARGET_LONG_BITS == 32
+#define TCGv TCGv_i32
+#define tcg_temp_new() tcg_temp_new_i32()
+#define tcg_global_reg_new tcg_global_reg_new_i32
+#define tcg_global_mem_new tcg_global_mem_new_i32
+#define tcg_temp_local_new() tcg_temp_local_new_i32()
+#define tcg_temp_free tcg_temp_free_i32
+#define tcg_gen_qemu_ldst_op tcg_gen_op3i_i32
+#define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i32
+#define TCGV_UNUSED(x) TCGV_UNUSED_I32(x)
+#define TCGV_EQUAL(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
+#else
+#define TCGv TCGv_i64
+#define tcg_temp_new() tcg_temp_new_i64()
+#define tcg_global_reg_new tcg_global_reg_new_i64
+#define tcg_global_mem_new tcg_global_mem_new_i64
+#define tcg_temp_local_new() tcg_temp_local_new_i64()
+#define tcg_temp_free tcg_temp_free_i64
+#define tcg_gen_qemu_ldst_op tcg_gen_op3i_i64
+#define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i64
+#define TCGV_UNUSED(x) TCGV_UNUSED_I64(x)
+#define TCGV_EQUAL(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
+#endif
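The new #define block keeps the historical TCGv spelling alive: depending on TARGET_LONG_BITS it expands to the _i32 or _i64 flavour, so existing target front ends compile unchanged. For a 32-bit guest, for example, this fragment (illustrative only):

    TCGv t = tcg_temp_new();      /* guest-word-sized temporary       */
    tcg_gen_movi_tl(t, 0x1234);   /* expands to tcg_gen_movi_i32 here */
    tcg_temp_free(t);

expands entirely into the _i32 helpers, while a 64-bit guest gets the _i64 ones from the same source.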
/* debug info: write the PC of the corresponding QEMU CPU instruction */
static inline void tcg_gen_debug_insn_start(uint64_t pc)
…

static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
    tcg_gen_op3i_i32(INDEX_op_qemu_ld8u, ret, addr, mem_index);
#else
    tcg_gen_op4i_i32(INDEX_op_qemu_ld8u, TCGV_LOW(ret), TCGV_LOW(addr),
                     TCGV_HIGH(addr), mem_index);
    tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
#endif
}

static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
    tcg_gen_op3i_i32(INDEX_op_qemu_ld8s, ret, addr, mem_index);
#else
    tcg_gen_op4i_i32(INDEX_op_qemu_ld8s, TCGV_LOW(ret), TCGV_LOW(addr),
                     TCGV_HIGH(addr), mem_index);
    tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
#endif
}

(ld16u, ld16s and ld32u follow the same pattern; ld32s also issues INDEX_op_qemu_ld32u here, sign-extending the high half afterwards.)

static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
    tcg_gen_op4i_i32(INDEX_op_qemu_ld64, TCGV_LOW(ret), TCGV_HIGH(ret), addr, mem_index);
#else
    tcg_gen_op5i_i32(INDEX_op_qemu_ld64, TCGV_LOW(ret), TCGV_HIGH(ret),
                     TCGV_LOW(addr), TCGV_HIGH(addr), mem_index);
#endif
}

static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
    tcg_gen_op3i_i32(INDEX_op_qemu_st8, arg, addr, mem_index);
#else
    tcg_gen_op4i_i32(INDEX_op_qemu_st8, TCGV_LOW(arg), TCGV_LOW(addr),
                     TCGV_HIGH(addr), mem_index);
#endif
}

(st16 and st32 are identical apart from the opcode.)
static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
    tcg_gen_op4i_i32(INDEX_op_qemu_st64, TCGV_LOW(arg), TCGV_HIGH(arg), addr,
                     mem_index);
#else
    tcg_gen_op5i_i32(INDEX_op_qemu_st64, TCGV_LOW(arg), TCGV_HIGH(arg),
                     TCGV_LOW(addr), TCGV_HIGH(addr), mem_index);
#endif
}

…

static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
{
    tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld8u, ret, addr, mem_index);
}

(On 64-bit hosts ld8s, ld16u, ld16s, ld32u, ld32s, st8, st16 and st32 now all route through tcg_gen_qemu_ldst_op the same way, while the 64-bit accessors use tcg_gen_qemu_ldst_op_i64:)

static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
{
    tcg_gen_qemu_ldst_op_i64(INDEX_op_qemu_ld64, ret, addr, mem_index);
}

static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
{
    tcg_gen_qemu_ldst_op_i64(INDEX_op_qemu_st64, arg, addr, mem_index);
}

…

#define tcg_gen_sub_tl tcg_gen_sub_i64
#define tcg_gen_neg_tl tcg_gen_neg_i64
+#define tcg_gen_subfi_tl tcg_gen_subfi_i64
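With the new ops mirrored as _tl aliases below, a front end can emit guest-word-sized rotates and friends without checking TARGET_LONG_BITS itself, e.g. (cpu_T0 being an assumed front-end global of type TCGv):

    tcg_gen_rotli_tl(cpu_T0, cpu_T0, 13);   /* rotl by 13, i32 or i64 as needed */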
1650 1940 #define tcg_gen_subi_tl tcg_gen_subi_i64 1651 1941 #define tcg_gen_and_tl tcg_gen_and_i64 … … 1680 1970 #define tcg_gen_ext32s_tl tcg_gen_ext32s_i64 1681 1971 #define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64 1972 #define tcg_gen_andc_tl tcg_gen_andc_i64 1973 #define tcg_gen_eqv_tl tcg_gen_eqv_i64 1974 #define tcg_gen_nand_tl tcg_gen_nand_i64 1975 #define tcg_gen_nor_tl tcg_gen_nor_i64 1976 #define tcg_gen_orc_tl tcg_gen_orc_i64 1977 #define tcg_gen_rotl_tl tcg_gen_rotl_i64 1978 #define tcg_gen_rotli_tl tcg_gen_rotli_i64 1979 #define tcg_gen_rotr_tl tcg_gen_rotr_i64 1980 #define tcg_gen_rotri_tl tcg_gen_rotri_i64 1682 1981 #define tcg_const_tl tcg_const_i64 1982 #define tcg_const_local_tl tcg_const_local_i64 1683 1983 #else 1684 1984 #define TCG_TYPE_TL TCG_TYPE_I32 … … 1700 2000 #define tcg_gen_sub_tl tcg_gen_sub_i32 1701 2001 #define tcg_gen_neg_tl tcg_gen_neg_i32 2002 #define tcg_gen_subfi_tl tcg_gen_subfi_i32 1702 2003 #define tcg_gen_subi_tl tcg_gen_subi_i32 1703 2004 #define tcg_gen_and_tl tcg_gen_and_i32 … … 1732 2033 #define tcg_gen_ext32s_tl tcg_gen_mov_i32 1733 2034 #define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64 2035 #define tcg_gen_andc_tl tcg_gen_andc_i32 2036 #define tcg_gen_eqv_tl tcg_gen_eqv_i32 2037 #define tcg_gen_nand_tl tcg_gen_nand_i32 2038 #define tcg_gen_nor_tl tcg_gen_nor_i32 2039 #define tcg_gen_orc_tl tcg_gen_orc_i32 2040 #define tcg_gen_rotl_tl tcg_gen_rotl_i32 2041 #define tcg_gen_rotli_tl tcg_gen_rotli_i32 2042 #define tcg_gen_rotr_tl tcg_gen_rotr_i32 2043 #define tcg_gen_rotri_tl tcg_gen_rotri_i32 1734 2044 #define tcg_const_tl tcg_const_i32 2045 #define tcg_const_local_tl tcg_const_local_i32 1735 2046 #endif 1736 2047 … … 1744 2055 #define tcg_gen_ext_i32_ptr tcg_gen_ext_i32_i64 1745 2056 #endif /* TCG_TARGET_REG_BITS != 32 */ 1746 -
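Editor's note: the tcg-op.h hunks above switch the 64-bit load/store generators to name both 32-bit halves explicitly (TCGV_LOW/TCGV_HIGH) instead of relying on adjacent register indices, and route the sub-64-bit loads/stores through tcg_gen_qemu_ldst_op; the _tl macros then alias each target-long op to its _i32 or _i64 form. A minimal C sketch of the half-splitting the 32-bit-host path depends on (plain C stand-in, not the TCG API):

#include <stdint.h>
#include <stdio.h>

/* How a 64-bit store operand travels as two 32-bit halves on a 32-bit
   host, mirroring the TCGV_LOW/TCGV_HIGH split in the hunk above. */
static uint32_t low32(uint64_t v)  { return (uint32_t)v; }
static uint32_t high32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
    uint64_t arg = 0x1122334455667788ULL;
    /* A 32-bit backend sees qemu_st64 as two 32-bit inputs plus address. */
    printf("low=%08x high=%08x\n", (unsigned)low32(arg), (unsigned)high32(arg));
    return 0;
}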
trunk/src/recompiler/tcg/tcg-opc.h
r29520 r36170 22 22 * THE SOFTWARE. 23 23 */ 24 25 #ifdef CONFIG_DYNGEN_OP26 #include "dyngen-opc.h"27 #endif28 29 24 #ifndef DEF2 30 25 #define DEF2(name, oargs, iargs, cargs, flags) DEF(name, oargs + iargs + cargs, 0) -
trunk/src/recompiler/tcg/tcg-runtime.c
r36140 r36170 22 22 * THE SOFTWARE. 23 23 */ 24 25 24 #include <stdarg.h> 26 25 #include <stdlib.h> … … 31 30 #include "config.h" 32 31 #include "osdep.h" 32 #include "cpu.h" // For TARGET_LONG_BITS 33 33 #include "tcg.h" 34 34 … … 67 67 return arg1 % arg2; 68 68 } 69 -
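Editor's note: tcg-runtime.c now pulls in cpu.h for TARGET_LONG_BITS; the visible tail ("return arg1 % arg2;") belongs to the C fallback helpers used when the host backend has no native 64-bit divide. A sketch of that helper shape, with names and signatures assumed from the fragment rather than copied from the file:

#include <stdint.h>

/* Hypothetical helper pair modeled on the fragment above; real names in
   tcg-runtime.c may differ. Callers are expected to guard arg2 == 0. */
int64_t helper_div_i64(int64_t arg1, int64_t arg2) { return arg1 / arg2; }
int64_t helper_rem_i64(int64_t arg1, int64_t arg2) { return arg1 % arg2; }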
trunk/src/recompiler/tcg/tcg.c
r36140 r36170 43 43 #include <malloc.h> 44 44 #endif 45 #ifdef _AIX 46 #include <alloca.h> 47 #endif 45 48 46 49 #include "config.h" 47 50 #include "qemu-common.h" 51 #include "cache-utils.h" 48 52 49 53 /* Note: the long term plan is to reduce the dependancies on the QEMU … … 73 77 tcg_target_long value, tcg_target_long addend); 74 78 75 TCGOpDef tcg_op_defs[] = {79 static TCGOpDef tcg_op_defs[] = { 76 80 #define DEF(s, n, copy_size) { #s, 0, 0, n, n, 0, copy_size }, 77 81 #ifndef VBOX … … 85 89 }; 86 90 87 TCGRegSet tcg_target_available_regs[2];88 TCGRegSet tcg_target_call_clobber_regs;91 static TCGRegSet tcg_target_available_regs[2]; 92 static TCGRegSet tcg_target_call_clobber_regs; 89 93 90 94 /* XXX: move that inside the context */ … … 289 293 } 290 294 291 TCGv tcg_global_reg_new(TCGType type, int reg, const char *name) 295 static inline int tcg_global_reg_new_internal(TCGType type, int reg, 296 const char *name) 292 297 { 293 298 TCGContext *s = &tcg_ctx; … … 311 316 s->nb_globals++; 312 317 tcg_regset_set_reg(s->reserved_regs, reg); 313 return MAKE_TCGV(idx); 314 } 315 316 #if TCG_TARGET_REG_BITS == 32 317 /* temporary hack to avoid register shortage for tcg_qemu_st64() */ 318 TCGv tcg_global_reg2_new_hack(TCGType type, int reg1, int reg2, 319 const char *name) 320 { 321 TCGContext *s = &tcg_ctx; 322 TCGTemp *ts; 318 return idx; 319 } 320 321 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name) 322 { 323 323 int idx; 324 char buf[64]; 325 326 if (type != TCG_TYPE_I64) 327 tcg_abort(); 328 idx = s->nb_globals; 329 tcg_temp_alloc(s, s->nb_globals + 2); 330 ts = &s->temps[s->nb_globals]; 331 ts->base_type = type; 332 ts->type = TCG_TYPE_I32; 333 ts->fixed_reg = 1; 334 ts->reg = reg1; 335 pstrcpy(buf, sizeof(buf), name); 336 pstrcat(buf, sizeof(buf), "_0"); 337 ts->name = strdup(buf); 338 339 ts++; 340 ts->base_type = type; 341 ts->type = TCG_TYPE_I32; 342 ts->fixed_reg = 1; 343 ts->reg = reg2; 344 pstrcpy(buf, sizeof(buf), name); 345 pstrcat(buf, sizeof(buf), "_1"); 346 ts->name = strdup(buf); 347 348 s->nb_globals += 2; 349 return MAKE_TCGV(idx); 350 } 351 #endif 352 353 TCGv tcg_global_mem_new(TCGType type, int reg, tcg_target_long offset, 354 const char *name) 324 325 idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name); 326 return MAKE_TCGV_I32(idx); 327 } 328 329 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name) 330 { 331 int idx; 332 333 idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name); 334 return MAKE_TCGV_I64(idx); 335 } 336 337 static inline int tcg_global_mem_new_internal(TCGType type, int reg, 338 tcg_target_long offset, 339 const char *name) 355 340 { 356 341 TCGContext *s = &tcg_ctx; … … 408 393 s->nb_globals++; 409 394 } 410 return MAKE_TCGV(idx); 411 } 412 413 TCGv tcg_temp_new_internal(TCGType type, int temp_local) 395 return idx; 396 } 397 398 TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset, 399 const char *name) 400 { 401 int idx; 402 403 idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name); 404 return MAKE_TCGV_I32(idx); 405 } 406 407 TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset, 408 const char *name) 409 { 410 int idx; 411 412 idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name); 413 return MAKE_TCGV_I64(idx); 414 } 415 416 static inline int tcg_temp_new_internal(TCGType type, int temp_local) 414 417 { 415 418 TCGContext *s = &tcg_ctx; … … 459 462 } 460 463 } 461 return MAKE_TCGV(idx); 462 } 463 464 void tcg_temp_free(TCGv arg) 464 return idx; 465 } 466 467 TCGv_i32 
tcg_temp_new_internal_i32(int temp_local) 468 { 469 int idx; 470 471 idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local); 472 return MAKE_TCGV_I32(idx); 473 } 474 475 TCGv_i64 tcg_temp_new_internal_i64(int temp_local) 476 { 477 int idx; 478 479 idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local); 480 return MAKE_TCGV_I64(idx); 481 } 482 483 static inline void tcg_temp_free_internal(int idx) 465 484 { 466 485 TCGContext *s = &tcg_ctx; 467 486 TCGTemp *ts; 468 int idx = GET_TCGV(arg);469 487 int k; 470 488 … … 480 498 } 481 499 482 483 TCGv tcg_const_i32(int32_t val) 484 { 485 TCGv t0; 486 t0 = tcg_temp_new(TCG_TYPE_I32); 500 void tcg_temp_free_i32(TCGv_i32 arg) 501 { 502 tcg_temp_free_internal(GET_TCGV_I32(arg)); 503 } 504 505 void tcg_temp_free_i64(TCGv_i64 arg) 506 { 507 tcg_temp_free_internal(GET_TCGV_I64(arg)); 508 } 509 510 TCGv_i32 tcg_const_i32(int32_t val) 511 { 512 TCGv_i32 t0; 513 t0 = tcg_temp_new_i32(); 487 514 tcg_gen_movi_i32(t0, val); 488 515 return t0; 489 516 } 490 517 491 TCGv tcg_const_i64(int64_t val) 492 { 493 TCGv t0; 494 t0 = tcg_temp_new(TCG_TYPE_I64); 518 TCGv_i64 tcg_const_i64(int64_t val) 519 { 520 TCGv_i64 t0; 521 t0 = tcg_temp_new_i64(); 522 tcg_gen_movi_i64(t0, val); 523 return t0; 524 } 525 526 TCGv_i32 tcg_const_local_i32(int32_t val) 527 { 528 TCGv_i32 t0; 529 t0 = tcg_temp_local_new_i32(); 530 tcg_gen_movi_i32(t0, val); 531 return t0; 532 } 533 534 TCGv_i64 tcg_const_local_i64(int64_t val) 535 { 536 TCGv_i64 t0; 537 t0 = tcg_temp_local_new_i64(); 495 538 tcg_gen_movi_i64(t0, val); 496 539 return t0; … … 520 563 } 521 564 522 static inline TCGType tcg_get_base_type(TCGContext *s, TCGv arg)523 {524 return s->temps[GET_TCGV(arg)].base_type;525 }526 527 static void tcg_gen_call_internal(TCGContext *s, TCGv func,528 unsigned int flags,529 unsigned int nb_rets, const TCGv *rets,530 unsigned int nb_params, const TCGv *params)531 {532 #ifndef VBOX533 int i;534 #else535 unsigned int i;536 #endif537 *gen_opc_ptr++ = INDEX_op_call;538 *gen_opparam_ptr++ = (nb_rets << 16) | (nb_params + 1);539 for(i = 0; i < nb_rets; i++) {540 *gen_opparam_ptr++ = GET_TCGV(rets[i]);541 }542 for(i = 0; i < nb_params; i++) {543 *gen_opparam_ptr++ = GET_TCGV(params[i]);544 }545 *gen_opparam_ptr++ = GET_TCGV(func);546 547 *gen_opparam_ptr++ = flags;548 /* total parameters, needed to go backward in the instruction stream */549 *gen_opparam_ptr++ = 1 + nb_rets + nb_params + 3;550 }551 552 553 #if TCG_TARGET_REG_BITS < 64554 565 /* Note: we convert the 64 bit args to 32 bit and do some alignment 555 566 and endian swap. Maybe it would be better to do the alignment 556 567 and endian swap in tcg_reg_alloc_call(). 
*/ 557 void tcg_gen_call(TCGContext *s, TCGv func, unsigned int flags, 558 unsigned int nb_rets, const TCGv *rets, 559 unsigned int nb_params, const TCGv *args1) 560 { 561 TCGv ret, *args2, rets_2[2], arg; 562 int j, i, call_type; 563 564 if (nb_rets == 1) { 565 ret = rets[0]; 566 if (tcg_get_base_type(s, ret) == TCG_TYPE_I64) { 568 void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags, 569 int sizemask, TCGArg ret, int nargs, TCGArg *args) 570 { 571 int call_type; 572 int i; 573 int real_args; 574 int nb_rets; 575 TCGArg *nparam; 576 *gen_opc_ptr++ = INDEX_op_call; 577 nparam = gen_opparam_ptr++; 578 call_type = (flags & TCG_CALL_TYPE_MASK); 579 if (ret != TCG_CALL_DUMMY_ARG) { 580 #if TCG_TARGET_REG_BITS < 64 581 if (sizemask & 1) { 582 #ifdef TCG_TARGET_WORDS_BIGENDIAN 583 *gen_opparam_ptr++ = ret + 1; 584 *gen_opparam_ptr++ = ret; 585 #else 586 *gen_opparam_ptr++ = ret; 587 *gen_opparam_ptr++ = ret + 1; 588 #endif 567 589 nb_rets = 2; 568 #ifdef TCG_TARGET_WORDS_BIGENDIAN 569 rets_2[0] = TCGV_HIGH(ret); 570 rets_2[1] = ret; 571 #else 572 rets_2[0] = ret; 573 rets_2[1] = TCGV_HIGH(ret); 574 #endif 575 rets = rets_2; 576 } 577 } 578 args2 = alloca((nb_params * 3) * sizeof(TCGv)); 579 j = 0; 580 call_type = (flags & TCG_CALL_TYPE_MASK); 581 for(i = 0; i < nb_params; i++) { 582 arg = args1[i]; 583 if (tcg_get_base_type(s, arg) == TCG_TYPE_I64) { 590 } else 591 #endif 592 { 593 *gen_opparam_ptr++ = ret; 594 nb_rets = 1; 595 } 596 } else { 597 nb_rets = 0; 598 } 599 real_args = 0; 600 for (i = 0; i < nargs; i++) { 601 #if TCG_TARGET_REG_BITS < 64 602 if (sizemask & (2 << i)) { 584 603 #ifdef TCG_TARGET_I386 585 604 /* REGPARM case: if the third parameter is 64 bit, it is 586 605 allocated on the stack */ 587 if ( j== 2 && call_type == TCG_CALL_TYPE_REGPARM) {606 if (i == 2 && call_type == TCG_CALL_TYPE_REGPARM) { 588 607 call_type = TCG_CALL_TYPE_REGPARM_2; 589 608 flags = (flags & ~TCG_CALL_TYPE_MASK) | call_type; 590 609 } 591 args2[j++] = arg; 592 args2[j++] = TCGV_HIGH(arg); 593 #else 610 #endif 594 611 #ifdef TCG_TARGET_CALL_ALIGN_ARGS 595 612 /* some targets want aligned 64 bit args */ 596 if (j & 1) { 597 args2[j++] = TCG_CALL_DUMMY_ARG; 613 if (real_args & 1) { 614 *gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG; 615 real_args++; 598 616 } 599 617 #endif 600 618 #ifdef TCG_TARGET_WORDS_BIGENDIAN 601 args2[j++] = TCGV_HIGH(arg);602 args2[j++] = arg;619 *gen_opparam_ptr++ = args[i] + 1; 620 *gen_opparam_ptr++ = args[i]; 603 621 #else 604 args2[j++] = arg;605 args2[j++] = TCGV_HIGH(arg);606 #endif 607 #endif 608 } else {609 args2[j++] = arg; 610 }611 }612 tcg_gen_call_internal(s, func, flags,613 nb_rets, rets, j, args2);614 }615 #else 616 void tcg_gen_call(TCGContext *s, TCGv func, unsigned int flags, 617 unsigned int nb_rets, const TCGv *rets,618 unsigned int nb_params, const TCGv *args1) 619 { 620 tcg_gen_call_internal(s, func, flags, 621 nb_rets, rets, nb_params, args1);622 } 623 #endif 622 *gen_opparam_ptr++ = args[i]; 623 *gen_opparam_ptr++ = args[i] + 1; 624 #endif 625 real_args += 2; 626 } else 627 #endif 628 { 629 *gen_opparam_ptr++ = args[i]; 630 real_args++; 631 } 632 } 633 *gen_opparam_ptr++ = GET_TCGV_PTR(func); 634 635 *gen_opparam_ptr++ = flags; 636 637 *nparam = (nb_rets << 16) | (real_args + 1); 638 639 /* total parameters, needed to go backward in the instruction stream */ 640 *gen_opparam_ptr++ = 1 + nb_rets + real_args + 3; 641 } 624 642 625 643 #if TCG_TARGET_REG_BITS == 32 626 void tcg_gen_shifti_i64(TCGv ret, TCGvarg1,644 void tcg_gen_shifti_i64(TCGv_i64 ret, 
TCGv_i64 arg1, 627 645 int c, int right, int arith) 628 646 { 629 647 if (c == 0) { 630 tcg_gen_mov_i32( ret, arg1);648 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1)); 631 649 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1)); 632 650 } else if (c >= 32) { … … 634 652 if (right) { 635 653 if (arith) { 636 tcg_gen_sari_i32( ret, TCGV_HIGH(arg1), c);654 tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c); 637 655 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31); 638 656 } else { 639 tcg_gen_shri_i32( ret, TCGV_HIGH(arg1), c);657 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c); 640 658 tcg_gen_movi_i32(TCGV_HIGH(ret), 0); 641 659 } 642 660 } else { 643 tcg_gen_shli_i32(TCGV_HIGH(ret), arg1, c);644 tcg_gen_movi_i32( ret, 0);661 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c); 662 tcg_gen_movi_i32(TCGV_LOW(ret), 0); 645 663 } 646 664 } else { 647 TCGv t0, t1;648 649 t0 = tcg_temp_new (TCG_TYPE_I32);650 t1 = tcg_temp_new (TCG_TYPE_I32);665 TCGv_i32 t0, t1; 666 667 t0 = tcg_temp_new_i32(); 668 t1 = tcg_temp_new_i32(); 651 669 if (right) { 652 670 tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c); … … 655 673 else 656 674 tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c); 657 tcg_gen_shri_i32( ret, arg1, c);658 tcg_gen_or_i32( ret, ret, t0);675 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c); 676 tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0); 659 677 tcg_gen_mov_i32(TCGV_HIGH(ret), t1); 660 678 } else { 661 tcg_gen_shri_i32(t0, arg1, 32 - c);679 tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c); 662 680 /* Note: ret can be the same as arg1, so we use t1 */ 663 tcg_gen_shli_i32(t1, arg1, c);681 tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c); 664 682 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c); 665 683 tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0); 666 tcg_gen_mov_i32( ret, t1);667 } 668 tcg_temp_free (t0);669 tcg_temp_free (t1);684 tcg_gen_mov_i32(TCGV_LOW(ret), t1); 685 } 686 tcg_temp_free_i32(t0); 687 tcg_temp_free_i32(t1); 670 688 } 671 689 } … … 712 730 } 713 731 714 char *tcg_get_arg_str(TCGContext *s, char *buf, int buf_size, TCGv arg) 715 { 716 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV(arg)); 732 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg) 733 { 734 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg)); 735 } 736 737 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg) 738 { 739 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg)); 717 740 } 718 741 … … 857 880 th = tcg_find_helper(s, val); 858 881 if (th) { 859 fprintf(outfile, th->name);882 fprintf(outfile, "%s", th->name); 860 883 } else { 861 884 if (c == INDEX_op_movi_i32) … … 1174 1197 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */ 1175 1198 default: 1176 if (op > INDEX_op_end) { 1177 args -= def->nb_args; 1178 nb_iargs = def->nb_iargs; 1179 nb_oargs = def->nb_oargs; 1180 1181 /* Test if the operation can be removed because all 1182 its outputs are dead. We assume that nb_oargs == 0 1183 implies side effects */ 1184 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) { 1185 for(i = 0; i < nb_oargs; i++) { 1186 arg = args[i]; 1187 if (!dead_temps[arg]) 1188 goto do_not_remove; 1199 args -= def->nb_args; 1200 nb_iargs = def->nb_iargs; 1201 nb_oargs = def->nb_oargs; 1202 1203 /* Test if the operation can be removed because all 1204 its outputs are dead. 
We assume that nb_oargs == 0 1205 implies side effects */ 1206 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) { 1207 for(i = 0; i < nb_oargs; i++) { 1208 arg = args[i]; 1209 if (!dead_temps[arg]) 1210 goto do_not_remove; 1211 } 1212 tcg_set_nop(s, gen_opc_buf + op_index, args, def->nb_args); 1213 #ifdef CONFIG_PROFILER 1214 s->del_op_count++; 1215 #endif 1216 } else { 1217 do_not_remove: 1218 1219 /* output args are dead */ 1220 for(i = 0; i < nb_oargs; i++) { 1221 arg = args[i]; 1222 dead_temps[arg] = 1; 1223 } 1224 1225 /* if end of basic block, update */ 1226 if (def->flags & TCG_OPF_BB_END) { 1227 tcg_la_bb_end(s, dead_temps); 1228 } else if (def->flags & TCG_OPF_CALL_CLOBBER) { 1229 /* globals are live */ 1230 memset(dead_temps, 0, s->nb_globals); 1231 } 1232 1233 /* input args are live */ 1234 dead_iargs = 0; 1235 for(i = 0; i < nb_iargs; i++) { 1236 arg = args[i + nb_oargs]; 1237 if (dead_temps[arg]) { 1238 dead_iargs |= (1 << i); 1189 1239 } 1190 tcg_set_nop(s, gen_opc_buf + op_index, args, def->nb_args); 1191 #ifdef CONFIG_PROFILER 1192 s->del_op_count++; 1193 #endif 1194 } else { 1195 do_not_remove: 1196 1197 /* output args are dead */ 1198 for(i = 0; i < nb_oargs; i++) { 1199 arg = args[i]; 1200 dead_temps[arg] = 1; 1201 } 1202 1203 /* if end of basic block, update */ 1204 if (def->flags & TCG_OPF_BB_END) { 1205 tcg_la_bb_end(s, dead_temps); 1206 } else if (def->flags & TCG_OPF_CALL_CLOBBER) { 1207 /* globals are live */ 1208 memset(dead_temps, 0, s->nb_globals); 1209 } 1210 1211 /* input args are live */ 1212 dead_iargs = 0; 1213 for(i = 0; i < nb_iargs; i++) { 1214 arg = args[i + nb_oargs]; 1215 if (dead_temps[arg]) { 1216 dead_iargs |= (1 << i); 1217 } 1218 dead_temps[arg] = 0; 1219 } 1220 s->op_dead_iargs[op_index] = dead_iargs; 1240 dead_temps[arg] = 0; 1221 1241 } 1222 } else { 1223 /* legacy dyngen operations */ 1224 args -= def->nb_args; 1225 /* mark end of basic block */ 1226 tcg_la_bb_end(s, dead_temps); 1242 s->op_dead_iargs[op_index] = dead_iargs; 1227 1243 } 1228 1244 break; … … 1877 1893 #ifdef CONFIG_PROFILER 1878 1894 1879 static int64_t dyngen_table_op_count[NB_OPS];1895 static int64_t tcg_table_op_count[NB_OPS]; 1880 1896 1881 1897 void dump_op_count(void) … … 1883 1899 int i; 1884 1900 FILE *f; 1885 f = fopen("/tmp/op1.log", "w"); 1886 for(i = 0; i < INDEX_op_end; i++) { 1887 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, dyngen_table_op_count[i]); 1888 } 1889 fclose(f); 1890 f = fopen("/tmp/op2.log", "w"); 1901 f = fopen("/tmp/op.log", "w"); 1891 1902 for(i = INDEX_op_end; i < NB_OPS; i++) { 1892 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, dyngen_table_op_count[i]);1903 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, tcg_table_op_count[i]); 1893 1904 } 1894 1905 fclose(f); … … 1906 1917 1907 1918 #ifdef DEBUG_DISAS 1908 if (unlikely( loglevel & CPU_LOG_TB_OP)) {1909 fprintf(logfile,"OP:\n");1919 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { 1920 qemu_log("OP:\n"); 1910 1921 tcg_dump_ops(s, logfile); 1911 fprintf(logfile,"\n");1922 qemu_log("\n"); 1912 1923 } 1913 1924 #endif … … 1922 1933 1923 1934 #ifdef DEBUG_DISAS 1924 if (unlikely( loglevel & CPU_LOG_TB_OP_OPT)) {1925 fprintf(logfile,"OP after la:\n");1935 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) { 1936 qemu_log("OP after la:\n"); 1926 1937 tcg_dump_ops(s, logfile); 1927 fprintf(logfile,"\n");1938 qemu_log("\n"); 1928 1939 } 1929 1940 #endif … … 1940 1951 opc = gen_opc_buf[op_index]; 1941 1952 #ifdef CONFIG_PROFILER 1942 dyngen_table_op_count[opc]++;1953 
tcg_table_op_count[opc]++; 1943 1954 #endif 1944 1955 def = &tcg_op_defs[opc]; … … 1995 2006 case INDEX_op_end: 1996 2007 goto the_end; 1997 1998 #ifdef CONFIG_DYNGEN_OP1999 case 0 ... INDEX_op_end - 1:2000 /* legacy dyngen ops */2001 #ifdef CONFIG_PROFILER2002 s->old_op_count++;2003 #endif2004 tcg_reg_alloc_bb_end(s, s->reserved_regs);2005 if (search_pc >= 0) {2006 s->code_ptr += def->copy_size;2007 args += def->nb_args;2008 } else {2009 args = dyngen_op(s, opc, args);2010 }2011 goto next;2012 #endif2013 2008 default: 2014 2009 /* Note: in order to speed up the code, it would be much … … 2033 2028 } 2034 2029 2035 int dyngen_code(TCGContext *s, uint8_t *gen_code_buf)2030 int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf) 2036 2031 { 2037 2032 #ifdef CONFIG_PROFILER … … 2061 2056 not be changed, though writing the same values is ok. 2062 2057 Return -1 if not found. */ 2063 int dyngen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)2058 int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset) 2064 2059 { 2065 2060 return tcg_gen_code_common(s, gen_code_buf, offset); … … 2082 2077 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n", 2083 2078 s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max); 2084 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",2085 s->op_count ? (double)s->old_op_count / s->op_count * 100.0 : 0);2086 2079 cpu_fprintf(f, "deleted ops/TB %0.2f\n", 2087 2080 s->tb_count ? -
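Editor's note: the tcg.c changes above replace the single TCGv constructors with typed _i32/_i64 variants, rework call emission into tcg_gen_callN with a sizemask describing which arguments are 64-bit, drop the legacy dyngen paths (dyngen_code becomes tcg_gen_code), and make the 32-bit-host shift helper operate on explicit low/high halves. The shift decomposition is easy to check in plain C (illustrative model, not TCG code):

#include <stdint.h>
#include <stdio.h>

/* 64-bit left shift by constant c, built from 32-bit halves the way
   tcg_gen_shifti_i64 emits it: c == 0 copies, c >= 32 moves the low
   word into the high word, otherwise bits carry across via 32 - c. */
static uint64_t shl64_via_32(uint32_t lo, uint32_t hi, int c)
{
    uint32_t rlo, rhi;
    if (c == 0) {
        rlo = lo; rhi = hi;
    } else if (c >= 32) {
        rhi = lo << (c - 32);
        rlo = 0;
    } else {
        rhi = (hi << c) | (lo >> (32 - c));
        rlo = lo << c;
    }
    return ((uint64_t)rhi << 32) | rlo;
}

int main(void)
{
    uint64_t v = 0x0123456789abcdefULL;
    int c;
    for (c = 0; c < 64; c += 17)
        printf("c=%2d: %016llx (expect %016llx)\n", c,
               (unsigned long long)shl64_via_32((uint32_t)v, (uint32_t)(v >> 32), c),
               (unsigned long long)(v << c));
    return 0;
}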
trunk/src/recompiler/tcg/tcg.h
r36140 r36170 115 115 We use plain int by default to avoid this runtime overhead. 116 116 Users of tcg_gen_* don't need to know about any of this, and should 117 treat TCGv as an opaque type. */ 117 treat TCGv as an opaque type. 118 In additon we do typechecking for different types of variables. TCGv_i32 119 and TCGv_i64 are 32/64-bit variables respectively. TCGv and TCGv_ptr 120 are aliases for target_ulong and host pointer sized values respectively. 121 */ 118 122 119 123 //#define DEBUG_TCGV 1 … … 123 127 typedef struct 124 128 { 125 int n; 126 } TCGv; 127 128 #define MAKE_TCGV(i) __extension__ \ 129 ({ TCGv make_tcgv_tmp = {i}; make_tcgv_tmp;}) 130 #define GET_TCGV(t) ((t).n) 129 int i32; 130 } TCGv_i32; 131 132 typedef struct 133 { 134 int i64; 135 } TCGv_i64; 136 137 #define MAKE_TCGV_I32(i) __extension__ \ 138 ({ TCGv_i32 make_tcgv_tmp = {i}; make_tcgv_tmp;}) 139 #define MAKE_TCGV_I64(i) __extension__ \ 140 ({ TCGv_i64 make_tcgv_tmp = {i}; make_tcgv_tmp;}) 141 #define GET_TCGV_I32(t) ((t).i32) 142 #define GET_TCGV_I64(t) ((t).i64) 131 143 #if TCG_TARGET_REG_BITS == 32 132 #define TCGV_HIGH(t) MAKE_TCGV(GET_TCGV(t) + 1) 144 #define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t)) 145 #define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1) 133 146 #endif 134 147 135 148 #else /* !DEBUG_TCGV */ 136 149 137 typedef int TCGv; 138 #define MAKE_TCGV(x) (x) 139 #define GET_TCGV(t) (t) 150 typedef int TCGv_i32; 151 typedef int TCGv_i64; 152 #define MAKE_TCGV_I32(x) (x) 153 #define MAKE_TCGV_I64(x) (x) 154 #define GET_TCGV_I32(t) (t) 155 #define GET_TCGV_I64(t) (t) 140 156 #if TCG_TARGET_REG_BITS == 32 157 #define TCGV_LOW(t) (t) 141 158 #define TCGV_HIGH(t) ((t) + 1) 142 159 #endif … … 145 162 146 163 /* Dummy definition to avoid compiler warnings. */ 147 #define TCGV_UNUSED(x) x = MAKE_TCGV(-1) 164 #define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1) 165 #define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1) 148 166 149 167 /* call flags */ … … 159 177 160 178 /* used to align parameters */ 161 #define TCG_CALL_DUMMY_TCGV MAKE_TCGV (-1)179 #define TCG_CALL_DUMMY_TCGV MAKE_TCGV_I32(-1) 162 180 #define TCG_CALL_DUMMY_ARG ((TCGArg)(-1)) 163 181 … … 255 273 int64_t temp_count; 256 274 int temp_count_max; 257 int64_t old_op_count;258 275 int64_t del_op_count; 259 276 int64_t code_in_len; … … 297 314 void tcg_func_start(TCGContext *s); 298 315 299 int dyngen_code(TCGContext *s, uint8_t *gen_code_buf);300 int dyngen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset);316 int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf); 317 int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset); 301 318 302 319 void tcg_set_frame(TCGContext *s, int reg, 303 320 tcg_target_long start, tcg_target_long size); 304 TCGv tcg_global_reg_new(TCGType type, int reg, const char *name); 305 TCGv tcg_global_reg2_new_hack(TCGType type, int reg1, int reg2, 306 const char *name); 307 TCGv tcg_global_mem_new(TCGType type, int reg, tcg_target_long offset, 308 const char *name); 309 TCGv tcg_temp_new_internal(TCGType type, int temp_local); 310 static inline TCGv tcg_temp_new(TCGType type) 311 { 312 return tcg_temp_new_internal(type, 0); 321 322 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name); 323 TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset, 324 const char *name); 325 TCGv_i32 tcg_temp_new_internal_i32(int temp_local); 326 static inline TCGv_i32 tcg_temp_new_i32(void) 327 { 328 return tcg_temp_new_internal_i32(0); 313 329 } 314 static inline TCGv tcg_temp_local_new(TCGType 
type)315 { 316 return tcg_temp_new_internal (type,1);330 static inline TCGv_i32 tcg_temp_local_new_i32(void) 331 { 332 return tcg_temp_new_internal_i32(1); 317 333 } 318 void tcg_temp_free(TCGv arg); 319 char *tcg_get_arg_str(TCGContext *s, char *buf, int buf_size, TCGv arg); 334 void tcg_temp_free_i32(TCGv_i32 arg); 335 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg); 336 337 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name); 338 TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset, 339 const char *name); 340 TCGv_i64 tcg_temp_new_internal_i64(int temp_local); 341 static inline TCGv_i64 tcg_temp_new_i64(void) 342 { 343 return tcg_temp_new_internal_i64(0); 344 } 345 static inline TCGv_i64 tcg_temp_local_new_i64(void) 346 { 347 return tcg_temp_new_internal_i64(1); 348 } 349 void tcg_temp_free_i64(TCGv_i64 arg); 350 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg); 351 320 352 void tcg_dump_info(FILE *f, 321 353 int (*cpu_fprintf)(FILE *f, const char *fmt, ...)); … … 357 389 const char *args_ct_str[TCG_MAX_OP_ARGS]; 358 390 } TCGTargetOpDef; 359 360 extern TCGOpDef tcg_op_defs[];361 391 362 392 void tcg_target_init(TCGContext *s); … … 384 414 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs); 385 415 386 void tcg_gen_call(TCGContext *s, TCGv func, unsigned int flags,387 unsigned int nb_rets, const TCGv *rets,388 unsigned int nb_params, const TCGv *args1);389 void tcg_gen_shifti_i64(TCGv ret, TCGv arg1,390 int c, int right, int arith);391 392 /* only used for debugging purposes */393 void tcg_register_helper(void *func, const char *name);394 #define TCG_HELPER(func) tcg_register_helper(func, #func)395 const char *tcg_helper_get_name(TCGContext *s, void *func);396 void tcg_dump_ops(TCGContext *s, FILE *outfile);397 398 void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf);399 TCGv tcg_const_i32(int32_t val);400 TCGv tcg_const_i64(int64_t val);401 402 416 #if TCG_TARGET_REG_BITS == 32 403 417 #define tcg_const_ptr tcg_const_i32 404 418 #define tcg_add_ptr tcg_add_i32 405 419 #define tcg_sub_ptr tcg_sub_i32 420 #define TCGv_ptr TCGv_i32 421 #define GET_TCGV_PTR GET_TCGV_I32 422 #define tcg_global_reg_new_ptr tcg_global_reg_new_i32 423 #define tcg_global_mem_new_ptr tcg_global_mem_new_i32 424 #define tcg_temp_new_ptr tcg_temp_new_i32 425 #define tcg_temp_free_ptr tcg_temp_free_i32 406 426 #else 407 427 #define tcg_const_ptr tcg_const_i64 408 428 #define tcg_add_ptr tcg_add_i64 409 429 #define tcg_sub_ptr tcg_sub_i64 410 #endif 430 #define TCGv_ptr TCGv_i64 431 #define GET_TCGV_PTR GET_TCGV_I64 432 #define tcg_global_reg_new_ptr tcg_global_reg_new_i64 433 #define tcg_global_mem_new_ptr tcg_global_mem_new_i64 434 #define tcg_temp_new_ptr tcg_temp_new_i64 435 #define tcg_temp_free_ptr tcg_temp_free_i64 436 #endif 437 438 void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags, 439 int sizemask, TCGArg ret, int nargs, TCGArg *args); 440 441 void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1, 442 int c, int right, int arith); 443 444 /* only used for debugging purposes */ 445 void tcg_register_helper(void *func, const char *name); 446 const char *tcg_helper_get_name(TCGContext *s, void *func); 447 void tcg_dump_ops(TCGContext *s, FILE *outfile); 448 449 void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf); 450 TCGv_i32 tcg_const_i32(int32_t val); 451 TCGv_i64 tcg_const_i64(int64_t val); 452 TCGv_i32 tcg_const_local_i32(int32_t val); 453 TCGv_i64 
tcg_const_local_i64(int64_t val); 411 454 412 455 void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type, … … 414 457 const TCGArg *tcg_gen_code_op(TCGContext *s, int opc, const TCGArg *args1, 415 458 unsigned int dead_iargs); 416 417 const TCGArg *dyngen_op(TCGContext *s, int opc, const TCGArg *opparam_ptr);418 459 419 460 /* tcg-runtime.c */ … … 431 472 extern uint8_t* code_gen_prologue; 432 473 #endif 433 434 #if defined(__powerpc__) && !defined(__powerpc64__) 474 #if defined(_ARCH_PPC) && !defined(_ARCH_PPC64) 435 475 #define tcg_qemu_tb_exec(tb_ptr) \ 436 476 ((long REGPARM __attribute__ ((longcall)) (*)(void *))code_gen_prologue)(tb_ptr) 437 477 #else 438 439 478 # if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM) 440 479 # define tcg_qemu_tb_exec(tb_ptr, ret) \ -
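Editor's note: the tcg.h hunk states the point of the split: under DEBUG_TCGV, TCGv_i32 and TCGv_i64 become distinct one-member structs, so mixing 32- and 64-bit handles is a compile error instead of a silent index mix-up, at no runtime cost in release builds. A simplified model of the trick (not the real headers):

#include <stdio.h>

/* One-member struct wrappers make the two handle types incompatible
   to the compiler even though both carry a plain temp index. */
typedef struct { int i32; } TCGv_i32;
typedef struct { int i64; } TCGv_i64;

static TCGv_i32 make_i32(int idx) { TCGv_i32 t = { idx }; return t; }
static TCGv_i64 make_i64(int idx) { TCGv_i64 t = { idx }; return t; }

static void use_i32(TCGv_i32 t) { printf("i32 temp #%d\n", t.i32); }

int main(void)
{
    TCGv_i32 a = make_i32(3);
    TCGv_i64 b = make_i64(4);
    use_i32(a);
    /* use_i32(b);  -- would now be rejected at compile time */
    (void)b;
    return 0;
}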
trunk/src/recompiler/tcg/x86_64/tcg-target.c
r36140 r36170 700 700 break; 701 701 case 0: 702 /* movzbq */ 703 tcg_out_modrm(s, 0xb6 | P_EXT | P_REXW, data_reg, TCG_REG_RAX); 704 break; 702 705 case 1: 706 /* movzwq */ 707 tcg_out_modrm(s, 0xb7 | P_EXT | P_REXW, data_reg, TCG_REG_RAX); 708 break; 703 709 case 2: 704 710 default: … … 733 739 bswap = 0; 734 740 #endif 735 736 741 switch(opc) { 737 742 case 0: -
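Editor's note: the x86_64 backend hunk adds explicit movzbq/movzwq encodings so 8- and 16-bit guest loads reach the destination register zero-extended rather than inheriting stale upper bits from RAX. In C terms, the guarantee being added is (illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t raw = 0xffffffffffffff80ULL;  /* pretend RAX holds junk + 0x80 */
    uint64_t ld8u  = (uint8_t)raw;         /* movzbq effect: 0x80   */
    uint64_t ld16u = (uint16_t)raw;        /* movzwq effect: 0xff80 */
    printf("ld8u=0x%llx ld16u=0x%llx\n",
           (unsigned long long)ld8u, (unsigned long long)ld16u);
    return 0;
}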
trunk/src/recompiler/tests/linux-test.c
r33656 r36170 1 1 /* 2 2 * linux and CPU test 3 * 3 * 4 4 * Copyright (c) 2003 Fabrice Bellard 5 5 * … … 16 16 * You should have received a copy of the GNU General Public License 17 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 19 * MA 02110-1301, USA. 19 20 */ 20 21 … … 69 70 { 70 71 if (ret < 0) { 71 error1(filename, line, "%m (ret=%d, errno=%d)", 72 error1(filename, line, "%m (ret=%d, errno=%d)", 72 73 ret, errno); 73 74 } … … 104 105 if (getcwd(cur_dir, sizeof(cur_dir)) == NULL) 105 106 error("getcwd"); 106 107 107 108 chk_error(mkdir(TESTPATH, 0755)); 108 109 109 110 chk_error(chdir(TESTPATH)); 110 111 111 112 /* open/read/write/close/readv/writev/lseek */ 112 113 … … 135 136 if (memcmp(buf, buf2, FILE_BUF_SIZE) != 0) 136 137 error("memcmp"); 137 138 138 139 #define FOFFSET 16 139 140 ret = chk_error(lseek(fd, FOFFSET, SEEK_SET)); … … 149 150 if (memcmp(buf + FOFFSET, buf3, FILE_BUF_SIZE - FOFFSET) != 0) 150 151 error("memcmp"); 151 152 152 153 chk_error(close(fd)); 153 154 … … 182 183 chk_error(fstat(fd, &st)); 183 184 chk_error(close(fd)); 184 185 185 186 if (st.st_size != 50) 186 187 error("stat size"); 187 188 if (!S_ISREG(st.st_mode)) 188 189 error("stat mode"); 189 190 190 191 /* symlink/lstat */ 191 192 chk_error(symlink("file2", "file3")); … … 193 194 if (!S_ISLNK(st.st_mode)) 194 195 error("stat mode"); 195 196 196 197 /* getdents */ 197 198 dir = opendir(TESTPATH); … … 252 253 if (ti >= 2) 253 254 error("gettimeofday"); 254 255 255 256 chk_error(getrusage(RUSAGE_SELF, &rusg1)); 256 257 for(i = 0;i < 10000; i++); … … 283 284 int len; 284 285 len = strlen(buf); 285 if (len < buf_size) 286 if (len < buf_size) 286 287 pstrcpy(buf + len, buf_size - len, s); 287 288 return buf; … … 338 339 if (val != SOCK_STREAM) 339 340 error("getsockopt"); 340 341 341 342 pid = chk_error(fork()); 342 343 if (pid == 0) { … … 430 431 431 432 stack1 = malloc(STACK_SIZE); 432 pid1 = chk_error(clone(thread1_func, stack1 + STACK_SIZE, 433 pid1 = chk_error(clone(thread1_func, stack1 + STACK_SIZE, 433 434 CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, "hello1")); 434 435 435 436 stack2 = malloc(STACK_SIZE); 436 pid2 = chk_error(clone(thread2_func, stack2 + STACK_SIZE, 437 pid2 = chk_error(clone(thread2_func, stack2 + STACK_SIZE, 437 438 CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, "hello2")); 438 439 … … 476 477 act.sa_flags = 0; 477 478 chk_error(sigaction(SIGALRM, &act, NULL)); 478 479 479 480 it.it_interval.tv_sec = 0; 480 481 it.it_interval.tv_usec = 10 * 1000; … … 486 487 oit.it_value.tv_usec != it.it_value.tv_usec) 487 488 error("itimer"); 488 489 489 490 while (alarm_count < 5) { 490 491 usleep(10 * 1000); … … 509 510 *(uint8_t *)0 = 0; 510 511 } 511 512 512 513 act.sa_handler = SIG_DFL; 513 514 sigemptyset(&act.sa_mask); -
trunk/src/recompiler/tests/qruncom.c
r36140 r36170 199 199 200 200 env = cpu_init("qemu32"); 201 202 /* set user mode state (XXX: should be done automatically by203 cpu_init ?) */204 env->user_mode_only = 1;205 201 206 202 cpu_x86_set_cpl(env, 3); -
trunk/src/recompiler/tests/sha1.c
r1 r36170 239 239 return 0; 240 240 } 241 242 -
trunk/src/recompiler/tests/test-i386-shift.h
r36140 r36170 184 184 #undef OP_NOBYTE 185 185 #undef EXECSHIFT 186 -
trunk/src/recompiler/tests/test-i386.c
r36140 r36170 16 16 * You should have received a copy of the GNU General Public License 17 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 19 * MA 02110-1301, USA. 19 20 */ 20 21 -
trunk/src/recompiler/tests/test-mmap.c
r36140 r36170 18 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 19 * GNU General Public License for more details. 20 * 20 * 21 21 * You should have received a copy of the GNU General Public License 22 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 23 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 24 * MA 02110-1301, USA. 24 25 */ 25 26 … … 65 66 66 67 len = pagesize + (pagesize * i & 7); 67 p1 = mmap(NULL, len, PROT_READ, 68 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 69 p2 = mmap(NULL, len, PROT_READ, 70 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 71 p3 = mmap(NULL, len, PROT_READ, 72 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 73 p4 = mmap(NULL, len, PROT_READ, 74 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 75 p5 = mmap(NULL, len, PROT_READ, 68 p1 = mmap(NULL, len, PROT_READ, 69 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 70 p2 = mmap(NULL, len, PROT_READ, 71 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 72 p3 = mmap(NULL, len, PROT_READ, 73 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 74 p4 = mmap(NULL, len, PROT_READ, 75 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 76 p5 = mmap(NULL, len, PROT_READ, 76 77 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 77 78 … … 120 121 121 122 len = 0x02000000; 122 p1 = mmap(NULL, len, PROT_READ, 123 p1 = mmap(NULL, len, PROT_READ, 123 124 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 124 125 … … 128 129 p = (uintptr_t) p1; 129 130 fail_unless ((p & pagemask) == 0); 130 131 131 132 /* Make sure we can read from the entire area. */ 132 133 memcpy (dummybuf, p1, pagesize); … … 147 148 { 148 149 int nlen; 149 p1 = mmap(NULL, pagesize, PROT_READ, 150 p1 = mmap(NULL, pagesize, PROT_READ, 150 151 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 151 152 fail_unless (p1 != MAP_FAILED); … … 154 155 memcpy (dummybuf, p1, pagesize); 155 156 156 p2 = mmap(NULL, pagesize, PROT_READ, 157 p2 = mmap(NULL, pagesize, PROT_READ, 157 158 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 158 159 fail_unless (p2 != MAP_FAILED); … … 164 165 munmap (p1, pagesize); 165 166 nlen = pagesize * 8; 166 p3 = mmap(NULL, nlen, PROT_READ, 167 p3 = mmap(NULL, nlen, PROT_READ, 167 168 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 168 169 169 170 /* Check if the mmaped areas collide. */ 170 if (p3 < p2 171 if (p3 < p2 171 172 && (p3 + nlen) > p2) 172 173 fail_unless (0); … … 193 194 194 195 /* Find a suitable address to start with. */ 195 addr = mmap(NULL, pagesize * 40, PROT_READ | PROT_WRITE, 196 addr = mmap(NULL, pagesize * 40, PROT_READ | PROT_WRITE, 196 197 MAP_PRIVATE | MAP_ANONYMOUS, 197 198 -1, 0); … … 202 203 { 203 204 /* Create submaps within our unfixed map. */ 204 p1 = mmap(addr, pagesize, PROT_READ, 205 p1 = mmap(addr, pagesize, PROT_READ, 205 206 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 206 207 -1, 0); 207 /* Make sure we get pages aligned with the pagesize. 208 /* Make sure we get pages aligned with the pagesize. 208 209 The target expects this. */ 209 210 p = (uintptr_t) p1; 210 211 fail_unless (p1 == addr); 211 fail_unless ((p & pagemask) == 0); 212 fail_unless ((p & pagemask) == 0); 212 213 memcpy (dummybuf, p1, pagesize); 213 214 munmap (p1, pagesize); … … 233 234 { 234 235 /* Create submaps within our unfixed map. */ 235 p1 = mmap(addr, pagesize, PROT_READ | PROT_WRITE, 236 p1 = mmap(addr, pagesize, PROT_READ | PROT_WRITE, 236 237 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 237 238 -1, 0); 238 /* Make sure we get pages aligned with the pagesize. 239 /* Make sure we get pages aligned with the pagesize. 239 240 The target expects this. 
*/ 240 241 p = (uintptr_t) p1; 241 242 fail_unless (p1 == addr); 242 fail_unless ((p & pagemask) == 0); 243 fail_unless ((p & pagemask) == 0); 243 244 memcpy (p1, dummybuf, pagesize); 244 245 munmap (p1, pagesize); … … 260 261 261 262 len = pagesize; 262 p1 = mmap(NULL, len, PROT_READ, 263 MAP_PRIVATE, 263 p1 = mmap(NULL, len, PROT_READ, 264 MAP_PRIVATE, 264 265 test_fd, 0); 265 p2 = mmap(NULL, len, PROT_READ, 266 MAP_PRIVATE, 266 p2 = mmap(NULL, len, PROT_READ, 267 MAP_PRIVATE, 267 268 test_fd, pagesize); 268 p3 = mmap(NULL, len, PROT_READ, 269 MAP_PRIVATE, 269 p3 = mmap(NULL, len, PROT_READ, 270 MAP_PRIVATE, 270 271 test_fd, pagesize * 2); 271 272 … … 309 310 for (i = 0; i < 0x10; i++) 310 311 { 311 p1 = mmap(NULL, pagesize, PROT_READ, 312 MAP_PRIVATE, 313 test_fd, 312 p1 = mmap(NULL, pagesize, PROT_READ, 313 MAP_PRIVATE, 314 test_fd, 314 315 (test_fsize - sizeof *p1) & ~pagemask); 315 316 … … 341 342 342 343 /* Find a suitable address to start with. */ 343 addr = mmap(NULL, pagesize * 44, PROT_READ, 344 addr = mmap(NULL, pagesize * 44, PROT_READ, 344 345 MAP_PRIVATE | MAP_ANONYMOUS, 345 346 -1, 0); … … 351 352 { 352 353 /* Create submaps within our unfixed map. */ 353 p1 = mmap(addr, pagesize, PROT_READ, 354 MAP_PRIVATE | MAP_FIXED, 355 test_fd, 354 p1 = mmap(addr, pagesize, PROT_READ, 355 MAP_PRIVATE | MAP_FIXED, 356 test_fd, 356 357 (test_fsize - sizeof *p1) & ~pagemask); 357 358 … … 383 384 384 385 /* Find a suitable address to start with. */ 385 addr = mmap(NULL, pagesize * 40 * 4, PROT_READ, 386 addr = mmap(NULL, pagesize * 40 * 4, PROT_READ, 386 387 MAP_PRIVATE | MAP_ANONYMOUS, 387 388 -1, 0); … … 391 392 for (i = 0; i < 40; i++) 392 393 { 393 p1 = mmap(addr, pagesize, PROT_READ, 394 p1 = mmap(addr, pagesize, PROT_READ, 394 395 MAP_PRIVATE | MAP_FIXED, 395 396 test_fd, 0); 396 p2 = mmap(addr + pagesize, pagesize, PROT_READ, 397 p2 = mmap(addr + pagesize, pagesize, PROT_READ, 397 398 MAP_PRIVATE | MAP_FIXED, 398 399 test_fd, pagesize); 399 p3 = mmap(addr + pagesize * 2, pagesize, PROT_READ, 400 p3 = mmap(addr + pagesize * 2, pagesize, PROT_READ, 400 401 MAP_PRIVATE | MAP_FIXED, 401 402 test_fd, pagesize * 2); 402 p4 = mmap(addr + pagesize * 3, pagesize, PROT_READ, 403 p4 = mmap(addr + pagesize * 3, pagesize, PROT_READ, 403 404 MAP_PRIVATE | MAP_FIXED, 404 405 test_fd, pagesize * 3); 405 406 406 /* Make sure we get pages aligned with the pagesize. 407 /* Make sure we get pages aligned with the pagesize. 407 408 The target expects this. */ 408 409 fail_unless (p1 == (void *)addr); … … 454 455 for (i = 0; i < (pagesize * 4) / sizeof i; i++) 455 456 write (test_fd, &i, sizeof i); 456 /* Append a few extra writes to make the file end at non 457 /* Append a few extra writes to make the file end at non 457 458 page boundary. */ 458 459 write (test_fd, &i, sizeof i); i++; -
trunk/src/recompiler/tests/test_path.c
r36140 r36170 150 150 return 0; 151 151 } 152 -
trunk/src/recompiler/translate-all.c
r36140 r36170 16 16 * You should have received a copy of the GNU Lesser General Public 17 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 5 9 Temple Place, Suite 330, Boston, MA 02111-1307USA18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 19 */ 20 20 … … 139 139 s->code_time -= profile_getclock(); 140 140 #endif 141 gen_code_size = dyngen_code(s, gen_code_buf);141 gen_code_size = tcg_gen_code(s, gen_code_buf); 142 142 *gen_code_size_ptr = gen_code_size; 143 143 #ifdef CONFIG_PROFILER … … 152 152 153 153 #ifdef DEBUG_DISAS 154 if ( loglevel & CPU_LOG_TB_OUT_ASM) {155 fprintf(logfile,"OUT: [size=%d]\n", *gen_code_size_ptr);156 disas(logfile,tb->tc_ptr, *gen_code_size_ptr);157 fprintf(logfile,"\n");158 fflush(logfile);154 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) { 155 qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr); 156 log_disas(tb->tc_ptr, *gen_code_size_ptr); 157 qemu_log("\n"); 158 qemu_log_flush(); 159 159 } 160 160 #endif … … 202 202 s->tb_next = tb->tb_next; 203 203 #endif 204 j = dyngen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);204 j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr); 205 205 if (j < 0) 206 206 return -1;
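Editor's note: translate-all.c moves from dyngen_code()/direct logfile writes to the renamed tcg_gen_code()/tcg_gen_code_search_pc() entry points and the qemu_log family. A minimal stand-in for that logging pattern; the names follow the diff, but the mask value and internals here are assumptions for illustration:

#include <stdarg.h>
#include <stdio.h>

#define CPU_LOG_TB_OUT_ASM (1 << 2)   /* bit assignment is an assumption */

static int qemu_loglevel;
static FILE *logfile;

/* Test whether any of the requested log bits are enabled. */
static int qemu_loglevel_mask(int mask) { return (qemu_loglevel & mask) != 0; }

/* Write to the log file if one is open. */
static void qemu_log(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    if (logfile)
        vfprintf(logfile, fmt, ap);
    va_end(ap);
}

int main(void)
{
    logfile = stderr;
    qemu_loglevel = CPU_LOG_TB_OUT_ASM;
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM))
        qemu_log("OUT: [size=%d]\n", 123);
    return 0;
}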