Changeset 102663 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Dec 21, 2023 1:55:07 AM
- svn:sync-xref-src-repo-rev: 160825
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 1 added, 7 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r102585 → r102663:

@@ -816 +816 @@
     pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
 #elif 1
-    pVCpu->iem.s.pbInstrBuf = NULL;
     pVCpu->iem.s.cbInstrBufTotal = 0;
     RT_NOREF(cbInstr);
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp
r102634 → r102663:

@@ -61 +61 @@
 *   TB Helper Functions                                                                                                         *
 *********************************************************************************************************************************/
+#ifdef RT_ARCH_AMD64
+DECLASM(void) iemNativeHlpAsmSafeWrapLogCpuState(void);
+#endif
 
 

@@ -67 +70 @@
 *   Builtin functions                                                                                                           *
 *********************************************************************************************************************************/
+
+/**
+ * Built-in function that does nothing.
+ *
+ * Whether this is called or not can be controlled by the entry in the
+ * IEMThreadedGenerator.katBltIns table.  This can be useful to determine
+ * whether why behaviour changes when enabling the LogCpuState builtins.  I.e.
+ * whether it's the reduced call count in the TBs or the threaded calls flushing
+ * register state.
+ */
+IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_Nop)
+{
+    RT_NOREF(pReNative, pCallEntry);
+    return off;
+}
+
+
+/**
+ * Emits for for LogCpuState.
+ *
+ * This shouldn't have any relevant impact on the recompiler state.
+ */
+IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_LogCpuState)
+{
+#ifdef RT_ARCH_AMD64
+    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
+    /* push rax */
+    pbCodeBuf[off++] = 0x50 + X86_GREG_xAX;
+    /* push imm32 */
+    pbCodeBuf[off++] = 0x68;
+    pbCodeBuf[off++] = RT_BYTE1(pCallEntry->auParams[0]);
+    pbCodeBuf[off++] = RT_BYTE2(pCallEntry->auParams[0]);
+    pbCodeBuf[off++] = RT_BYTE3(pCallEntry->auParams[0]);
+    pbCodeBuf[off++] = RT_BYTE4(pCallEntry->auParams[0]);
+    /* mov rax, iemNativeHlpAsmSafeWrapLogCpuState */
+    pbCodeBuf[off++] = X86_OP_REX_W;
+    pbCodeBuf[off++] = 0xb8 + X86_GREG_xAX;
+    *(uint64_t *)&pbCodeBuf[off] = (uintptr_t)iemNativeHlpAsmSafeWrapLogCpuState;
+    off += sizeof(uint64_t);
+    /* call rax */
+    pbCodeBuf[off++] = 0xff;
+    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
+    /* pop rax */
+    pbCodeBuf[off++] = 0x58 + X86_GREG_xAX;
+    /* pop rax */
+    pbCodeBuf[off++] = 0x58 + X86_GREG_xAX;
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+#else
+    /** @todo Implement this */
+    AssertFailed();
+    RT_NOREF(pReNative, pCallEntry);
+#endif
+    return off;
+}
+
 
 /**

@@ -211 +270 @@
     Assert(cbInstr > 0);
     Assert(cbInstr < 16);
+#ifdef VBOX_STRICT
+    off = iemNativeEmitMarker(pReNative, off, 0x80000001);
+#endif
 
     /*

@@ -304 +366 @@
  */
 #define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) \
-    RT_NOREF(cbInstr); \
+    RT_NOREF(a_cbInstr); \
     off = iemNativeEmitBltInConsiderLimChecking(pReNative, off)
 

@@ -310 +372 @@
 iemNativeEmitBltInConsiderLimChecking(PIEMRECOMPILERSTATE pReNative, uint32_t off)
 {
+#ifdef VBOX_STRICT
+    off = iemNativeEmitMarker(pReNative, off, 0x80000002);
+#endif
+
     /*
      * This check must match the ones in the iem in iemGetTbFlagsForCurrentPc

@@ -380 +446 @@
  */
 #define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) \
-    RT_NOREF(cbInstr); \
+    RT_NOREF(a_cbInstr); \
     off = iemNativeEmitBltInCheckOpcodes(pReNative, off, (a_pTb), (a_idxRange), (a_offRange))
 

@@ -388 +454 @@
     Assert(idxRange < pTb->cRanges && pTb->cRanges <= RT_ELEMENTS(pTb->aRanges));
     Assert(offRange < pTb->aRanges[idxRange].cbOpcodes);
+#ifdef VBOX_STRICT
+    off = iemNativeEmitMarker(pReNative, off, 0x80000003);
+#endif
 
     uint32_t const idxLabelObsoleteTb = iemNativeLabelCreate(pReNative, kIemNativeLabelType_ObsoleteTb);

@@ -428 +497 @@
         { \
             pbCodeBuf[off++] = 0x74; /* jz near +5 */ \
-            pbCodeBuf[off++] = 0x05; \
+            pbCodeBuf[off++] = 0x05 /*+ 1*/; \
             offConsolidatedJump = off; \
+            /*pbCodeBuf[off++] = 0xcc; */ \
             pbCodeBuf[off++] = 0xe9; /* jmp rel32 */ \
             iemNativeAddFixup(pReNative, off, idxLabelObsoleteTb, kIemNativeFixupType_Rel32, -4); \

@@ -587 +657 @@
 
     if (cbLeft & 4)
-        CHECK_OPCODES_CMPSX(0xa7, 0, 0);                  /* cost: 3 */
+        CHECK_OPCODES_CMPSX(0xa7, 4, 0);                  /* cost: 3 */
     if (cbLeft & 2)
-        CHECK_OPCODES_CMPSX(0xa7, 0, X86_OP_PRF_SIZE_OP); /* cost: 4 */
-    if (cbLeft & 2)
-        CHECK_OPCODES_CMPSX(0xa6, 0, 0);                  /* cost: 3 */
+        CHECK_OPCODES_CMPSX(0xa7, 2, X86_OP_PRF_SIZE_OP); /* cost: 4 */
+    if (cbLeft & 1)
+        CHECK_OPCODES_CMPSX(0xa6, 1, 0);                  /* cost: 3 */
 
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

@@ -867 +937 @@
 
 
+/** Duplicated in IEMAllThrdFuncsBltIn.cpp. */
+DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
+{
+    Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
+    uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
+    Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
+    if (idxPage == 0)
+        return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
+    Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
+    return pTb->aGCPhysPages[idxPage - 1];
+}
+
+
+/**
+ * Macro that implements PC check after a conditional branch.
+ */
+#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) \
+    RT_NOREF(a_cbInstr); \
+    off = iemNativeEmitBltInCheckPcAfterBranch(pReNative, off, a_pTb, a_idxRange, a_offRange)
+
+DECL_FORCE_INLINE(uint32_t)
+iemNativeEmitBltInCheckPcAfterBranch(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb,
+                                     uint8_t idxRange, uint16_t offRange)
+{
+#ifdef VBOX_STRICT
+    off = iemNativeEmitMarker(pReNative, off, 0x80000004);
+#endif
+
+    /*
+     * The GCPhysRangePageWithOffset value in the threaded function is a fixed
+     * constant for us here.
+     *
+     * We can pretend that iem.s.cbInstrBufTotal is X86_PAGE_SIZE here, because
+     * it serves no purpose as a CS.LIM, if that's needed we've just performed
+     * it, and as long as we don't implement code TLB reload code here there is
+     * no point in checking that the TLB data we're using is still valid.
+     *
+     * What we to do is.
+     *      1. Calculate the FLAT PC (RIP + CS.BASE).
+     *      2. Subtract iem.s.uInstrBufPc from it and getting 'off'.
+     *      3. The 'off' must be less than X86_PAGE_SIZE/cbInstrBufTotal or
+     *         we're in the wrong spot and need to find a new TB.
+     *      4. Add 'off' to iem.s.GCPhysInstrBuf and compare with the
+     *         GCPhysRangePageWithOffset constant mentioned above.
+     *
+     * The adding of CS.BASE to RIP can be skipped in the first step if we're
+     * in 64-bit code or flat 32-bit.
+     */
+
+    /* Allocate registers for step 1.  Get the shadowed stuff before allocating
+       the temp register, so we don't accidentally clobber something we'll be
+       needing again immediately.  This is why we get idxRegCsBase here. */
+    uint8_t const idxRegPc     = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
+                                                                 kIemNativeGstRegUse_ReadOnly);
+    uint8_t const idxRegCsBase = IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) ? UINT8_MAX
+                               : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS),
+                                                                 kIemNativeGstRegUse_ReadOnly);
+
+    uint8_t const idxRegTmp    = iemNativeRegAllocTmp(pReNative, &off);
+
+#ifdef VBOX_STRICT
+    /* Do assertions before idxRegTmp contains anything. */
+    Assert(RT_SIZEOFMEMB(VMCPUCC, iem.s.cbInstrBufTotal) == sizeof(uint16_t));
+# ifdef RT_ARCH_AMD64
+    {
+        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8+2+1 + 11+2+1);
+        /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
+        if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
+        {
+            /* cmp r/m64, imm8 */
+            pbCodeBuf[off++] = X86_OP_REX_W;
+            pbCodeBuf[off++] = 0x83;
+            off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 7, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
+            pbCodeBuf[off++] = 0;
+            /* je rel8 */
+            pbCodeBuf[off++] = 0x74;
+            pbCodeBuf[off++] = 1;
+            /* int3 */
+            pbCodeBuf[off++] = 0xcc;
+
+        }
+
+        /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); - done later by the non-x86 code */
+        /* test r/m64, imm32 */
+        pbCodeBuf[off++] = X86_OP_REX_W;
+        pbCodeBuf[off++] = 0xf7;
+        off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 0, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
+        pbCodeBuf[off++] = RT_BYTE1(X86_PAGE_OFFSET_MASK);
+        pbCodeBuf[off++] = RT_BYTE2(X86_PAGE_OFFSET_MASK);
+        pbCodeBuf[off++] = RT_BYTE3(X86_PAGE_OFFSET_MASK);
+        pbCodeBuf[off++] = RT_BYTE4(X86_PAGE_OFFSET_MASK);
+        /* jz rel8 */
+        pbCodeBuf[off++] = 0x74;
+        pbCodeBuf[off++] = 1;
+        /* int3 */
+        pbCodeBuf[off++] = 0xcc;
+        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+    }
+# else
+    off = iemNativeEmitBrk(pReNative, off, 0x1234);
+
+    /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
+    if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
+    {
+        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
+#  ifdef RT_ARCH_ARM64
+        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+        pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 1, idxRegTmp);
+        pu32CodeBuf[off++] = Armv8A64MkInstrBrk(0x2004);
+        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+#  else
+#   error "Port me!"
+#  endif
+    }
+# endif
+
+#endif /* VBOX_STRICT */
+
+    /* 1+2. Calculate 'off' first (into idxRegTmp). */
+    off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.uInstrBufPc));
+    if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
+    {
+#ifdef RT_ARCH_ARM64
+        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegPc, idxRegTmp);
+        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+#else
+        off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
+        off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
+#endif
+    }
+    else
+    {
+#ifdef RT_ARCH_ARM64
+        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+        pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegCsBase, idxRegTmp);
+        pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegPc);
+        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+#else
+        off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
+        off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegCsBase);
+        off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
+#endif
+        iemNativeRegFreeTmp(pReNative, idxRegCsBase);
+    }
+    iemNativeRegFreeTmp(pReNative, idxRegPc);
+
+    /* 3. Check that off is less than X86_PAGE_SIZE/cbInstrBufTotal. */
+    off = iemNativeEmitCmpGprWithImm(pReNative, off, idxRegTmp, X86_PAGE_SIZE - 1);
+    off = iemNativeEmitJaToNewLabel(pReNative, off, kIemNativeLabelType_CheckBranchMiss);
+
+    /* 4. Add iem.s.GCPhysInstrBuf and compare with GCPhysRangePageWithOffset. */
+#ifdef RT_ARCH_AMD64
+    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
+    pbCodeBuf[off++] = idxRegTmp < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R;
+    pbCodeBuf[off++] = 0x03; /* add r64, r/m64 */
+    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+#elif defined(RT_ARCH_ARM64)
+    uint8_t const idxRegTmp2 = iemNativeRegAllocTmp(pReNative, &off);
+
+    off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp2, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
+    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegTmp, idxRegTmp2);
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+# ifdef VBOX_STRICT /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); */
+    off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegTmp, X86_PAGE_OFFSET_MASK, true /*fSetFlags*/);
+    off = iemNativeEmitJzToFixed(pReNative, off, 1);
+    off = iemNativeEmitBrk(pReNative, off, 0x2005);
+# endif
+    iemNativeRegFreeTmp(pReNative, idxRegTmp2);
+#else
+# error "Port me"
+#endif
+
+    RTGCPHYS const GCPhysRangePageWithOffset = (  iemTbGetRangePhysPageAddr(pTb, idxRange)
+                                                | pTb->aRanges[idxRange].offPhysPage)
+                                             + offRange;
+    off = iemNativeEmitTestIfGprNotEqualImmAndJmpToNewLabel(pReNative, off, idxRegTmp, GCPhysRangePageWithOffset,
+                                                            kIemNativeLabelType_CheckBranchMiss);
+
+    iemNativeRegFreeTmp(pReNative, idxRegTmp);
+    return off;
+}
+
+
 #ifdef BODY_CHECK_CS_LIM
 /**

@@ -958 +1216 @@
     BODY_SET_CUR_INSTR();
     BODY_CHECK_CS_LIM(cbInstr);
-    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
+    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     //LogFunc(("okay\n"));

@@ -981 +1239 @@
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
     BODY_SET_CUR_INSTR();
-    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
+    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     //LogFunc(("okay\n"));

@@ -1006 +1264 @@
     BODY_SET_CUR_INSTR();
     BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
-    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
+    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     //LogFunc(("okay\n"));
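For readers tracing the new check: the code emitted by iemNativeEmitBltInCheckPcAfterBranch implements steps 1-4 of the comment above and, on a mismatch, branches to the new CheckBranchMiss label, whose handler calls iemNativeHlpCheckBranchMiss (see IEMAllN8veRecompiler.cpp below) and breaks out of the TB. The following is a minimal stand-alone C sketch of that test, not the emitter itself; the function name and plain-integer parameters are hypothetical stand-ins for the VMCPU fields used by the real code, and MY_PAGE_SIZE stands in for X86_PAGE_SIZE.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MY_PAGE_SIZE 0x1000u /* stand-in for X86_PAGE_SIZE */

    /* Hypothetical helper mirroring steps 1-4 of the emitter comment. */
    static bool CheckPcAfterBranch(uint64_t uRip, uint64_t uCsBase,
                                   uint64_t uInstrBufPc, uint64_t GCPhysInstrBuf,
                                   uint64_t GCPhysRangePageWithOffset)
    {
        uint64_t const uPc = uRip + uCsBase;     /* 1. flat PC (CS.BASE is 0 in 64-bit / flat 32-bit code) */
        uint64_t const off = uPc - uInstrBufPc;  /* 2. offset into the page backing the instruction buffer */
        if (off >= MY_PAGE_SIZE)                 /* 3. left the page -> CheckBranchMiss */
            return false;
        return GCPhysInstrBuf + off == GCPhysRangePageWithOffset; /* 4. physical address compare */
    }

    int main(void)
    {
        /* Branch target 0x22 bytes into a code page mapped at guest-physical 0x7000000. */
        printf("%d\n", CheckPcAfterBranch(0x401022, 0, 0x401000, 0x7000000, 0x7000022)); /* 1: hit */
        printf("%d\n", CheckPcAfterBranch(0x402000, 0, 0x401000, 0x7000000, 0x7000022)); /* 0: off the page */
        return 0;
    }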
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
r102634 → r102663:

@@ -1605 +1605 @@
        the executable memory till we've returned our way back to iemTbExec as
        that return path codes via the native code generated for the TB. */
+    Log7(("TB obsolete: %p at %04x:%08RX64\n", pVCpu->iem.s.pCurTbR3, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
     iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
     return VINF_IEM_REEXEC_BREAK;

@@ -1622 +1623 @@
     return VINF_IEM_REEXEC_BREAK;
 }
+
+
+/**
+ * Used by TB code when we missed a PC check after a branch.
+ */
+IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpCheckBranchMiss,(PVMCPUCC pVCpu))
+{
+    Log7(("TB jmp miss: %p at %04x:%08RX64; GCPhysWithOffset=%RGp, pbInstrBuf=%p\n",
+          pVCpu->iem.s.pCurTbR3, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+          pVCpu->iem.s.GCPhysInstrBuf + pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base - pVCpu->iem.s.uInstrBufPc,
+          pVCpu->iem.s.pbInstrBuf));
+    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses);
+    return VINF_IEM_REEXEC_BREAK;
+}
+
 
 

@@ -2449 +2465 @@
     pReNative->Core.u64ArgVars = UINT64_MAX;
 
-    AssertCompile(RT_ELEMENTS(pReNative->aidxUniqueLabels) == 8);
+    AssertCompile(RT_ELEMENTS(pReNative->aidxUniqueLabels) == 9);
     pReNative->aidxUniqueLabels[0] = UINT32_MAX;
     pReNative->aidxUniqueLabels[1] = UINT32_MAX;

@@ -2458 +2474 @@
     pReNative->aidxUniqueLabels[6] = UINT32_MAX;
     pReNative->aidxUniqueLabels[7] = UINT32_MAX;
+    pReNative->aidxUniqueLabels[8] = UINT32_MAX;
 
     /* Full host register reinit: */

@@ -4850 +4867 @@
 
 /**
+ * Emits the code at the CheckBranchMiss label.
+ */
+static uint32_t iemNativeEmitCheckBranchMiss(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
+{
+    uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_CheckBranchMiss);
+    if (idxLabel != UINT32_MAX)
+    {
+        iemNativeLabelDefine(pReNative, idxLabel, off);
+
+        /* int iemNativeHlpCheckBranchMiss(PVMCPUCC pVCpu) */
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
+        off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpCheckBranchMiss);
+
+        /* jump back to the return sequence. */
+        off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
+    }
+    return off;
+}
+
+
+/**
  * Emits the code at the NeedCsLimChecking label.
  */

@@ -8184 +8222 @@
         if (idxGstTmpReg >= 8 || idxVarReg >= 8)
             pbCodeBuf[off++] = (idxGstTmpReg >= 8 ? X86_OP_REX_R : 0) | (idxVarReg >= 8 ? X86_OP_REX_B : 0);
-        else if (idxGstTmpReg >= 4)
+        else if (idxGstTmpReg >= 4 || idxVarReg >= 4)
             pbCodeBuf[off++] = X86_OP_REX;
         pbCodeBuf[off++] = 0x8a;

@@ -11169 +11207 @@
 {
     AssertReturnVoid((pTb->fFlags & IEMTB_F_TYPE_MASK) == IEMTB_F_TYPE_NATIVE);
+#if defined(RT_ARCH_AMD64)
+    static const char * const a_apszMarkers[] =
+    {
+        "unknown0", "CheckCsLim", "ConsiderLimChecking", "CheckOpcodes", "PcAfterBranch",
+    };
+#endif
 
     char szDisBuf[512];

@@ -11374 +11418 @@
             case kIemNativeLabelType_NeedCsLimChecking:
                 pszName = "NeedCsLimChecking";
+                break;
+            case kIemNativeLabelType_CheckBranchMiss:
+                pszName = "CheckBranchMiss";
                 break;
             case kIemNativeLabelType_If:

@@ -11442 +11489 @@
                                 g_acIemThreadedFunctionUsedArgs[RT_HIWORD(uInfo)],
                                 uInfo & 0x8000 ? "recompiled" : "todo");
+            else if ((uInfo & ~RT_BIT_32(31)) < RT_ELEMENTS(a_apszMarkers))
+                pHlp->pfnPrintf(pHlp, "    %p: nop ; marker: %s\n", pNativeCur, a_apszMarkers[uInfo & ~RT_BIT_32(31)]);
             else
                 pHlp->pfnPrintf(pHlp, "    %p: nop ; unknown marker: %#x (%d)\n", pNativeCur, uInfo, uInfo);

@@ -11572 +11621 @@
                                 g_acIemThreadedFunctionUsedArgs[RT_HIWORD(uInfo)],
                                 uInfo & 0x8000 ? "recompiled" : "todo");
+            else if ((uInfo & ~RT_BIT_32(31)) < RT_ELEMENTS(a_apszMarkers))
+                pHlp->pfnPrintf(pHlp, "    %p: nop ; marker: %s\n", pNativeCur, a_apszMarkers[uInfo & ~RT_BIT_32(31)]);
             else
                 pHlp->pfnPrintf(pHlp, "    %p: nop ; unknown marker: %#x (%d)\n", pNativeCur, uInfo, uInfo);

@@ -11777 +11828 @@
         if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_NeedCsLimChecking))
             off = iemNativeEmitNeedCsLimChecking(pReNative, off, idxReturnLabel);
+        if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_CheckBranchMiss))
+            off = iemNativeEmitCheckBranchMiss(pReNative, off, idxReturnLabel);
     }
     IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rc);
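A note on the new a_apszMarkers table: under VBOX_STRICT the built-in body emitters in IEMAllN8veRecompBltIn.cpp drop markers 0x80000001 (CheckCsLim), 0x80000002 (ConsiderLimChecking), 0x80000003 (CheckOpcodes) and 0x80000004 (PcAfterBranch) into the generated code, and the TB disassembler now resolves them by masking off bit 31 and indexing the table. A small stand-alone sketch of that lookup, with a hypothetical helper name and plain printf instead of the pHlp callback:

    #include <stdint.h>
    #include <stdio.h>

    /* Table copied from the disassembler hunk above. */
    static const char * const s_apszMarkers[] =
    {
        "unknown0", "CheckCsLim", "ConsiderLimChecking", "CheckOpcodes", "PcAfterBranch",
    };

    /* Hypothetical helper: strip bit 31 (~RT_BIT_32(31)) and index the table,
       falling back to "unknown" like the existing else-branch does. */
    static const char *MarkerName(uint32_t uInfo)
    {
        uint32_t const idx = uInfo & ~UINT32_C(0x80000000);
        return idx < sizeof(s_apszMarkers) / sizeof(s_apszMarkers[0]) ? s_apszMarkers[idx] : "unknown";
    }

    int main(void)
    {
        printf("%s\n", MarkerName(UINT32_C(0x80000003))); /* prints "CheckOpcodes" */
        return 0;
    }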
trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp
r102624 → r102663:

@@ -79 +79 @@
     iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
     return VINF_IEM_REEXEC_BREAK;
+}
+
+
+/**
+ * Built-in function that does absolutely nothing - for debugging.
+ *
+ * This can be used for artifically increasing the number of calls generated, or
+ * for triggering flushes associated with threaded calls.
+ */
+IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Nop)
+{
+    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
+    return VINF_SUCCESS;
+}
+
+
+
+/**
+ * This is also called from iemNativeHlpAsmSafeWrapLogCpuState.
+ */
+DECLASM(void) iemThreadedFunc_BltIn_LogCpuStateWorker(PVMCPU pVCpu)
+{
+#ifdef LOG_ENABLED
+    PCIEMTB const      pTb     = pVCpu->iem.s.pCurTbR3;
+    PCX86FXSTATE const pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
+    Log2(("**** LG%c fExec=%x pTb=%p cUsed=%u\n"
+          " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
+          " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
+          " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
+          " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
+          , pTb && (pTb->fFlags & IEMTB_F_TYPE_NATIVE) ? 'n' : 't', pVCpu->iem.s.fExec, pTb, pTb ? pTb->cUsed : 0,
+          pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
+          pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
+          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
+          pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
+          pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK ));
+#else
+    RT_NOREF(pVCpu);
+#endif
+}
+
+
+/**
+ * Built-in function that logs the current CPU state - for debugging.
+ */
+IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_LogCpuState)
+{
+    iemThreadedFunc_BltIn_LogCpuStateWorker(pVCpu);
+    RT_NOREF(uParam0, uParam1, uParam2);
+    return VINF_SUCCESS;
 }
 

@@ -348 +398 @@
  * Macro that implements PC check after a conditional branch.
  */
-#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
+#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
     /* Is RIP within the current code page? */ \
     Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \

@@ -354 +404 @@
     uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
     Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
-    RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
-                                             | pTb->aRanges[(a_idxRange)].offPhysPage; \
+    RTGCPHYS const GCPhysRangePageWithOffset = (  iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
+                                                | (a_pTb)->aRanges[(a_idxRange)].offPhysPage) \
+                                             + (a_offRange); \
     if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
-        && off < pVCpu->iem.s.cbInstrBufTotal) \
+        && off < /*pVCpu->iem.s.cbInstrBufTotal - ignore flushes and CS.LIM is check elsewhere*/ X86_PAGE_SIZE) \
     { /* we're good */ } \
     else \

@@ -450 +501 @@
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
     BODY_CHECK_CS_LIM(cbInstr);
-    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
+    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     //LogFunc(("okay\n"));

@@ -470 +521 @@
     uint32_t const offRange = (uint32_t)uParam2;
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
-    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
+    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     //LogFunc(("okay\n"));

@@ -492 +543 @@
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
     BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
-    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
+    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, offRange, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     //LogFunc(("okay\n"));
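The threaded macro fix above changes two things: the physical address it compares against now includes the new a_offRange offset into the opcode range (previously the constant appears to have matched only the first byte of the range), and the upper bound on 'off' is X86_PAGE_SIZE rather than cbInstrBufTotal, which a TLB flush may have zeroed. A tiny stand-alone sketch with made-up numbers showing how the compared constant changes; only the formula comes from the macro, all values are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t const GCPhysRangePage = UINT64_C(0x07000000); /* iemTbGetRangePhysPageAddr(pTb, idxRange) */
        uint32_t const offPhysPage     = 0x340;                /* pTb->aRanges[idxRange].offPhysPage */
        uint32_t const offRange        = 0x12;                 /* the new a_offRange parameter */

        uint64_t const uOld = GCPhysRangePage | offPhysPage;              /* what the old macro compared against */
        uint64_t const uNew = (GCPhysRangePage | offPhysPage) + offRange; /* r102663 */
        printf("old=%#llx new=%#llx\n", (unsigned long long)uOld, (unsigned long long)uNew);
        return 0;
    }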
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
r102624 → r102663:

@@ -1960 +1960 @@
     ## whether it has a native recompiler implementation.
     katBltIns = (
+        ( 'Nop', 0, True ),
+        ( 'LogCpuState', 0, True ),
+
         ( 'DeferToCImpl0', 2, True ),
         ( 'CheckIrq', 0, True ),

@@ -1970 +1973 @@
         ( 'CheckOpcodesConsiderCsLim', 3, True ),
 
-        ( 'CheckCsLimAndPcAndOpcodes', 3, False ),
-        ( 'CheckPcAndOpcodes', 3, False ),
-        ( 'CheckPcAndOpcodesConsiderCsLim', 3, False ),
+        ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
+        ( 'CheckPcAndOpcodes', 3, True ),
+        ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),
 
         ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, False ),
trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp
r102557 → r102663:

@@ -640 +640 @@
     int cLeft = IEMTBCACHE_PTR_GET_COUNT(pTbCache->apHash[idxHash]);
 #endif
-    Log10(("TB lookup: fFlags=%#x GCPhysPc=%RGp idxHash=%#x: %p L %d\n", fFlags, GCPhysPc, idxHash, pTb, cLeft));
     while (pTb)
     {

@@ -656 +655 @@
 #ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
             if ((pTb->fFlags & IEMTB_F_TYPE_NATIVE) || pTb->cUsed != 16)
+            {
+                Log10(("TB lookup: fFlags=%#x GCPhysPc=%RGp idxHash=%#x: %p (@ %d / %d)\n",
+                       fFlags, GCPhysPc, idxHash, pTb, IEMTBCACHE_PTR_GET_COUNT(pTbCache->apHash[idxHash]) - cLeft,
+                       IEMTBCACHE_PTR_GET_COUNT(pTbCache->apHash[idxHash]) ));
                 return pTb;
+            }
+            Log10(("TB lookup: fFlags=%#x GCPhysPc=%RGp idxHash=%#x: %p (@ %d / %d) - recompiling\n",
+                   fFlags, GCPhysPc, idxHash, pTb, IEMTBCACHE_PTR_GET_COUNT(pTbCache->apHash[idxHash]) - cLeft,
+                   IEMTBCACHE_PTR_GET_COUNT(pTbCache->apHash[idxHash]) ));
             return iemNativeRecompile(pVCpu, pTb);
 #else
+            Log10(("TB lookup: fFlags=%#x GCPhysPc=%RGp idxHash=%#x: %p (@ %d / %d)\n",
+                   fFlags, GCPhysPc, idxHash, pTb, IEMTBCACHE_PTR_GET_COUNT(pTbCache->apHash[idxHash]) - cLeft,
+                   IEMTBCACHE_PTR_GET_COUNT(pTbCache->apHash[idxHash]) ));
             return pTb;
 #endif

@@ -677 +687 @@
     AssertMsg(cLeft == 0, ("%d\n", cLeft));
     STAM_REL_COUNTER_INC(&pTbCache->cLookupMisses);
+    Log10(("TB lookup: fFlags=%#x GCPhysPc=%RGp idxHash=%#x: NULL - (%p L %d)\n", fFlags, GCPhysPc, idxHash,
+           IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]), IEMTBCACHE_PTR_GET_COUNT(pTbCache->apHash[idxHash]) ));
     return pTb;
 }

@@ -1483 +1495 @@
               szInstr));
 
-        if (LogIs3Enabled())
-            DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
+        /*if (LogIs3Enabled()) - this outputs an insane amount of stuff, so disabled.
+            DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL); */
     }
     else

@@ -1660 +1672 @@
 }
 
+#ifdef LOG_ENABLED
+
+/**
+ * Inserts a NOP call.
+ *
+ * This is for debugging.
+ *
+ * @returns true on success, false if we're out of call entries.
+ * @param   pTb         The translation block being compiled.
+ */
+bool iemThreadedCompileEmitNop(PIEMTB pTb)
+{
+    /* Emit the call. */
+    uint32_t const idxCall = pTb->Thrd.cCalls;
+    AssertReturn(idxCall < pTb->Thrd.cAllocated, false);
+    PIEMTHRDEDCALLENTRY pCall = &pTb->Thrd.paCalls[idxCall];
+    pTb->Thrd.cCalls = (uint16_t)(idxCall + 1);
+    pCall->enmFunction = kIemThreadedFunc_BltIn_Nop;
+    pCall->idxInstr    = pTb->cInstructions - 1;
+    pCall->uUnused0    = 0;
+    pCall->offOpcode   = 0;
+    pCall->cbOpcode    = 0;
+    pCall->idxRange    = 0;
+    pCall->auParams[0] = 0;
+    pCall->auParams[1] = 0;
+    pCall->auParams[2] = 0;
+    return true;
+}
+
+
+/**
+ * Called by iemThreadedCompile if cpu state logging is desired.
+ *
+ * @returns true on success, false if we're out of call entries.
+ * @param   pTb         The translation block being compiled.
+ */
+bool iemThreadedCompileEmitLogCpuState(PIEMTB pTb)
+{
+    /* Emit the call. */
+    uint32_t const idxCall = pTb->Thrd.cCalls;
+    AssertReturn(idxCall < pTb->Thrd.cAllocated, false);
+    PIEMTHRDEDCALLENTRY pCall = &pTb->Thrd.paCalls[idxCall];
+    pTb->Thrd.cCalls = (uint16_t)(idxCall + 1);
+    pCall->enmFunction = kIemThreadedFunc_BltIn_LogCpuState;
+    pCall->idxInstr    = pTb->cInstructions - 1;
+    pCall->uUnused0    = 0;
+    pCall->offOpcode   = 0;
+    pCall->cbOpcode    = 0;
+    pCall->idxRange    = 0;
+    pCall->auParams[0] = RT_MAKE_U16(pCall->idxInstr, idxCall); /* currently not used, but whatever */
+    pCall->auParams[1] = 0;
+    pCall->auParams[2] = 0;
+    return true;
+}
+
+#endif /* LOG_ENABLED */
 
 DECLINLINE(void) iemThreadedCopyOpcodeBytesInline(PCVMCPUCC pVCpu, uint8_t *pbDst, uint8_t cbInstr)

@@ -2294 +2362 @@
             else
                 break;
+
+#if defined(LOG_ENABLED) && 0 /* for debugging */
+            iemThreadedCompileEmitNop(pTb);
+            iemThreadedCompileEmitLogCpuState(pTb);
+#endif
         }
         else
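The Nop/LogCpuState threaded calls and the iemThreadedCompileEmit* helpers above are only built when LOG_ENABLED is defined, and the emission site added to the compile loop is additionally gated with '&& 0'. To actually use them in a local debugging build one would presumably flip that gate, e.g.:

    #if defined(LOG_ENABLED) && 1 /* was '&& 0' in the changeset; emits the debug calls in the compile loop */
                iemThreadedCompileEmitNop(pTb);
                iemThreadedCompileEmitLogCpuState(pTb);
    #endif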
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r101539 → r102663:

@@ -964 +964 @@
 
     void const *pvSharedPage = NULL;
-    if (PGM_PAGE_IS_SHARED(pPage))
+    if (!PGM_PAGE_IS_SHARED(pPage))
+    {
+        Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
+        STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
+        pVM->pgm.s.cZeroPages--;
+    }
+    else
     {
         /* Mark this shared page for freeing/dereferencing. */

@@ -978 +984 @@
         rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
         AssertRC(rc);
-    }
-    else
-    {
-        Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
-        STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
-        pVM->pgm.s.cZeroPages--;
     }
 

@@ -995 +995 @@
     PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
     pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
-    IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_ALLOCATED);
+    IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID,
+                                       !pvSharedPage
+                                       ? IEMTLBPHYSFLUSHREASON_ALLOCATED : IEMTLBPHYSFLUSHREASON_ALLOCATED_FROM_SHARED);
 
     /* Copy the shared page contents to the replacement page. */
-    if (pvSharedPage)
+    if (!pvSharedPage)
+    { /* likely */ }
+    else
     {
         /* Get the virtual address of the new page. */