Changeset 102603 in vbox
Timestamp:  Dec 14, 2023 11:06:41 PM
Location:   trunk/src/VBox/VMM
Files:      3 edited

trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
(r102593 → r102603)

@@ -1595 +1595 @@

+/**
+ * Used by TB code when it detects that the TB is obsolete.
+ * @see iemThreadeFuncWorkerObsoleteTb
+ */
+IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpObsoleteTb,(PVMCPUCC pVCpu))
+{
+    /* We set fSafeToFree to false because we're being called in the context
+       of a TB callback function, which for native TBs means we cannot release
+       the executable memory until we've returned all the way back to iemTbExec,
+       as that return path goes via the native code generated for the TB. */
+    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
+    return VINF_IEM_REEXEC_BREAK;
+}
+
+
 /*********************************************************************************************************************************
 *   Helpers: Segmented memory fetches and stores.                                                                               *
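The fSafeToFree = false contract is the important bit of this helper: it runs on the very executable pages it is asking to retire, so the memory can only be reclaimed once the dispatcher has returned off the TB's code. A minimal self-contained sketch of that deferral pattern (the TB struct and the tbObsolete/tbReapPendingFree names are invented for illustration, not the VBox API):

    #include <stdbool.h>
    #include <stdlib.h>

    /* Illustrative translation-block record; not the VBox IEMTB layout. */
    typedef struct TB { void *pvExec; bool fPendingFree; } TB;
    static TB *g_pTbPendingFree = NULL;

    static void tbObsolete(TB *pTb, bool fSafeToFree)
    {
        if (fSafeToFree)
            free(pTb->pvExec);          /* nobody is executing this memory */
        else
        {
            /* Called from the TB's own generated code: freeing now would pull
               the instructions out from under us, so park it instead. */
            pTb->fPendingFree = true;
            g_pTbPendingFree  = pTb;
        }
    }

    /* The dispatcher calls this after returning off the TB's code pages. */
    static void tbReapPendingFree(void)
    {
        if (g_pTbPendingFree)
        {
            free(g_pTbPendingFree->pvExec);
            g_pTbPendingFree = NULL;
        }
    }

Returning VINF_IEM_REEXEC_BREAK plays the same role as tbReapPendingFree's caller here: it forces execution back out to the dispatcher before anything else touches the TB.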
3365 */ 3366 uint8_t idxReg; 3367 uint32_t fRegs = ~pReNative->Core.bmHstRegs 3368 & ~pReNative->Core.bmHstRegsWithGstShadow 3369 & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK) 3370 & fRegMask; 3371 if (fRegs) 3372 { 3373 if (fPreferVolatile) 3374 idxReg = (uint8_t)ASMBitFirstSetU32( fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK 3375 ? fRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1; 3376 else 3377 idxReg = (uint8_t)ASMBitFirstSetU32( fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK 3378 ? fRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK : fRegs) - 1; 3379 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0); 3380 Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg))); 3381 } 3382 else 3383 { 3384 idxReg = iemNativeRegAllocFindFree(pReNative, poff, fPreferVolatile, fRegMask); 3320 3385 AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_TMP)); 3321 3386 } … … 3339 3404 * @param uImm The immediate value that the register must hold upon 3340 3405 * return. 3341 * @param fPreferVolatile Whet er to prefer volatile over non-volatile3406 * @param fPreferVolatile Whether to prefer volatile over non-volatile 3342 3407 * registers (@c true, default) or the other way around 3343 3408 * (@c false). … … 4765 4830 off = iemNativeEmitCheckCallRetAndPassUp(pReNative, off, pCallEntry->idxInstr); 4766 4831 4832 return off; 4833 } 4834 4835 4836 /** 4837 * Emits the code at the ObsoleteTb label. 4838 */ 4839 static uint32_t iemNativeEmitObsoleteTb(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel) 4840 { 4841 uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_ObsoleteTb); 4842 if (idxLabel != UINT32_MAX) 4843 { 4844 iemNativeLabelDefine(pReNative, idxLabel, off); 4845 4846 /* int iemNativeHlpObsoleteTb(PVMCPUCC pVCpu) */ 4847 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU); 4848 off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpObsoleteTb); 4849 4850 /* jump back to the return sequence. */ 4851 off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel); 4852 } 4767 4853 return off; 4768 4854 } … … 11045 11131 11046 11132 /** 11133 * Sets idxTbCurInstr in preparation of raising an exception. 11134 */ 11135 /** @todo Optimize this, so we don't set the same value more than once. Just 11136 * needs some tracking. */ 11137 #ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING 11138 # define BODY_SET_CUR_INSTR() \ 11139 off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, pCallEntry->idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr)) 11140 #else 11141 # define BODY_SET_CUR_INSTR() ((void)0) 11142 #endif 11143 11144 11145 /** 11047 11146 * Macro that emits the 16/32-bit CS.LIM check. 11048 11147 */ 11049 11148 #define BODY_CHECK_CS_LIM(a_cbInstr) \ 11050 off = iemNativeEmitBltInCheckCsLim(pReNative, off, (a_cbInstr) , pCallEntry->idxInstr)11149 off = iemNativeEmitBltInCheckCsLim(pReNative, off, (a_cbInstr)) 11051 11150 11052 11151 DECL_FORCE_INLINE(uint32_t) 11053 iemNativeEmitBltInCheckCsLim(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr , uint8_t idxInstr)11152 iemNativeEmitBltInCheckCsLim(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr) 11054 11153 { 11055 11154 Assert(cbInstr > 0); 11056 11155 Assert(cbInstr < 16); 11057 11058 /* Before we start, update the instruction number in case we raise an exception. 
@@ -4765 +4830 @@
     off = iemNativeEmitCheckCallRetAndPassUp(pReNative, off, pCallEntry->idxInstr);

+    return off;
+}
+
+
+/**
+ * Emits the code at the ObsoleteTb label.
+ */
+static uint32_t iemNativeEmitObsoleteTb(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
+{
+    uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_ObsoleteTb);
+    if (idxLabel != UINT32_MAX)
+    {
+        iemNativeLabelDefine(pReNative, idxLabel, off);
+
+        /* int iemNativeHlpObsoleteTb(PVMCPUCC pVCpu) */
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
+        off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpObsoleteTb);
+
+        /* jump back to the return sequence. */
+        off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
+    }
     return off;
 }

@@ -11045 +11131 @@

 /**
+ * Sets idxTbCurInstr in preparation for raising an exception.
+ */
+/** @todo Optimize this, so we don't set the same value more than once.  Just
+ *        needs some tracking. */
+#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
+# define BODY_SET_CUR_INSTR() \
+    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, pCallEntry->idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr))
+#else
+# define BODY_SET_CUR_INSTR() ((void)0)
+#endif
+
+
+/**
  * Macro that emits the 16/32-bit CS.LIM check.
  */
 #define BODY_CHECK_CS_LIM(a_cbInstr) \
-    off = iemNativeEmitBltInCheckCsLim(pReNative, off, (a_cbInstr), pCallEntry->idxInstr)
+    off = iemNativeEmitBltInCheckCsLim(pReNative, off, (a_cbInstr))

 DECL_FORCE_INLINE(uint32_t)
-iemNativeEmitBltInCheckCsLim(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, uint8_t idxInstr)
+iemNativeEmitBltInCheckCsLim(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
 {
     Assert(cbInstr > 0);
     Assert(cbInstr < 16);
-
-    /* Before we start, update the instruction number in case we raise an exception. */
-#ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
-    off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
-#else
-    RT_NOREF(idxInstr);
-#endif

     /*

@@ -11080 +11172 @@
                                                               kIemNativeGstRegUse_ReadOnly);
 #ifdef RT_ARCH_AMD64
-    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8 + 1);
+    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
 #elif defined(RT_ARCH_ARM64)
-    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
 #else
 # error "Port me"
 #endif
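The @todo on BODY_SET_CUR_INSTR asks for tracking so the same idxTbCurInstr value is not stored repeatedly. A minimal sketch of what such tracking could look like (RECOMPSTATE and idxInstrLastStored are hypothetical; no such member exists in IEMRECOMPILERSTATE in this changeset):

    #include <stdint.h>

    /* Hypothetical cache bolted onto the recompiler state. */
    typedef struct RECOMPSTATE
    {
        uint16_t idxInstrLastStored;    /* UINT16_MAX = nothing stored yet */
    } RECOMPSTATE;

    static uint32_t emitSetCurInstr(RECOMPSTATE *pState, uint32_t off, uint8_t idxInstr)
    {
        if (pState->idxInstrLastStored == idxInstr)
            return off;                 /* value already in iem.s.idxTbCurInstr */
        pState->idxInstrLastStored = idxInstr;
        /* ... emit the iemNativeEmitStoreImmToVCpuU8() store here ... */
        return off;
    }

Any such cache would have to be invalidated wherever control can enter sideways (labels and jump targets), which is presumably the "tracking" the todo has in mind.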
@@ -11149 +11241 @@
 }

+
+/**
+ * Macro that implements opcode (re-)checking.
+ */
+#define BODY_CHECK_OPCODES_DISABLED(a_pTb, a_idxRange, a_offRange, a_cbInstr) \
+    off = iemNativeEmitBltInCheckOpcodes(pReNative, off, (a_pTb), (a_idxRange), (a_offRange))
+
+DECL_FORCE_INLINE(uint32_t)
+iemNativeEmitBltInCheckOpcodes(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb, uint8_t idxRange, uint16_t offRange)
+{
+    Assert(idxRange < pTb->cRanges && pTb->cRanges <= RT_ELEMENTS(pTb->aRanges));
+    Assert(offRange < pTb->aRanges[idxRange].cbOpcodes);
+
+    uint32_t const idxLabelObsoleteTb = iemNativeLabelCreate(pReNative, kIemNativeLabelType_ObsoleteTb);
+
+    /*
+     * Where to start and how much to compare.
+     *
+     * Looking at the ranges produced when r160746 was running a DOS VM with TB
+     * logging, the ranges can be anything from 1 byte to at least 0x197 bytes,
+     * with 6, 5, 4, 7, 8, 40, 3, 2, 9 and 10 being the top 10 in the sample.
+     *
+     * The top 10 for the early boot phase of a 64-bit debian 9.4 VM: 5, 9, 8,
+     * 12, 10, 11, 6, 13, 15 and 16.  Max 0x359 bytes.  Same revision as above.
+     */
+    uint16_t        offPage   = pTb->aRanges[idxRange].offPhysPage + offRange;
+    uint16_t        cbLeft    = pTb->aRanges[idxRange].cbOpcodes - offRange;
+    uint8_t const  *pbOpcodes = &pTb->pabOpcodes[pTb->aRanges[idxRange].offOpcodes];
+    uint32_t        offConsolidatedJump = UINT32_MAX;
+
+#ifdef RT_ARCH_AMD64
+    /* AMD64/x86 offers a bunch of options.  Smaller stuff can be completely
+       inlined, for larger we use REPE CMPS. */
+# define CHECK_OPCODES_CMP_IMMXX(a_idxReg, a_bOpcode) /* cost: 3 bytes */  do { \
+            pbCodeBuf[off++] = a_bOpcode; \
+            Assert(offPage < 127); \
+            pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, 7, a_idxReg); \
+            pbCodeBuf[off++] = RT_BYTE1(offPage); \
+        } while (0)
+
+# define CHECK_OPCODES_CMP_JMP() /* cost: 7 bytes first time, then 2 bytes */ do { \
+            if (offConsolidatedJump != UINT32_MAX) \
+            { \
+                int32_t const offDisp = (int32_t)offConsolidatedJump - (int32_t)(off + 2); \
+                Assert(offDisp >= -128); \
+                pbCodeBuf[off++] = 0x75; /* jnz near */ \
+                pbCodeBuf[off++] = (uint8_t)offDisp; \
+            } \
+            else \
+            { \
+                pbCodeBuf[off++] = 0x74; /* jz near +5 */ \
+                pbCodeBuf[off++] = 0x05; \
+                offConsolidatedJump = off; \
+                pbCodeBuf[off++] = 0xe9; /* jmp rel32 */ \
+                iemNativeAddFixup(pReNative, off, idxLabelObsoleteTb, kIemNativeFixupType_Rel32, -4); \
+                pbCodeBuf[off++] = 0x00; \
+                pbCodeBuf[off++] = 0x00; \
+                pbCodeBuf[off++] = 0x00; \
+                pbCodeBuf[off++] = 0x00; \
+            } \
+        } while (0)
+
+# define CHECK_OPCODES_CMP_IMM32(a_idxReg) /* cost: 3+4+2 = 9 */ do { \
+            CHECK_OPCODES_CMP_IMMXX(a_idxReg, 0x81); \
+            pbCodeBuf[off++] = *pbOpcodes++; \
+            pbCodeBuf[off++] = *pbOpcodes++; \
+            pbCodeBuf[off++] = *pbOpcodes++; \
+            pbCodeBuf[off++] = *pbOpcodes++; \
+            cbLeft  -= 4; \
+            offPage += 4; \
+            CHECK_OPCODES_CMP_JMP(); \
+        } while (0)
+
+# define CHECK_OPCODES_CMP_IMM16(a_idxReg) /* cost: 1+3+2+2 = 8 */ do { \
+            pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP; \
+            CHECK_OPCODES_CMP_IMMXX(a_idxReg, 0x81); \
+            pbCodeBuf[off++] = *pbOpcodes++; \
+            pbCodeBuf[off++] = *pbOpcodes++; \
+            cbLeft  -= 2; \
+            offPage += 2; \
+            CHECK_OPCODES_CMP_JMP(); \
+        } while (0)
+
+# define CHECK_OPCODES_CMP_IMM8(a_idxReg) /* cost: 3+1+2 = 6 */ do { \
+            CHECK_OPCODES_CMP_IMMXX(a_idxReg, 0x80); \
+            pbCodeBuf[off++] = *pbOpcodes++; \
+            cbLeft  -= 1; \
+            offPage += 1; \
+            CHECK_OPCODES_CMP_JMP(); \
+        } while (0)
+
+# define CHECK_OPCODES_CMPSX(a_bOpcode, a_cbToSubtract, a_bPrefix) /* cost: 2+2 = 4 */ do { \
+            if (a_bPrefix) \
+                pbCodeBuf[off++] = (a_bPrefix); \
+            pbCodeBuf[off++] = (a_bOpcode); \
+            CHECK_OPCODES_CMP_JMP(); \
+            cbLeft -= (a_cbToSubtract); \
+        } while (0)
+
+# define CHECK_OPCODES_ECX_IMM(a_uValue) /* cost: 5 */ do { \
+            pbCodeBuf[off++] = 0xb8 + X86_GREG_xCX; \
+            pbCodeBuf[off++] = RT_BYTE1(a_uValue); \
+            pbCodeBuf[off++] = RT_BYTE2(a_uValue); \
+            pbCodeBuf[off++] = RT_BYTE3(a_uValue); \
+            pbCodeBuf[off++] = RT_BYTE4(a_uValue); \
+        } while (0)
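A brief aside on the jump plumbing before the hunk continues: CHECK_OPCODES_CMP_JMP's first expansion plants a jz over a jmp rel32 to the ObsoleteTb label and remembers its position in offConsolidatedJump; every later expansion just emits a 2-byte jnz back to that shared jmp. The rel32 displacement itself is zero until the label is defined and the fixup is applied. A simplified model of how a Rel32 fixup with the -4 addend resolves (applyRel32Fixup is invented; it is not the IEMNATIVEFIXUP record or API):

    #include <stdint.h>
    #include <string.h>

    /* Patch the 4 displacement bytes at 'off' so the jmp rel32 whose
       displacement field starts there lands on 'offTarget'. */
    static void applyRel32Fixup(uint8_t *pbCode, uint32_t off,
                                uint32_t offTarget, int8_t iAddend)
    {
        /* rel32 is relative to the end of the displacement field (off + 4);
           the -4 addend passed to iemNativeAddFixup above folds that bias
           into the fixup record. */
        int32_t const offDisp = (int32_t)offTarget - (int32_t)off + iAddend;
        memcpy(&pbCode[off], &offDisp, sizeof(offDisp));
    }

The consolidation trades 5 bytes per additional mismatch check for a single out-of-line jmp, which is why the per-macro cost comments count the jump at 2 bytes after the first use.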
+
+    if (cbLeft <= 24)
+    {
+        uint8_t const idxRegTmp = iemNativeRegAllocTmpEx(pReNative, &off,
+                                                         (  RT_BIT_32(X86_GREG_xAX)
+                                                          | RT_BIT_32(X86_GREG_xCX)
+                                                          | RT_BIT_32(X86_GREG_xDX)
+                                                          | RT_BIT_32(X86_GREG_xBX)
+                                                          | RT_BIT_32(X86_GREG_xSI)
+                                                          | RT_BIT_32(X86_GREG_xDI))
+                                                         & ~IEMNATIVE_REG_FIXED_MASK); /* pick reg not requiring rex prefix */
+        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.pbInstrBuf));
+        if (offPage >= 128 - cbLeft)
+        {
+            off = iemNativeEmitAddGprImm(pReNative, off, idxRegTmp, offPage & ~(uint16_t)3);
+            offPage &= 3;
+        }
+
+        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5 + 14 + 54 + 8 + 6 /* = 87 */);
+
+        if (cbLeft > 8)
+            switch (offPage & 3)
+            {
+                case 0:
+                    break;
+                case 1:     /* cost: 6 + 8 = 14 */
+                    CHECK_OPCODES_CMP_IMM8(idxRegTmp);
+                    RT_FALL_THRU();
+                case 2:     /* cost: 8 */
+                    CHECK_OPCODES_CMP_IMM16(idxRegTmp);
+                    break;
+                case 3:     /* cost: 6 */
+                    CHECK_OPCODES_CMP_IMM8(idxRegTmp);
+                    break;
+            }
+
+        while (cbLeft >= 4)
+            CHECK_OPCODES_CMP_IMM32(idxRegTmp);     /* max iteration: 24/4 = 6; --> cost: 6 * 9 = 54 */
+
+        if (cbLeft >= 2)
+            CHECK_OPCODES_CMP_IMM16(idxRegTmp);     /* cost: 8 */
+        if (cbLeft)
+            CHECK_OPCODES_CMP_IMM8(idxRegTmp);      /* cost: 6 */
+
+        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+        iemNativeRegFreeTmp(pReNative, idxRegTmp);
+    }
+    else
+    {
+        /* RDI = &pbInstrBuf[offPage] */
+        uint8_t const idxRegDi = iemNativeRegAllocTmpEx(pReNative, &off, RT_BIT_32(X86_GREG_xDI));
+        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegDi, RT_UOFFSETOF(VMCPU, iem.s.pbInstrBuf));
+        if (offPage != 0)
+            off = iemNativeEmitAddGprImm(pReNative, off, idxRegDi, offPage);
+
+        /* RSI = pbOpcodes */
+        uint8_t const idxRegSi = iemNativeRegAllocTmpEx(pReNative, &off, RT_BIT_32(X86_GREG_xSI));
+        off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegSi, (uintptr_t)pbOpcodes);
+
+        /* RCX = counts. */
+        uint8_t const idxRegCx = iemNativeRegAllocTmpEx(pReNative, &off, RT_BIT_32(X86_GREG_xCX));
+
+        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5 + 10 + 5 + 5 + 3 + 4 + 3 /*= 35*/);
+
+        /** @todo profile and optimize this further.  Maybe an idea to align by
+         *        offPage if the two cannot be reconciled. */
+        /* Align by the page offset, so that at least one of the accesses is naturally aligned. */
+        switch (offPage & 7)                        /* max cost: 10 */
+        {
+            case 0:
+                break;
+            case 1:     /* cost: 3+4+3 = 10 */
+                CHECK_OPCODES_CMPSX(0xa6, 1, 0);
+                RT_FALL_THRU();
+            case 2:     /* cost: 4+3 = 7 */
+                CHECK_OPCODES_CMPSX(0xa7, 2, X86_OP_PRF_SIZE_OP);
+                CHECK_OPCODES_CMPSX(0xa7, 4, 0);
+                break;
+            case 3:     /* cost: 3+3 = 6 */
+                CHECK_OPCODES_CMPSX(0xa6, 1, 0);
+                RT_FALL_THRU();
+            case 4:     /* cost: 3 */
+                CHECK_OPCODES_CMPSX(0xa7, 4, 0);
+                break;
+            case 5:     /* cost: 3+4 = 7 */
+                CHECK_OPCODES_CMPSX(0xa6, 1, 0);
+                RT_FALL_THRU();
+            case 6:     /* cost: 4 */
+                CHECK_OPCODES_CMPSX(0xa7, 2, X86_OP_PRF_SIZE_OP);
+                break;
+            case 7:     /* cost: 3 */
+                CHECK_OPCODES_CMPSX(0xa6, 1, 0);
+                break;
+        }
+
+        /* Compare qwords: */
+        uint32_t const cQWords = cbLeft >> 3;
+        CHECK_OPCODES_ECX_IMM(cQWords);             /* cost: 5 */
+
+        pbCodeBuf[off++] = X86_OP_PRF_REPZ;         /* cost: 5 */
+        CHECK_OPCODES_CMPSX(0xa7, 0, X86_OP_REX_W);
+        cbLeft &= 7;
+
+        if (cbLeft & 4)
+            CHECK_OPCODES_CMPSX(0xa7, 0, 0);        /* cost: 3 */
+        if (cbLeft & 2)
+            CHECK_OPCODES_CMPSX(0xa7, 0, X86_OP_PRF_SIZE_OP); /* cost: 4 */
+        if (cbLeft & 1)
+            CHECK_OPCODES_CMPSX(0xa6, 0, 0);        /* cost: 3 */
+
+        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+        iemNativeRegFreeTmp(pReNative, idxRegCx);
+        iemNativeRegFreeTmp(pReNative, idxRegSi);
+        iemNativeRegFreeTmp(pReNative, idxRegDi);
+    }
+
+#elif defined(RT_ARCH_ARM64)
+    uint8_t const idxRegTmp = iemNativeRegAllocTmp(pReNative, &off);
+    off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPU, iem.s.pbInstrBuf));
+# if 0
+
+    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    /** @todo continue here */
+# else
+    AssertReleaseFailed();
+    RT_NOREF(pReNative, off, pTb, idxRange, offRange);
+# endif
+    iemNativeRegFreeTmp(pReNative, idxRegTmp);
+#else
+# error "Port me"
+#endif
+    return off;
+}
+
+
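Stripped of encoding detail, both paths of the function above compute the same predicate: do the cbLeft opcode bytes at pbInstrBuf + offPage still match the bytes recorded when the TB was compiled? A behavioral C model of the ≤ 24 byte path, including the 1/2-byte peeling that aligns the dword compares (tbOpcodesStillValid is invented for the sketch; the REPE CMPS path computes the same thing in 8-byte blocks plus a 4/2/1-byte tail and peels to 8-byte alignment instead):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Behavioral model of the emitted inline check. */
    static bool tbOpcodesStillValid(uint8_t const *pbInstrBuf, uint16_t offPage,
                                    uint8_t const *pbOpcodes, uint16_t cbLeft)
    {
        uint8_t const *pbCur = &pbInstrBuf[offPage];
        if (cbLeft > 8)
            switch (offPage & 3)   /* peel 1-3 bytes so dword compares are aligned */
            {
                case 1:
                    if (*pbCur++ != *pbOpcodes++) return false;
                    cbLeft -= 1;
                    /* fall through */
                case 2:
                    if (memcmp(pbCur, pbOpcodes, 2)) return false;
                    pbCur += 2; pbOpcodes += 2; cbLeft -= 2;
                    break;
                case 3:
                    if (*pbCur++ != *pbOpcodes++) return false;
                    cbLeft -= 1;
                    break;
            }
        for (; cbLeft >= 4; pbCur += 4, pbOpcodes += 4, cbLeft -= 4)
            if (memcmp(pbCur, pbOpcodes, 4)) return false;
        if (cbLeft >= 2)
        {
            if (memcmp(pbCur, pbOpcodes, 2)) return false;
            pbCur += 2; pbOpcodes += 2; cbLeft -= 2;
        }
        return !cbLeft || *pbCur == *pbOpcodes;
    }

In the generated code every failed compare branches to the consolidated ObsoleteTb jump instead of returning false, which is where iemNativeHlpObsoleteTb from the first hunk takes over.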
@@ -11151 +11484 @@
 #ifdef BODY_CHECK_CS_LIM
 /**
 …
 {
     uint32_t const cbInstr = (uint32_t)pCallEntry->auParams[0];
+    BODY_SET_CUR_INSTR();
     BODY_CHECK_CS_LIM(cbInstr);
     return off;
 }

@@ -11169 +11503 @@
  */
 static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodes)
-{
-    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
-    uint32_t const cbInstr  = (uint32_t)pCallEntry->auParams[0];
-    uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
-    uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
-    BODY_CHECK_CS_LIM(cbInstr);
-    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
-    return off;
-}
-#endif
-
-
-#if defined(BODY_CHECK_OPCODES)
-/**
- * Built-in function for re-checking opcodes after an instruction that may have
- * modified them.
- */
-static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodes)
 {
     PCIEMTB const  pTb      = pReNative->pTbOrg;
     uint32_t const cbInstr  = (uint32_t)pCallEntry->auParams[0];
     uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
     uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
+    BODY_SET_CUR_INSTR();
+    BODY_CHECK_CS_LIM(cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     return off;
 }
 #endif

@@ -11200 +11518 @@
-#if defined(BODY_CHECK_OPCODES) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
-/**
- * Built-in function for re-checking opcodes and considering the need for CS.LIM
- * checking after an instruction that may have modified them.
- */
-static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesConsiderCsLim)
+#if defined(BODY_CHECK_OPCODES)
+/**
+ * Built-in function for re-checking opcodes after an instruction that may have
+ * modified them.
+ */
+static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodes)
 {
     PCIEMTB const  pTb      = pReNative->pTbOrg;
 …
     uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
     uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
+    BODY_SET_CUR_INSTR();
+    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
+    return off;
+}
+#endif
+
+
+#if defined(BODY_CHECK_OPCODES) && defined(BODY_CONSIDER_CS_LIM_CHECKING)
+/**
+ * Built-in function for re-checking opcodes and considering the need for CS.LIM
+ * checking after an instruction that may have modified them.
+ */
+static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_BltIn_CheckOpcodesConsiderCsLim)
+{
+    PCIEMTB const  pTb      = pReNative->pTbOrg;
+    uint32_t const cbInstr  = (uint32_t)pCallEntry->auParams[0];
+    uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
+    uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
+    BODY_SET_CUR_INSTR();
     BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
@@ -11234 +11571 @@
     uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
+    BODY_SET_CUR_INSTR();
     BODY_CHECK_CS_LIM(cbInstr);
     BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);

@@ -11257 +11595 @@
     uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
+    BODY_SET_CUR_INSTR();
     BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);

@@ -11280 +11619 @@
     uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
+    BODY_SET_CUR_INSTR();
     BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
     BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);

@@ -11306 +11646 @@
     uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
+    BODY_SET_CUR_INSTR();
     BODY_CHECK_CS_LIM(cbInstr);
     BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);

@@ -11332 +11673 @@
     uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
+    BODY_SET_CUR_INSTR();
     BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);

@@ -11357 +11699 @@
     uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
     //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
+    BODY_SET_CUR_INSTR();
     BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
     BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);

@@ -11390 +11733 @@
     uint32_t const offRange1   = (uint32_t)pCallEntry->auParams[2];
     uint32_t const idxRange2   = idxRange1 + 1;
+    BODY_SET_CUR_INSTR();
     BODY_CHECK_CS_LIM(cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);

@@ -11418 +11762 @@
     uint32_t const offRange1   = (uint32_t)pCallEntry->auParams[2];
     uint32_t const idxRange2   = idxRange1 + 1;
+    BODY_SET_CUR_INSTR();
     BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
     BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);

@@ -11446 +11791 @@
     uint32_t const offRange1   = (uint32_t)pCallEntry->auParams[2];
     uint32_t const idxRange2   = idxRange1 + 1;
+    BODY_SET_CUR_INSTR();
     BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);

@@ -11472 +11818 @@
     //uint32_t const offRange1   = (uint32_t)uParam2;
     uint32_t const idxRange2   = idxRange1 + 1;
+    BODY_SET_CUR_INSTR();
     BODY_CHECK_CS_LIM(cbInstr);
     BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);

@@ -11497 +11844 @@
     //uint32_t const offRange1   = (uint32_t)pCallEntry->auParams[2];
     uint32_t const idxRange2   = idxRange1 + 1;
+    BODY_SET_CUR_INSTR();
     BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
     BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);

@@ -11521 +11869 @@
     //uint32_t const offRange1   = (uint32_t)pCallEntry->auParams[2];
     uint32_t const idxRange2   = idxRange1 + 1;
+    BODY_SET_CUR_INSTR();
     BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
     BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);

@@ -11541 +11890 @@
     uint32_t const cbInstr  = (uint32_t)pCallEntry->auParams[0];
     uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
+    BODY_SET_CUR_INSTR();
     BODY_CHECK_CS_LIM(cbInstr);
     BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);

@@ -11562 +11912 @@
     uint32_t const cbInstr  = (uint32_t)pCallEntry->auParams[0];
     uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
+    BODY_SET_CUR_INSTR();
     BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
     //Assert(pVCpu->iem.s.offCurInstrStart == 0);

@@ -11583 +11934 @@
     uint32_t const cbInstr  = (uint32_t)pCallEntry->auParams[0];
     uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
+    BODY_SET_CUR_INSTR();
     BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
     BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);

@@ -11917 +12269 @@
             case kIemNativeLabelType_RaiseGp0:
                 pszName = "RaiseGp0";
+                break;
+            case kIemNativeLabelType_ObsoleteTb:
+                pszName = "ObsoleteTb";
                 break;
             case kIemNativeLabelType_If:
@@ -12316 +12671 @@
         if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_RaiseGp0))
             off = iemNativeEmitRaiseGp0(pReNative, off, idxReturnLabel);
+        if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_ObsoleteTb))
+            off = iemNativeEmitObsoleteTb(pReNative, off, idxReturnLabel);
     }
     IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rc);
trunk/src/VBox/VMM/include/IEMInternal.h
(r102585 → r102603)

@@ -1362 +1362 @@
     /** @name Decoder state.
      * @{ */
-#ifndef IEM_WITH_OPAQUE_DECODER_STATE
-# ifdef IEM_WITH_CODE_TLB
+#ifdef IEM_WITH_CODE_TLB
     /** The offset of the next instruction byte. */
     uint32_t                offInstrNextByte;                               /* 0x08 */

@@ -1383 +1382 @@
      */
     uint8_t const          *pbInstrBuf;                                     /* 0x10 */
-#  if ARCH_BITS == 32
+# if ARCH_BITS == 32
     uint32_t                uInstrBufHigh;  /** The high dword of the host context pbInstrBuf member. */
-#  endif
+# endif
     /** The program counter corresponding to pbInstrBuf.
      * This is set to a non-canonical address when we need to invalidate it. */

@@ -1394 +1393 @@
      * This takes the CS segment limit into account. */
     uint16_t                cbInstrBufTotal;                                /* 0x28 */
+# ifndef IEM_WITH_OPAQUE_DECODER_STATE
     /** Offset into pbInstrBuf of the first byte of the current instruction.
      * Can be negative to efficiently handle cross page instructions. */

@@ -1420 +1420 @@
     uint8_t                 bUnused;                                        /* 0x35 */
 # endif
-# else  /* !IEM_WITH_CODE_TLB */
+# else  /* IEM_WITH_OPAQUE_DECODER_STATE */
+    uint8_t                 abOpaqueDecoderPart1[0x36 - 0x2a];
+# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
+
+#else  /* !IEM_WITH_CODE_TLB */
+# ifndef IEM_WITH_OPAQUE_DECODER_STATE
     /** The size of what has currently been fetched into abOpcode. */
     uint8_t                 cbOpcode;                                       /* 0x08 */

@@ -1441 +1446 @@
     uint8_t                 uRexIndex;                                      /* 0x12 */

-# endif /* !IEM_WITH_CODE_TLB */
-
+# else  /* IEM_WITH_OPAQUE_DECODER_STATE */
+    uint8_t                 abOpaqueDecoderPart1[0x13 - 0x08];
+# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
+#endif /* !IEM_WITH_CODE_TLB */
+
+#ifndef IEM_WITH_OPAQUE_DECODER_STATE
     /** The effective operand mode. */
     IEMMODE                 enmEffOpSize;                                   /* 0x36, 0x13 */

@@ -1481 +1490 @@
     uint8_t                 abAlignment2c[0x4f - 0x2f];                     /* 0x2f */
 # endif
+
 #else  /* IEM_WITH_OPAQUE_DECODER_STATE */
-    uint8_t                 abOpaqueDecoder[0x4f - 0x8];
+# ifdef IEM_WITH_CODE_TLB
+    uint8_t                 abOpaqueDecoderPart2[0x4f - 0x36];
+# else
+    uint8_t                 abOpaqueDecoderPart2[0x4f - 0x13];
+# endif
 #endif /* IEM_WITH_OPAQUE_DECODER_STATE */
     /** @} */
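The technique at work in this header change is keeping sizeof and member offsets identical whether the decoder state is visible or opaque: each #ifdef branch now carries an opaque byte array sized from the same offset arithmetic as the members it hides, so pbInstrBuf and friends can stay visible to the TLB code while the rest is hidden. A toy illustration with compile-time checks (TOYCPU and its members are invented; only the pattern mirrors the diff):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define WITH_OPAQUE_STATE 1     /* flip to 0 for the transparent layout */

    typedef struct TOYCPU
    {
        uint64_t uPcBase;                       /* 0x00: always visible */
    #if !WITH_OPAQUE_STATE
        uint32_t offNextByte;                   /* 0x08 */
        uint16_t cbBuf;                         /* 0x0c */
        uint8_t  idxPrefix;                     /* 0x0e */
        uint8_t  bOpcode;                       /* 0x0f */
    #else
        /* Same storage, hidden: sized from the offset comments above. */
        uint8_t  abOpaquePart1[0x10 - 0x08];
    #endif
        uint32_t fExec;                         /* 0x10: visible again */
    } TOYCPU;

    /* Both configurations must agree on the layout, or separately compiled
       translation units would disagree on member offsets. */
    static_assert(offsetof(TOYCPU, fExec) == 0x10, "layout drift");
    static_assert(sizeof(TOYCPU) == 0x18, "layout drift");

This is why the single abOpaqueDecoder array had to be split into Part1/Part2: the newly visible TLB members sit in the middle of the previously opaque region.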
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
(r102587 → r102603)

@@ -310 +310 @@
     kIemNativeLabelType_NonZeroRetOrPassUp,
     kIemNativeLabelType_RaiseGp0,
+    kIemNativeLabelType_ObsoleteTb,
     /* Labels with data, potentially multiple instances per TB: */
     kIemNativeLabelType_FirstWithMultipleInstances,

@@ -806 +807 @@

 DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile = true);
+DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmpEx(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint32_t fRegMask,
+                                                   bool fPreferVolatile = true);
 DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
                                                     bool fPreferVolatile = true);