Changeset 108409 in vbox for trunk/src/VBox/VMM/include
- Timestamp: Feb 27, 2025 10:35:39 AM
- svn:sync-xref-src-repo-rev: 167777
- Location: trunk/src/VBox/VMM/include
- Files: 2 edited, 1 copied
Legend:
- lines starting with '-' were removed, lines starting with '+' were added, lines starting with a space are unchanged context
- '@@ -old +new @@' gives the starting line numbers of each hunk in the old and new revision
trunk/src/VBox/VMM/include/IEMInline.h
--- r108290
+++ r108409

@@ -35 +35 @@
 
 
-/* Documentation and forward declarations for target specific inline functions: */
+/* Documentation and forward declarations for inline functions required for every target: */
 
 RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_BEGIN
-
-/**
- * Calculates the the IEM_F_XXX flags.
- *
- * @returns IEM_F_XXX combination match the current CPU state.
- * @param   pVCpu   The cross context virtual CPU structure of the
- *                  calling thread.
- */
-DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT;
-
-#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
-/**
- * Invalidates the decoder state and asserts various stuff - strict builds only.
- *
- * @param   pVCpu   The cross context virtual CPU structure of the
- *                  calling thread.
- */
-DECLINLINE(void) iemInitExecTargetStrict(PVMCPUCC pVCpu) RT_NOEXCEPT;
-#endif
 
 RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_END

@@ -198 +179 @@
 
 
-#ifndef IEM_WITH_OPAQUE_DECODER_STATE
-
-# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */
-
-/**
- * Initializes the execution state.
- *
- * @param   pVCpu           The cross context virtual CPU structure of the
- *                          calling thread.
- * @param   fExecOpts       Optional execution flags:
- *                              - IEM_F_BYPASS_HANDLERS
- *                              - IEM_F_X86_DISREGARD_LOCK
- *
- * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
- *          side-effects in strict builds.
- */
-DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
-{
-    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
-    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
-
-    pVCpu->iem.s.rcPassUp        = VINF_SUCCESS;
-    pVCpu->iem.s.fExec           = iemCalcExecFlags(pVCpu) | fExecOpts;
-    pVCpu->iem.s.cActiveMappings = 0;
-    pVCpu->iem.s.iNextMapping    = 0;
-
-# ifdef VBOX_STRICT
-    iemInitExecTargetStrict(pVCpu);
-# endif
-}
-
-
-# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
-/**
- * Performs a minimal reinitialization of the execution state.
- *
- * This is intended to be used by VM-exits, SMM, LOADALL and other similar
- * 'world-switch' types operations on the CPU. Currently only nested
- * hardware-virtualization uses it.
- *
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- * @param   cbInstr The instruction length (for flushing).
- */
-DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
-{
-    pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
-    iemOpcodeFlushHeavy(pVCpu, cbInstr);
-}
-# endif
-
-# endif /* VBOX_INCLUDED_vmm_dbgf_h || DOXYGEN_RUNNING */
-
-/**
- * Counterpart to #iemInitExec that undoes evil strict-build stuff.
- *
- * @param   pVCpu   The cross context virtual CPU structure of the
- *                  calling thread.
- */
-DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
-{
-    /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
-# ifdef VBOX_STRICT
-#  ifdef IEM_WITH_CODE_TLB
-    NOREF(pVCpu);
-#  else
-    pVCpu->iem.s.cbOpcode = 0;
-#  endif
-# else
-    NOREF(pVCpu);
-# endif
-}
-
-
-/**
- * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
- *
- * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
- *
- * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   rcStrict    The status code to fiddle.
- */
-DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
-{
-    iemUninitExec(pVCpu);
-    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
-}
-
-#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
-
-
 
 /** @name Memory access.
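The block removed above (iemInitExec, iemReInitExec, iemUninitExec and iemUninitExecAndFiddleStatusAndMaybeReenter) is not dropped from the tree; it lives on in the new IEMInlineExec.h shown below. As a reminder of how the pair is meant to be used, here is a minimal caller sketch in the spirit of the @remarks on iemInitExec(); the wrapper name and the elided decode/execute step are illustrative, only the IEM calls and the IEM_F_BYPASS_HANDLERS option come from the code above.

    /* Hypothetical caller sketch, not part of the changeset. */
    static VBOXSTRICTRC iemExampleExecuteOne(PVMCPUCC pVCpu, bool fBypassHandlers)
    {
        /* Sets up iem.s.fExec, clears the pass-up status and the mapping bookkeeping. */
        iemInitExec(pVCpu, fBypassHandlers ? IEM_F_BYPASS_HANDLERS : 0 /*fExecOpts*/);

        VBOXSTRICTRC rcStrict = VINF_SUCCESS;
        /* ... decode and execute one instruction here (omitted) ... */

        /* Undo strict-build side effects and fold iem.s.rcPassUp into the result. */
        return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    }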
trunk/src/VBox/VMM/include/IEMInlineExec.h
--- r108325
+++ r108409

@@ -1 +1 @@
 /* $Id$ */
 /** @file
- * IEM - Interpreted Execution Manager - Inlined Functions, Common.
+ * IEM - Interpreted Execution Manager - Inline Exec/Decoder routines.
 

@@ -26 +26 @@
  */
 
-#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
-#define VMM_INCLUDED_SRC_include_IEMInline_h
+#ifndef VMM_INCLUDED_SRC_include_IEMInlineExec_h
+#define VMM_INCLUDED_SRC_include_IEMInlineExec_h
 #ifndef RT_WITHOUT_PRAGMA_ONCE
 # pragma once

@@ -35 +35 @@
 
 
-/* Documentation and forward declarations for target specific inline functions: */
+/* Documentation and forward declarations for inline functions required for every target: */
 
 RT_NO_WARN_UNUSED_INLINE_PROTOTYPE_BEGIN

@@ -61 +61 @@
 
 
-/**
- * Makes status code addjustments (pass up from I/O and access handler)
- * as well as maintaining statistics.
- *
- * @returns Strict VBox status code to pass up.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   rcStrict    The status from executing an instruction.
- */
-DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
-{
-    if (rcStrict != VINF_SUCCESS)
-    {
-        /* Deal with the cases that should be treated as VINF_SUCCESS first. */
-        if (   rcStrict == VINF_IEM_YIELD_PENDING_FF
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
-            || rcStrict == VINF_VMX_VMEXIT
-#endif
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-            || rcStrict == VINF_SVM_VMEXIT
-#endif
-           )
-        {
-            rcStrict = pVCpu->iem.s.rcPassUp;
-            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-            { /* likely */ }
-            else
-                pVCpu->iem.s.cRetPassUpStatus++;
-        }
-        else if (RT_SUCCESS(rcStrict))
-        {
-            AssertMsg(   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
-                      || rcStrict == VINF_IOM_R3_IOPORT_READ
-                      || rcStrict == VINF_IOM_R3_IOPORT_WRITE
-                      || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
-                      || rcStrict == VINF_IOM_R3_MMIO_READ
-                      || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
-                      || rcStrict == VINF_IOM_R3_MMIO_WRITE
-                      || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
-                      || rcStrict == VINF_CPUM_R3_MSR_READ
-                      || rcStrict == VINF_CPUM_R3_MSR_WRITE
-                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR
-                      || rcStrict == VINF_EM_RAW_TO_R3
-                      || rcStrict == VINF_EM_TRIPLE_FAULT
-                      || rcStrict == VINF_EM_EMULATE_SPLIT_LOCK
-                      || rcStrict == VINF_GIM_R3_HYPERCALL
-                      /* raw-mode / virt handlers only: */
-                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
-                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
-                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
-                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
-                      || rcStrict == VINF_SELM_SYNC_GDT
-                      || rcStrict == VINF_CSAM_PENDING_ACTION
-                      || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
-                      /* nested hw.virt codes: */
-                      || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
-                      || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
-                      , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
-            /** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
-            int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
-            if (rcPassUp == VINF_SUCCESS)
-                pVCpu->iem.s.cRetInfStatuses++;
-            else if (   rcPassUp < VINF_EM_FIRST
-                     || rcPassUp > VINF_EM_LAST
-                     || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
-            {
-                LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
-                pVCpu->iem.s.cRetPassUpStatus++;
-                rcStrict = rcPassUp;
-            }
-            else
-            {
-                LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
-                pVCpu->iem.s.cRetInfStatuses++;
-            }
-        }
-        else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
-            pVCpu->iem.s.cRetAspectNotImplemented++;
-        else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
-            pVCpu->iem.s.cRetInstrNotImplemented++;
-        else
-            pVCpu->iem.s.cRetErrStatuses++;
-    }
-    else
-    {
-        rcStrict = pVCpu->iem.s.rcPassUp;
-        if (rcStrict != VINF_SUCCESS)
-            pVCpu->iem.s.cRetPassUpStatus++;
-    }
+//#ifdef VBOX_VMM_TARGET_X86
+//# include "VMMAll/target-x86/IEMInlineExec-x86.h"
+//#elif defined(VBOX_VMM_TARGET_ARMV8)
+//# include "VMMAll/target-armv8/IEMInlineExec-armv8.h"
+//#endif
 
-    /* Just clear it here as well. */
-    pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
-
-    return rcStrict;
-}
-
-
-/**
- * Sets the pass up status.
- *
- * @returns VINF_SUCCESS.
- * @param   pVCpu           The cross context virtual CPU structure of the
- *                          calling thread.
- * @param   rcPassUp        The pass up status.  Must be informational.
- *                          VINF_SUCCESS is not allowed.
- */
-DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
-{
-    AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
-
-    int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
-    if (rcOldPassUp == VINF_SUCCESS)
-        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
-    /* If both are EM scheduling codes, use EM priority rules. */
-    else if (   rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
-             && rcPassUp    >= VINF_EM_FIRST && rcPassUp    <= VINF_EM_LAST)
-    {
-        if (rcPassUp < rcOldPassUp)
-        {
-            LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
-            pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
-        }
-        else
-            LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
-    }
-    /* Override EM scheduling with specific status code. */
-    else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
-    {
-        LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
-        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
-    }
-    /* Don't override specific status code, first come first served. */
-    else
-        LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
-    return VINF_SUCCESS;
-}
-
-
-#ifndef IEM_WITH_OPAQUE_DECODER_STATE
 
 # if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */

@@ -244 +112 @@
 {
     pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
+# ifdef VBOX_VMM_TARGET_X86
     iemOpcodeFlushHeavy(pVCpu, cbInstr);
+# elif !defined(IEM_WITH_CODE_TLB)
+    pVCpu->iem.s.cbOpcode = cbInstr;
+# else
+    pVCpu->iem.s.cbInstrBufTotal = 0;
+    RT_NOREF(cbInstr);
+# endif
 }
 # endif

@@ -286 +161 @@
 }
 
-#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
-
-
-
-/** @name Memory access.
- *
- * @{
- */
-
-/**
- * Maps a physical page.
- *
- * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   GCPhysMem   The physical address.
- * @param   fAccess     The intended access.
- * @param   ppvMem      Where to return the mapping address.
- * @param   pLock       The PGM lock.
- */
-DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
-                              void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
-{
-#ifdef IEM_LOG_MEMORY_WRITES
-    if (fAccess & IEM_ACCESS_TYPE_WRITE)
-        return VERR_PGM_PHYS_TLB_CATCH_ALL;
-#endif
-
-    /** @todo This API may require some improving later.  A private deal with PGM
-     *        regarding locking and unlocking needs to be struct.  A couple of TLBs
-     *        living in PGM, but with publicly accessible inlined access methods
-     *        could perhaps be an even better solution. */
-    int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
-                                  GCPhysMem,
-                                  RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
-                                  RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
-                                  ppvMem,
-                                  pLock);
-    /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
-    AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
-
-    return rc;
-}
-
-
-/**
- * Unmap a page previously mapped by iemMemPageMap.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   GCPhysMem   The physical address.
- * @param   fAccess     The intended access.
- * @param   pvMem       What iemMemPageMap returned.
- * @param   pLock       The PGM lock.
- */
-DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
-                                 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
-{
-    NOREF(pVCpu);
-    NOREF(GCPhysMem);
-    NOREF(fAccess);
-    NOREF(pvMem);
-    PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
-}
-
-
-/*
- * Unmap helpers.
- */
-
-DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
-{
-#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
-    if (RT_LIKELY(bMapInfo == 0))
-        return;
-#endif
-    iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo);
-}
-
-
-DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
-{
-#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
-    if (RT_LIKELY(bMapInfo == 0))
-        return;
-#endif
-    iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo);
-}
-
-
-DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
-{
-#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
-    if (RT_LIKELY(bMapInfo == 0))
-        return;
-#endif
-    iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo);
-}
-
-
-DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
-{
-#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
-    if (RT_LIKELY(bMapInfo == 0))
-        return;
-#endif
-    iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo);
-}
-
-DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT
-{
-#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
-    if (RT_LIKELY(bMapInfo == 0))
-        return;
-#endif
-    iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo);
-}
-
-/** @} */
-
-
-#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
-/**
- * Adds an entry to the TLB trace buffer.
- *
- * @note Don't use directly, only via the IEMTLBTRACE_XXX macros.
- */
-DECLINLINE(void) iemTlbTrace(PVMCPU pVCpu, IEMTLBTRACETYPE enmType, uint64_t u64Param, uint64_t u64Param2 = 0,
-                             uint8_t bParam = 0, uint32_t u32Param = 0/*, uint16_t u16Param = 0 */)
-{
-    uint32_t const          fMask  = RT_BIT_32(pVCpu->iem.s.cTlbTraceEntriesShift) - 1;
-    PIEMTLBTRACEENTRY const pEntry = &pVCpu->iem.s.paTlbTraceEntries[pVCpu->iem.s.idxTlbTraceEntry++ & fMask];
-    pEntry->u64Param  = u64Param;
-    pEntry->u64Param2 = u64Param2;
-    pEntry->u16Param  = 0; //u16Param;
-    pEntry->u32Param  = u32Param;
-    pEntry->bParam    = bParam;
-    pEntry->enmType   = enmType;
-    pEntry->rip       = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
-}
-#endif
-
-#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */
+#endif /* !VMM_INCLUDED_SRC_include_IEMInlineExec_h */
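The interesting addition above is the still commented-out per-target include block. Assuming it is enabled in the form hinted at there, the dispatch would look roughly as follows; the #else guard is an illustrative addition and not part of this revision.

    /* Sketch of the per-target dispatch stubbed out in IEMInlineExec.h. */
    #ifdef VBOX_VMM_TARGET_X86
    # include "VMMAll/target-x86/IEMInlineExec-x86.h"
    #elif defined(VBOX_VMM_TARGET_ARMV8)
    # include "VMMAll/target-armv8/IEMInlineExec-armv8.h"
    #else
    # error "Unsupported VBOX_VMM_TARGET_XXX"  /* hypothetical guard, not in the source */
    #endif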
trunk/src/VBox/VMM/include/IEMInternal.h
--- r108370
+++ r108409

@@ -1708 +1708 @@
     /** The offset of the next instruction byte. */
     uint32_t            offInstrNextByte;               /* 0x08 */
-    /** The number of bytes available at pbInstrBuf for the current instruction.
-     * This takes the max opcode length into account so that doesn't need to be
-     * checked separately. */
-    uint32_t            cbInstrBuf;                     /* 0x0c */
-    /** Pointer to the page containing RIP, user specified buffer or abOpcode.
+# if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
+    /** X86: The number of bytes available at pbInstrBuf for the current
+     * instruction.  This takes the max opcode length into account so that doesn't
+     * need to be checked separately. */
+    uint32_t            cbInstrBuf;                     /* x86: 0x0c */
+# else
+    /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
+     * @note Set to zero when the code TLB is flushed to trigger TLB reload. */
+    uint32_t            cbInstrBufTotal;                /* !x86: 0x0c */
+# endif
+    /** Pointer to the page containing PC, user specified buffer or abOpcode.
     * This can be NULL if the page isn't mappable for some reason, in which
     * case we'll do fallback stuff.

@@ -1720 +1726 @@
     * aligned pointer but pointer to the user data.
     *
-    * For instructions crossing pages, this will start on the first page and be
-    * advanced to the next page by the time we've decoded the instruction. This
+    * X86: For instructions crossing pages, this will start on the first page and
+    * be advanced to the next page by the time we've decoded the instruction. This
     * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
     */

@@ -1733 +1739 @@
     /** The guest physical address corresponding to pbInstrBuf. */
     RTGCPHYS            GCPhysInstrBuf;                 /* 0x20 */
-    /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
+# if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
+    /** X86: The number of bytes available at pbInstrBuf in total (for IEMExecLots).
     * This takes the CS segment limit into account.
     * @note Set to zero when the code TLB is flushed to trigger TLB reload. */
-    uint16_t            cbInstrBufTotal;                /* 0x28 */
-    /** Offset into pbInstrBuf of the first byte of the current instruction.
+    uint16_t            cbInstrBufTotal;                /* x86: 0x28 */
+    /** X86: Offset into pbInstrBuf of the first byte of the current instruction.
     * Can be negative to efficiently handle cross page instructions. */
-    int16_t             offCurInstrStart;               /* 0x2a */
-
-# ifndef IEM_WITH_OPAQUE_DECODER_STATE
-    /** The prefix mask (IEM_OP_PRF_XXX). */
-    uint32_t            fPrefixes;                      /* 0x2c */
-    /** The extra REX ModR/M register field bit (REX.R << 3). */
-    uint8_t             uRexReg;                        /* 0x30 */
-    /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
+    int16_t             offCurInstrStart;               /* x86: 0x2a */
+# endif
+
+# if (!defined(IEM_WITH_OPAQUE_DECODER_STATE) && defined(VBOX_VMM_TARGET_X86)) || defined(DOXYGEN_RUNNING)
+    /** X86: The prefix mask (IEM_OP_PRF_XXX). */
+    uint32_t            fPrefixes;                      /* x86: 0x2c */
+    /** X86: The extra REX ModR/M register field bit (REX.R << 3). */
+    uint8_t             uRexReg;                        /* x86: 0x30 */
+    /** X86: The extra REX ModR/M r/m field, SIB base and opcode reg bit
     * (REX.B << 3). */
-    uint8_t             uRexB;                          /* 0x31 */
-    /** The extra REX SIB index field bit (REX.X << 3). */
-    uint8_t             uRexIndex;                      /* 0x32 */
-
-    /** The effective segment register (X86_SREG_XXX). */
-    uint8_t             iEffSeg;                        /* 0x33 */
-
-    /** The offset of the ModR/M byte relative to the start of the instruction. */
-    uint8_t             offModRm;                       /* 0x34 */
+    uint8_t             uRexB;                          /* x86: 0x31 */
+    /** X86: The extra REX SIB index field bit (REX.X << 3). */
+    uint8_t             uRexIndex;                      /* x86: 0x32 */
+
+    /** X86: The effective segment register (X86_SREG_XXX). */
+    uint8_t             iEffSeg;                        /* x86: 0x33 */
+
+    /** X86: The offset of the ModR/M byte relative to the start of the instruction. */
+    uint8_t             offModRm;                       /* x86: 0x34 */
 
 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
-    /** The current offset into abOpcode. */
-    uint8_t             offOpcode;                      /* 0x35 */
+    /** X86: The current offset into abOpcode. */
+    uint8_t             offOpcode;                      /* x86: 0x35 */
 # else
-    uint8_t             bUnused;                        /* 0x35 */
+    uint8_t             bUnused;                        /* x86: 0x35 */
 # endif
-# else  /* IEM_WITH_OPAQUE_DECODER_STATE */
+# else  /* IEM_WITH_OPAQUE_DECODER_STATE || !X86 */
+#  ifdef VBOX_VMM_TARGET_X86
     uint8_t             abOpaqueDecoderPart1[0x36 - 0x2c];
-# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
+#  endif
+# endif /* IEM_WITH_OPAQUE_DECODER_STATE || !X86 */
 
 #else  /* !IEM_WITH_CODE_TLB */

@@ -1772 +1782 @@
     /** The size of what has currently been fetched into abOpcode. */
     uint8_t             cbOpcode;                       /* 0x08 */
-    /** The current offset into abOpcode. */
-    uint8_t             offOpcode;                      /* 0x09 */
-    /** The offset of the ModR/M byte relative to the start of the instruction. */
-    uint8_t             offModRm;                       /* 0x0a */
-
-    /** The effective segment register (X86_SREG_XXX). */
-    uint8_t             iEffSeg;                        /* 0x0b */
-
-    /** The prefix mask (IEM_OP_PRF_XXX). */
-    uint32_t            fPrefixes;                      /* 0x0c */
-    /** The extra REX ModR/M register field bit (REX.R << 3). */
-    uint8_t             uRexReg;                        /* 0x10 */
-    /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
+
+# ifdef VBOX_VMM_TARGET_X86
+    /** X86: The current offset into abOpcode. */
+    uint8_t             offOpcode;                      /* x86: 0x09 */
+    /** X86: The offset of the ModR/M byte relative to the start of the
+     * instruction. */
+    uint8_t             offModRm;                       /* x86: 0x0a */
+
+    /** X86: The effective segment register (X86_SREG_XXX). */
+    uint8_t             iEffSeg;                        /* x86: 0x0b */
+
+    /** X86: The prefix mask (IEM_OP_PRF_XXX). */
+    uint32_t            fPrefixes;                      /* x86: 0x0c */
+    /** X86: The extra REX ModR/M register field bit (REX.R << 3). */
+    uint8_t             uRexReg;                        /* x86: 0x10 */
+    /** X86: The extra REX ModR/M r/m field, SIB base and opcode reg bit
     * (REX.B << 3). */
-    uint8_t             uRexB;                          /* 0x11 */
-    /** The extra REX SIB index field bit (REX.X << 3). */
-    uint8_t             uRexIndex;                      /* 0x12 */
-
+    uint8_t             uRexB;                          /* x86: 0x11 */
+    /** X86: The extra REX SIB index field bit (REX.X << 3). */
+    uint8_t             uRexIndex;                      /* x86: 0x12 */
+# endif
 # else  /* IEM_WITH_OPAQUE_DECODER_STATE */
+#  ifndef VBOX_VMM_TARGET_X86
+    uint8_t             abOpaqueDecoderPart1[1];
+#  else
     uint8_t             abOpaqueDecoderPart1[0x13 - 0x08];
+#  endif
 # endif /* IEM_WITH_OPAQUE_DECODER_STATE */
 #endif /* !IEM_WITH_CODE_TLB */
 
 
-#ifndef IEM_WITH_OPAQUE_DECODER_STATE
-    /** The effective operand mode. */
-    IEMMODE             enmEffOpSize;                   /* 0x36, 0x13 */
-    /** The default addressing mode. */
-    IEMMODE             enmDefAddrMode;                 /* 0x37, 0x14 */
-    /** The effective addressing mode. */
-    IEMMODE             enmEffAddrMode;                 /* 0x38, 0x15 */
-    /** The default operand mode. */
-    IEMMODE             enmDefOpSize;                   /* 0x39, 0x16 */
-
-    /** Prefix index (VEX.pp) for two byte and three byte tables. */
-    uint8_t             idxPrefix;                      /* 0x3a, 0x17 */
-    /** 3rd VEX/EVEX/XOP register.
+#if (!defined(IEM_WITH_OPAQUE_DECODER_STATE) && (defined(VBOX_VMM_TARGET_X86) || !defined(IEM_WITH_CODE_TLB))) \
+  || defined(DOXGYEN_RUNNING)
+# ifdef VBOX_VMM_TARGET_X86
+    /** X86: The effective operand mode. */
+    IEMMODE             enmEffOpSize;                   /* x86: 0x36, 0x13 */
+    /** X86: The default addressing mode. */
+    IEMMODE             enmDefAddrMode;                 /* x86: 0x37, 0x14 */
+    /** X86: The effective addressing mode. */
+    IEMMODE             enmEffAddrMode;                 /* x86: 0x38, 0x15 */
+    /** X86: The default operand mode. */
+    IEMMODE             enmDefOpSize;                   /* x86: 0x39, 0x16 */
+
+    /** X86: Prefix index (VEX.pp) for two byte and three byte tables. */
+    uint8_t             idxPrefix;                      /* x86: 0x3a, 0x17 */
+    /** X86: 3rd VEX/EVEX/XOP register.
     * Please use IEM_GET_EFFECTIVE_VVVV to access. */
-    uint8_t             uVex3rdReg;                     /* 0x3b, 0x18 */
-    /** The VEX/EVEX/XOP length field. */
-    uint8_t             uVexLength;                     /* 0x3c, 0x19 */
-    /** Additional EVEX stuff. */
-    uint8_t             fEvexStuff;                     /* 0x3d, 0x1a */
-
-# ifndef IEM_WITH_CODE_TLB
+    uint8_t             uVex3rdReg;                     /* x86: 0x3b, 0x18 */
+    /** X86: The VEX/EVEX/XOP length field. */
+    uint8_t             uVexLength;                     /* x86: 0x3c, 0x19 */
+    /** X86: Additional EVEX stuff. */
+    uint8_t             fEvexStuff;                     /* x86: 0x3d, 0x1a */
+
+#  ifndef IEM_WITH_CODE_TLB
     /** Explicit alignment padding. */
-    uint8_t             abAlignment2a[1];               /* 0x1b */
+    uint8_t             abAlignment2a[1];               /* x86: 0x1b */
+#  endif
+    /** X86: The FPU opcode (FOP). */
+    uint16_t            uFpuOpcode;                     /* x86: 0x3e, 0x1c */
+#  ifndef IEM_WITH_CODE_TLB
+    /** Opcode buffer alignment padding. */
+    uint8_t             abAlignment2b[2];               /* x86: 0x1e */
+#  endif
+# else  /* !VBOX_VMM_TARGET_X86 */
+    /** Opcode buffer alignment padding. */
+    uint8_t             abAlignment2b[3+4];             /* !x86: 0x09 */
+# endif /* !VBOX_VMM_TARGET_X86 */
+
+    /** The opcode bytes. */
+# ifdef VBOX_VMM_TARGET_X86
+    uint8_t             abOpcode[15];                   /* x86: 0x40, 0x20 */
+# else
+    union
+    {
+        uint8_t         abOpcode[ 32];                  /* !x86: 0x10 */
+        uint16_t        au16Opcode[16];
+        uint32_t        au32Opcode[ 8];
+    };
 # endif
-    /** The FPU opcode (FOP). */
-    uint16_t            uFpuOpcode;                     /* 0x3e, 0x1c */
-# ifndef IEM_WITH_CODE_TLB
     /** Explicit alignment padding. */
-    uint8_t             abAlignment2b[2];               /* 0x1e */
-# endif
-
-    /** The opcode bytes. */
-    uint8_t             abOpcode[15];                   /* 0x40, 0x20 */
-    /** Explicit alignment padding. */
+# ifdef VBOX_VMM_TARGET_X86
+#  ifdef IEM_WITH_CODE_TLB
+    //uint8_t           abAlignment2c[0x4f - 0x4f];     /* x86: 0x4f */
+#  else
+    uint8_t             abAlignment2c[0x4f - 0x2f];     /* x86: 0x2f */
+#  endif
+# else
+    uint8_t             abAlignment2c[0x4f - 0x30];     /* !x86: 0x30 */
+# endif
+
+#else  /* IEM_WITH_OPAQUE_DECODER_STATE || (!x86 && TLB) */
 # ifdef IEM_WITH_CODE_TLB
-    //uint8_t           abAlignment2c[0x4f - 0x4f];     /* 0x4f */
+#  ifdef VBOX_VMM_TARGET_X86
+    uint8_t             abOpaqueDecoderPart2[0x4f - 0x36];
+#  else
+    uint8_t             abOpaqueDecoderPart2[0x4f - 0x28];
+#  endif
 # else
-    uint8_t             abAlignment2c[0x4f - 0x2f];     /* 0x2f */
-# endif
-
-#else  /* IEM_WITH_OPAQUE_DECODER_STATE */
-# ifdef IEM_WITH_CODE_TLB
-    uint8_t             abOpaqueDecoderPart2[0x4f - 0x36];
-# else
+#  ifdef VBOX_VMM_TARGET_X86
     uint8_t             abOpaqueDecoderPart2[0x4f - 0x13];
+#  else
+    uint8_t             abOpaqueDecoderPart2[0x4f - 0x09];
+#  endif
 # endif
 #endif /* IEM_WITH_OPAQUE_DECODER_STATE */

@@ -1886 +1930 @@
     } aMemBbMappings[3];                                /* 0xb0 LB 0x48 */
 
-    /** The flags of the current exception / interrupt. */
+    /** The flags of the current exception / interrupt.
+     * @note X86 specific? */
     uint32_t            fCurXcpt;                       /* 0xf8 */
-    /** The current exception / interrupt. */
+    /** The current exception / interrupt.
+     *@note X86 specific? */
     uint8_t             uCurXcpt;                       /* 0xfc */
-    /** Exception / interrupt recursion depth. */
+    /** Exception / interrupt recursion depth.
+     *@note X86 specific? */
     int8_t              cXcptRecursions;                /* 0xfb */
 
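For non-x86 targets the opcode buffer becomes a union (abOpcode / au16Opcode / au32Opcode), so a fixed-width target such as ARMv8 can read whole instruction words instead of assembling them from bytes. A hedged sketch of a consumer is shown below; the accessor name is made up, and it assumes a build without IEM_WITH_OPAQUE_DECODER_STATE so the decoder members are visible.

    /* Illustrative only: reading the first fetched opcode unit per target. */
    DECLINLINE(uint32_t) iemExampleGetOpcodeUnit0(PVMCPUCC pVCpu)
    {
    #ifdef VBOX_VMM_TARGET_X86
        /* x86: variable-length encoding, so only the raw opcode bytes exist. */
        return pVCpu->iem.s.abOpcode[0];
    #else
        /* e.g. ARMv8: fixed 32-bit instruction words via the new union member. */
        return pVCpu->iem.s.au32Opcode[0];
    #endif
    }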