Changeset 75638 in vbox
- Timestamp: Nov 21, 2018 10:49:32 AM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h (r75611 → r75638)

A single hunk: a "Defined Constants And Macros" section banner is inserted ahead of the nested-SVM code (new lines 19-21):

 
 
+/*********************************************************************************************************************************
+*   Defined Constants And Macros                                                                                                 *
+*********************************************************************************************************************************/
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
 /**
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r75632 → r75638)

Most of this change is code motion. The ModR/M/SIB/displacement getter macros and the IEM_VMX_* helper macros move from the middle of the file (old lines 303-443) into a new "Defined Constants And Macros" section at the top (new lines 19-163), and a "Global Variables" banner is added in front of the pending-intercepts @todo and the VMCS field map (new lines 166-168). The moved macro block, shown once here at its new location:

/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
 * relative offsets.
 */
# ifdef IEM_WITH_CODE_TLB
#  define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm)         do { } while (0)
#  define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib)               do { } while (0)
#  define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp)         do { } while (0)
#  define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp)   do { } while (0)
#  define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp)         do { } while (0)
#  define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp)   do { } while (0)
#  define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)  do { } while (0)
#  define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)   do { } while (0)
#  error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
# else  /* !IEM_WITH_CODE_TLB */
#  define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
    do \
    { \
        Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
        (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
    } while (0)

#  define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib)  IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)

#  define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
        uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
        uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
        (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
    } while (0)

#  define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
        (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
    } while (0)

#  define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
        uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
        uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
        uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
        uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
        (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
    } while (0)

#  define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
        (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
    } while (0)

#  define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
        (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
    } while (0)

#  define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
        uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
        uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
        uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
        uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
        (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
    } while (0)
# endif /* !IEM_WITH_CODE_TLB */

/** Gets the guest-physical address of the shadows VMCS for the given VCPU. */
#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu)  ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)

/** Whether a shadow VMCS is present for the given VCPU. */
#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu)  RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)

/** Gets the VMXON region pointer. */
#define IEM_VMX_GET_VMXON_PTR(a_pVCpu)  ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)

/** Gets the guest-physical address of the current VMCS for the given VCPU. */
#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu)  ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)

/** Whether a current VMCS is present for the given VCPU. */
#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu)  RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)

/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
    do \
    { \
        Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
    } while (0)

/** Clears any current VMCS for the given VCPU. */
#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
    do \
    { \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
    } while (0)

/** Check for VMX instructions requiring to be in VMX operation.
 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
    do \
    { \
        if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
        { /* likely */ } \
        else \
        { \
            Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
            (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
            return iemRaiseUndefinedOpcode(a_pVCpu); \
        } \
    } while (0)

/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
    do \
    { \
        Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
            HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
        return VERR_VMX_VMENTRY_FAILED; \
    } while (0)

/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
#define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
    do \
    { \
        Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
            HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
        return VERR_VMX_VMEXIT_FAILED; \
    } while (0)


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** @todo NSTVMX: The following VM-exit intercepts are pending:
 *        VMX_EXIT_IO_SMI
…
 *        VMX_EXIT_XRSTORS
 */
/**
 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
…
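For readers unfamiliar with the getter macros above: the multi-byte variants assemble little-endian opcode bytes, and the _SX_ variants sign-extend through a narrower signed type. A minimal, self-contained C sketch of those two steps, using plain shifts in place of VirtualBox's RT_MAKE_U16/RT_MAKE_U32_FROM_U8 helpers (the byte values are made up for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Four bytes as they would sit in abOpcode[]: a disp32 of -8, little-endian. */
    uint8_t const abOpcode[4] = { 0xF8, 0xFF, 0xFF, 0xFF };

    /* What IEM_DISP_GET_U32 does: assemble the bytes from low to high. */
    uint32_t const u32Disp = (uint32_t)abOpcode[0]
                           | ((uint32_t)abOpcode[1] << 8)
                           | ((uint32_t)abOpcode[2] << 16)
                           | ((uint32_t)abOpcode[3] << 24);

    /* What IEM_DISP_GET_S32_SX_U64 adds: going through int32_t makes the
       conversion to 64 bits sign-extend. */
    uint64_t const u64Disp = (uint64_t)(int32_t)u32Disp;

    printf("u32Disp=%#" PRIx32 " u64Disp=%#" PRIx64 "\n", u32Disp, u64Disp);
    /* Prints: u32Disp=0xfffffff8 u64Disp=0xfffffffffffffff8 */
    return 0;
}

The Assert bounds checks in the real macros matter because the offsets index the instruction bytes the interpreter has already fetched; note the IEM_WITH_CODE_TLB variants are still stubbed out with an #error for the page-crossing case.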
At their old location (old lines 301-443, immediately after the closing "};" of the VMCS field map array), the same macro block is deleted; it is identical to the copy shown above.

Two VM-exit helper functions also move down the file unchanged. iemVmxMaskCr0CR4 moves from roughly old lines 912-951 to new lines 1445-1484, where it now follows a switch ending in IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12):

/**
 * Masks the nested-guest CR0/CR4 mask subjected to the corresponding guest/host
 * mask and the read-shadow (CR0/CR4 read).
 *
 * @returns The masked CR0/CR4.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   iCrReg      The control register (either CR0 or CR4).
 * @param   uGuestCrX   The current guest CR0 or guest CR4.
 */
IEM_STATIC uint64_t iemVmxMaskCr0CR4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t uGuestCrX)
{
    Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
    Assert(iCrReg == 0 || iCrReg == 4);

    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    Assert(pVmcs);

    /*
     * For each CR0 or CR4 bit owned by the host, the corresponding bit is loaded from the
     * CR0 read shadow or CR4 read shadow. For each CR0 or CR4 bit that is not owned by the
     * host, the corresponding bit from the guest CR0 or guest CR4 is loaded.
     *
     * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
     */
    uint64_t fGstHostMask;
    uint64_t fReadShadow;
    if (iCrReg == 0)
    {
        fGstHostMask = pVmcs->u64Cr0Mask.u;
        fReadShadow  = pVmcs->u64Cr0ReadShadow.u;
    }
    else
    {
        fGstHostMask = pVmcs->u64Cr4Mask.u;
        fReadShadow  = pVmcs->u64Cr4ReadShadow.u;
    }

    uint64_t const fMaskedCrX = (fReadShadow & fGstHostMask) | (uGuestCrX & ~fGstHostMask);
    return fMaskedCrX;
}
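The bit blend at the heart of iemVmxMaskCr0CR4 is easy to check in isolation: host-owned bits (those set in the guest/host mask) come from the read shadow, everything else from the real guest value. A self-contained sketch with made-up mask and shadow values rather than a real VMCS:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same blend as iemVmxMaskCr0CR4: the shadow supplies host-owned bits,
   the guest value supplies the rest. */
static uint64_t maskCrX(uint64_t fGstHostMask, uint64_t fReadShadow, uint64_t uGuestCrX)
{
    return (fReadShadow & fGstHostMask) | (uGuestCrX & ~fGstHostMask);
}

int main(void)
{
    uint64_t const fMask   = UINT64_C(0x80000021); /* host owns CR0.PG, CR0.NE, CR0.PE (hypothetical) */
    uint64_t const fShadow = UINT64_C(0x80000021); /* ...and shadows them all as set */
    uint64_t const uGuest  = UINT64_C(0x00000011); /* real guest CR0: PE and ET set */

    /* Host-owned PG/NE/PE read from the shadow; guest-owned ET (bit 4)
       passes through: the guest sees 0x80000031. */
    printf("guest-visible CR0 = %#" PRIx64 "\n", maskCrX(fMask, fShadow, uGuest));
    return 0;
}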
iemVmxGetExitInstrInfo moves from roughly old lines 954-1297 to new lines 2414-2755, directly after the "return rcStrict;" that closes the preceding function; at the old location the file now continues straight into the unchanged function documented as "Converts an IEM exception event type to a VMX event type." The moved function:

/**
 * Gets VM-exit instruction information along with any displacement for an
 * instruction VM-exit.
 *
 * @returns The VM-exit instruction information.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uExitReason The VM-exit reason.
 * @param   uInstrId    The VM-exit instruction identity (VMXINSTRID_XXX).
 * @param   pGCPtrDisp  Where to store the displacement field. Optional, can be
 *                      NULL.
 */
IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
{
    RTGCPTR          GCPtrDisp;
    VMXEXITINSTRINFO ExitInstrInfo;
    ExitInstrInfo.u = 0;

    /*
     * Get and parse the ModR/M byte from our decoded opcodes.
     */
    uint8_t bRm;
    uint8_t const offModRm = pVCpu->iem.s.offModRm;
    IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * ModR/M indicates register addressing.
         *
         * The primary/secondary register operands are reported in the iReg1 or iReg2
         * fields depending on whether it is a read/write form.
         */
        uint8_t idxReg1;
        uint8_t idxReg2;
        if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
        {
            idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
            idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
        }
        else
        {
            idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
            idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
        }
        ExitInstrInfo.All.u2Scaling       = 0;
        ExitInstrInfo.All.iReg1           = idxReg1;
        ExitInstrInfo.All.u3AddrSize      = pVCpu->iem.s.enmEffAddrMode;
        ExitInstrInfo.All.fIsRegOperand   = 1;
        ExitInstrInfo.All.uOperandSize    = pVCpu->iem.s.enmEffOpSize;
        ExitInstrInfo.All.iSegReg         = 0;
        ExitInstrInfo.All.iIdxReg         = 0;
        ExitInstrInfo.All.fIdxRegInvalid  = 1;
        ExitInstrInfo.All.iBaseReg        = 0;
        ExitInstrInfo.All.fBaseRegInvalid = 1;
        ExitInstrInfo.All.iReg2           = idxReg2;

        /* Displacement not applicable for register addressing. */
        GCPtrDisp = 0;
    }
    else
    {
        /*
         * ModR/M indicates memory addressing.
         */
        uint8_t uScale        = 0;
        bool    fBaseRegValid = false;
        bool    fIdxRegValid  = false;
        uint8_t iBaseReg      = 0;
        uint8_t iIdxReg       = 0;
        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
        {
            /*
             * Parse the ModR/M, displacement for 16-bit addressing mode.
             * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
             */
            uint16_t u16Disp = 0;
            uint8_t const offDisp = offModRm + sizeof(bRm);
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
            {
                /* Displacement without any registers. */
                IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
            }
            else
            {
                /* Register (index and base). */
                switch (bRm & X86_MODRM_RM_MASK)
                {
                    case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
                    case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
                    case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
                    case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
                    case 4: fIdxRegValid  = true; iIdxReg  = X86_GREG_xSI; break;
                    case 5: fIdxRegValid  = true; iIdxReg  = X86_GREG_xDI; break;
                    case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
                    case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
                }

                /* Register + displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:                                                   break;
                    case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp);  break;
                    case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);        break;
                    default:
                    {
                        /* Register addressing, handled at the beginning. */
                        AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
                        break;
                    }
                }
            }

            Assert(!uScale);                /* There's no scaling/SIB byte for 16-bit addressing. */
            GCPtrDisp = (int16_t)u16Disp;   /* Sign-extend the displacement. */
        }
        else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
        {
            /*
             * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
             * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
             */
            uint32_t u32Disp = 0;
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
            {
                /* Displacement without any registers. */
                uint8_t const offDisp = offModRm + sizeof(bRm);
                IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
            }
            else
            {
                /* Register (and perhaps scale, index and base). */
                uint8_t offDisp = offModRm + sizeof(bRm);
                iBaseReg = (bRm & X86_MODRM_RM_MASK);
                if (iBaseReg == 4)
                {
                    /* An SIB byte follows the ModR/M byte, parse it. */
                    uint8_t bSib;
                    uint8_t const offSib = offModRm + sizeof(bRm);
                    IEM_SIB_GET_U8(pVCpu, bSib, offSib);

                    /* A displacement may follow SIB, update its offset. */
                    offDisp += sizeof(bSib);

                    /* Get the scale. */
                    uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* Get the index register. */
                    iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
                    fIdxRegValid = RT_BOOL(iIdxReg != 4);

                    /* Get the base register. */
                    iBaseReg = bSib & X86_SIB_BASE_MASK;
                    fBaseRegValid = true;
                    if (iBaseReg == 5)
                    {
                        if ((bRm & X86_MODRM_MOD_MASK) == 0)
                        {
                            /* Mod is 0 implies a 32-bit displacement with no base. */
                            fBaseRegValid = false;
                            IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
                        }
                        else
                        {
                            /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
                            iBaseReg = X86_GREG_xBP;
                        }
                    }
                }

                /* Register + displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0: /* Handled above */                               break;
                    case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp);  break;
                    case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);        break;
                    default:
                    {
                        /* Register addressing, handled at the beginning. */
                        AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
                        break;
                    }
                }
            }

            GCPtrDisp = (int32_t)u32Disp;   /* Sign-extend the displacement. */
        }
        else
        {
            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);

            /*
             * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
             * See Intel instruction spec. 2.2 "IA-32e Mode".
             */
            uint64_t u64Disp = 0;
            bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
            if (fRipRelativeAddr)
            {
                /*
                 * RIP-relative addressing mode.
                 *
                 * The displacement is 32-bit signed implying an offset range of +/-2G.
                 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
                 */
                uint8_t const offDisp = offModRm + sizeof(bRm);
                IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
            }
            else
            {
                uint8_t offDisp = offModRm + sizeof(bRm);

                /*
                 * Register (and perhaps scale, index and base).
                 *
                 * REX.B extends the most-significant bit of the base register. However, REX.B
                 * is ignored while determining whether an SIB follows the opcode. Hence, we
                 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
                 *
                 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
                 */
                iBaseReg = (bRm & X86_MODRM_RM_MASK);
                if (iBaseReg == 4)
                {
                    /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
                    uint8_t bSib;
                    uint8_t const offSib = offModRm + sizeof(bRm);
                    IEM_SIB_GET_U8(pVCpu, bSib, offSib);

                    /* Displacement may follow SIB, update its offset. */
                    offDisp += sizeof(bSib);

                    /* Get the scale. */
                    uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* Get the index. */
                    iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
                    fIdxRegValid = RT_BOOL(iIdxReg != 4);   /* R12 -can- be used as an index register. */

                    /* Get the base. */
                    iBaseReg = (bSib & X86_SIB_BASE_MASK);
                    fBaseRegValid = true;
                    if (iBaseReg == 5)
                    {
                        if ((bRm & X86_MODRM_MOD_MASK) == 0)
                        {
                            /* Mod is 0 implies a signed 32-bit displacement with no base. */
                            IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
                        }
                        else
                        {
                            /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
                            iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
                        }
                    }
                }
                iBaseReg |= pVCpu->iem.s.uRexB;

                /* Register + displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0: /* Handled above */                               break;
                    case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp);  break;
                    case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
                    default:
                    {
                        /* Register addressing, handled at the beginning. */
                        AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
                        break;
                    }
                }
            }

            GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
        }

        /*
         * The primary or secondary register operand is reported in iReg2 depending
         * on whether the primary operand is in read/write form.
         */
        uint8_t idxReg2;
        if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
        {
            idxReg2 = bRm & X86_MODRM_RM_MASK;
            if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
                idxReg2 |= pVCpu->iem.s.uRexB;
        }
        else
        {
            idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
            if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
                idxReg2 |= pVCpu->iem.s.uRexReg;
        }
        ExitInstrInfo.All.u2Scaling      = uScale;
        ExitInstrInfo.All.iReg1          = 0;   /* Not applicable for memory addressing. */
        ExitInstrInfo.All.u3AddrSize     = pVCpu->iem.s.enmEffAddrMode;
        ExitInstrInfo.All.fIsRegOperand  = 0;
        ExitInstrInfo.All.uOperandSize   = pVCpu->iem.s.enmEffOpSize;
        ExitInstrInfo.All.iSegReg        = pVCpu->iem.s.iEffSeg;
        ExitInstrInfo.All.iIdxReg        = iIdxReg;
        ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
        ExitInstrInfo.All.iBaseReg       = iBaseReg;
        ExitInstrInfo.All.iIdxReg        = !fBaseRegValid;
        ExitInstrInfo.All.iReg2          = idxReg2;
    }

    /*
     * Handle exceptions to the norm for certain instructions.
     * (e.g. some instructions convey an instruction identity in place of iReg2).
     */
    switch (uExitReason)
    {
        case VMX_EXIT_GDTR_IDTR_ACCESS:
        {
            Assert(VMXINSTRID_IS_VALID(uInstrId));
            Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
            ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
            ExitInstrInfo.GdtIdt.u2Undef0  = 0;
            break;
        }

        case VMX_EXIT_LDTR_TR_ACCESS:
        {
            Assert(VMXINSTRID_IS_VALID(uInstrId));
            Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
            ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
            ExitInstrInfo.LdtTr.u2Undef0  = 0;
            break;
        }

        case VMX_EXIT_RDRAND:
        case VMX_EXIT_RDSEED:
        {
            Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
            break;
        }
    }

    /* Update displacement and return the constructed VM-exit instruction information field. */
    if (pGCPtrDisp)
        *pGCPtrDisp = GCPtrDisp;

    return ExitInstrInfo.u;
}
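Most of iemVmxGetExitInstrInfo is driven by three ModR/M decision points: mod == 3 selects the register form, rm == 4 (with mod != 3) means an SIB byte follows, and mod == 0 with rm == 5 selects a bare disp32 (RIP-relative in 64-bit mode). A compact, standalone classifier sketch using the raw x86 field positions (mod = bits 7:6, reg = 5:3, rm = 2:0) instead of VirtualBox's X86_MODRM_* constants:

#include <stdint.h>
#include <stdio.h>

/* Classify a ModR/M byte the same way the function's if/else ladder does. */
static void classifyModRm(uint8_t bRm)
{
    uint8_t const mod = bRm >> 6;
    uint8_t const reg = (bRm >> 3) & 7;
    uint8_t const rm  = bRm & 7;

    if (mod == 3)
        printf("%#04x: register form (reg=%u, rm=%u), no displacement\n", bRm, reg, rm);
    else if (rm == 4)
        printf("%#04x: memory form, SIB byte follows (disp size per mod=%u)\n", bRm, mod);
    else if (mod == 0 && rm == 5)
        printf("%#04x: memory form, disp32 only (RIP-relative in 64-bit mode)\n", bRm);
    else
        printf("%#04x: memory form, base=rm=%u, disp size per mod=%u\n", bRm, rm, mod);
}

int main(void)
{
    classifyModRm(0xC1); /* mod=3       -> register operands     */
    classifyModRm(0x04); /* mod=0, rm=4 -> SIB follows           */
    classifyModRm(0x05); /* mod=0, rm=5 -> disp32 / RIP-relative */
    classifyModRm(0x45); /* mod=1, rm=5 -> [rBP/r13] + disp8     */
    return 0;
}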
Between the two moved functions, a review note is added above an existing "#if 0" block in the VM-exit code (new lines 1852-1855):

     iemVmxVmexitSaveGuestSegRegs(pVCpu);
 
+    /** @todo r=ramshankar: The below hack is no longer necessary because we invoke the
+     *        VM-exit after updating RIP. I'm leaving it in-place temporarily in case
+     *        we need to fix missing exit information or callers still setting
+     *        instruction-length field when it is not necessary. */
 #if 0
     /*
…

Finally, the APIC-write VM-exit path now truncates the offset it reports (new lines 4010-4012):

     {
         Assert(offApic < XAPIC_OFF_END + 4);
+
+        /* Write only bits 11:0 of the APIC offset into the VM-exit qualification field. */
+        offApic &= UINT16_C(0xfff);
         iemVmxVmcsSetExitQual(pVCpu, offApic);
         return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE);
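The APIC-write hunk matches the added comment: only the page offset (bits 11:0) belongs in the exit qualification, so any stray high bits are cleared first. A trivial sketch with a made-up offset value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical raw APIC offset carrying stray high bits. */
    uint16_t offApic = 0x1080;

    /* Keep only bits 11:0, as the added code does, before it is stored
       in the VM-exit qualification. */
    offApic &= UINT16_C(0xfff);

    printf("exit qualification = %#x\n", offApic);   /* Prints 0x80. */
    return 0;
}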