- Timestamp: Aug 29, 2018, 6:12:35 AM (6 years ago)
- Location: trunk
- Files: 8 edited
trunk/include/VBox/vmm/cpumctx.h (r73606 → r73937)

Two shadow-VMCS pointer caches are carved out of the padding area of the nested-VMX state; the padding shrinks so the overall layout stays fixed at 0x3f0 bytes:

         uint32_t                    uVmcsR3Padding;
 #endif
-        /** 0x308 - Padding. */
-        uint8_t                     abPadding[0x3f0 - 0x308];
+        /** 0x308 - Cache of the nested-guest shadow VMCS - R0 ptr. */
+        R0PTRTYPE(PVMXVVMCS)        pShadowVmcsR0;
+#if HC_ARCH_BITS == 32
+        uint32_t                    uShadowVmcsR0Padding;
+#endif
+        /** 0x310 - Cache of the nested-guest shadow VMCS - R3 ptr. */
+        R3PTRTYPE(PVMXVVMCS)        pShadowVmcsR3;
+#if HC_ARCH_BITS == 32
+        uint32_t                    uShadowVmcsR3Padding;
+#endif
+        /** 0x318 - Padding. */
+        uint8_t                     abPadding[0x3f0 - 0x318];
     } vmx;
 } CPUM_UNION_NM(s);
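The hunk above follows a common fixed-layout pattern: new members are carved out of a reserved padding block and the padding is shrunk so nothing after it moves. A minimal stand-alone sketch of the pattern (illustrative names, 64-bit host assumed, so the HC_ARCH_BITS == 32 padding from the real struct is omitted):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct EXAMPLEVMXSTATE
{
    uint8_t  abBefore[0x308];           /* members occupying 0x000..0x307 */
    uint64_t uShadowVmcsR0;             /* 0x308 - newly carved-out slot */
    uint64_t uShadowVmcsR3;             /* 0x310 - newly carved-out slot */
    uint8_t  abPadding[0x3f0 - 0x318];  /* 0x318 - what is left of the reserve */
} EXAMPLEVMXSTATE;

/* The compile-time checks that keep such a layout honest. */
static_assert(offsetof(EXAMPLEVMXSTATE, uShadowVmcsR0) == 0x308, "slot moved");
static_assert(offsetof(EXAMPLEVMXSTATE, abPadding)     == 0x318, "padding moved");
static_assert(sizeof(EXAMPLEVMXSTATE)                  == 0x3f0, "size changed");
```

The tstVMStruct.h change at the end of this changeset serves the same purpose for the real CPUMCTX layout.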
trunk/include/VBox/vmm/hm_vmx.h (r73885 → r73937)

New VMWRITE diagnostics are added to the VMX instruction diagnostic enum:

     kVmxVInstrDiag_Vmclear_PtrWidth,
     kVmxVInstrDiag_Vmclear_Success,
+    /* VMWRITE. */
+    kVmxVInstrDiag_Vmwrite_Cpl,
+    kVmxVInstrDiag_Vmwrite_FieldInvalid,
+    kVmxVInstrDiag_Vmwrite_FieldRo,
+    kVmxVInstrDiag_Vmwrite_LinkPtrInvalid,
+    kVmxVInstrDiag_Vmwrite_PtrInvalid,
+    kVmxVInstrDiag_Vmwrite_PtrMap,
+    kVmxVInstrDiag_Vmwrite_Success,
     /* Last member for determining array index limit. */
     kVmxVInstrDiag_Last

The VMXVVMCS structure documentation is expanded:

  * The first 8 bytes are as per Intel spec. 24.2 "Format of the VMCS Region".
  *
- * The offset and size of the VMCS state field (fVmcsState) is also fixed as we use
- * it to offset into guest memory.
+ * The offset and size of the VMCS state field (fVmcsState) is also fixed (not by
+ * Intel but for our own requirements) as we use it to offset into guest memory.
+ *
+ * We always treat natural-width fields as 64-bit in our implementation since
+ * it's easier, allows for teleportation in the future and does not affect guest
+ * software.
  *
  * Although the guest is supposed to access the VMCS only through the execution of
    ...

The unused field-offset macros are dropped (they also referenced a_VmcsFieldEnc while declaring the parameter as a_Enc):

 AssertCompileMemberOffset(VMXVVMCS, u64GuestCr0, 0x6c0);
 AssertCompileMemberOffset(VMXVVMCS, u64HostCr0, 0x860);
-
-/** Get the offset into VMCS data for a VMCS field given its encoding. */
-#define VMX_V_VMCS_FIELD_OFFSET(a_Enc)        (  ((a_VmcsFieldEnc).n.u8Index & 0x1f) \
-                                               | ((a_VmcsFieldEnc).n.u2Type << 5) \
-                                               | ((a_VmcsFieldEnc).n.u2Width << 7))
-
-/** Get the offset into VMCS data for a VMCS field given its encoding as an
- *  unsigned 32-bit number. */
-#define VMX_V_VMCS_FIELD_OFFSET_U32(a_uEnc)   (  (RT_BF_GET((a_uEnc), VMX_BF_VMCS_ENC_INDEX) & 0x1f) \
-                                               | (RT_BF_GET((a_uEnc), VMX_BF_VMCS_ENC_TYPE) << 5) \
-                                               | (RT_BF_GET((a_uEnc), VMX_BF_VMCS_ENC_WIDTH) << 7))
 /** @} */

HMVmxGetVmcsFieldWidth is renamed and now returns the effective width:

 /**
- * Gets the width of a VMCS field given its encoding.
+ * Gets the effective width of a VMCS field given its encoding, adjusted for
+ * HIGH/FULL access for 64-bit fields.
  *
- * @returns The VMCS field width.
+ * @returns The effective VMCS field width.
  * @param   uFieldEnc   The VMCS field encoding.
  *
    ...
  *          supported VMCS field.
  */
-DECLINLINE(uint32_t) HMVmxGetVmcsFieldWidth(uint32_t uFieldEnc)
+DECLINLINE(uint8_t) HMVmxGetVmcsFieldWidthEff(uint32_t uFieldEnc)
 {
     /* Only the "HIGH" parts of all 64-bit fields have bit 0 set. */
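The HIGH/FULL adjustment the renamed helper performs is easy to restate in isolation. Per the Intel SDM's VMCS component encoding, bit 0 is the access type (1 = the high 32 bits of a 64-bit field) and bits 14:13 declare the width; only 64-bit fields come in HIGH variants, and a HIGH access effectively touches 32 bits. A hedged re-derivation with local names (not the VirtualBox ones):

```c
#include <stdint.h>

/* Width values as encoded in bits 14:13 of a VMCS field encoding. */
enum { WIDTH_16BIT = 0, WIDTH_64BIT = 1, WIDTH_32BIT = 2, WIDTH_NATURAL = 3 };

static uint8_t vmcsFieldWidthEff(uint32_t uFieldEnc)
{
    uint8_t const uWidth = (uFieldEnc >> 13) & 0x3;   /* declared width */
    /* Only the "HIGH" parts of 64-bit fields have bit 0 set; a HIGH access
       reads or writes just the upper 32 bits, so report 32-bit instead. */
    if (uWidth == WIDTH_64BIT && (uFieldEnc & 0x1))
        return WIDTH_32BIT;
    return uWidth;
}
```

This is why the return type can shrink to uint8_t: the result is always one of four small enum values.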
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r73885 → r73937)

The g_apszVmxVInstrDiagDesc table is re-aligned so the name strings and closing parentheses form columns (whitespace-only for the existing Ipe_*, VMXON, VMXOFF, VMPTRLD, VMPTRST and VMCLEAR entries), and the VMWRITE descriptions are appended after VMCLEAR:

     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrWidth      , "PtrWidth"      ),
-    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Success       , "Success")
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Success       , "Success"       ),
+    /* VMWRITE. */
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Cpl           , "Cpl"           ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldInvalid  , "FieldInvalid"  ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldRo       , "FieldRo"       ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_LinkPtrInvalid, "LinkPtrInvalid"),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrInvalid    , "PtrInvalid"    ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrMap        , "PtrMap"        ),
+    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Success       , "Success"       )
     /* kVmxVInstrDiag_Last */
 };
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r73787 → r73937)

A new decoding helper macro is added alongside the existing prefix checks:

 } while (0)

+/**
+ * Done decoding, raise \#UD exception if any operand-size override, repz or
+ * repnz prefixes are present.
+ */
+#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
+    do \
+    { \
+        if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
+        { /* likely */ } \
+        else \
+            return IEMOP_RAISE_INVALID_OPCODE(); \
+    } while (0)
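The macro boils down to one mask test against the prefix bits accumulated during decode. A stand-alone illustration (the bit values here are made up; the real IEM_OP_PRF_* constants live in IEM's internals):

```c
#include <stdbool.h>
#include <stdint.h>

#define PRF_SIZE_OP UINT32_C(0x0100)  /* 0x66 operand-size override */
#define PRF_REPZ    UINT32_C(0x0200)  /* 0xf3 */
#define PRF_REPNZ   UINT32_C(0x0400)  /* 0xf2 */

/* Returns true when decode must bail out with #UD, mirroring the macro's
   RT_LIKELY fast path for the common no-prefix case. */
static bool mustRaiseUd(uint32_t fPrefixes)
{
    return (fPrefixes & (PRF_SIZE_OP | PRF_REPZ | PRF_REPNZ)) != 0;
}
```

Keeping the no-prefix case on the likely branch matters because the macro expands into every decoder that uses it.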
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r73885 → r73937)

Shadow-VMCS accessor macros are added and the current-VMCS macros are reworked to build on one another:

 # endif /* !IEM_WITH_CODE_TLB */

+/** Whether a shadow VMCS is present for the given VCPU. */
+#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu)    RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
+
+/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
+#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu)    ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u64VmcsLinkPtr.u)
+
 /** Whether a current VMCS is present for the given VCPU. */
-#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu)   RT_BOOL((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
+#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu)   RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)

-/** Gets the current VMCS for the given VCPU. */
+/** Gets the guest-physical address of the current VMCS for the given VCPU. */
 #define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu)   ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)

-/** Sets a new VMCS as the current VMCS for the given VCPU. */
+/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
 #define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
     do \
    ...

iemVmxIsVmcsFieldValid, previously compiled out with #if 0 ("Disabled static fn until we use it with VMREAD/VMWRITE instruction implementation"), is enabled and now takes the field encoding as a plain uint32_t:

 /**
  * Returns whether the given VMCS field is valid and supported by our emulation.
  *
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   encField    The VMCS field encoding.
+ * @param   uFieldEnc   The VMCS field encoding.
  *
  * @remarks This takes into account the CPU features exposed to the guest.
  */
-IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, VMXVMCSFIELDENC encField)
+IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint32_t uFieldEnc)
 {
     PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
-    switch (encField.u)
+    switch (uFieldEnc)
     {
    ...
     return false;
 }

The new VMWRITE execution worker:

+/**
+ * VMWRITE instruction execution worker.
+ *
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   cbInstr         The instruction length.
+ * @param   uFieldEnc       The VMCS field encoding.
+ * @param   u64Val          The value to write (or the guest linear address of
+ *                          the value); @a pExitInstrInfo indicates whether it
+ *                          is a memory or register operand.
+ * @param   pExitInstrInfo  Pointer to the VM-exit instruction information field.
+ * @param   GCPtrDisp       The displacement field for the memory operand, if any.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint32_t uFieldEnc, uint64_t u64Val,
+                                      PCVMXEXITINSTRINFO pExitInstrInfo, RTGCPTR GCPtrDisp)
+{
+    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
+    {
+        RT_NOREF(GCPtrDisp);
+        /** @todo NSTVMX: intercept. */
+        /** @todo NSTVMX: VMCS shadowing intercept (VMREAD/VMWRITE bitmap). */
+    }
+
+    /* CPL. */
+    if (CPUMGetGuestCPL(pVCpu) > 0)
+    {
+        Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_Cpl;
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    /* VMCS pointer in root mode. */
+    if (   IEM_IS_VMX_ROOT_MODE(pVCpu)
+        && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    {
+        Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrInvalid;
+        iemVmxVmFailInvalid(pVCpu);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /* VMCS-link pointer in non-root mode. */
+    if (   IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
+        && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+    {
+        Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_LinkPtrInvalid;
+        iemVmxVmFailInvalid(pVCpu);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /* If the VMWRITE instruction references memory, fetch the value from the
+       specified memory operand. */
+    if (!pExitInstrInfo->VmreadVmwrite.fIsRegOperand)
+    {
+        uint8_t const uAddrSize = pExitInstrInfo->VmreadVmwrite.u3AddrSize;
+        static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff), 0 };
+        AssertReturn(uAddrSize != 3, VERR_IEM_IPE_1);
+        RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[uAddrSize];
+
+        /* Read the value from the specified guest memory location. */
+        VBOXSTRICTRC rcStrict;
+        if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
+            rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, pExitInstrInfo->VmreadVmwrite.iSegReg, GCPtrVal);
+        else
+        {
+            uint32_t u32Val;
+            rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, pExitInstrInfo->VmreadVmwrite.iSegReg, GCPtrVal);
+            u64Val = u32Val;
+        }
+        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
+        {
+            Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrMap;
+            return rcStrict;
+        }
+    }
+
+    /* Supported VMCS field. */
+    if (!iemVmxIsVmcsFieldValid(pVCpu, uFieldEnc))
+    {
+        Log(("vmwrite: VMCS field %#x invalid -> VMFail\n", uFieldEnc));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_FieldInvalid;
+        iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /* Read-only VMCS field. */
+    bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(uFieldEnc);
+    if (   fReadOnlyField
+        && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
+    {
+        Log(("vmwrite: Write to read-only VMCS component %#x -> VMFail\n", uFieldEnc));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_FieldRo;
+        iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Set up writing to the current or shadow VMCS.
+     */
+    uint8_t *pbVmcs;
+    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
+        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
+    else
+        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pbVmcs);
+
+    PCVMXVMCSFIELDENC pFieldEnc = (PCVMXVMCSFIELDENC)&uFieldEnc;
+    uint8_t  const uWidth     = pFieldEnc->n.u2Width;
+    uint8_t  const uType      = pFieldEnc->n.u2Type;
+    uint8_t  const uWidthType = (uWidth << 2) | uType;
+    uint8_t  const uIndex     = pFieldEnc->n.u8Index;
+    AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
+    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
+
+    /*
+     * Write the VMCS component based on the field's effective width.
+     *
+     * The effective width is a 64-bit field's width adjusted to 32 bits when
+     * the access type indicates the high part (little endian) of the field.
+     */
+    uint8_t      *pbField   = pbVmcs + offField;
+    uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(uFieldEnc);
+    switch (uEffWidth)
+    {
+        case VMX_VMCS_ENC_WIDTH_64BIT:
+        case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
+        case VMX_VMCS_ENC_WIDTH_32BIT:   *(uint32_t *)pbField = u64Val; break;
+        case VMX_VMCS_ENC_WIDTH_16BIT:   *(uint16_t *)pbField = u64Val; break;
+    }
+
+    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_Success;
+    iemVmxVmSucceed(pVCpu);
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
+}

A copy/paste slip in the VMCLEAR worker's diagnostic is fixed:

         Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
-        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrMap;
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrMap;
         return rcStrict;

And the instruction implementations dispatching to the worker:

+/**
+ * Implements 'VMWRITE' register.
+ */
+IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint32_t, u32VmcsFieldEnc, uint64_t, u64Val)
+{
+    VMXEXITINSTRINFO ExitInstrInfo;
+    ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMWRITE, VMX_INSTR_ID_NONE, NULL /* pGCPtrDisp */);
+    return iemVmxVmwrite(pVCpu, cbInstr, u32VmcsFieldEnc, u64Val, &ExitInstrInfo, 0 /* GCPtrDisp */);
+}
+
+
+/**
+ * Implements 'VMWRITE' memory.
+ */
+IEM_CIMPL_DEF_2(iemCImpl_vmwrite_mem, uint32_t, u32VmcsFieldEnc, RTGCUINTPTR64, GCPtrVal)
+{
+    RTGCPTR GCPtrDisp;
+    VMXEXITINSTRINFO ExitInstrInfo;
+    ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMWRITE, VMX_INSTR_ID_NONE, &GCPtrDisp);
+    return iemVmxVmwrite(pVCpu, cbInstr, u32VmcsFieldEnc, GCPtrVal, &ExitInstrInfo, GCPtrDisp);
+}

 #endif
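The worker's offset computation is a two-level table lookup keyed by the field encoding's (width, type) pair and its index. A sketch with hypothetical names (the real map, g_aoffVmcsMap, is generated elsewhere in IEM and is bounded by VMX_V_VMCS_MAX_INDEX; the bound below is illustrative):

```c
#include <stdint.h>

#define SKETCH_VMCS_MAX_INDEX 0x1f   /* illustrative bound, not the real value */

/* [ (width << 2) | type ][ index ] -> byte offset into the virtual VMCS. */
static uint16_t g_aoffFieldsSketch[16][SKETCH_VMCS_MAX_INDEX + 1];

static uint16_t vmcsFieldOffset(uint32_t uFieldEnc)
{
    uint8_t  const uWidth = (uFieldEnc >> 13) & 0x3;    /* bits 14:13 */
    uint8_t  const uType  = (uFieldEnc >> 10) & 0x3;    /* bits 11:10 */
    uint16_t const uIndex = (uFieldEnc >>  1) & 0x1ff;  /* bits 9:1 */
    if (uIndex > SKETCH_VMCS_MAX_INDEX)
        return UINT16_MAX;  /* field not covered by this map */
    return g_aoffFieldsSketch[(uWidth << 2) | uType][uIndex];
}
```

Because both HIGH and FULL encodings of a 64-bit field differ only in bit 0, they index the same map slot; the effective-width helper then decides whether 32 or 64 bits are actually written at that offset.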
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h (r73756 → r73937)

Review todos are left on the xgetbv and xsetbv decoders:

     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor)
     {
+        /** @todo r=ramshankar: We should use
+         *        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX and
+         *        IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES here. */
         IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES();
         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_xgetbv);

(and likewise before the iemCImpl_xsetbv dispatch).

The VMWRITE opcode (0x0f 0x79) is implemented, replacing the stub:

 /** Opcode 0x0f 0x79 - VMWRITE Gy, Ey */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+FNIEMOP_DEF(iemOp_vmwrite_Gy_Ey)
+{
+    IEMOP_MNEMONIC(vmwrite, "vmwrite Gy,Ey");
+    IEMOP_HLP_IN_VMX_OPERATION();
+    IEMOP_HLP_VMX_INSTR();
+    IEMMODE const enmEffOpSize = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? IEMMODE_64BIT : IEMMODE_32BIT;
+
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
+        if (enmEffOpSize == IEMMODE_64BIT)
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG(uint64_t, u64Enc, 0);
+            IEM_MC_ARG(uint64_t, u64Val, 1);
+            IEM_MC_FETCH_GREG_U64(u64Enc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_FETCH_GREG_U64(u64Val, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+            IEM_MC_CALL_CIMPL_2(iemCImpl_vmwrite_reg, u64Enc, u64Val);
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG(uint32_t, u32Enc, 0);
+            IEM_MC_ARG(uint32_t, u32Val, 1);
+            IEM_MC_FETCH_GREG_U32(u32Enc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_FETCH_GREG_U32(u32Val, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+            IEM_MC_CALL_CIMPL_2(iemCImpl_vmwrite_reg, u32Enc, u32Val);
+            IEM_MC_END();
+        }
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        if (enmEffOpSize == IEMMODE_64BIT)
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG(uint64_t, u64Enc, 0);
+            IEM_MC_ARG(RTGCPTR, GCPtrVal, 1);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrVal, bRm, 0);
+            IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
+            IEM_MC_FETCH_GREG_U64(u64Enc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALL_CIMPL_2(iemCImpl_vmwrite_mem, u64Enc, GCPtrVal);
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG(uint32_t, u32Enc, 0);
+            IEM_MC_ARG(RTGCPTR, GCPtrVal, 1);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrVal, bRm, 0);
+            IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
+            IEM_MC_FETCH_GREG_U32(u32Enc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALL_CIMPL_2(iemCImpl_vmwrite_mem, u32Enc, GCPtrVal);
+            IEM_MC_END();
+        }
+    }
+    return VINF_SUCCESS;
+}
+#else
 FNIEMOP_STUB(iemOp_vmwrite_Gy_Ey);
+#endif
 /* Opcode 0x66 0x0f 0x79 - invalid */
 /* Opcode 0xf3 0x0f 0x79 - invalid */

The vmptrld and vmptrst decoders now reject operand-size overrides as well:

     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-    IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES();
+    IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
     IEM_MC_CALL_CIMPL_1(iemCImpl_vmptrld, GCPtrEffSrc);
     IEM_MC_END();
    ...
     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
-    IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES();
+    IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
     IEM_MC_CALL_CIMPL_1(iemCImpl_vmptrst, GCPtrEffDst);
     IEM_MC_END();
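For context, this is the guest-side view of what the decoder above emulates: a hypothetical ring-0 helper (GCC inline assembly, 64-bit, names invented here) issuing VMWRITE with the field encoding and value in registers, then folding the CF/ZF failure indications into a return code:

```c
#include <stdint.h>

/* Returns 0 on VMsucceed, -1 on VMfail (CF or ZF set by the CPU). */
static inline int vmx_vmwrite(uint64_t uFieldEnc, uint64_t uValue)
{
    uint8_t fFailed;
    __asm__ __volatile__("vmwrite %[val], %[enc]\n\t"  /* AT&T: value, field-encoding */
                         "setbe   %[fail]"             /* CF=1 or ZF=1 => failure */
                         : [fail] "=q" (fFailed)
                         : [enc] "r" (uFieldEnc), [val] "r" (uValue)
                         : "cc", "memory");
    return fFailed ? -1 : 0;
}
```

Executed outside VMX operation or with a 66/F2/F3 prefix, the instruction raises #UD instead, which is exactly what the IEMOP_HLP_IN_VMX_OPERATION and prefix checks in the decoder reproduce.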
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp (r73745 → r73937)

A no-op stub for the new decoding helper is added to the MC checker:

 #define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES()    do { } while (0)
+#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() do { } while (0)
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
 # define IEMOP_HLP_VMX_INSTR()                                      do { } while (0)
trunk/src/VBox/VMM/testcase/tstVMStruct.h (r73606 → r73937)

Offset checks for the new shadow-VMCS pointers are added:

 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR0);
 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR3);
+GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR0);
+GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR3);
 GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions);
 GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);