Changeset 77295 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Feb 13, 2019, 10:55:28 AM
- svn:sync-xref-src-repo-rev: 128813
- File: 1 edited
--- trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h    (r77169)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h    (r77295)
  * @param   puValue     Where to store the CR3-target value.
  */
-DECLINLINE(uint64_t) iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
+IEM_STATIC uint64_t iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
 {
     Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
…
     uint8_t const *pbField = pbVmcs + offField;
     uint64_t const uCr3TargetValue = *(uint64_t *)pbField;
-
     return uCr3TargetValue;
 }
…
     /* Update the VM-exit reason, the other relevant data fields are expected to be updated by the caller already. */
     pVmcs->u32RoExitReason = uExitReason;
-    Log3(("vmexit: uExitReason=%#RX32 uExitQual=%#RX64\n", uExitReason, pVmcs->u64RoExitQual));
+    Log3(("vmexit: uExitReason=%#RX32 uExitQual=%#RX64 cs:rip=%04x:%#RX64\n", uExitReason, pVmcs->u64RoExitQual,
+          IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip));

     /*
…
     ExitInfo.uReason = VMX_EXIT_MOV_CRX;
     ExitInfo.cbInstr = cbInstr;
-
     ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
                      | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_CLTS);
…
     Assert(puNewCrX);
     Assert(iCrReg == 0 || iCrReg == 4);
+    Assert(iGReg < X86_GREG_COUNT);

     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
…
     ExitInfo.uReason = VMX_EXIT_MOV_CRX;
     ExitInfo.cbInstr = cbInstr;
-
     ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
                      | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
…
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     Assert(pVmcs);
+    Assert(iGReg < X86_GREG_COUNT);
     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
…
     ExitInfo.uReason = VMX_EXIT_MOV_CRX;
     ExitInfo.cbInstr = cbInstr;
-
     ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
                      | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
…
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     Assert(pVmcs);
+    Assert(iGReg < X86_GREG_COUNT);

     /*
…
     if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT)
     {
-        uint32_t uCr3TargetCount = pVmcs->u32Cr3TargetCount;
+        uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
         Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);

-        for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount; idxCr3Target++)
-        {
-            uint64_t const uCr3TargetValue = iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target);
-            if (uNewCr3 != uCr3TargetValue)
-            {
-                Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
-
-                VMXVEXITINFO ExitInfo;
-                RT_ZERO(ExitInfo);
-                ExitInfo.uReason = VMX_EXIT_MOV_CRX;
-                ExitInfo.cbInstr = cbInstr;
-
-                ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
-                                 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
-                                 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
-                return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
-            }
-        }
+        /* If the CR3-target count is 0, we must always cause a VM-exit. */
+        bool fIntercept = RT_BOOL(uCr3TargetCount == 0);
+        if (!fIntercept)
+        {
+            for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount; idxCr3Target++)
+            {
+                uint64_t const uCr3TargetValue = iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target);
+                if (uNewCr3 != uCr3TargetValue)
+                {
+                    fIntercept = true;
+                    break;
+                }
+            }
+        }
+
+        if (fIntercept)
+        {
+            Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
+
+            VMXVEXITINFO ExitInfo;
+            RT_ZERO(ExitInfo);
+            ExitInfo.uReason = VMX_EXIT_MOV_CRX;
+            ExitInfo.cbInstr = cbInstr;
+            ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
+                             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
+                             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
+            return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
+        }
     }
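The hunk above is the core of the change: the old loop could only reach the VM-exit path from inside the iteration, so a CR3-target count of zero meant MOV to CR3 was never intercepted. For orientation, here is a minimal standalone sketch of the architectural rule as the Intel SDM states it: MOV to CR3 causes a VM-exit when "CR3-load exiting" is 1 and the value being loaded does not equal any of the first n CR3-target values, so with n = 0 the exit is unconditional. All names below are hypothetical and are not VirtualBox APIs.

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the SDM rule for MOV-to-CR3 interception (hypothetical names). */
static bool ShouldInterceptMovToCr3(bool fCr3LoadExiting, uint64_t uNewCr3,
                                    const uint64_t *pauCr3Targets, uint32_t cCr3Targets)
{
    if (!fCr3LoadExiting)
        return false;                   /* Control clear: never intercept. */
    for (uint32_t i = 0; i < cCr3Targets; i++)
        if (uNewCr3 == pauCr3Targets[i])
            return false;               /* New value matches a CR3-target value: no exit. */
    return true;                        /* No match; with count == 0 this is unconditional. */
}

With cCr3Targets == 0 the loop body never runs and the function returns true, which is exactly the case the new RT_BOOL(uCr3TargetCount == 0) seeding of fIntercept covers.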
…
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     Assert(pVmcs);
+    Assert(iGReg < X86_GREG_COUNT);

     /*
…
     ExitInfo.uReason = VMX_EXIT_MOV_CRX;
     ExitInfo.cbInstr = cbInstr;
-
     ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
                      | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
…
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     Assert(pVmcs);
+    Assert(iGReg < X86_GREG_COUNT);

     /*
…
     ExitInfo.uReason = VMX_EXIT_MOV_CRX;
     ExitInfo.cbInstr = cbInstr;
-
     ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
                      | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
…
     Assert(iDrReg <= 7);
     Assert(uInstrId == VMXINSTRID_MOV_TO_DRX || uInstrId == VMXINSTRID_MOV_FROM_DRX);
+    Assert(iGReg < X86_GREG_COUNT);

     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
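All of the MOV-CRx hunks above build the same exit-qualification value via RT_BF_MAKE(). As a rough standalone illustration of the bit layout involved (per the Intel SDM's exit qualification for control-register accesses), with made-up macro and function names rather than VirtualBox's:

#include <stdint.h>

/* Exit qualification for control-register accesses:
 *   bits 3:0   control register number
 *   bits 5:4   access type: 0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW
 *   bits 11:8  general-purpose register (MOV-CR accesses only)
 */
#define CRX_ACCESS_WRITE 0u  /* MOV to CRx */
#define CRX_ACCESS_READ  1u  /* MOV from CRx */
#define CRX_ACCESS_CLTS  2u
#define CRX_ACCESS_LMSW  3u

static uint64_t MakeMovCrxExitQual(uint8_t iCrReg, uint8_t uAccessType, uint8_t iGReg)
{
    return ((uint64_t)(iCrReg      & 0xf) << 0)   /* which control register */
         | ((uint64_t)(uAccessType & 0x3) << 4)   /* read/write/CLTS/LMSW */
         | ((uint64_t)(iGReg       & 0xf) << 8);  /* source/destination GPR */
}

/* E.g. a guest "mov cr3, rdi" (GPR 7) would be reported as
 * MakeMovCrxExitQual(3, CRX_ACCESS_WRITE, 7) == 0x703. */

The new Assert(iGReg < X86_GREG_COUNT) checks in each hunk guard exactly the value that ends up in that GENREG field.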
…
      */
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+
+    /*
+     * If VM-entry is not vectoring, block-by-STI and block-by-MovSS state must be loaded.
+     * If VM-entry is vectoring, there is no block-by-STI or block-by-MovSS.
+     *
+     * See Intel spec. 26.6.1 "Interruptibility State".
+     */
     bool const fEntryVectoring = HMVmxIsVmentryVectoring(pVmcs->u32EntryIntInfo, NULL /* puEntryIntInfoType */);
-    if (!fEntryVectoring)
-    {
-        if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
-            EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
-        else
-            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
-
-        /* SMI blocking is irrelevant. We don't support SMIs yet. */
-    }
-    else
-    {
-        /* When the VM-entry is not vectoring, there is no blocking by STI or Mov-SS. */
-        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-    }
+    if (   !fEntryVectoring
+        && (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)))
+        EMSetInhibitInterruptsPC(pVCpu, pVmcs->u64GuestRip.u);
+    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

     /* NMI blocking. */
     if (   (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
         VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
+
+    /* SMI blocking is irrelevant. We don't support SMIs yet. */

     /* Loading PDPTEs will be taken care when we switch modes. We don't support EPT yet. */
…
     }

-    /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
+    /** @todo Distinguish block-by-MovSS from block-by-STI. Currently we
      * use block-by-STI here which is not quite correct. */
     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
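The interruptibility-state rework above flattens the old if/else into a single condition and, notably, anchors the interrupt inhibit at the RIP loaded from the VMCS (pVmcs->u64GuestRip.u) rather than the vCPU's current RIP, which at this point of VM-entry presumably does not yet hold the guest-state value. A compressed sketch of the resulting flow, using hypothetical stand-ins for the VirtualBox force-flag machinery and interruptibility-state bit positions as documented in the Intel SDM:

#include <stdbool.h>
#include <stdint.h>

/* Guest interruptibility-state bits (Intel SDM, guest non-register state). */
#define GUEST_INTR_STATE_BLOCK_STI   (1u << 0)
#define GUEST_INTR_STATE_BLOCK_MOVSS (1u << 1)
#define GUEST_INTR_STATE_BLOCK_NMI   (1u << 3)

/* Hypothetical stand-in for the per-vCPU force-flag state. */
typedef struct CpuInhibitState {
    bool     fInhibitInt;  /* interrupt delivery inhibited for one instruction */
    uint64_t uInhibitRip;  /* RIP at which the inhibit was established */
    bool     fBlockNmis;   /* virtual-NMI blocking */
} CpuInhibitState;

/* Mirrors the restructured flow: only a non-vectoring VM-entry may establish
 * STI/MovSS blocking; a vectoring entry clears any leftover inhibit instead. */
static void LoadGuestIntrState(CpuInhibitState *pState, uint32_t uIntrState,
                               uint64_t uVmcsGuestRip, bool fEntryVectoring)
{
    if (   !fEntryVectoring
        && (uIntrState & (GUEST_INTR_STATE_BLOCK_STI | GUEST_INTR_STATE_BLOCK_MOVSS)))
    {
        pState->fInhibitInt = true;
        pState->uInhibitRip = uVmcsGuestRip;  /* RIP from the VMCS, not the current one */
    }
    else
        pState->fInhibitInt = false;

    if (uIntrState & GUEST_INTR_STATE_BLOCK_NMI)
        pState->fBlockNmis = true;  /* SMI blocking deliberately ignored, as above. */
}

The trailing @todo hunk is related housekeeping: the force-flag used here still conflates block-by-MovSS with block-by-STI, which the comment now spells consistently as "block-by-MovSS".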