- Timestamp: May 28, 2019, 5:26:38 AM
- File: 1 edited
Legend:
- ' ' unmodified
- '+' added
- '-' removed
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r78777)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r78798)

@@ -422 +422 @@
 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst);
 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, uint8_t cbInstr);
+static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
+static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
 /** @} */

@@ -4951 +4953 @@
  * The guest is not using paging, but the CPU (VT-x) has to. While the guest
  * thinks it accesses physical memory directly, we use our identity-mapped
- * page
+ * page table to map guest-linear to guest-physical addresses. EPT takes care
  * of translating it to host-physical addresses.
  */

@@ -12837 +12839 @@
         case VMX_EXIT_MOV_CRX:
         {
-            int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+            int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+            rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
             AssertRCReturn(rc, rc);

@@ -12855 +12858 @@
                         VMXVEXITINFO ExitInfo;
                         RT_ZERO(ExitInfo);
-                        ExitInfo.uReason = VMX_EXIT_MOV_CRX;
+                        ExitInfo.uReason = uExitReason;
                         ExitInfo.cbInstr = pVmxTransient->cbInstr;
                         ExitInfo.u64Qual = pVmxTransient->uExitQual;

@@ -12861 +12864 @@
                     }
                     else
-                        rcStrict = hmR0VmxExitMovCRx(pVCpu, pVmxTransient);
+                        rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmxTransient->cbInstr, iGReg, iCrReg);
                     break;
                 }

@@ -12867 +12870 @@
                 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
                 {
-                    /** @todo NSTVMX: Implement me. */
+                    /*
+                     * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
+                     * CR2 reads do not cause a VM-exit.
+                     * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
+                     * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
+                     */
+                    uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
+                    if (   iCrReg == 3
+                        || iCrReg == 8)
+                    {
+                        static const uint32_t s_aCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
+                                                                         0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
+                        uint32_t const uIntercept = s_aCrXReadIntercepts[iCrReg];
+                        if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, uIntercept))
+                        {
+                            VMXVEXITINFO ExitInfo;
+                            RT_ZERO(ExitInfo);
+                            ExitInfo.uReason = uExitReason;
+                            ExitInfo.cbInstr = pVmxTransient->cbInstr;
+                            ExitInfo.u64Qual = pVmxTransient->uExitQual;
+                            rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
+                        }
+                        else
+                        {
+                            uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
+                            rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmxTransient->cbInstr, iGReg, iCrReg);
+                        }
+                    }
+                    else
+                    {
+                        pVCpu->hm.s.u32HMError = iCrReg;
+                        AssertMsgFailedReturn(("MOV from CR%d VM-exit must not happen\n", iCrReg), VERR_VMX_UNEXPECTED_EXIT);
+                    }
                     break;
                 }

@@ -12873 +12908 @@
                 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
                 {
-                    rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
-                    AssertRCReturn(rc, rc);
-
                     PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
                     Assert(pVmcsNstGst);

@@ -12885 +12917 @@
                         VMXVEXITINFO ExitInfo;
                         RT_ZERO(ExitInfo);
-                        ExitInfo.uReason = VMX_EXIT_MOV_CRX;
+                        ExitInfo.uReason = uExitReason;
                         ExitInfo.cbInstr = pVmxTransient->cbInstr;
                         ExitInfo.u64Qual = pVmxTransient->uExitQual;

@@ -12900 +12932 @@
                     uint16_t const uNewMsw     = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
                     bool const     fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
-
-                    rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
                     if (fMemOperand)
                     {
-                        rc |= hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
+                        rc = hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
+                        AssertRCReturn(rc, rc);
                         GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
                     }
                     else
                         GCPtrEffDst = NIL_RTGCPTR;
-                    AssertRCReturn(rc, rc);

                     if (CPUMIsGuestVmxLmswInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, uNewMsw))

@@ -12915 +12945 @@
                         VMXVEXITINFO ExitInfo;
                         RT_ZERO(ExitInfo);
-                        ExitInfo.uReason = VMX_EXIT_MOV_CRX;
+                        ExitInfo.uReason = uExitReason;
                         ExitInfo.cbInstr = pVmxTransient->cbInstr;
                         ExitInfo.u64GuestLinearAddr = GCPtrEffDst;

@@ -12930 +12960 @@
                     pVCpu->hm.s.u32HMError = uAccessType;
                     AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType),
-                                          VERR_VMX_UNEXPECTED_EXCEPTION);
+                                          VERR_VMX_UNEXPECTED_EXIT);
                 }
             }

@@ -13070 +13100 @@
             rcStrict = hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient);
             break;
+    }
+
+    if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+        rcStrict = VINF_SUCCESS;
     }

@@ -14404 +14440 @@
     VBOXSTRICTRC rcStrict;
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    RTGCUINTPTR const uExitQual = pVmxTransient->uExitQual;
-    uint32_t const uAccessType  = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
+    uint64_t const uExitQual   = pVmxTransient->uExitQual;
+    uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
     switch (uAccessType)
     {

@@ -14411 +14447 @@
         {
             uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
-            rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
-                                                 VMX_EXIT_QUAL_CRX_GENREG(uExitQual));
+            uint8_t const  iGReg   = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
+            uint8_t const  iCrReg  = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
+
+            /*
+             * MOV to CR3 only cause a VM-exit when one or more of the following are true:
+             *   - When nested paging isn't used.
+             *   - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
+             *   - We are executing in the VM debug loop.
+             */
+            Assert(   iCrReg != 3
+                   || !pVM->hm.s.fNestedPaging
+                   || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
+                   || pVCpu->hm.s.fUsingDebugLoop);
+
+            /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
+            Assert(   iCrReg != 8
+                   || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
+
+            rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmxTransient->cbInstr, iGReg, iCrReg);
             AssertMsg(   rcStrict == VINF_SUCCESS
                       || rcStrict == VINF_IEM_RAISED_XCPT
                       || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

-            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
+            /*
+             * This is a kludge for handling switches back to real mode when we try to use
+             * V86 mode to run real mode code directly. Problem is that V86 mode cannot
+             * deal with special selector values, so we have to return to ring-3 and run
+             * there till the selector values are V86 mode compatible.
+             *
+             * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
+             *       latter is an alias for VINF_IEM_RAISED_XCPT which is converted to VINF_SUCCESs
+             *       at the end of this function.
+             */
+            if (   iCrReg == 0
+                && rcStrict == VINF_SUCCESS
+                && !pVM->hm.s.vmx.fUnrestrictedGuest
+                && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
+                && (uOldCr0 & X86_CR0_PE)
+                && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
             {
-                case 0:
-                {
-                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
-                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
-                    Log4Func(("CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
-
-                    /*
-                     * This is a kludge for handling switches back to real mode when we try to use
-                     * V86 mode to run real mode code directly. Problem is that V86 mode cannot
-                     * deal with special selector values, so we have to return to ring-3 and run
-                     * there till the selector values are V86 mode compatible.
-                     *
-                     * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
-                     *       latter is an alias for VINF_IEM_RAISED_XCPT which is converted to VINF_SUCCESs
-                     *       at the end of this function.
-                     */
-                    if (   rc == VINF_SUCCESS
-                        && !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
-                        && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
-                        && (uOldCr0 & X86_CR0_PE)
-                        && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
-                    {
-                        /** @todo check selectors rather than returning all the time. */
-                        Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
-                        rcStrict = VINF_EM_RESCHEDULE_REM;
-                    }
-                    break;
-                }
-
-                case 2:
-                {
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
-                    /* Nothing to do here, CR2 it's not part of the VMCS. */
-                    break;
-                }
-
-                case 3:
-                {
-                    Assert(   !pVM->hm.s.fNestedPaging
-                           || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
-                           || pVCpu->hm.s.fUsingDebugLoop);
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
-                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
-                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
-                    Log4Func(("CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
-                    break;
-                }
-
-                case 4:
-                {
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
-                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
-                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
-                    Log4Func(("CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
-                              pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
-                    break;
-                }
-
-                case 8:
-                {
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
-                    Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
-                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
-                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
-                    break;
-                }
-                default:
-                    AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)));
-                    break;
+                /** @todo Check selectors rather than returning all the time. */
+                Assert(!pVmxTransient->fIsNestedGuest);
+                Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
+                rcStrict = VINF_EM_RESCHEDULE_REM;
             }
             break;

@@ -14495 +14497 @@
         case VMX_EXIT_QUAL_CRX_ACCESS_READ:     /* MOV from CRx */
        {
-            Assert(   !pVM->hm.s.fNestedPaging
+            uint8_t const iGReg  = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
+            uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
+
+            /*
+             * MOV from CR3 only cause a VM-exit when one or more of the following are true:
+             *   - When nested paging isn't used.
+             *   - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
+             *   - We are executing in the VM debug loop.
+             */
+            Assert(   iCrReg != 3
+                   || !pVM->hm.s.fNestedPaging
                    || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
-                   || pVCpu->hm.s.fUsingDebugLoop
-                   || VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 3);
-            /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
-            Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8
+                   || pVCpu->hm.s.fUsingDebugLoop);
+
+            /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
+            Assert(   iCrReg != 8
                    || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));

-            rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
-                                                VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
-            AssertMsg(   rcStrict == VINF_SUCCESS
-                      || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
-#ifdef VBOX_WITH_STATISTICS
-            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
-            {
-                case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
-                case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
-                case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
-                case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
-                case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
-            }
-#endif
-            Log4Func(("CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
-                      VBOXSTRICTRC_VAL(rcStrict)));
-            if (VMX_EXIT_QUAL_CRX_GENREG(uExitQual) == X86_GREG_xSP)
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
-            else
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
+            rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmxTransient->cbInstr, iGReg, iCrReg);
             break;
         }

@@ -14553 +14546 @@
             else
                 GCPtrEffDst = NIL_RTGCPTR;
-
             rcStrict = hmR0VmxExitLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
             break;

@@ -15660 +15652 @@

 /**
- * VM-exit exception handler for LMSW.
+ * VM-exit helper for LMSW.
  */
 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)

@@ -15671 +15663 @@

     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
-    if (rcStrict == VINF_IEM_RAISED_XCPT)
-    {
-        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
-        rcStrict = VINF_SUCCESS;
-    }
     return rcStrict;
 }

@@ -15681 +15668 @@

 /**
- * VM-exit exception handler for CLTS.
+ * VM-exit helper for CLTS.
  */
 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, uint8_t cbInstr)

@@ -15688 +15675 @@
     AssertMsg(   rcStrict == VINF_SUCCESS
              || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
+    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
     Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
-
-    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
-    if (rcStrict == VINF_IEM_RAISED_XCPT)
-    {
-        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
-        rcStrict = VINF_SUCCESS;
-    }
     return rcStrict;
 }
+
+
+/**
+ * VM-exit helper for MOV from CRx (CRx read).
+ */
+static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
+{
+    Assert(iCrReg < 16);
+    Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
+
+    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
+    AssertMsg(   rcStrict == VINF_SUCCESS
+              || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+
+    if (iGReg == X86_GREG_xSP)
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
+    else
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
+
+#ifdef VBOX_WITH_STATISTICS
+    switch (iCrReg)
+    {
+        case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
+        case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
+        case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
+        case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
+        case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
+    }
+#endif
+    Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
+    return rcStrict;
+}
+
+
+/**
+ * VM-exit helper for MOV to CRx (CRx write).
+ */
+static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
+{
+    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
+    AssertMsg(   rcStrict == VINF_SUCCESS
+              || rcStrict == VINF_IEM_RAISED_XCPT
+              || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+
+    switch (iCrReg)
+    {
+        case 0:
+        {
+            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
+            Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
+            break;
+        }
+
+        case 2:
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
+            /* Nothing to do here, CR2 it's not part of the VMCS. */
+            break;
+        }
+
+        case 3:
+        {
+            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
+            Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
+            break;
+        }
+
+        case 4:
+        {
+            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
+            Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
+                      pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
+            break;
+        }
+
+        case 8:
+        {
+            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
+                             HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
+            break;
+        }
+
+        default:
+            AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
+            break;
+    }
+
+    return rcStrict;
+}
+

 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
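The key routing decision this changeset adds on the nested-guest read path is table-driven: a MOV-from-CRx VM-exit can only be caused by CR3 or CR8 reads, and whether it is reflected to the guest hypervisor depends on the matching "store exiting" control in the nested-guest VMCS (CR3-store exiting for CR3, CR8-store exiting for CR8). The following self-contained C sketch models that lookup-and-route decision in isolation. The array layout and control names mirror the changeset and the bit positions follow the primary processor-based VM-execution controls, but RouteCrRead() and the driver in main() are simplified, hypothetical stand-ins rather than VirtualBox code.

/* Illustrative sketch only, not VirtualBox code: models the table-driven
 * intercept check added for nested-guest MOV-from-CRx VM-exits. */
#include <stdint.h>
#include <stdio.h>

/* Primary processor-based VM-execution control bits (CR3-store exiting is
 * bit 16, CR8-store exiting is bit 20). */
#define VMX_PROC_CTLS_CR3_STORE_EXIT  (UINT32_C(1) << 16)
#define VMX_PROC_CTLS_CR8_STORE_EXIT  (UINT32_C(1) << 20)

/* Which control must be set in the nested-guest VMCS for a read of CRn to be
 * reflected to the guest hypervisor; 0 means the read never exits. */
static const uint32_t s_aCrXReadIntercepts[] =
{
    0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
    0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT
};

/* Decide how a nested-guest "MOV GPR, CRn" VM-exit is routed (hypothetical helper). */
static const char *RouteCrRead(uint8_t iCrReg, uint32_t uNstGstProcCtls)
{
    if (iCrReg != 3 && iCrReg != 8)
        return "unexpected: CR0/CR2/CR4 reads never cause this VM-exit";
    uint32_t const uIntercept = s_aCrXReadIntercepts[iCrReg];
    if (uNstGstProcCtls & uIntercept)
        return "reflect the VM-exit to the nested hypervisor";
    return "handle locally (CRx read helper)";
}

int main(void)
{
    printf("CR3 read, CR3-store exiting set: %s\n", RouteCrRead(3, VMX_PROC_CTLS_CR3_STORE_EXIT));
    printf("CR8 read, no CR8-store exiting:  %s\n", RouteCrRead(8, 0));
    printf("CR0 read:                        %s\n", RouteCrRead(0, 0));
    return 0;
}

The lookup table keeps the per-register policy in one place; supporting another intercepted register would mean filling in its slot rather than growing an if/else chain, which is presumably why the changeset shapes s_aCrXReadIntercepts this way.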