- Timestamp: Jun 6, 2013 2:54:23 PM (12 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
Modified: trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r46415 → r46419)

@@ lines 247–251 (internal function prototypes) @@
   * Internal Functions                                                            *
   *******************************************************************************/
 +static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
  static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
  static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr, …

@@ lines 944–970 @@
      /*
 -     * Flush all VPIDs (in case we or any other hypervisor have been using VPIDs) so that
 +     * Flush all EPTP tagged-TLB entries (in case any other hypervisor have been using EPTPs) so that
       * we can avoid an explicit flush while using new VPIDs. We would still need to flush
       * each time while reusing a VPID after hitting the MaxASID limit once.
       */
      if (   pVM
 -        && pVM->hm.s.vmx.fVpid
 -        && (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS))
 -    {
 -        hmR0VmxFlushVpid(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
 +        && pVM->hm.s.fNestedPaging)
 +    {
 +        /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */
 +        Assert(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
 +        hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
          pCpu->fFlushAsidBeforeUse = false;
      }
      else
 +    {
 +        /** @todo This is still not perfect. If on host resume (pVM is NULL or a VM
 +         *        without NestedPaging triggered this function) we still have the risk
 +         *        of potentially running with stale TLB-entries from other hypervisors
 +         *        when later we use a VM with NestedPaging. To fix this properly we will
 +         *        have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read
 +         *        'vmx_ept_vpid_caps' from it. Sigh. */
          pCpu->fFlushAsidBeforeUse = true;
 +    }

      /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume.
         See @bugref{6255}. */

@@ lines 1039–1079 (hmR0VmxFlushEpt) @@
   * @returns VBox status code.
   * @param   pVM         Pointer to the VM.
 - * @param   pVCpu       Pointer to the VMCPU.
 + * @param   pVCpu       Pointer to the VMCPU (can be NULL depending on @a
 + *                      enmFlush).
   * @param   enmFlush    Type of flush.
   */
  …
      Assert(pVM->hm.s.fNestedPaging);

 -    LogFlowFunc(("pVM=%p pVCpu=%p enmFlush=%d\n", pVM, pVCpu, enmFlush));
 -
      uint64_t descriptor[2];
 -    descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
 +    if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
 +        descriptor[0] = 0;
 +    else
 +    {
 +        Assert(pVCpu);
 +        descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
 +    }
      descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */

      int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
 -    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu->hm.s.vmx.HCPhysEPTP, rc));
 -    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
 +    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
 +              rc));
 +    if (   RT_SUCCESS(rc)
 +        && pVCpu)
 +    {
 +        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
 +    }
  }
Note:
See TracChangeset
for help on using the changeset viewer.