Changeset 87479 in vbox
- Timestamp: Jan 29, 2021 2:46:18 PM
- svn:sync-xref-src-repo-rev: 142501
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
r87472 → r87479

@@ -493,5 +493,5 @@
  * StatTlbShootdownFlush or StatTlbShootdown.
  */
-static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
+static void hmPokeCpuForTlbFlush(PVMCPUCC pVCpu, bool fAccountFlushStat)
 {
     if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
@@ -502,5 +502,5 @@
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
 #ifdef IN_RING0
-        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
+        RTCPUID idHostCpu = pVCpu->hmr0.s.idEnteredCpu;
         if (idHostCpu != NIL_RTCPUID)
             hmR0PokeCpu(pVCpu, idHostCpu);
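HMAll.cpp is compiled for every context (ring-3 and ring-0), so after this change the ring-0-only idEnteredCpu field may only be touched under #ifdef IN_RING0, exactly as the hunk shows. The following minimal C sketch illustrates that pattern; it is not VirtualBox code: only the field and function names mirror the diff, while the VMCPUCC layout and the hmR0PokeCpu stub are invented for illustration.

/* Sketch: context-dependent access to a ring-0-only per-VCPU field. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t RTCPUID;
#define NIL_RTCPUID UINT32_MAX

typedef struct VMCPUCC
{
    struct { struct { bool fCheckedTLBFlush; } s; } hm;   /* shared with ring-3 */
#ifdef IN_RING0
    struct { struct { RTCPUID idEnteredCpu; } s; } hmr0;  /* ring-0 private */
#endif
} VMCPUCC;

#ifdef IN_RING0
/* Stand-in for the real IPI-based poke. */
static void hmR0PokeCpu(VMCPUCC *pVCpu, RTCPUID idHostCpu)
{
    (void)pVCpu;
    printf("poking host CPU %u\n", (unsigned)idHostCpu);
}
#endif

/* Shape of hmPokeCpuForTlbFlush() after the change: the ring-0-only field
   is only reachable when the file is built as part of ring-0. */
static void hmPokeCpuForTlbFlush(VMCPUCC *pVCpu)
{
    if (pVCpu->hm.s.fCheckedTLBFlush)
    {
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hmr0.s.idEnteredCpu; /* NIL outside HMR0Enter/Leave */
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#endif
    }
}

int main(void)
{
    VMCPUCC VCpu = {0};
    VCpu.hm.s.fCheckedTLBFlush = true;
    hmPokeCpuForTlbFlush(&VCpu); /* prints only when built with -DIN_RING0 */
    return 0;
}

Compiling the sketch with and without -DIN_RING0 shows the two shapes the preprocessor produces from the same source file.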
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r82968 → r87479

@@ -1198,7 +1198,7 @@
     {
         PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
-        pVCpu->hm.s.idEnteredCpu   = NIL_RTCPUID;
-        pVCpu->hm.s.idLastCpu      = NIL_RTCPUID;
+        pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID;
+        pVCpu->hmr0.s.idLastCpu    = NIL_RTCPUID;

         /* We'll aways increment this the first time (host uses ASID 0). */
-        AssertReturn(!pVCpu->hm.s.uCurrentAsid, VERR_HM_IPE_3);
+        AssertReturn(!pVCpu->hmr0.s.uCurrentAsid, VERR_HM_IPE_3);
@@ -1347,4 +1347,4 @@

     Assert(pHostCpu->idCpu == idCpu && pHostCpu->idCpu != NIL_RTCPUID);
-    pVCpu->hm.s.idEnteredCpu = idCpu;
+    pVCpu->hmr0.s.idEnteredCpu = idCpu;
     return rc;
@@ -1388,10 +1388,10 @@
     /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
     rc = g_HmR0.pfnEnterSession(pVCpu);
-    AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID, rc);
+    AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID, rc);

    /* Exports the host-state as we may be resuming code after a longjmp and quite
       possibly now be scheduled on a different CPU. */
    rc = g_HmR0.pfnExportHostState(pVCpu);
-    AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID, rc);
+    AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID, rc);

 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
@@ -1431,8 +1431,8 @@
         /* For obtaining a non-zero ASID/VPID on next re-entry. */
-        pVCpu->hm.s.idLastCpu = NIL_RTCPUID;
+        pVCpu->hmr0.s.idLastCpu = NIL_RTCPUID;
     }

     /* Clear it while leaving HM context, hmPokeCpuForTlbFlush() relies on this. */
-    pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
+    pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID;

     /* De-register the longjmp-to-ring 3 callback now that we have reliquished hardware resources. */
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r87466 → r87479

@@ -130,7 +130,7 @@
  * used. */
 #define HMSVM_ASSERT_CPU_SAFE(a_pVCpu)  AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
-                                                  || (a_pVCpu)->hm.s.idEnteredCpu == RTMpCpuId(), \
+                                                  || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
                                                   ("Illegal migration! Entered on CPU %u Current %u\n", \
-                                                   (a_pVCpu)->hm.s.idEnteredCpu, RTMpCpuId()));
+                                                   (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()));

 /** Assert that we're not executing a nested-guest. */
@@ -1283,5 +1283,5 @@
     bool fNewAsid = false;
     Assert(pHostCpu->idCpu != NIL_RTCPUID);
-    if (   pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
+    if (   pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
         || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
@@ -1313,6 +1313,6 @@
     {
         pHostCpu->uCurrentAsid           = 1;
-        pVCpu->hm.s.uCurrentAsid         = 1;
+        pVCpu->hmr0.s.uCurrentAsid       = 1;
         pVCpu->hm.s.cTlbFlushes          = pHostCpu->cTlbFlushes;
-        pVCpu->hm.s.idLastCpu            = pHostCpu->idCpu;
+        pVCpu->hmr0.s.idLastCpu          = pHostCpu->idCpu;
         pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
@@ -1348,7 +1348,7 @@
         }

-        pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
-        pVCpu->hm.s.idLastCpu    = pHostCpu->idCpu;
-        pVCpu->hm.s.cTlbFlushes  = pHostCpu->cTlbFlushes;
+        pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
+        pVCpu->hmr0.s.idLastCpu    = pHostCpu->idCpu;
+        pVCpu->hm.s.cTlbFlushes    = pHostCpu->cTlbFlushes;
     }
     else
@@ -1365,16 +1365,16 @@

     /* Update VMCB with the ASID. */
-    if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hm.s.uCurrentAsid)
-    {
-        pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
+    if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hmr0.s.uCurrentAsid)
+    {
+        pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hmr0.s.uCurrentAsid;
         pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
     }

-    AssertMsg(pVCpu->hm.s.idLastCpu == pHostCpu->idCpu,
-              ("vcpu idLastCpu=%u hostcpu idCpu=%u\n", pVCpu->hm.s.idLastCpu, pHostCpu->idCpu));
+    AssertMsg(pVCpu->hmr0.s.idLastCpu == pHostCpu->idCpu,
+              ("vcpu idLastCpu=%u hostcpu idCpu=%u\n", pVCpu->hmr0.s.idLastCpu, pHostCpu->idCpu));
     AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
               ("Flush count mismatch for cpu %u (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
     AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
               ("cpu%d uCurrentAsid = %x\n", pHostCpu->idCpu, pHostCpu->uCurrentAsid));
-    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
-              ("cpu%d VM uCurrentAsid = %x\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
+    AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("cpu%d VM uCurrentAsid = %x\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));

 #ifdef VBOX_WITH_STATISTICS
@@ -4222,5 +4222,5 @@
     PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
     RTCPUID const idHostCpu = pHostCpu->idCpu;
-    bool const fMigratedHostCpu = idHostCpu != pVCpu->hm.s.idLastCpu;
+    bool const fMigratedHostCpu = idHostCpu != pVCpu->hmr0.s.idLastCpu;

     /* Setup TSC offsetting. */
@@ -4265,5 +4265,5 @@
     /* Flush the appropriate tagged-TLB entries. */
     hmR0SvmFlushTaggedTlb(pHostCpu, pVCpu, pVmcb);
-    Assert(pVCpu->hm.s.idLastCpu == idHostCpu);
+    Assert(pVCpu->hmr0.s.idLastCpu == idHostCpu);

     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
@@ -4554,5 +4554,5 @@
     {
         Assert(!HMR0SuspendPending());
-        AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
-                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
+        AssertMsg(pVCpu->hmr0.s.idEnteredCpu == RTMpCpuId(),
+                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hmr0.s.idEnteredCpu,
                   (unsigned)RTMpCpuId(), *pcLoops));
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r87472 → r87479

@@ -162,7 +162,7 @@
  * used. */
 #define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)  AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
-                                                  || (a_pVCpu)->hm.s.idEnteredCpu == RTMpCpuId(), \
+                                                  || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
                                                   ("Illegal migration! Entered on CPU %u Current %u\n", \
-                                                   (a_pVCpu)->hm.s.idEnteredCpu, RTMpCpuId()))
+                                                   (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))

 /** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
@@ -2910,7 +2910,7 @@
     {
         AssertPtr(pVCpu);
-        AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
-        AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
-        au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
+        AssertMsg(pVCpu->hmr0.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hmr0.s.uCurrentAsid));
+        AssertMsg(pVCpu->hmr0.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hmr0.s.uCurrentAsid));
+        au64Descriptor[0] = pVCpu->hmr0.s.uCurrentAsid;
         au64Descriptor[1] = GCPtr;
     }
@@ -2918,3 +2918,3 @@
     int rc = VMXR0InvVPID(enmTlbFlush, &au64Descriptor[0]);
     AssertMsg(rc == VINF_SUCCESS,
-              ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
+              ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hmr0.s.uCurrentAsid : 0, GCPtr, rc));
@@ -2986,5 +2986,5 @@

     Assert(pHostCpu->idCpu != NIL_RTCPUID);
-    pVCpu->hm.s.idLastCpu      = pHostCpu->idCpu;
+    pVCpu->hmr0.s.idLastCpu    = pHostCpu->idCpu;
     pVCpu->hm.s.cTlbFlushes    = pHostCpu->cTlbFlushes;
     pVCpu->hm.s.fForceTLBFlush = false;
@@ -3035,5 +3035,5 @@
      * cannot reuse the current ASID anymore.
      */
-    if (   pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
+    if (   pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
         || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
     {
@@ -3046,7 +3046,7 @@
     }

-    pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
-    pVCpu->hm.s.idLastCpu    = pHostCpu->idCpu;
-    pVCpu->hm.s.cTlbFlushes  = pHostCpu->cTlbFlushes;
+    pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
+    pVCpu->hmr0.s.idLastCpu    = pHostCpu->idCpu;
+    pVCpu->hm.s.cTlbFlushes    = pHostCpu->cTlbFlushes;

     /*
@@ -3092,5 +3092,5 @@
         HMVMX_UPDATE_FLUSH_SKIPPED_STAT();

-    Assert(pVCpu->hm.s.idLastCpu == pHostCpu->idCpu);
+    Assert(pVCpu->hmr0.s.idLastCpu == pHostCpu->idCpu);
     Assert(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes);
     AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
@@ -3098,9 +3098,9 @@
     AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
               ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
-               pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
-    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
-              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
+               pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hmr0.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
+    AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));

     /* Update VMCS with the VPID. */
-    int rc = VMXWriteVmcs16(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
+    int rc = VMXWriteVmcs16(VMX_VMCS16_VPID, pVCpu->hmr0.s.uCurrentAsid);
     AssertRC(rc);
@@ -3131,5 +3131,5 @@
      * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
      */
-    if (   pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
+    if (   pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
         || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
     {
@@ -3153,4 +3153,4 @@
     }

-    pVCpu->hm.s.idLastCpu   = pHostCpu->idCpu;
+    pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
     pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
@@ -3186,5 +3186,5 @@
      * cannot reuse the current ASID anymore.
      */
-    if (   pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
+    if (   pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
         || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
     {
@@ -3215,5 +3215,5 @@

     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-    pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
+    pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
     if (pVCpu->hm.s.fForceTLBFlush)
     {
@@ -3228,5 +3228,5 @@
         pVCpu->hm.s.fForceTLBFlush = false;
         pVCpu->hm.s.cTlbFlushes    = pHostCpu->cTlbFlushes;
-        pVCpu->hm.s.uCurrentAsid   = pHostCpu->uCurrentAsid;
+        pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
         if (pHostCpu->fFlushAsidBeforeUse)
         {
@@ -3250,8 +3250,8 @@
     AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
               ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
-               pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
-    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
-              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
-
-    int rc = VMXWriteVmcs16(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
+               pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hmr0.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
+    AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));
+
+    int rc = VMXWriteVmcs16(VMX_VMCS16_VPID, pVCpu->hmr0.s.uCurrentAsid);
     AssertRC(rc);
@@ -6915,5 +6915,5 @@
             hmR0VmxReadExitQualVmcs(pVmxTransient);

-            pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
+            pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
             /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
                Cannot do it here as we may have been long preempted. */
@@ -8494,5 +8494,5 @@
         VMXGetCurrentVmcs(&pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs);
         pVCpu->hm.s.vmx.LastError.u32VmcsRev   = *(uint32_t *)pVmcsInfo->pvVmcs;
-        pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
+        pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
         /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
     }
@@ -11019,5 +11019,5 @@
      */
     if (   !pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer
-        || idCurrentCpu != pVCpu->hm.s.idLastCpu)
+        || idCurrentCpu != pVCpu->hmr0.s.idLastCpu)
     {
         hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient);
@@ -11034,5 +11034,5 @@
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
     hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu, pVmcsInfo);          /* Invalidate the appropriate guest entries from the TLB. */
-    Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
+    Assert(idCurrentCpu == pVCpu->hmr0.s.idLastCpu);
     pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu;      /* Record the error reporting info. with the current host CPU. */
     pVmcsInfo->idHostCpuState = idCurrentCpu;                   /* Record the CPU for which the host-state has been exported. */
@@ -14354,7 +14354,6 @@

 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
-    PCVMXVMCSINFO pVmcsInfo             = pVmxTransient->pVmcsInfo;
-    PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
-    AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfoShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
+    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
               ("uVector=%#x u32XcptBitmap=%#X32\n",
               VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
trunk/src/VBox/VMM/include/HMInternal.h
r87472 → r87479

@@ -1001,10 +1001,6 @@
     /** World switch exit counter. */
     uint32_t volatile           cWorldSwitchExits;
-    /** The last CPU we were executing code on (NIL_RTCPUID for the first time). */
-    RTCPUID                     idLastCpu;
     /** TLB flush count. */
     uint32_t                    cTlbFlushes;
-    /** Current ASID in use by the VM. */
-    uint32_t                    uCurrentAsid;
     /** An additional error code used for some gurus. */
     uint32_t                    u32HMError;
@@ -1104,10 +1100,7 @@
     HMEVENT                 Event;

-    /** The CPU ID of the CPU currently owning the VMCS. Set in
-     * HMR0Enter and cleared in HMR0Leave. */
-    RTCPUID                 idEnteredCpu;
-
     /** Current shadow paging mode for updating CR4. */
     PGMMODE                 enmShadowMode;
+    uint32_t                u32TemporaryPadding;

     /** The PAE PDPEs used with Nested Paging (only valid when
@@ -1283,4 +1276,14 @@
 typedef struct HMR0PERVCPU
 {
+    /** Current ASID in use by the VM. */
+    uint32_t                    uCurrentAsid;
+    /** The last CPU we were executing code on (NIL_RTCPUID for the first time). */
+    RTCPUID                     idLastCpu;
+    /** The CPU ID of the CPU currently owning the VMCS. Set in
+     * HMR0Enter and cleared in HMR0Leave. */
+    RTCPUID                     idEnteredCpu;
+
+    uint32_t                    u32Padding0;
+
     union HM_NAMELESS_UNION_TAG(HMR0CPUUNION) /* no tag! */
     {
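This HMInternal.h hunk is the core of the changeset: idLastCpu, uCurrentAsid and idEnteredCpu move out of the HMCPU structure shared with ring-3 (pVCpu->hm.s.*) into the ring-0 private HMR0PERVCPU (pVCpu->hmr0.s.*), with u32TemporaryPadding and u32Padding0 preserving the old sizes and alignment. Below is a minimal, self-contained C sketch of the resulting split; the structures are heavily simplified and the scaffolding around the real field names is invented for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t RTCPUID;
#define NIL_RTCPUID UINT32_MAX

/* Shared between ring-3 and ring-0 (heavily simplified). */
typedef struct HMCPU
{
    uint32_t cTlbFlushes;       /* stays shared: ring-3 still reads it */
    uint32_t u32HMError;
} HMCPU;

/* Ring-0 private; ring-3 can no longer read or corrupt these fields. */
typedef struct HMR0PERVCPU
{
    uint32_t uCurrentAsid;      /* ASID/VPID currently in use by this VCPU */
    RTCPUID  idLastCpu;         /* last host CPU we executed on */
    RTCPUID  idEnteredCpu;      /* host CPU owning the VMCS/VMCB, or NIL */
    uint32_t u32Padding0;
} HMR0PERVCPU;

typedef struct VMCPU
{
    struct { HMCPU       s; } hm;    /* -> pVCpu->hm.s.*   (all contexts) */
    struct { HMR0PERVCPU s; } hmr0;  /* -> pVCpu->hmr0.s.* (ring-0 only)  */
} VMCPU;

/* Mirrors the initialization hunk in HMR0.cpp above. */
static void hmR0InitVCpu(VMCPU *pVCpu)
{
    pVCpu->hmr0.s.idEnteredCpu = NIL_RTCPUID;
    pVCpu->hmr0.s.idLastCpu    = NIL_RTCPUID;
    pVCpu->hmr0.s.uCurrentAsid = 0;  /* host uses ASID 0 */
}

int main(void)
{
    VMCPU VCpu = {0};
    hmR0InitVCpu(&VCpu);
    printf("idEnteredCpu=%#x idLastCpu=%#x uCurrentAsid=%u\n",
           (unsigned)VCpu.hmr0.s.idEnteredCpu, (unsigned)VCpu.hmr0.s.idLastCpu,
           (unsigned)VCpu.hmr0.s.uCurrentAsid);
    return 0;
}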
trunk/src/VBox/VMM/include/HMInternal.mac
r87472 → r87479

@@ -176,7 +176,5 @@

    .cWorldSwitchExits      resd 1
-    .idLastCpu              resd 1
    .cTlbFlushes            resd 1
-    .uCurrentAsid           resd 1
    .u32HMError             resd 1
    .rcLastExitToR3         resd 1
@@ -198,5 +196,4 @@
    .Event.GCPtrFaultAddress RTGCPTR_RES 1

-    .idEnteredCpu           resd 1
    .enmShadowMode          resd 1
    alignb 8
@@ -231,4 +228,8 @@

 struc HMR0PERVCPU
+    .uCurrentAsid           resd 1
+    .idLastCpu              resd 1
+    .idEnteredCpu           resd 1
+
    alignb 8
 ;%if HMR0CPUVMX_size > HMR0CPUSVM_size
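HMInternal.mac duplicates these layouts in NASM because VMM assembly addresses the structures by fixed offsets, so the .mac file has to move fields in lockstep with the C header. As a hypothetical illustration (not how VirtualBox itself enforces this), a compile-time offset check in C can catch the two layouts drifting apart; the expected offsets below are hand-derived from the NASM field order:

#include <stddef.h>
#include <stdint.h>

typedef uint32_t RTCPUID;

/* C-side layout matching the new HMR0PERVCPU fields in HMInternal.mac. */
typedef struct HMR0PERVCPU
{
    uint32_t uCurrentAsid;   /* offset 0,  .uCurrentAsid resd 1 */
    RTCPUID  idLastCpu;      /* offset 4,  .idLastCpu    resd 1 */
    RTCPUID  idEnteredCpu;   /* offset 8,  .idEnteredCpu resd 1 */
    uint32_t u32Padding0;    /* offset 12 */
} HMR0PERVCPU;

/* If a field moves in the header but not in the .mac file (or vice versa),
   checks like these fail at compile time rather than corrupting state at
   run time. */
_Static_assert(offsetof(HMR0PERVCPU, idLastCpu)    == 4, "layout drifted from HMInternal.mac");
_Static_assert(offsetof(HMR0PERVCPU, idEnteredCpu) == 8, "layout drifted from HMInternal.mac");

int main(void) { return 0; }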