Timestamp: Jul 28, 2021 8:00:43 PM
svn:sync-xref-src-repo-rev: 145961
Location: trunk/src/VBox/VMM
Files: 11 edited
Legend: unmodified context lines are unprefixed; added lines are prefixed with "+"; removed lines are prefixed with "-"; "…" marks a gap between hunks.
trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp
r90348 → r90379:

      RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
+     AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
      /* ... not owned ... */
      if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
…
          return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
  
- #else
- # ifdef IN_RING0
+ #elif defined(IN_RING0)
+ # if 0 /* new code */
      /*
       * In ring-0 context we have to take the special VT-x/AMD-V HM context into
…
       * We must never block if VMMRZCallRing3Disable is active.
       */
- 
-     /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
-      *        and would be better off switching out of that while waiting for
-      *        the lock.  Several of the locks jumps back to ring-3 just to
-      *        get the lock, the ring-3 code will then call the kernel to do
-      *        the lock wait and when the call return it will call ring-0
-      *        again and resume via in setjmp style.  Not very efficient. */
- # if 0
-     if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
-                              * callers not prepared for longjmp/blocking to
-                              * use PDMCritSectTryEnter. */
-     {
-         /*
-          * Leave HM context while waiting if necessary.
-          */
-         int rc;
-         if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
+     PVMCPUCC pVCpu = VMMGetCpu(pVM);
+     if (pVCpu)
+     {
+         VMMR0EMTBLOCKCTX Ctx;
+         int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
+         if (rc == VINF_SUCCESS)
          {
-             STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
+             Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+ 
              rc = pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
+ 
+             VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
          }
          else
-         {
-             STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
-             PVMCC pVM = pCritSect->s.CTX_SUFF(pVM);
-             PVMCPUCC pVCpu = VMMGetCpu(pVM);
-             HMR0Leave(pVM, pVCpu);
-             RTThreadPreemptRestore(NIL_RTTHREAD, XXX);
- 
-             rc = pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
- 
-             RTThreadPreemptDisable(NIL_RTTHREAD, XXX);
-             HMR0Enter(pVM, pVCpu);
-         }
+             STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
          return rc;
      }
- # else
+ 
+     /* Non-EMT. */
+     Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+     return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
+ 
+ # else /* old code: */
      /*
       * We preemption hasn't been disabled, we can block here in ring-0.
…
          && ASMIntAreEnabled())
          return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
- # endif
- # endif /* IN_RING0 */
  
      STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
…
      LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
      return rcBusy;
- #endif /* !IN_RING3 */
+ # endif /* old code */
+ #else
+ # error "Unsupported context"
+ #endif
  }
…
  
      RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
+     AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
      /* ... not owned ... */
      if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
…
      STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
  #else
-     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
+     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
  #endif
      LogFlow(("PDMCritSectTryEnter: locked\n"));
…
       */
      RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
-     AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
+     AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf || hNativeSelf == NIL_RTNATIVETHREAD,
                             ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                              pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
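The hunk above replaces the old HMR0Leave/RTThreadPreemptRestore dance with the new VMMR0EmtPrepareToBlock/VMMR0EmtResumeAfterBlocking pair, and rcBusy now also feeds the prepare-to-block check. A minimal caller-side sketch of the rcBusy contract (hypothetical device code, not part of this changeset, and assuming the two-argument PDMCritSectEnter(pCritSect, rcBusy) signature of this era):

      /* rcBusy is what PDMCritSectEnter returns when the current context
         cannot block; VINF_SUCCESS would instead trip the cannot-block path. */
      int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
      if (rc == VERR_SEM_BUSY)
          return rc;                  /* could not take the lock without blocking */
      AssertRC(rc);
      /* ... access the state the section protects ... */
      PDMCritSectLeave(pCritSect);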
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r87792 → r90379:

          rc = GMMR0InitPerVMData(pGVM);
          int rc2 = PGMR0InitPerVMData(pGVM);
+         VMMR0InitPerVMData(pGVM);
          DBGFR0InitPerVMData(pGVM);
          PDMR0InitPerVMData(pGVM);
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r87606 → r90379:

  /**
   * Thread-context hook for HM.
+  *
+  * This is used together with RTThreadCtxHookCreate() on platforms which
+  * supports it, and directly from VMMR0EmtPrepareForBlocking() and
+  * VMMR0EmtResumeAfterBlocking() on platforms which don't.
   *
   * @param   enmEvent    The thread-context event.
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r90260 → r90379:

   * Thread-context callback for AMD-V.
   *
+  * This is used together with RTThreadCtxHookCreate() on platforms which
+  * supports it, and directly from VMMR0EmtPrepareForBlocking() and
+  * VMMR0EmtResumeAfterBlocking() on platforms which don't.
+  *
   * @param   enmEvent    The thread-context event.
   * @param   pVCpu       The cross context virtual CPU structure.
…
  {
      Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-     Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
      VMCPU_ASSERT_EMT(pVCpu);
…
  {
      Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-     Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
      VMCPU_ASSERT_EMT(pVCpu);
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r90000 → r90379:

  
  /**
-  * The thread-context callback (only on platforms which support it).
+  * The thread-context callback.
+  *
+  * This is used together with RTThreadCtxHookCreate() on platforms which
+  * supports it, and directly from VMMR0EmtPrepareForBlocking() and
+  * VMMR0EmtResumeAfterBlocking() on platforms which don't.
   *
   * @param   enmEvent    The thread-context event.
…
  {
      Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-     Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
      VMCPU_ASSERT_EMT(pVCpu);
…
  {
      Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-     Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
      VMCPU_ASSERT_EMT(pVCpu);
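All three thread-context callbacks now document the same dual invocation: via a kernel hook registered with RTThreadCtxHookCreate() where the host supports it, or called directly by the new EMT block/resume helpers. A simplified skeleton of that callback shape (a sketch, not the exact VirtualBox code):

      static DECLCALLBACK(void) myThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
      {
          PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
          switch (enmEvent)
          {
              case RTTHREADCTXEVENT_OUT:      /* preempted, or about to block */
                  /* Save/export state and clear the host-CPU association. */
                  break;
              case RTTHREADCTXEVENT_IN:       /* scheduled in again / resuming */
                  /* Re-establish the host-CPU association and reload state. */
                  break;
          }
      }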
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r90190 → r90379:

  
  /**
+  * Initializes VMM specific members when the GVM structure is created.
+  *
+  * @param   pGVM    The global (ring-0) VM structure.
+  */
+ VMMR0_INT_DECL(void) VMMR0InitPerVMData(PGVM pGVM)
+ {
+     for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
+     {
+         PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
+         pGVCpu->vmmr0.s.idHostCpu     = NIL_RTCPUID;
+         pGVCpu->vmmr0.s.iHostCpuSet   = UINT32_MAX;
+         pGVCpu->vmmr0.s.fInHmContext  = false;
+         pGVCpu->vmmr0.s.pPreemptState = NULL;
+         pGVCpu->vmmr0.s.hCtxHook      = NIL_RTTHREADCTXHOOK;
+     }
+ }
+ 
+ 
+ /**
   * Initiates the R0 driver for a particular VM instance.
…
   * callback.
   *
+  * This is used together with RTThreadCtxHookCreate() on platforms which
+  * supports it, and directly from VMMR0EmtPrepareForBlocking() and
+  * VMMR0EmtResumeAfterBlocking() on platforms which don't.
+  *
   * @param   enmEvent    The thread-context event.
   * @param   pvUser      Opaque pointer to the VMCPU.
…
              RTCPUID idHostCpu;
              uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
-             pVCpu->iHostCpuSet = iHostCpuSet;
+             pVCpu->vmmr0.s.iHostCpuSet = iHostCpuSet;
+             ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, idHostCpu);
+             pVCpu->iHostCpuSet = iHostCpuSet;
              ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
…
                 rescheduled needs calculating, try force a return to ring-3.
                 We unfortunately cannot do the measurements right here. */
-             if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
+             if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
+             { /* likely */ }
+             else
                  VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
…
               * have the same host CPU associated with it.
               */
+             pVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
+             ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
              pVCpu->iHostCpuSet = UINT32_MAX;
              ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
…
  {
      VMCPU_ASSERT_EMT(pVCpu);
-     Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
+     Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);
  
  #if 1 /* To disable this stuff change to zero. */
-     int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
+     int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
      if (RT_SUCCESS(rc))
+     {
+         pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
          return rc;
+     }
  #else
      RT_NOREF(vmmR0ThreadCtxCallback);
…
  #endif
  
-     pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
+     pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
+     pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
      if (rc == VERR_NOT_SUPPORTED)
          return VINF_SUCCESS;
…
  VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
  {
-     int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
+     int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
      AssertRC(rc);
-     pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
+     pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
  }
…
       * Disable the context hook, if we got one.
       */
-     if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+     if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
      {
          Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-         int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
+         ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
+         int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
          AssertRC(rc);
      }
…
  DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
  {
-     return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
+     return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
  }
…
      RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
      RTThreadPreemptDisable(&PreemptState);
+     pGVCpu->vmmr0.s.pPreemptState = &PreemptState;
  
      /*
…
          && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
      {
+         pGVCpu->vmmr0.s.iHostCpuSet = iHostCpuSet;
+         ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, idHostCpu);
+ 
          pGVCpu->iHostCpuSet = iHostCpuSet;
          ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
…
           * Enable the context switching hook.
           */
-         if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+         if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
          {
-             Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
-             int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
+             Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
+             int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
…
          if (RT_SUCCESS(rc))
          {
+             pGVCpu->vmmr0.s.fInHmContext = true;
              VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
…
              {
                  fPreemptRestored = true;
+                 pGVCpu->vmmr0.s.pPreemptState = NULL;
                  RTThreadPreemptRestore(&PreemptState);
              }
…
  #endif
  
+             pGVCpu->vmmr0.s.fInHmContext = false;
              VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
          }
…
           * hook / restore preemption.
           */
+         pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
+         ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
+ 
          pGVCpu->iHostCpuSet = UINT32_MAX;
          ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
…
           * when we get here, but the IPRT API handles that.
           */
-         if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+         if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
          {
              ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
-             RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
+             RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
          }
…
           * preemption again before the RTThreadCtxHookDisable call. */
          if (!fPreemptRestored)
+         {
+             pGVCpu->vmmr0.s.pPreemptState = NULL;
              RTThreadPreemptRestore(&PreemptState);
+         }
  
          pGVCpu->vmm.s.iLastGZRc = rc;
…
      else
      {
+         pGVCpu->vmmr0.s.pPreemptState = NULL;
+         pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
+         ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
          pGVCpu->iHostCpuSet = UINT32_MAX;
          ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
…
  {
      return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
+ }
+ 
+ 
+ /**
+  * Locking helper that deals with HM context and checks if the thread can block.
+  *
+  * @returns VINF_SUCCESS if we can block.  Returns @a rcBusy or
+  *          VERR_VMM_CANNOT_BLOCK if not able to block.
+  * @param   pVCpu       The cross context virtual CPU structure of the calling
+  *                      thread.
+  * @param   rcBusy      What to return in case of a blocking problem.  Will IPE
+  *                      if VINF_SUCCESS and we cannot block.
+  * @param   pszCaller   The caller (for logging problems).
+  * @param   pvLock      The lock address (for logging problems).
+  * @param   pCtx        Where to return context info for the resume call.
+  * @thread  EMT(pVCpu)
+  */
+ VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
+                                            PVMMR0EMTBLOCKCTX pCtx)
+ {
+     const char *pszMsg;
+ 
+     /*
+      * Check that we are allowed to block.
+      */
+     if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
+     {
+         /*
+          * Are we in HM context and w/o a context hook?  If so work the context hook.
+          */
+         if (pVCpu->vmmr0.s.idHostCpu != NIL_RTCPUID)
+         {
+             Assert(pVCpu->vmmr0.s.iHostCpuSet != UINT32_MAX);
+             Assert(pVCpu->vmmr0.s.fInHmContext);
+ 
+             if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
+             {
+                 vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
+                 if (pVCpu->vmmr0.s.pPreemptState)
+                     RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
+ 
+                 pCtx->uMagic          = VMMR0EMTBLOCKCTX_MAGIC;
+                 pCtx->fWasInHmContext = true;
+                 return VINF_SUCCESS;
+             }
+         }
+ 
+         if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
+         {
+             /*
+              * Not in HM context or we've got hooks, so just check that preemption
+              * is enabled.
+              */
+             if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
+             {
+                 pCtx->uMagic          = VMMR0EMTBLOCKCTX_MAGIC;
+                 pCtx->fWasInHmContext = false;
+                 return VINF_SUCCESS;
+             }
+             pszMsg = "Preemption is disabled!";
+         }
+         else
+             pszMsg = "Preemption state w/o HM state!";
+     }
+     else
+         pszMsg = "Ring-3 calls are disabled!";
+ 
+     static uint32_t volatile s_cWarnings = 0;
+     if (++s_cWarnings < 50)
+         SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
+     pCtx->uMagic          = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
+     pCtx->fWasInHmContext = false;
+     return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
+ }
+ 
+ 
+ /**
+  * Counterpart to VMMR0EmtPrepareToBlock.
+  *
+  * @param   pVCpu       The cross context virtual CPU structure of the calling
+  *                      thread.
+  * @param   pCtx        The context structure used with VMMR0EmtPrepareToBlock.
+  * @thread  EMT(pVCpu)
+  */
+ VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
+ {
+     AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
+     if (pCtx->fWasInHmContext)
+     {
+         if (pVCpu->vmmr0.s.pPreemptState)
+             RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
+ 
+         pCtx->fWasInHmContext = false;
+         vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
+     }
+     pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
  }
trunk/src/VBox/VMM/VMMR3/PDMCritSect.cpp
r90348 → r90379:

      pCritSect->pszName = pszName;
  
-     STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLock", pCritSect->pszName);
-     STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pCritSect->pszName);
-     STAMR3RegisterF(pVM, &pCritSect->StatContentionR3,       STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionR3", pCritSect->pszName);
+     STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock,     STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLock", pCritSect->pszName);
+     STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLockBusy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLockBusy", pCritSect->pszName);
+     STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pCritSect->pszName);
+     STAMR3RegisterF(pVM, &pCritSect->StatContentionR3,         STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionR3", pCritSect->pszName);
  #ifdef VBOX_WITH_STATISTICS
-     STAMR3RegisterF(pVM, &pCritSect->StatLocked,              STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pCritSect->pszName);
+     STAMR3RegisterF(pVM, &pCritSect->StatLocked,                STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pCritSect->pszName);
  #endif
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r90347 → r90379:

  
      /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
-     if (pVM->apCpusR3[0]->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+     if (pVM->vmm.s.fIsUsingContextHooks)
          LogRel(("VMM: Enabled thread-context hooks\n"));
      else
trunk/src/VBox/VMM/include/PDMInternal.h
r90348 → r90379:

      /** R0/RC lock contention. */
      STAMCOUNTER                     StatContentionRZLock;
+     /** R0/RC lock contention, returning rcBusy or VERR_SEM_BUSY (try). */
+     STAMCOUNTER                     StatContentionRZLockBusy;
      /** R0/RC unlock contention. */
      STAMCOUNTER                     StatContentionRZUnlock;
trunk/src/VBox/VMM/include/VMMInternal.h
r90189 → r90379:

       * release logging purposes. */
      bool                        fIsPreemptPossible : 1;
+     /** Set if ring-0 uses context hooks. */
+     bool                        fIsUsingContextHooks : 1;
  
      bool                        afAlignment2[2]; /**< Alignment padding. */
…
      R0PTRTYPE(PVMMR0LOGGER)     pR0RelLoggerR0;
  
-     /** Thread context switching hook (ring-0). */
-     RTTHREADCTXHOOK             hCtxHook;
- 
      /** @name Rendezvous
       * @{ */
…
       *  attempts at recursive rendezvous. */
      bool volatile               fInRendezvous;
-     bool                        afPadding1[10];
+     bool                        afPadding1[2];
      /** @} */
…
  typedef struct VMMR0PERVCPU
  {
+     /** Which host CPU ID is this EMT running on.
+      * Only valid when in RC or HMR0 with scheduling disabled. */
+     RTCPUID volatile                    idHostCpu;
+     /** The CPU set index corresponding to idHostCpu, UINT32_MAX if not valid.
+      * @remarks Best to make sure iHostCpuSet shares cache line with idHostCpu! */
+     uint32_t volatile                   iHostCpuSet;
+     /** Set if we've entered HM context. */
+     bool volatile                       fInHmContext;
+ 
+     bool                                afPadding[7];
+     /** Pointer to the VMMR0EntryFast preemption state structure.
+      * This is used to temporarily restore preemption before blocking. */
+     R0PTRTYPE(PRTTHREADPREEMPTSTATE)    pPreemptState;
+     /** Thread context switching hook (ring-0). */
+     RTTHREADCTXHOOK                     hCtxHook;
+ 
      /** @name Arguments passed by VMMR0EntryEx via vmmR0CallRing3SetJmpEx.
       * @note Cannot be put on the stack as the location may change and upset the
       *       validation of resume-after-ring-3-call logic.
       * @{ */
-     PGVM                pGVM;
-     VMCPUID             idCpu;
-     VMMR0OPERATION      enmOperation;
-     PSUPVMMR0REQHDR     pReq;
-     uint64_t            u64Arg;
-     PSUPDRVSESSION      pSession;
+     PGVM                                pGVM;
+     VMCPUID                             idCpu;
+     VMMR0OPERATION                      enmOperation;
+     PSUPVMMR0REQHDR                     pReq;
+     uint64_t                            u64Arg;
+     PSUPDRVSESSION                      pSession;
      /** @} */
  } VMMR0PERVCPU;
trunk/src/VBox/VMM/include/VMMInternal.mac
r90189 → r90379:

      .pR0RelLoggerR0         RTR0PTR_RES 1
  
-     .hCtxHook               RTR0PTR_RES 1
- 
      .fInRendezvous          resb 1
-     .afPadding1             resb 10
+     .afPadding1             resb 2
      .fMayHaltInRing0        resb 1
      .cNsSpinBlockThreshold  resd 1