VirtualBox

Changeset 90379 in vbox for trunk/src


Timestamp: Jul 28, 2021 8:00:43 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 145961
Message:
VMM: Implementing blocking on critical sections in ring-0 HM context (actual code is disabled). bugref:6695

Location: trunk/src/VBox/VMM
Files: 11 edited
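
This changeset introduces a VMMR0EmtPrepareToBlock/VMMR0EmtResumeAfterBlocking pair that lets an EMT leave VT-x/AMD-V (HM) context and re-enable preemption before sleeping on a contended critical section, then restore everything afterwards. As a reading aid, here is a condensed caller-view sketch of that pattern; the real code is the "# if 0 /* new code */" block in PDMCritSectEnter below, and myWaitForResource() is a hypothetical stand-in for the contended wait:

    /* Sketch only: mirrors the (currently disabled) new ring-0 path. */
    int myEnterFromRing0(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy)
    {
        PVMCPUCC pVCpu = VMMGetCpu(pVM);    /* NULL when not on an EMT. */
        if (pVCpu)
        {
            VMMR0EMTBLOCKCTX Ctx;
            /* Leaves HM context and restores preemption when blocking is safe,
               otherwise fails with rcBusy (or VERR_VMM_CANNOT_BLOCK). */
            int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
            if (rc == VINF_SUCCESS)
            {
                rc = myWaitForResource(pVM, pCritSect);   /* may now sleep */
                VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx); /* re-enter HM context */
            }
            return rc;
        }
        /* Non-EMT ring-0 threads may block directly. */
        return myWaitForResource(pVM, pCritSect);
    }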

Legend (unified diff notation):
  +  added line
  -  removed line
     unchanged context
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp

    (diff: r90348 → r90379)

@@ -249 +249 @@

     RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
+    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
     /* ... not owned ... */
     if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
@@ -291 +292 @@
     return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);

-#else
-# ifdef IN_RING0
+#elif defined(IN_RING0)
+# if 0 /* new code */
     /*
      * In ring-0 context we have to take the special VT-x/AMD-V HM context into
@@ -304 +305 @@
      * We must never block if VMMRZCallRing3Disable is active.
      */
-
-    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
-     *        and would be better off switching out of that while waiting for
-     *        the lock.  Several of the locks jumps back to ring-3 just to
-     *        get the lock, the ring-3 code will then call the kernel to do
-     *        the lock wait and when the call return it will call ring-0
-     *        again and resume via in setjmp style.  Not very efficient. */
-#  if 0
-    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
-                             * callers not prepared for longjmp/blocking to
-                             * use PDMCritSectTryEnter. */
-    {
-        /*
-         * Leave HM context while waiting if necessary.
-         */
-        int rc;
-        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
+    PVMCPUCC pVCpu = VMMGetCpu(pVM);
+    if (pVCpu)
+    {
+        VMMR0EMTBLOCKCTX Ctx;
+        int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
+        if (rc == VINF_SUCCESS)
         {
-            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock,    1000000);
+            Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
             rc = pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
+
+            VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
         }
         else
-        {
-            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
-            PVMCC     pVM   = pCritSect->s.CTX_SUFF(pVM);
-            PVMCPUCC  pVCpu = VMMGetCpu(pVM);
-            HMR0Leave(pVM, pVCpu);
-            RTThreadPreemptRestore(NIL_RTTHREAD, XXX);
-
-            rc = pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
-
-            RTThreadPreemptDisable(NIL_RTTHREAD, XXX);
-            HMR0Enter(pVM, pVCpu);
-        }
+            STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
         return rc;
     }
-#  else
+
+    /* Non-EMT. */
+    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
+
+# else /* old code: */
     /*
      * If preemption hasn't been disabled, we can block here in ring-0.
@@ -347 +334 @@
         && ASMIntAreEnabled())
         return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos);
-#  endif
-# endif /* IN_RING0 */

     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
@@ -367 +352 @@
     LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
     return rcBusy;
-#endif /* !IN_RING3 */
+# endif  /* old code */
+#else
+# error "Unsupported context"
+#endif
 }

@@ -460 +448 @@

     RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
+    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
     /* ... not owned ... */
     if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
@@ -485 +474 @@
     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
 #else
-    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
+    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
 #endif
     LogFlow(("PDMCritSectTryEnter: locked\n"));
@@ -595 +584 @@
      */
     RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
-    AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
+    AssertReleaseMsgReturn(pCritSect->s.Core.NativeThreadOwner == hNativeSelf || hNativeSelf == NIL_RTNATIVETHREAD,
                            ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                             pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
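
The removed @todo above notes that callers not prepared for a longjmp or for blocking should probe the section with PDMCritSectTryEnter instead. A minimal sketch of that alternative; the exact public signatures at this revision are an assumption, so treat them as illustrative:

    /* Sketch: probe instead of block.  VERR_SEM_BUSY means someone else
       owns the section, so we defer the work instead of waiting. */
    int rc = PDMCritSectTryEnter(pVM, pCritSect);  /* assumed pVM-taking form */
    if (rc == VINF_SUCCESS)
    {
        /* ... touch the protected state ... */
        PDMCritSectLeave(pCritSect);               /* assumed one-argument form */
    }
    else
    {
        /* Busy: schedule the work for later or force a ring-3 round trip
           rather than waiting in a context that must not block. */
    }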
  • trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp

    (diff: r87792 → r90379)

@@ -907 +907 @@
                         rc = GMMR0InitPerVMData(pGVM);
                         int rc2 = PGMR0InitPerVMData(pGVM);
+                        VMMR0InitPerVMData(pGVM);
                         DBGFR0InitPerVMData(pGVM);
                         PDMR0InitPerVMData(pGVM);
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    (diff: r87606 → r90379)

@@ -1513 +1513 @@
 /**
  * Thread-context hook for HM.
+ *
+ * This is used together with RTThreadCtxHookCreate() on platforms which
+ * support it, and directly from VMMR0EmtPrepareToBlock() and
+ * VMMR0EmtResumeAfterBlocking() on platforms which don't.
  *
  * @param   enmEvent        The thread-context event.
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    (diff: r90260 → r90379)

@@ -2298 +2298 @@
  * Thread-context callback for AMD-V.
  *
+ * This is used together with RTThreadCtxHookCreate() on platforms which
+ * support it, and directly from VMMR0EmtPrepareToBlock() and
+ * VMMR0EmtResumeAfterBlocking() on platforms which don't.
+ *
  * @param   enmEvent        The thread-context event.
  * @param   pVCpu           The cross context virtual CPU structure.
@@ -2312 +2316 @@
         {
             Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-            Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
             VMCPU_ASSERT_EMT(pVCpu);

@@ -2337 +2340 @@
         {
             Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-            Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
             VMCPU_ASSERT_EMT(pVCpu);
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    (diff: r90000 → r90379)

@@ -9351 +9351 @@

 /**
- * The thread-context callback (only on platforms which support it).
+ * The thread-context callback.
+ *
+ * This is used together with RTThreadCtxHookCreate() on platforms which
+ * support it, and directly from VMMR0EmtPrepareToBlock() and
+ * VMMR0EmtResumeAfterBlocking() on platforms which don't.
  *
  * @param   enmEvent        The thread-context event.
@@ -9368 +9372 @@
         {
             Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-            Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
             VMCPU_ASSERT_EMT(pVCpu);

@@ -9399 +9402 @@
         {
             Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-            Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
             VMCPU_ASSERT_EMT(pVCpu);
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    (diff: r90190 → r90379)

@@ -359 +359 @@

 /**
+ * Initializes VMM specific members when the GVM structure is created.
+ *
+ * @param   pGVM        The global (ring-0) VM structure.
+ */
+VMMR0_INT_DECL(void) VMMR0InitPerVMData(PGVM pGVM)
+{
+    for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
+    {
+        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
+        pGVCpu->vmmr0.s.idHostCpu       = NIL_RTCPUID;
+        pGVCpu->vmmr0.s.iHostCpuSet     = UINT32_MAX;
+        pGVCpu->vmmr0.s.fInHmContext    = false;
+        pGVCpu->vmmr0.s.pPreemptState   = NULL;
+        pGVCpu->vmmr0.s.hCtxHook        = NIL_RTTHREADCTXHOOK;
+    }
+}
+
+
+/**
  * Initiates the R0 driver for a particular VM instance.
  *
@@ -954 +973 @@
  * callback.
  *
+ * This is used together with RTThreadCtxHookCreate() on platforms which
+ * support it, and directly from VMMR0EmtPrepareToBlock() and
+ * VMMR0EmtResumeAfterBlocking() on platforms which don't.
+ *
  * @param   enmEvent    The thread-context event.
  * @param   pvUser      Opaque pointer to the VMCPU.
@@ -983 +1006 @@
             RTCPUID idHostCpu;
             uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
-            pVCpu->iHostCpuSet   = iHostCpuSet;
+            pVCpu->vmmr0.s.iHostCpuSet = iHostCpuSet;
+            ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, idHostCpu);
+            pVCpu->iHostCpuSet = iHostCpuSet;
             ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);

@@ -989 +1014 @@
                rescheduled needs calculating, try force a return to ring-3.
                We unfortunately cannot do the measurements right here. */
-            if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
+            if (RT_LIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
+            { /* likely */ }
+            else
                 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);

@@ -1009 +1036 @@
              * have the same host CPU associated with it.
              */
+            pVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
+            ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
             pVCpu->iHostCpuSet = UINT32_MAX;
             ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
@@ -1036 +1065 @@
 {
     VMCPU_ASSERT_EMT(pVCpu);
-    Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
+    Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);

 #if 1 /* To disable this stuff change to zero. */
-    int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
+    int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
     if (RT_SUCCESS(rc))
+    {
+        pVCpu->pGVM->vmm.s.fIsUsingContextHooks = true;
         return rc;
+    }
 #else
     RT_NOREF(vmmR0ThreadCtxCallback);
@@ -1047 +1079 @@
 #endif

-    pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
+    pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
+    pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
     if (rc == VERR_NOT_SUPPORTED)
         return VINF_SUCCESS;
@@ -1064 +1097 @@
 VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
 {
-    int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
+    int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
     AssertRC(rc);
-    pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
+    pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
 }

@@ -1101 +1134 @@
      * Disable the context hook, if we got one.
      */
-    if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+    if (pVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
     {
         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-        int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
+        ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
+        int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook);
         AssertRC(rc);
     }
@@ -1118 +1152 @@
 DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
 {
-    return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
+    return RTThreadCtxHookIsEnabled(pVCpu->vmmr0.s.hCtxHook);
 }

@@ -1413 +1447 @@
                 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
                 RTThreadPreemptDisable(&PreemptState);
+                pGVCpu->vmmr0.s.pPreemptState = &PreemptState;

                 /*
@@ -1423 +1458 @@
                               && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
                 {
+                    pGVCpu->vmmr0.s.iHostCpuSet = iHostCpuSet;
+                    ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, idHostCpu);
+
                     pGVCpu->iHostCpuSet = iHostCpuSet;
                     ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
@@ -1448 +1486 @@
                          * Enable the context switching hook.
                          */
-                        if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+                        if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
                         {
-                            Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
-                            int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
+                            Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmmr0.s.hCtxHook));
+                            int rc2 = RTThreadCtxHookEnable(pGVCpu->vmmr0.s.hCtxHook); AssertRC(rc2);
                         }

@@ -1460 +1498 @@
                         if (RT_SUCCESS(rc))
                         {
+                            pGVCpu->vmmr0.s.fInHmContext = true;
                             VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);

@@ -1469 +1508 @@
                             {
                                 fPreemptRestored = true;
+                                pGVCpu->vmmr0.s.pPreemptState = NULL;
                                 RTThreadPreemptRestore(&PreemptState);
                             }
@@ -1503 +1543 @@
 #endif

+                            pGVCpu->vmmr0.s.fInHmContext = false;
                             VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
                         }
@@ -1511 +1552 @@
                          * hook / restore preemption.
                          */
+                        pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
+                        ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
+
                         pGVCpu->iHostCpuSet = UINT32_MAX;
                         ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
@@ -1521 +1565 @@
                          *       when we get here, but the IPRT API handles that.
                          */
-                        if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+                        if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK)
                         {
                             ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
-                            RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
+                            RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook);
                         }
                     }
@@ -1540 +1584 @@
                      *        preemption again before the RTThreadCtxHookDisable call. */
                     if (!fPreemptRestored)
+                    {
+                        pGVCpu->vmmr0.s.pPreemptState = NULL;
                         RTThreadPreemptRestore(&PreemptState);
+                    }

                     pGVCpu->vmm.s.iLastGZRc = rc;
@@ -1570 +1617 @@
                 else
                 {
+                    pGVCpu->vmmr0.s.pPreemptState = NULL;
+                    pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;
+                    ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);
                     pGVCpu->iHostCpuSet = UINT32_MAX;
                     ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
@@ -2555 +2605 @@
 {
     return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
+}
+
+
+/**
+ * Locking helper that deals with HM context and checks if the thread can block.
+ *
+ * @returns VINF_SUCCESS if we can block.  Returns @a rcBusy or
+ *          VERR_VMM_CANNOT_BLOCK if not able to block.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling
+ *                      thread.
+ * @param   rcBusy      What to return in case of a blocking problem.  Will IPE
+ *                      if VINF_SUCCESS and we cannot block.
+ * @param   pszCaller   The caller (for logging problems).
+ * @param   pvLock      The lock address (for logging problems).
+ * @param   pCtx        Where to return context info for the resume call.
+ * @thread  EMT(pVCpu)
+ */
+VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
+                                           PVMMR0EMTBLOCKCTX pCtx)
+{
+    const char *pszMsg;
+
+    /*
+     * Check that we are allowed to block.
+     */
+    if (RT_LIKELY(VMMRZCallRing3IsEnabled(pVCpu)))
+    {
+        /*
+         * Are we in HM context and w/o a context hook?  If so work the context hook.
+         */
+        if (pVCpu->vmmr0.s.idHostCpu != NIL_RTCPUID)
+        {
+            Assert(pVCpu->vmmr0.s.iHostCpuSet != UINT32_MAX);
+            Assert(pVCpu->vmmr0.s.fInHmContext);
+
+            if (pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK)
+            {
+                vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_OUT, pVCpu);
+                if (pVCpu->vmmr0.s.pPreemptState)
+                    RTThreadPreemptRestore(pVCpu->vmmr0.s.pPreemptState);
+
+                pCtx->uMagic          = VMMR0EMTBLOCKCTX_MAGIC;
+                pCtx->fWasInHmContext = true;
+                return VINF_SUCCESS;
+            }
+        }
+
+        if (RT_LIKELY(!pVCpu->vmmr0.s.pPreemptState))
+        {
+            /*
+             * Not in HM context or we've got hooks, so just check that preemption
+             * is enabled.
+             */
+            if (RT_LIKELY(RTThreadPreemptIsEnabled(NIL_RTTHREAD)))
+            {
+                pCtx->uMagic          = VMMR0EMTBLOCKCTX_MAGIC;
+                pCtx->fWasInHmContext = false;
+                return VINF_SUCCESS;
+            }
+            pszMsg = "Preemption is disabled!";
+        }
+        else
+            pszMsg = "Preemption state w/o HM state!";
+    }
+    else
+        pszMsg = "Ring-3 calls are disabled!";
+
+    static uint32_t volatile s_cWarnings = 0;
+    if (++s_cWarnings < 50)
+        SUPR0Printf("VMMR0EmtPrepareToBlock: %s pvLock=%p pszCaller=%s rcBusy=%p\n", pszMsg, pvLock, pszCaller, rcBusy);
+    pCtx->uMagic          = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
+    pCtx->fWasInHmContext = false;
+    return rcBusy != VINF_SUCCESS ? rcBusy : VERR_VMM_CANNOT_BLOCK;
+}
+
+
+/**
+ * Counterpart to VMMR0EmtPrepareToBlock.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling
+ *                      thread.
+ * @param   pCtx        The context structure used with VMMR0EmtPrepareToBlock.
+ * @thread  EMT(pVCpu)
+ */
+VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx)
+{
+    AssertReturnVoid(pCtx->uMagic == VMMR0EMTBLOCKCTX_MAGIC);
+    if (pCtx->fWasInHmContext)
+    {
+        if (pVCpu->vmmr0.s.pPreemptState)
+            RTThreadPreemptDisable(pVCpu->vmmr0.s.pPreemptState);
+
+        pCtx->fWasInHmContext = false;
+        vmmR0ThreadCtxCallback(RTTHREADCTXEVENT_IN, pVCpu);
+    }
+    pCtx->uMagic = VMMR0EMTBLOCKCTX_MAGIC_DEAD;
 }

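As the doc comment spells out, VMMR0EmtPrepareToBlock refuses to block when ring-3 calls are disabled, or when preemption is off without the expected HM state, returning @a rcBusy (or VERR_VMM_CANNOT_BLOCK when rcBusy is VINF_SUCCESS). A hedged sketch of the caller-side handling, with myDoWait() standing in for an arbitrary blocking wait:

    /* Sketch only; mirrors how the disabled PDMCritSectEnter path uses the API. */
    VMMR0EMTBLOCKCTX Ctx;
    int rc = VMMR0EmtPrepareToBlock(pVCpu, VERR_SEM_BUSY /* rcBusy */, __FUNCTION__, pvLock, &Ctx);
    if (rc == VINF_SUCCESS)
    {
        Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD)); /* safe to sleep now */
        rc = myDoWait(pvLock);                          /* hypothetical wait */
        VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);       /* re-enter HM context */
    }
    else
        LogFlow(("Cannot block here, deferring to ring-3: %Rrc\n", rc));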
  • trunk/src/VBox/VMM/VMMR3/PDMCritSect.cpp

    (diff: r90348 → r90379)

@@ -164 +164 @@
                 pCritSect->pszName                   = pszName;

-                STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,          NULL, "/PDM/CritSects/%s/ContentionRZLock", pCritSect->pszName);
-                STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,          NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pCritSect->pszName);
-                STAMR3RegisterF(pVM, &pCritSect->StatContentionR3,      STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,          NULL, "/PDM/CritSects/%s/ContentionR3", pCritSect->pszName);
+                STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock,      STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,          NULL, "/PDM/CritSects/%s/ContentionRZLock", pCritSect->pszName);
+                STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLockBusy,  STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,          NULL, "/PDM/CritSects/%s/ContentionRZLockBusy", pCritSect->pszName);
+                STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,          NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pCritSect->pszName);
+                STAMR3RegisterF(pVM, &pCritSect->StatContentionR3,          STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,          NULL, "/PDM/CritSects/%s/ContentionR3", pCritSect->pszName);
 #ifdef VBOX_WITH_STATISTICS
-                STAMR3RegisterF(pVM, &pCritSect->StatLocked,        STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pCritSect->pszName);
+                STAMR3RegisterF(pVM, &pCritSect->StatLocked,            STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pCritSect->pszName);
 #endif

  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    (diff: r90347 → r90379)

@@ -627 +627 @@

     /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
-    if (pVM->apCpusR3[0]->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
+    if (pVM->vmm.s.fIsUsingContextHooks)
         LogRel(("VMM: Enabled thread-context hooks\n"));
     else
  • trunk/src/VBox/VMM/include/PDMInternal.h

    (diff: r90348 → r90379)

@@ -438 +438 @@
     /** R0/RC lock contention. */
     STAMCOUNTER                     StatContentionRZLock;
+    /** R0/RC lock contention, returning rcBusy or VERR_SEM_BUSY (try). */
+    STAMCOUNTER                     StatContentionRZLockBusy;
     /** R0/RC unlock contention. */
     STAMCOUNTER                     StatContentionRZUnlock;
  • trunk/src/VBox/VMM/include/VMMInternal.h

    (diff: r90189 → r90379)

@@ -267 +267 @@
      * release logging purposes.  */
     bool                        fIsPreemptPossible : 1;
+    /** Set if ring-0 uses context hooks.  */
+    bool                        fIsUsingContextHooks : 1;

     bool                        afAlignment2[2]; /**< Alignment padding. */
@@ -379 +381 @@
     R0PTRTYPE(PVMMR0LOGGER)     pR0RelLoggerR0;

-    /** Thread context switching hook (ring-0). */
-    RTTHREADCTXHOOK             hCtxHook;
-
     /** @name Rendezvous
      * @{ */
@@ -387 +386 @@
      *  attempts at recursive rendezvous. */
     bool volatile               fInRendezvous;
-    bool                        afPadding1[10];
+    bool                        afPadding1[2];
     /** @} */

@@ -461 +460 @@
 typedef struct VMMR0PERVCPU
 {
+    /** Which host CPU ID is this EMT running on.
+     * Only valid when in RC or HMR0 with scheduling disabled. */
+    RTCPUID volatile                    idHostCpu;
+    /** The CPU set index corresponding to idHostCpu, UINT32_MAX if not valid.
+     * @remarks Best to make sure iHostCpuSet shares cache line with idHostCpu! */
+    uint32_t volatile                   iHostCpuSet;
+    /** Set if we've entered HM context. */
+    bool volatile                       fInHmContext;
+
+    bool                                afPadding[7];
+    /** Pointer to the VMMR0EntryFast preemption state structure.
+     * This is used to temporarily restore preemption before blocking.  */
+    R0PTRTYPE(PRTTHREADPREEMPTSTATE)    pPreemptState;
+    /** Thread context switching hook (ring-0). */
+    RTTHREADCTXHOOK                     hCtxHook;
+
     /** @name Arguments passed by VMMR0EntryEx via vmmR0CallRing3SetJmpEx.
      * @note Cannot be put on the stack as the location may change and upset the
      *       validation of resume-after-ring-3-call logic.
      * @{ */
-    PGVM                pGVM;
-    VMCPUID             idCpu;
-    VMMR0OPERATION      enmOperation;
-    PSUPVMMR0REQHDR     pReq;
-    uint64_t            u64Arg;
-    PSUPDRVSESSION      pSession;
+    PGVM                                pGVM;
+    VMCPUID                             idCpu;
+    VMMR0OPERATION                      enmOperation;
+    PSUPVMMR0REQHDR                     pReq;
+    uint64_t                            u64Arg;
+    PSUPDRVSESSION                      pSession;
     /** @} */
 } VMMR0PERVCPU;
  • trunk/src/VBox/VMM/include/VMMInternal.mac

    (diff: r90189 → r90379)

@@ -121 +121 @@
         .pR0RelLoggerR0         RTR0PTR_RES 1

-        .hCtxHook               RTR0PTR_RES 1
-
         .fInRendezvous          resb 1
-        .afPadding1             resb 10
+        .afPadding1             resb 2
         .fMayHaltInRing0        resb 1
         .cNsSpinBlockThreshold  resd 1