VirtualBox

Changeset 58830 in vbox for trunk/src/VBox/VMM/VMMR0


Ignore:
Timestamp:
Nov 23, 2015 5:15:47 PM (9 years ago)
Author:
vboxsync
Message:

GVMMR0.cpp: Redid locking using critical sections, in the used list case a read/write one. Also optimized halting to not even requiring read locking when we're not doing early wake-ups (gvmmR0SchedDoWakeUps).

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp

    r58126 r58830  
    6363#include <iprt/asm.h>
    6464#include <iprt/asm-amd64-x86.h>
     65#include <iprt/critsect.h>
    6566#include <iprt/mem.h>
    6667#include <iprt/semaphore.h>
     
    260261    /** Alignment padding. */
    261262    uint32_t            u32Alignment;
     263    /** Mini lock for restricting early wake-ups to one thread. */
     264    bool volatile       fDoingEarlyWakeUps;
     265    bool                afPadding[3]; /**< explicit alignment padding. */
    262266    /** When the next halted or sleeping EMT will wake up.
    263267     * This is set to 0 when it needs recalculating and to UINT64_MAX when
     
    266270    /** The lock used to serialize VM creation, destruction and associated events that
    267271     * isn't performance critical. Owners may acquire the list lock. */
    268     RTSEMFASTMUTEX      CreateDestroyLock;
     272    RTCRITSECT          CreateDestroyLock;
    269273    /** The lock used to serialize used list updates and accesses.
    270274     * This indirectly includes scheduling since the scheduler will have to walk the
    271275     * used list to examine running VMs. Owners may not acquire any other locks. */
    272     RTSEMFASTMUTEX      UsedLock;
     276    RTCRITSECTRW        UsedLock;
    273277    /** The handle array.
    274278     * The size of this array defines the maximum number of currently running VMs.
     
    290294    uint32_t            nsMinSleepCompany;
    291295    /** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns}
    292      * The limit for the first round of early wakeups, given in nano seconds.
     296     * The limit for the first round of early wake-ups, given in nanoseconds.
    293297     */
    294298    uint32_t            nsEarlyWakeUp1;
    295299    /** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns}
    296      * The limit for the second round of early wakeups, given in nano seconds.
     300     * The limit for the second round of early wake-ups, given in nanoseconds.
    297301     */
    298302    uint32_t            nsEarlyWakeUp2;
     303
     304    /** Set if we're doing early wake-ups.
     305     * This reflects nsEarlyWakeUp1 and nsEarlyWakeUp2.  */
     306    bool volatile       fDoEarlyWakeUps;
    299307
    300308    /** The number of entries in the host CPU array (aHostCpus). */
     
    303311    GVMMHOSTCPU         aHostCpus[1];
    304312} GVMM;
     313AssertCompileMemberAlignment(GVMM, CreateDestroyLock, 8);
     314AssertCompileMemberAlignment(GVMM, UsedLock, 8);
     315AssertCompileMemberAlignment(GVMM, uNsNextEmtWakeup, 8);
    305316/** Pointer to the GVMM instance data. */
    306317typedef GVMM *PGVMM;
     
    377388    if (!pGVMM)
    378389        return VERR_NO_MEMORY;
    379     int rc = RTSemFastMutexCreate(&pGVMM->CreateDestroyLock);
     390    int rc = RTCritSectInitEx(&pGVMM->CreateDestroyLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE,
     391                              "GVMM-CreateDestroyLock");
    380392    if (RT_SUCCESS(rc))
    381393    {
    382         rc = RTSemFastMutexCreate(&pGVMM->UsedLock);
     394        rc = RTCritSectRwInitEx(&pGVMM->UsedLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "GVMM-UsedLock");
    383395        if (RT_SUCCESS(rc))
    384396        {
     
    427439                pGVMM->nsEarlyWakeUp2    = 0;
    428440            }
     441            pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
    429442
    430443            /* The host CPU data. */
     
    491504
    492505            /* bail out. */
    493             RTSemFastMutexDestroy(pGVMM->UsedLock);
    494             pGVMM->UsedLock = NIL_RTSEMFASTMUTEX;
    495         }
    496         RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
    497         pGVMM->CreateDestroyLock = NIL_RTSEMFASTMUTEX;
     506            RTCritSectRwDelete(&pGVMM->UsedLock);
     507        }
     508        RTCritSectDelete(&pGVMM->CreateDestroyLock);
    498509    }
    499510
     
    543554     */
    544555    pGVMM->u32Magic = ~GVMM_MAGIC;
    545     RTSemFastMutexDestroy(pGVMM->UsedLock);
    546     pGVMM->UsedLock = NIL_RTSEMFASTMUTEX;
    547     RTSemFastMutexDestroy(pGVMM->CreateDestroyLock);
    548     pGVMM->CreateDestroyLock = NIL_RTSEMFASTMUTEX;
     556    RTCritSectRwDelete(&pGVMM->UsedLock);
     557    RTCritSectDelete(&pGVMM->CreateDestroyLock);
    549558
    550559    pGVMM->iFreeHead = 0;
     
    620629    {
    621630        if (u64Value <= RT_NS_100MS)
     631        {
    622632            pGVMM->nsEarlyWakeUp1 = u64Value;
     633            pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
     634        }
    623635        else
    624636            rc = VERR_OUT_OF_RANGE;
     
    627639    {
    628640        if (u64Value <= RT_NS_100MS)
     641        {
    629642            pGVMM->nsEarlyWakeUp2 = u64Value;
     643            pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0;
     644        }
    630645        else
    631646            rc = VERR_OUT_OF_RANGE;
     
    681696
    682697/**
    683  * Try acquire the 'used' lock.
     698 * Acquire the 'used' lock in shared mode.
     699 *
     700 * This prevents destruction of the VM while we're in ring-0.
     701 *
     702 * @returns IPRT status code, see RTCritSectRwEnterShared.
     703 * @param   a_pGVMM     The GVMM instance data.
     704 * @sa      GVMMR0_USED_SHARED_UNLOCK, GVMMR0_USED_EXCLUSIVE_LOCK
     705 */
     706#define GVMMR0_USED_SHARED_LOCK(a_pGVMM)        RTCritSectRwEnterShared(&(a_pGVMM)->UsedLock)
     707
     708/**
     709 * Release the 'used' lock when owning it in shared mode.
     710 *
     711 * @returns IPRT status code, see RTCritSectRwLeaveShared.
     712 * @param   a_pGVMM     The GVMM instance data.
     713 * @sa      GVMMR0_USED_SHARED_LOCK
     714 */
     715#define GVMMR0_USED_SHARED_UNLOCK(a_pGVMM)      RTCritSectRwLeaveShared(&(a_pGVMM)->UsedLock)
     716
     717/**
     718 * Acquire the 'used' lock in exclusive mode.
     719 *
     720 * Only use this function when making changes to the used list.
     721 *
     722 * @returns IPRT status code, see RTCritSectRwEnterExcl.
     723 * @param   a_pGVMM     The GVMM instance data.
     724 * @sa      GVMMR0_USED_EXCLUSIVE_UNLOCK
     725 */
     726#define GVMMR0_USED_EXCLUSIVE_LOCK(a_pGVMM)     RTCritSectRwEnterExcl(&(a_pGVMM)->UsedLock)
     727
     728/**
     729 * Release the 'used' lock when owning it in exclusive mode.
     730 *
     731 * @returns IPRT status code, see RTCritSectRwLeaveExcl.
     732 * @param   a_pGVMM     The GVMM instance data.
     733 * @sa      GVMMR0_USED_EXCLUSIVE_LOCK, GVMMR0_USED_SHARED_UNLOCK
     734 */
     735#define GVMMR0_USED_EXCLUSIVE_UNLOCK(a_pGVMM)   RTCritSectRwLeaveExcl(&(a_pGVMM)->UsedLock)
     736
     737
     738/**
     739 * Try acquire the 'create & destroy' lock.
    684740 *
    685741 * @returns IPRT status code, see RTSemFastMutexRequest.
    686742 * @param   pGVMM   The GVMM instance data.
    687743 */
    688 DECLINLINE(int) gvmmR0UsedLock(PGVMM pGVMM)
    689 {
    690     LogFlow(("++gvmmR0UsedLock(%p)\n", pGVMM));
    691     int rc = RTSemFastMutexRequest(pGVMM->UsedLock);
    692     LogFlow(("gvmmR0UsedLock(%p)->%Rrc\n", pGVMM, rc));
     744DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)
     745{
     746    LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));
     747    int rc = RTCritSectEnter(&pGVMM->CreateDestroyLock);
     748    LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));
    693749    return rc;
    694750}
     
    696752
    697753/**
    698  * Release the 'used' lock.
    699  *
    700  * @returns IPRT status code, see RTSemFastMutexRelease.
    701  * @param   pGVMM   The GVMM instance data.
    702  */
    703 DECLINLINE(int) gvmmR0UsedUnlock(PGVMM pGVMM)
    704 {
    705     LogFlow(("--gvmmR0UsedUnlock(%p)\n", pGVMM));
    706     int rc = RTSemFastMutexRelease(pGVMM->UsedLock);
    707     AssertRC(rc);
    708     return rc;
    709 }
    710 
    711 
    712 /**
    713  * Try acquire the 'create & destroy' lock.
     754 * Release the 'create & destroy' lock.
    714755 *
    715756 * @returns IPRT status code, see RTSemFastMutexRequest.
    716757 * @param   pGVMM   The GVMM instance data.
    717758 */
    718 DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)
    719 {
    720     LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));
    721     int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);
    722     LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));
    723     return rc;
    724 }
    725 
    726 
    727 /**
    728  * Release the 'create & destroy' lock.
    729  *
    730  * @returns IPRT status code, see RTSemFastMutexRequest.
    731  * @param   pGVMM   The GVMM instance data.
    732  */
    733759DECLINLINE(int) gvmmR0CreateDestroyUnlock(PGVMM pGVMM)
    734760{
    735761    LogFlow(("--gvmmR0CreateDestroyUnlock(%p)\n", pGVMM));
    736     int rc = RTSemFastMutexRelease(pGVMM->CreateDestroyLock);
     762    int rc = RTCritSectLeave(&pGVMM->CreateDestroyLock);
    737763    AssertRC(rc);
    738764    return rc;
     
    830856                 * Move the handle from the free to used list and perform permission checks.
    831857                 */
    832                 rc = gvmmR0UsedLock(pGVMM);
     858                rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
    833859                AssertRC(rc);
    834860
     
    844870                pHandle->ProcId   = NIL_RTPROCESS;
    845871
    846                 gvmmR0UsedUnlock(pGVMM);
     872                GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
    847873
    848874                rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL);
     
    924950
    925951                                        /* complete the handle - take the UsedLock sem just to be careful. */
    926                                         rc = gvmmR0UsedLock(pGVMM);
     952                                        rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
    927953                                        AssertRC(rc);
    928954
     
    941967                                            VBOXVMM_R0_GVMM_VM_CREATED(pGVM, pVM, ProcId, (void *)hEMT0, cCpus);
    942968
    943                                             gvmmR0UsedUnlock(pGVMM);
     969                                            GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
    944970                                            gvmmR0CreateDestroyUnlock(pGVMM);
    945971
     
    948974                                            return VINF_SUCCESS;
    949975                                        }
     976
     977                                        GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
    950978                                    }
    951979
     
    12411269    int rc = gvmmR0CreateDestroyLock(pGVMM);
    12421270    AssertRC(rc);
    1243     rc = gvmmR0UsedLock(pGVMM);
     1271    rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
    12441272    AssertRC(rc);
    12451273
     
    12501278    {
    12511279        SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext);
    1252         gvmmR0UsedUnlock(pGVMM);
     1280        GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
    12531281        gvmmR0CreateDestroyUnlock(pGVMM);
    12541282        return;
     
    12661294            {
    12671295                SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev);
    1268                 gvmmR0UsedUnlock(pGVMM);
     1296                GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
    12691297                gvmmR0CreateDestroyUnlock(pGVMM);
    12701298                return;
     
    12831311        {
    12841312            SUPR0Printf("GVM: can't find the handle previous previous of %d!\n", pHandle->iSelf);
    1285             gvmmR0UsedUnlock(pGVMM);
     1313            GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
    12861314            gvmmR0CreateDestroyUnlock(pGVMM);
    12871315            return;
     
    13021330    {
    13031331        pGVMM->cEMTs -= pGVM->cCpus;
    1304         gvmmR0UsedUnlock(pGVMM);
     1332        GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
    13051333
    13061334        gvmmR0CleanupVM(pGVM);
     
    13481376
    13491377        /* Re-acquire the UsedLock before freeing the handle since we're updating handle fields. */
    1350         rc = gvmmR0UsedLock(pGVMM);
     1378        rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM);
    13511379        AssertRC(rc);
    13521380    }
     
    13651393    ASMAtomicWriteU32(&pHandle->ProcId,          NIL_RTPROCESS);
    13661394
    1367     gvmmR0UsedUnlock(pGVMM);
     1395    GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM);
    13681396    gvmmR0CreateDestroyUnlock(pGVMM);
    13691397    LogFlow(("gvmmR0HandleObjDestructor: returns\n"));
     
    13891417    PGVM pGVM;
    13901418    PGVMM pGVMM;
    1391     int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /* fTakeUsedLock */);
     1419    int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /* fTakeUsedLock */); /** @todo take lock here. */
    13921420    if (RT_FAILURE(rc))
    13931421        return rc;
     
    14451473 * @param   ppGVM           Where to store the GVM pointer.
    14461474 * @param   ppGVMM          Where to store the pointer to the GVMM instance data.
    1447  * @param   fTakeUsedLock   Whether to take the used lock or not.
    1448  *                          Be very careful if not taking the lock as it's possible that
    1449  *                          the VM will disappear then.
     1475 * @param   fTakeUsedLock   Whether to take the used lock or not.  We take it in
     1476 *                          shared mode when requested.
     1477 *
     1478 *                          Be very careful if not taking the lock as it's
     1479 *                          possible that the VM will disappear then!
    14501480 *
    14511481 * @remark  This will not assert on an invalid pVM but try return silently.
     
    14791509    if (fTakeUsedLock)
    14801510    {
    1481         int rc = gvmmR0UsedLock(pGVMM);
     1511        int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
    14821512        AssertRCReturn(rc, rc);
    14831513
     
    14891519                        ||  pGVM->pVM != pVM))
    14901520        {
    1491             gvmmR0UsedUnlock(pGVMM);
     1521            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    14921522            return VERR_INVALID_HANDLE;
    14931523        }
     
    15241554 * @remark  This will not take the 'used'-lock because it doesn't do
    15251555 *          nesting and this function will be used from under the lock.
     1556 *          Update: This is no longer true.  Consider taking the lock in shared
     1557 *          mode!
    15261558 */
    15271559GVMMR0DECL(int) GVMMR0ByVM(PVM pVM, PGVM *ppGVM)
     
    16811713     * the user.
    16821714     */
    1683     if (   !pGVMM->nsEarlyWakeUp1
    1684         && !pGVMM->nsEarlyWakeUp2)
     1715    if (!pGVMM->fDoEarlyWakeUps)
    16851716        return 0;
    16861717
     
    16931724    if (   pGVMM->cHaltedEMTs == 0
    16941725        || uNsEarlyWakeUp2 > pGVMM->uNsNextEmtWakeup)
     1726        return 0;
     1727
     1728    /*
     1729     * Only one thread doing this at a time.
     1730     */
     1731    if (!ASMAtomicCmpXchgBool(&pGVMM->fDoingEarlyWakeUps, true, false))
    16951732        return 0;
    16961733
     
    18091846    pGVMM->uNsNextEmtWakeup = u64Min;
    18101847
     1848    ASMAtomicWriteBool(&pGVMM->fDoingEarlyWakeUps, false);
    18111849    return cWoken;
    18121850}
     
    18441882
    18451883    /*
    1846      * Take the UsedList semaphore, get the current time
    1847      * and check if anyone needs waking up.
    1848      * Interrupts must NOT be disabled at this point because we ask for GIP time!
    1849      */
    1850     rc = gvmmR0UsedLock(pGVMM);
    1851     AssertRC(rc);
    1852     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1884     * If we're doing early wake-ups, we must take the UsedList lock before we
     1885     * start querying the current time.
     1886     * Note! Interrupts must NOT be disabled at this point because we ask for GIP time!
     1887     */
     1888    bool const fDoEarlyWakeUps = pGVMM->fDoEarlyWakeUps;
     1889    if (fDoEarlyWakeUps)
     1890    {
     1891        rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc);
     1892        GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1893    }
    18531894
    18541895    pCurGVCpu->gvmm.s.iCpuEmt = ASMGetApicId();
     
    18611902    const uint64_t u64NowGip = RTTimeNanoTS();
    18621903    GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    1863     pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
    1864     GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1904
     1905    if (fDoEarlyWakeUps)
     1906    {
     1907        pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
     1908        GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     1909    }
    18651910
    18661911    /*
     
    18771922        if (cNsInterval > RT_NS_1SEC)
    18781923            u64ExpireGipTime = u64NowGip + RT_NS_1SEC;
    1879         if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
    1880             pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
    18811924        ASMAtomicWriteU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
    18821925        ASMAtomicIncU32(&pGVMM->cHaltedEMTs);
    1883         gvmmR0UsedUnlock(pGVMM);
     1926        if (fDoEarlyWakeUps)
     1927        {
     1928            if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
     1929                pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
     1930            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
     1931        }
    18841932        GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    18851933
     
    19071955    {
    19081956        pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
    1909         gvmmR0UsedUnlock(pGVMM);
     1957        if (fDoEarlyWakeUps)
     1958            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    19101959        GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    19111960        RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
     
    19932042            GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
    19942043
    1995             if (fTakeUsedLock)
     2044            if (fTakeUsedLock && pGVMM->fDoEarlyWakeUps)
    19962045            {
    19972046                /*
     
    20092058        if (fTakeUsedLock)
    20102059        {
    2011             int rc2 = gvmmR0UsedUnlock(pGVMM);
     2060            int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    20122061            AssertRC(rc2);
    20132062            GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     
    20922141        if (fTakeUsedLock)
    20932142        {
    2094             int rc2 = gvmmR0UsedUnlock(pGVMM);
     2143            int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    20952144            AssertRC(rc2);
    20962145        }
     
    21652214        }
    21662215
    2167         int rc2 = gvmmR0UsedUnlock(pGVMM);
     2216        int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    21682217        AssertRC(rc2);
    21692218        GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
     
    22192268    if (RT_SUCCESS(rc))
    22202269    {
    2221         rc = gvmmR0UsedLock(pGVMM);
    2222         AssertRC(rc);
    2223         pGVM->gvmm.s.StatsSched.cPollCalls++;
    2224 
    2225         Assert(ASMGetFlags() & X86_EFL_IF);
    2226         const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
    2227 
    2228         if (!fYield)
     2270        /*
     2271         * We currently only implement helping doing wakeups (fYield = false), so don't
     2272         * bother taking the lock if gvmmR0SchedDoWakeUps is not going to do anything.
     2273         */
     2274        if (!fYield && pGVMM->fDoEarlyWakeUps)
     2275        {
     2276            rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc);
     2277            pGVM->gvmm.s.StatsSched.cPollCalls++;
     2278
     2279            Assert(ASMGetFlags() & X86_EFL_IF);
     2280            const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
     2281
    22292282            pGVM->gvmm.s.StatsSched.cPollWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
     2283
     2284            GVMMR0_USED_SHARED_UNLOCK(pGVMM);
     2285        }
     2286        /*
     2287         * Not quite sure what we could do here...
     2288         */
     2289        else if (fYield)
     2290            rc = VERR_NOT_IMPLEMENTED; /** @todo implement this... */
    22302291        else
    2231         {
    2232             /** @todo implement this... */
    2233             rc = VERR_NOT_IMPLEMENTED;
    2234         }
    2235 
    2236         gvmmR0UsedUnlock(pGVMM);
     2292            rc = VINF_SUCCESS;
    22372293    }
    22382294
     
    24462502        memset(&pStats->SchedVM, 0, sizeof(pStats->SchedVM));
    24472503
    2448         int rc = gvmmR0UsedLock(pGVMM);
     2504        int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
    24492505        AssertRCReturn(rc, rc);
    24502506    }
     
    25192575    pStats->cHostCpus = iDstCpu;
    25202576
    2521     gvmmR0UsedUnlock(pGVMM);
     2577    GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    25222578
    25232579    return VINF_SUCCESS;
     
    25942650        GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);
    25952651
    2596         int rc = gvmmR0UsedLock(pGVMM);
     2652        int rc = GVMMR0_USED_SHARED_LOCK(pGVMM);
    25972653        AssertRCReturn(rc, rc);
    25982654    }
     
    26342690    }
    26352691
    2636     gvmmR0UsedUnlock(pGVMM);
     2692    GVMMR0_USED_SHARED_UNLOCK(pGVMM);
    26372693
    26382694    return VINF_SUCCESS;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette