Changeset 58830 in vbox for trunk/src/VBox/VMM/VMMR0

Timestamp: Nov 23, 2015, 5:15:47 PM (9 years ago)

Files: 1 edited

Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r58126 r58830 63 63 #include <iprt/asm.h> 64 64 #include <iprt/asm-amd64-x86.h> 65 #include <iprt/critsect.h> 65 66 #include <iprt/mem.h> 66 67 #include <iprt/semaphore.h> … … 260 261 /** Alignment padding. */ 261 262 uint32_t u32Alignment; 263 /** Mini lock for restricting early wake-ups to one thread. */ 264 bool volatile fDoingEarlyWakeUps; 265 bool afPadding[3]; /**< explicit alignment padding. */ 262 266 /** When the next halted or sleeping EMT will wake up. 263 267 * This is set to 0 when it needs recalculating and to UINT64_MAX when … … 266 270 /** The lock used to serialize VM creation, destruction and associated events that 267 271 * isn't performance critical. Owners may acquire the list lock. */ 268 RT SEMFASTMUTEXCreateDestroyLock;272 RTCRITSECT CreateDestroyLock; 269 273 /** The lock used to serialize used list updates and accesses. 270 274 * This indirectly includes scheduling since the scheduler will have to walk the 271 275 * used list to examin running VMs. Owners may not acquire any other locks. */ 272 RT SEMFASTMUTEXUsedLock;276 RTCRITSECTRW UsedLock; 273 277 /** The handle array. 274 278 * The size of this array defines the maximum number of currently running VMs. … … 290 294 uint32_t nsMinSleepCompany; 291 295 /** @gcfgm{/GVMM/EarlyWakeUp1, 32-bit, 0, 100000000, 25000, ns} 292 * The limit for the first round of early wake ups, given in nano seconds.296 * The limit for the first round of early wake-ups, given in nano seconds. 293 297 */ 294 298 uint32_t nsEarlyWakeUp1; 295 299 /** @gcfgm{/GVMM/EarlyWakeUp2, 32-bit, 0, 100000000, 50000, ns} 296 * The limit for the second round of early wake ups, given in nano seconds.300 * The limit for the second round of early wake-ups, given in nano seconds. 297 301 */ 298 302 uint32_t nsEarlyWakeUp2; 303 304 /** Set if we're doing early wake-ups. 305 * This reflects nsEarlyWakeUp1 and nsEarlyWakeUp2. 
*/ 306 bool volatile fDoEarlyWakeUps; 299 307 300 308 /** The number of entries in the host CPU array (aHostCpus). */ … … 303 311 GVMMHOSTCPU aHostCpus[1]; 304 312 } GVMM; 313 AssertCompileMemberAlignment(GVMM, CreateDestroyLock, 8); 314 AssertCompileMemberAlignment(GVMM, UsedLock, 8); 315 AssertCompileMemberAlignment(GVMM, uNsNextEmtWakeup, 8); 305 316 /** Pointer to the GVMM instance data. */ 306 317 typedef GVMM *PGVMM; … … 377 388 if (!pGVMM) 378 389 return VERR_NO_MEMORY; 379 int rc = RTSemFastMutexCreate(&pGVMM->CreateDestroyLock); 390 int rc = RTCritSectInitEx(&pGVMM->CreateDestroyLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, 391 "GVMM-CreateDestroyLock"); 380 392 if (RT_SUCCESS(rc)) 381 393 { 382 rc = RT SemFastMutexCreate(&pGVMM->UsedLock);394 rc = RTCritSectRwInitEx(&pGVMM->UsedLock, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "GVMM-UsedLock"); 383 395 if (RT_SUCCESS(rc)) 384 396 { … … 427 439 pGVMM->nsEarlyWakeUp2 = 0; 428 440 } 441 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0; 429 442 430 443 /* The host CPU data. */ … … 491 504 492 505 /* bail out. 
*/ 493 RTSemFastMutexDestroy(pGVMM->UsedLock); 494 pGVMM->UsedLock = NIL_RTSEMFASTMUTEX; 495 } 496 RTSemFastMutexDestroy(pGVMM->CreateDestroyLock); 497 pGVMM->CreateDestroyLock = NIL_RTSEMFASTMUTEX; 506 RTCritSectRwDelete(&pGVMM->UsedLock); 507 } 508 RTCritSectDelete(&pGVMM->CreateDestroyLock); 498 509 } 499 510 … … 543 554 */ 544 555 pGVMM->u32Magic = ~GVMM_MAGIC; 545 RTSemFastMutexDestroy(pGVMM->UsedLock); 546 pGVMM->UsedLock = NIL_RTSEMFASTMUTEX; 547 RTSemFastMutexDestroy(pGVMM->CreateDestroyLock); 548 pGVMM->CreateDestroyLock = NIL_RTSEMFASTMUTEX; 556 RTCritSectRwDelete(&pGVMM->UsedLock); 557 RTCritSectDelete(&pGVMM->CreateDestroyLock); 549 558 550 559 pGVMM->iFreeHead = 0; … … 620 629 { 621 630 if (u64Value <= RT_NS_100MS) 631 { 622 632 pGVMM->nsEarlyWakeUp1 = u64Value; 633 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0; 634 } 623 635 else 624 636 rc = VERR_OUT_OF_RANGE; … … 627 639 { 628 640 if (u64Value <= RT_NS_100MS) 641 { 629 642 pGVMM->nsEarlyWakeUp2 = u64Value; 643 pGVMM->fDoEarlyWakeUps = pGVMM->nsEarlyWakeUp1 > 0 && pGVMM->nsEarlyWakeUp2 > 0; 644 } 630 645 else 631 646 rc = VERR_OUT_OF_RANGE; … … 681 696 682 697 /** 683 * Try acquire the 'used' lock. 698 * Acquire the 'used' lock in shared mode. 699 * 700 * This prevents destruction of the VM while we're in ring-0. 701 * 702 * @returns IPRT status code, see RTSemFastMutexRequest. 703 * @param a_pGVMM The GVMM instance data. 704 * @sa GVMMR0_USED_SHARED_UNLOCK, GVMMR0_USED_EXCLUSIVE_LOCK 705 */ 706 #define GVMMR0_USED_SHARED_LOCK(a_pGVMM) RTCritSectRwEnterShared(&(a_pGVMM)->UsedLock) 707 708 /** 709 * Release the 'used' lock in when owning it in shared mode. 710 * 711 * @returns IPRT status code, see RTSemFastMutexRequest. 712 * @param a_pGVMM The GVMM instance data. 713 * @sa GVMMR0_USED_SHARED_LOCK 714 */ 715 #define GVMMR0_USED_SHARED_UNLOCK(a_pGVMM) RTCritSectRwLeaveShared(&(a_pGVMM)->UsedLock) 716 717 /** 718 * Acquire the 'used' lock in exclusive mode. 
719 * 720 * Only use this function when making changes to the used list. 721 * 722 * @returns IPRT status code, see RTSemFastMutexRequest. 723 * @param a_pGVMM The GVMM instance data. 724 * @sa GVMMR0_USED_EXCLUSIVE_UNLOCK 725 */ 726 #define GVMMR0_USED_EXCLUSIVE_LOCK(a_pGVMM) RTCritSectRwEnterExcl(&(a_pGVMM)->UsedLock) 727 728 /** 729 * Release the 'used' lock when owning it in exclusive mode. 730 * 731 * @returns IPRT status code, see RTSemFastMutexRelease. 732 * @param pGVMM The GVMM instance data. 733 * @sa GVMMR0_USED_EXCLUSIVE_LOCK, GVMMR0_USED_SHARED_UNLOCK 734 */ 735 #define GVMMR0_USED_EXCLUSIVE_UNLOCK(a_pGVMM) RTCritSectRwLeaveExcl(&(a_pGVMM)->UsedLock) 736 737 738 /** 739 * Try acquire the 'create & destroy' lock. 684 740 * 685 741 * @returns IPRT status code, see RTSemFastMutexRequest. 686 742 * @param pGVMM The GVMM instance data. 687 743 */ 688 DECLINLINE(int) gvmmR0 UsedLock(PGVMM pGVMM)689 { 690 LogFlow(("++gvmmR0 UsedLock(%p)\n", pGVMM));691 int rc = RT SemFastMutexRequest(pGVMM->UsedLock);692 LogFlow(("gvmmR0 UsedLock(%p)->%Rrc\n", pGVMM, rc));744 DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM) 745 { 746 LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM)); 747 int rc = RTCritSectEnter(&pGVMM->CreateDestroyLock); 748 LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc)); 693 749 return rc; 694 750 } … … 696 752 697 753 /** 698 * Release the 'used' lock. 699 * 700 * @returns IPRT status code, see RTSemFastMutexRelease. 701 * @param pGVMM The GVMM instance data. 702 */ 703 DECLINLINE(int) gvmmR0UsedUnlock(PGVMM pGVMM) 704 { 705 LogFlow(("--gvmmR0UsedUnlock(%p)\n", pGVMM)); 706 int rc = RTSemFastMutexRelease(pGVMM->UsedLock); 707 AssertRC(rc); 708 return rc; 709 } 710 711 712 /** 713 * Try acquire the 'create & destroy' lock. 754 * Release the 'create & destroy' lock. 714 755 * 715 756 * @returns IPRT status code, see RTSemFastMutexRequest. 716 757 * @param pGVMM The GVMM instance data. 
717 758 */ 718 DECLINLINE(int) gvmmR0CreateDestroyLock(PGVMM pGVMM)719 {720 LogFlow(("++gvmmR0CreateDestroyLock(%p)\n", pGVMM));721 int rc = RTSemFastMutexRequest(pGVMM->CreateDestroyLock);722 LogFlow(("gvmmR0CreateDestroyLock(%p)->%Rrc\n", pGVMM, rc));723 return rc;724 }725 726 727 /**728 * Release the 'create & destroy' lock.729 *730 * @returns IPRT status code, see RTSemFastMutexRequest.731 * @param pGVMM The GVMM instance data.732 */733 759 DECLINLINE(int) gvmmR0CreateDestroyUnlock(PGVMM pGVMM) 734 760 { 735 761 LogFlow(("--gvmmR0CreateDestroyUnlock(%p)\n", pGVMM)); 736 int rc = RT SemFastMutexRelease(pGVMM->CreateDestroyLock);762 int rc = RTCritSectLeave(&pGVMM->CreateDestroyLock); 737 763 AssertRC(rc); 738 764 return rc; … … 830 856 * Move the handle from the free to used list and perform permission checks. 831 857 */ 832 rc = gvmmR0UsedLock(pGVMM);858 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM); 833 859 AssertRC(rc); 834 860 … … 844 870 pHandle->ProcId = NIL_RTPROCESS; 845 871 846 gvmmR0UsedUnlock(pGVMM);872 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM); 847 873 848 874 rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL); … … 924 950 925 951 /* complete the handle - take the UsedLock sem just to be careful. 
*/ 926 rc = gvmmR0UsedLock(pGVMM);952 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM); 927 953 AssertRC(rc); 928 954 … … 941 967 VBOXVMM_R0_GVMM_VM_CREATED(pGVM, pVM, ProcId, (void *)hEMT0, cCpus); 942 968 943 gvmmR0UsedUnlock(pGVMM);969 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM); 944 970 gvmmR0CreateDestroyUnlock(pGVMM); 945 971 … … 948 974 return VINF_SUCCESS; 949 975 } 976 977 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM); 950 978 } 951 979 … … 1241 1269 int rc = gvmmR0CreateDestroyLock(pGVMM); 1242 1270 AssertRC(rc); 1243 rc = gvmmR0UsedLock(pGVMM);1271 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM); 1244 1272 AssertRC(rc); 1245 1273 … … 1250 1278 { 1251 1279 SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext); 1252 gvmmR0UsedUnlock(pGVMM);1280 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM); 1253 1281 gvmmR0CreateDestroyUnlock(pGVMM); 1254 1282 return; … … 1266 1294 { 1267 1295 SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev); 1268 gvmmR0UsedUnlock(pGVMM);1296 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM); 1269 1297 gvmmR0CreateDestroyUnlock(pGVMM); 1270 1298 return; … … 1283 1311 { 1284 1312 SUPR0Printf("GVM: can't find the handle previous previous of %d!\n", pHandle->iSelf); 1285 gvmmR0UsedUnlock(pGVMM);1313 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM); 1286 1314 gvmmR0CreateDestroyUnlock(pGVMM); 1287 1315 return; … … 1302 1330 { 1303 1331 pGVMM->cEMTs -= pGVM->cCpus; 1304 gvmmR0UsedUnlock(pGVMM);1332 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM); 1305 1333 1306 1334 gvmmR0CleanupVM(pGVM); … … 1348 1376 1349 1377 /* Re-acquire the UsedLock before freeing the handle since we're updating handle fields. 
*/ 1350 rc = gvmmR0UsedLock(pGVMM);1378 rc = GVMMR0_USED_EXCLUSIVE_LOCK(pGVMM); 1351 1379 AssertRC(rc); 1352 1380 } … … 1365 1393 ASMAtomicWriteU32(&pHandle->ProcId, NIL_RTPROCESS); 1366 1394 1367 gvmmR0UsedUnlock(pGVMM);1395 GVMMR0_USED_EXCLUSIVE_UNLOCK(pGVMM); 1368 1396 gvmmR0CreateDestroyUnlock(pGVMM); 1369 1397 LogFlow(("gvmmR0HandleObjDestructor: returns\n")); … … 1389 1417 PGVM pGVM; 1390 1418 PGVMM pGVMM; 1391 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /* fTakeUsedLock */); 1419 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, false /* fTakeUsedLock */); /** @todo take lock here. */ 1392 1420 if (RT_FAILURE(rc)) 1393 1421 return rc; … … 1445 1473 * @param ppGVM Where to store the GVM pointer. 1446 1474 * @param ppGVMM Where to store the pointer to the GVMM instance data. 1447 * @param fTakeUsedLock Whether to take the used lock or not. 1448 * Be very careful if not taking the lock as it's possible that 1449 * the VM will disappear then. 1475 * @param fTakeUsedLock Whether to take the used lock or not. We take it in 1476 * shared mode when requested. 1477 * 1478 * Be very careful if not taking the lock as it's 1479 * possible that the VM will disappear then! 1450 1480 * 1451 1481 * @remark This will not assert on an invalid pVM but try return silently. … … 1479 1509 if (fTakeUsedLock) 1480 1510 { 1481 int rc = gvmmR0UsedLock(pGVMM);1511 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM); 1482 1512 AssertRCReturn(rc, rc); 1483 1513 … … 1489 1519 || pGVM->pVM != pVM)) 1490 1520 { 1491 gvmmR0UsedUnlock(pGVMM);1521 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 1492 1522 return VERR_INVALID_HANDLE; 1493 1523 } … … 1524 1554 * @remark This will not take the 'used'-lock because it doesn't do 1525 1555 * nesting and this function will be used from under the lock. 1556 * Update: This is no longer true. Consider taking the lock in shared 1557 * mode! 1526 1558 */ 1527 1559 GVMMR0DECL(int) GVMMR0ByVM(PVM pVM, PGVM *ppGVM) … … 1681 1713 * the user. 
1682 1714 */ 1683 if ( !pGVMM->nsEarlyWakeUp1 1684 && !pGVMM->nsEarlyWakeUp2) 1715 if (!pGVMM->fDoEarlyWakeUps) 1685 1716 return 0; 1686 1717 … … 1693 1724 if ( pGVMM->cHaltedEMTs == 0 1694 1725 || uNsEarlyWakeUp2 > pGVMM->uNsNextEmtWakeup) 1726 return 0; 1727 1728 /* 1729 * Only one thread doing this at a time. 1730 */ 1731 if (!ASMAtomicCmpXchgBool(&pGVMM->fDoingEarlyWakeUps, true, false)) 1695 1732 return 0; 1696 1733 … … 1809 1846 pGVMM->uNsNextEmtWakeup = u64Min; 1810 1847 1848 ASMAtomicWriteBool(&pGVMM->fDoingEarlyWakeUps, false); 1811 1849 return cWoken; 1812 1850 } … … 1844 1882 1845 1883 /* 1846 * Take the UsedList semaphore, get the current time 1847 * and check if anyone needs waking up. 1848 * Interrupts must NOT be disabled at this point because we ask for GIP time! 1849 */ 1850 rc = gvmmR0UsedLock(pGVMM); 1851 AssertRC(rc); 1852 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1884 * If we're doing early wake-ups, we must take the UsedList lock before we 1885 * start querying the current time. 1886 * Note! Interrupts must NOT be disabled at this point because we ask for GIP time! 
1887 */ 1888 bool const fDoEarlyWakeUps = pGVMM->fDoEarlyWakeUps; 1889 if (fDoEarlyWakeUps) 1890 { 1891 rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc); 1892 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1893 } 1853 1894 1854 1895 pCurGVCpu->gvmm.s.iCpuEmt = ASMGetApicId(); … … 1861 1902 const uint64_t u64NowGip = RTTimeNanoTS(); 1862 1903 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1863 pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip); 1864 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1904 1905 if (fDoEarlyWakeUps) 1906 { 1907 pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip); 1908 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1909 } 1865 1910 1866 1911 /* … … 1877 1922 if (cNsInterval > RT_NS_1SEC) 1878 1923 u64ExpireGipTime = u64NowGip + RT_NS_1SEC; 1879 if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)1880 pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;1881 1924 ASMAtomicWriteU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime); 1882 1925 ASMAtomicIncU32(&pGVMM->cHaltedEMTs); 1883 gvmmR0UsedUnlock(pGVMM); 1926 if (fDoEarlyWakeUps) 1927 { 1928 if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup) 1929 pGVMM->uNsNextEmtWakeup = u64ExpireGipTime; 1930 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 1931 } 1884 1932 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1885 1933 … … 1907 1955 { 1908 1956 pGVM->gvmm.s.StatsSched.cHaltNotBlocking++; 1909 gvmmR0UsedUnlock(pGVMM); 1957 if (fDoEarlyWakeUps) 1958 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 1910 1959 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1911 1960 RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti); … … 1993 2042 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1994 2043 1995 if (fTakeUsedLock )2044 if (fTakeUsedLock && pGVMM->fDoEarlyWakeUps) 1996 2045 { 1997 2046 /* … … 2009 2058 if (fTakeUsedLock) 2010 2059 { 2011 int rc2 = gvmmR0UsedUnlock(pGVMM);2060 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2012 2061 AssertRC(rc2); 2013 2062 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); … … 2092 2141 if (fTakeUsedLock) 
2093 2142 { 2094 int rc2 = gvmmR0UsedUnlock(pGVMM);2143 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2095 2144 AssertRC(rc2); 2096 2145 } … … 2165 2214 } 2166 2215 2167 int rc2 = gvmmR0UsedUnlock(pGVMM);2216 int rc2 = GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2168 2217 AssertRC(rc2); 2169 2218 GVMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); … … 2219 2268 if (RT_SUCCESS(rc)) 2220 2269 { 2221 rc = gvmmR0UsedLock(pGVMM); 2222 AssertRC(rc); 2223 pGVM->gvmm.s.StatsSched.cPollCalls++; 2224 2225 Assert(ASMGetFlags() & X86_EFL_IF); 2226 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */ 2227 2228 if (!fYield) 2270 /* 2271 * We currently only implement helping doing wakeups (fYield = false), so don't 2272 * bother taking the lock if gvmmR0SchedDoWakeUps is not going to do anything. 2273 */ 2274 if (!fYield && pGVMM->fDoEarlyWakeUps) 2275 { 2276 rc = GVMMR0_USED_SHARED_LOCK(pGVMM); AssertRC(rc); 2277 pGVM->gvmm.s.StatsSched.cPollCalls++; 2278 2279 Assert(ASMGetFlags() & X86_EFL_IF); 2280 const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */ 2281 2229 2282 pGVM->gvmm.s.StatsSched.cPollWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now); 2283 2284 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2285 } 2286 /* 2287 * Not quite sure what we could do here... 2288 */ 2289 else if (fYield) 2290 rc = VERR_NOT_IMPLEMENTED; /** @todo implement this... */ 2230 2291 else 2231 { 2232 /** @todo implement this... 
*/ 2233 rc = VERR_NOT_IMPLEMENTED; 2234 } 2235 2236 gvmmR0UsedUnlock(pGVMM); 2292 rc = VINF_SUCCESS; 2237 2293 } 2238 2294 … … 2446 2502 memset(&pStats->SchedVM, 0, sizeof(pStats->SchedVM)); 2447 2503 2448 int rc = gvmmR0UsedLock(pGVMM);2504 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM); 2449 2505 AssertRCReturn(rc, rc); 2450 2506 } … … 2519 2575 pStats->cHostCpus = iDstCpu; 2520 2576 2521 gvmmR0UsedUnlock(pGVMM);2577 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2522 2578 2523 2579 return VINF_SUCCESS; … … 2594 2650 GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE); 2595 2651 2596 int rc = gvmmR0UsedLock(pGVMM);2652 int rc = GVMMR0_USED_SHARED_LOCK(pGVMM); 2597 2653 AssertRCReturn(rc, rc); 2598 2654 } … … 2634 2690 } 2635 2691 2636 gvmmR0UsedUnlock(pGVMM);2692 GVMMR0_USED_SHARED_UNLOCK(pGVMM); 2637 2693 2638 2694 return VINF_SUCCESS;
Note: See TracChangeset for help on using the changeset viewer.