Changeset 37452 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jun 14, 2011 6:13:48 PM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/IOMAll.cpp (r37443 → r37452)

@@ -5 +5 @@
 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
@@ -1159 +1159 @@
     return rcStrict;
 }
+
+
+/**
+ * Frees an MMIO range after the reference counter has become zero.
+ *
+ * @param   pVM     The VM handle.
+ * @param   pRange  The range to free.
+ */
+void iomMmioFreeRange(PVM pVM, PIOMMMIORANGE pRange)
+{
+    MMHyperFree(pVM, pRange);
+}
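The new iomMmioFreeRange() is the destructor half of a reference-counting scheme for MMIO ranges: lookups now retain a range and the final release frees it, so a range can no longer vanish while another context is still using it. Below is a minimal sketch of the retain/release pair that would drive it; the cRefs member and the ASMAtomic* helpers are assumptions for illustration, not code from this changeset:

    /* Sketch only: assumes IOMMMIORANGE carries a cRefs reference counter. */
    DECLINLINE(void) iomMmioRetainRange(PIOMMMIORANGE pRange)
    {
        uint32_t cRefs = ASMAtomicIncU32(&pRange->cRefs);
        Assert(cRefs > 1); /* the registration itself holds one reference */
    }

    DECLINLINE(void) iomMmioReleaseRange(PVM pVM, PIOMMMIORANGE pRange)
    {
        uint32_t cRefs = ASMAtomicDecU32(&pRange->cRefs);
        if (!cRefs)
            iomMmioFreeRange(pVM, pRange); /* last reference gone: free it */
    }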
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (r37424 → r37452)

@@ -79 +79 @@
  * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
  */
-DECLINLINE(int) iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
+static int iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
 {
 #ifdef VBOX_WITH_STATISTICS
-    PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhysFault, pRange);
+    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
     Assert(pStats);
 #endif
@@ -89 +89 @@
     int rc;
     if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
-        rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
+        rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
+                                                GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
     else
         rc = VINF_SUCCESS;
@@ -104 +105 @@
 {
 #ifdef VBOX_WITH_STATISTICS
-    PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhys, pRange);
+    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
     Assert(pStats);
 #endif
@@ -303 +304 @@

-#ifdef IOM_WITH_MOVS_SUPPORT
+#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working */
 /**
  * [REP] MOVSB
@@ -322 +323 @@
  * @param   ppStat      Which sub-sample to attribute this call to.
  */
-static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
+static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
+                            PSTAMPROFILE *ppStat)
 {
     /*
@@ -456 +458 @@
             PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
             if (    RT_SUCCESS(rc)
-                &&  (pMMIODst = iomMMIOGetRange(pVM, PhysDst)))
+                &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
             {
                 /** @todo implement per-device locks for MMIO access. */
@@ -466 +468 @@
                 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
                 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
+                {
+                    iomMmioReleaseRange(pVM, pRange);
                     return VINF_IOM_HC_MMIO_READ_WRITE;
+                }

                 /* copy loop. */
@@ -485 +490 @@
                     cTransfers--;
                 }
+                iomMmioReleaseRange(pVM, pRange);
             }
             else
@@ -1077 +1083 @@
     PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
     Assert(pRange);
-    Assert(pRange == iomMMIOGetRange(pVM, GCPhysFault));
-    /** @todo implement per-device locks for MMIO access. It can replace the IOM
-     *        lock for most of the code, provided that we retake the lock while
-     *        deregistering PIOMMMIORANGE to deal with remapping/access races
-     *        (unlikely, but an SMP guest shouldn't cause us to crash). */
-    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
+    Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));

 #ifdef VBOX_WITH_STATISTICS
     /*
      * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
      */
-    PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhysFault, pRange);
+    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
     if (!pStats)
     {
@@ -1132 +1133 @@

     /*
+     * Retain the range and do locking.
+     */
+    iomMmioRetainRange(pRange);
+    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
+    PPDMCRITSECT pLock = pDevIns->CTX_SUFF(pCritSect);
+    if (!pLock)
+        pLock = &pVM->iom.s.EmtLock;
+    else
+    {
+        iomUnlock(pVM);
+        rc = PDMCritSectEnter(pLock, VINF_IOM_HC_MMIO_READ_WRITE);
+        if (rc != VINF_SUCCESS)
+        {
+            iomMmioReleaseRange(pVM, pRange);
+            return rc;
+        }
+    }
+
+    /*
      * Disassemble the instruction and interpret it.
      */
@@ -1141 +1161 @@
     if (RT_FAILURE(rc))
     {
-        iomUnlock(pVM);
+        iomMmioReleaseRange(pVM, pRange);
+        PDMCritSectLeave(pLock);
         return rc;
     }
@@ -1168 +1189 @@
         {
             if (uErrorCode == UINT32_MAX)
-                return VINF_IOM_HC_MMIO_READ_WRITE;
-            STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
-            PSTAMPROFILE pStat = NULL;
-            rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
-            STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
+                rc = VINF_IOM_HC_MMIO_READ_WRITE;
+            else
+            {
+                STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
+                PSTAMPROFILE pStat = NULL;
+                rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
+                STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
+            }
             break;
         }
@@ -1271 +1295 @@
     STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
-    iomUnlock(pVM);
+    iomMmioReleaseRange(pVM, pRange);
+    PDMCritSectLeave(pLock);
     return rc;
 }
@@ -1310 +1335 @@
         return VINF_IOM_HC_MMIO_READ_WRITE;
 #endif
-    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMMIOGetRange(pVM, GCPhysFault));
+    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMmioGetRange(pVM, GCPhysFault));
     iomUnlock(pVM);
     return VBOXSTRICTRC_VAL(rcStrict);
 }
+

 #ifdef IN_RING3
@@ -1329 +1355 @@
  * @param   pvUser          Pointer to the MMIO range entry.
  */
-DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
+DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
+                                   PGMACCESSTYPE enmAccessType, void *pvUser)
 {
     PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
     STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);

-    /* Take the IOM lock before performing any MMIO. */
+    AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
+    AssertPtr(pRange);
+
+    /*
+     * Validate the range.
+     */
     int rc = iomLock(pVM);
     AssertRC(rc);
-
-    AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
-
-    Assert(pRange);
-    Assert(pRange == iomMMIOGetRange(pVM, GCPhysFault));
-    /** @todo implement per-device locks for MMIO access. It can replace the IOM
-     *        lock for most of the code, provided that we retake the lock while
-     *        deregistering PIOMMMIORANGE to deal with remapping/access races
-     *        (unlikely, but an SMP guest shouldn't cause us to crash). */
-    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
-
+    Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
+
+    /*
+     * Perform locking.
+     */
+    iomMmioRetainRange(pRange);
+    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
+    PPDMCRITSECT pLock = pDevIns->CTX_SUFF(pCritSect);
+    if (!pLock)
+        pLock = &pVM->iom.s.EmtLock;
+    else
+    {
+        iomUnlock(pVM);
+        rc = PDMCritSectEnter(pLock, VINF_IOM_HC_MMIO_READ_WRITE);
+        if (rc != VINF_SUCCESS)
+        {
+            iomMmioReleaseRange(pVM, pRange);
+            return rc;
+        }
+    }
+
+    /*
+     * Perform the access.
+     */
     if (enmAccessType == PGMACCESSTYPE_READ)
         rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
@@ -1354 +1399 @@

     AssertRC(rc);
-    iomUnlock(pVM);
+    iomMmioReleaseRange(pVM, pRange);
+    PDMCritSectLeave(pLock);
     return rc;
 }
 #endif /* IN_RING3 */
+
@@ -1385 +1432 @@
     /*
      * Lookup the current context range node and statistics.
      */
-    PIOMMMIORANGE pRange = iomMMIOGetRange(pVM, GCPhys);
+    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
     AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
     if (!pRange)
    {
@@ -1392 +1439 @@
         return VERR_INTERNAL_ERROR;
     }
-    /** @todo implement per-device locks for MMIO access. */
-    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
 #ifdef VBOX_WITH_STATISTICS
-    PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhys, pRange);
+    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
     if (!pStats)
     {
@@ -1410 +1455 @@
     if (pRange->CTX_SUFF(pfnReadCallback))
     {
+        /*
+         * Perform locking.
+         */
+        iomMmioRetainRange(pRange);
+        PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
+        PPDMCRITSECT pLock = pDevIns->CTX_SUFF(pCritSect);
+        if (!pLock)
+            pLock = &pVM->iom.s.EmtLock;
+        else
+        {
+            iomUnlock(pVM);
+            rc = PDMCritSectEnter(pLock, VINF_IOM_HC_MMIO_WRITE);
+            if (rc != VINF_SUCCESS)
+            {
+                iomMmioReleaseRange(pVM, pRange);
+                return rc;
+            }
+        }
+
         /*
          * Perform the read and deal with the result.
@@ -1420 +1484 @@
             case VINF_SUCCESS:
                 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
-                iomUnlock(pVM);
+                iomMmioReleaseRange(pVM, pRange);
+                PDMCritSectLeave(pLock);
                 return rc;
 #ifndef IN_RING3
@@ -1429 +1494 @@
             default:
                 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
-                iomUnlock(pVM);
+                iomMmioReleaseRange(pVM, pRange);
+                PDMCritSectLeave(pLock);
                 return rc;
@@ -1442 +1508 @@
                 }
                 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
-                iomUnlock(pVM);
+                iomMmioReleaseRange(pVM, pRange);
+                PDMCritSectLeave(pLock);
                 return VINF_SUCCESS;
@@ -1455 +1522 @@
                 }
                 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
-                iomUnlock(pVM);
+                iomMmioReleaseRange(pVM, pRange);
+                PDMCritSectLeave(pLock);
                 return VINF_SUCCESS;
         }
+        /* not reached */
     }
 #ifndef IN_RING3
@@ -1469 +1538 @@
     /*
-     * Lookup the ring-3 range.
+     * Unassigned memory - this is actually not supposed to happen...
      */
     STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
     STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
-    /* Unassigned memory; this is actually not supposed to happen. */
     switch (cbValue)
     {
-        case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
+        case 1: *(uint8_t  *)pu32Value = UINT8_C(0xff); break;
         case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
         case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
@@ -1514 +1582 @@
     /*
      * Lookup the current context range node.
      */
-    PIOMMMIORANGE pRange = iomMMIOGetRange(pVM, GCPhys);
+    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
     AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
     if (!pRange)
     {
@@ -1521 +1589 @@
         return VERR_INTERNAL_ERROR;
     }
-    /** @todo implement per-device locks for MMIO access. */
-    Assert(!pRange->CTX_SUFF(pDevIns) || !pRange->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
 #ifdef VBOX_WITH_STATISTICS
-    PIOMMMIOSTATS pStats = iomMMIOGetStats(pVM, GCPhys, pRange);
+    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
     if (!pStats)
     {
@@ -1537 +1603 @@
 #endif /* VBOX_WITH_STATISTICS */

-    /*
-     * Perform the write if there's a write handler. R0/GC may have
-     * to defer it to ring-3.
-     */
     if (pRange->CTX_SUFF(pfnWriteCallback))
     {
+        /*
+         * Perform locking.
+         */
+        iomMmioRetainRange(pRange);
+        PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
+        PPDMCRITSECT pLock = pDevIns->CTX_SUFF(pCritSect);
+        if (!pLock)
+            pLock = &pVM->iom.s.EmtLock;
+        else
+        {
+            iomUnlock(pVM);
+            rc = PDMCritSectEnter(pLock, VINF_IOM_HC_MMIO_READ);
+            if (rc != VINF_SUCCESS)
+            {
+                iomMmioReleaseRange(pVM, pRange);
+                return rc;
+            }
+        }
+
+        /*
+         * Perform the write.
+         */
         STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
-        rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, &u32Value, (unsigned)cbValue);
+        rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
+                                                GCPhys, &u32Value, (unsigned)cbValue);
         STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
 #ifndef IN_RING3
@@ -1552 +1637 @@
 #endif
         Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
-        iomUnlock(pVM);
+        iomMmioReleaseRange(pVM, pRange);
+        PDMCritSectLeave(pLock);
         return rc;
     }
@@ -1919 +2005 @@
 {
     /* Currently only called from the VGA device during MMIO. */
-    Assert(IOMIsLockOwner(pVM));
     Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
-
     AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
-
     PVMCPU pVCpu = VMMGetCpu(pVM);
@@ -1932 +2015 @@
         return VINF_SUCCESS;    /* ignore */

+    PDMCritSectEnter(&pVM->iom.s.EmtLock, VINF_SUCCESS);
+
     /*
      * Lookup the context range node the page belongs to.
      */
-    PIOMMMIORANGE pRange = iomMMIOGetRange(pVM, GCPhys);
+    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
     AssertMsgReturn(pRange,
                     ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
                     VERR_IOM_MMIO_RANGE_NOT_FOUND);
@@ -1949 +2034 @@

     int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
+
+    PDMCritSectLeave(&pVM->iom.s.EmtLock);
     AssertRCReturn(rc, rc);
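Every MMIO entry point touched above now follows the same retain / lock / access / release shape: take a reference on the range, switch from the global IOM lock to the owning device's critical section when it has one, perform the access, then undo both. The sketch below condenses that pattern into one hypothetical helper (iomMmioDoAccessSketch is not a function in the tree; the error codes and the EmtLock fallback are taken from the hunks above):

    /* Sketch of the recurring locking pattern introduced in this file. */
    static int iomMmioDoAccessSketch(PVM pVM, PIOMMMIORANGE pRange)
    {
        int rc;

        /* Keep the range alive once the IOM lock may be dropped. */
        iomMmioRetainRange(pRange);

        /* Prefer the device's own critical section; devices without one
           keep using the global IOM EMT lock. */
        PPDMDEVINS   pDevIns = pRange->CTX_SUFF(pDevIns);
        PPDMCRITSECT pLock   = pDevIns->CTX_SUFF(pCritSect);
        if (!pLock)
            pLock = &pVM->iom.s.EmtLock;
        else
        {
            iomUnlock(pVM);  /* drop the IOM lock before taking the device lock */
            rc = PDMCritSectEnter(pLock, VINF_IOM_HC_MMIO_READ_WRITE);
            if (rc != VINF_SUCCESS)  /* busy: the access is redone in ring-3 */
            {
                iomMmioReleaseRange(pVM, pRange);
                return rc;
            }
        }

        /* ... perform the read or write through the range callbacks ... */
        rc = VINF_SUCCESS;

        /* Undo on every exit path. */
        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pLock);
        return rc;
    }

Note that the MOVS emulation could not be fitted into this scheme, which is why IOM_WITH_MOVS_SUPPORT is now compiled out with "&& 0 /* locking prevents this from working */".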
trunk/src/VBox/VMM/VMMAll/PDMAll.cpp (r36822 → r37452)

@@ -301 +301 @@
 }

-/**
- * Write MSR in APIC range.
+
+/**
+ * Write a MSR in APIC range.
  *
  * @returns VBox status code.
@@ -323 +324 @@
 }

-/**
- * Read MSR in APIC range.
+
+/**
+ * Read a MSR in APIC range.
  *
  * @returns VBox status code.
trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (r37443 → r37452)

@@ -238 +238 @@
         return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

-#elif defined(IN_RING0)
+#else
+# ifdef IN_RING0
     /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
      *        and would be better off switching out of that while waiting for
@@ -245 +246 @@
      *        the lock wait and when the call return it will call ring-0
      *        again and resume via in setjmp style.  Not very efficient. */
-# if 0
+#  if 0
     if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                              *        callers not prepared for longjmp/blocking to
@@ -274 +275 @@
         return rc;
     }
-# else
+#  else
     /*
      * We preemption hasn't been disabled, we can block here in ring-0.
@@ -281 +282 @@
         && ASMIntAreEnabled())
         return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
-# endif
+#  endif
+# endif /* IN_RING0 */

     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
+
+    /*
+     * Call ring-3 to acquire the critical section?
+     */
+    if (rcBusy == VINF_SUCCESS)
+    {
+        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
+        PVMCPU  pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
+        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
+    }
+
+    /*
+     * Return busy.
+     */
     LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
     return rcBusy;
-
-#else  /* IN_RC */
-    /*
-     * Return busy.
-     */
-    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
-    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
-    return rcBusy;
-#endif /* IN_RC */
+#endif /* !IN_RING3 */
 }
@@ -307 +315 @@
  * @param   pCritSect   The PDM critical section to enter.
  * @param   rcBusy      The status code to return when we're in GC or R0
- *                      and the section is busy.
+ *                      and the section is busy.  Pass VINF_SUCCESS to
+ *                      acquire the critical section thru a ring-3
+ *                      call if necessary.
  */
 VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
 {
+    int rc;
+#ifndef IN_RING3
+    if (rcBusy == VINF_SUCCESS)
+    {
+# ifndef PDMCRITSECT_STRICT
+        rc = pdmCritSectEnter(pCritSect, VERR_SEM_BUSY, NULL);
+# else
+        RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+        rc = pdmCritSectEnter(pCritSect, VERR_SEM_BUSY, &SrcPos);
+# endif
+        if (rc == VERR_SEM_BUSY)
+        {
+
+        }
+    }
+    else
+#endif /* !IN_RING3 */
+    {
 #ifndef PDMCRITSECT_STRICT
-    return pdmCritSectEnter(pCritSect, rcBusy, NULL);
+        rc = pdmCritSectEnter(pCritSect, rcBusy, NULL);
 #else
-    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
-    return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
+        RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
+        rc = pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
 #endif
+    }
+    return rc;
 }
@@ -329 +359 @@
  * @param   pCritSect   The PDM critical section to enter.
  * @param   rcBusy      The status code to return when we're in GC or R0
- *                      and the section is busy.
+ *                      and the section is busy.  Pass VINF_SUCCESS to
+ *                      acquire the critical section thru a ring-3
+ *                      call if necessary.
  * @param   uId         Some kind of locking location ID.  Typically a
  *                      return address up the stack.  Optional (0).
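For callers the visible change is the new meaning of rcBusy: passing VINF_SUCCESS now asks PDMCritSectEnter to acquire the section through a ring-3 call (VMMCALLRING3_PDM_CRIT_SECT_ENTER) rather than hand back a busy status; judging by the empty VERR_SEM_BUSY block above, the ring-0 wrapper half of this path is still a stub in this revision. An illustrative comparison of the two calling styles follows (pThis->CritSect is a made-up variable, not from this changeset):

    /* Classic style: a concrete rcBusy makes the caller handle deferral. */
    int rc = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_HC_MMIO_WRITE);
    if (rc != VINF_SUCCESS)
        return rc;              /* ring-0/raw-mode: redo the access in ring-3 */
    /* ... guarded work ... */
    PDMCritSectLeave(&pThis->CritSect);

    /* New style: VINF_SUCCESS as rcBusy means "block via ring-3 if needed",
       so only genuine errors come back.  IOMMMIOMapMMIO2Page above already
       uses this form with the IOM EMT lock. */
    rc = PDMCritSectEnter(&pThis->CritSect, VINF_SUCCESS);
    AssertRC(rc);
    /* ... guarded work ... */
    PDMCritSectLeave(&pThis->CritSect);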
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r37362 → r37452)

@@ -2232 +2232 @@
 VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
 {
-    VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
+    /** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
     Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r37414 → r37452)

@@ -812 +812 @@
  * @param   pTimer      The timer which clock lock we wish to take.
  * @param   rcBusy      What to return in ring-0 and raw-mode context
- *                      if the lock is busy.
+ *                      if the lock is busy.  Pass VINF_SUCCESS to
+ *                      acquire the critical section thru a ring-3
+ *                      call if necessary.
  *
  * @remarks Currently only supported on timers using the virtual sync clock.