Changeset 39034 in vbox for trunk/src/VBox/VMM/VMMR3
- Timestamp: Oct 19, 2011, 11:43:52 AM
- Location: trunk/src/VBox/VMM/VMMR3
- Files: 22 edited
trunk/src/VBox/VMM/VMMR3/CPUMDbg.cpp
r35625 → r39034:

  static DECLCALLBACK(int) cpumR3RegGstGet_crX(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
-     PVMCPU pVCpu = (PVMCPU)pvUser;
-     void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
-
+     PVMCPU pVCpu = (PVMCPU)pvUser;
      VMCPU_ASSERT_EMT(pVCpu);
…
  {
      int rc;
-     PVMCPU pVCpu = (PVMCPU)pvUser;
-     void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
+     PVMCPU pVCpu = (PVMCPU)pvUser;

      VMCPU_ASSERT_EMT(pVCpu);
…
  static DECLCALLBACK(int) cpumR3RegGstGet_drX(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
-     PVMCPU pVCpu = (PVMCPU)pvUser;
-     void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
-
+     PVMCPU pVCpu = (PVMCPU)pvUser;
      VMCPU_ASSERT_EMT(pVCpu);
…
  {
      int rc;
-     PVMCPU pVCpu = (PVMCPU)pvUser;
-     void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
+     PVMCPU pVCpu = (PVMCPU)pvUser;

      VMCPU_ASSERT_EMT(pVCpu);
…
  static DECLCALLBACK(int) cpumR3RegGstGet_msr(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
-     PVMCPU pVCpu = (PVMCPU)pvUser;
-     void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
-
-     VMCPU_ASSERT_EMT(pVCpu);
+     PVMCPU pVCpu = (PVMCPU)pvUser;
+     VMCPU_ASSERT_EMT(pVCpu);
+
      uint64_t u64Value;
      int rc = CPUMQueryGuestMsr(pVCpu, pDesc->offRegister, &u64Value);
…
      int rc;
      PVMCPU pVCpu = (PVMCPU)pvUser;
-     void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;

      VMCPU_ASSERT_EMT(pVCpu);
…
  static DECLCALLBACK(int) cpumR3RegGstGet_stN(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
-     PVMCPU pVCpu = (PVMCPU)pvUser;
-     void const *pv = (uint8_t const *)&pVCpu->cpum.s.Guest + pDesc->offRegister;
-
+     PVMCPU pVCpu = (PVMCPU)pvUser;
      VMCPU_ASSERT_EMT(pVCpu);
      Assert(pDesc->enmType == DBGFREGVALTYPE_R80);
…
  static DECLCALLBACK(int) cpumR3RegHyperGet_drX(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
-     PVMCPU pVCpu = (PVMCPU)pvUser;
-     void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
-
+     PVMCPU pVCpu = (PVMCPU)pvUser;
      VMCPU_ASSERT_EMT(pVCpu);
trunk/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp
r35346 → r39034:

      ASMAtomicXchgHandle(&pVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(hAlias)], hRealAliasFor, &hAsOld);
      uint32_t cRefs = RTDbgAsRelease(hAsOld);
-     Assert(cRefs > 0);
-     Assert(cRefs != UINT32_MAX);
+     Assert(cRefs > 0); Assert(cRefs != UINT32_MAX); NOREF(cRefs);
      rc = VINF_SUCCESS;
  }
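Most edits in this changeset follow one pattern: a local variable is only consumed by an Assert*/AssertMsg or a log statement, so in builds where those macros compile to nothing the compiler warns about an unused (or set-but-unused) variable. Appending NOREF(x) keeps the variable formally referenced without generating code. A minimal, self-contained sketch of the idea, assuming IPRT's NOREF() is the usual cast-to-void macro (the helper below is hypothetical, not VirtualBox code):

    #include <assert.h>

    /* Assumed shape of the macro; see iprt/cdefs.h for the real definition. */
    #define NOREF(var) ((void)(var))

    static int insertRecord(void) { return 1; }   /* hypothetical stand-in for an insert API */

    void example(void)
    {
        int fInserted = insertRecord();
        assert(fInserted);   /* compiles to nothing when NDEBUG is defined */
        NOREF(fInserted);    /* still references the variable, so no unused-variable warning */
    }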
trunk/src/VBox/VMM/VMMR3/DBGFReg.cpp
r38838 → r39034:

  {
      bool fInserted2 = RTStrSpaceInsert(&pVM->dbgf.s.RegSpace, &paLookupRecs[iLookupRec].Core);
-     AssertMsg(fInserted2, ("'%s'", paLookupRecs[iLookupRec].Core.pszString));
+     AssertMsg(fInserted2, ("'%s'", paLookupRecs[iLookupRec].Core.pszString)); NOREF(fInserted2);
  }
…
  static void dbgfR3RegNmQueryAllInSet(PCDBGFREGSET pSet, size_t cRegsToQuery, PDBGFREGENTRYNM paRegs, size_t cRegs)
  {
-     int rc = VINF_SUCCESS;
-
      if (cRegsToQuery > pSet->cDescs)
          cRegsToQuery = pSet->cDescs;
trunk/src/VBox/VMM/VMMR3/EMHwaccm.cpp
r35346 → r39034:

  #endif
  {
+ #ifdef LOG_ENABLED
      PCPUMCTX pCtx = pVCpu->em.s.pCtx;
+ #endif
      int rc;
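The EMHwaccm.cpp, PDMAsyncCompletionFile.cpp, PGM.cpp and PGMMap.cpp hunks use the second recurring pattern: a variable that is only read by logging, statistics or strict-build code gets its declaration wrapped in the same preprocessor conditional, so configurations that compile that code out never see the unused variable. A hedged, self-contained sketch (LogFlow here is a stand-in for the VBox logging macro, which likewise expands to nothing when LOG_ENABLED is not defined):

    #include <stdio.h>

    #ifdef LOG_ENABLED
    # define LogFlow(a) printf a            /* logging enabled: emit the message */
    #else
    # define LogFlow(a) do { } while (0)    /* logging disabled: argument is discarded */
    #endif

    void emExecuteStep(int *pState)
    {
    #ifdef LOG_ENABLED
        int iOldState = *pState;            /* only referenced by the LogFlow below */
    #endif
        *pState += 1;
        LogFlow(("state %d -> %d\n", iOldState, *pState));
    }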
trunk/src/VBox/VMM/VMMR3/FTM.cpp
r38838 → r39034:

      pNode->pPage = (void *)(pNode + 1);
      bool fRet = RTAvlGCPhysInsert(&pVM->ftm.s.standby.pPhysPageTree, &pNode->Core);
-     Assert(fRet);
+     Assert(fRet); NOREF(fRet);
  }
…
      return VINF_SUCCESS;

-     /** todo:verify VM config. */
+     /** @todo verify VM config. */

      /*
…
       * Command processing loop.
       */
-     bool fDone = false;
+     //bool fDone = false;
      for (;;)
      {
trunk/src/VBox/VMM/VMMR3/HWACCM.cpp
r38838 → r39034:

      for (VMCPUID i = 0; i < pVM->cCpus; i++)
      {
-         PVMCPU pVCpu = &pVM->aCpus[i];
+         PVMCPU pVCpu = &pVM->aCpus[i]; NOREF(pVCpu);

  #ifdef VBOX_WITH_STATISTICS
trunk/src/VBox/VMM/VMMR3/PATM.cpp
r36969 → r39034:

      /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
      bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
-     Assert(fInserted);
+     Assert(fInserted); NOREF(fInserted);

      LogRel(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
…
      /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
      bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
-     Assert(fInserted);
+     Assert(fInserted); NOREF(fInserted);

      /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
trunk/src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp
r39031 → r39034:

                                            size_t cbRead)
  {
+ #ifdef VBOX_WITH_STATISTICS
      PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+ #endif

      LogFlowFunc(("pTask=%#p pEndpoint=%#p off=%RTfoff paSegments=%#p cSegments=%zu cbRead=%zu\n",
trunk/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp
r37596 → r39034:

  /* $Id$ */
  /** @file
-  * PDM Async I/O - Transport data asynchronous in R3 using EMT.
-  * Async File I/O manager.
+  * PDM Async I/O - Async File I/O manager.
   */

  /*
-  * Copyright (C) 2006-2008 Oracle Corporation
+  * Copyright (C) 2006-2011 Oracle Corporation
   *
   * This file is part of VirtualBox Open Source Edition (OSE), as
…
   * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
   */
+
+ /*******************************************************************************
+ *   Header Files                                                               *
+ *******************************************************************************/
  #define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
  #define RT_STRICT
…
  /** The update period for the I/O load statistics in ms. */
- #define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD 1000
+ #define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD   1000
  /** Maximum number of requests a manager will handle. */
- #define PDMACEPFILEMGR_REQS_STEP 512
+ #define PDMACEPFILEMGR_REQS_STEP            512
+
…
                                              int rc, size_t cbTransfered);

+
  int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
  {
-     int rc = VINF_SUCCESS;
-
      pAioMgr->cRequestsActiveMax = PDMACEPFILEMGR_REQS_STEP;

-     rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
+     int rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
      if (rc == VERR_OUT_OF_RANGE)
          rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, pAioMgr->cRequestsActiveMax);
…
  static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
  {
-     PPDMACEPFILEMGR pAioMgrNew = NULL;
-     int rc = VINF_SUCCESS;
-
      /*
       * Check if balancing would improve the situation.
…
      if (pdmacFileAioMgrNormalIsBalancePossible(pAioMgr))
      {
-         PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
-
-         rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
+         PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
+         PPDMACEPFILEMGR pAioMgrNew = NULL;
+
+         int rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
          if (RT_SUCCESS(rc))
          {
…
  static int pdmacFileAioMgrNormalGrow(PPDMACEPFILEMGR pAioMgr)
  {
-     int rc = VINF_SUCCESS;
-     RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;
-
      LogFlowFunc(("pAioMgr=%#p\n", pAioMgr));
…
      {
          RTFileClose(pCurr->hFile);
-         rc = RTFileOpen(&pCurr->hFile, pCurr->Core.pszUri, pCurr->fFlags);
-         AssertRC(rc);
+         int rc2 = RTFileOpen(&pCurr->hFile, pCurr->Core.pszUri, pCurr->fFlags); AssertRC(rc);

          pCurr = pCurr->AioMgr.pEndpointNext;
…
      pAioMgr->cRequestsActiveMax += PDMACEPFILEMGR_REQS_STEP;

-     rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS);
+     RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;
+     int rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS);
      if (rc == VERR_OUT_OF_RANGE)
          rc = RTFileAioCtxCreate(&hAioCtxNew, pAioMgr->cRequestsActiveMax);
…
          /* Close the old context. */
          rc = RTFileAioCtxDestroy(pAioMgr->hAioCtx);
-         AssertRC(rc);
+         AssertRC(rc); /** @todo r=bird: Ignoring error code, will propagate. */

          pAioMgr->hAioCtx = hAioCtxNew;
…
      /* Assign the file to the new context. */
      pCurr = pAioMgr->pEndpointsHead;
-
      while (pCurr)
      {
          rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pCurr->hFile);
-         AssertRC(rc);
+         AssertRC(rc); /** @todo r=bird: Ignoring error code, will propagate. */

          pCurr = pCurr->AioMgr.pEndpointNext;
…
  DECLINLINE(bool) pdmacFileAioMgrNormalRcIsFatal(int rcReq)
  {
-     return    rcReq == VERR_DEV_IO_ERROR
-            || rcReq == VERR_FILE_IO_ERROR
-            || rcReq == VERR_DISK_IO_ERROR
-            || rcReq == VERR_DISK_FULL
-            || rcReq == VERR_FILE_TOO_BIG;
+     return rcReq == VERR_DEV_IO_ERROR
+         || rcReq == VERR_FILE_IO_ERROR
+         || rcReq == VERR_DISK_IO_ERROR
+         || rcReq == VERR_DISK_FULL
+         || rcReq == VERR_FILE_TOO_BIG;
  }
…
  static RTFILEAIOREQ pdmacFileAioMgrNormalRequestAlloc(PPDMACEPFILEMGR pAioMgr)
  {
-     RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
-
      /* Get a request handle. */
+     RTFILEAIOREQ hReq;
      if (pAioMgr->iFreeEntry > 0)
      {
…
      {
          int rc = RTFileAioReqCreate(&hReq);
-         AssertRC(rc);
+         AssertRCReturn(rc, NIL_RTFILEAIOREQ);
      }
…
                                              PRTFILEAIOREQ pahReqs, unsigned cReqs)
  {
-     int rc;
-
      pAioMgr->cRequestsActive += cReqs;
      pEndpoint->AioMgr.cRequestsActive += cReqs;
…
      LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));

-     rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
+     int rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
      if (RT_FAILURE(rc))
      {
…
              }
              else if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
-             {
-                 PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(pahReqs[i]);
-
                  pdmacFileAioMgrNormalReqCompleteRc(pAioMgr, pahReqs[i], rcReq, 0);
-             }
          }
…
                                                 PPDMACTASKFILE pTask)
  {
-     PPDMACFILERANGELOCK pRangeLock = NULL; /**< Range lock */
-
      AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
                || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ,
                ("Invalid task type %d\n", pTask->enmTransferType));

+     PPDMACFILERANGELOCK pRangeLock;
      pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetRangeGet(pEndpoint->AioMgr.pTreeRangesLocked, offStart);
      if (!pRangeLock)
…
      /* Check whether we have one of the situations explained below */
      if (   pRangeLock
- #if 0 /** @todo:later. For now we will just block all requests if they interfere */
+ #if 0 /** @todo later. For now we will just block all requests if they interfere */
          && (   (pRangeLock->fReadLock && pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
              || (!pRangeLock->fReadLock)
…
                                               PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
  {
-     int rc = VINF_SUCCESS;
-     RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
-     PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
-     void *pvBuf = pTask->DataSeg.pvSeg;
-
      AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
-               || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
-               ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
-               pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));
+              || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
+              ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
+              pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));

      pTask->fPrefetch = false;
…
       * the same range. This will result in data corruption if both are executed concurrently.
       */
+     int rc = VINF_SUCCESS;
      bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask);
-
      if (!fLocked)
      {
          /* Get a request handle. */
-         hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
+         RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
          AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
…
                                               PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
  {
-     int rc = VINF_SUCCESS;
-     RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
-     PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
-     void *pvBuf = pTask->DataSeg.pvSeg;
-
      /*
       * Check if the alignment requirements are met.
…
       * the same range. This will result in data corruption if both are executed concurrently.
       */
+     int rc = VINF_SUCCESS;
      bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask);
-
      if (!fLocked)
      {
+         PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
+         void *pvBuf = pTask->DataSeg.pvSeg;
+
          /* Get a request handle. */
-         hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
+         RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
          AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
…
      RTFILEAIOREQ apReqs[20];
      unsigned cRequests = 0;
-     unsigned cMaxRequests = pAioMgr->cRequestsActiveMax - pAioMgr->cRequestsActive;
-     int rc = VINF_SUCCESS;
+     int rc = VINF_SUCCESS;

      AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
…
      pEndpoint->AioMgr.pReqsPendingTail = NULL;
      rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
-     AssertRC(rc);
+     AssertRC(rc); /** @todo r=bird: status code potentially overwritten. */
…
  int pdmacFileAioMgrNormal(RTTHREAD ThreadSelf, void *pvUser)
  {
-     int rc= VINF_SUCCESS;
-     PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
-     uint64_t uMillisEnd= RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
-
-     while (   (pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING)
-            || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING)
-            || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
+     int rc = VINF_SUCCESS;
+     PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
+     uint64_t uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
+
+     while (   pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING
+            || pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING
+            || pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING)
      {
          if (!pAioMgr->cRequestsActive)
trunk/src/VBox/VMM/VMMR3/PDMBlkCache.cpp
r38880 → r39034:

      /* Insert into the tree. */
      bool fInserted = RTAvlrU64Insert(pBlkCache->pTree, &pEntry->Core);
-     Assert(fInserted);
+     Assert(fInserted); NOREF(fInserted);

      /* Add to the dirty list. */
…
  static PPDMBLKCACHEENTRY pdmBlkCacheGetCacheEntryByOffset(PPDMBLKCACHE pBlkCache, uint64_t off)
  {
-     PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
-     PPDMBLKCACHEENTRY pEntry = NULL;
-
-     STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
+     STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeGet, Cache);

      RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
-     pEntry = (PPDMBLKCACHEENTRY)RTAvlrU64RangeGet(pBlkCache->pTree, off);
+     PPDMBLKCACHEENTRY pEntry = (PPDMBLKCACHEENTRY)RTAvlrU64RangeGet(pBlkCache->pTree, off);
      if (pEntry)
          pdmBlkCacheEntryRef(pEntry);
      RTSemRWReleaseRead(pBlkCache->SemRWEntries);

-     STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+     STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeGet, Cache);

      return pEntry;
…
                                       PPDMBLKCACHEENTRY *ppEntryAbove)
  {
-     PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
-
-     STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
+     STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeGet, Cache);

      RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
…
      RTSemRWReleaseRead(pBlkCache->SemRWEntries);

-     STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+     STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeGet, Cache);
  }

  static void pdmBlkCacheInsertEntry(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEENTRY pEntry)
  {
-     PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
-
-     STAM_PROFILE_ADV_START(&pCache->StatTreeInsert, Cache);
+     STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeInsert, Cache);
      RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
      bool fInserted = RTAvlrU64Insert(pBlkCache->pTree, &pEntry->Core);
-     AssertMsg(fInserted, ("Node was not inserted into tree\n"));
-     STAM_PROFILE_ADV_STOP(&pCache->StatTreeInsert, Cache);
+     AssertMsg(fInserted, ("Node was not inserted into tree\n")); NOREF(fInserted);
+     STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeInsert, Cache);
      RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
  }
…
  {
      pdmBlkCacheLockEnter(pCache);
-     pdmBlkCacheEntryRemoveFromList(pEntry); 
+     pdmBlkCacheEntryRemoveFromList(pEntry);

      STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
…
  {
      pdmBlkCacheLockEnter(pCache);
-     pdmBlkCacheEntryRemoveFromList(pEntry); 
+     pdmBlkCacheEntryRemoveFromList(pEntry);

      RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
…
  {
      pdmBlkCacheLockEnter(pCache);
-     pdmBlkCacheEntryRemoveFromList(pEntry); 
+     pdmBlkCacheEntryRemoveFromList(pEntry);

      RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
…
          PPDMBLKCACHEREQ pReq = pWaiter->pReq;

-         pdmBlkCacheReqUpdate(pBlkCache, pWaiter->pReq, rc, true);
+         pdmBlkCacheReqUpdate(pBlkCache, pReq, rc, true);

          RTMemFree(pWaiter);
trunk/src/VBox/VMM/VMMR3/PDMUsb.cpp
r37879 → r39034:

      PVM pVM = pUsbIns->Internal.s.pVM;
      VM_ASSERT_EMT(pVM);
-     /** @todo int rc = DBGFR3InfoRegisterUsb(pVM, pszName, pszDesc, pfnHandler, pUsbIns); */
+     NOREF(pVM); /** @todo int rc = DBGFR3InfoRegisterUsb(pVM, pszName, pszDesc, pfnHandler, pUsbIns); */
      int rc = VERR_NOT_IMPLEMENTED; AssertFailed();
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r38953 → r39034:

  VMMR3DECL(int) PGMR3ChangeMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmGuestMode)
  {
+ #if HC_ARCH_BITS == 32
      bool fIsOldGuestPagingMode64Bits = (pVCpu->pgm.s.enmGuestMode >= PGMMODE_AMD64);
+ #endif
      bool fIsNewGuestPagingMode64Bits = (enmGuestMode >= PGMMODE_AMD64);
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
r37187 → r39034:

      /* partial read that failed, chop it up in pages. */
      *pcbRead = 0;
-     size_t const cbReq = cb;
      rc = VINF_SUCCESS;
      while (cb > 0)
…
      uint32_t iFirst, iLast;
-     uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
+     pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
      for (uint32_t i = iFirst; i <= iLast; i++)
      {
…
      uint32_t iFirst, iLast;
-     uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
+     pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
      for (uint32_t i = iFirst; i <= iLast; i++)
      {
trunk/src/VBox/VMM/VMMR3/PGMMap.cpp
r36891 → r39034:

  {
      PPGM pPGM = &pVM->pgm.s;
+ #ifdef VBOX_STRICT
      PVMCPU pVCpu = VMMGetCpu(pVM);
+ #endif
      pgmLock(pVM); /* to avoid assertions */
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r38956 → r39034:

  #ifdef VBOX_STRICT
-     bool fOk = true;
      uint32_t i;
      for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r37354 → r39034:

      /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */
      int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK);
-     Assert(rc == VINF_SUCCESS);
+     AssertRCSuccess(rc);
      pPage->fDirty = false;
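PGMPool.cpp and PGMSharedPage.cpp swap an open-coded status check for the dedicated IPRT assertion. A hedged sketch of the assumed behaviour (illustration only, not the iprt/assert.h definition, which is also compiled out in non-strict builds): AssertRCSuccess insists on strictly VINF_SUCCESS rather than any non-negative status, and reports the offending code when it fires.

    #include <stdio.h>

    #define VINF_SUCCESS 0

    /* Illustrative stand-in for the real IPRT macro. */
    #define AssertRCSuccess(rc) \
        do { \
            if ((rc) != VINF_SUCCESS) \
                fprintf(stderr, "AssertRCSuccess failed: rc=%d (%s:%d)\n", (rc), __FILE__, __LINE__); \
        } while (0)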
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
r38953 → r39034:

  static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
  {
-     int rc;
-     PPGM pPGM = &pVM->pgm.s;
+     int rc;

      /*
trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
r38953 → r39034:

      /* We must stall other VCPUs as we'd otherwise have to send IPI flush commands for every single change we make. */
      int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, pgmR3SharedModuleRegRendezvous, &idCpu);
-     Assert(rc == VINF_SUCCESS);
+     AssertRCSuccess(rc);
  }
  #endif
trunk/src/VBox/VMM/VMMR3/PGMShw.h
r35333 → r39034:

      PPGMPOOLPAGE pNewShwPageCR3;
      PVM pVM = pVCpu->pVMR3;
-     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

      Assert(HWACCMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
trunk/src/VBox/VMM/VMMR3/SELM.cpp
r35346 → r39034:

       * Figure out the size of what need to monitor.
       */
-     bool fNoRing1Stack = true;
      /* We're not interested in any 16-bit TSSes. */
      uint32_t cbMonitoredTss = cbTss;
trunk/src/VBox/VMM/VMMR3/TM.cpp
r38838 → r39034:

       * Change to the DESTROY state.
       */
-     TMTIMERSTATE enmState = pTimer->enmState;
-     TMTIMERSTATE enmNewState = enmState;
+     TMTIMERSTATE const enmState = pTimer->enmState;
      Log2(("TMTimerDestroy: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
            pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
…
      /* Check if stopped by expired timer. */
-     uint64_t u64Expire = pNext->u64Expire;
-     if (u64Now >= pNext->u64Expire)
+     uint64_t const u64Expire = pNext->u64Expire;
+     if (u64Now >= u64Expire)
      {
          STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop);
-         u64Now = pNext->u64Expire;
+         u64Now = u64Expire;
          ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
          ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
trunk/src/VBox/VMM/VMMR3/VM.cpp
r38838 → r39034:

  {
      int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
-     AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
+     AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc)); NOREF(rc2);
  }