Changeset 39034 in vbox for trunk/src/VBox/VMM
- Timestamp: Oct 19, 2011 11:43:52 AM
- svn:sync-xref-src-repo-rev: 74454
- Location: trunk/src/VBox/VMM
- Files: 38 edited
Legend: unprefixed lines unmodified, '+' added, '-' removed
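Most of what follows is warning hygiene: variables consumed only by Assert*() or Log*() statements become unused in release builds, where those macros compile to nothing, so the changeset drops them, comments them out, wraps their declarations in preprocessor guards, or tags them with NOREF(). A minimal sketch of the NOREF() idiom in plain C (assuming NOREF() expands to a void cast, as in IPRT; exampleInsert() and the slot array are illustrative, not from the changeset):

    #include <stdbool.h>

    #ifdef NDEBUG
    # define Assert(expr)   do { } while (0)     /* compiled out in release builds */
    #else
    # include <assert.h>
    # define Assert(expr)   assert(expr)
    #endif
    #define NOREF(var)      ((void)(var))        /* references var without using it */

    /* Hypothetical stand-in for RTAvl*Insert and friends. */
    static bool exampleInsert(int *paSlots, int cSlots, int iSlot)
    {
        if (iSlot < 0 || iSlot >= cSlots)
            return false;
        paSlots[iSlot] = 1;
        return true;
    }

    int main(void)
    {
        int aSlots[4] = {0};
        bool fInserted = exampleInsert(aSlots, 4, 2);
        Assert(fInserted);   /* in release builds this line vanishes...          */
        NOREF(fInserted);    /* ...so this keeps -Wunused-but-set-variable quiet */
        return 0;
    }

The preprocessor-guard variant of the same cleanup appears below in EMHwaccm.cpp (LOG_ENABLED), PDMAsyncCompletionFile.cpp (VBOX_WITH_STATISTICS), PGMMap.cpp (VBOX_STRICT) and PGM.cpp (HC_ARCH_BITS == 32).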
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r37731 → r39034)

  PDISCPUSTATE pDis = (PDISCPUSTATE)pvUserdata;
  PEMDISSTATE pState = (PEMDISSTATE)pDis->apvUserData[0];
+ # ifndef IN_RING0
  PVM pVM = pState->pVM;
+ # endif
  PVMCPU pVCpu = pState->pVCpu;
…
  Assert(!IOMIsLockOwner(pVM));
  int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
- AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
+ AssertRCSuccess(rc);
  }
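The hunk above also replaces a hand-rolled AssertMsg(rc == VINF_SUCCESS, ...) with the equivalent idiom AssertRCSuccess(rc). The distinction worth knowing: AssertRC() accepts any IPRT success status, while AssertRCSuccess() insists on exactly VINF_SUCCESS, catching informational statuses that would otherwise slip through. A sketch of the difference (simplified stand-ins, not the real IPRT macros, which also log and take custom messages; the status values here are illustrative):

    #include <assert.h>

    /* Illustrative status codes; IPRT defines many more. */
    #define VINF_SUCCESS          0
    #define VINF_SEM_BUSY         2                 /* a non-zero informational success */
    #define RT_SUCCESS(rc)        ((rc) >= 0)

    /* Simplified stand-ins for the two assertion flavours: */
    #define AssertRC(rc)          assert(RT_SUCCESS(rc))        /* any success passes */
    #define AssertRCSuccess(rc)   assert((rc) == VINF_SUCCESS)  /* only VINF_SUCCESS  */

    int main(void)
    {
        int rc = VINF_SEM_BUSY;
        AssertRC(rc);            /* passes: still a success status                 */
        /* AssertRCSuccess(rc);     would trip: caller wanted exactly VINF_SUCCESS */
        return 0;
    }

The same swap recurs below in PGMPool.cpp and PGMSharedPage.cpp.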
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r38079 → r39034)

  IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
  {
- PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+ /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
  uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
  PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
…
  IEMMODE, enmEffOpSize)
  {
- PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+ /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
  VBOXSTRICTRC rcStrict;
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (r38677 → r39034)

  VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
  {
- int rc2 = IOM_LOCK(pVM);
+ int rc2 = IOM_LOCK(pVM); NOREF(rc2);
  #ifndef IN_RING3
  if (rc2 == VERR_SEM_BUSY)
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r38953 → r39034)

  static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
  {
- PPGMCPU pPGM = &pVCpu->pgm.s;
  PVM pVM = pVCpu->CTX_SUFF(pVM);
  PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
…
  DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
  {
- PPGMCPU pPGM = &pVCpu->pgm.s;
  const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
  PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);

- PGM_LOCK_ASSERT_OWNER(PGMCPU2VM(pPGM));
+ PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));

  AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
…
  VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
  {
- PVM pVM = pVCpu->CTX_SUFF(pVM);
-
  VMCPU_ASSERT_EMT(pVCpu);
  LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));

  /* We assume we're only called in nested paging mode. */
- Assert(pVM->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
- Assert(pVM->pgm.s.fMappingsDisabled);
+ Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
+ Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
  Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
…
  VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
  {
- PVM pVM = pVCpu->CTX_SUFF(pVM);
  int rc;
…
  pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;

- RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
+ RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
  RTGCPHYS GCPhysCR3;
  switch (pVCpu->pgm.s.enmGuestMode)
…
  {
  pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
- Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
+ Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
+ Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
  }
  }
…
  VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
  {
- PVM pVM = pVCpu->CTX_SUFF(pVM);
-
  PGMMODE enmGuestMode;
…
  #ifdef IN_RING3
- return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
+ return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
  #else
  LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
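Several hunks above drop a cached pVM local in favour of pVCpu->CTX_SUFF(pVM) at the point of use. CTX_SUFF() is VirtualBox's context-suffix macro: it pastes R3, R0 or RC onto a member name so the same source resolves to whichever pointer is valid in the current compilation context (ring-3, ring-0 or raw-mode), each of which maps the VM structure at a different address. A simplified sketch of the mechanism (structures reduced to the relevant members; the real macros live in VBox/types.h and friends):

    /* Pick the member suffix for the current context; the real macros key off
     * IN_RING0 / IN_RC like this, with ring-3 as the default. */
    #if defined(IN_RING0)
    # define CTX_SUFF(name)  name##R0
    #elif defined(IN_RC)
    # define CTX_SUFF(name)  name##RC
    #else
    # define CTX_SUFF(name)  name##R3
    #endif

    typedef struct VM VM;               /* opaque here */

    typedef struct VMCPU
    {
        VM *pVMR3;                      /* ring-3 mapping of the owning VM      */
        VM *pVMR0;                      /* ring-0 mapping of the same structure */
        VM *pVMRC;                      /* raw-mode context mapping             */
    } VMCPU;

    /* pVCpu->CTX_SUFF(pVM) expands to pVCpu->pVMR3 (or pVMR0 / pVMRC), so the
     * code always dereferences a pointer that is valid where it runs. */
    static VM *getCurrentCtxVM(VMCPU *pVCpu)
    {
        return pVCpu->CTX_SUFF(pVM);
    }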
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r38953 → r39034)

  PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken)
  {
- PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

  *pfLockTaken = false;
…
  # else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
  /* PML4 */
- const unsigned iPml4 = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
+ /*const unsigned iPml4 = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;*/
  const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
  const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
…
  static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
  {
- PVM pVM = pVCpu->CTX_SUFF(pVM);
- PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);
  LogFlow(("SyncPage: GCPtrPage=%RGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));
…
  */
  unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
- const unsigned iPTDstPage = iPTDst;
  # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
  /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
…
  {
  PVM pVM = pVCpu->CTX_SUFF(pVM);
- PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+ PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);

  #if 0 /* rarely useful; leave for debugging. */
…
  rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
  # else
- AssertFailed();
+ AssertFailed(); NOREF(pMapping); /* can't happen for amd64 */
  # endif
  if (RT_FAILURE(rc))
…
  PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
  {
- PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

  LogFlow(("VerifyAccessSyncPage: GCPtrPage=%RGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));
…
  PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
  {
- PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

  LogFlow(("SyncCR3 fGlobal=%d\n", !!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)));
…
  unsigned cErrors = 0;
  PVM pVM = pVCpu->CTX_SUFF(pVM);
- PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+ PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);

  #if PGM_GST_TYPE == PGM_TYPE_PAE
…
  PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
  {
- PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

  /* Update guest paging info. */
…
  int rc = VINF_SUCCESS;
- PVM pVM = pVCpu->CTX_SUFF(pVM);
+ PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

  /*
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r38956 → r39034)

  }

+ #if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
+
+ /**
+  * Maps and locks a guest CR3 or PD (PAE) page.
+  *
+  * @returns VINF_SUCCESS on success.
+  * @returns VERR_PGM_PHYS_PAGE_RESERVED it it's a valid GC physical
+  *          page but has no physical backing.
+  * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
+  *          GC physical address.
+  * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
+  *          a dynamic ram chunk boundary
+  *
+  * @param   pVM     The VM handle.
+  * @param   GCPhys  The GC physical address to convert.
+  * @param   pR3Ptr  Where to store the R3 pointer on success. This may or
+  *                  may not be valid in ring-0 depending on the
+  *                  VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
+  *
+  * @remarks The caller must own the PGM lock.
+  */
+ int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
+ {
+     PPGMRAMRANGE pRam;
+     PPGMPAGE pPage;
+     int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
+     if (RT_SUCCESS(rc))
+         rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
+     Assert(rc <= VINF_SUCCESS);
+     return rc;
+ }
+
+
+ int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
+ {
+
+ }
+
+ #endif

  /**
…
  * Deal with any physical handlers.
  */
+ #ifdef IN_RING3
  PPGMPHYSHANDLER pPhys = NULL;
+ #endif
  if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
  {
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r37354 → r39034)

  static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
  {
- #ifndef IN_RC
- const PVM pVM = pPool->CTX_SUFF(pVM);
- #endif
  /*
  * Look up the GCPhys in the hash.
…
  PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
  {
- PVM pVM = pPool->CTX_SUFF(pVM);
- PGM_LOCK_ASSERT_OWNER(pVM);
+ PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));

  /*
…
  PPGMPOOLPAGE pgmPoolQueryPageForDbg(PPGMPOOL pPool, RTHCPHYS HCPhys)
  {
- PVM pVM = pPool->CTX_SUFF(pVM);
- PGM_LOCK_ASSERT_OWNER(pVM);
+ PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
  return (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
  }
trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r37527 → r39034)

  VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
  {
- PVM pVM = pTimer->CTX_SUFF(pVM);
- PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
-
  switch (pTimer->enmClock)
  {
…
  VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
  {
- PVM pVM = pTimer->CTX_SUFF(pVM);
- PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
-
  switch (pTimer->enmClock)
  {
…
  VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
  {
- PVM pVM = pTimer->CTX_SUFF(pVM);
- PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
-
  switch (pTimer->enmClock)
  {
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r38536 → r39034)

  }

- bool ret = RTAvlGCPtrInsert(&pGVM->gmm.s.pSharedModuleTree, &pRecVM->Core);
- Assert(ret);
+ bool fInsert = RTAvlGCPtrInsert(&pGVM->gmm.s.pSharedModuleTree, &pRecVM->Core);
+ Assert(fInsert); NOREF(fInsert);

  Log(("GMMR0RegisterSharedModule: new local module %s\n", pszModuleName));
…
  rc = VINF_SUCCESS;

- bool ret = RTAvlGCPtrInsert(&pGMM->pGlobalSharedModuleTree, &pGlobalModule->Core);
- Assert(ret);
+ bool fInsert = RTAvlGCPtrInsert(&pGMM->pGlobalSharedModuleTree, &pGlobalModule->Core);
+ Assert(fInsert); NOREF(fInsert);

  Log(("GMMR0RegisterSharedModule: new global module %s\n", pszModuleName));
…
  static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM)
  {
- PGVM pGVM = (PGVM)pvGVM;
- PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)pNode;
+ PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)pNode;
+ NOREF(pvGVM);

  Assert(pRecVM->pGlobalModule || pRecVM->fCollision);
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r38707 → r39034)

  descriptor[0] = pVCpu->hwaccm.s.uCurrentASID;
  descriptor[1] = GCPtr;
- int rc = VMXR0InvVPID(enmFlush, &descriptor[0]);
+ int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
  AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu->hwaccm.s.uCurrentASID, GCPtr, rc));
  }
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r38954 → r39034)

  VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
  {
- PVM pVM = pVCpu->pVMR0;
  if (pVCpu->vmm.s.pR0LoggerR0)
  pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
…
  VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
  {
- PVM pVM = pVCpu->pVMR0;
  if (pVCpu->vmm.s.pR0LoggerR0)
  pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
trunk/src/VBox/VMM/VMMR3/CPUMDbg.cpp (r35625 → r39034)

  static DECLCALLBACK(int) cpumR3RegGstGet_crX(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
- PVMCPU pVCpu = (PVMCPU)pvUser;
- void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
-
+ PVMCPU pVCpu = (PVMCPU)pvUser;
  VMCPU_ASSERT_EMT(pVCpu);
…
  {
  int rc;
- PVMCPU pVCpu = (PVMCPU)pvUser;
- void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
+ PVMCPU pVCpu = (PVMCPU)pvUser;

  VMCPU_ASSERT_EMT(pVCpu);
…
  static DECLCALLBACK(int) cpumR3RegGstGet_drX(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
- PVMCPU pVCpu = (PVMCPU)pvUser;
- void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
-
+ PVMCPU pVCpu = (PVMCPU)pvUser;
  VMCPU_ASSERT_EMT(pVCpu);
…
  {
  int rc;
- PVMCPU pVCpu = (PVMCPU)pvUser;
- void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
+ PVMCPU pVCpu = (PVMCPU)pvUser;

  VMCPU_ASSERT_EMT(pVCpu);
…
  static DECLCALLBACK(int) cpumR3RegGstGet_msr(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
- PVMCPU pVCpu = (PVMCPU)pvUser;
- void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
-
- VMCPU_ASSERT_EMT(pVCpu);
+ PVMCPU pVCpu = (PVMCPU)pvUser;
+ VMCPU_ASSERT_EMT(pVCpu);
+
  uint64_t u64Value;
  int rc = CPUMQueryGuestMsr(pVCpu, pDesc->offRegister, &u64Value);
…
  int rc;
  PVMCPU pVCpu = (PVMCPU)pvUser;
- void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;

  VMCPU_ASSERT_EMT(pVCpu);
…
  static DECLCALLBACK(int) cpumR3RegGstGet_stN(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
- PVMCPU pVCpu = (PVMCPU)pvUser;
- void const *pv = (uint8_t const *)&pVCpu->cpum.s.Guest + pDesc->offRegister;
-
+ PVMCPU pVCpu = (PVMCPU)pvUser;
  VMCPU_ASSERT_EMT(pVCpu);
  Assert(pDesc->enmType == DBGFREGVALTYPE_R80);
…
  static DECLCALLBACK(int) cpumR3RegHyperGet_drX(void *pvUser, PCDBGFREGDESC pDesc, PDBGFREGVAL pValue)
  {
- PVMCPU pVCpu = (PVMCPU)pvUser;
- void const *pv = (uint8_t const *)&pVCpu->cpum + pDesc->offRegister;
-
+ PVMCPU pVCpu = (PVMCPU)pvUser;
  VMCPU_ASSERT_EMT(pVCpu);
trunk/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp (r35346 → r39034)

  ASMAtomicXchgHandle(&pVM->dbgf.s.ahAsAliases[DBGF_AS_ALIAS_2_INDEX(hAlias)], hRealAliasFor, &hAsOld);
  uint32_t cRefs = RTDbgAsRelease(hAsOld);
- Assert(cRefs > 0);
- Assert(cRefs != UINT32_MAX);
+ Assert(cRefs > 0); Assert(cRefs != UINT32_MAX); NOREF(cRefs);
  rc = VINF_SUCCESS;
  }
trunk/src/VBox/VMM/VMMR3/DBGFReg.cpp (r38838 → r39034)

  {
  bool fInserted2 = RTStrSpaceInsert(&pVM->dbgf.s.RegSpace, &paLookupRecs[iLookupRec].Core);
- AssertMsg(fInserted2, ("'%s'", paLookupRecs[iLookupRec].Core.pszString));
+ AssertMsg(fInserted2, ("'%s'", paLookupRecs[iLookupRec].Core.pszString)); NOREF(fInserted2);
  }
…
  static void dbgfR3RegNmQueryAllInSet(PCDBGFREGSET pSet, size_t cRegsToQuery, PDBGFREGENTRYNM paRegs, size_t cRegs)
  {
- int rc = VINF_SUCCESS;
-
  if (cRegsToQuery > pSet->cDescs)
  cRegsToQuery = pSet->cDescs;
trunk/src/VBox/VMM/VMMR3/EMHwaccm.cpp (r35346 → r39034)

  #endif
  {
+ #ifdef LOG_ENABLED
  PCPUMCTX pCtx = pVCpu->em.s.pCtx;
+ #endif
  int rc;
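In the hunk above, pCtx feeds only logging statements, so its declaration is now compiled in only when LOG_ENABLED is defined, avoiding both the unused-variable warning and a pointless fetch in non-logging builds. A sketch of the guard pattern (Log() and LOG_ENABLED mirror the VBox names; emStep() and its parameter are illustrative):

    #include <stdio.h>

    #ifdef LOG_ENABLED
    # define Log(a) printf a                /* logging compiled in  */
    #else
    # define Log(a) do { } while (0)        /* logging compiled out */
    #endif

    /* Hypothetical stand-in for the EM state fetch in the real code. */
    static void emStep(void *pvState)
    {
    #ifdef LOG_ENABLED
        void *pCtx = pvState;   /* consumed only by Log(), so guard the declaration */
    #else
        (void)pvState;          /* parameter would otherwise be unused here         */
    #endif
        Log(("emStep: pCtx=%p\n", pCtx));
        /* ... actual work that does not need pCtx ... */
    }

    int main(void)
    {
        emStep(0);
        return 0;
    }

The same trick guards a statistics-only local in PDMAsyncCompletionFile.cpp and strict-only or 32-bit-only locals in PGMMap.cpp and PGM.cpp further down.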
trunk/src/VBox/VMM/VMMR3/FTM.cpp (r38838 → r39034)

  pNode->pPage = (void *)(pNode + 1);
  bool fRet = RTAvlGCPhysInsert(&pVM->ftm.s.standby.pPhysPageTree, &pNode->Core);
- Assert(fRet);
+ Assert(fRet); NOREF(fRet);
  }
…
  return VINF_SUCCESS;

- /** todo:verify VM config. */
+ /** @todo verify VM config. */

  /*
…
  * Command processing loop.
  */
- bool fDone = false;
+ //bool fDone = false;
  for (;;)
  {
trunk/src/VBox/VMM/VMMR3/HWACCM.cpp (r38838 → r39034)

  for (VMCPUID i = 0; i < pVM->cCpus; i++)
  {
- PVMCPU pVCpu = &pVM->aCpus[i];
+ PVMCPU pVCpu = &pVM->aCpus[i]; NOREF(pVCpu);

  #ifdef VBOX_WITH_STATISTICS
trunk/src/VBox/VMM/VMMR3/PATM.cpp (r36969 → r39034)

  /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
  bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
- Assert(fInserted);
+ Assert(fInserted); NOREF(fInserted);

  LogRel(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
…
  /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
  bool fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
- Assert(fInserted);
+ Assert(fInserted); NOREF(fInserted);

  /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
trunk/src/VBox/VMM/VMMR3/PDMAsyncCompletionFile.cpp (r39031 → r39034)

  size_t cbRead)
  {
+ #ifdef VBOX_WITH_STATISTICS
  PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;
+ #endif

  LogFlowFunc(("pTask=%#p pEndpoint=%#p off=%RTfoff paSegments=%#p cSegments=%zu cbRead=%zu\n",
trunk/src/VBox/VMM/VMMR3/PDMAsyncCompletionFileNormal.cpp (r37596 → r39034)

  /* $Id$ */
  /** @file
- * PDM Async I/O - Transport data asynchronous in R3 using EMT.
- * Async File I/O manager.
+ * PDM Async I/O - Async File I/O manager.
  */

  /*
- * Copyright (C) 2006-2008 Oracle Corporation
+ * Copyright (C) 2006-2011 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
…
  * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
  */
+
+ /*******************************************************************************
+ *   Header Files                                                               *
+ *******************************************************************************/
  #define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
  #define RT_STRICT
…
  /** The update period for the I/O load statistics in ms. */
- #define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD 1000
+ #define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD   1000
  /** Maximum number of requests a manager will handle. */
- #define PDMACEPFILEMGR_REQS_STEP 512
+ #define PDMACEPFILEMGR_REQS_STEP            512
+
…
  int rc, size_t cbTransfered);

+
  int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
  {
- int rc = VINF_SUCCESS;
-
  pAioMgr->cRequestsActiveMax = PDMACEPFILEMGR_REQS_STEP;

- rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
+ int rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
  if (rc == VERR_OUT_OF_RANGE)
  rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, pAioMgr->cRequestsActiveMax);
…
  static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
  {
- PPDMACEPFILEMGR pAioMgrNew = NULL;
- int rc = VINF_SUCCESS;
-
  /*
  * Check if balancing would improve the situation.
  */
  if (pdmacFileAioMgrNormalIsBalancePossible(pAioMgr))
  {
- PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
-
- rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
+ PPDMACEPFILEMGR pAioMgrNew = NULL;
+
+ int rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
  if (RT_SUCCESS(rc))
  {
…
  static int pdmacFileAioMgrNormalGrow(PPDMACEPFILEMGR pAioMgr)
  {
- int rc = VINF_SUCCESS;
- RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;
-
  LogFlowFunc(("pAioMgr=%#p\n", pAioMgr));
…
  {
  RTFileClose(pCurr->hFile);
- rc = RTFileOpen(&pCurr->hFile, pCurr->Core.pszUri, pCurr->fFlags);
- AssertRC(rc);
+ int rc2 = RTFileOpen(&pCurr->hFile, pCurr->Core.pszUri, pCurr->fFlags); AssertRC(rc);

  pCurr = pCurr->AioMgr.pEndpointNext;
…
  pAioMgr->cRequestsActiveMax += PDMACEPFILEMGR_REQS_STEP;

- rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS);
+ RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;
+ int rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS);
  if (rc == VERR_OUT_OF_RANGE)
  rc = RTFileAioCtxCreate(&hAioCtxNew, pAioMgr->cRequestsActiveMax);
…
  /* Close the old context. */
  rc = RTFileAioCtxDestroy(pAioMgr->hAioCtx);
- AssertRC(rc);
+ AssertRC(rc); /** @todo r=bird: Ignoring error code, will propagate. */

  pAioMgr->hAioCtx = hAioCtxNew;
…
  /* Assign the file to the new context. */
  pCurr = pAioMgr->pEndpointsHead;
-
  while (pCurr)
  {
  rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pCurr->hFile);
- AssertRC(rc);
+ AssertRC(rc); /** @todo r=bird: Ignoring error code, will propagate. */

  pCurr = pCurr->AioMgr.pEndpointNext;
…
  DECLINLINE(bool) pdmacFileAioMgrNormalRcIsFatal(int rcReq)
  {
- return    rcReq == VERR_DEV_IO_ERROR
-        || rcReq == VERR_FILE_IO_ERROR
-        || rcReq == VERR_DISK_IO_ERROR
-        || rcReq == VERR_DISK_FULL
-        || rcReq == VERR_FILE_TOO_BIG;
+ return rcReq == VERR_DEV_IO_ERROR
+     || rcReq == VERR_FILE_IO_ERROR
+     || rcReq == VERR_DISK_IO_ERROR
+     || rcReq == VERR_DISK_FULL
+     || rcReq == VERR_FILE_TOO_BIG;
  }
…
  static RTFILEAIOREQ pdmacFileAioMgrNormalRequestAlloc(PPDMACEPFILEMGR pAioMgr)
  {
- RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
-
  /* Get a request handle. */
+ RTFILEAIOREQ hReq;
  if (pAioMgr->iFreeEntry > 0)
  {
…
  int rc = RTFileAioReqCreate(&hReq);
- AssertRC(rc);
+ AssertRCReturn(rc, NIL_RTFILEAIOREQ);
  }
…
  PRTFILEAIOREQ pahReqs, unsigned cReqs)
  {
- int rc;
-
  pAioMgr->cRequestsActive += cReqs;
  pEndpoint->AioMgr.cRequestsActive += cReqs;
…
  LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));

- rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
+ int rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
  if (RT_FAILURE(rc))
  {
…
  else if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
- {
- PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(pahReqs[i]);
-
  pdmacFileAioMgrNormalReqCompleteRc(pAioMgr, pahReqs[i], rcReq, 0);
- }
…
  PPDMACTASKFILE pTask)
  {
- PPDMACFILERANGELOCK pRangeLock = NULL; /** < Range lock */
-
  AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
            || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ,
            ("Invalid task type %d\n", pTask->enmTransferType));

+ PPDMACFILERANGELOCK pRangeLock;
  pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetRangeGet(pEndpoint->AioMgr.pTreeRangesLocked, offStart);
  if (!pRangeLock)
…
  /* Check whether we have one of the situations explained below */
  if (   pRangeLock
- #if 0 /** @todo:later. For now we will just block all requests if they interfere */
+ #if 0 /** @todo later. For now we will just block all requests if they interfere */
  && (   (pRangeLock->fReadLock && pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
      || (!pRangeLock->fReadLock)
…
  PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
  {
- int rc = VINF_SUCCESS;
- RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
- PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
- void *pvBuf = pTask->DataSeg.pvSeg;
-
  AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
-           || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
-           ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
-           pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));
+           || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
+           ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
+           pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));

  pTask->fPrefetch = false;
…
  * the same range. This will result in data corruption if both are executed concurrently.
  */
+ int rc = VINF_SUCCESS;
  bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask);
-
  if (!fLocked)
  {
  /* Get a request handle. */
- hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
+ RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
  AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
…
  PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
  {
- int rc = VINF_SUCCESS;
- RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
- PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
- void *pvBuf = pTask->DataSeg.pvSeg;
-
  /*
  * Check if the alignment requirements are met.
…
  * the same range. This will result in data corruption if both are executed concurrently.
  */
+ int rc = VINF_SUCCESS;
  bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask);
-
  if (!fLocked)
  {
+ PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
+ void *pvBuf = pTask->DataSeg.pvSeg;
+
  /* Get a request handle. */
- hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
+ RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
  AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
…
  RTFILEAIOREQ apReqs[20];
  unsigned cRequests = 0;
- unsigned cMaxRequests = pAioMgr->cRequestsActiveMax - pAioMgr->cRequestsActive;
- int rc = VINF_SUCCESS;
+ int rc = VINF_SUCCESS;

  AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
…
  pEndpoint->AioMgr.pReqsPendingTail = NULL;
  rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
- AssertRC(rc);
+ AssertRC(rc); /** @todo r=bird: status code potentially overwritten. */
  }
…
  int pdmacFileAioMgrNormal(RTTHREAD ThreadSelf, void *pvUser)
  {
- int rc = VINF_SUCCESS;
- PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
- uint64_t uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
-
- while (   (pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING)
-        || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING)
-        || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
+ int             rc         = VINF_SUCCESS;
+ PPDMACEPFILEMGR pAioMgr    = (PPDMACEPFILEMGR)pvUser;
+ uint64_t        uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
+
+ while (   pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING
+        || pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING
+        || pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING)
  {
  if (!pAioMgr->cRequestsActive)
trunk/src/VBox/VMM/VMMR3/PDMBlkCache.cpp (r38880 → r39034)

  /* Insert into the tree. */
  bool fInserted = RTAvlrU64Insert(pBlkCache->pTree, &pEntry->Core);
- Assert(fInserted);
+ Assert(fInserted); NOREF(fInserted);

  /* Add to the dirty list. */
…
  static PPDMBLKCACHEENTRY pdmBlkCacheGetCacheEntryByOffset(PPDMBLKCACHE pBlkCache, uint64_t off)
  {
- PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
- PPDMBLKCACHEENTRY pEntry = NULL;
-
- STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
+ STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeGet, Cache);

  RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
- pEntry = (PPDMBLKCACHEENTRY)RTAvlrU64RangeGet(pBlkCache->pTree, off);
+ PPDMBLKCACHEENTRY pEntry = (PPDMBLKCACHEENTRY)RTAvlrU64RangeGet(pBlkCache->pTree, off);
  if (pEntry)
  pdmBlkCacheEntryRef(pEntry);
  RTSemRWReleaseRead(pBlkCache->SemRWEntries);

- STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+ STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeGet, Cache);

  return pEntry;
…
  PPDMBLKCACHEENTRY *ppEntryAbove)
  {
- PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
-
- STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
+ STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeGet, Cache);

  RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
…
  RTSemRWReleaseRead(pBlkCache->SemRWEntries);

- STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+ STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeGet, Cache);
  }

  static void pdmBlkCacheInsertEntry(PPDMBLKCACHE pBlkCache, PPDMBLKCACHEENTRY pEntry)
  {
- PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
-
- STAM_PROFILE_ADV_START(&pCache->StatTreeInsert, Cache);
+ STAM_PROFILE_ADV_START(&pBlkCache->pCache->StatTreeInsert, Cache);
  RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
  bool fInserted = RTAvlrU64Insert(pBlkCache->pTree, &pEntry->Core);
- AssertMsg(fInserted, ("Node was not inserted into tree\n"));
- STAM_PROFILE_ADV_STOP(&pCache->StatTreeInsert, Cache);
+ AssertMsg(fInserted, ("Node was not inserted into tree\n")); NOREF(fInserted);
+ STAM_PROFILE_ADV_STOP(&pBlkCache->pCache->StatTreeInsert, Cache);
  RTSemRWReleaseWrite(pBlkCache->SemRWEntries);
  }
…
  {
  pdmBlkCacheLockEnter(pCache);
- pdmBlkCacheEntryRemoveFromList(pEntry);␠
+ pdmBlkCacheEntryRemoveFromList(pEntry);

  STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
…
  {
  pdmBlkCacheLockEnter(pCache);
- pdmBlkCacheEntryRemoveFromList(pEntry);␠
+ pdmBlkCacheEntryRemoveFromList(pEntry);

  RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
…
  {
  pdmBlkCacheLockEnter(pCache);
- pdmBlkCacheEntryRemoveFromList(pEntry);␠
+ pdmBlkCacheEntryRemoveFromList(pEntry);

  RTSemRWRequestWrite(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
…
  PPDMBLKCACHEREQ pReq = pWaiter->pReq;

- pdmBlkCacheReqUpdate(pBlkCache, pWaiter->pReq, rc, true);
+ pdmBlkCacheReqUpdate(pBlkCache, pReq, rc, true);

  RTMemFree(pWaiter);

(␠ marks a stripped trailing space; those three hunks are whitespace-only.)
trunk/src/VBox/VMM/VMMR3/PDMUsb.cpp (r37879 → r39034)

  PVM pVM = pUsbIns->Internal.s.pVM;
  VM_ASSERT_EMT(pVM);
- /** @todo int rc = DBGFR3InfoRegisterUsb(pVM, pszName, pszDesc, pfnHandler, pUsbIns); */
+ NOREF(pVM); /** @todo int rc = DBGFR3InfoRegisterUsb(pVM, pszName, pszDesc, pfnHandler, pUsbIns); */
  int rc = VERR_NOT_IMPLEMENTED; AssertFailed();
trunk/src/VBox/VMM/VMMR3/PGM.cpp (r38953 → r39034)

  VMMR3DECL(int) PGMR3ChangeMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmGuestMode)
  {
+ #if HC_ARCH_BITS == 32
  bool fIsOldGuestPagingMode64Bits = (pVCpu->pgm.s.enmGuestMode >= PGMMODE_AMD64);
+ #endif
  bool fIsNewGuestPagingMode64Bits = (enmGuestMode >= PGMMODE_AMD64);
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp (r37187 → r39034)

  /* partial read that failed, chop it up in pages. */
  *pcbRead = 0;
- size_t const cbReq = cb;
  rc = VINF_SUCCESS;
  while (cb > 0)
…
  uint32_t iFirst, iLast;
- uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
+ pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
  for (uint32_t i = iFirst; i <= iLast; i++)
  {
…
  uint32_t iFirst, iLast;
- uint64_t u64BaseAddress = pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
+ pgmR3DumpHierarchyCalcRange(pState, X86_PD_SHIFT, X86_PG_ENTRIES, &iFirst, &iLast);
  for (uint32_t i = iFirst; i <= iLast; i++)
  {
trunk/src/VBox/VMM/VMMR3/PGMMap.cpp (r36891 → r39034)

  {
  PPGM pPGM = &pVM->pgm.s;
+ #ifdef VBOX_STRICT
  PVMCPU pVCpu = VMMGetCpu(pVM);
+ #endif
  pgmLock(pVM); /* to avoid assertions */
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp (r38956 → r39034)

  #ifdef VBOX_STRICT
- bool fOk = true;
  uint32_t i;
  for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp (r37354 → r39034)

  /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */
  int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK);
- Assert(rc == VINF_SUCCESS);
+ AssertRCSuccess(rc);
  pPage->fDirty = false;
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp (r38953 → r39034)

  static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
  {
- int rc;
- PPGM pPGM = &pVM->pgm.s;
+ int rc;

  /*
trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp (r38953 → r39034)

  /* We must stall other VCPUs as we'd otherwise have to send IPI flush commands for every single change we make. */
  int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, pgmR3SharedModuleRegRendezvous, &idCpu);
- Assert(rc == VINF_SUCCESS);
+ AssertRCSuccess(rc);
  }
  #endif
trunk/src/VBox/VMM/VMMR3/PGMShw.h (r35333 → r39034)

  PPGMPOOLPAGE pNewShwPageCR3;
  PVM pVM = pVCpu->pVMR3;
- PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

  Assert(HWACCMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
trunk/src/VBox/VMM/VMMR3/SELM.cpp (r35346 → r39034)

  * Figure out the size of what need to monitor.
  */
- bool fNoRing1Stack = true;
  /* We're not interested in any 16-bit TSSes. */
  uint32_t cbMonitoredTss = cbTss;
trunk/src/VBox/VMM/VMMR3/TM.cpp (r38838 → r39034)

  * Change to the DESTROY state.
  */
- TMTIMERSTATE enmState = pTimer->enmState;
- TMTIMERSTATE enmNewState = enmState;
+ TMTIMERSTATE const enmState = pTimer->enmState;
  Log2(("TMTimerDestroy: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
  pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
…
  /* Check if stopped by expired timer. */
- uint64_t u64Expire = pNext->u64Expire;
- if (u64Now >= pNext->u64Expire)
+ uint64_t const u64Expire = pNext->u64Expire;
+ if (u64Now >= u64Expire)
  {
  STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop);
- u64Now = pNext->u64Expire;
+ u64Now = u64Expire;
  ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
  ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
trunk/src/VBox/VMM/VMMR3/VM.cpp (r38838 → r39034)

  {
  int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
- AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
+ AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc)); NOREF(rc2);
  }
  }
trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp (r36627 → r39034)

  return VERR_INVALID_PARAMETER;

-
- int rc = VINF_SUCCESS;
  PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
trunk/src/VBox/VMM/include/IOMInline.h (r37467 → r39034)

  Assert(cRefs > 1);
  Assert(cRefs < _1M);
+ NOREF(cRefs);
  }
trunk/src/VBox/VMM/include/PDMAsyncCompletionFileInternal.h (r37596 → r39034)

  /** File handle. */
  RTFILE hFile;
- /**
-  * Real size of the file. Only updated if
-  * data is appended.
-  */
+ /** Real size of the file. Only updated if data is appended. */
  volatile uint64_t cbFile;
  /** List of new tasks. */
trunk/src/VBox/VMM/include/PGMInline.h (r38965 → r39034)

  PX86PDPT pGuestPdpt;
  int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
- AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
+ AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
  return pGuestPdpt;
  }
trunk/src/VBox/VMM/include/PGMInternal.h (r38956 → r39034)

  int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
  int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr);
+ int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr);
  int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
  int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);