Changeset 39034 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp: Oct 19, 2011 11:43:52 AM
Location:  trunk/src/VBox/VMM/VMMAll
Files:     8 edited
Legend: unmarked lines are unchanged context, lines prefixed with + were added in r39034, lines prefixed with - were removed, and "… …" marks elided lines between hunks.
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r37731 → r39034)

      PDISCPUSTATE pDis = (PDISCPUSTATE)pvUserdata;
      PEMDISSTATE pState = (PEMDISSTATE)pDis->apvUserData[0];
+ # ifndef IN_RING0
      PVM pVM = pState->pVM;
+ # endif
      PVMCPU pVCpu = pState->pVCpu;
… …
      Assert(!IOMIsLockOwner(pVM));
      int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
-     AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
+     AssertRCSuccess(rc);
      }
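Both hunks are typical of this changeset: keep the code building warning-free when a local or a status code is only examined in some build configurations. The sketch below shows the same idiom with simplified stand-ins for the IPRT macros (the real NOREF() and AssertRCSuccess() live in the IPRT headers; VINF_SUCCESS is 0):

    #include <assert.h>
    #include <stdio.h>

    /* Simplified stand-ins for the IPRT macros used throughout this changeset. */
    #define VINF_SUCCESS        0
    #define NOREF(var)          ((void)(var))                /* mark a variable as deliberately unused */
    #define AssertRCSuccess(rc) assert((rc) == VINF_SUCCESS) /* replaces AssertMsg(rc == VINF_SUCCESS, ...) */

    static void enterAndWork(void)
    {
        int rc = VINF_SUCCESS;   /* imagine a lock-enter call returning a status here */
        AssertRCSuccess(rc);     /* replaces AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)) */
        NOREF(rc);               /* assert() vanishes in NDEBUG builds; this keeps rc "used" anyway */

    #ifndef IN_RING0
        const char *pszWhere = "ring-3";   /* only referenced outside ring-0, so only declared there */
        printf("doing the work in %s\n", pszWhere);
    #endif
    }

    int main(void)
    {
        enterAndWork();
        return 0;
    }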
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r38079 → r39034)

  IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
  {
-     PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+     /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
      uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
      PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
… …
                  IEMMODE, enmEffOpSize)
  {
-     PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+     /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
      VBOXSTRICTRC rcStrict;
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (r38677 → r39034)

  VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
  {
-     int rc2 = IOM_LOCK(pVM);
+     int rc2 = IOM_LOCK(pVM); NOREF(rc2);
  #ifndef IN_RING3
      if (rc2 == VERR_SEM_BUSY)
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r38953 → r39034)

  static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
  {
-     PPGMCPU pPGM = &pVCpu->pgm.s;
      PVM pVM = pVCpu->CTX_SUFF(pVM);
      PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
… …
  DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
  {
-     PPGMCPU pPGM = &pVCpu->pgm.s;
      const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
      PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);

-     PGM_LOCK_ASSERT_OWNER(PGMCPU2VM(pPGM));
+     PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));

      AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
… …
  VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
  {
-     PVM pVM = pVCpu->CTX_SUFF(pVM);
-
      VMCPU_ASSERT_EMT(pVCpu);
      LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));

      /* We assume we're only called in nested paging mode. */
-     Assert(pVM->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
-     Assert(pVM->pgm.s.fMappingsDisabled);
+     Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
+     Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
      Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
… …
  VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
  {
-     PVM pVM = pVCpu->CTX_SUFF(pVM);
      int rc;
… …
      pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;

-     RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
+     RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
      RTGCPHYS GCPhysCR3;
      switch (pVCpu->pgm.s.enmGuestMode)
… …
      {
          pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
-         Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
+         Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
+         Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
      }
  }
… …
  VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
  {
-     PVM pVM = pVCpu->CTX_SUFF(pVM);
      PGMMODE enmGuestMode;
… …
  #ifdef IN_RING3
-     return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
+     return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
  #else
      LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
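Most of the PGMAll.cpp hunks drop a cached "PVM pVM = pVCpu->CTX_SUFF(pVM);" local and fetch the pointer at the point of use instead, so builds in which the remaining uses are compiled out do not warn about an unused variable. CTX_SUFF() appends a context suffix (R3, R0 or RC) to the member name, picking whichever pointer is valid for the current compilation context. A self-contained sketch of that idiom follows; the structure and field names are invented for illustration and are not the real VBox types:

    #include <stdio.h>

    /* Pick the pointer member that is valid in the current compilation context,
     * in the same spirit as the real CTX_SUFF() macro (ring-3 / ring-0 / raw-mode). */
    #if defined(IN_RING0)
    # define CTX_SUFF(a_Name)  a_Name##R0
    #elif defined(IN_RC)
    # define CTX_SUFF(a_Name)  a_Name##RC
    #else
    # define CTX_SUFF(a_Name)  a_Name##R3
    #endif

    typedef struct SKETCHVM
    {
        int cSyncCalls;
    } SKETCHVM;

    typedef struct SKETCHVMCPU
    {
        SKETCHVM *pVMR3;   /* pointer usable from ring-3 */
        SKETCHVM *pVMR0;   /* pointer usable from ring-0 */
        SKETCHVM *pVMRC;   /* pointer usable from the raw-mode context */
    } SKETCHVMCPU;

    /* No 'SKETCHVM *pVM = ...' local that some configurations never read;
     * the per-context pointer is dereferenced right where it is needed. */
    static void recordSync(SKETCHVMCPU *pVCpu)
    {
        pVCpu->CTX_SUFF(pVM)->cSyncCalls++;
    }

    int main(void)
    {
        SKETCHVM    vm    = { 0 };
        SKETCHVMCPU vmcpu = { &vm, &vm, &vm };
        recordSync(&vmcpu);
        printf("cSyncCalls=%d\n", vm.cSyncCalls);
        return 0;
    }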
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r38953 → r39034)

  PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken)
  {
-     PVM pVM = pVCpu->CTX_SUFF(pVM);
+     PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

      *pfLockTaken = false;
… …
  # else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
      /* PML4 */
-     const unsigned iPml4 = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
+     /*const unsigned iPml4 = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;*/
      const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
      const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
… …
  static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
  {
      PVM pVM = pVCpu->CTX_SUFF(pVM);
-     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);
      LogFlow(("SyncPage: GCPtrPage=%RGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));
… …
      */
      unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
-     const unsigned iPTDstPage = iPTDst;
  # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
      /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
… …
  {
      PVM pVM = pVCpu->CTX_SUFF(pVM);
-     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);

  #if 0 /* rarely useful; leave for debugging. */
… …
      rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
  # else
-     AssertFailed();
+     AssertFailed(); NOREF(pMapping); /* can't happen for amd64 */
  # endif
      if (RT_FAILURE(rc))
… …
  PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
  {
-     PVM pVM = pVCpu->CTX_SUFF(pVM);
+     PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

      LogFlow(("VerifyAccessSyncPage: GCPtrPage=%RGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));
… …
  PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
  {
-     PVM pVM = pVCpu->CTX_SUFF(pVM);
+     PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

      LogFlow(("SyncCR3 fGlobal=%d\n", !!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)));
… …
      unsigned cErrors = 0;
      PVM pVM = pVCpu->CTX_SUFF(pVM);
-     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);

  #if PGM_GST_TYPE == PGM_TYPE_PAE
… …
  PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
  {
-     PVM pVM = pVCpu->CTX_SUFF(pVM);
+     PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

      /* Update guest paging info. */
… …

      int rc = VINF_SUCCESS;
-     PVM pVM = pVCpu->CTX_SUFF(pVM);
+     PVM pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);

      /*
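PGMAllBth.h is compiled several times, once per shadow/guest paging-mode pair, with PGM_BTH_NAME() mangling the mode into each function name; a local such as pVM or pPool can be referenced in one instantiation and dead in another, which is why the changeset adds NOREF() here instead of deleting the declarations. Below is a toy, self-contained sketch of that multiple-instantiation idiom (every macro and function name in it is invented for the example, and the two "flavours" are inlined rather than pulled in from a shared header):

    #include <stdio.h>

    /* Paste the current "mode" into the function name, in the same spirit as
     * PGM_BTH_NAME() keeping the per-mode instantiations of one body apart. */
    #define BTH_NAME(a_Name)              BTH_NAME_EX(SKETCH_MODE, a_Name)
    #define BTH_NAME_EX(a_Mode, a_Name)   BTH_NAME_EX2(a_Mode, a_Name)
    #define BTH_NAME_EX2(a_Mode, a_Name)  sketchBth##a_Mode##a_Name

    /* --- first "inclusion" of the template body, 32-bit flavour --- */
    #define SKETCH_MODE  32Bit
    static int BTH_NAME(SyncPage)(int iPage)
    {
        return iPage;          /* pretend: plain 4 KiB page handling */
    }
    #undef SKETCH_MODE

    /* --- second "inclusion" of the same body, AMD64 flavour --- */
    #define SKETCH_MODE  Amd64
    static int BTH_NAME(SyncPage)(int iPage)
    {
        return iPage * 2;      /* pretend: a different page-table layout */
    }
    #undef SKETCH_MODE

    int main(void)
    {
        printf("%d %d\n", sketchBth32BitSyncPage(7), sketchBthAmd64SyncPage(7));
        return 0;
    }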
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r38956 → r39034)

  }

+ #if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
+
+ /**
+  * Maps and locks a guest CR3 or PD (PAE) page.
+  *
+  * @returns VINF_SUCCESS on success.
+  * @returns VERR_PGM_PHYS_PAGE_RESERVED it it's a valid GC physical
+  *          page but has no physical backing.
+  * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
+  *          GC physical address.
+  * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
+  *          a dynamic ram chunk boundary
+  *
+  * @param   pVM     The VM handle.
+  * @param   GCPhys  The GC physical address to convert.
+  * @param   pR3Ptr  Where to store the R3 pointer on success. This may or
+  *                  may not be valid in ring-0 depending on the
+  *                  VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
+  *
+  * @remarks The caller must own the PGM lock.
+  */
+ int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
+ {
+
+     PPGMRAMRANGE pRam;
+     PPGMPAGE pPage;
+     int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
+     if (RT_SUCCESS(rc))
+         rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
+     Assert(rc <= VINF_SUCCESS);
+     return rc;
+ }
+
+
+ int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
+ {
+
+ }
+
+ #endif

  /**
… …
       * Deal with any physical handlers.
       */
+ #ifdef IN_RING3
      PPGMPHYSHANDLER pPhys = NULL;
+ #endif
      if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
      {
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r37354 → r39034)

  static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
  {
- #ifndef IN_RC
-     const PVM pVM = pPool->CTX_SUFF(pVM);
- #endif
      /*
       * Look up the GCPhys in the hash.
… …
  PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
  {
-     PVM pVM = pPool->CTX_SUFF(pVM);
-     PGM_LOCK_ASSERT_OWNER(pVM);
+     PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));

      /*
… …
  PPGMPOOLPAGE pgmPoolQueryPageForDbg(PPGMPOOL pPool, RTHCPHYS HCPhys)
  {
-     PVM pVM = pPool->CTX_SUFF(pVM);
-     PGM_LOCK_ASSERT_OWNER(pVM);
+     PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
      return (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
  }
trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r37527 → r39034)

  VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
  {
-     PVM pVM = pTimer->CTX_SUFF(pVM);
-     PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
-
      switch (pTimer->enmClock)
      {
… …
  VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
  {
-     PVM pVM = pTimer->CTX_SUFF(pVM);
-     PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
-
      switch (pTimer->enmClock)
      {
… …
  VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
  {
-     PVM pVM = pTimer->CTX_SUFF(pVM);
-     PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
-
      switch (pTimer->enmClock)
      {