Changeset 30326 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jun 21, 2010 12:35:33 PM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 3 edited
Legend:
- removed lines are prefixed with "-"
- added lines are prefixed with "+"
- unmodified context lines have no prefix, "…" marks skipped unchanged lines
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r29250 → r30326

 
 /**
- * Sets (replaces) the page flags for a range of pages in the shadow context.
- *
- * @returns VBox status.
- * @param   pVCpu       VMCPU handle.
- * @param   GCPtr       The address of the first page.
- * @param   cb          The size of the range in bytes.
- * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
- * @remark  You must use PGMMapSetPage() for pages in a mapping.
- */
-VMMDECL(int) PGMShwSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
-{
-    return PGMShwModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
-}
-
-
-/**
  * Modify page flags for a range of pages in the shadow context.
  *
…
  * @param   pVCpu       VMCPU handle.
  * @param   GCPtr       Virtual address of the first page in the range.
- * @param   cb          Size (in bytes) of the range to apply the modification to.
  * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
  * @param   fMask       The AND mask - page flags X86_PTE_*.
  *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
+ * @param   fOpFlags    A combination of the PGM_MK_PK_XXX flags.
  * @remark  You must use PGMMapModifyPage() for pages in a mapping.
  */
-VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
+DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
 {
     AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
-    Assert(cb);
-
-    /*
-     * Align the input.
-     */
-    cb += GCPtr & PAGE_OFFSET_MASK;
-    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
-    GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
-
-    /*
-     * Call worker.
-     */
+    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
+
+    GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
+
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     pgmLock(pVM);
-    int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
+    int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
     pgmUnlock(pVM);
     return rc;
 }
+
+
+/**
+ * Changing the page flags for a single page in the shadow page tables so as to
+ * make it read-only.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       VMCPU handle.
+ * @param   GCPtr       Virtual address of the first page in the range.
+ * @param   fOpFlags    A combination of the PGM_MK_PK_XXX flags.
+ */
+VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
+{
+    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
+}
+
+
+/**
+ * Changing the page flags for a single page in the shadow page tables so as to
+ * make it writable.
+ *
+ * The call must know with 101% certainty that the guest page tables maps this
+ * as writable too.  This function will deal shared, zero and write monitored
+ * pages.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       VMCPU handle.
+ * @param   GCPtr       Virtual address of the first page in the range.
+ * @param   fMmio2      Set if it is an MMIO2 page.
+ * @param   fOpFlags    A combination of the PGM_MK_PK_XXX flags.
+ */
+VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
+{
+    return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
+}
+
+
+/**
+ * Changing the page flags for a single page in the shadow page tables so as to
+ * make it not present.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       VMCPU handle.
+ * @param   GCPtr       Virtual address of the first page in the range.
+ * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
+ */
+VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
+{
+    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
+}
 
 
 /**
…
 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
 
+/** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
+DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
+{
+    pgmLock(pVM);
+
+    /*
+     * Convert it to a writable page and pass it on to PGMDynMapHCPage.
+     */
+    int rc;
+    PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
+    if (RT_LIKELY(pPage))
+    {
+        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
+        if (RT_SUCCESS(rc))
+        {
+            //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
+#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+            rc = pgmR0DynMapHCPageInlined(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage), ppv);
+#else
+            rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
+#endif
+        }
+        else
+            AssertRC(rc);
+    }
+    else
+    {
+        AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
+        rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
+    }
+
+    pgmUnlock(pVM);
+    return rc;
+}
+
 /**
  * Temporarily maps one guest page specified by GC physical address.
…
 {
     AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
-
-    /*
-     * Get the ram range.
-     */
-    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
-    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
-        pRam = pRam->CTX_SUFF(pNext);
-    if (!pRam)
-    {
-        AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
-        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
-    }
-
-    /*
-     * Pass it on to PGMDynMapHCPage.
-     */
-    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
-    //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
-#else
-    PGMDynMapHCPage(pVM, HCPhys, ppv);
-#endif
-    return VINF_SUCCESS;
+    return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
 }
…
 VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
 {
-    /*
-     * Get the ram range.
-     */
-    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
-    while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
-        pRam = pRam->CTX_SUFF(pNext);
-    if (!pRam)
-    {
-        AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
-        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
-    }
-
-    /*
-     * Pass it on to PGMDynMapHCPage.
-     */
-    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
-#else
-    PGMDynMapHCPage(pVM, HCPhys, ppv);
-#endif
-    *ppv = (void *)((uintptr_t)*ppv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-    return VINF_SUCCESS;
+    void *pv;
+    int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
+    if (RT_SUCCESS(rc))
+    {
+        *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
+        return VINF_SUCCESS;
+    }
+    return rc;
 }
…
 VMMDECL(void) PGMDynCheckLocks(PVM pVM)
 {
-    for (unsigned i=0;i<RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache);i++)
+    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
         Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
 }
…
 static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
-                                                    const char *pszType, void const *pvValue,
-                                                    int cchWidth, int cchPrecision, unsigned fFlags,
-                                                    void *pvUser)
+                                                     const char *pszType, void const *pvValue,
+                                                     int cchWidth, int cchPrecision, unsigned fFlags,
+                                                     void *pvUser)
 {
     size_t cch;
…
 
 #endif /* !IN_R0 || LOG_ENABLED */
-
 
 /**
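A side effect of routing both mappers through pgmDynMapGCPageInternal is that PGMDynMapGCPageOff now propagates failures (invalid physical address, or a failed pgmPhysPageMakeWritable) instead of always returning success. A minimal caller sketch, assuming the usual VMM context (pVM, a GCPhys value) and only the functions shown in this changeset; it is illustrative, not code from the commit:

    /* Hypothetical caller: map a guest page and honour the status code
       that PGMDynMapGCPageOff propagates after this change. */
    void *pv;
    int rc = PGMDynMapGCPageOff(pVM, GCPhys, &pv);
    if (RT_SUCCESS(rc))
    {
        /* pv keeps the byte offset of GCPhys within the page. */
        uint8_t bFirst = *(uint8_t *)pv;
        NOREF(bFirst);
    }
    else
        AssertMsgFailed(("PGMDynMapGCPageOff(%RGp) -> %Rrc\n", GCPhys, rc));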
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r30301 → r30326

         if (rc == VINF_SUCCESS)
         {
-            rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
+            rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
             AssertMsg(rc == VINF_SUCCESS
                       /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
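For reference, a minimal before/after sketch of the call-site migration this hunk performs; pvFault and rc are assumed to be the variables already in scope in the pool access handler:

    /* Before (r30301): write access granted through the generic flag-modify API. */
    rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);

    /* After (r30326): dedicated helper; PGM_MK_PG_IS_WRITE_FAULT tells the shadow
       code that the request originates from a write fault on this address. */
    rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);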
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r28800 → r30326

 
 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2010 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
…
 RT_C_DECLS_BEGIN
 PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
-PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask);
+PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
 RT_C_DECLS_END
…
  * @param   fMask       The AND mask - page flags X86_PTE_*.
  *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
+ * @param   fOpFlags    A combination of the PGM_MK_PK_XXX flags.
  * @remark  You must use PGMMapModifyPage() for pages in a mapping.
  */
-PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
+PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
 {
 # if PGM_SHW_TYPE == PGM_TYPE_NESTED
…
             if (pPT->a[iPTE].n.u1Present)
             {
-                SHWPTE Pte;
-
-                Pte.u = (pPT->a[iPTE].u & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK);
-                ASMAtomicWriteSize(&pPT->a[iPTE], Pte.u);
-                Assert(pPT->a[iPTE].n.u1Present);
+                SHWPTE const OrgPte = pPT->a[iPTE];
+                SHWPTE       NewPte;
+
+                NewPte.u = (OrgPte.u & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK);
+                Assert(NewPte.n.u1Present);
+                if (!NewPte.n.u1Present)
+                {
+                    /** @todo Some CSAM code path might end up here and upset
+                     *        the page pool. */
+                    AssertFailed();
+                }
+                else if (   NewPte.n.u1Write
+                         && !OrgPte.n.u1Write
+                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
+                {
+                    /** @todo Optimize \#PF handling by caching data.  We can
+                     *        then use this when PGM_MK_PG_IS_WRITE_FAULT is
+                     *        set instead of resolving the guest physical
+                     *        address yet again. */
+                    RTGCPHYS GCPhys;
+                    uint64_t fGstPte;
+                    rc = PGMGstGetPage(pVCpu, GCPtr, &fGstPte, &GCPhys);
+                    AssertRC(rc);
+                    if (RT_SUCCESS(rc))
+                    {
+                        Assert(fGstPte & X86_PTE_RW);
+                        PPGMPAGE pPage = pgmPhysGetPage(&pVCpu->CTX_SUFF(pVM)->pgm.s, GCPhys);
+                        Assert(pPage);
+                        if (pPage)
+                        {
+                            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
+                            AssertRCReturn(rc, rc);
+                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GCPhys, pPage));
+                        }
+                    }
+                }
+
+                ASMAtomicWriteSize(&pPT->a[iPTE], NewPte.u);
 # if PGM_SHW_TYPE == PGM_TYPE_EPT
                 HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
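As a quick illustration of the OR/AND mask semantics used in the NewPte expression above (fFlags is OR'ed in, fMask is AND'ed, and the page-frame bits in SHW_PTE_PG_MASK are always preserved), here is a worked example with the values PGMShwMakePageReadonly now passes; the concrete PTE value is made up for the sketch:

    /* Make a present, writable PTE read-only: fFlags = 0, fMask = ~X86_PTE_RW. */
    uint64_t const OrgPteU = UINT64_C(0x00000000abcd0067);   /* P+RW+US+A+D, frame 0xabcd0 */
    uint64_t const NewPteU = (OrgPteU & (~(uint64_t)X86_PTE_RW | SHW_PTE_PG_MASK))
                           | ((uint64_t)0 /* fFlags */ & ~SHW_PTE_PG_MASK);
    /* NewPteU == 0x00000000abcd0065: only the X86_PTE_RW bit (bit 1) was cleared. */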