Timestamp: Nov 11, 2021 2:39:45 PM (3 years ago)
Location:  trunk/src/VBox/VMM
Files:     3 edited
Legend:
  ' '  Unmodified
  '+'  Added
  '-'  Removed
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
(diff r92368 → r92371)

  * @param   GCPhys      The address of the page.
  *
- * @remarks Must be called from within the PGM critical section. It may
- *          nip back to ring-3/0 in some cases.
+ * @remarks Must be called from within the PGM critical section. It may block
+ *          on GMM and host mutexes/locks, leaving HM context.
  */
 int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
 …

     /*
-     * Prereqs.
+     * Check Prereqs.
      */
     PGM_LOCK_ASSERT_OWNER(pVM);
     Assert(PGMIsUsingLargePages(pVM));

+    /*
+     * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
+     */
     PPGMPAGE pFirstPage;
     int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
-    if (    RT_SUCCESS(rc)
-        &&  PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
-    {
+    if (   RT_SUCCESS(rc)
+        && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
+        && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
+    {
+        /*
+         * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
+         * since they are unallocated.
+         */
         unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
-
-        /* Don't call this function for already allocated pages. */
         Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
-
-        if (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
-            && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
-        {
-            /* Lazy approach: check all pages in the 2 MB range.
-             * The whole range must be ram and unallocated. */
+        if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
+        {
+            /*
+             * Now, make sure all the other pages in the 2 MB is in the same state.
+             */
             GCPhys = GCPhysBase;
-            for (unsigned iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
+            unsigned cLeft = _2M / PAGE_SIZE;
+            while (cLeft-- > 0)
             {
-                PPGMPAGE pSubPage;
-                rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
-                if (    RT_FAILURE(rc)
-                    ||  PGM_PAGE_GET_TYPE(pSubPage)  != PGMPAGETYPE_RAM      /* Anything other than ram implies monitoring. */
-                    ||  PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
+                PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
+                if (   pSubPage
+                    && PGM_PAGE_GET_TYPE(pSubPage)  == PGMPAGETYPE_RAM       /* Anything other than ram implies monitoring. */
+                    && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO)  /* Allocated, monitored or shared means we can't use a large page here */
                 {
-                    LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
+                    Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
+                    GCPhys += PAGE_SIZE;
+                }
+                else
+                {
+                    LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
+                             GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
+
                     /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
                     STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
 …
                     return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
                 }
-                Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
-                GCPhys += PAGE_SIZE;
             }
 …
              */
 # ifdef IN_RING3
-            rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhys, NULL);
+            rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
 # elif defined(IN_RING0)
-            rc = PGMR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
+            rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
 # else
 #  error "Port me"
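In short, this hunk reworks the 2 MB range check in pgmPhysAllocLargePage(): the ZERO-state requirement is now checked on the first page up front, the per-page loop uses pgmPhysGetPage() with a positive condition instead of pgmPhysGetPageEx() with a negated one, and the ring-3/ring-0 dispatch at the end passes GCPhysBase and calls the new internal worker pgmR0PhysAllocateLargePage() in ring-0 builds. The stand-alone sketch below illustrates the shape of that range check; all types and helpers are placeholders, not the real PGM structures, and it is an illustration of the pattern rather than VirtualBox code.

/*
 * Minimal, self-contained sketch of the range check: every 4 KiB page backing
 * the 2 MiB range must be unallocated guest RAM (i.e. still mapping the ZERO
 * page) before a large page is worth allocating.  Placeholder types/helpers.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE    4096u
#define SKETCH_2M           (2u * 1024u * 1024u)

typedef enum { SKETCH_TYPE_RAM, SKETCH_TYPE_OTHER } SketchPageType;
typedef enum { SKETCH_STATE_ZERO, SKETCH_STATE_ALLOCATED } SketchPageState;

typedef struct SketchPage
{
    SketchPageType  enmType;   /* placeholder for PGM_PAGE_GET_TYPE()  */
    SketchPageState enmState;  /* placeholder for PGM_PAGE_GET_STATE() */
} SketchPage;

/* Placeholder lookup: returns NULL when no page exists at that address. */
static SketchPage *sketchGetPage(uint64_t GCPhys)
{
    (void)GCPhys;
    return NULL; /* stub */
}

/* True when the whole 2 MiB range rooted at GCPhysBase could become one large page. */
static bool sketchCanUseLargePage(uint64_t GCPhysBase)
{
    uint64_t GCPhys = GCPhysBase;
    unsigned cLeft  = SKETCH_2M / SKETCH_PAGE_SIZE;      /* 512 small pages */
    while (cLeft-- > 0)
    {
        SketchPage *pSubPage = sketchGetPage(GCPhys);
        if (   !pSubPage
            || pSubPage->enmType  != SKETCH_TYPE_RAM     /* anything else implies monitoring */
            || pSubPage->enmState != SKETCH_STATE_ZERO)  /* allocated, monitored or shared page */
            return false;
        GCPhys += SKETCH_PAGE_SIZE;
    }
    return true;
}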
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
(diff r92368 → r92371)

  * must clear the new pages.
  */
-VMMR0_INT_DECL(int) PGMR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys)
-{
-    /*
-     * Validate inputs.
-     */
-    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
-    AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
+int pgmR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys)
+{
     PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
-    Assert(!pGVM->pgm.s.cLargeHandyPages);
-
-    /* The caller might have done this already, but since we're ring-3 callable we
-       need to make sure everything is fine before starting the allocation here. */
-    for (unsigned i = 0; i < _2M / PAGE_SIZE; i++)
-    {
-        PPGMPAGE pPage;
-        int rc = pgmPhysGetPageEx(pGVM, GCPhys + i * PAGE_SIZE, &pPage);
-        AssertRCReturn(rc, rc);
-        AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, VERR_PGM_PHYS_NOT_RAM);
-        AssertReturn(PGM_PAGE_IS_ZERO(pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
-    }

     /*
 …

     return VINF_SUCCESS;
+}
+
+
+/**
+ * Allocate a large page at @a GCPhys.
+ *
+ * @returns The following VBox status codes.
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VINF_EM_NO_MEMORY if we're out of memory.
+ *
+ * @param   pGVM        The global (ring-0) VM structure.
+ * @param   idCpu       The ID of the calling EMT.
+ * @param   GCPhys      The guest physical address of the page.
+ *
+ * @thread  EMT(idCpu)
+ *
+ * @remarks Must be called from within the PGM critical section. The caller
+ *          must clear the new pages.
+ */
+VMMR0_INT_DECL(int) PGMR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys)
+{
+    /*
+     * Validate inputs.
+     */
+    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
+    AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
+
+    int rc = PGM_LOCK(pGVM);
+    AssertRCReturn(rc, rc);
+
+    /* The caller might have done this already, but since we're ring-3 callable we
+       need to make sure everything is fine before starting the allocation here. */
+    for (unsigned i = 0; i < _2M / PAGE_SIZE; i++)
+    {
+        PPGMPAGE pPage;
+        rc = pgmPhysGetPageEx(pGVM, GCPhys + i * PAGE_SIZE, &pPage);
+        AssertRCReturnStmt(rc, PGM_UNLOCK(pGVM), rc);
+        AssertReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, PGM_UNLOCK(pGVM), VERR_PGM_PHYS_NOT_RAM);
+        AssertReturnStmt(PGM_PAGE_IS_ZERO(pPage), PGM_UNLOCK(pGVM), VERR_PGM_UNEXPECTED_PAGE_STATE);
+    }
+
+    /*
+     * Call common code.
+     */
+    rc = pgmR0PhysAllocateLargePage(pGVM, idCpu, GCPhys);
+
+    PGM_UNLOCK(pGVM);
+    return rc;
 }
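The PGMR0.cpp change splits the old ring-0 entry point in two: the body becomes an internal worker, pgmR0PhysAllocateLargePage(), which merely asserts that the PGM lock is already owned, while the exported PGMR0PhysAllocateLargePage() now validates the calling EMT, takes the PGM lock itself, re-checks that the whole 2 MB range is ZERO RAM (using the *ReturnStmt assertion variants so the lock is dropped on every failure path), and only then calls the worker. Below is a minimal sketch of that wrapper/worker locking pattern; the names are placeholders and a plain pthread mutex stands in for the PGM lock.

/*
 * Sketch of the wrapper/worker split: an internal worker that assumes the
 * lock is already held, and an exported entry point that validates, locks,
 * calls the worker and always unlocks.  Everything here is a placeholder.
 */
#include <pthread.h>
#include <stdint.h>

#define SKETCH_OK            0
#define SKETCH_ERR_BAD_CPU   (-1)

static pthread_mutex_t g_SketchLock = PTHREAD_MUTEX_INITIALIZER;

/* Internal worker: caller must already own g_SketchLock. */
static int sketchAllocateLargePageWorker(uint64_t GCPhys)
{
    /* ... the actual allocation work would happen here ... */
    (void)GCPhys;
    return SKETCH_OK;
}

/* Exported entry point: ring-3 callable, so it validates and locks itself. */
int SketchAllocateLargePage(unsigned idCpu, unsigned cCpus, uint64_t GCPhys)
{
    if (idCpu >= cCpus)                      /* input validation, as in the real entry point */
        return SKETCH_ERR_BAD_CPU;

    pthread_mutex_lock(&g_SketchLock);       /* stands in for PGM_LOCK() */
    int rc = sketchAllocateLargePageWorker(GCPhys);
    pthread_mutex_unlock(&g_SketchLock);     /* stands in for PGM_UNLOCK() on every path */
    return rc;
}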
trunk/src/VBox/VMM/include/PGMInternal.h
(diff r92368 → r92371)

 int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
 int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys);
+#ifdef IN_RING0
+int pgmR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys);
+#endif
 int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage);
 int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys);
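Finally, PGMInternal.h publishes the new worker to the rest of PGM. The prototype sits inside an #ifdef IN_RING0 guard, so only ring-0 code sees it (its PGVM parameter is the global ring-0 VM structure per the doxygen comment above). A tiny header sketch of that guard pattern, with placeholder names, might look like this:

/* Sketch of a shared internal header that exposes a ring-0-only helper
   behind an #ifdef, so ring-3 translation units never see it.
   All names here are placeholders. */
#ifndef SKETCH_PGM_INTERNAL_H
#define SKETCH_PGM_INTERNAL_H

#include <stdint.h>

/* Helpers visible in every context. */
int sketchPhysAllocLargePage(uint64_t GCPhys);

#ifdef IN_RING0
/* Ring-0-only worker: its arguments only make sense in ring-0 builds. */
int sketchR0PhysAllocateLargePage(unsigned idCpu, uint64_t GCPhys);
#endif

#endif /* SKETCH_PGM_INTERNAL_H */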