Changeset 91271 in vbox for trunk/src/VBox/VMM
- Timestamp: Sep 16, 2021 7:42:37 AM (3 years ago)
- svn:sync-xref-src-repo-rev: 146930
- Location: trunk/src/VBox/VMM
- Files: 14 edited
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r91266 → r91271

     fMask |= X86_CR4_FSGSBASE;
     return fMask;
 }
+
+
+/**
+ * Sets the PAE PDPTEs for the guest.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
+ * @param   paPaePdpes  The PAE PDPTEs to set.
+ */
+VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
+{
+    Assert(paPaePdpes);
+    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
+        pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
+    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
+}
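For readers outside the codebase, the pattern in the new CPUMSetGuestPaePdpes is simple enough to model standalone: copy the four PDPTEs into the guest context, then clear the "externally held" bit for CR3 so the cached values count as current. The sketch below uses stand-in types and flag values (GUESTCTX, the CPUMCTX_EXTRN_CR3 value), not the real VBox headers.

/* Standalone sketch of the pattern above, using stand-in types rather than
 * the real VBox headers (X86PDPE, CPUMCTX and RT_ELEMENTS are modelled here). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef union { uint64_t u; } X86PDPE;           /* stand-in for the real X86PDPE union */
#define CPUMCTX_EXTRN_CR3   UINT64_C(0x0100)     /* illustrative flag value, not VBox's */
#define RT_ELEMENTS(a)      (sizeof(a) / sizeof((a)[0]))

typedef struct
{
    X86PDPE  aPaePdpes[4];  /* the four PAE PDPTEs, as now kept in CPUMCTX */
    uint64_t fExtrn;        /* bits set => state still lives externally (e.g. in the VMCS) */
} GUESTCTX;

/* Mirrors CPUMSetGuestPaePdpes: copy the PDPTEs in and mark the CR3 state as imported. */
static void SetGuestPaePdpes(GUESTCTX *pGuest, const X86PDPE *paPaePdpes)
{
    assert(paPaePdpes);
    for (unsigned i = 0; i < RT_ELEMENTS(pGuest->aPaePdpes); i++)
        pGuest->aPaePdpes[i].u = paPaePdpes[i].u;
    pGuest->fExtrn &= ~CPUMCTX_EXTRN_CR3;
}

int main(void)
{
    GUESTCTX Guest = { .fExtrn = CPUMCTX_EXTRN_CR3 };
    X86PDPE  aPdpes[4] = { { 0x1001 }, { 0x2001 }, { 0x3001 }, { 0x4001 } };
    SetGuestPaePdpes(&Guest, aPdpes);
    printf("PDPTE0=%#llx fExtrn=%#llx\n",
           (unsigned long long)Guest.aPaePdpes[0].u,
           (unsigned long long)Guest.fExtrn);
    return 0;
}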
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
r91037 → r91271

 /**
- * Return the PAE PDPE entries.
- *
- * @returns Pointer to the PAE PDPE array.
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
-{
-    return &pVCpu->hm.s.aPdpes[0];
-}
-
-
-/**
  * Sets or clears the single instruction flag.
  *
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r91250 → r91271

 /**
- * Gets the PAE PDPEs values cached by the CPU.
- *
- * @returns VBox status code.
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   paPdpes Where to return the four PDPEs. The array
- *                  pointed to must have 4 entries.
- */
-VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPUCC pVCpu, PX86PDPE paPdpes)
-{
-    Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
-
-    paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
-    paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
-    paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
-    paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Sets the PAE PDPEs values cached by the CPU.
- *
- * @remarks This must be called *AFTER* PGMUpdateCR3.
- *
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   paPdpes The four PDPE values. The array pointed to must
- *                  have exactly 4 entries.
- *
- * @remarks No-long-jump zone!!!
- */
-VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPdpes)
-{
-    Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
-
-    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
-    {
-        if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
-        {
-            pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
-
-            /* Force lazy remapping if it changed in any way. */
-            pVCpu->pgm.s.apGstPaePDsR3[i]    = 0;
-            pVCpu->pgm.s.apGstPaePDsR0[i]    = 0;
-            pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
-        }
-    }
-
-    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
-}
-
-
-/**
  * Gets the current CR3 register value for the shadow memory context.
  * @returns CR3 value.
…
     AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
     return pPoolPage->Core.Key;
 }
+
+
+/**
+ * Forces lazy remapping of the guest's PAE page-directory structures.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+static void pgmGstUpdatePaePdpes(PVMCPU pVCpu)
+{
+    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
+    {
+        pVCpu->pgm.s.apGstPaePDsR3[i]    = 0;
+        pVCpu->pgm.s.apGstPaePDsR0[i]    = 0;
+        pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
+    }
+}
…
     else
         STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
+
+    /*
+     * Update PAE PDPTEs.
+     */
+    if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
+        pgmGstUpdatePaePdpes(pVCpu);
 }
…
         AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
     }
+    /*
+     * Update PAE PDPTEs.
+     */
+    else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
+        pgmGstUpdatePaePdpes(pVCpu);

     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
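The new static helper replaces eager PDPTE bookkeeping with cache invalidation: PGM just poisons its cached PAE page-directory mappings and lets the next access remap. A minimal standalone model of that lazy-remapping idea follows; the PDCACHE/GetPd names and the mapping step are invented for illustration, not VBox code.

/* Minimal model of the "force lazy remapping" idea behind pgmGstUpdatePaePdpes:
 * instead of eagerly remapping the PAE page directories, the cached mapping
 * slots are poisoned so the next lookup has to re-resolve them. */
#include <stdint.h>
#include <stdio.h>

#define NIL_PHYS UINT64_MAX     /* stand-in for NIL_RTGCPHYS */

typedef struct
{
    void    *apMapped[4];       /* cached mappings (apGstPaePDsR3/R0 in VBox) */
    uint64_t aPhys[4];          /* cached physical addresses (aGCPhysGstPaePDs) */
} PDCACHE;

/* Invalidate the cache; cheap, and safe to call whenever the PDPTEs may have changed. */
static void FlushPdCache(PDCACHE *pCache)
{
    for (unsigned i = 0; i < 4; i++)
    {
        pCache->apMapped[i] = NULL;
        pCache->aPhys[i]    = NIL_PHYS;
    }
}

/* On access, remap only if the cached entry was invalidated or points elsewhere. */
static void *GetPd(PDCACHE *pCache, unsigned i, uint64_t physNew)
{
    if (!pCache->apMapped[i] || pCache->aPhys[i] != physNew)
    {
        static uint8_t s_abBacking[4][4096];    /* pretend mapping target */
        pCache->apMapped[i] = s_abBacking[i];
        pCache->aPhys[i]    = physNew;
        printf("remapped PD %u -> phys %#llx\n", i, (unsigned long long)physNew);
    }
    return pCache->apMapped[i];
}

int main(void)
{
    PDCACHE Cache;
    FlushPdCache(&Cache);
    GetPd(&Cache, 0, 0x1000);   /* remaps */
    GetPd(&Cache, 0, 0x1000);   /* cache hit, no remap */
    FlushPdCache(&Cache);       /* e.g. after a CR3 change in PAE mode */
    GetPd(&Cache, 0, 0x1000);   /* remaps again */
    return 0;
}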
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r91250 → r91271

      * Map the 4 PDs too.
      */
-    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
+    X86PDPE aGstPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
+    memcpy(&aGstPaePdpes, HCPtrGuestCR3, sizeof(aGstPaePdpes));
+    CPUMSetGuestPaePdpes(pVCpu, &aGstPaePdpes[0]);
     for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
     {
-        pVCpu->pgm.s.aGstPaePdpeRegs[i].u = pGuestPDPT->a[i].u;
-        if (pGuestPDPT->a[i].u & X86_PDPE_P)
+        X86PDPE PaePdpe = aGstPaePdpes[i];
+        if (PaePdpe.u & X86_PDPE_P)
         {
             RTHCPTR  HCPtr;
-            RTGCPHYS GCPhys = PGM_A20_APPLY(pVCpu, pGuestPDPT->a[i].u & X86_PDPE_PG_MASK);
+            RTGCPHYS GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
             PGM_LOCK_VOID(pVM);
             PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
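The memcpy above works because, architecturally, the PAE PDPT is just four 64-bit entries at the address CR3 points to; bit 0 of each entry is the present bit (X86_PDPE_P) and bits 51:12 hold the page-directory address (X86_PDPE_PG_MASK). A self-contained illustration of that decoding, with the guest PDPT replaced by a local array:

/* Each 64-bit PAE PDPTE: present bit in bit 0, 4KB-aligned page-directory
 * address in bits 51:12. Constants mirror the architectural layout. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PDPE_ENTRIES   4
#define PDPE_P         UINT64_C(0x0000000000000001)  /* present bit, as X86_PDPE_P */
#define PDPE_PG_MASK   UINT64_C(0x000ffffffffff000)  /* PD address, as X86_PDPE_PG_MASK */

int main(void)
{
    /* Pretend this is the PDPT read at the guest's CR3 (entry 2 not present). */
    uint64_t const auGuestPdpt[PDPE_ENTRIES] =
    {
        0x00000000aa000001, 0x00000000bb000001, 0, 0x00000000cc000001,
    };

    uint64_t aPdpes[PDPE_ENTRIES];
    memcpy(aPdpes, auGuestPdpt, sizeof(aPdpes));     /* like the memcpy from HCPtrGuestCR3 */

    for (unsigned i = 0; i < PDPE_ENTRIES; i++)
    {
        if (aPdpes[i] & PDPE_P)
            printf("PDPTE%u present, PD at %#llx\n", i,
                   (unsigned long long)(aPdpes[i] & PDPE_PG_MASK));
        else
            printf("PDPTE%u not present\n", i);
    }
    return 0;
}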
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r91265 → r91271

 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
-    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));

     /* Could happen as a result of longjump. */
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r91265 → r91271

     if (CPUMIsGuestInPAEModeEx(pCtx))
     {
-        rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-        AssertRC(rc);
-        rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);   AssertRC(rc);
-        rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);   AssertRC(rc);
-        rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);   AssertRC(rc);
-        rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);   AssertRC(rc);
+        rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u);   AssertRC(rc);
+        rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u);   AssertRC(rc);
+        rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u);   AssertRC(rc);
+        rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u);   AssertRC(rc);
     }
…
 }

-    /* If the guest is in PAE mode, sync back the PDPE's into the guest state.
-       Note: CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date. */
+    /*
+     * If the guest is in PAE mode, sync back the PDPE's into the guest state.
+     * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
+     */
     if (CPUMIsGuestInPAEModeEx(pCtx))
     {
-        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);   AssertRC(rc);
-        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);   AssertRC(rc);
-        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);   AssertRC(rc);
-        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);   AssertRC(rc);
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
+        X86PDPE aPaePdpes[4];
+        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u);   AssertRC(rc);
+        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u);   AssertRC(rc);
+        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u);   AssertRC(rc);
+        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u);   AssertRC(rc);
+        if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
+        {
+            memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
+            /* PGM now updates PAE PDPTEs while updating CR3. */
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
+        }
     }
 }
…
         PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
     }
-
-    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
-        PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-
     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
-    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
 }
…
         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     }
-    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
-    {
-        PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
-    }

 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
…
     {
         VMMRZCallRing3Enable(pVCpu);
-
         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
-        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));

 #ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
…
         case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
         {
+            /*
+             * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
+             * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
+             * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
+             * PAE PDPTEs as well.
+             */
             int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
             AssertRCReturn(rc, rc);
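The import side above follows a compare-then-flag pattern: read the four PDPTEs back from the VMCS into a scratch array, and only when they differ from the cached context copy them over and raise VMCPU_FF_HM_UPDATE_CR3, since PGM now refreshes the PDPTEs as part of the CR3 update. A standalone sketch with simulated VMCS fields and force flag (none of these names are VBox's):

/* Compare-then-flag import: the common unchanged case stays cheap. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t g_auVmcsPdptes[4] = { 0x1001, 0x2001, 0x3001, 0x4001 }; /* fake VMCS fields */
static uint64_t g_auCtxPdptes[4];                                       /* cached guest ctx */
static bool     g_fUpdateCr3;                                           /* fake force flag  */

static void ImportPaePdptes(void)
{
    uint64_t auTmp[4];
    for (unsigned i = 0; i < 4; i++)
        auTmp[i] = g_auVmcsPdptes[i];                   /* like VMXReadVmcs64 */
    if (memcmp(auTmp, g_auCtxPdptes, sizeof(auTmp)) != 0)
    {
        memcpy(g_auCtxPdptes, auTmp, sizeof(auTmp));
        g_fUpdateCr3 = true;                            /* PGM refreshes PDPTEs with CR3 */
    }
}

int main(void)
{
    ImportPaePdptes();
    printf("first import: update=%d\n", g_fUpdateCr3);  /* changed -> flag set */
    g_fUpdateCr3 = false;
    ImportPaePdptes();
    printf("second import: update=%d\n", g_fUpdateCr3); /* unchanged -> flag clear */
    return 0;
}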
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r91245 → r91271

     uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
                            | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
-                           | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
+                           | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                            | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
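For context, fCpuFFs is a mask of per-CPU force flags that require a return to ring-3; this changeset simply drops the now-retired PAE-PDPE flag from it. A toy version of the mask test, with made-up flag values:

/* Per-CPU events as bits in a word, filtered through a ring-3 mask. */
#include <stdint.h>
#include <stdio.h>

#define FF_TIMER           UINT64_C(0x01)
#define FF_REQUEST         UINT64_C(0x02)
#define FF_HM_UPDATE_CR3   UINT64_C(0x04)
#define FF_PGM_SYNC_CR3    UINT64_C(0x08)
#define FF_TO_R3           UINT64_C(0x10)

int main(void)
{
    /* The mask of flags that force a trip to ring-3 (the PAE-PDPE flag is gone). */
    uint64_t const fCpuFFs = FF_TIMER | FF_REQUEST | FF_HM_UPDATE_CR3
                           | FF_PGM_SYNC_CR3 | FF_TO_R3;

    uint64_t fPending = FF_HM_UPDATE_CR3;               /* e.g. set after a VM-exit */
    if (fPending & fCpuFFs)
        printf("returning to ring-3 to service FFs %#llx\n",
               (unsigned long long)(fPending & fCpuFFs));
    return 0;
}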
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r91120 → r91271

         SSMR3PutStructEx(pSSM, pZmm16Hi, sizeof(*pZmm16Hi), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmm16HiFields, NULL);
     }
+    SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[0].u);
+    SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[1].u);
+    SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[2].u);
+    SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[3].u);
     if (pVM->cpum.s.GuestFeatures.fSvm)
     {
…
         PX86XSAVEZMM16HI pZmm16Hi = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_ZMM_16HI_BIT, PX86XSAVEZMM16HI);
         SSMR3GetStructEx(pSSM, pZmm16Hi, sizeof(*pZmm16Hi), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmm16HiFields, NULL);
     }
+    if (uVersion >= CPUM_SAVED_STATE_VERSION_PAE_PDPES)
+    {
+        SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[0].u);
+        SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[1].u);
+        SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[2].u);
+        SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[3].u);
+    }
     if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_SVM)
…
                     pszPrefix, pCtx->msrSFMASK,
                     pszPrefix, pCtx->msrKERNELGSBASE);
+
+    if (CPUMIsGuestInPAEModeEx(pCtx))
+        for (unsigned i = 0; i < RT_ELEMENTS(pCtx->aPaePdpes); i++)
+            pHlp->pfnPrintf(pHlp, "%sPAE PDPTE %u =%016RX64\n", pszPrefix, i, pCtx->aPaePdpes[i].u);
     break;
 }
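The save/load changes follow the usual saved-state versioning discipline: the save path always appends the new fields, while the load path reads them only when the stream's version is at least CPUM_SAVED_STATE_VERSION_PAE_PDPES, keeping older saved states loadable. A standalone sketch of that pattern, with the SSM stream replaced by a temporary file and illustrative version numbers:

/* Version-gated saved-state load: new fields appended on save, read on load
 * only when the stream is new enough. */
#include <stdint.h>
#include <stdio.h>

#define SAVED_STATE_VERSION_PAE_PDPES 21
#define SAVED_STATE_VERSION           SAVED_STATE_VERSION_PAE_PDPES

static void PutU64(FILE *pF, uint64_t u)   { fwrite(&u, sizeof(u), 1, pF); }
static void GetU64(FILE *pF, uint64_t *pu) { if (fread(pu, sizeof(*pu), 1, pF) != 1) *pu = 0; }

int main(void)
{
    uint64_t aPdpesOut[4] = { 0x1001, 0x2001, 0x3001, 0x4001 };
    uint64_t aPdpesIn[4]  = { 0 };

    FILE *pF = tmpfile();
    if (!pF)
        return 1;
    for (unsigned i = 0; i < 4; i++)                 /* save side: always written */
        PutU64(pF, aPdpesOut[i]);

    rewind(pF);
    unsigned const uVersion = SAVED_STATE_VERSION;   /* read from the stream in reality */
    if (uVersion >= SAVED_STATE_VERSION_PAE_PDPES)   /* load side: version-gated */
        for (unsigned i = 0; i < 4; i++)
            GetU64(pF, &aPdpesIn[i]);

    printf("loaded PDPTE0=%#llx\n", (unsigned long long)aPdpesIn[0]);
    fclose(pF);
    return 0;
}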
trunk/src/VBox/VMM/VMMR3/EM.cpp
r91264 → r91271

     }

-    /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
-    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
-    {
-        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
-        if (CPUMIsGuestInPAEMode(pVCpu))
-        {
-            PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
-            AssertPtr(pPdpes);
-
-            PGMGstUpdatePaePdpes(pVCpu, pPdpes);
-            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
-        }
-        else
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
-    }

     /* IEM has pending work (typically memory write after INS instruction). */
     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r91249 → r91271

         pPGM->apGstPaePDsR0[i]             = NIL_RTR0PTR;
         pPGM->aGCPhysGstPaePDs[i]          = NIL_RTGCPHYS;
-        pPGM->aGstPaePdpeRegs[i].u         = UINT64_MAX;
         pPGM->aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
     }
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r91246 → r91271

         PRINT_FLAG(VMCPU_FF_,REQUEST);
         PRINT_FLAG(VMCPU_FF_,HM_UPDATE_CR3);
-        PRINT_FLAG(VMCPU_FF_,HM_UPDATE_PAE_PDPES);
         PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
         PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
trunk/src/VBox/VMM/include/CPUMInternal.h
r91266 → r91271

  * @{ */
 /** The current saved state version. */
-#define CPUM_SAVED_STATE_VERSION                CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2
+#define CPUM_SAVED_STATE_VERSION                CPUM_SAVED_STATE_VERSION_PAE_PDPES
+/** The saved state version with PAE PDPEs added. */
+#define CPUM_SAVED_STATE_VERSION_PAE_PDPES      21
 /** The saved state version with more virtual VMCS fields and CPUMCTX VMX fields. */
 #define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2   20
trunk/src/VBox/VMM/include/CPUMInternal.mac
r91266 → r91271

     alignb 8
     .Guest.fExtrn             resq 1
+    .Guest.aPaePdpes          resq 4
     alignb 8
     .Guest.hwvirt.svm.uMsrHSavePa    resq 1
trunk/src/VBox/VMM/include/PGMInternal.h
r91250 → r91271

     /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
     RTGCPHYS                aGCPhysGstPaePDs[4];
-    /** The values of the 4 PDPE CPU registers (PAE).
-     * @todo Not really maintained by PGM atm, only by VT-x in EPT mode. Should
-     *       load on cr3 load and use instead of guest memory version like real
-     *       HW. We probably should move these to the CPUMCTX and treat them
-     *       like the rest of the register wrt exporting to VT-x and import back. */
-    X86PDPE                 aGstPaePdpeRegs[4];
     /** The physical addresses of the monitored guest page directories (PAE). */
     RTGCPHYS                aGCPhysGstPaePDsMonitored[4];