VirtualBox

Changeset 91271 in vbox for trunk/src/VBox/VMM


Timestamp: Sep 16, 2021 7:42:37 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 146930
Message:

VMM: bugref:10092 Moved the PAE PDPTEs out of PGM into CPUMCTX.

Location: trunk/src/VBox/VMM
Files: 14 edited

Legend: lines prefixed '-' were removed, lines prefixed '+' were added, and space-prefixed lines are unchanged context.
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

r91266 → r91271

@@ -2331 +2331 @@
         fMask |= X86_CR4_FSGSBASE;
     return fMask;
+}
+
+
+/**
+ * Sets the PAE PDPTEs for the guest.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
+ * @param   pPaePdes    The PAE PDPTEs to set.
+ */
+VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes)
+{
+    Assert(paPaePdpes);
+    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.s.Guest.aPaePdpes); i++)
+        pVCpu->cpum.s.Guest.aPaePdpes[i].u = paPaePdpes[i].u;
+    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_CR3;
 }
 
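The new CPUMSetGuestPaePdpes above copies the four PDPTE values into the guest register context and clears the CR3 "externalized" bit so the values count as imported. A minimal standalone C sketch of that pattern, assuming an invented GUESTCTX struct and CTX_EXTRN_CR3 flag (this is not the VirtualBox API, only the shape of the helper):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define CTX_EXTRN_CR3   UINT64_C(0x0010)   /* invented flag bit: "CR3 state not imported yet" */

    typedef struct GUESTCTX
    {
        uint64_t aPaePdpes[4];   /* the four PAE page-directory-pointer-table entries */
        uint64_t fExtrn;         /* set bits mark state still living in the hardware context */
    } GUESTCTX;

    /* Mirrors the shape of CPUMSetGuestPaePdpes: copy all four entries and
       clear the CR3 bit so readers treat the PDPTEs as up to date. */
    static void CtxSetPaePdpes(GUESTCTX *pCtx, const uint64_t paPdpes[4])
    {
        assert(paPdpes != NULL);
        for (size_t i = 0; i < sizeof(pCtx->aPaePdpes) / sizeof(pCtx->aPaePdpes[0]); i++)
            pCtx->aPaePdpes[i] = paPdpes[i];
        pCtx->fExtrn &= ~CTX_EXTRN_CR3;
    }

A caller that has just read the PDPTEs from guest memory or from hardware state passes the four raw 64-bit values straight through.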
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

r91037 → r91271

@@ -733 +733 @@
 
 /**
- * Return the PAE PDPE entries.
- *
- * @returns Pointer to the PAE PDPE array.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
-{
-    return &pVCpu->hm.s.aPdpes[0];
-}
-
-
-/**
  * Sets or clears the single instruction flag.
  *
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

r91250 → r91271

@@ -2363 +2363 @@
 
 /**
- * Gets the PAE PDPEs values cached by the CPU.
- *
- * @returns VBox status code.
- * @param   pVCpu               The cross context virtual CPU structure.
- * @param   paPdpes             Where to return the four PDPEs. The array
- *                              pointed to must have 4 entries.
- */
-VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPUCC pVCpu, PX86PDPE paPdpes)
-{
-    Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
-
-    paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
-    paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
-    paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
-    paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Sets the PAE PDPEs values cached by the CPU.
- *
- * @remarks This must be called *AFTER* PGMUpdateCR3.
- *
- * @param   pVCpu               The cross context virtual CPU structure.
- * @param   paPdpes             The four PDPE values. The array pointed to must
- *                              have exactly 4 entries.
- *
- * @remarks No-long-jump zone!!!
- */
-VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPdpes)
-{
-    Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
-
-    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
-    {
-        if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
-        {
-            pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
-
-            /* Force lazy remapping if it changed in any way. */
-            pVCpu->pgm.s.apGstPaePDsR3[i]     = 0;
-            pVCpu->pgm.s.apGstPaePDsR0[i]     = 0;
-            pVCpu->pgm.s.aGCPhysGstPaePDs[i]  = NIL_RTGCPHYS;
-        }
-    }
-
-    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
-}
-
-
-/**
  * Gets the current CR3 register value for the shadow memory context.
  * @returns CR3 value.

@@ -2424 +2372 @@
     AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
     return pPoolPage->Core.Key;
+}
+
+
+/**
+ * Forces lazy remapping of the guest's PAE page-directory structures.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+static void pgmGstUpdatePaePdpes(PVMCPU pVCpu)
+{
+    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
+    {
+        pVCpu->pgm.s.apGstPaePDsR3[i]     = 0;
+        pVCpu->pgm.s.apGstPaePDsR0[i]     = 0;
+        pVCpu->pgm.s.aGCPhysGstPaePDs[i]  = NIL_RTGCPHYS;
+    }
 }
 

@@ -2528 +2492 @@
         else
             STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
+
+        /*
+         * Update PAE PDPTEs.
+         */
+        if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
+            pgmGstUpdatePaePdpes(pVCpu);
     }
 

@@ -2595 +2565 @@
         AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
     }
+    /*
+     * Update PAE PDPTEs.
+     */
+    else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
+        pgmGstUpdatePaePdpes(pVCpu);
 
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
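With the cached aGstPaePdpeRegs copy gone, the new static pgmGstUpdatePaePdpes only has to drop the cached page-directory mappings; the next access re-reads the PDPTEs from CPUMCTX and remaps on demand. A standalone sketch of that lazy-invalidation pattern, with an invented cache struct and sentinel (not PGM's actual types):

    #include <stddef.h>
    #include <stdint.h>

    #define NIL_PHYS   UINT64_MAX            /* invented "no address" sentinel */

    typedef struct PAEPDCACHE
    {
        void     *apPdMappings[4];           /* cached pointers to mapped page directories */
        uint64_t  aPdPhys[4];                /* guest-physical addresses they were mapped from */
    } PAEPDCACHE;

    /* Invalidate every cached PAE page-directory mapping; the next lookup
       re-reads the PDPTEs (now kept in the guest context) and remaps lazily. */
    static void PaePdCacheInvalidate(PAEPDCACHE *pCache)
    {
        for (size_t i = 0; i < 4; i++)
        {
            pCache->apPdMappings[i] = NULL;
            pCache->aPdPhys[i]      = NIL_PHYS;
        }
    }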
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

r91250 → r91271

@@ -4343 +4343 @@
          * Map the 4 PDs too.
          */
-        PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
+        X86PDPE aGstPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
+        memcpy(&aGstPaePdpes, HCPtrGuestCR3, sizeof(aGstPaePdpes));
+        CPUMSetGuestPaePdpes(pVCpu, &aGstPaePdpes[0]);
         for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
         {
-            pVCpu->pgm.s.aGstPaePdpeRegs[i].u = pGuestPDPT->a[i].u;
-            if (pGuestPDPT->a[i].u & X86_PDPE_P)
+            X86PDPE PaePdpe = aGstPaePdpes[i];
+            if (PaePdpe.u & X86_PDPE_P)
             {
                 RTHCPTR     HCPtr;
-                RTGCPHYS    GCPhys = PGM_A20_APPLY(pVCpu, pGuestPDPT->a[i].u & X86_PDPE_PG_MASK);
+                RTGCPHYS    GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
                 PGM_LOCK_VOID(pVM);
                 PPGMPAGE    pPage  = pgmPhysGetPage(pVM, GCPhys);
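Here the four raw PDPTEs are copied straight out of the host mapping of the page CR3 points at and handed to CPUMSetGuestPaePdpes before each entry is walked. A standalone sketch of reading and decoding one such entry; the bit layout (present bit 0, page-directory address in bits 12..51) follows the x86 PAE format, while the helper names are invented:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define PDPE_P          UINT64_C(0x1)                 /* present bit */
    #define PDPE_PG_MASK    UINT64_C(0x000ffffffffff000)  /* page-directory physical address bits */

    /* Copy the 4 PDPTEs (32 bytes) from the host mapping of the guest page
       that CR3 points at, as the modified code does with memcpy. */
    static void ReadPaePdpes(const void *pvGuestCr3Page, uint64_t aPdpes[4])
    {
        memcpy(aPdpes, pvGuestCr3Page, 4 * sizeof(uint64_t));
    }

    /* True when the entry references a page directory that may be mapped. */
    static bool PdpeIsPresent(uint64_t uPdpe)
    {
        return (uPdpe & PDPE_P) != 0;
    }

    /* Guest-physical address of the page directory the entry points at. */
    static uint64_t PdpePdPhysAddr(uint64_t uPdpe)
    {
        return uPdpe & PDPE_PG_MASK;
    }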
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

r91265 → r91271

@@ -3995 +3995 @@
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
-    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
 
     /* Could happen as a result of longjump. */
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

r91265 → r91271

@@ -5898 +5898 @@
                 if (CPUMIsGuestInPAEModeEx(pCtx))
                 {
-                    rc  = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-                    AssertRC(rc);
-                    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);     AssertRC(rc);
-                    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);     AssertRC(rc);
-                    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);     AssertRC(rc);
-                    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);     AssertRC(rc);
+                    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u);     AssertRC(rc);
+                    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u);     AssertRC(rc);
+                    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u);     AssertRC(rc);
+                    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u);     AssertRC(rc);
                 }
 

@@ -7964 +7962 @@
                         }
 
-                        /* If the guest is in PAE mode, sync back the PDPE's into the guest state.
-                           Note: CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date. */
+                        /*
+                         * If the guest is in PAE mode, sync back the PDPE's into the guest state.
+                         * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
+                         */
                         if (CPUMIsGuestInPAEModeEx(pCtx))
                         {
-                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);     AssertRC(rc);
-                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);     AssertRC(rc);
-                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);     AssertRC(rc);
-                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);     AssertRC(rc);
-                            VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
+                            X86PDPE aPaePdpes[4];
+                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u);     AssertRC(rc);
+                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u);     AssertRC(rc);
+                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u);     AssertRC(rc);
+                            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u);     AssertRC(rc);
+                            if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
+                            {
+                                memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
+                                /* PGM now updates PAE PDPTEs while updating CR3. */
+                                VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
+                            }
                         }
                     }

@@ -8044 +8050 @@
             PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
         }
-
-        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
-            PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-
         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
-        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
     }
 

@@ -10902 +10903 @@
         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     }
-    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
-    {
-        PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
-        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
-    }
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX

@@ -11279 +11275 @@
         {
             VMMRZCallRing3Enable(pVCpu);
-
             Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
-            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
 
 #ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE

@@ -15639 +15633 @@
         case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
         {
+            /*
+             * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
+             * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
+             * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
+             * PAE PDPTEs as well.
+             */
             int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
             AssertRCReturn(rc, rc);
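On the VT-x side the PDPTEs are now exported from pCtx->aPaePdpes and, after a VM-exit, read back into a local array; only if they actually changed are they copied into the context and is VMCPU_FF_HM_UPDATE_CR3 raised, since PGM refreshes the PDPTEs as part of the CR3 update. A simplified standalone sketch of that compare-then-flag step (invented context struct, not the VMX code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct GUESTCTX
    {
        uint64_t aPaePdpes[4];   /* PDPTE copies kept in the guest register context */
    } GUESTCTX;

    /* Returns true when the freshly read PDPTEs differed, meaning the caller
       should queue a CR3/PDPTE refresh for the paging code. */
    static bool SyncBackPaePdpes(GUESTCTX *pCtx, const uint64_t aFromVmcs[4])
    {
        if (memcmp(aFromVmcs, pCtx->aPaePdpes, sizeof(pCtx->aPaePdpes)) != 0)
        {
            memcpy(pCtx->aPaePdpes, aFromVmcs, sizeof(pCtx->aPaePdpes));
            return true;   /* caller sets its "update CR3" force-action flag */
        }
        return false;      /* unchanged: no extra work queued */
    }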
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

r91245 → r91271

@@ -732 +732 @@
     uint64_t const fCpuFFs = VMCPU_FF_TIMER                   | VMCPU_FF_PDM_CRITSECT         | VMCPU_FF_IEM
                            | VMCPU_FF_REQUEST                 | VMCPU_FF_DBGF                 | VMCPU_FF_HM_UPDATE_CR3
-                           | VMCPU_FF_HM_UPDATE_PAE_PDPES     | VMCPU_FF_PGM_SYNC_CR3         | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
+                           | VMCPU_FF_PGM_SYNC_CR3            | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                            | VMCPU_FF_TO_R3                   | VMCPU_FF_IOM;
 
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

r91120 → r91271

@@ -2685 +2685 @@
             SSMR3PutStructEx(pSSM, pZmm16Hi, sizeof(*pZmm16Hi), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmm16HiFields, NULL);
         }
+        SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[0].u);
+        SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[1].u);
+        SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[2].u);
+        SSMR3PutU64(pSSM, pGstCtx->aPaePdpes[3].u);
         if (pVM->cpum.s.GuestFeatures.fSvm)
         {

@@ -2961 +2965 @@
                     PX86XSAVEZMM16HI pZmm16Hi = CPUMCTX_XSAVE_C_PTR(pGstCtx, XSAVE_C_ZMM_16HI_BIT, PX86XSAVEZMM16HI);
                     SSMR3GetStructEx(pSSM, pZmm16Hi, sizeof(*pZmm16Hi), SSMSTRUCT_FLAGS_FULL_STRUCT, g_aCpumZmm16HiFields, NULL);
+                }
+                if (uVersion >= CPUM_SAVED_STATE_VERSION_PAE_PDPES)
+                {
+                    SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[0].u);
+                    SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[1].u);
+                    SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[2].u);
+                    SSMR3GetU64(pSSM, &pGstCtx->aPaePdpes[3].u);
                 }
                 if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_SVM)

@@ -3589 +3600 @@
                 pszPrefix, pCtx->msrSFMASK,
                 pszPrefix, pCtx->msrKERNELGSBASE);
+
+            if (CPUMIsGuestInPAEModeEx(pCtx))
+                for (unsigned i = 0; i < RT_ELEMENTS(pCtx->aPaePdpes); i++)
+                    pHlp->pfnPrintf(pHlp, "%sPAE PDPTE %u  =%016RX64\n", pszPrefix, i, pCtx->aPaePdpes[i]);
             break;
     }
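The saved-state code always writes the four PDPTEs and, on load, reads them only when the stream version is at least CPUM_SAVED_STATE_VERSION_PAE_PDPES (21, per the CPUMInternal.h hunk below), so older saved states remain loadable. A standalone sketch of that version-gated load, using a plain FILE stream as a stand-in for the SSM unit (not the SSM API):

    #include <stdint.h>
    #include <stdio.h>

    #define STATE_VERSION_PAE_PDPES  21   /* version that introduced the PDPTE fields */

    typedef struct GUESTCTX
    {
        uint64_t aPaePdpes[4];
    } GUESTCTX;

    /* Minimal stream reader standing in for SSMR3GetU64 (host byte order, for illustration). */
    static int StreamGetU64(FILE *pStream, uint64_t *pu64)
    {
        return fread(pu64, sizeof(*pu64), 1, pStream) == 1 ? 0 : -1;
    }

    /* Load the PDPTEs only when the saved-state version says they are present;
       older streams simply leave the context values at their defaults. */
    static int LoadPaePdpes(FILE *pStream, uint32_t uVersion, GUESTCTX *pCtx)
    {
        if (uVersion < STATE_VERSION_PAE_PDPES)
            return 0;                       /* field not in older saved states */
        for (unsigned i = 0; i < 4; i++)
        {
            int rc = StreamGetU64(pStream, &pCtx->aPaePdpes[i]);
            if (rc != 0)
                return rc;
        }
        return 0;
    }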
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

r91264 → r91271

@@ -1458 +1458 @@
     }
 
-    /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
-    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
-    {
-        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
-        if (CPUMIsGuestInPAEMode(pVCpu))
-        {
-            PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
-            AssertPtr(pPdpes);
-
-            PGMGstUpdatePaePdpes(pVCpu, pPdpes);
-            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
-        }
-        else
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
-    }
-
     /* IEM has pending work (typically memory write after INS instruction). */
     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

r91249 → r91271

@@ -801 +801 @@
             pPGM->apGstPaePDsR0[i]             = NIL_RTR0PTR;
             pPGM->aGCPhysGstPaePDs[i]          = NIL_RTGCPHYS;
-            pPGM->aGstPaePdpeRegs[i].u         = UINT64_MAX;
             pPGM->aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
         }
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

r91246 → r91271

@@ -2648 +2648 @@
         PRINT_FLAG(VMCPU_FF_,REQUEST);
         PRINT_FLAG(VMCPU_FF_,HM_UPDATE_CR3);
-        PRINT_FLAG(VMCPU_FF_,HM_UPDATE_PAE_PDPES);
         PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
         PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
  • trunk/src/VBox/VMM/include/CPUMInternal.h

r91266 → r91271

@@ -115 +115 @@
  * @{ */
 /** The current saved state version. */
-#define CPUM_SAVED_STATE_VERSION                CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2
+#define CPUM_SAVED_STATE_VERSION                CPUM_SAVED_STATE_VERSION_PAE_PDPES
+/** The saved state version with PAE PDPEs added. */
+#define CPUM_SAVED_STATE_VERSION_PAE_PDPES      21
 /** The saved state version with more virtual VMCS fields and CPUMCTX VMX fields. */
 #define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2   20
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

r91266 → r91271

@@ -224 +224 @@
     alignb 8
     .Guest.fExtrn             resq    1
+    .Guest.aPaePdpes          resq    4
     alignb 8
     .Guest.hwvirt.svm.uMsrHSavePa            resq         1
  • trunk/src/VBox/VMM/include/PGMInternal.h

r91250 → r91271

@@ -3554 +3554 @@
     /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
     RTGCPHYS                        aGCPhysGstPaePDs[4];
-    /** The values of the 4 PDPE CPU registers (PAE).
-     * @todo Not really maintained by PGM atm, only by VT-x in EPT mode. Should
-     *       load on cr3 load and use instead of guest memory version like real
-     *       HW.  We probably should move these to the CPUMCTX and treat them
-     *       like the rest of the register wrt exporting to VT-x and import back. */
-    X86PDPE                         aGstPaePdpeRegs[4];
     /** The physical addresses of the monitored guest page directories (PAE). */
     RTGCPHYS                        aGCPhysGstPaePDsMonitored[4];
