VirtualBox

Changeset 92541 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Nov 22, 2021 6:35:38 AM
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:10092 Allow forcing mapping/unmapping of CR3 even when the paging mode doesn't actually change. This is required for VMX/SVM guest transitions.
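
For orientation before the per-file hunks, here is a minimal, self-contained sketch of the early-out that this changeset relaxes. All names below (SIMPLEVCPU, simplePgmChangeMode, the toy mode decoding) are illustrative stand-ins rather than the real VMM types; the point is only the shape of the new fForce parameter, which overrides the "mode unchanged, nothing to do" shortcut so CR3 is remapped even when CR0/CR4/EFER decode to the same paging mode:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real code works on PVMCPUCC and PGMMODE. */
    typedef enum { SIMPLE_MODE_REAL, SIMPLE_MODE_PROTECTED, SIMPLE_MODE_LONG } SIMPLEMODE;
    typedef struct { SIMPLEMODE enmGuestMode; } SIMPLEVCPU;

    /* Toy replacement for the real CR0/CR4/EFER decoding. */
    static SIMPLEMODE simpleCalcMode(uint64_t cr0, uint64_t cr4, uint64_t efer)
    {
        (void)cr4;
        if (!(cr0 & UINT64_C(1)))          /* PE clear: real mode */
            return SIMPLE_MODE_REAL;
        if (efer & (UINT64_C(1) << 10))    /* LMA set: long mode */
            return SIMPLE_MODE_LONG;
        return SIMPLE_MODE_PROTECTED;
    }

    /* Shape of the change: without fForce the function returns early when the
     * computed mode equals the current one; with fForce it falls through and
     * remaps CR3 anyway, which the VMX/SVM world-switch paths need because
     * the paging *mode* can be identical while the paging *context* is not. */
    static int simplePgmChangeMode(SIMPLEVCPU *pVCpu, uint64_t cr0, uint64_t cr4,
                                   uint64_t efer, bool fForce)
    {
        SIMPLEMODE const enmGuestMode = simpleCalcMode(cr0, cr4, efer);
        if (   !fForce
            && pVCpu->enmGuestMode == enmGuestMode)
            return 0;                      /* VINF_SUCCESS, nothing to do */
        pVCpu->enmGuestMode = enmGuestMode;
        printf("switching paging context, mode=%d\n", (int)enmGuestMode);
        return 0;
    }

    int main(void)
    {
        SIMPLEVCPU VCpu = { SIMPLE_MODE_PROTECTED };
        simplePgmChangeMode(&VCpu, 1, 0, 0, false /* fForce */); /* early-out */
        simplePgmChangeMode(&VCpu, 1, 0, 0, true  /* fForce */); /* remaps anyway */
        return 0;
    }

The call sites in the hunks below follow this split: the ordinary CR0/CR4/task-switch and NEM state-import paths pass false /* fForce */, while the SVM and VMX world-switch paths pass true /* fForce */.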

Location:
trunk/src/VBox/VMM/VMMAll
Files:
8 edited

Legend:

  (no prefix)  Unmodified
  +            Added
  -            Removed
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r91985 → r92541

     }
     
    +
    +/**
    + * Checks whether the guest is in VMX non-root mode and using EPT paging and the
    + * nested-guest is in PAE mode.
    + *
    + * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
    + * @param   pVCpu   The cross context virtual CPU structure.
    + */
    +VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu)
    +{
    +    return    CPUMIsGuestVmxEptPagingEnabledEx(&pVCpu->cpum.s.Guest)
    +           && CPUMIsGuestInPAEModeEx(&pVCpu->cpum.s.Guest);
    +}
    +
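    As a usage note, the new helper simply ANDs two existing CPUM predicates, so a call site can collapse the pair of queries into one. The fragment below is lifted from the PGMAll.cpp hunk later in this changeset (guard bodies elided):

        /* Before: two separate checks at the call site. */
        if (   CPUMIsGuestVmxEptPagingEnabled(pVCpu)
            && CPUMIsGuestInPAEMode(pVCpu))
        { /* ... nested EPT PAE handling ... */ }

        /* After: one predicate covering VMX non-root + EPT + PAE. */
        if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
        { /* ... nested EPT PAE handling ... */ }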
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r91997 → r92541

             /* ignore informational status codes */
         }
    -    rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
    +    rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
    +                             false /* fForce */);
     
         /* TR selector is at offset 0x16. */

                 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
     
    -            rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
    +            rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
    +                                     false /* fForce */);
                 break;
             }

                 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
     
    -            rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
    +            rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
    +                                     false /* fForce */);
                 break;
             }
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h

    r92493 → r92541

          * see comment in iemMemPageTranslateAndCheckAccess().
          */
    -    int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
    +    int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
    +                           true /* fForce */);
         AssertRCReturn(rc, rc);
     
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r92493 → r92541

          * see comment in iemMemPageTranslateAndCheckAccess().
          */
    -    int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
    +    int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
    +                           true /* fForce */);
         AssertRCReturn(rc, rc);
     
  • trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h

    r92495 → r92541

         if (fMaybeChangedMode)
         {
    -        int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
    +        int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
    +                               false /* fForce */);
             AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
         }
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r92493 → r92541

                               PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
     {
    +    /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
         Assert(   pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
                && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);

     
     #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    -    if (   CPUMIsGuestVmxEptPagingEnabled(pVCpu)
    -        && CPUMIsGuestInPAEMode(pVCpu))
    +    if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
         {
             PGMPTWALK    Walk;

      * @param   cr4         The new cr4.
      * @param   efer        The new extended feature enable register.
    - */
    -VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
    + * @param   fForce      Whether to force a mode change.
    + */
    +VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
     {
         VMCPU_ASSERT_EMT(pVCpu);

          * Did it change?
          */
    -    if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
    +    if (   !fForce
    +        && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
             return VINF_SUCCESS;
     

         PVMCC pVM = pVCpu->CTX_SUFF(pVM);
         PGM_LOCK_VOID(pVM);
    -    if (pVCpu->pgm.s.uEptPtr != uEptPtr)
    -    {
    -        pVCpu->pgm.s.uEptPtr = uEptPtr;
    -        pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
    -        pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
    -    }
    +    pVCpu->pgm.s.uEptPtr = uEptPtr;
         PGM_UNLOCK(pVM);
     }
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r92426 → r92541

     #endif
         {
    +        /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to guest-physical
    +         *        by calling SLAT phys walk. */
    +
             /*
              * Map the page CR3 points at.

     
         /*
    +     * Update second-level address translation info.
    +     */
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    +    pVCpu->pgm.s.pGstEptPml4R3 = 0;
    +    pVCpu->pgm.s.pGstEptPml4R0 = 0;
    +#endif
    +
    +    /*
          * Update shadow paging info.
          */
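    Read together with the PGMAll.cpp hunk above that reduces the EPT-pointer setter to a plain store, these additions relocate the invalidation of the cached EPT PML4 mappings into the paging-mode switch path. A minimal sketch of the resulting pattern, using hypothetical simplified names (the real fields are pgm.s.uEptPtr and pgm.s.pGstEptPml4R0/R3):

        #include <stddef.h>
        #include <stdint.h>

        /* Hypothetical, simplified per-VCPU paging state. */
        typedef struct SIMPLEPGMCPU
        {
            uint64_t uEptPtr;     /* current EPT pointer (EPTP) */
            void    *pvEptPml4;   /* cached mapping of the guest EPT PML4 */
        } SIMPLEPGMCPU;

        /* The setter becomes an unconditional store: it no longer decides
         * whether the cached mapping must be dropped. */
        static void simpleSetEptPtr(SIMPLEPGMCPU *pPgmCpu, uint64_t uEptPtr)
        {
            pPgmCpu->uEptPtr = uEptPtr;
        }

        /* The mode-switch path clears the cache instead, so the next SLAT
         * walk re-maps the PML4; with fForce this now also runs on nested
         * guest transitions where the paging mode itself did not change. */
        static void simpleUpdateSlatInfo(SIMPLEPGMCPU *pPgmCpu)
        {
            pPgmCpu->pvEptPml4 = NULL;
        }

        int main(void)
        {
            SIMPLEPGMCPU PgmCpu = { 0, NULL };
            simpleSetEptPtr(&PgmCpu, UINT64_C(0x1000));
            simpleUpdateSlatInfo(&PgmCpu);  /* called from the mode switch */
            return 0;
        }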
  • trunk/src/VBox/VMM/VMMAll/PGMAllGstSlatEpt.cpp.h

    r92481 → r92541

          *     See Intel spec. Table 26-7 "Exit Qualification for EPT Violations".
          *
    -     *   - X_USER is Cumulative but relevant only when mode-based execute control for EPT
    +     *   - X_USER is cumulative but relevant only when mode-based execute control for EPT
          *     which we currently don't support it (asserted below).
          *