VirtualBox

Changeset 96761 in vbox


Ignore:
Timestamp:
Sep 16, 2022 5:40:42 AM (2 years ago)
Author:
vboxsync
Message:

VMM/IEM: Nested VMX: bugref:10092 Comments, assertions. Remove call to PGMShwMakePageNotPresent for the APIC-access page to try to force the shadow table entry to be not-present; this won't do. It must instead be handled conditionally in (SyncHandlerPte).

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp

    r96407 r96761  
    12011201     *     instruction. Interrupt inhibition for any nested-guest instruction
    12021202     *     is supplied by the guest-interruptibility state VMCS field and will
    1203      *     be set up as part of loading the guest state.
     1203     *     be set up as part of loading the guest state. Technically
     1204     *     blocking-by-STI is possible with VMLAUNCH/VMRESUME but we currently
     1205     *     disallow it since we can't distinguish it from blocking-by-MovSS
     1206     *     and no nested-hypervisor we care about uses STI immediately
     1207     *     followed by VMLAUNCH/VMRESUME.
    12041208     *
    12051209     *   - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
     
    12471251    int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
    12481252                           true /* fForce */);
    1249     AssertRCReturn(rc, rc);
     1253    if (RT_SUCCESS(rc))
     1254    { /* likely */ }
     1255    else
     1256        return rc;
    12501257
    12511258    /* Invalidate IEM TLBs now that we've forced a PGM mode change. */
     
    14671474         *        currently. */
    14681475        pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
     1476
     1477        /* Clear inhibition unconditionally since we've ensured it isn't set prior to executing VMLAUNCH/VMRESUME. */
     1478        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    14691479    }
    14701480    /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
     
    24522462    pVmcs->u64RoExitQual.u = u64ExitQual;
    24532463
    2454     LogFlow(("vmexit: reason=%u qual=%#RX64 cs:rip=%04x:%#RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", uExitReason,
     2464    LogFlow(("vmexit: reason=%u qual=%#RX64 cs:rip=%04x:%#RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 eflags=%#RX32\n", uExitReason,
    24552465             pVmcs->u64RoExitQual.u, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0,
    2456              pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4));
     2466             pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.eflags.u32));
    24572467
    24582468    /*
     
    46224632         */
    46234633        iemVmxVirtApicSetPendingWrite(pVCpu, offAccess);
     4634
     4635        LogFlowFunc(("Write access at offset %#x not intercepted -> Wrote %#RX32\n", offAccess, u32Data));
    46244636    }
    46254637    else
     
    46404652        u32Data &= s_auAccessSizeMasks[cbAccess];
    46414653        *(uint32_t *)pvData = u32Data;
     4654
     4655        LogFlowFunc(("Read access at offset %#x not intercepted -> Read %#RX32\n", offAccess, u32Data));
    46424656    }
    46434657
     
    70307044        && (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)))
    70317045        EMSetInhibitInterruptsPC(pVCpu, pVmcs->u64GuestRip.u);
    7032     else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    7033         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     7046    else
     7047        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    70347048
    70357049    /* NMI blocking. */
     
    71467160                                                pVM->iem.s.hVmxApicAccessPage, 0 /*uUser*/, NULL /*pszDesc*/);
    71477161            if (RT_SUCCESS(rc))
    7148             {
    7149                 /*
    7150                  * This to make double sure we trigger EPT violations (rather than EPT misconfigs)
    7151                  * in case we somehow managed to sync the page when CPUMIsGuestVmxApicAccessPageAddr
    7152                  * returned false while sycing its PTE in (SyncHandlerPte).
    7153                  */
    7154                 PGMShwMakePageNotPresent(pVCpu, GCPhysApicAccess, 0 /* fOpFlags */);
    7155             }
     7162            { /* likely */ }
    71567163            else
    71577164                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
     
    98869893        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_DATA_W : IEM_ACCESS_DATA_R;
    98879894        uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
     9895
     9896        LogFlowFunc(("Fault at %#RGp (cbBuf=%u fAccess=%#x)\n", GCPhysFault, cbBuf, fAccess));
    98889897        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
    98899898        if (RT_FAILURE(rcStrict))
     
    99189927     * Handle the VMX APIC-access page only when the guest is in VMX non-root mode.
    99199928     * Otherwise we must deregister the page and allow regular RAM access.
    9920      * Failing to do so lands us with endless EPT misconfiguration VM-exits.
     9929     * Failing to do so lands us with endless EPT VM-exits.
    99219930     */
    99229931    RTGCPHYS const GCPhysPage = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
     
    99409949        uint16_t const offAccess         = GCPhysNestedFault & GUEST_PAGE_OFFSET_MASK;
    99419950        bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, 1 /* cbAccess */, fAccess);
     9951        LogFlowFunc(("#PF at %#RGp (GCPhysNestedFault=%#RGp offAccess=%#x)\n", GCPhysFault, GCPhysNestedFault, offAccess));
    99429952        if (fIntercept)
    99439953        {
     
    99469956             * within the APIC-access page. Currently only HM is supported.
    99479957             */
    9948             AssertMsg(VM_IS_HM_ENABLED(pVM), ("VM-exit auxiliary info. fetching not supported for execution engine %d\n",
    9949                                               pVM->bMainExecutionEngine));
     9958            AssertMsg(VM_IS_HM_ENABLED(pVM),
     9959                      ("VM-exit auxiliary info. fetching not supported for execution engine %d\n", pVM->bMainExecutionEngine));
     9960
    99509961            HMEXITAUX HmExitAux;
    99519962            RT_ZERO(HmExitAux);
     
    99619972             * Refer to @bugref{10092#c33s} for a more detailed explanation.
    99629973             */
    9963             AssertMsg(HmExitAux.Vmx.uReason == VMX_EXIT_EPT_VIOLATION,
    9964                       ("Unexpected call to APIC-access page #PF handler for %#RGp off=%u uErr=%#RGx uReason=%u\n",
    9965                        GCPhysPage, offAccess, uErr, HmExitAux.Vmx.uReason));
     9974            AssertMsgReturn(HmExitAux.Vmx.uReason == VMX_EXIT_EPT_VIOLATION,
     9975                            ("Unexpected call to APIC-access page #PF handler for %#RGp offAcesss=%u uErr=%#RGx uReason=%u\n",
     9976                             GCPhysPage, offAccess, uErr, HmExitAux.Vmx.uReason), VERR_IEM_IPE_7);
    99669977
    99679978            /*
     
    99799990                else
    99809991                    enmAccess = VMXAPICACCESS_LINEAR_READ;
     9992
     9993                /* For linear-address accesss the instruction length must be valid. */
     9994                AssertMsg(HmExitAux.Vmx.cbInstr > 0,
     9995                          ("Invalid APIC-access VM-exit instruction length. cbInstr=%u\n", HmExitAux.Vmx.cbInstr));
    99819996            }
    99829997            else
     
    999010005                    enmAccess = VMXAPICACCESS_PHYSICAL_INSTR;
    999110006                }
     10007
     10008                /* For physical accesses the instruction length is undefined, we zero it for safety and consistency. */
     10009                HmExitAux.Vmx.cbInstr = 0;
    999210010            }
    999310011
     
    1000710025             * Raise the APIC-access VM-exit.
    1000810026             */
     10027            LogFlowFunc(("Raising APIC-access VM-exit from #PF handler at offset %#x\n", offAccess));
    1000910028            VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
    1001010029            return iemExecStatusCodeFiddling(pVCpu, rcStrict);
     
    1001810037         * the APIC-access page (which we derive from the faulting address).
    1001910038         */
     10039        LogFlowFunc(("Access at offset %#x not intercepted -> VINF_EM_RAW_EMULATE_INSTR\n", offAccess));
    1002010040        return VINF_EM_RAW_EMULATE_INSTR;
    1002110041    }
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette