VirtualBox

Changeset 79659 in vbox for trunk/src/VBox/VMM


Timestamp: Jul 10, 2019 8:38:39 AM (6 years ago)
Author: vboxsync
Message:

VMM/HMVMXR0: Nested VMX: bugref:9180 Fix delivering host NMIs to the correct host CPU on the unlikely but possible case where we might get preempted to a different host CPU before dispatching the NMI (only affects some hosts).
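
In outline, the fix records the host CPU on which the NMI was taken (pVmcsInfo->idHostCpu), dispatches the NMI immediately if we are still running on that CPU, and otherwise falls back to an RTMpOnSpecific() cross-call. The sketch below is a minimal illustration of that pattern, not the changeset itself: it reuses only the IPRT calls the diff already uses (RTMpCpuId, RTMpOnSpecific, ASMIntDisableFlags/ASMSetFlags), while dispatchNmiOnCurrentCpu() is a hypothetical stand-in for VMXDispatchHostNmi() and the VMM statistics and VMCS plumbing are omitted.

/*
 * Minimal sketch of the "dispatch on the recorded host CPU" pattern (assumptions:
 * dispatchNmiOnCurrentCpu() is a hypothetical stand-in for VMXDispatchHostNmi();
 * the IPRT calls and headers are the ones the changeset itself uses).
 */
#include <iprt/cdefs.h>
#include <iprt/types.h>
#include <iprt/errcore.h>
#include <iprt/mp.h>
#include <iprt/asm-amd64-x86.h>

/* Hypothetical placeholder for the real NMI dispatcher (VMXDispatchHostNmi). */
static void dispatchNmiOnCurrentCpu(void)
{
    /* ... forward the NMI on whichever CPU we are currently running on ... */
}

/* Worker handed to RTMpOnSpecific(); it runs on the requested target CPU. */
static DECLCALLBACK(void) sketchDispatchNmiWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    RT_NOREF(idCpu, pvUser1, pvUser2);
    dispatchNmiOnCurrentCpu();
}

/* Dispatch a host NMI on the CPU that originally received it (idNmiCpu). */
static int sketchExitHostNmi(RTCPUID idNmiCpu)
{
    /*
     * Fast path: if we are still on the CPU that took the NMI, dispatch it right
     * here.  Interrupts are disabled across the check so the thread cannot be
     * migrated between RTMpCpuId() and the dispatch.
     */
    bool fDispatched = false;
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    if (idNmiCpu == RTMpCpuId())
    {
        dispatchNmiOnCurrentCpu();
        fDispatched = true;
    }
    ASMSetFlags(fEFlags);
    if (fDispatched)
        return VINF_SUCCESS;

    /*
     * Slow path: we were preempted to another CPU.  RTMpOnSpecific() runs the
     * worker on the target CPU and waits for it to complete.
     */
    return RTMpOnSpecific(idNmiCpu, sketchDispatchNmiWorker, NULL /*pvUser1*/, NULL /*pvUser2*/);
}

Disabling interrupts around the CPU-id check is what makes the fast path safe: the thread cannot migrate between RTMpCpuId() and the dispatch, so the NMI always lands on the CPU that received it.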

File: 1 edited

Legend:

  (no prefix)  Unmodified
  +            Added
  -            Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r79653 → r79659)
@@ -26,4 +26,5 @@
 #include <iprt/thread.h>
 #include <iprt/mem.h>
+#include <iprt/mp.h>

 #include <VBox/vmm/pdmapi.h>
     
@@ -10598,14 +10599,66 @@

 /**
- * Wrapper for dispatching host NMIs.
+ * Worker function passed to RTMpOnSpecific() that is to be called on the target
+ * CPU.
+ *
+ * @param   idCpu       The ID for the CPU the function is called on.
+ * @param   pvUser1     Null, not used.
+ * @param   pvUser2     Null, not used.
+ */
+static DECLCALLBACK(void) hmR0DispatchHostNmi(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    RT_NOREF2(pvUser1, pvUser2);
+    VMXDispatchHostNmi();
+}
+
+
+/**
+ * Dispatching a host NMI on the host CPU that received the NMI.
  *
  * @returns VBox status code.
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-static int hmR0VmxExitHostNmi(PVMCPU pVCpu)
-{
-    VMXDispatchHostNmi();
-    STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
-    return VINF_SUCCESS;
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pVmcsInfo   The VMCS info. object corresponding to the VMCS that was
+ *                      executing when receiving the host NMI.
+ */
+static int hmR0VmxExitHostNmi(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
+{
+    RTCPUID const idCpu = pVmcsInfo->idHostCpu;
+
+    /*
+     * We don't want to delay dispatching the NMI any more than we have to. However,
+     * we have already chosen -not- to dispatch NMIs when interrupts were still disabled
+     * after executing guest or nested-guest code for the following reasons:
+     *
+     *   - We would need to perform VMREADs with interrupts disabled and is orders of
+     *     magnitude worse with nested virtualization.
+     *
+     *   - It affects the common VM-exit scenario and keep interrupts disabled for a
+     *     longer period of time just for handling an edge case like host NMIs.
+     *
+     * Let's cover the most likely scenario first. Check if we are on the target CPU
+     * and dispatch the NMI right away. This should be much faster than calling into
+     * RTMpOnSpecific() machinery.
+     */
+    bool fDispatched = false;
+    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
+    if (idCpu == RTMpCpuId())
+    {
+        VMXDispatchHostNmi();
+        fDispatched = true;
+    }
+    ASMSetFlags(fEFlags);
+    if (fDispatched)
+    {
+        STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * RTMpOnSpecific() waits until the worker function has run on the target CPU. So
+     * there should be no race or recursion even if we are unlucky enough to be preempted
+     * (to the target CPU) without dispatching the host NMI above.
+     */
+    STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGCIpi);
+    return RTMpOnSpecific(idCpu, &hmR0DispatchHostNmi, NULL /* pvUser1 */,  NULL /* pvUser2 */);
 }

     
@@ -12656,5 +12709,5 @@
         uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
         if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
-            return hmR0VmxExitHostNmi(pVCpu);
+            return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
     }

     
@@ -14666,5 +14719,5 @@
          */
         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
-        return hmR0VmxExitHostNmi(pVCpu);
+        return hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
     }

     
@@ -16782,5 +16835,5 @@
          */
         case VMX_EXIT_INT_INFO_TYPE_NMI:
-            return hmR0VmxExitHostNmi(pVCpu);
+            return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);

         /*