VirtualBox

Changeset 46512 in vbox for trunk


Timestamp: Jun 12, 2013 3:22:14 PM
Author: vboxsync
Message: VMM/HMSVMR0: AMD-V bits.
Location: trunk
Files: 2 edited

  • trunk/include/VBox/vmm/hm_svm.h (r46503 → r46512)

      */
     /** Invalid guest state in VMCB. */
    -#define SVM_EXIT_INVALID                -1
    +#define SVM_EXIT_INVALID                (-1)
     /** Read from CR0-CR15. */
     #define SVM_EXIT_READ_CR0               0x0
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46508 → r46512)

     *   Defined Constants And Macros                                               *
     *******************************************************************************/
    +#ifdef VBOX_WITH_STATISTICS
    +# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
    +        if ((u64ExitCode) == SVM_EXIT_NPF) \
    +            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
    +        else \
    +            STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
    +        } while (0)
    +#else
    +# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
    +#endif
    +
     /** @name Segment attribute conversion between CPU and AMD-V VMCB format.
      *
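The hunk above stops at the head of the segment-attribute section, so the conversion macros themselves fall outside this changeset view. As orientation, a minimal sketch of the conversion being named, assuming the usual AMD-V layout (the VMCB packs the CPU's 16-bit attribute word into 12 bits by dropping the unused bits 8-11; these macro bodies are reconstructed from the spec, not quoted from the file):

    /* CPU attribute word: type/S/DPL/P in bits 0-7, AVL/L/D/G in bits 12-15.
       The VMCB form keeps bits 0-7 and shifts bits 12-15 down to bits 8-11. */
    #define HMSVM_CPU_2_VMCB_SEG_ATTR(a)    ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
    #define HMSVM_VMCB_2_CPU_SEG_ATTR(a)    ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )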
     
     #endif

    -    /** The #VMEXIT exit code. */
    +    /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
         uint64_t        u64ExitCode;
     } SVMTRANSIENT, *PSVMTRANSIENT;
     
         /** Reading this MSR does not cause a VM-exit. */
         SVMMSREXIT_PASSTHRU_READ
    -} VMXMSREXITREAD;
    +} SVMMSREXITREAD;

     /**
     
         /** Writing to this MSR does not cause a VM-exit. */
         SVMMSREXIT_PASSTHRU_WRITE
    -} VMXMSREXITWRITE;
    +} SVMMSREXITWRITE;

     
     *******************************************************************************/
     static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
    +
    +DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);

     
         ASMBitClear(pbMsrBitmap, ulBit + 1);

    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
     }
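This hunk is the tail of hmR0SvmSetMsrPermission(). The indexing it relies on follows the AMD programming manual's MSR-permission-map layout: three 2 KB regions cover MSRs 0000_0000h-0000_1FFFh, C000_0000h-C000_1FFFh and C001_0000h-C001_1FFFh, two bits per MSR (read bit, then write bit), and anything outside those ranges always intercepts. A self-contained sketch of that mapping; the helper name is illustrative, not the changeset's:

    #include <stdint.h>

    /* Bit index of uMsr's read-permission bit inside the 8 KB MSRPM, or -1
       for MSRs outside the architected ranges (those always #VMEXIT).  The
       write bit is the immediately following bit, hence the 'ulBit + 1'
       seen in the hunk above. */
    static int32_t svmMsrpmReadBitNo(uint32_t uMsr)
    {
        if (uMsr <= UINT32_C(0x00001fff))                       /* bytes 0x0000-0x07ff */
            return (int32_t)(uMsr * 2);
        if (uMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))    /* bytes 0x0800-0x0fff */
            return (int32_t)(0x0800 * 8 + (uMsr - UINT32_C(0xc0000000)) * 2);
        if (uMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))    /* bytes 0x1000-0x17ff */
            return (int32_t)(0x1000 * 8 + (uMsr - UINT32_C(0xc0010000)) * 2);
        return -1;
    }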
     

         /* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from memory. */
    -    pVmcb->u64VmcbCleanBits = 0;
    +    pVmcb->ctrl.u64VmcbCleanBits = 0;

         /* The guest ASID MBNZ, set it to 1. The host uses 0. */
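Most of the remaining hunks in this file make one mechanical fix: u64VmcbCleanBits lives in the VMCB control area, so every access gains the ctrl. qualifier. The protocol those lines implement, condensed from statements that appear verbatim elsewhere in this diff:

    /* VMCB clean bits let the CPU keep cached VMCB state across VMRUN. */

    /* 1. Setup: force the CPU to load everything from memory once. */
    pVmcb->ctrl.u64VmcbCleanBits = 0;

    /* 2. Whenever the VMM dirties a VMCB area, invalidate that area's bit. */
    pVmcb->guest.u64CR0 = u64GuestCR0;
    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;

    /* 3. Right after VMRUN has consumed the state, mark everything clean. */
    pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;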
     
     {
         pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
     }
     
     {
         pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
 }
     
 DECLINLINE(void) hmR0SvmRemoveXcptIntercept(uint32_t u32Xcpt)
 {
    -#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    +#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
     if (pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
     {
         pVmcb->ctrl.u32InterceptException &= ~RT_BIT(u32Xcpt);
    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
 #endif
     

         pVmcb->guest.u64CR0 = u64GuestCR0;
    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     }
     
     {
         pVmcb->guest.u64CR2 = pCtx->cr2;
    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
     }
     

             pVmcb->ctrl.u64NestedPagingCR3  = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
    -        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
    +        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
             Assert(pVmcb->ctrl.u64NestedPagingCR3);
             pVmcb->guest.u64CR3 = pCtx->cr3;
     
             pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);

    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
     }
     

         pVmcb->guest.u64CR4 = u64GuestCR4;
    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
     }
     
         HMSVM_LOAD_SEG_REG(GS, gs);

    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
     }
     
         pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
         pVmcb->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
     }
     
         pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
         pVmcb->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
     }
     
     pVmcb->guest.u64DR7 = pCtx->dr[7];
     pVmcb->guest.u64DR6 = pCtx->dr[6];
    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;

     bool fInterceptDB     = false;
     
             pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
             pVmcb->guest.u64DR6 = CPUMGetHyperDR6(pVCpu);
    -        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
    +        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
         }
         Assert(CPUMIsHyperDebugStateActive(pVCpu));
     
             pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
             pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
    -        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
         }
     }
     
             pVmcb->ctrl.u16InterceptRdDRx = 0;
             pVmcb->ctrl.u16InterceptWrDRx = 0;
    -        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
         }
     }
     
 VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
    -    AssertPtr(pVCpu);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     NOREF(pVM);
    +    NOREF(pVCpu);
     NOREF(pCtx);

    -    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    -
    -    /** -xxx- todo. */
    -
    +    /* Nothing to do here. Everything is taken care of in hmR0SvmLongJmpToRing3(). */
     return VINF_SUCCESS;
 }
     

     /*
    -     * Save all the MSRs that can be changed by the guest without causing a world switch.
    -     * FS & GS base are saved with HMSVM_SAVE_SEG_REG.
    +     * Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
    +     */
    +    pMixedCtx->cr2        = pVmcb->guest.u64CR2;
    +
    +    /*
    +     * Guest MSRs.
          */
     pMixedCtx->msrSTAR         = pVmcb->guest.u64STAR;            /* legacy syscall eip, cs & ss */
     
     pMixedCtx->SysEnter.esp    = pVmcb->guest.u64SysEnterESP;

    -    /* Can be updated behind our back in the nested paging case. */
    -    pMixedCtx->cr2        = pVmcb->guest.u64CR2;
    -
    -    /* Segment registers:  CS, SS, DS, ES, FS, GS. */
    +    /*
    +     * Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
    +     */
     HMSVM_SAVE_SEG_REG(CS, cs);
     HMSVM_SAVE_SEG_REG(SS, ss);
     

     /*
    -     * Descriptor Table Registers: TR, IDTR, GDTR, LDTR.
    +     * Guest Descriptor-Table registers.
      */
     HMSVM_SAVE_SEG_REG(TR, tr);
     HMSVM_SAVE_SEG_REG(LDTR, ldtr);
    -
     pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
     pMixedCtx->gdtr.pGdt  = pVmcb->guest.GDTR.u64Base;
     

     /*
    -     * Debug registers.
    +     * Guest Debug registers.
      */
     pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
     
     /*
      * With Nested Paging, CR3 changes are not intercepted. Therefore, sync it now.
    +     * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmps to ring-3.
      */
     if (   pVM->hm.s.fNestedPaging
    …
     {
         CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
    -        PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);       /* This may longjmp to ring-3 hence done at the very end. */
    +        PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);
     }
 }
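The comment added here pins down an ordering constraint worth spelling out. A condensed sketch of the resulting save-path shape, using only calls visible in this diff (the function body in between is abridged):

    /* hmR0SvmSaveGuestState() order: copy everything out of the VMCB first;
       PGMUpdateCR3() goes last because it may longjmp to ring-3, abandoning
       anything that would have executed after it. */
    pMixedCtx->cr2 = pVmcb->guest.u64CR2;               /* then MSRs, segments, DTs, DRx ... */
    if (pVM->hm.s.fNestedPaging)                        /* abridged: the real test has more terms */
    {
        CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
        PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);       /* may longjmp; must be the final step */
    }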
     
     Assert(VMMR0IsLogFlushDisabled(pVCpu));

    -    /* Restore FPU state if necessary and resync on next R0 reentry. */
    +    /* Restore host FPU state if necessary and resync on next R0 reentry. */
     if (CPUMIsGuestFPUStateActive(pVCpu))
     {
    …
     }

    -    /* Restore debug registers if necessary and resync on next R0 reentry. */
    +    /* Restore host debug registers if necessary and resync on next R0 reentry. */
     if (CPUMIsGuestDebugStateActive(pVCpu))
     {
    …
     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    +}
    +
    +
    +/**
    + * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
    + * any remaining host state) before we longjump to ring-3 and possibly get
    + * preempted.
    + *
    + * @param   pVCpu           Pointer to the VMCPU.
    + * @param   enmOperation    The operation causing the ring-3 longjump.
    + * @param   pvUser          The user argument (pointer to the possibly
    + *                          out-of-date guest-CPU context).
    + *
    + * @remarks Must never be called with @a enmOperation ==
    + *          VMMCALLRING3_VM_R0_ASSERTION.
    + */
    +DECLCALLBACK(void) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
    +{
    +    /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
    +    Assert(pVCpu);
    +    Assert(pvUser);
    +    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    +    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    +
    +    VMMRZCallRing3Disable(pVCpu);
    +    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    +    Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
    +    hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
    +    VMMRZCallRing3Enable(pVCpu);
 }
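The wiring for this callback appears further down in this same changeset: the notification is registered just before ring-3 calls are re-enabled around VMRUN, so any longjmp first passes through the state-saving wrapper above:

    VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pMixedCtx);
    VMMRZCallRing3Enable(pVCpu);          /* It is now safe to do longjmps to ring-3. */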
     
     }

    -    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 }
     

     /* Refer AMD spec. 15.20 "Event Injection" for the format. */
    -    uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
     if (enmTrpmEvent == TRPM_TRAP)
     {
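The deleted line was a leftover VMX-ism; AMD-V encodes pending events in the VMCB's EVENTINJ field instead. A hedged sketch of building such a value per AMD spec 15.20 (field layout: bits 7:0 vector, 10:8 type, 11 error-code valid, 31 valid, 63:32 error code; the helper name is illustrative, not the changeset's):

    #include <stdint.h>

    /* Event types per the AMD spec: 0 = external interrupt, 2 = NMI,
       3 = exception, 4 = software interrupt. */
    static uint64_t svmMakeEventInject(uint8_t uVector, uint8_t uType,
                                       int fErrCodeValid, uint32_t uErrCode)
    {
        return (uint64_t)uVector
             | ((uint64_t)(uType & 0x7)       <<  8)
             | ((uint64_t)(fErrCodeValid & 1) << 11)
             | ((uint64_t)1                   << 31)   /* V: event is valid */
             | ((uint64_t)uErrCode            << 32);
    }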
     
      * @param   pVCpu           Pointer to the VMCPU.
      */
    -static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
    +static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
     {
         Assert(pVCpu->hm.s.Event.fPending);
     
     else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
     {
    -        AssertMsg(   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    +        AssertMsg(   uVectorType == SVM_EVENT_SOFTWARE_INT
                       || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
                       ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
     
         pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;     /* Not necessary as we #VMEXIT for delivering the interrupt. */
         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
    -        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     }
 }
     
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    +    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    +
     if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
     {
         HMDumpRegs(pVM, pVCpu, pCtx);
 #ifdef VBOX_STRICT
    +        Log4(("ctrl.u64VmcbCleanBits             %#RX64\n",   pVmcb->ctrl.u64VmcbCleanBits));
         Log4(("ctrl.u16InterceptRdCRx            %#x\n",      pVmcb->ctrl.u16InterceptRdCRx));
         Log4(("ctrl.u16InterceptWrCRx            %#x\n",      pVmcb->ctrl.u16InterceptWrCRx));
     
 {
     /* Check force flag actions that might require us to go back to ring-3. */
    -    int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pCtx);
    +    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
     if (rc != VINF_SUCCESS)
         return rc;
     

     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    -    pVmcb->u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;             /* Mark the VMCB-state cache as unmodified by VMM. */
    +    pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;        /* Mark the VMCB-state cache as unmodified by VMM. */

     /* Restore host's TSC_AUX if required. */
     

     ASMSetFlags(pSvmTransient->uEFlags);                        /* Enable interrupts. */
    -    VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
    +    VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pMixedCtx);
     VMMRZCallRing3Enable(pVCpu);                                /* It is now safe to do longjmps to ring-3!!! */
     

         /* Handle the #VMEXIT. */
    -        AssertMsg(SvmTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    -        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    -
    -        /* -xxx- todo. */
    -
    +        AssertMsg(SvmTransient.u64ExitCode != SVM_EXIT_INVALID, ("%#x\n", SvmTransient.u64ExitCode));
    +        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
    +        rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;
     
 }

    +
    +/**
    + * Handles a #VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
    + *
    + * @returns VBox status code (informational status codes included).
    + * @param   pVCpu           Pointer to the VMCPU.
    + * @param   pMixedCtx       Pointer to the guest-CPU context.
    + * @param   pSvmTransient   Pointer to the SVM transient structure.
    + */
    +DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient)
    +{
    +    int rc;
    +    uint32_t u32ExitCode = pSvmTransient->u64ExitCode;
    +    switch (u32ExitCode)
    +    {
    +    }
    +    return rc;
    +}
    +
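The switch body is still empty at this revision, so rc is returned uninitialized; the per-exit handlers arrive in later changesets. A hedged sketch of the shape this dispatcher is headed for (the case selection and handler names are illustrative, not quoted from any later revision):

    DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient)
    {
        uint32_t u32ExitCode = (uint32_t)pSvmTransient->u64ExitCode;
        switch (u32ExitCode)
        {
            case SVM_EXIT_NPF:    return hmR0SvmExitNestedPF(pVCpu, pMixedCtx, pSvmTransient);
            case SVM_EXIT_CPUID:  return hmR0SvmExitCpuid(pVCpu, pMixedCtx, pSvmTransient);
            case SVM_EXIT_MSR:    return hmR0SvmExitMsr(pVCpu, pMixedCtx, pSvmTransient);
            default:
                AssertMsgFailed(("Unexpected exit code %#x\n", u32ExitCode));
                return VERR_SVM_UNEXPECTED_EXIT;    /* never return an uninitialized rc */
        }
    }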
    +#ifdef DEBUG
    +/* Is there some generic IPRT define for this that is not in Runtime/internal/*? */
    +# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
    +    RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
    +
    +# define HMSVM_ASSERT_PREEMPT_CPUID() \
    +   do \
    +   { \
    +        RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
    +        AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
    +   } while (0)
    +
    +# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
    +            do { \
    +                AssertPtr(pVCpu); \
    +                AssertPtr(pMixedCtx); \
    +                AssertPtr(pSvmTransient); \
    +                Assert(ASMIntAreEnabled()); \
    +                Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
    +                HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
    +                Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
    +                Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
    +                if (VMMR0IsLogFlushDisabled(pVCpu)) \
    +                    HMSVM_ASSERT_PREEMPT_CPUID(); \
    +                HMSVM_STOP_EXIT_DISPATCH_PROF(); \
    +            } while (0)
    +#else   /* Release builds */
    +# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { } while (0)
    +#endif
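These validation macros are meant to open every exit handler in debug builds. A minimal usage sketch (the handler name and body are illustrative):

    /* Illustrative exit handler built on the macros above. */
    static int hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient)
    {
        HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();   /* pointer, preemption and CPU-affinity asserts */
        /* ... interpret CPUID and advance the guest RIP ... */
        return VINF_SUCCESS;
    }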
     2547