VirtualBox

Changeset 45408 in vbox


Timestamp:
Apr 8, 2013 2:01:55 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
84836
Message:

VMM/VMMR0: HM bits. Fixed an issue with MOV SS and interrupt inhibition. Use interrupt-window exiting for the interrupt-inhibited state as well, for better guest interrupt latency.
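
For context: after STI or MOV SS/POP SS, x86 inhibits interrupts for exactly one more instruction (the "interrupt shadow"), and VT-x records this in the VMCS guest interruptibility-state field that this changeset reworks. A minimal standalone sketch of the decision the new code makes, assuming the two blocking bits from the Intel SDM "Guest Non-Register State" table (illustrative only, not VirtualBox code):

    #include <stdint.h>
    #include <stdio.h>

    /* Bits 0 and 1 of the VMCS guest interruptibility-state field. */
    #define INTR_STATE_BLOCK_STI    (1u << 0)  /* Blocking by STI. */
    #define INTR_STATE_BLOCK_MOVSS  (1u << 1)  /* Blocking by MOV SS / POP SS. */

    /* Choose the value to write into the VMCS: STI blocking is only meaningful
       when STI just set EFLAGS.IF; MOV SS blocks regardless of IF. This mirrors
       the heuristic hmR0VmxLoadGuestIntrState() uses in the diff below. */
    static uint32_t PickIntrState(int fShadowActive, int fEflagsIf)
    {
        if (!fShadowActive)
            return 0;
        return fEflagsIf ? INTR_STATE_BLOCK_STI : INTR_STATE_BLOCK_MOVSS;
    }

    int main(void)
    {
        printf("after STI:    %#x\n", PickIntrState(1, 1));  /* 0x1 */
        printf("after MOV SS: %#x\n", PickIntrState(1, 0));  /* 0x2 */
        printf("no shadow:    %#x\n", PickIntrState(0, 1));  /* 0x0 */
        return 0;
    }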

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (r45387)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (r45408)
@@ -73,8 +73,7 @@
 #define VMX_UPDATED_GUEST_SYSENTER_EIP_MSR      RT_BIT(16)
 #define VMX_UPDATED_GUEST_SYSENTER_ESP_MSR      RT_BIT(17)
-#define VMX_UPDATED_GUEST_INTR_STATE            RT_BIT(18)
-#define VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  RT_BIT(19)
-#define VMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(20)
-#define VMX_UPDATED_GUEST_APIC_STATE            RT_BIT(21)
+#define VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  RT_BIT(18)
+#define VMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(19)
+#define VMX_UPDATED_GUEST_APIC_STATE            RT_BIT(20)
 #define VMX_UPDATED_GUEST_ALL                   (  VMX_UPDATED_GUEST_FPU                   \
                                                  | VMX_UPDATED_GUEST_RIP                   \
     
@@ -95,5 +94,4 @@
                                                  | VMX_UPDATED_GUEST_SYSENTER_EIP_MSR      \
                                                  | VMX_UPDATED_GUEST_SYSENTER_ESP_MSR      \
-                                                 | VMX_UPDATED_GUEST_INTR_STATE            \
                                                  | VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  \
                                                  | VMX_UPDATED_GUEST_ACTIVITY_STATE        \
     
@@ -2436,4 +2434,50 @@
 
 /**
+ * Loads the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
+ * into the guest-state area in the VMCS.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(void) hmR0VmxLoadGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    /*
+     * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
+     * inhibit interrupts or clear any existing interrupt-inhibition.
+     */
+    uint32_t uIntrState = 0;
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+    {
+        /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
+        AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (VMX_UPDATED_GUEST_RIP | VMX_UPDATED_GUEST_RFLAGS)),
+                  ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
+        if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
+        {
+            /*
+             * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
+             * VT-x the flag's condition to be cleared is met and thus the cleared state is correct.
+             * hmR0VmxInjectPendingInterrupt() relies on us clearing this flag here.
+             */
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+        }
+        else if (pMixedCtx->eflags.Bits.u1IF)
+            uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
+        else
+            uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
+    }
+
+    Assert(!(uIntrState & 0xfffffff0));                             /* Bits 31:4 MBZ. */
+    Assert((uIntrState & 0x3) != 0x3);                              /* Block-by-STI and MOV SS cannot be simultaneously set. */
+    int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
+    AssertRC(rc);
+}
+
+
+/**
  * Loads the guest's RIP into the guest-state area in the VMCS.
  *
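
The pivotal check in the new hmR0VmxLoadGuestIntrState() above is the RIP comparison: the shadow covers exactly one instruction, so once the guest's RIP no longer matches the PC recorded when the inhibition was established, the force-flag can simply be dropped. A hypothetical standalone illustration of that rule (struct and names invented here, not VirtualBox APIs):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct SHADOWSTATE
    {
        uint64_t uRip;        /* Current guest RIP. */
        uint64_t uInhibitPC;  /* RIP recorded when the STI/MOV SS completed. */
        bool     fInhibit;    /* Interrupt-inhibition force-flag. */
    } SHADOWSTATE;

    /* The shadow only applies to the instruction immediately after STI/MOV SS,
       so once RIP has moved past the recorded PC the inhibition is stale and
       can be cleared without consulting the VMCS at all. */
    static void ClearStaleShadow(SHADOWSTATE *pState)
    {
        if (pState->fInhibit && pState->uRip != pState->uInhibitPC)
            pState->fInhibit = false;
    }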
     
@@ -3599,63 +3643,4 @@
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
     }
-    return rc;
-}
-
-
-/**
- * Loads the guest-interruptibility state (or "interrupt shadow" as AMD calls
- * it) into the guest-state area in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- * @remarks Requires RIP, RFLAGS.
- */
-DECLINLINE(int) hmR0VmxLoadGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_INTR_STATE))
-        return VINF_SUCCESS;
-
-    /*
-     * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
-     * inhibit interrupts or clear any existing interrupt-inhibition.
-     */
-    uint32_t uIntrState = 0;
-    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
-    {
-        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
-        {
-            /*
-             * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in VT-x
-             * the flag's condition to be cleared is met and thus the cleared state is correct. Additionally, this means
-             * we need not re-read the VMCS field on the VM-exit path and clear/set this flag on every VM-exit. Finally,
-             * hmR0VmxInjectPendingInterrupt() relies on us clearing this flag here.
-             */
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-            uIntrState = 0;     /* Clear interrupt inhibition. */
-        }
-        else if (pCtx->eflags.u32 & X86_EFL_IF)
-        {
-            /** @todo Pretty sure we don't need to check for Rflags.IF here.
-             *        Interrupt-shadow only matters when RIP changes. */
-            /*
-             * We don't have enough information to distinguish a block-by-STI vs. block-by-MOV SS. Intel seems to think there
-             * is a slight difference regarding MOV SS additionally blocking some debug exceptions.
-             * See Intel spec. 24.2.2 "Guest Non-Register State" table "Format of Interruptibility State".
-             */
-            uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
-        }
-    }
-    else
-        uIntrState = 0;         /* No interrupt inhibition. */
-
-    Assert((uIntrState & 0x3) != 0x3);                              /* Block-by-STI and MOV SS cannot be simultaneously set. */
-    Assert((pCtx->eflags.u32 & X86_EFL_IF) || uIntrState == 0);     /* If EFLAGS.IF is not set, no interrupt inhibition. */
-    int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
-    AssertRCReturn(rc ,rc);
-    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_INTR_STATE;
     return rc;
 }
     
@@ -4444,5 +4429,5 @@
  * @param   uVector     The exception vector.
  */
-DECLINLINE(bool) hmR0VmxIsBenignXcpt(const uint8_t uVector)
+DECLINLINE(bool) hmR0VmxIsBenignXcpt(const uint32_t uVector)
 {
     switch (uVector)
     
@@ -4474,5 +4459,5 @@
  * @param   uVector     The exception vector.
  */
-DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint8_t uVector)
+DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
 {
     switch (uVector)
     
@@ -4540,7 +4525,7 @@
         AssertRCReturn(rc, rc);
 
-        uint8_t uIntType    = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
-        uint8_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
-        uint8_t uIdtVector  = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
+        uint32_t uIntType    = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
+        uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
+        uint32_t uIdtVector  = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
 
         typedef enum
     
@@ -4751,5 +4736,5 @@
 
     RTGCUINTREG uVal = 0;
-    int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP,     &uVal);
+    int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uVal);
     AssertRCReturn(rc, rc);
     pMixedCtx->rsp = uVal;
     
@@ -4810,7 +4795,7 @@
 
 /**
- * Saves the guest's interruptibility state.
- *
- * @returns VBox status code.
+ * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
+ * from the guest-state area in the VMCS.
+ *
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
     
@@ -4821,11 +4806,10 @@
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_INTR_STATE)
-        return VINF_SUCCESS;
-
+DECLINLINE(void) hmR0VmxSaveGuestIntrState(PVM pVM,  PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
     uint32_t uIntrState = 0;
     int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
+    AssertRC(rc);
+
     if (!uIntrState)
         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     
@@ -4835,12 +4819,9 @@
                || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
         rc  = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);    /* RFLAGS is needed in hmR0VmxLoadGuestIntrState(). */
-        AssertRCReturn(rc, rc);
+        rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);    /* for hmR0VmxLoadGuestIntrState(). */
+        AssertRC(rc);
         EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
         Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     }
-
-    pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_INTR_STATE;
-    return rc;
 }
 
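
On the VM-exit side, hmR0VmxSaveGuestIntrState() now mirrors the hardware interruptibility-state into VirtualBox's force-flag unconditionally instead of tracking a VMX_UPDATED_GUEST_INTR_STATE bit. Roughly, the flow is as in this condensed sketch (the two extern helpers are hypothetical stand-ins for the VMCS read and the VMCPU/EM calls):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins, not VirtualBox APIs. */
    extern uint32_t ReadVmcsIntrState(void);                 /* reads VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE */
    extern void     SetInhibitFlag(bool fSet, uint64_t uPc); /* VMCPU_FF_* plus EMSetInhibitInterruptsPC */

    /* On VM-exit, mirror the hardware interruptibility-state into the
       scheduler's force-flag: no blocking bits means no inhibition; STI or
       MOV SS blocking means the shadow must survive a potential trip to
       ring-3, so record the RIP it belongs to. */
    static void SaveIntrStateSketch(uint64_t uGuestRip)
    {
        uint32_t uIntrState = ReadVmcsIntrState();
        if (!uIntrState)
            SetInhibitFlag(false, 0);
        else /* block-by-STI or block-by-MOV SS */
            SetInhibitFlag(true, uGuestRip);
    }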
     
@@ -5309,7 +5290,4 @@
     rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    rc = hmR0VmxSaveGuestIntrState(pVM, pVCpu, pMixedCtx);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestIntrState failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
     rc = hmR0VmxSaveGuestActivityState(pVM, pVCpu, pMixedCtx);
     
@@ -5497,5 +5475,4 @@
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-    Log(("hmR0VmxLongJmpToRing3: rcExit=%d\n", rcExit));
 
     int rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
     
@@ -5603,4 +5580,5 @@
 
     VMMRZCallRing3Disable(pVCpu);
+    Log(("hmR0VmxLongJmpToRing3\n"));
     hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
     VMMRZCallRing3Enable(pVCpu);
     
@@ -5687,5 +5665,5 @@
  *                          before using them.
  *
- * @remarks This must be called only after hmR0VmxSaveGuestIntrState().
+ * @remarks Must be called after hmR0VmxLoadGuestIntrState().
  */
 static int hmR0VmxInjectPendingInterrupt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     
@@ -5727,19 +5705,9 @@
         {
             /*
-             * When external interrupts are pending and the guest has disabled interrupts, cause a VM-exit using "interrupt-window
-             * exiting" so we can deliver the interrupt when the guest is ready to receive them. Otherwise, if the guest
-             * can receive interrupts now, convert the PDM interrupt into a TRPM event and inject it.
+             * If the guest can receive interrupts now (interrupts enabled and no interrupt inhibition is active) convert
+             * the PDM interrupt into a TRPM event and inject it.
              */
-            if (!(pMixedCtx->eflags.u32 & X86_EFL_IF))  /** @todo we can use interrupt-window exiting for block-by-STI. */
-            {
-                if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
-                {
-                    pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
-                    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
-                    AssertRCReturn(rc, rc);
-                }
-                /* else we will deliver interrupts whenever the guest exits next and it's in a state to receive interrupts. */
-            }
-            else if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+            if (   (pMixedCtx->eflags.u32 & X86_EFL_IF)
+                && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
             {
                 uint8_t u8Interrupt = 0;
     
@@ -5753,9 +5721,17 @@
             else
             {
-                /** @todo Does this actually happen? If not turn it into an assertion.   */
+                /** @todo Does this actually happen? If not turn it into an assertion. */
                 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
             }
             }
+            else if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
+            {
+                /* Instruct VT-x to cause an interrupt-window exit as soon as the guest is ready to receive interrupts again. */
+                pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
+                rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
+                AssertRCReturn(rc, rc);
+            }
+            /* else we will deliver interrupts whenever the guest exits next and it's in a state to receive interrupts. */
         }
     }
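
This hunk is the heart of the changeset: an interrupt is injected immediately only when RFLAGS.IF is set and no shadow is active; in every other case, now including the interrupt-inhibited state the old code left unhandled, the interrupt-window exiting control is armed so the CPU exits the instant the guest becomes interruptible, and the exit handler further below disarms it again. A minimal sketch of that arm/disarm pattern, assuming interrupt-window exiting is bit 2 of the primary processor-based controls per the Intel SDM (helper name invented):

    #include <stdint.h>

    #define PROC_CTLS_INT_WINDOW_EXIT  (1u << 2)  /* Interrupt-window exiting (primary proc-based controls). */

    /* Hypothetical stand-in for VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, ...). */
    extern void WriteProcCtls(uint32_t uProcCtls);

    /* Arm: request a VM-exit as soon as the guest can take interrupts
       (RFLAGS.IF = 1 and no STI/MOV SS shadow). Idempotent on purpose. */
    static void ArmIntWindow(uint32_t *puProcCtls)
    {
        if (!(*puProcCtls & PROC_CTLS_INT_WINDOW_EXIT))
        {
            *puProcCtls |= PROC_CTLS_INT_WINDOW_EXIT;
            WriteProcCtls(*puProcCtls);
        }
    }

    /* Disarm in the interrupt-window VM-exit handler: the guest is now
       interruptible, so the pending interrupt can be injected on re-entry. */
    static void DisarmIntWindow(uint32_t *puProcCtls)
    {
        *puProcCtls &= ~PROC_CTLS_INT_WINDOW_EXIT;
        WriteProcCtls(*puProcCtls);
    }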
     
@@ -6232,7 +6208,4 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    rc = hmR0VmxLoadGuestIntrState(pVM, pVCpu, pCtx);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestIntrState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
     rc = hmR0VmxSetupVMRunHandler(pVM, pVCpu, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     
@@ -6326,4 +6299,5 @@
      * This is why this is done after all possible exits-to-ring-3 paths in this code.
      */
+    hmR0VmxLoadGuestIntrState(pVM, pVCpu, pMixedCtx);
     rc = hmR0VmxInjectPendingInterrupt(pVM, pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
     
@@ -6472,21 +6446,21 @@
     }
 
-    /* We need to update our interruptibility-state on every VM-exit and VM-entry. */
-    rc = hmR0VmxSaveGuestIntrState(pVM, pVCpu, pMixedCtx);
-    AssertRC(rc);
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_INTR_STATE;
-
-    /*
-     * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever we
-     * eventually get a VM-exit for any reason. This maybe expensive as PDMApicSetTPR() can longjmp to ring-3; also why we do
-     * it outside of hmR0VmxSaveGuestState() which must never cause longjmps.
-     */
-    if (   !pVmxTransient->fVMEntryFailed
-        && (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
-        && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
-    {
-        rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
-        AssertRC(rc);
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
+    if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
+    {
+        /* Update the guest interruptibility-state from the VMCS. */
+        hmR0VmxSaveGuestIntrState(pVM, pVCpu, pMixedCtx);
+
+        /*
+         * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
+         * we eventually get a VM-exit for any reason. This maybe expensive as PDMApicSetTPR() can longjmp to ring-3; also why
+         * we do it outside of hmR0VmxSaveGuestState() which must never cause longjmps.
+         */
+        if (   (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
+            && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
+        {
+            rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
+            AssertRC(rc);
+            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
+        }
     }
 }
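
The restructured block above also shows the lazy TPR sync: with the TPR shadow in use, guest TPR writes land in byte 0x80 of the virtual-APIC page without causing a VM-exit, so after each exit the host compares that byte against the snapshot taken before VM-entry and only calls PDMApicSetTPR() when it actually changed. A sketch of just that comparison (the helper is a hypothetical stand-in for PDMApicSetTPR()):

    #include <stdint.h>

    #define VAPIC_OFF_TPR  0x80  /* TPR offset within the virtual-APIC page, as used above. */

    extern int SetApicTpr(uint8_t u8Tpr);  /* hypothetical stand-in */

    /* Only push the TPR to the emulated APIC when the guest changed it while
       running; u8TprAtEntry is the snapshot taken before VM-entry. */
    static void SyncTprLazily(const uint8_t *pbVirtApic, uint8_t u8TprAtEntry)
    {
        if (pbVirtApic[VAPIC_OFF_TPR] != u8TprAtEntry)
            SetApicTpr(pbVirtApic[VAPIC_OFF_TPR]);
    }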
     
@@ -6722,14 +6696,8 @@
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = VERR_INTERNAL_ERROR_5;
-#ifdef DEBUG
-    rc = hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
-    AssertRC(rc);
-    Assert(pMixedCtx->eflags.u32 & X86_EFL_IF);
-#endif
 
     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
     pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     AssertRCReturn(rc, rc);
 
     
@@ -7478,5 +7446,5 @@
 
     const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
-    const uint8_t uAccessType            = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
+    const uint32_t uAccessType           = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
     switch (uAccessType)
     {
     
@@ -8306,5 +8274,5 @@
                 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
                 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
-                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_INTR_STATE | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
+                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
                 break;