VirtualBox

Changeset 55129 in vbox


Timestamp: Apr 8, 2015 11:31:47 AM
Author:    vboxsync
Message:   VMM/GIM: Allow dynamic enabling of #UD traps and per-VCPU hypercalls.
Location:  trunk
Files:     15 edited

Legend: unchanged context lines are shown as-is; '-' marks lines removed in r55129, '+' marks lines added, and '…' marks omitted context between hunks.
  • trunk/include/VBox/vmm/gim.h

    r55118 r55129  
     VMM_INT_DECL(int)           GIMHypercall(PVMCPU pVCpu, PCPUMCTX pCtx);
     VMM_INT_DECL(int)           GIMXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis);
    -VMM_INT_DECL(bool)          GIMShouldTrapXcptUD(PVM pVM);
    +VMM_INT_DECL(bool)          GIMShouldTrapXcptUD(PVMCPU pVCpu);
     VMM_INT_DECL(VBOXSTRICTRC)  GIMReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
     VMM_INT_DECL(VBOXSTRICTRC)  GIMWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue);
  • trunk/include/VBox/vmm/hm.h

    r55118 r55129  
     VMM_INT_DECL(int)               HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
     VMM_INT_DECL(bool)              HMSetSingleInstruction(PVMCPU pVCpu, bool fEnable);
    -VMM_INT_DECL(void)              HMHypercallsEnable(PVM pVM);
    -VMM_INT_DECL(void)              HMHypercallsDisable(PVM pVM);
    +VMM_INT_DECL(void)              HMHypercallsEnable(PVMCPU pVCpu);
    +VMM_INT_DECL(void)              HMHypercallsDisable(PVMCPU pVCpu);

     #ifndef IN_RC
  • trunk/include/VBox/vmm/vmm.h

    r55118 r55129  
     VMM_INT_DECL(void)          VMMTrashVolatileXMMRegs(void);
     VMM_INT_DECL(int)           VMMPatchHypercall(PVM pVM, void *pvBuf, size_t cbBuf, size_t *pcbWritten);
    -VMM_INT_DECL(void)          VMMHypercallsEnable(PVM pVM);
    -VMM_INT_DECL(void)          VMMHypercallsDisable(PVM pVM);
    +VMM_INT_DECL(void)          VMMHypercallsEnable(PVMCPU pVCpu);
    +VMM_INT_DECL(void)          VMMHypercallsDisable(PVMCPU pVCpu);

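    The three header hunks above replace the VM-wide entry points with per-VCPU ones. A minimal sketch of how a caller that only holds a PVM adapts to the new signatures, following the aCpus iteration pattern this changeset itself uses in GIMHv.cpp below (illustrative only, not part of the commit):

        /* Before (r55118): one call per VM. */
        VMMHypercallsEnable(pVM);

        /* After (r55129): one call per virtual CPU; a VM-wide caller iterates
           its VCPUs via pVM->cCpus / pVM->aCpus[]. */
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
            VMMHypercallsEnable(&pVM->aCpus[i]);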
  • trunk/src/VBox/VMM/VMMAll/GIMAll.cpp

    r55118 r55129  
      *
      * @returns true if needed, false otherwise.
    - * @param   pVM         Pointer to the VM.
    - */
    -VMM_INT_DECL(bool) GIMShouldTrapXcptUD(PVM pVM)
    -{
    + * @param   pVCpu       Pointer to the VMCPU.
    + */
    +VMM_INT_DECL(bool) GIMShouldTrapXcptUD(PVMCPU pVCpu)
    +{
    +    PVM pVM = pVCpu->CTX_SUFF(pVM);
         if (!GIMIsEnabled(pVM))
             return false;
    …
         {
             case GIMPROVIDERID_KVM:
    -            return gimKvmShouldTrapXcptUD(pVM);
    +            return gimKvmShouldTrapXcptUD(pVCpu);

             default:
  • trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp

    r55118 r55129  
      * For raw-mode VMs, this function will always return true. See gimR3KvmInit().
      *
    - * @param   pVM         Pointer to the VM.
    - */
    -VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVM pVM)
    -{
    + * @param   pVCpu       Pointer to the VMCPU.
    + */
    +VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVMCPU pVCpu)
    +{
    +    PVM pVM = pVCpu->CTX_SUFF(pVM);
         return pVM->gim.s.u.Kvm.fTrapXcptUD;
     }
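    Both per-VCPU variants recover the VM pointer through CTX_SUFF(pVM). For orientation, the macro selects the context-specific member of the VMCPU structure; roughly the following (an illustrative expansion, assuming the usual pVMR3/pVMR0/pVMRC members and IN_RING3/IN_RING0/IN_RC build macros):

        /* Roughly what pVCpu->CTX_SUFF(pVM) resolves to, per build context: */
        #if defined(IN_RING3)
            PVM pVM = pVCpu->pVMR3;    /* ring-3 */
        #elif defined(IN_RING0)
            PVM pVM = pVCpu->pVMR0;    /* ring-0 */
        #else
            PVM pVM = pVCpu->pVMRC;    /* raw-mode context */
        #endif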
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r55118 r55129  
      * Notifies HM that paravirtualized hypercalls are now enabled.
      *
    - * @param   pVM     Pointer to the VM.
    - */
    -VMM_INT_DECL(void) HMHypercallsEnable(PVM pVM)
    -{
    -    pVM->hm.s.fHypercallsEnabled = true;
    + * @param   pVCpu   Pointer to the VMCPU.
    + */
    +VMM_INT_DECL(void) HMHypercallsEnable(PVMCPU pVCpu)
    +{
    +    pVCpu->hm.s.fHypercallsEnabled = true;
     }

    …
      * Notifies HM that paravirtualized hypercalls are now disabled.
      *
    - * @param   pVM     Pointer to the VM.
    - */
    -VMM_INT_DECL(void) HMHypercallsDisable(PVM pVM)
    -{
    -    pVM->hm.s.fHypercallsEnabled = false;
    -}
    -
    + * @param   pVCpu   Pointer to the VMCPU.
    + */
    +VMM_INT_DECL(void) HMHypercallsDisable(PVMCPU pVCpu)
    +{
    +    pVCpu->hm.s.fHypercallsEnabled = false;
    +}
    +
    +
    +/**
    + * Notifies HM that GIM provider wants to trap #UD.
    + *
    + * @param   pVCpu   Pointer to the VMCPU.
    + */
    +VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
    +{
    +    pVCpu->hm.s.fGIMTrapXcptUD = true;
    +    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +}
    +
    +
    +/**
    + * Notifies HM that GIM provider no longer wants to trap #UD.
    + *
    + * @param   pVCpu   Pointer to the VMCPU.
    + */
    +VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
    +{
    +    pVCpu->hm.s.fGIMTrapXcptUD = false;
    +    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +}
    +
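    The two new notification routines are what makes the #UD trap dynamic: a GIM provider can flip the intercept at runtime, and the HM_CHANGED_GUEST_XCPT_INTERCEPTS flag ensures the hardware intercept (SVM VMCB bit or VT-x exception bitmap) is reloaded before the next VM-entry. A usage sketch from a provider's point of view (the helper name gimSomeProviderToggleXcptUD is hypothetical):

        /* Hypothetical provider helper: request or drop the #UD intercept on every VCPU. */
        static void gimSomeProviderToggleXcptUD(PVM pVM, bool fTrap)
        {
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
            {
                PVMCPU pVCpu = &pVM->aCpus[i];
                if (fTrap)
                    HMTrapXcptUDForGIMEnable(pVCpu);    /* sets fGIMTrapXcptUD + dirty flag */
                else
                    HMTrapXcptUDForGIMDisable(pVCpu);   /* clears it + dirty flag */
            }
        }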
  • trunk/src/VBox/VMM/VMMAll/VMMAll.cpp

    r55118 r55129  
      * Notifies VMM that paravirtualized hypercalls are now enabled.
      *
    - * @param   pVM     Pointer to the VM.
    - */
    -VMM_INT_DECL(void) VMMHypercallsEnable(PVM pVM)
    + * @param   pVCpu   Pointer to the VMCPU.
    + */
    +VMM_INT_DECL(void) VMMHypercallsEnable(PVMCPU pVCpu)
     {
         /* If there is anything to do for raw-mode, do it here. */
     #ifndef IN_RC
    -    if (HMIsEnabled(pVM))
    -        HMHypercallsEnable(pVM);
    +    if (HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
    +        HMHypercallsEnable(pVCpu);
     #endif
     }
    …
      * Notifies VMM that paravirtualized hypercalls are now disabled.
      *
    - * @param   pVM     Pointer to the VM.
    - */
    -VMM_INT_DECL(void) VMMHypercallsDisable(PVM pVM)
    + * @param   pVCpu   Pointer to the VMCPU.
    + */
    +VMM_INT_DECL(void) VMMHypercallsDisable(PVMCPU pVCpu)
     {
         /* If there is anything to do for raw-mode, do it here. */
     #ifndef IN_RC
    -    if (HMIsEnabled(pVM))
    -        HMHypercallsDisable(pVM);
    -#endif
    -}
    -
    +    if (HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
    +        HMHypercallsDisable(pVCpu);
    +#endif
    +}
    +
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r55118 r55129  
         pVM->hm.s.uMaxAsid                  = g_HvmR0.uMaxAsid;

    -    pVM->hm.s.fGIMTrapXcptUD            = GIMShouldTrapXcptUD(pVM);
    -
         if (!pVM->hm.s.cMaxResumeLoops) /* allow ring-3 overrides */
         {
    …
         {
             PVMCPU pVCpu = &pVM->aCpus[i];
    -        pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
    -        pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
    +        pVCpu->hm.s.idEnteredCpu   = NIL_RTCPUID;
    +        pVCpu->hm.s.idLastCpu      = NIL_RTCPUID;
    +        pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu);

             /* We'll aways increment this the first time (host uses ASID 0). */
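    With this hunk the #UD trap flag has two sources: it is seeded per VCPU once at HM ring-0 VM-init time (above), and it can later be flipped at runtime through the new HMTrapXcptUDForGIMEnable/Disable notifications. A condensed view of the lifecycle, pieced together from the hunks in this changeset (illustrative, error handling elided):

        /* VM init (ring-0): seed the flag from the GIM provider.              */
        pVCpu->hm.s.fGIMTrapXcptUD = GIMShouldTrapXcptUD(pVCpu);

        /* Runtime: provider toggles the trap; the dirty flag makes the next   */
        /* VM-entry reload the SVM/VT-x exception intercepts for this VCPU.    */
        HMTrapXcptUDForGIMEnable(pVCpu);      /* or HMTrapXcptUDForGIMDisable() */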
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r55118 r55129  
         Assert(pVM->hm.s.svm.fSupported);

    -    uint32_t const fGimXcptIntercepts = pVM->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;
         for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
    …

             /* Apply the exceptions intercepts needed by the GIM provider. */
    -        pVmcb->ctrl.u32InterceptException |= fGimXcptIntercepts;
    +        if (pVCpu->hm.s.fGIMTrapXcptUD)
    +            pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_UD);

             /*
    …

     /**
    + * Loads the exception interrupts required for guest execution in the VMCB.
    + *
    + * @returns VBox status code.
    + * @param   pVCpu       Pointer to the VMCPU.
    + * @param   pVmcb       Pointer to the VM control block.
    + * @param   pCtx        Pointer to the guest-CPU context.
    + */
    +static int hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    +{
    +    int rc = VINF_SUCCESS;
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
    +    {
    +        if (pVCpu->hm.s.fGIMTrapXcptUD)
    +            hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_UD);
    +        else
    +            hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_UD);
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +    }
    +    return rc;
    +}
    +
    +
    +/**
      * Sets up the appropriate function to run guest code.
      *
    …
         rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
         AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    +
    +    rc = hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx);
    +    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

         rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
    …
         else if (rc == VERR_NOT_FOUND)
         {
    -        PVM pVM = pVCpu->CTX_SUFF(pVM);
    -        if (pVM->hm.s.fHypercallsEnabled)
    +        if (pVCpu->hm.s.fHypercallsEnabled)
             {
                 rc = GIMHypercall(pVCpu, pCtx);
    …
         HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();

    -    PVM pVM = pVCpu->CTX_SUFF(pVM);
    -    if (pVM->hm.s.fGIMTrapXcptUD)
    +    if (pVCpu->hm.s.fGIMTrapXcptUD)
             GIMXcptUD(pVCpu, pCtx, NULL /* pDis */);
         else
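    The new hmR0SvmLoadGuestXcptIntercepts only touches the VMCB when HM_CHANGED_GUEST_XCPT_INTERCEPTS is pending, so toggling the trap stays cheap until the next VM-entry. The HMCPU_CF_* accessors it relies on are a per-VCPU dirty-flag protocol; a simplified model of the idea (not the actual VirtualBox macros from HMInternal.h, and the fContextUseFlags field name is assumed):

        /* Simplified model of the HMCPU_CF_* dirty-flag protocol (illustrative only). */
        #define MY_CF_SET(pVCpu, fFlag)        ((pVCpu)->hm.s.fContextUseFlags |=  (fFlag))
        #define MY_CF_CLEAR(pVCpu, fFlag)      ((pVCpu)->hm.s.fContextUseFlags &= ~(fFlag))
        #define MY_CF_IS_PENDING(pVCpu, fFlag) RT_BOOL((pVCpu)->hm.s.fContextUseFlags & (fFlag))
        /* Writers (e.g. HMTrapXcptUDForGIMEnable) SET the bit; the per-entry load code
           tests IS_PENDING, applies the change to the VMCB/VMCS, then CLEARs the bit. */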
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r55118 r55129  
         LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));

    -    uint32_t u32XcptBitmap = 0;
    +    uint32_t u32XcptBitmap = pVCpu->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;

         /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
    …

     /**
    + * Loads the exception intercepts required for guest execution in the VMCS.
    + *
    + * @returns VBox status code.
    + * @param   pVCpu       Pointer to the VMCPU.
    + * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    + *                      out-of-sync. Make sure to update the required fields
    + *                      before using them.
    + */
    +static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    +{
    +    NOREF(pMixedCtx);
    +    int rc = VINF_SUCCESS;
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
    +    {
    +        /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */
    +        if (pVCpu->hm.s.fGIMTrapXcptUD)
    +            pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD);
    +        else
    +        {
    +#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    +            pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_UD);
    +#endif
    +        }
    +
    +        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    +        AssertRCReturn(rc, rc);
    +
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +        Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
    +              pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));
    +    }
    +    return rc;
    +}
    +
    +
    +/**
      * Loads the guest's RIP into the guest-state area in the VMCS.
      *
    …
                 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
             }
    +        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);

             if (fInterceptNM)
    …
             u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);          /* Always enable caching. */

    -        /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
    +        /* Write VT-x's view of the guest CR0 into the VMCS. */
             rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
    -        AssertRCReturn(rc, rc);
    -        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
             AssertRCReturn(rc, rc);
             Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
    …
         if (   fInterceptDB
             || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    +    {
             pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
    +        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +    }
         else
         {
     #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
             pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
    -#endif
    -    }
    -    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    -    AssertRCReturn(rc, rc);
    +        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    +#endif
    +    }

         /*
    …
         AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

    +    rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx);
    +    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    +
         /*
          * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
    …
     #endif
             HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
    +    }
    +
    +    /* Loading CR0, debug state might have changed intercepts, update VMCS. */
    +    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
    +    {
    +        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    +        AssertRC(rc);
    +        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
         }

    …
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);

    -    PVM pVM = pVCpu->CTX_SUFF(pVM);
    -    if (pVM->hm.s.fHypercallsEnabled)
    +    if (pVCpu->hm.s.fHypercallsEnabled)
         {
     #if 0
    …
     #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
                 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
    -            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    -            AssertRCReturn(rc, rc);
    +            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
     #endif
             }
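    For readers decoding the bitmap manipulation in the VT-x and AMD-V hunks: both the VMX exception bitmap and the SVM u32InterceptException field are indexed by exception vector, so the constants resolve to small fixed bits. A quick reference (vector numbers per the Intel SDM / AMD APM; illustrative, not part of the changeset):

        /* Exception vectors referenced in this changeset and the bits they map to: */
        /*   X86_XCPT_DB = 1  ->  RT_BIT(X86_XCPT_DB) = 0x00000002                   */
        /*   X86_XCPT_UD = 6  ->  RT_BIT(X86_XCPT_UD) = 0x00000040                   */
        uint32_t u32XcptBitmap = RT_BIT(X86_XCPT_UD);   /* intercept #UD only         */
        u32XcptBitmap |= RT_BIT(X86_XCPT_DB);           /* additionally intercept #DB */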
  • trunk/src/VBox/VMM/VMMR3/GIMHv.cpp

    r55118 r55129  
             GIMR3Mmio2Unmap(pVM, pRegion);
             Assert(!pRegion->fMapped);
    -        VMMHypercallsDisable(pVM);
    +        for (VMCPUID i = 0; i < pVM->cCpus; i++)
    +            VMMHypercallsDisable(&pVM->aCpus[i]);
             LogRel(("GIM: HyperV: Disabled Hypercall-page\n"));
             return VINF_SUCCESS;
    …

                 /*
    -             * Notify VMM that hypercalls are now enabled.
    +             * Notify VMM that hypercalls are now enabled for all VCPUs.
                  */
    -            VMMHypercallsEnable(pVM);
    +            for (VMCPUID i = 0; i < pVM->cCpus; i++)
    +                VMMHypercallsEnable(&pVM->aCpus[i]);

                 LogRel(("GIM: HyperV: Enabled hypercalls at %#RGp\n", GCPhysHypercallPage));
  • trunk/src/VBox/VMM/VMMR3/GIMKvm.cpp

    r55118 r55129  

         /*
    -     * Setup #UD and hypercall behaviour.
    -     */
    -    VMMHypercallsEnable(pVM);
    +     * Setup hypercall and #UD handling.
    +     */
    +    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    +        VMMHypercallsEnable(&pVM->aCpus[i]);
    +
         if (ASMIsAmdCpu())
         {
    …
             pKvm->uOpCodeNative = OP_VMCALL;
         }
    +
         /* We always need to trap VMCALL/VMMCALL hypercall using #UDs for raw-mode VMs. */
         if (!HMIsEnabled(pVM))
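    Background for the ASMIsAmdCpu()/OP_VMCALL branch above: the KVM paravirt interface defines a native hypercall instruction per CPU vendor, and executing the other vendor's instruction raises #UD, which is exactly what the GIM provider intercepts and then patches or emulates. The encodings, for reference (well-known opcodes, not part of this changeset):

        /* Native hypercall instruction by host CPU vendor:                   */
        /*   Intel (VT-x):  VMCALL   encoded as 0F 01 C1                      */
        /*   AMD   (AMD-V): VMMCALL  encoded as 0F 01 D9                      */
        /* Running the other vendor's encoding raises #UD, which the GIM KVM  */
        /* provider traps (GIMXcptUD) and handles as a hypercall.             */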
  • trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp

    r55118 r55129  
                 rc = EMInterpretInstructionDisasState(pVCpu, &Cpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR);
             }
    -        else if (GIMShouldTrapXcptUD(pVM))
    +        else if (GIMShouldTrapXcptUD(pVCpu))
             {
                 LogFlow(("TRPMGCTrap06Handler: -> GIMXcptUD\n"));
  • trunk/src/VBox/VMM/include/GIMKvmInternal.h

    r55118 r55129  
     VMM_INT_DECL(VBOXSTRICTRC)      gimKvmReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue);
     VMM_INT_DECL(VBOXSTRICTRC)      gimKvmWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue);
    -VMM_INT_DECL(bool)              gimKvmShouldTrapXcptUD(PVM pVM);
    +VMM_INT_DECL(bool)              gimKvmShouldTrapXcptUD(PVMCPU pVCpu);
     VMM_INT_DECL(int)               gimKvmXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis);

  • trunk/src/VBox/VMM/include/HMInternal.h

    r55118 r55129  
     #define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
     #define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17)     /* Shared */
    +#define HM_CHANGED_GUEST_XCPT_INTERCEPTS         RT_BIT(18)
     /* VT-x specific state. */
    -#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(18)
    -#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(19)
    -#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(20)
    -#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(21)
    -#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(22)
    +#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(19)
    +#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(20)
    +#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(21)
    +#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(22)
    +#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(23)
     /* AMD-V specific state. */
    -#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(18)
    -#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(19)
    -#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(20)
    -#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(21)
    -#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(22)
    +#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(19)
    +#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(20)
    +#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
    +#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
    +#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)

     #define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0                \
    …
                                                       | HM_CHANGED_GUEST_EFER_MSR           \
                                                       | HM_CHANGED_GUEST_LAZY_MSRS          \
    +                                                  | HM_CHANGED_GUEST_XCPT_INTERCEPTS    \
                                                       | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
                                                       | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
    …
                                                       | HM_CHANGED_VMX_EXIT_CTLS)

    -#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(23)
    +#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(24)

     /* Bits shared between host and guest. */
    …
         /** Set when TPR patching is active. */
         bool                        fTPRPatchingActive;
    -    /** Whether #UD needs to be intercepted (required by certain GIM providers). */
    -    bool                        fGIMTrapXcptUD;
    -    /** Whether paravirt. hypercalls are enabled. */
    -    bool                        fHypercallsEnabled;
    -    bool                        u8Alignment[1];
    +    bool                        u8Alignment[3];

         /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
    …
         /** Whether to preload the guest-FPU state to avoid #NM VM-exit overhead. */
         bool                        fPreloadGuestFpu;
    +
    +    /** Whether #UD needs to be intercepted (required by certain GIM providers). */
    +    bool                        fGIMTrapXcptUD;
    +    /** Whether paravirt. hypercalls are enabled. */
    +    bool                        fHypercallsEnabled;
    +    uint8_t                     u8Alignment0[6];

         /** World switch exit counter. */
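    Two bookkeeping consequences of the HMInternal.h hunks are easy to miss: inserting HM_CHANGED_GUEST_XCPT_INTERCEPTS at RT_BIT(18) shifts every VT-x and AMD-V specific flag (and HM_CHANGED_HOST_CONTEXT) up by one bit, and the fGIMTrapXcptUD/fHypercallsEnabled booleans move from the per-VM HM structure into the per-VCPU HMCPU structure, with the padding adjusted accordingly. A small illustrative check of the padding math (presumed intent; the byte counts assume 1-byte bools):

        /* Per-VM HM struct:  2 bools + u8Alignment[1] removed, u8Alignment[3] added -> local size unchanged. */
        /* Per-VCPU HMCPU:    2 new bools + uint8_t u8Alignment0[6] added -> 8 bytes, presumably keeping the  */
        /*                    following members 8-byte aligned.                                               */
        AssertCompile(sizeof(bool) == 1);   /* the byte counts above assume 1-byte bools */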