VirtualBox

Changeset 71933 in vbox for trunk/src/VBox/VMM


Timestamp: Apr 20, 2018 10:54:01 AM (7 years ago)
Author: vboxsync
Message:

VMM/HM: Nested hw.virt: Fix the issue with preserving reserved bits across VMRUN/VM-exit.

Location: trunk/src/VBox/VMM
Files: 4 edited

Legend: unchanged lines are prefixed with a space, added lines with '+', removed lines with '-'.
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r71927 r71933

@@ -132 +132 @@
  * in IEM).
  *
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pCtx            Pointer to the guest-CPU context.
  *
  * @sa      hmR0SvmVmRunCacheVmcb.

@@ -139 +139 @@
 VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    /*
-     * Restore the nested-guest VMCB fields which have been modified for executing
-     * the nested-guest under SVM R0.
-     */
     if (pCtx->hwvirt.svm.fHMCachedVmcb)
     {
-        PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-        PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-        PSVMVMCBSTATESAVE   pVmcbNstGstState = &pVmcbNstGst->guest;
-        PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+        PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
+        PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
 
         /*
-         * The fields that are guaranteed to be read-only during SVM guest execution
-         * can safely be restored from our VMCB cache. Other fields like control registers
-         * are already updated by hardware-assisted SVM or by IEM. We only restore those
-         * fields that are potentially modified by hardware-assisted SVM.
+         * Restore fields as our own code might look at the VMCB controls as part
+         * of the #VMEXIT handling. Otherwise, we don't need to restore the current
+         * fields because none of them are written by a physical CPU on #VMEXIT.
          */
-        pVmcbNstGstCtrl->u16InterceptRdCRx        = pNstGstVmcbCache->u16InterceptRdCRx;
-        pVmcbNstGstCtrl->u16InterceptWrCRx        = pNstGstVmcbCache->u16InterceptWrCRx;
-        pVmcbNstGstCtrl->u16InterceptRdDRx        = pNstGstVmcbCache->u16InterceptRdDRx;
-        pVmcbNstGstCtrl->u16InterceptWrDRx        = pNstGstVmcbCache->u16InterceptWrDRx;
-        pVmcbNstGstCtrl->u16PauseFilterCount      = pNstGstVmcbCache->u16PauseFilterCount;
-        pVmcbNstGstCtrl->u16PauseFilterThreshold  = pNstGstVmcbCache->u16PauseFilterThreshold;
-        pVmcbNstGstCtrl->u32InterceptXcpt         = pNstGstVmcbCache->u32InterceptXcpt;
-        pVmcbNstGstCtrl->u64InterceptCtrl         = pNstGstVmcbCache->u64InterceptCtrl;
-        pVmcbNstGstState->u64DBGCTL               = pNstGstVmcbCache->u64DBGCTL;
-        pVmcbNstGstCtrl->u32VmcbCleanBits         = pNstGstVmcbCache->u32VmcbCleanBits;
-        pVmcbNstGstCtrl->u64IOPMPhysAddr          = pNstGstVmcbCache->u64IOPMPhysAddr;
-        pVmcbNstGstCtrl->u64MSRPMPhysAddr         = pNstGstVmcbCache->u64MSRPMPhysAddr;
-        pVmcbNstGstCtrl->u64TSCOffset             = pNstGstVmcbCache->u64TSCOffset;
-        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = pNstGstVmcbCache->fVIntrMasking;
-        pVmcbNstGstCtrl->TLBCtrl                  = pNstGstVmcbCache->TLBCtrl;
-
-        /*
-         * If the nested-hypervisor isn't using nested-paging (and thus shadow paging
-         * is used by HM), we restore the original PAT MSR from the nested-guest VMCB.
-         * Otherwise, the nested-guest-CPU PAT MSR would've already been saved here by
-         * hardware-assisted SVM or by IEM.
-         */
-        if (!pNstGstVmcbCache->u1NestedPaging)
-            pVmcbNstGstState->u64PAT = pNstGstVmcbCache->u64PAT;
-
-        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pNstGstVmcbCache->u1NestedPaging;
-        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt               = pNstGstVmcbCache->u1LbrVirt;
+        pVmcbNstGstCtrl->u16InterceptRdCRx                 = pVmcbNstGstCache->u16InterceptRdCRx;
+        pVmcbNstGstCtrl->u16InterceptWrCRx                 = pVmcbNstGstCache->u16InterceptWrCRx;
+        pVmcbNstGstCtrl->u16InterceptRdDRx                 = pVmcbNstGstCache->u16InterceptRdDRx;
+        pVmcbNstGstCtrl->u16InterceptWrDRx                 = pVmcbNstGstCache->u16InterceptWrDRx;
+        pVmcbNstGstCtrl->u16PauseFilterThreshold           = pVmcbNstGstCache->u16PauseFilterThreshold;
+        pVmcbNstGstCtrl->u16PauseFilterCount               = pVmcbNstGstCache->u16PauseFilterCount;
+        pVmcbNstGstCtrl->u32InterceptXcpt                  = pVmcbNstGstCache->u32InterceptXcpt;
+        pVmcbNstGstCtrl->u64InterceptCtrl                  = pVmcbNstGstCache->u64InterceptCtrl;
+        pVmcbNstGstCtrl->u64TSCOffset                      = pVmcbNstGstCache->u64TSCOffset;
+        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking          = pVmcbNstGstCache->fVIntrMasking;
+        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVmcbNstGstCache->fNestedPaging;
+        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt               = pVmcbNstGstCache->fLbrVirt;
         pCtx->hwvirt.svm.fHMCachedVmcb = false;
     }

@@ -580 +558 @@
     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    return RT_BOOL(pVmcbNstGstCache->u1NestedPaging);
+    return pVmcbNstGstCache->fNestedPaging;
 }
 
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h

    r71867 r71933

@@ -124 +124 @@
 
         /*
-         * Save the nested-guest state into the VMCB state-save area.
-         */
-        PSVMVMCB           pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-        PSVMVMCBCTRL       pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-        PSVMVMCBSTATESAVE  pVmcbNstGstState = &pVmcbNstGst->guest;
-
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbNstGstState, ES, es);
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbNstGstState, CS, cs);
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbNstGstState, SS, ss);
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbNstGstState, DS, ds);
-        pVmcbNstGstState->GDTR.u32Limit = pCtx->gdtr.cbGdt;
-        pVmcbNstGstState->GDTR.u64Base  = pCtx->gdtr.pGdt;
-        pVmcbNstGstState->IDTR.u32Limit = pCtx->idtr.cbIdt;
-        pVmcbNstGstState->IDTR.u64Base  = pCtx->idtr.pIdt;
-        pVmcbNstGstState->u64EFER       = pCtx->msrEFER;
-        pVmcbNstGstState->u64CR4        = pCtx->cr4;
-        pVmcbNstGstState->u64CR3        = pCtx->cr3;
-        pVmcbNstGstState->u64CR2        = pCtx->cr2;
-        pVmcbNstGstState->u64CR0        = pCtx->cr0;
-        /** @todo Nested paging. */
-        pVmcbNstGstState->u64RFlags     = pCtx->rflags.u64;
-        pVmcbNstGstState->u64RIP        = pCtx->rip;
-        pVmcbNstGstState->u64RSP        = pCtx->rsp;
-        pVmcbNstGstState->u64RAX        = pCtx->rax;
-        pVmcbNstGstState->u64DR7        = pCtx->dr[7];
-        pVmcbNstGstState->u64DR6        = pCtx->dr[6];
-        pVmcbNstGstState->u8CPL         = pCtx->ss.Attr.n.u2Dpl;   /* See comment in CPUMGetGuestCPL(). */
-        Assert(CPUMGetGuestCPL(pVCpu) == pCtx->ss.Attr.n.u2Dpl);
-        if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx))
-            pVmcbNstGstState->u64PAT = pCtx->msrPAT;
-
-        PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
-
-        /*
-         * Save additional state and intercept information.
+         * Map the nested-guest VMCB from its location in guest memory.
+         * Write exactly what the CPU does on #VMEXIT thereby preserving most other bits in the
+         * guest's VMCB in memory, see @bugref{7243#c113} and related comment on iemSvmVmrun().
          *
-         *   - Interrupt shadow: Tracked using VMCPU_FF_INHIBIT_INTERRUPTS and RIP.
-         *   - V_TPR: Already updated by iemCImpl_load_CrX or by the physical CPU for
-         *     hardware-assisted SVM execution.
-         *   - V_IRQ: Tracked using VMCPU_FF_INTERRUPT_NESTED_GUEST force-flag and updated below.
-         */
-        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
-            && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
-        {
-            pVmcbCtrl->IntShadow.n.u1IntShadow = 1;
-
-            /* Clear the inhibit-interrupt force-flag so as to not affect the outer guest. */
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-            LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pCtx->rip));
-        }
-
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
-        {
-            Assert(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
-        }
-        else
-            pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
-
-        /* Save exit information. */
-        pVmcbCtrl->u64ExitCode  = uExitCode;
-        pVmcbCtrl->u64ExitInfo1 = uExitInfo1;
-        pVmcbCtrl->u64ExitInfo2 = uExitInfo2;
-
-        /*
-         * Update the exit interrupt-information field if this #VMEXIT happened as a result
-         * of delivering an event through IEM.
-         *
-         * Don't update the exit interrupt-information field if the event wasn't being injected
-         * through IEM, as it may have been updated by real hardware if the nested-guest was
-         * executed using hardware-assisted SVM.
-         */
-        {
-            uint8_t  uExitIntVector;
-            uint32_t uExitIntErr;
-            uint32_t fExitIntFlags;
-            bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
-                                                         NULL /* uExitIntCr2 */);
-            if (fRaisingEvent)
+         */
+        PSVMVMCB       pVmcbMem;
+        PGMPAGEMAPLOCK PgLockMem;
+        PSVMVMCBCTRL   pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
+        rcStrict = iemMemPageMap(pVCpu, pCtx->hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, (void **)&pVmcbMem, &PgLockMem);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            /*
+             * Notify HM in case the nested-guest was executed using hardware-assisted SVM (which
+             * would have modified some VMCB state) that might need to be restored on #VMEXIT before
+             * writing the VMCB back to guest memory.
+             */
+            HMSvmNstGstVmExitNotify(pVCpu, pCtx);
+
+            /*
+             * Save the nested-guest state into the VMCB state-save area.
+             */
+            PSVMVMCBSTATESAVE pVmcbMemState = &pVmcbMem->guest;
+            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbMemState, ES, es);
+            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbMemState, CS, cs);
+            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbMemState, SS, ss);
+            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbMemState, DS, ds);
+            pVmcbMemState->GDTR.u32Limit   = pCtx->gdtr.cbGdt;
+            pVmcbMemState->GDTR.u64Base    = pCtx->gdtr.pGdt;
+            pVmcbMemState->IDTR.u32Limit   = pCtx->idtr.cbIdt;
+            pVmcbMemState->IDTR.u64Base    = pCtx->idtr.pIdt;
+            pVmcbMemState->u64EFER         = pCtx->msrEFER;
+            pVmcbMemState->u64CR4          = pCtx->cr4;
+            pVmcbMemState->u64CR3          = pCtx->cr3;
+            pVmcbMemState->u64CR2          = pCtx->cr2;
+            pVmcbMemState->u64CR0          = pCtx->cr0;
+            /** @todo Nested paging. */
+            pVmcbMemState->u64RFlags       = pCtx->rflags.u64;
+            pVmcbMemState->u64RIP          = pCtx->rip;
+            pVmcbMemState->u64RSP          = pCtx->rsp;
+            pVmcbMemState->u64RAX          = pCtx->rax;
+            pVmcbMemState->u64DR7          = pCtx->dr[7];
+            pVmcbMemState->u64DR6          = pCtx->dr[6];
+            pVmcbMemState->u8CPL           = pCtx->ss.Attr.n.u2Dpl;   /* See comment in CPUMGetGuestCPL(). */
+            Assert(CPUMGetGuestCPL(pVCpu) == pCtx->ss.Attr.n.u2Dpl);
+            if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx))
+                pVmcbMemState->u64PAT = pCtx->msrPAT;
+
+            /*
+             * Save additional state and intercept information.
+             *
+             *   - V_IRQ: Tracked using VMCPU_FF_INTERRUPT_NESTED_GUEST force-flag and updated below.
+             *   - V_TPR: Already updated by iemCImpl_load_CrX or by the physical CPU for
+             *     hardware-assisted SVM execution.
+             *   - Interrupt shadow: Tracked using VMCPU_FF_INHIBIT_INTERRUPTS and RIP.
+             */
+            PSVMVMCBCTRL pVmcbMemCtrl = &pVmcbMem->ctrl;
+            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))        /* V_IRQ. */
             {
-                pVmcbCtrl->ExitIntInfo.n.u1Valid  = 1;
-                pVmcbCtrl->ExitIntInfo.n.u8Vector = uExitIntVector;
-                pVmcbCtrl->ExitIntInfo.n.u3Type   = iemGetSvmEventType(uExitIntVector, fExitIntFlags);
-                if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
+                Assert(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
+                pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 1;
+                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
+            }
+            else
+                pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;
+
+            pVmcbMemCtrl->IntCtrl.n.u8VTPR = pVmcbCtrl->IntCtrl.n.u8VTPR;           /* V_TPR. */
+
+            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)          /* Interrupt shadow. */
+                && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
+            {
+                pVmcbMemCtrl->IntShadow.n.u1IntShadow = 1;
+                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+                LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pCtx->rip));
+            }
+            else
+                pVmcbMemCtrl->IntShadow.n.u1IntShadow = 0;
+
+            /*
+             * Save nRIP, instruction length and byte fields.
+             */
+            pVmcbMemCtrl->u64NextRIP     = pVmcbCtrl->u64NextRIP;
+            pVmcbMemCtrl->cbInstrFetched = pVmcbCtrl->cbInstrFetched;
+            memcpy(&pVmcbMemCtrl->abInstr[0], &pVmcbCtrl->abInstr[0], sizeof(pVmcbMemCtrl->abInstr));
+
+            /*
+             * Save exit information.
+             */
+            pVmcbMemCtrl->u64ExitCode  = uExitCode;
+            pVmcbMemCtrl->u64ExitInfo1 = uExitInfo1;
+            pVmcbMemCtrl->u64ExitInfo2 = uExitInfo2;
+
+            /*
+             * Update the exit interrupt-information field if this #VMEXIT happened as a result
+             * of delivering an event through IEM.
+             *
+             * Don't update the exit interrupt-information field if the event wasn't being injected
+             * through IEM, as it would have been updated by real hardware if the nested-guest was
+             * executed using hardware-assisted SVM.
+             */
+            {
+                uint8_t  uExitIntVector;
+                uint32_t uExitIntErr;
+                uint32_t fExitIntFlags;
+                bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
+                                                             NULL /* uExitIntCr2 */);
+                if (fRaisingEvent)
                 {
-                    pVmcbCtrl->ExitIntInfo.n.u1ErrorCodeValid = true;
-                    pVmcbCtrl->ExitIntInfo.n.u32ErrorCode     = uExitIntErr;
+                    pVmcbCtrl->ExitIntInfo.n.u1Valid  = 1;
+                    pVmcbCtrl->ExitIntInfo.n.u8Vector = uExitIntVector;
+                    pVmcbCtrl->ExitIntInfo.n.u3Type   = iemGetSvmEventType(uExitIntVector, fExitIntFlags);
+                    if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
+                    {
+                        pVmcbCtrl->ExitIntInfo.n.u1ErrorCodeValid = true;
+                        pVmcbCtrl->ExitIntInfo.n.u32ErrorCode     = uExitIntErr;
+                    }
                 }
             }
-        }
-
-        /*
-         * Clear event injection in the VMCB.
-         */
-        pVmcbCtrl->EventInject.n.u1Valid = 0;
-
-        /*
-         * Notify HM in case the nested-guest was executed using hardware-assisted SVM (which
-         * would have modified some VMCB state) that need to be restored on #VMEXIT before
-         * writing the VMCB back to guest memory.
-         */
-        HMSvmNstGstVmExitNotify(pVCpu, pCtx);
-
-        /*
-         * Write back the nested-guest's VMCB to its guest physical memory location.
-         */
-        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->hwvirt.svm.GCPhysVmcb, pVmcbNstGst, sizeof(*pVmcbNstGst));
+
+            /*
+             * Save the exit interrupt-information field.
+             * We choose to write the whole field including reserved bits as it was observed on an
+             * AMD Ryzen 5 Pro 1500 that the CPU does not preserve reserved bits in EXITINTINFO.
+             */
+            pVmcbMemCtrl->ExitIntInfo = pVmcbCtrl->ExitIntInfo;
+
+            /*
+             * Clear event injection.
+             */
+            pVmcbMemCtrl->EventInject.n.u1Valid = 0;
+
+            iemMemPageUnmap(pVCpu, pCtx->hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, pVmcbMem, &PgLockMem);
+        }
 
         /*

@@ -239 +264 @@
          * the nested-guest or not.
          */
-        memset(pVmcbNstGstCtrl, 0, sizeof(*pVmcbNstGstCtrl));
+        memset(pVmcbCtrl, 0, sizeof(*pVmcbCtrl));
         Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
 

@@ -251 +276 @@
         }
 
-        if (RT_SUCCESS(rcStrict))
+        if (rcStrict == VINF_SUCCESS)
         {
             /** @todo Nested paging. */

@@ -278 +303 @@
         else
         {
-            LogFlow(("iemSvmVmexit: Writing VMCB at %#RGp failed. rc=%Rrc\n", pCtx->hwvirt.svm.GCPhysVmcb,
-                     VBOXSTRICTRC_VAL(rcStrict)));
+            LogFlow(("iemSvmVmexit: Mapping VMCB at %#RGp failed. rc=%Rrc\n", pCtx->hwvirt.svm.GCPhysVmcb, VBOXSTRICTRC_VAL(rcStrict)));
             rcStrict = VERR_SVM_VMEXIT_FAILED;
         }

@@ -340 +364 @@
 
     /*
-     * Read the guest VMCB state.
+     * Read the guest VMCB.
      */
     PVM pVM = pVCpu->CTX_SUFF(pVM);

@@ -346 +370 @@
     if (RT_SUCCESS(rc))
     {
+        /*
+         * AMD-V seems to preserve reserved fields and only writes back selected, recognized
+         * fields on #VMEXIT. However, not all reserved  bits are preserved (e.g, EXITINTINFO)
+         * but in our implementation we try to preserve as much as we possibly can.
+         *
+         * We could read the entire page here and only write back the relevant fields on
+         * #VMEXIT but since our internal VMCB is also being used by HM during hardware-assisted
+         * SVM execution, it creates a potential for a nested-hypervisor to set bits that are
+         * currently reserved but may be recognized as features bits in future CPUs causing
+         * unexpected & undesired results. Hence, we zero out unrecognized fields here as we
+         * typically enter hardware-assisted SVM soon anyway, see @bugref{7243#c113}.
+         */
         PSVMVMCBCTRL      pVmcbCtrl   = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
         PSVMVMCBSTATESAVE pVmcbNstGst = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->guest;
+
+        RT_ZERO(pVmcbCtrl->u8Reserved0);
+        RT_ZERO(pVmcbCtrl->u8Reserved1);
+        RT_ZERO(pVmcbCtrl->u8Reserved2);
+        RT_ZERO(pVmcbNstGst->u8Reserved0);
+        RT_ZERO(pVmcbNstGst->u8Reserved1);
+        RT_ZERO(pVmcbNstGst->u8Reserved2);
+        RT_ZERO(pVmcbNstGst->u8Reserved3);
+        RT_ZERO(pVmcbNstGst->u8Reserved4);
+        RT_ZERO(pVmcbNstGst->u8Reserved5);
+        pVmcbCtrl->u32Reserved0                   = 0;
+        pVmcbCtrl->TLBCtrl.n.u24Reserved          = 0;
+        pVmcbCtrl->IntCtrl.n.u6Reserved           = 0;
+        pVmcbCtrl->IntCtrl.n.u3Reserved           = 0;
+        pVmcbCtrl->IntCtrl.n.u5Reserved           = 0;
+        pVmcbCtrl->IntCtrl.n.u24Reserved          = 0;
+        pVmcbCtrl->IntShadow.n.u30Reserved        = 0;
+        pVmcbCtrl->ExitIntInfo.n.u19Reserved      = 0;
+        pVmcbCtrl->NestedPagingCtrl.n.u29Reserved = 0;
+        pVmcbCtrl->EventInject.n.u19Reserved      = 0;
+        pVmcbCtrl->LbrVirt.n.u30Reserved          = 0;
 
         /*

@@ -363 +420 @@
             && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
         {
-            Log(("iemSvmVmrun: Nested paging not supported -> #VMEXIT\n"));
-            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+            Log(("iemSvmVmrun: Nested paging not supported -> Disabling\n"));
+            pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging = 0;
         }
 
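
The core of the change above: iemSvmVmexit() no longer overwrites the guest's entire VMCB with our internal copy via PGMPhysSimpleWriteGCPhys(), but instead maps the guest's VMCB page with iemMemPageMap(), updates only the fields a physical CPU writes on #VMEXIT, and unmaps it with iemMemPageUnmap(). That way, bits a nested hypervisor placed in reserved areas survive the VMRUN/#VMEXIT round trip. The stand-alone toy program below is only a minimal sketch of that idea; the TOYVMCBCTRL struct and both helper functions are hypothetical illustrations, not VirtualBox code.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical, drastically simplified stand-in for a VMCB control area:
       two recognized fields plus bytes that are reserved from our point of view. */
    typedef struct TOYVMCBCTRL
    {
        uint64_t u64ExitCode;
        uint64_t u64ExitInfo1;
        uint8_t  abReserved[16];    /* the nested hypervisor may have stashed bits here */
    } TOYVMCBCTRL;

    /* Old-style write-back: copy our whole internal VMCB over the guest image,
       clobbering whatever the nested hypervisor had in the reserved bytes. */
    static void writeBackWholeVmcb(TOYVMCBCTRL *pGuestMem, TOYVMCBCTRL const *pInternal)
    {
        memcpy(pGuestMem, pInternal, sizeof(*pGuestMem));
    }

    /* New-style write-back: touch only the fields a CPU writes on #VMEXIT;
       reserved bytes in the guest image stay as the nested hypervisor set them. */
    static void writeBackExitFields(TOYVMCBCTRL *pGuestMem, uint64_t uExitCode, uint64_t uExitInfo1)
    {
        pGuestMem->u64ExitCode  = uExitCode;
        pGuestMem->u64ExitInfo1 = uExitInfo1;
    }

    int main(void)
    {
        TOYVMCBCTRL GuestMem;                               /* VMCB image in "guest memory". */
        memset(&GuestMem, 0, sizeof(GuestMem));
        memset(GuestMem.abReserved, 0xAA, sizeof(GuestMem.abReserved));

        TOYVMCBCTRL Internal = GuestMem;                    /* our internal working copy... */
        memset(Internal.abReserved, 0, sizeof(Internal.abReserved));   /* ...which lost the reserved bits */

        writeBackExitFields(&GuestMem, 0x72 /* arbitrary exit code */, 0);
        printf("selective write-back:  reserved[0] = %#x\n", GuestMem.abReserved[0]);  /* 0xaa, preserved */

        writeBackWholeVmcb(&GuestMem, &Internal);
        printf("whole-VMCB write-back: reserved[0] = %#x\n", GuestMem.abReserved[0]);  /* 0, clobbered */
        return 0;
    }

In the real code, HMSvmNstGstVmExitNotify() is called before the write-back so that any fields modified by hardware-assisted SVM execution are restored first, and EXITINTINFO is deliberately written whole because the Ryzen observation cited in the diff shows the CPU itself does not preserve its reserved bits.
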
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r71927 r71933

@@ -2484 +2484 @@
 static bool hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    PCSVMVMCBCTRL       pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-    PCSVMVMCBSTATESAVE  pVmcbNstGstState = &pVmcbNstGst->guest;
-    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-
     /*
      * Cache the nested-guest programmed VMCB fields if we have not cached it yet.

@@ -2499 +2494 @@
     if (!fWasCached)
     {
+        PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+        PCSVMVMCBCTRL       pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
+        PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+
         pVmcbNstGstCache->u16InterceptRdCRx       = pVmcbNstGstCtrl->u16InterceptRdCRx;
         pVmcbNstGstCache->u16InterceptWrCRx       = pVmcbNstGstCtrl->u16InterceptWrCRx;
         pVmcbNstGstCache->u16InterceptRdDRx       = pVmcbNstGstCtrl->u16InterceptRdDRx;
         pVmcbNstGstCache->u16InterceptWrDRx       = pVmcbNstGstCtrl->u16InterceptWrDRx;
+        pVmcbNstGstCache->u16PauseFilterThreshold = pVmcbNstGstCtrl->u16PauseFilterThreshold;
         pVmcbNstGstCache->u16PauseFilterCount     = pVmcbNstGstCtrl->u16PauseFilterCount;
-        pVmcbNstGstCache->u16PauseFilterThreshold = pVmcbNstGstCtrl->u16PauseFilterThreshold;
         pVmcbNstGstCache->u32InterceptXcpt        = pVmcbNstGstCtrl->u32InterceptXcpt;
         pVmcbNstGstCache->u64InterceptCtrl        = pVmcbNstGstCtrl->u64InterceptCtrl;
-        pVmcbNstGstCache->u64CR0                  = pVmcbNstGstState->u64CR0;
-        pVmcbNstGstCache->u64CR3                  = pVmcbNstGstState->u64CR3;
-        pVmcbNstGstCache->u64CR4                  = pVmcbNstGstState->u64CR4;
-        pVmcbNstGstCache->u64EFER                 = pVmcbNstGstState->u64EFER;
-        pVmcbNstGstCache->u64PAT                  = pVmcbNstGstState->u64PAT;
-        pVmcbNstGstCache->u64DBGCTL               = pVmcbNstGstState->u64DBGCTL;
-        pVmcbNstGstCache->u64IOPMPhysAddr         = pVmcbNstGstCtrl->u64IOPMPhysAddr;
-        pVmcbNstGstCache->u64MSRPMPhysAddr        = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
         pVmcbNstGstCache->u64TSCOffset            = pVmcbNstGstCtrl->u64TSCOffset;
-        pVmcbNstGstCache->u32VmcbCleanBits        = pVmcbNstGstCtrl->u32VmcbCleanBits;
         pVmcbNstGstCache->fVIntrMasking           = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
-        pVmcbNstGstCache->TLBCtrl                 = pVmcbNstGstCtrl->TLBCtrl;
-        pVmcbNstGstCache->u1NestedPaging          = pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging;
-        pVmcbNstGstCache->u1LbrVirt               = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
+        pVmcbNstGstCache->fNestedPaging           = pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging;
+        pVmcbNstGstCache->fLbrVirt                = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
         pCtx->hwvirt.svm.fHMCachedVmcb            = true;
         Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
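
With this changeset the nested-guest VMCB cache is trimmed to the handful of fields that executing the nested-guest under hardware-assisted SVM can actually modify, and the single-bit members become f-prefixed flags. A sketch of the resulting shape, with types inferred from the accessors in the hunk above (this is an inference only; the authoritative SVMNESTEDVMCBCACHE definition lives in the VMM's HMInternal.h and may differ in layout and comments):

    /* Inferred sketch only, not the actual HMInternal.h definition. */
    typedef struct SVMNESTEDVMCBCACHE
    {
        uint16_t u16InterceptRdCRx;         /* CR read intercepts. */
        uint16_t u16InterceptWrCRx;         /* CR write intercepts. */
        uint16_t u16InterceptRdDRx;         /* DR read intercepts. */
        uint16_t u16InterceptWrDRx;         /* DR write intercepts. */
        uint16_t u16PauseFilterThreshold;   /* PAUSE filter threshold. */
        uint16_t u16PauseFilterCount;       /* PAUSE filter count. */
        uint32_t u32InterceptXcpt;          /* Exception intercepts. */
        uint64_t u64InterceptCtrl;          /* Control intercepts. */
        uint64_t u64TSCOffset;              /* TSC offset. */
        bool     fVIntrMasking;             /* V_INTR_MASKING bit. */
        bool     fNestedPaging;             /* Nested-paging bit. */
        bool     fLbrVirt;                  /* LBR-virtualization bit. */
    } SVMNESTEDVMCBCACHE;

Fields such as the CR0/CR3/CR4/EFER/PAT/DBGCTL values, the IOPM/MSRPM addresses, the clean bits and TLBCtrl are dropped from the cache because, per the new HMSvmNstGstVmExitNotify() comment, a physical CPU does not write them on #VMEXIT.
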
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r71856 r71933

@@ -3774 +3774 @@
         PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
         pHlp->pfnPrintf(pHlp, "CPU[%u]: HM SVM nested-guest VMCB cache\n", pVCpu->idCpu);
-        pHlp->pfnPrintf(pHlp, "  fHMCachedVmcb     = %#RTbool\n", pCtx->hwvirt.svm.fHMCachedVmcb);
-        pHlp->pfnPrintf(pHlp, "  u16InterceptRdCRx = %#RX16\n",   pVmcbNstGstCache->u16InterceptRdCRx);
-        pHlp->pfnPrintf(pHlp, "  u16InterceptWrCRx = %#RX16\n",   pVmcbNstGstCache->u16InterceptWrCRx);
-        pHlp->pfnPrintf(pHlp, "  u16InterceptRdDRx = %#RX16\n",   pVmcbNstGstCache->u16InterceptRdDRx);
-        pHlp->pfnPrintf(pHlp, "  u16InterceptWrDRx = %#RX16\n",   pVmcbNstGstCache->u16InterceptWrDRx);
-        pHlp->pfnPrintf(pHlp, "  u32InterceptXcpt  = %#RX32\n",   pVmcbNstGstCache->u32InterceptXcpt);
-        pHlp->pfnPrintf(pHlp, "  u64InterceptCtrl  = %#RX64\n",   pVmcbNstGstCache->u64InterceptCtrl);
-        pHlp->pfnPrintf(pHlp, "  u64IOPMPhysAddr   = %#RX64\n",   pVmcbNstGstCache->u64IOPMPhysAddr);
-        pHlp->pfnPrintf(pHlp, "  u64MSRPMPhysAddr  = %#RX64\n",   pVmcbNstGstCache->u64MSRPMPhysAddr);
-        pHlp->pfnPrintf(pHlp, "  u64TSCOffset      = %#RX64\n",   pVmcbNstGstCache->u64TSCOffset);
-        pHlp->pfnPrintf(pHlp, "  u32VmcbCleanBits  = %#RX32\n",   pVmcbNstGstCache->u32VmcbCleanBits);
-        pHlp->pfnPrintf(pHlp, "  TLBCtrl           = %#RX64\n",   pVmcbNstGstCache->TLBCtrl);
-        pHlp->pfnPrintf(pHlp, "    u32ASID           = %#RX64\n", pVmcbNstGstCache->TLBCtrl.n.u32ASID);
-        pHlp->pfnPrintf(pHlp, "    u8TLBFlush        = %#RX64\n", pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush);
-        pHlp->pfnPrintf(pHlp, "  u1NestedPaging    = %RTbool\n",  pVmcbNstGstCache->u1NestedPaging);
-        pHlp->pfnPrintf(pHlp, "  u1LbrVirt         = %RTbool\n",  pVmcbNstGstCache->u1LbrVirt);
-        pHlp->pfnPrintf(pHlp, "  u64CR0            = %#RX64\n",   pVmcbNstGstCache->u64CR0);
-        pHlp->pfnPrintf(pHlp, "  u64CR3            = %#RX64\n",   pVmcbNstGstCache->u64CR3);
-        pHlp->pfnPrintf(pHlp, "  u64CR4            = %#RX64\n",   pVmcbNstGstCache->u64CR4);
-        pHlp->pfnPrintf(pHlp, "  u64EFER           = %#RX64\n",   pVmcbNstGstCache->u64EFER);
-        pHlp->pfnPrintf(pHlp, "  u64DBGCTL         = %#RX64\n",   pVmcbNstGstCache->u64DBGCTL);
-        pHlp->pfnPrintf(pHlp, "  fVIntrMasking     = %RTbool\n",  pVmcbNstGstCache->fVIntrMasking);
+        pHlp->pfnPrintf(pHlp, "  fHMCachedVmcb           = %#RTbool\n", pCtx->hwvirt.svm.fHMCachedVmcb);
+        pHlp->pfnPrintf(pHlp, "  u16InterceptRdCRx       = %#RX16\n",   pVmcbNstGstCache->u16InterceptRdCRx);
+        pHlp->pfnPrintf(pHlp, "  u16InterceptWrCRx       = %#RX16\n",   pVmcbNstGstCache->u16InterceptWrCRx);
+        pHlp->pfnPrintf(pHlp, "  u16InterceptRdDRx       = %#RX16\n",   pVmcbNstGstCache->u16InterceptRdDRx);
+        pHlp->pfnPrintf(pHlp, "  u16InterceptWrDRx       = %#RX16\n",   pVmcbNstGstCache->u16InterceptWrDRx);
+        pHlp->pfnPrintf(pHlp, "  u16PauseFilterThreshold = %#RX16\n",   pVmcbNstGstCache->u16PauseFilterThreshold);
+        pHlp->pfnPrintf(pHlp, "  u16PauseFilterCount     = %#RX16\n",   pVmcbNstGstCache->u16PauseFilterCount);
+        pHlp->pfnPrintf(pHlp, "  u32InterceptXcpt        = %#RX32\n",   pVmcbNstGstCache->u32InterceptXcpt);
+        pHlp->pfnPrintf(pHlp, "  u64InterceptCtrl        = %#RX64\n",   pVmcbNstGstCache->u64InterceptCtrl);
+        pHlp->pfnPrintf(pHlp, "  u64TSCOffset            = %#RX64\n",   pVmcbNstGstCache->u64TSCOffset);
+        pHlp->pfnPrintf(pHlp, "  fVIntrMasking           = %RTbool\n",  pVmcbNstGstCache->fVIntrMasking);
+        pHlp->pfnPrintf(pHlp, "  fNestedPaging           = %RTbool\n",  pVmcbNstGstCache->fNestedPaging);
+        pHlp->pfnPrintf(pHlp, "  fLbrVirt                = %RTbool\n",  pVmcbNstGstCache->fLbrVirt);
     }
     else