
Changeset 66015 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Mar 9, 2017 3:39:05 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 113871

Message: VMM: Nested Hw.virt: Bits.

Location: trunk/src/VBox/VMM/VMMAll
Files: 2 edited

Legend: unchanged context lines are unprefixed, added lines are prefixed with "+", removed lines with "-".
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp (r66008 → r66015)

  * @param   pVCpu               The cross context virtual CPU structure.
  * @param   pCtx                Pointer to the guest-CPU context.
- */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    RT_NOREF2(pVCpu, pCtx);
+ * @param   pVmcb               The VMCB of the nested-guest.
+ * @param   pHostState          The host-state save area in the guest.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, PSVMHOSTSTATE pHostState)
+{
+    Assert(pHostState);
+    Assert(pVmcb);
+
+    /*
+     * Save host state.
+     */
+    pHostState->es       = pCtx->es;
+    pHostState->cs       = pCtx->cs;
+    pHostState->ss       = pCtx->ss;
+    pHostState->ds       = pCtx->ds;
+    pHostState->gdtr     = pCtx->gdtr;
+    pHostState->idtr     = pCtx->idtr;
+    pHostState->uEferMsr = pCtx->msrEFER;
+    pHostState->uCr0     = pCtx->cr0;
+    pHostState->uCr3     = pCtx->cr3;
+    pHostState->uCr4     = pCtx->cr4;
+    pHostState->rflags   = pCtx->rflags;
+    pHostState->uRip     = pCtx->rip;
+    pHostState->uRsp     = pCtx->rsp;
+    pHostState->uRax     = pCtx->rax;
+
+    /*
+     * Load controls from VMCB.
+     */
+    pCtx->hwvirt.svm.u16InterceptRdCRx = pVmcb->ctrl.u16InterceptRdCRx;
+    pCtx->hwvirt.svm.u16InterceptWrCRx = pVmcb->ctrl.u16InterceptWrCRx;
+    pCtx->hwvirt.svm.u16InterceptRdDRx = pVmcb->ctrl.u16InterceptRdDRx;
+    pCtx->hwvirt.svm.u16InterceptWrDRx = pVmcb->ctrl.u16InterceptWrDRx;
+    pCtx->hwvirt.svm.u64InterceptCtrl  = pVmcb->ctrl.u64InterceptCtrl;
+    pCtx->hwvirt.svm.u32InterceptXcpt  = pVmcb->ctrl.u32InterceptXcpt;
+    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
+    {
+        Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n"));
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+    if (!pVmcb->ctrl.TLBCtrl.n.u32ASID)
+    {
+        Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n"));
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+
+    /** @todo the rest. */

     return VERR_NOT_IMPLEMENTED;
…
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        The guest-CPU context.
- * @param   iExitCode   The exit reason.
+ * @param   uExitCode   The exit code.
  * @param   uExitInfo1  The exit info. 1 field.
  * @param   uExitInfo2  The exit info. 2 field.
  */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, int64_t iExitCode, uint64_t uExitInfo1,
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
                                              uint64_t uExitInfo2)
 {
     if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
-        || iExitCode == SVM_EXIT_INVALID)
+        || uExitCode == SVM_EXIT_INVALID)
     {
         RT_NOREF(pVCpu);
…
         pCtx->hwvirt.svm.fGif = 0;

-        /** @todo implement VMEXIT. */
+        /** @todo implement \#VMEXIT. */

         return VINF_SUCCESS;
…
     else
     {
-        Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%RI64 uExitInfo1=%RU64 uExitInfo2=%RU64\n", iExitCode,
+        Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode,
              uExitInfo1, uExitInfo2));
         RT_NOREF2(uExitInfo1, uExitInfo2);
…
 }

-
-/**
- * Peforms the functions of a VMRUN instruction.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        The guest-CPU context.
- */
-VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmRun(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    RT_NOREF2(pVCpu, pCtx);
-    return VERR_NOT_IMPLEMENTED;
-}
-
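For context, a minimal guest-side sketch of what the two new consistency checks in HMSvmVmrun() imply: the VMCB a nested hypervisor hands to VMRUN must have the VMRUN intercept set and a non-zero guest ASID, otherwise the code above raises #VMEXIT with SVM_EXIT_INVALID. Field and macro names are taken from the diff; the snippet itself is illustrative and not part of this changeset.

    /* Illustrative only: minimal VMCB control setup so the new checks pass. */
    pVmcb->ctrl.u64InterceptCtrl  |= SVM_CTRL_INTERCEPT_VMRUN;  /* VMRUN must be intercepted. */
    pVmcb->ctrl.TLBCtrl.n.u32ASID  = 1;                         /* Guest ASID must be non-zero. */
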
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r66000 → r66015)

     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
     {
-        Log(("vmrun: Guest intercept -> VMexit\n"));
+        Log(("vmrun: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 #endif

-    /** @todo think - I probably need to map both the HSAVE area page and the
-     *        guest VMCB via iemMemPageMap here and do the copying? */
-    pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;
     void *pvVmcb;
     PGMPAGEMAPLOCK PgLockVmcb;
     VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
     if (rcStrict == VINF_SUCCESS)
-        return HMSvmVmrun(pVCpu, pCtx);
+    {
+        pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;
+
+        RTGCPHYS GCPhysHostState = pCtx->hwvirt.svm.uMsrHSavePa;
+        /** @todo SVM does not validate the host-state area beyond checking the
+         *        alignment and range of the physical address. Nothing to prevent users
+         *        from using MMIO or other weird stuff in which case anything might
+         *        happen. */
+        void *pvHostState;
+        PGMPAGEMAPLOCK PgLockHostState;
+        rcStrict = iemMemPageMap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, &pvHostState, &PgLockHostState);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            PSVMHOSTSTATE pHostState = (PSVMHOSTSTATE)pvHostState;
+            PSVMVMCB      pVmcb      = (PSVMVMCB)pvVmcb;
+            rcStrict = HMSvmVmrun(pVCpu, pCtx, pVmcb, pHostState);
+
+            iemMemPageUnmap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, pvHostState, &PgLockHostState);
+        }
+        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, pvVmcb, &PgLockVmcb);
+    }
     RT_NOREF(cbInstr);
     return rcStrict;
…
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
     {
-        Log(("vmrun: Guest intercept -> VMexit\n"));
+        Log(("vmrun: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
…
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
     {
-        Log(("vmload: Guest intercept -> VMexit\n"));
+        Log(("vmload: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
…
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
     {
-        Log(("vmsave: Guest intercept -> VMexit\n"));
+        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
…
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
     {
-        Log(("clgi: Guest intercept -> VMexit\n"));
+        Log(("clgi: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
…
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
     {
-        Log2(("stgi: Guest intercept -> VMexit\n"));
+        Log2(("stgi: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
…
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
     {
-        Log2(("invlpga: Guest intercept -> VMexit\n"));
+        Log2(("invlpga: Guest intercept -> #VMEXIT\n"));
         return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
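
With this changeset, the VMRUN emulation in IEM maps the nested-guest VMCB page and the host-state save area (from the VM_HSAVE_PA MSR value kept in uMsrHSavePa) before handing both to HMSvmVmrun(). Condensed into a sketch (names taken from the diff; the enclosing function, GCPhysVmcb and error handling are assumed or elided):

    /* Condensed sketch of the flow added above; illustrative only. */
    void *pvVmcb, *pvHostState;
    PGMPAGEMAPLOCK PgLockVmcb, PgLockHostState;
    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
    if (rcStrict == VINF_SUCCESS)
    {
        pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;          /* Remember the nested-guest VMCB address. */
        RTGCPHYS GCPhysHostState = pCtx->hwvirt.svm.uMsrHSavePa; /* Host-state area from the VM_HSAVE_PA MSR. */
        rcStrict = iemMemPageMap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, &pvHostState, &PgLockHostState);
        if (rcStrict == VINF_SUCCESS)
        {
            /* Saves the host state and loads the VMCB controls. */
            rcStrict = HMSvmVmrun(pVCpu, pCtx, (PSVMVMCB)pvVmcb, (PSVMHOSTSTATE)pvHostState);
            iemMemPageUnmap(pVCpu, GCPhysHostState, IEM_ACCESS_DATA_RW, pvHostState, &PgLockHostState);
        }
        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, pvVmcb, &PgLockVmcb);
    }

Note that the two pages are unmapped in the reverse order of mapping, so the host-state lock is released before the VMCB lock.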