VirtualBox

Changeset 69409 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Oct 27, 2017 4:14:12 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 118738
Message: VMM/IEM: Nested Hw.virt: VMRUN fixes.
File: 1 edited

  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h

--- trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h (r69141)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h (r69409)
@@ -72,4 +72,7 @@
      */
     int rc = PGMChangeMode(pVCpu, pCtx->cr0 | X86_CR0_PE, pCtx->cr4, pCtx->msrEFER);
+#ifdef IN_RING3
+    Assert(rc != VINF_PGM_CHANGE_MODE);
+#endif
     AssertRCReturn(rc, rc);
 
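The ring-3-only assertion is worth a note: VINF_PGM_CHANGE_MODE is an informational status by which PGMChangeMode, when running in a context that cannot complete a guest paging-mode switch itself, asks for the work to be finished in ring-3. Ring-3 code can always complete the switch, so the status must never surface there. A minimal sketch of that pattern, with stub names and status values that are illustrative rather than taken from the VirtualBox sources:

    #include <assert.h>

    #define SKETCH_VINF_PGM_CHANGE_MODE 102   /* hypothetical value, illustration only */

    /* Stand-in for PGMChangeMode: a non-ring-3 context may defer the
     * actual mode switch and return the informational status instead. */
    static int sketchPgmChangeMode(int fMustDefer)
    {
        return fMustDefer ? SKETCH_VINF_PGM_CHANGE_MODE : 0 /* success */;
    }

    static int sketchWorldSwitch(void)
    {
        int rc = sketchPgmChangeMode(0);
    #ifdef IN_RING3
        /* Already in ring-3: a request to defer to ring-3 is a logic error. */
        assert(rc != SKETCH_VINF_PGM_CHANGE_MODE);
    #endif
        return rc;
    }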
@@ -239,5 +242,5 @@
              * Reload the guest's "host state".
              */
-            CPUMSvmVmExitRestoreHostState(pCtx);
+            CPUMSvmVmExitRestoreHostState(pVCpu, pCtx);
 
             /*
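Passing pVCpu alongside pCtx matches the VMM convention of handing per-VCPU state to anything that modifies it. Functionally, the call models what hardware does on #VMEXIT: the host state the hypervisor had at VMRUN time (RIP, RSP, RAX, RFLAGS, EFER, control registers, segment and descriptor-table registers, per the AMD manual) is loaded back into the guest context. A pared-down illustration of such a restore; the structures are hypothetical, not the CPUM layout:

    #include <stdint.h>

    typedef struct SKETCHHOSTSTATE   /* what VMRUN squirrelled away */
    {
        uint64_t uEferMsr, uCr0, uCr3, uCr4;
        uint64_t uRip, uRsp, uRax, uRFlags;
        /* ... segment registers, GDTR/IDTR ... */
    } SKETCHHOSTSTATE;

    typedef struct SKETCHCPUCTX      /* the guest-CPU context being switched */
    {
        uint64_t msrEFER, cr0, cr3, cr4, rip, rsp, rax, rflags;
    } SKETCHCPUCTX;

    static void sketchVmexitRestoreHostState(SKETCHCPUCTX *pCtx, const SKETCHHOSTSTATE *pHost)
    {
        pCtx->msrEFER = pHost->uEferMsr;
        pCtx->cr0     = pHost->uCr0;
        pCtx->cr3     = pHost->uCr3;
        pCtx->cr4     = pHost->uCr4;
        pCtx->rip     = pHost->uRip;
        pCtx->rsp     = pHost->uRsp;
        pCtx->rax     = pHost->uRax;
        pCtx->rflags  = pHost->uRFlags;
    }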
@@ -291,4 +294,13 @@
     LogFlow(("iemSvmVmrun\n"));
 
+#ifdef IN_RING0
+    /*
+     * Until PGM can handle switching the guest paging mode in ring-0,
+     * there's no point in trying to emulate VMRUN in ring-0 as we have
+     * to go back to ring-3 anyway, see @bugref{7243#c48}.
+     */
+    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+#endif
+
     /*
      * Cache the physical address of the VMCB for #VMEXIT exceptions.
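The early bail-out works because VMMAll sources such as this one are compiled once per execution context, with IN_RING0 or IN_RING3 defined by the build, so the same function body can refuse work in a context that cannot finish it. Schematically (the error constant is a stand-in, not the real status value):

    #define SKETCH_VERR_NOT_IMPLEMENTED (-101)  /* stand-in status, illustration only */

    static int sketchSvmVmrun(void)
    {
    #ifdef IN_RING0
        /* The guest paging-mode switch cannot be done from ring-0 yet, so
         * punt; the ring-3 build of this same code performs the emulation. */
        return SKETCH_VERR_NOT_IMPLEMENTED;
    #else
        /* ... full VMRUN emulation ... */
        return 0; /* success */
    #endif
    }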
@@ -451,8 +463,12 @@
         /*
          * Continue validating guest-state and controls.
-         */
-        /* EFER, CR0 and CR4. */
+         *
+         * We pass CR0 as 0 to CPUMQueryValidatedGuestEfer below to skip the illegal
+         * EFER.LME bit transition check. We pass the nested-guest's EFER as both the
+         * old and new EFER value to not have any guest EFER bits influence the new
+         * nested-guest EFER.
+         */
         uint64_t uValidEfer;
-        rc = CPUMQueryValidatedGuestEfer(pVM, pVmcbNstGst->u64CR0, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
+        rc = CPUMQueryValidatedGuestEfer(pVM, 0 /* CR0 */, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
         if (RT_FAILURE(rc))
         {
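The transition check the comment refers to is the architectural rule that EFER.LME must not be toggled while paging is enabled: changing it with CR0.PG set raises #GP on real hardware. A hand-rolled sketch of that rule, not the CPUMQueryValidatedGuestEfer implementation (the bit constants mirror the architectural positions):

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_X86_CR0_PG  UINT64_C(0x80000000)   /* CR0 bit 31: paging enable */
    #define SKETCH_EFER_LME    UINT64_C(0x00000100)   /* EFER bit 8: long mode enable */

    static bool sketchIsEferLmeTransitionLegal(uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer)
    {
        /* Toggling LME while CR0.PG=1 would #GP on real hardware. */
        if (   (uCr0 & SKETCH_X86_CR0_PG)
            && ((uOldEfer ^ uNewEfer) & SKETCH_EFER_LME))
            return false;
        return true;
    }

With CR0 passed as 0 the CR0.PG condition can never fire, and because the same nested-guest EFER goes in as both the old and the new value the LME XOR is zero as well; whatever other validity checks the query performs still apply to the nested-guest EFER.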
@@ -460,4 +476,6 @@
             return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
+
+        /* Validate paging and CPU mode bits. */
         bool const fSvm                     = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
         bool const fLongModeSupported       = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
@@ -546,5 +564,5 @@
         pCtx->rsp        = pVmcbNstGst->u64RSP;
         pCtx->rip        = pVmcbNstGst->u64RIP;
-        pCtx->msrEFER    = uValidEfer;
+        CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, uValidEfer);
 
         /* Mask DR6, DR7 bits mandatory set/clear bits. */
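Routing the write through a CPUM setter instead of assigning pCtx->msrEFER directly keeps EFER updates in one place. The NoCheck suffix together with the (old, new) argument pair suggests a setter that skips re-validation, uValidEfer having already passed CPUMQueryValidatedGuestEfer, while still seeing the previous value so that state derived from the transition can be refreshed. That reading is inferred from the call site, not from the CPUM sources:

    #include <stdint.h>

    typedef struct SKETCHCPU
    {
        uint64_t uEfer;
        int      fRecheckMode;   /* derived state that depends on EFER transitions */
    } SKETCHCPU;

    /* Assumed shape of a validate-once, set-unchecked split. */
    static void sketchSetEferNoCheck(SKETCHCPU *pCpu, uint64_t uOldEfer, uint64_t uValidatedEfer)
    {
        /* No validation here: the caller already ran the validated-EFER query. */
        pCpu->uEfer = uValidatedEfer;
        if (uOldEfer != uValidatedEfer)
            pCpu->fRecheckMode = 1;  /* e.g. re-derive the CPU operating mode */
    }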
@@ -569,5 +587,8 @@
         { /* likely */ }
         else if (RT_SUCCESS(rcStrict))
+        {
+            LogFlow(("iemSvmVmrun: iemSvmWorldSwitch returned %Rrc, setting passup status\n", VBOXSTRICTRC_VAL(rcStrict)));
             rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
+        }
         else
         {
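iemSvmWorldSwitch can succeed with an informational (non-VINF_SUCCESS) status, and IEM handles those through its pass-up mechanism: the status is remembered on the VCPU and emulation continues, with the recorded status returned once the instruction completes. A first-wins simplification of that idea, assuming rather than quoting the IEM internals:

    typedef struct SKETCHVCPU { int rcPassUp; } SKETCHVCPU;

    static int sketchSetPassUpStatus(SKETCHVCPU *pVCpu, int rcStrict)
    {
        /* Record the informational status for the end of the instruction... */
        if (pVCpu->rcPassUp == 0 /* success */)
            pVCpu->rcPassUp = rcStrict;
        /* ...and let the emulation continue as if all went fine. */
        return 0; /* success */
    }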
@@ -634,4 +655,5 @@
                      pCtx->cs.Sel, pCtx->rip, pCtx->cr0, pCtx->cr3, pCtx->cr4, pCtx->msrEFER, pCtx->rflags.u64));
 
+        LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict)));
         return rcStrict;
     }