VirtualBox

Timestamp:
Aug 31, 2018 8:17:31 AM
Author:
vboxsync
Message:

VMM/IEM, HM: Nested VMX: bugref:9180 Implement VMREAD; use the decoded IEM APIs for
VMXON, VMREAD and VMWRITE in the VMX R0 code.

File:
1 edited

Legend:

    Unmodified: lines with no prefix
    Added:      lines prefixed with '+'
    Removed:    lines prefixed with '-'
    Hunk headers of the form @@ -N +M @@ mark where the shown lines start: line N in r73959, line M in r73983.
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp  (r73959)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp  (r73983)

@@ -183 +183 @@
    hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
                                 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/** Macro that does the necessary privilege checks and intercepted VM-exits for
+ *  guests that attempted to execute a VMX instruction. */
+#define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
+    do \
+    { \
+        VBOXSTRICTRC rcStrictTmp = hmR0VmxCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
+        if (rcStrictTmp == VINF_SUCCESS) \
+        { /* likely */ } \
+        else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
+        { \
+            Assert((a_pVCpu)->hm.s.Event.fPending); \
+            Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
+            return VINF_SUCCESS; \
+        } \
+        else \
+        { \
+            int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
+            AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
+        } \
+    } while (0)
+#endif  /* VBOX_WITH_NESTED_HWVIRT_VMX */


     
@@ -5846 +5869 @@

/**
- * Decodes the memory operand of a VM-exit due to instruction execution.
+ * Decodes the memory operand of an instruction that caused a VM-exit.
 *
 * The VM-exit qualification field provides the displacement field for memory
@@ -5857 +5880 @@
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pExitInstrInfo  Pointer to the VM-exit instruction information.
- * @param   fIsWrite        Whether the operand is a destination memory operand
- *                          (i.e. writeable memory location) or not.
+ * @param   fIsDstOperand   Whether the operand is a destination memory
+ *                          operand (i.e. writeable memory location) or not.
 * @param   GCPtrDisp       The instruction displacement field, if any. For
 *                          RIP-relative addressing pass RIP + displacement here.
- * @param   pGCPtrMem       Where to store the destination memory operand.
- */
-static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, PCVMXEXITINSTRINFO pExitInstrInfo, RTGCPTR GCPtrDisp, bool fIsWrite,
-                                            PRTGCPTR pGCPtrMem)
+ * @param   pGCPtrMem       Where to store the effective destination memory address.
+ */
+static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, PCVMXEXITINSTRINFO pExitInstrInfo, RTGCPTR GCPtrDisp,
+                                            bool fIsDstOperand, PRTGCPTR pGCPtrMem)
{
    Assert(pExitInstrInfo);
     
@@ -5951 +5974 @@
        {
            /* Check permissions for the data segment. */
-            if (   fIsWrite
+            if (   fIsDstOperand
                && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
            {
     
@@ -5996 +6019 @@
        {
            /* Check permissions for the code segment. */
-            if (   fIsWrite
+            if (   fIsDstOperand
                || !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ))
            {
     
@@ -6040 +6063 @@
 *
 * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pVmxTransient   Pointer to the VMX transient structure.
+ * @param   uExitReason     The VM-exit reason.
 *
 * @todo    NstVmx: Document other error codes when VM-exit is implemented.
 * @remarks No-long-jump zone!!!
 */
-static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, uint32_t uExitReason)
{
    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
     
@@ -6059 +6082 @@
    }

-    if (pVmxTransient->uExitReason == VMX_EXIT_VMXON)
+    if (uExitReason == VMX_EXIT_VMXON)
    {
        /*
     
@@ -13422 +13445 @@
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

-    /** @todo NSTVMX: Vmwrite. */
-    hmR0VmxSetPendingXcptUD(pVCpu);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
- */
-HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
-{
-    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
-
-    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
-    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
-    AssertRCReturn(rc, rc);
-
-    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
-    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-    {
-        /* VMXOFF on success changes the internal hwvirt state but not anything that's visible to the guest. */
-        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
-    }
-    else if (rcStrict == VINF_IEM_RAISED_XCPT)
-    {
-        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
-        rcStrict = VINF_SUCCESS;
-    }
-    return rcStrict;
-}
-
-
-/**
- * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
- */
-HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
-{
-    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
-
    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
@@ -13467 +13451 @@
    AssertRCReturn(rc, rc);

-    VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToVmxInstr(pVCpu, pVmxTransient);
+    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
+
+    VMXVEXITINFO ExitInfo;
+    RT_ZERO(ExitInfo);
+    ExitInfo.uReason     = pVmxTransient->uExitReason;
+    ExitInfo.u64Qual     = pVmxTransient->uExitQual;
+    ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
+    ExitInfo.cbInstr     = pVmxTransient->cbInstr;
+    if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
+    {
+        RTGCPTR GCPtrVal;
+        VBOXSTRICTRC rcStrict = hmR0VmxDecodeMemOperand(pVCpu, &ExitInfo.InstrInfo, ExitInfo.u64Qual, false /* fIsDstOperand */,
+                                                         &GCPtrVal);
+        if (rcStrict == VINF_SUCCESS)
+        { /* likely */ }
+        else if (rcStrict == VINF_HM_PENDING_XCPT)
+        {
+            Assert(pVCpu->hm.s.Event.fPending);
+            Log4Func(("Memory operand decoding failed, raising xcpt %#x\n",
+                      VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+            return VINF_SUCCESS;
+        }
+        else
+        {
+            Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+        ExitInfo.GCPtrEffAddr = GCPtrVal;
+    }
+
+    VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+        rcStrict = VINF_SUCCESS;
+    }
+    return rcStrict;
+}
+
+
+/**
+ * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+    AssertRCReturn(rc, rc);
+
+    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
+
+    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+    {
+        /* VMXOFF on success changes the internal hwvirt state but not anything that's visible to the guest. */
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
+    }
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+        rcStrict = VINF_SUCCESS;
+    }
+    return rcStrict;
+}
+
+
+/**
+ * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
+    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+    AssertRCReturn(rc, rc);
+
+    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
+
+    VMXVEXITINFO ExitInfo;
+    RT_ZERO(ExitInfo);
+    ExitInfo.uReason     = pVmxTransient->uExitReason;
+    ExitInfo.u64Qual     = pVmxTransient->uExitQual;
+    ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
+    ExitInfo.cbInstr     = pVmxTransient->cbInstr;
+
+    RTGCPTR GCPtrVmxon;
+    VBOXSTRICTRC rcStrict = hmR0VmxDecodeMemOperand(pVCpu, &ExitInfo.InstrInfo, ExitInfo.u64Qual, false /* fIsDstOperand */,
+                                                    &GCPtrVmxon);
    if (rcStrict == VINF_SUCCESS)
    { /* likely */ }
    else if (rcStrict == VINF_HM_PENDING_XCPT)
    {
-        Log4Func(("Privilege checks failed, raising xcpt %#x!\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+        Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
        return VINF_SUCCESS;
    }
@@ -13480 +13558 @@
        return rcStrict;
    }
-
-    RTGCPTR            GCPtrVmxon;
-    PCVMXEXITINSTRINFO pExitInstrInfo = &pVmxTransient->ExitInstrInfo;
-    RTGCPTR const      GCPtrDisp      =  pVmxTransient->uExitQual;
-    rcStrict = hmR0VmxDecodeMemOperand(pVCpu, pExitInstrInfo, GCPtrDisp, false /*fIsWrite*/,  &GCPtrVmxon);
-    if (rcStrict == VINF_SUCCESS)
-    { /* likely */ }
-    else if (rcStrict == VINF_HM_PENDING_XCPT)
-    {
-        Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
-        return VINF_SUCCESS;
-    }
-    else
-    {
-        Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
-        return rcStrict;
-    }
-
-    VMXVEXITINFO ExitInfo;
-    RT_ZERO(ExitInfo);
-    ExitInfo.ExitInstrInfo.u = pExitInstrInfo->u;
-    ExitInfo.u64ExitQual     = GCPtrDisp;
-    uint8_t const iEffSeg    = pExitInstrInfo->VmreadVmwrite.iSegReg;
-    rcStrict = IEMExecDecodedVmxon(pVCpu, pVmxTransient->cbInstr, iEffSeg, GCPtrVmxon, &ExitInfo);
+    ExitInfo.GCPtrEffAddr = GCPtrVmxon;
+
+    rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
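
The commit message also announces a VMREAD implementation, but the corresponding exit handler falls outside the hunks shown above. Purely as an illustrative sketch, the following models what such a handler would look like when built from the pieces visible in this diff (the HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR macro, hmR0VmxDecodeMemOperand and the VMXVEXITINFO pattern of the VMWRITE handler). The names hmR0VmxExitVmread and IEMExecDecodedVmread, and the exact set of imported state bits, are assumptions based on the existing naming pattern, not text taken from this changeset. Note that for VMREAD the memory operand is the destination, so fIsDstOperand would be true here.

/**
 * Sketch: VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Unconditional VM-exit.
 * NOTE: hmR0VmxExitVmread and IEMExecDecodedVmread are assumed names, modeled
 *       on the VMWRITE path added in this changeset.
 */
HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

    /* Read the instruction length, instruction information and exit qualification,
       and import the guest state needed for decoded-IEM execution. */
    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    AssertRCReturn(rc, rc);

    /* Perform the privilege/intercept checks exactly as the other VMX-instruction handlers do. */
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);

    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason     = pVmxTransient->uExitReason;
    ExitInfo.u64Qual     = pVmxTransient->uExitQual;
    ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    ExitInfo.cbInstr     = pVmxTransient->cbInstr;
    if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
    {
        /* VMREAD stores into the memory operand, hence fIsDstOperand = true. */
        RTGCPTR GCPtrDst;
        VBOXSTRICTRC rcStrict = hmR0VmxDecodeMemOperand(pVCpu, &ExitInfo.InstrInfo, ExitInfo.u64Qual, true /* fIsDstOperand */,
                                                        &GCPtrDst);
        if (rcStrict == VINF_SUCCESS)
        { /* likely */ }
        else if (rcStrict == VINF_HM_PENDING_XCPT)
        {
            Assert(pVCpu->hm.s.Event.fPending);
            return VINF_SUCCESS;
        }
        else
            return rcStrict;
        ExitInfo.GCPtrEffAddr = GCPtrDst;
    }

    /* IEMExecDecodedVmread is assumed to follow the same calling convention as IEMExecDecodedVmwrite above. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}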