Changeset 78481 in vbox for trunk/src/VBox
- Timestamp:
- May 13, 2019 9:52:54 AM (6 years ago)
- svn:sync-xref-src-repo-rev:
- 130516
- Location:
- trunk/src/VBox/VMM
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
r78371 r78481 1217 1217 * @param pVCpu The cross context virtual CPU structure. 1218 1218 * @param pCtx Pointer to the guest-CPU context. 1219 * 1220 * @remarks Can be called from ring-0 as well as ring-3. 1219 1221 */ 1220 1222 VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx) 1221 1223 { 1222 1224 NOREF(pCtx); 1225 1226 /* 1227 * Make sure we need to merge the nested-guest VMCS on next nested-guest 1228 * VM entry (if we VM-exit in ring-0 and continue in ring-0 till the next 1229 * nested-guest VM-entry). 1230 */ 1223 1231 pVCpu->hm.s.vmx.fMergedNstGstCtls = false; 1232 1233 CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_ALL); 1234 AssertMsg(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL), 1235 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", pVCpu->cpum.GstCtx.fExtrn, CPUMCTX_EXTRN_ALL)); 1236 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 1237 1238 1224 1239 } 1225 1240 # endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ -
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r78237 r78481 982 982 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu); 983 983 IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu); 984 IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);985 IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);986 IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmi(PVMCPU pVCpu);987 IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);988 IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason);989 984 IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess); 990 985 IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess); … … 15932 15927 15933 15928 /** 15929 * Interface for HM and EM to emulate a VM-exit due to an instruction. 15930 * 15931 * This is meant to be used for those instructions that VMX provides additional 15932 * decoding information beyond just the instruction length! 15933 * 15934 * @returns Strict VBox status code. 15935 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 15936 * @param pExitInfo Pointer to the VM-exit information struct. 15937 * @thread EMT(pVCpu) 15938 */ 15939 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo) 15940 { 15941 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo); 15942 if (pVCpu->iem.s.cActiveMappings) 15943 iemMemRollback(pVCpu); 15944 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 15945 } 15946 15947 15948 /** 15949 * Interface for HM and EM to emulate a VM-exit due to an instruction. 15950 * 15951 * This is meant to be used for those instructions that VMX provides only the 15952 * instruction length. 15953 * 15954 * @returns Strict VBox status code. 15955 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 
15956 * @param uExitReason The VM-exit reason. 15957 * @param cbInstr The instruction length in bytes. 15958 * @thread EMT(pVCpu) 15959 */ 15960 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr) 15961 { 15962 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr); 15963 if (pVCpu->iem.s.cActiveMappings) 15964 iemMemRollback(pVCpu); 15965 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 15966 } 15967 15968 15969 /** 15934 15970 * Interface for HM and EM to emulate the VMREAD instruction. 15935 15971 * -
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r78455 r78481 660 660 case VMX_VMCS_RO_EXIT_QUALIFICATION: 661 661 case VMX_VMCS_RO_IO_RCX: 662 case VMX_VMCS_RO_IO_RS X:662 case VMX_VMCS_RO_IO_RSI: 663 663 case VMX_VMCS_RO_IO_RDI: 664 664 case VMX_VMCS_RO_IO_RIP: … 1089 1089 { 1090 1090 return CPUMSetGuestVmxVmFailInvalid(&pVCpu->cpum.GstCtx); 1091 }1092 1093 1094 /**1095 * Implements VMFailValid for VMX instruction failure.1096 *1097 * @param pVCpu The cross context virtual CPU structure.1098 * @param enmInsErr The VM instruction error.1099 */1100 DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)1101 {1102 return CPUMSetGuestVmxVmFailValid(&pVCpu->cpum.GstCtx, enmInsErr);1103 1091 } 1104 1092 … 2785 2773 } 2786 2774 2775 /* The following VMCS fields are unsupported since we don't inject SMIs into a guest. */ 2776 Assert(pVmcs->u64RoIoRcx.u == 0); 2777 Assert(pVmcs->u64RoIoRsi.u == 0); 2778 Assert(pVmcs->u64RoIoRdi.u == 0); 2779 Assert(pVmcs->u64RoIoRip.u == 0); 2780 2787 2781 /* 2788 2782 * Save the guest state back into the VMCS. … 2886 2880 * @param pExitInfo Pointer to the VM-exit instruction information struct. 2887 2881 */ 2888 DECLINLINE(VBOXSTRICTRC)iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)2882 IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo) 2889 2883 { 2890 2884 /* -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r78371 r78481 7615 7615 } 7616 7616 } 7617 7618 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 7619 # if 0 7620 /** @todo NSTVMX: We handle each of these fields individually by passing it to IEM 7621 * VM-exit handlers. We might handle it differently when using the fast path. */ 7622 /* 7623 * The hardware virtualization state currently consists of VMCS fields that may be 7624 * modified by execution of the nested-guest (that are not part of the general 7625 * guest state) and is visible to guest software. Hence, it is technically part of 7626 * the guest-CPU state when executing a nested-guest. 7627 */ 7628 if ( (fWhat & CPUMCTX_EXTRN_HWVIRT) 7629 && CPUMIsGuestInVmxNonRootMode(pCtx)) 7630 { 7631 PVMXVVMCS pGstVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 7632 rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pGstVmcs->u32RoExitReason); 7633 rc |= VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pGstVmcs->u64RoExitQual.u); 7634 VMXLOCAL_BREAK_RC(rc); 7635 7636 /* 7637 * VM-entry can fail due to invalid-guest state, machine-check events and 7638 * MSR loading failures. Other than VM-exit reason and VM-exit qualification 7639 * all other VMCS fields are left unmodified on VM-entry failure. 7640 * 7641 * See Intel spec. 26.7 "VM-entry Failures During Or After Loading Guest State". 7642 */ 7643 bool const fEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(pGstVmcs->u32RoExitReason); 7644 if (!fEntryFailed) 7645 { 7646 /* 7647 * Some notes on VMCS fields that may need importing when the fast path 7648 * is implemented. Currently we fully emulate VMLAUNCH/VMRESUME in IEM. 7649 * 7650 * Requires fixing up when using hardware-assisted VMX: 7651 * - VM-exit interruption info: Shouldn't reflect host interrupts/NMIs. 7652 * - VM-exit interruption error code: Cleared to 0 when not appropriate. 7653 * - IDT-vectoring info: Think about this. 7654 * - IDT-vectoring error code: Think about this. 7655 * 7656 * Emulated: 7657 * - Guest-interruptiblity state: Derived from FFs and RIP. 
7658 * - Guest pending debug exceptions: Derived from DR6. 7659 * - Guest activity state: Emulated from EM state. 7660 * - Guest PDPTEs: Currently all 0s since we don't support nested EPT. 7661 * - Entry-interrupt info: Emulated, cleared to 0. 7662 */ 7663 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pGstVmcs->u32RoExitIntInfo); 7664 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pGstVmcs->u32RoExitIntErrCode); 7665 rc |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pGstVmcs->u32RoIdtVectoringInfo); 7666 rc |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pGstVmcs->u32RoIdtVectoringErrCode); 7667 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pGstVmcs->u32RoExitInstrLen); 7668 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pGstVmcs->u32RoExitIntInfo); 7669 rc |= VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pGstVmcs->u64RoGuestPhysAddr.u); 7670 rc |= VMXReadVmcsGstN(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pGstVmcs->u64RoGuestLinearAddr.u); 7671 /** @todo NSTVMX: Save and adjust preemption timer value. */ 7672 } 7673 7674 VMXLOCAL_BREAK_RC(rc); 7675 } 7676 # endif 7677 #endif 7617 7678 } 7618 7679 } while (0); … … 10669 10730 * Check if VMLAUNCH/VMRESUME succeeded. 10670 10731 * If this failed, we cause a guru meditation and cease further execution. 10732 * 10733 * However, if we are executing a nested-guest we might fail if we use the 10734 * fast path rather than fully emulating VMLAUNCH/VMRESUME instruction in IEM. 10671 10735 */ 10672 10736 if (RT_LIKELY(rcVMRun == VINF_SUCCESS)) … … 10734 10798 } 10735 10799 } 10800 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 10801 else if (pVmxTransient->fIsNestedGuest) 10802 { 10803 # if 0 10804 /* 10805 * Copy the VM-instruction error field to the guest VMCS. 10806 */ 10807 /** @todo NSTVMX: Verify we're using the fast path. 
*/ 10808 uint32_t u32RoVmInstrError; 10809 rc = VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &u32RoVmInstrError); 10810 AssertRCReturn(rc, rc); 10811 PVMXVVMCS pGstVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 10812 pGstVmcs->u32RoVmInstrError = u32RoVmInstrError; 10813 /** @todo NSTVMX: Advance guest RIP and other fast path related restoration. */ 10814 # else 10815 AssertMsgFailed(("VMLAUNCH/VMRESUME failed but shouldn't happen when VMLAUNCH/VMRESUME was emulated in IEM!\n")); 10816 # endif 10817 } 10818 #endif 10736 10819 else 10737 10820 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed)); … … 12433 12516 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12434 12517 { 12518 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 12435 12519 uint32_t const rcReason = pVmxTransient->uExitReason; 12436 12520 switch (rcReason) 12437 12521 { 12438 12522 case VMX_EXIT_EPT_MISCONFIG: 12523 rcStrict = hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient); 12524 break; 12525 12439 12526 case VMX_EXIT_EPT_VIOLATION: 12527 rcStrict = hmR0VmxExitEptViolation(pVCpu, pVmxTransient); 12528 break; 12529 12440 12530 case VMX_EXIT_IO_INSTR: 12531 { 12532 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient); 12533 AssertRCReturn(rc, rc); 12534 12535 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual); 12536 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual); 12537 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1); 12538 12539 /* Size of the I/O accesses in bytes. 
*/ 12540 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; 12541 uint8_t const cbAccess = s_aIOSizes[uIOSize]; 12542 12543 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess)) 12544 { 12545 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12546 AssertRCReturn(rc, rc); 12547 12548 VMXVEXITINFO ExitInfo; 12549 RT_ZERO(ExitInfo); 12550 ExitInfo.uReason = pVmxTransient->uExitReason; 12551 ExitInfo.cbInstr = pVmxTransient->cbInstr; 12552 ExitInfo.u64Qual = pVmxTransient->uExitQual; 12553 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo); 12554 } 12555 else 12556 rcStrict = hmR0VmxExitIoInstr(pVCpu, pVmxTransient); 12557 break; 12558 } 12559 12441 12560 case VMX_EXIT_CPUID: 12442 12561 case VMX_EXIT_RDTSC: … … 12503 12622 return hmR0VmxExitErrUndefined(pVCpu, pVmxTransient); 12504 12623 } 12505 #undef VMEXIT_CALL_RET 12624 12625 return rcStrict; 12506 12626 } 12507 12627 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */ … … 14083 14203 14084 14204 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. 
*/ 14085 uint32_t uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);14086 uint8_t uIOWidth = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQual);14087 bool fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);14088 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);14089 bool fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);14090 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;14091 AssertReturn(uIO Width <= 3 && uIOWidth!= 2, VERR_VMX_IPE_1);14205 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual); 14206 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual); 14207 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT); 14208 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual); 14209 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF); 14210 bool const fDbgStepping = pVCpu->hm.s.fSingleInstruction; 14211 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1); 14092 14212 14093 14213 /* … … 14112 14232 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */ 14113 14233 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */ 14114 uint32_t const cbValue = s_aIOSizes[uIO Width];14234 uint32_t const cbValue = s_aIOSizes[uIOSize]; 14115 14235 uint32_t const cbInstr = pVmxTransient->cbInstr; 14116 14236 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */ … … 14161 14281 */ 14162 14282 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 
'w' : 'r')); 14163 uint32_t const uAndVal = s_aIOOpAnd[uIO Width];14283 uint32_t const uAndVal = s_aIOOpAnd[uIOSize]; 14164 14284 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual)); 14165 14285 if (fIOWrite) … … 14288 14408 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, 14289 14409 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "", 14290 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIO Width));14410 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize)); 14291 14411 14292 14412 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
Note:
See TracChangeset
for help on using the changeset viewer.