Changeset 78525 in vbox
- Timestamp: May 15, 2019 4:57:07 AM
- svn:sync-xref-src-repo-rev: 130565
- Location: trunk
- Files: 4 edited
trunk/include/VBox/vmm/iem.h (r78481 -> r78525)

@@ line 348 @@
     VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo);
     VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr);
+    VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo);
 #endif
 /** @} */
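This declaration is the entry point that hardware-assisted execution (HM) can call once a nested guest's INVVPID has caused a VM-exit and been decoded. Below is a minimal caller sketch, not part of this changeset: the cbInstr, InstrInfo and GCPtrEffAddr members are taken from the IEMAll.cpp hunk that follows, while uReason and InstrInfo.u are assumptions about the VMXVEXITINFO layout, and hmVmxExitInvvpidSketch is a hypothetical name.

/* Hypothetical caller sketch; assumes the VBox/vmm headers of this tree. */
#include <VBox/vmm/iem.h>
#include <iprt/string.h>                        /* RT_ZERO */

static VBOXSTRICTRC hmVmxExitInvvpidSketch(PVMCPU pVCpu, uint8_t cbInstr,
                                           uint32_t uInstrInfo, RTGCPTR GCPtrEffAddr)
{
    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason      = VMX_EXIT_INVVPID;   /* assumed member: the exit reason      */
    ExitInfo.cbInstr      = cbInstr;            /* VM-exit instruction length           */
    ExitInfo.InstrInfo.u  = uInstrInfo;         /* assumed: raw VM-exit instruction info */
    ExitInfo.GCPtrEffAddr = GCPtrEffAddr;       /* decoded descriptor address           */
    return IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
}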
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r78481 -> r78525)

@@ line 16193 @@
 
 /**
+ * Interface for HM and EM to emulate the INVVPID instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   pExitInfo   Pointer to the VM-exit information struct.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
+    Assert(pExitInfo);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+
+    uint8_t const  iEffSeg          = pExitInfo->InstrInfo.Inv.iSegReg;
+    uint8_t const  cbInstr          = pExitInfo->cbInstr;
+    RTGCPTR const  GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
+    uint64_t const uInvvpidType     = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
+                                    ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
+                                    : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
+    VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, uInvvpidType, pExitInfo);
+    Assert(!pVCpu->iem.s.cActiveMappings);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+
+
+/**
  * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
  *
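The InstrInfo.Inv.iSegReg and InstrInfo.Inv.iReg2 fields consumed above come from the VM-exit instruction-information field. A hedged sketch of the raw decode is shown below, using the bit positions the Intel SDM documents for INVEPT/INVPCID/INVVPID (bits 17:15 segment register, bits 31:28 second register operand); the struct and helper name are illustrative, not VirtualBox API.

#include <stdint.h>

/* Illustrative only: mirrors what InstrInfo.Inv provides above. */
typedef struct
{
    uint8_t iSegReg;    /* bits 17:15 - segment register of the descriptor operand */
    uint8_t iReg2;      /* bits 31:28 - register holding the invalidation type     */
} INVINSTRINFODECODE;

static INVINSTRINFODECODE decodeInvInstrInfo(uint32_t uRawInstrInfo)
{
    INVINSTRINFODECODE Decoded;
    Decoded.iSegReg = (uint8_t)((uRawInstrInfo >> 15) & 0x7);
    Decoded.iReg2   = (uint8_t)((uRawInstrInfo >> 28) & 0xf);
    return Decoded;
}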
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r78481 -> r78525)

@@ line 178 (list of VM-exits not yet implemented) @@
  *  VMX_EXIT_EPT_MISCONFIG
  *  VMX_EXIT_INVEPT
- *  VMX_EXIT_INVVPID
  *  VMX_EXIT_RDRAND
  *  VMX_EXIT_VMFUNC

@@ line 2990 @@
         case VMX_EXIT_INVEPT:
         case VMX_EXIT_INVPCID:
+        case VMX_EXIT_INVVPID:
         case VMX_EXIT_LDTR_TR_ACCESS:
         case VMX_EXIT_GDTR_IDTR_ACCESS:

@@ line 8513 @@
 
 /**
+ * INVVPID instruction execution worker.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure.
+ * @param   cbInstr             The instruction length in bytes.
+ * @param   iEffSeg             The segment of the invvpid descriptor.
+ * @param   GCPtrInvvpidDesc    The address of the invvpid descriptor.
+ * @param   uInvvpidType        The invalidation type.
+ * @param   pExitInfo           Pointer to the VM-exit information struct. Optional,
+ *                              can be NULL.
+ *
+ * @remarks Common VMX instruction checks are already expected to be done by the
+ *          caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxInvvpid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
+                                      uint64_t uInvvpidType, PCVMXVEXITINFO pExitInfo)
+{
+    /* Check if INVVPID instruction is supported, otherwise raise #UD. */
+    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVpid)
+        return iemRaiseUndefinedOpcode(pVCpu);
+
+    /* Nested-guest intercept. */
+    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
+    {
+        if (pExitInfo)
+            return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
+        return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_INVVPID, VMXINSTRID_NONE, cbInstr);
+    }
+
+    /* CPL. */
+    if (pVCpu->iem.s.uCpl != 0)
+    {
+        Log(("invvpid: CPL != 0 -> #GP(0)\n"));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    /*
+     * Validate the INVVPID invalidation type.
+     *
+     * Each of the types has a supported bit in the IA32_VMX_EPT_VPID_CAP MSR.
+     * In theory, it's possible for a CPU to not support flushing individual
+     * addresses but support all the other types, or any other combination.
+     */
+    uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
+    uint8_t const fTypeIndivAddr              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
+    uint8_t const fTypeSingleCtx              = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
+    uint8_t const fTypeAllCtx                 = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
+    uint8_t const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
+    if (   (fTypeIndivAddr              && uInvvpidType == VMXTLBFLUSHVPID_INDIV_ADDR)
+        || (fTypeSingleCtx              && uInvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
+        || (fTypeAllCtx                 && uInvvpidType == VMXTLBFLUSHVPID_ALL_CONTEXTS)
+        || (fTypeSingleCtxRetainGlobals && uInvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS))
+    { /* likely */ }
+    else
+    {
+        Log(("invvpid: invalid/unrecognized invvpid type %#x -> VMFail\n", uInvvpidType));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_TypeInvalid;
+        iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Fetch the invvpid descriptor from guest memory.
+     */
+    RTUINT128U uDesc;
+    VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvvpidDesc);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        /*
+         * Validate the descriptor.
+         */
+        if (uDesc.s.Lo > 0xfff)
+        {
+            Log(("invvpid: reserved bits set in invvpid descriptor %#RX64 -> VMFail\n", uDesc.s.Lo));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_DescRsvd;
+            iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
+        RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
+        uint8_t const       uVpid        = uDesc.s.Lo & UINT64_C(0xfff);
+        uint64_t const      uCr3         = pVCpu->cpum.GstCtx.cr3;
+        switch (uInvvpidType)
+        {
+            case VMXTLBFLUSHVPID_INDIV_ADDR:
+            {
+                if (uVpid != 0)
+                {
+                    if (IEM_IS_CANONICAL(GCPtrInvAddr))
+                    {
+                        /* Invalidate mappings for the linear address tagged with VPID. */
+                        /** @todo PGM support for VPID? Currently just flush everything. */
+                        PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
+                        iemVmxVmSucceed(pVCpu);
+                    }
+                    else
+                    {
+                        Log(("invvpid: invalidation address %#RGP is not canonical -> VMFail\n", GCPtrInvAddr));
+                        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidAddr;
+                        iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+                    }
+                }
+                else
+                {
+                    Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, uInvvpidType));
+                    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidVpid;
+                    iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+                }
+                break;
+            }
+
+            case VMXTLBFLUSHVPID_SINGLE_CONTEXT:
+            {
+                if (uVpid != 0)
+                {
+                    /* Invalidate all mappings with VPID. */
+                    /** @todo PGM support for VPID? Currently just flush everything. */
+                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
+                    iemVmxVmSucceed(pVCpu);
+                }
+                else
+                {
+                    Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, uInvvpidType));
+                    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type1InvalidVpid;
+                    iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+                }
+                break;
+            }
+
+            case VMXTLBFLUSHVPID_ALL_CONTEXTS:
+            {
+                /* Invalidate all mappings with non-zero VPIDs. */
+                /** @todo PGM support for VPID? Currently just flush everything. */
+                PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
+                iemVmxVmSucceed(pVCpu);
+                break;
+            }
+
+            case VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS:
+            {
+                if (uVpid != 0)
+                {
+                    /* Invalidate all mappings with VPID except global translations. */
+                    /** @todo PGM support for VPID? Currently just flush everything. */
+                    PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
+                    iemVmxVmSucceed(pVCpu);
+                }
+                else
+                {
+                    Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, uInvvpidType));
+                    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type3InvalidVpid;
+                    iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
+                }
+                break;
+            }
+
+            IEM_NOT_REACHED_DEFAULT_CASE_RET();
+        }
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+}
+
+
+/**
  * VMXON instruction execution worker.
  *

@@ line 9067 @@
 
 /**
+ * Implements 'INVVPID'.
+ */
+IEM_CIMPL_DEF_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType)
+{
+    return iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, uInvvpidType, NULL /* pExitInfo */);
+}
+
+
+/**
  * Implements VMX's implementation of PAUSE.
  */
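For reference, the 128-bit descriptor the worker fetches and validates has the layout the Intel SDM defines: VPID in bits 15:0, reserved bits above that in the low quadword, and the linear address (used only by the individual-address type) in bits 127:64. Below is a guest-side sketch of issuing the instruction, assuming a 64-bit GCC-style toolchain; it shows how a nested hypervisor would exercise the code path above and is not part of the changeset.

#include <stdint.h>

/* INVVPID descriptor layout per the Intel SDM. */
struct invvpid_desc
{
    uint64_t vpid     : 16;    /* bits 15:0   - VPID to invalidate            */
    uint64_t reserved : 48;    /* bits 63:16  - must be zero                  */
    uint64_t linear_addr;      /* bits 127:64 - linear address (type 0 only)  */
};

/* Returns 0 on VMsucceed, non-zero on VMfail (CF or ZF set). */
static inline int my_invvpid(uint64_t type, uint16_t vpid, uint64_t linear_addr)
{
    struct invvpid_desc desc = { vpid, 0, linear_addr };
    uint8_t fFailed;
    __asm__ __volatile__("invvpid %[desc], %[type]\n\t"  /* 66 0F 38 81 /r */
                         "setna %[failed]"               /* CF=1 or ZF=1 => VMfail */
                         : [failed] "=q" (fFailed)
                         : [desc] "m" (desc), [type] "r" (type)
                         : "cc", "memory");
    return fFailed;
}

/* Example: single-context invalidation (type 1) of VPID 1:
     int rc = my_invvpid(1, 1, 0);
   This executes only in VMX operation; elsewhere it raises #UD, exactly as
   the worker above emulates for a guest without the VPID feature. */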
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h (r76553 -> r78525)

@@ line 304 @@
 /** Opcode 0x66 0x0f 0x38 0x80. */
 FNIEMOP_STUB(iemOp_invept_Gy_Mdq);
 
+/** Opcode 0x66 0x0f 0x38 0x81. */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+FNIEMOP_DEF(iemOp_invvpid_Gy_Mdq)
+{
+    IEMOP_MNEMONIC(invvpid, "invvpid Gy,Mdq");
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    IEMOP_HLP_IN_VMX_OPERATION("invvpid", kVmxVDiag_Invvpid);
+    IEMOP_HLP_VMX_INSTR("invvpid", kVmxVDiag_Invvpid);
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /* Register, memory. */
+        if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
+        {
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t, iEffSeg, 0);
+            IEM_MC_ARG(RTGCPTR, GCPtrInvvpidDesc, 1);
+            IEM_MC_ARG(uint64_t, uInvvpidType, 2);
+            IEM_MC_FETCH_GREG_U64(uInvvpidType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrInvvpidDesc, bRm, 0);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invvpid, iEffSeg, GCPtrInvvpidDesc, uInvvpidType);
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t, iEffSeg, 0);
+            IEM_MC_ARG(RTGCPTR, GCPtrInvvpidDesc, 1);
+            IEM_MC_ARG(uint32_t, uInvvpidType, 2);
+            IEM_MC_FETCH_GREG_U32(uInvvpidType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrInvvpidDesc, bRm, 0);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invvpid, iEffSeg, GCPtrInvvpidDesc, uInvvpidType);
+            IEM_MC_END();
+        }
+    }
+    Log(("iemOp_invvpid_Gy_Mdq: invalid encoding -> #UD\n"));
+    return IEMOP_RAISE_INVALID_OPCODE();
+}
+#else
 FNIEMOP_STUB(iemOp_invvpid_Gy_Mdq);
+#endif
+
 /** Opcode 0x66 0x0f 0x38 0x82. */
 FNIEMOP_DEF(iemOp_invpcid_Gy_Mdq)
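The register-operand extraction in the IEM_MC_FETCH_GREG_U64/U32 lines above combines the ModR/M reg field with the REX.R prefix bit. A standalone illustration follows, assuming (as the OR in the decoder implies) that uRexReg holds REX.R pre-shifted to bit 3; modrm_reg is a hypothetical helper, not IEM code.

#include <stdint.h>
#include <stdio.h>

/* ModR/M reg field is bits 5:3; REX.R supplies bit 3 of the register index. */
static uint8_t modrm_reg(uint8_t bRm, uint8_t fRexR)
{
    return (uint8_t)(((bRm >> 3) & 0x7) | (fRexR << 3));
}

int main(void)
{
    /* invvpid rax, [rdi]: ModR/M 0x07 -> mod=00, reg=000 (RAX), rm=111 ([rdi]) */
    printf("%u\n", modrm_reg(0x07, 0));  /* 0 = RAX holds the invalidation type */
    /* The same encoding behind a REX.R prefix selects R8 instead. */
    printf("%u\n", modrm_reg(0x07, 1));  /* 8 = R8 */
    return 0;
}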