- Timestamp: Oct 4, 2018 6:07:20 AM
- Location: trunk/src/VBox/VMM
- Files: 7 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r74565 → r74603)

@@ -5492,5 +5492,5 @@
          */
         VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
-        if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
+        if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
             return rcStrict0;
     }
@@ -15079,16 +15079,18 @@
  *
  * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   cbInstr     The instruction length in bytes.
  * @param   uValue      The value to load into CR0.
+ * @param   GCPtrEffDst The guest-linear address if the LMSW instruction has a
+ *                      memory operand. Otherwise pass NIL_RTGCPTR.
  *
  * @remarks In ring-0 not all of the state needs to be synced in.
  */
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
 {
     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
 
     iemInitExec(pVCpu, false /*fBypassHandlers*/);
-    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
+    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
     Assert(!pVCpu->iem.s.cActiveMappings);
     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
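The VINF_HM_INTERCEPT_NOT_ACTIVE to VINF_SVM_INTERCEPT_NOT_ACTIVE rename that runs through this changeset follows IEM's strict-status sentinel convention: an intercept helper either returns a VM-exit (or error) status, which the caller propagates, or the "intercept not active" informational code, which tells the caller to continue normal emulation. Below is a minimal standalone C sketch of that control flow; the status values and helpers are hypothetical stand-ins, not the real VBox definitions.

    #include <stdio.h>

    /* Hypothetical stand-ins for VBox's strict status codes. */
    #define MY_SUCCESS                  0  /* VINF_SUCCESS */
    #define MY_SVM_VMEXIT               1  /* VINF_SVM_VMEXIT */
    #define MY_SVM_INTERCEPT_NOT_ACTIVE 2  /* VINF_SVM_INTERCEPT_NOT_ACTIVE */

    /* Stand-in intercept check: pretend only port 0x80 is intercepted. */
    static int checkIoIntercept(unsigned uPort)
    {
        return uPort == 0x80 ? MY_SVM_VMEXIT : MY_SVM_INTERCEPT_NOT_ACTIVE;
    }

    /* Caller-side pattern mirrored from the hunks above: a delivered VM-exit
       ends emulation successfully, any status other than the sentinel is
       propagated, and only the sentinel falls through to normal emulation. */
    static int emulateOut(unsigned uPort)
    {
        int rc = checkIoIntercept(uPort);
        if (rc == MY_SVM_VMEXIT)
            return MY_SUCCESS;
        if (rc != MY_SVM_INTERCEPT_NOT_ACTIVE)
            return rc;
        /* No intercept: perform the actual I/O here. */
        return MY_SUCCESS;
    }

    int main(void)
    {
        printf("out 0x80 -> %d, out 0x60 -> %d\n", emulateOut(0x80), emulateOut(0x60));
        return 0;
    }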
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r74592 → r74603)

@@ -5743,16 +5743,31 @@
  *
  * @param   u16NewMsw   The new value.
+ * @param   GCPtrEffDst The guest-linear address of the source operand in case
+ *                      of a memory operand. For register operand, pass
+ *                      NIL_RTGCPTR.
  */
-IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
+IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
 {
     if (pVCpu->iem.s.uCpl != 0)
         return iemRaiseGeneralProtectionFault0(pVCpu);
     Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
+    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
+    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
+    {
+        VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
+        if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+            return rcStrict;
+    }
+#else
+    RT_NOREF_PV(GCPtrEffDst);
+#endif
 
     /*
      * Compose the new CR0 value and call common worker.
      */
-    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
     uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
     uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
     return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
@@ -6370,5 +6385,5 @@
     if (rcStrict == VINF_SVM_VMEXIT)
         return VINF_SUCCESS;
-    if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+    if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
     {
         Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
@@ -6446,5 +6461,5 @@
     if (rcStrict == VINF_SVM_VMEXIT)
         return VINF_SUCCESS;
-    if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+    if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
     {
         Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
@@ -6526,5 +6541,5 @@
     if (rcStrict == VINF_SVM_VMEXIT)
         return VINF_SUCCESS;
-    if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+    if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
     {
         Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
@@ -6619,5 +6634,5 @@
     if (rcStrict == VINF_SVM_VMEXIT)
         return VINF_SUCCESS;
-    if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+    if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
     {
         Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
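The CR0 composition at the end of iemCImpl_lmsw encodes LMSW's architectural rule: only the low four MSW bits (PE, MP, EM, TS) are taken from the source operand, and PE can be set but never cleared because it is excluded from the clearing mask. A self-contained sketch of just that bit arithmetic, using the standard architectural CR0 bit positions (the same values as VBox's X86_CR0_* constants):

    #include <stdint.h>
    #include <stdio.h>

    /* Architectural CR0 bit positions. */
    #define CR0_PE UINT64_C(0x1)
    #define CR0_MP UINT64_C(0x2)
    #define CR0_EM UINT64_C(0x4)
    #define CR0_TS UINT64_C(0x8)

    /* Mirrors the composition above: clear MP/EM/TS (but not PE), then OR in
       the low four bits of the source MSW. PE can therefore be set by LMSW
       but never cleared. */
    static uint64_t ComposeLmswCr0(uint64_t uCr0, uint16_t u16NewMsw)
    {
        uint64_t uNewCr0 = uCr0 & ~(CR0_MP | CR0_EM | CR0_TS);
        uNewCr0 |= u16NewMsw & (CR0_PE | CR0_MP | CR0_EM | CR0_TS);
        return uNewCr0;
    }

    int main(void)
    {
        /* PE and the high CR0 bits survive even though the MSW clears them;
           TS is cleared, MP kept: 0x8001002b -> 0x80010023. */
        printf("%#llx\n", (unsigned long long)ComposeLmswCr0(UINT64_C(0x8001002b), 0x0002));
        return 0;
    }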
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h (r74336 → r74603)

@@ -1214,5 +1214,5 @@
     if (rcStrict == VINF_SVM_VMEXIT)
         return VINF_SUCCESS;
-    if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+    if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
     {
         Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx, OP_SIZE / 8,
@@ -1285,5 +1285,5 @@
     if (rcStrict == VINF_SVM_VMEXIT)
         return VINF_SUCCESS;
-    if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+    if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
     {
         Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
@@ -1486,5 +1486,5 @@
     if (rcStrict == VINF_SVM_VMEXIT)
         return VINF_SUCCESS;
-    if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+    if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
     {
         Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx, OP_SIZE / 8,
@@ -1545,5 +1545,5 @@
     if (rcStrict == VINF_SVM_VMEXIT)
         return VINF_SUCCESS;
-    if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
+    if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
     {
         Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h (r74357 → r74603)

@@ -947,3 +947,3 @@
     }
 
-    return VINF_HM_INTERCEPT_NOT_ACTIVE;
+    return VINF_SVM_INTERCEPT_NOT_ACTIVE;
@@ -997,3 +997,3 @@
      *        intercepts). */
     AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
-    return VINF_HM_INTERCEPT_NOT_ACTIVE;
+    return VINF_SVM_INTERCEPT_NOT_ACTIVE;
@@ -1061,3 +1061,3 @@
         return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
     }
-    return VINF_HM_INTERCEPT_NOT_ACTIVE;
+    return VINF_SVM_INTERCEPT_NOT_ACTIVE;
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r74592 → r74603)

@@ -681,5 +681,5 @@
         case VMX_VMCS_RO_IO_RDI:
         case VMX_VMCS_RO_IO_RIP:
-        case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR:   return true;
+        case VMX_VMCS_RO_GUEST_LINEAR_ADDR:        return true;
 
         /* Guest-state fields. */
@@ -2608,6 +2608,7 @@
  * instruction execution.
  *
- * In our implementation, all undefined fields are generally cleared (caller's
- * responsibility).
+ * In our implementation in IEM, all undefined fields are generally cleared. However,
+ * if the caller supplies information (from say the physical CPU directly) it is
+ * then possible that the undefined fields are not cleared.
  *
  * See Intel spec. 27.2.1 "Basic VM-Exit Information".
@@ -2755,3 +2756,92 @@
 
     return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
+}
+
+
+/**
+ * VMX VM-exit handler for VM-exits due to LMSW.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   uGuestCr0   The current guest CR0.
+ * @param   pu16NewMsw  The machine-status word specified in LMSW's source
+ *                      operand. This will be updated depending on the VMX
+ *                      guest/host CR0 mask if LMSW is not intercepted.
+ * @param   GCPtrEffDst The guest-linear address of the source operand in case
+ *                      of a memory operand. For register operand, pass
+ *                      NIL_RTGCPTR.
+ * @param   cbInstr     The instruction length (in bytes).
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPU pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
+                                              uint8_t cbInstr)
+{
+    /*
+     * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
+     *
+     * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
+     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
+     */
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    Assert(pu16NewMsw);
+
+    bool fIntercept = false;
+    uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
+    uint32_t const fReadShadow  = pVmcs->u64Cr0ReadShadow.u;
+
+    /*
+     * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
+     * CR0.PE case first, before the rest of the bits in the MSW.
+     *
+     * If CR0.PE is owned by the host and CR0.PE differs between the
+     * MSW (source operand) and the read-shadow, we must cause a VM-exit.
+     */
+    if (   (fGstHostMask & X86_CR0_PE)
+        && (*pu16NewMsw  & X86_CR0_PE)
+        && !(fReadShadow & X86_CR0_PE))
+        fIntercept = true;
+
+    /*
+     * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
+     * bits differ between the MSW (source operand) and the read-shadow, we must
+     * cause a VM-exit.
+     */
+    uint32_t fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
+    if ((fReadShadow & fGstHostLmswMask) != (*pu16NewMsw & fGstHostLmswMask))
+        fIntercept = true;
+
+    if (fIntercept)
+    {
+        Log2(("lmsw: Guest intercept -> VM-exit\n"));
+
+        VMXVEXITINFO ExitInfo;
+        RT_ZERO(ExitInfo);
+        ExitInfo.uReason = VMX_EXIT_MOV_CRX;
+        ExitInfo.cbInstr = cbInstr;
+
+        bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
+        if (fMemOperand)
+        {
+            Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
+            ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
+        }
+
+        ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER,  0) /* CR0 */
+                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS,    VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
+                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP,   fMemOperand)
+                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, *pu16NewMsw);
+
+        return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
+    }
+
+    /*
+     * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
+     * CR0 guest/host mask must be left unmodified.
+     *
+     * See Intel spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
+     */
+    fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
+    *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (*pu16NewMsw & ~fGstHostLmswMask);
+
+    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
 }
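Stripped of the VMCS plumbing, the intercept decision in iemVmxVmexitInstrLmsw is a pure predicate over the CR0 guest/host mask and the CR0 read shadow. The following standalone sketch isolates that predicate; it is a simplification for illustration (architectural bit positions, no VBox types), not the VBox API.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CR0_PE UINT32_C(0x1)
    #define CR0_MP UINT32_C(0x2)
    #define CR0_EM UINT32_C(0x4)
    #define CR0_TS UINT32_C(0x8)

    /* Returns true if LMSW with the given source MSW must cause a VM-exit,
       given the VMCS CR0 guest/host mask and CR0 read shadow (simplified
       from iemVmxVmexitInstrLmsw above). */
    static bool LmswMustVmexit(uint32_t fGstHostMask, uint32_t fReadShadow, uint16_t u16NewMsw)
    {
        /* Host-owned PE: setting PE while the read shadow has it clear
           intercepts. LMSW can never clear PE, so only 0 -> 1 matters. */
        if (   (fGstHostMask & CR0_PE)
            && (u16NewMsw    & CR0_PE)
            && !(fReadShadow & CR0_PE))
            return true;

        /* Host-owned MP/EM/TS: any difference between the source MSW and
           the read shadow intercepts. */
        uint32_t const fMask = fGstHostMask & (CR0_MP | CR0_EM | CR0_TS);
        return (fReadShadow & fMask) != (u16NewMsw & fMask);
    }

    int main(void)
    {
        /* Host owns TS, shadow shows TS clear, guest sets TS -> VM-exit. */
        printf("%d\n", LmswMustVmexit(CR0_TS, 0, CR0_TS)); /* 1 */
        /* Host owns no LMSW bits -> never intercepted. */
        printf("%d\n", LmswMustVmexit(0, 0, CR0_TS));      /* 0 */
        return 0;
    }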
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h (r74155 → r74603)

@@ -547,19 +547,20 @@
     {
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-        IEM_MC_BEGIN(1, 0);
-        IEM_MC_ARG(uint16_t, u16Tmp, 0);
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(uint16_t,      u16Tmp,                   0);
+        IEM_MC_ARG_CONST(RTGCPTR, GCPtrEffDst, NIL_RTGCPTR, 1);
         IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
-        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
+        IEM_MC_CALL_CIMPL_2(iemCImpl_lmsw, u16Tmp, GCPtrEffDst);
         IEM_MC_END();
     }
     else
     {
-        IEM_MC_BEGIN(1, 1);
-        IEM_MC_ARG(uint16_t, u16Tmp, 0);
-        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(uint16_t, u16Tmp,      0);
+        IEM_MC_ARG(RTGCPTR,  GCPtrEffDst, 1);
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
         IEM_MC_FETCH_MEM_U16(u16Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
-        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
+        IEM_MC_CALL_CIMPL_2(iemCImpl_lmsw, u16Tmp, GCPtrEffDst);
         IEM_MC_END();
     }
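The two IEM_MC_* paths above correspond to LMSW's two encodings: when the mod field of the ModR/M byte is 3, the operand is a register and NIL_RTGCPTR is passed as a constant argument; otherwise the operand is in memory and the computed effective address is passed. A plain-C sketch of that decode split, independent of VBox's microcode macros:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The top two bits of a ModR/M byte are the "mod" field; mod == 3
       selects the register form, anything else a memory operand. This is
       the split the decoder above makes before choosing which
       iemCImpl_lmsw argument setup to use. */
    static bool IsRegisterForm(uint8_t bRm)
    {
        return (bRm >> 6) == 3;
    }

    int main(void)
    {
        printf("0xf0 -> %s, 0x30 -> %s\n",
               IsRegisterForm(0xf0) ? "register" : "memory",
               IsRegisterForm(0x30) ? "register" : "memory");
        return 0;
    }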
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r74545 → r74603)

@@ -78,3 +78,4 @@
 #define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE     RT_BIT_32(5)
 #define HMVMX_READ_EXIT_INSTR_INFO                  RT_BIT_32(6)
+#define HMVMX_READ_GUEST_LINEAR_ADDR                RT_BIT_32(7)
 /** @} */
@@ -263,4 +264,6 @@
     /** The VM-exit exit code qualification. */
     uint64_t uExitQual;
+    /** The Guest-linear address. */
+    uint64_t uGuestLinearAddr;
 
     /** The VM-exit interruption-information field. */
@@ -724,5 +727,4 @@
 
 /**
- * Reads the exit code qualification from the VMCS into the VMX transient
- * structure.
+ * Reads the VM-exit Qualification from the VMCS into the VMX transient structure.
  *
@@ -739,4 +741,24 @@
         AssertRCReturn(rc, rc);
         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Reads the Guest-linear address from the VMCS into the VMX transient structure.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling EMT. (Required for the VMCS cache case.)
+ * @param   pVmxTransient   Pointer to the VMX transient structure.
+ */
+DECLINLINE(int) hmR0VmxReadGuestLinearAddrVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
+    {
+        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr); NOREF(pVCpu);
+        AssertRCReturn(rc, rc);
+        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
     }
     return VINF_SUCCESS;
 }
@@ -12282,5 +12304,8 @@
     {
         /* Note! LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here. */
-        rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual));
+        rc = hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
+        AssertRCReturn(rc, rc);
+        rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual),
+                                      pVmxTransient->uGuestLinearAddr);
         AssertMsg(   rcStrict == VINF_SUCCESS
                   || rcStrict == VINF_IEM_RAISED_XCPT
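The new hmR0VmxReadGuestLinearAddrVmcs follows the same read-once caching pattern as the other HMVMX_READ_* accessors: a bit in fVmcsFieldsRead records whether a field has already been fetched for the current VM-exit, so repeated accessor calls do not issue redundant VMREADs. A generic sketch of the pattern with hypothetical names (not the real VMX API):

    #include <stdint.h>
    #include <stdio.h>

    /* Read-flag bits, mirroring the HMVMX_READ_* defines in the diff. */
    #define READ_EXIT_QUALIFICATION  (UINT32_C(1) << 0)
    #define READ_GUEST_LINEAR_ADDR   (UINT32_C(1) << 1)

    typedef struct TRANSIENT
    {
        uint32_t fFieldsRead;      /* which fields were fetched this exit */
        uint64_t uGuestLinearAddr; /* cached copy of the field */
    } TRANSIENT;

    /* Stand-in for a VMREAD; pretend the hardware value is fixed. */
    static int ReadHwField(uint64_t *puValue)
    {
        *puValue = UINT64_C(0xdeadbeef);
        printf("(hardware read)\n");
        return 0;
    }

    /* The caching accessor: fetch once per exit, then serve the cache. */
    static int ReadGuestLinearAddr(TRANSIENT *pT)
    {
        if (!(pT->fFieldsRead & READ_GUEST_LINEAR_ADDR))
        {
            int rc = ReadHwField(&pT->uGuestLinearAddr);
            if (rc != 0)
                return rc;
            pT->fFieldsRead |= READ_GUEST_LINEAR_ADDR;
        }
        return 0;
    }

    int main(void)
    {
        TRANSIENT T = { 0, 0 };
        ReadGuestLinearAddr(&T);   /* performs the hardware read */
        ReadGuestLinearAddr(&T);   /* served from the cache */
        printf("%#llx\n", (unsigned long long)T.uGuestLinearAddr);
        return 0;
    }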