Changeset 81206 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
- Timestamp: Oct 10, 2019 10:58:18 AM (5 years ago)
- Files: 1 edited
- Legend: Unmodified | Added | Removed
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (diff r81153 → r81206)

@@ -4849 +4849 @@ (hmR0VmxExportGuestApicTpr: now returns void, AssertRC instead of AssertRCReturn)
  * Exports the guest APIC TPR state into the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pVmxTransient   The VMX-transient structure.
@@
  * @remarks No-long-jump zone!!!
  */
-static int  hmR0VmxExportGuestApicTpr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+static void hmR0VmxExportGuestApicTpr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
@@
             uint8_t u8PendingIntr = 0;
             int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
-            AssertRCReturn(rc, rc);
+            AssertRC(rc);

             /*
@@
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
     }
-    return VINF_SUCCESS;
 }

@@ -4967 +4965 @@ (hmR0VmxExportGuestXcptIntercepts: now returns void)
  * Exports the exception intercepts required for guest execution in the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pVmxTransient   The VMX-transient structure.
@@
  * @remarks No-long-jump zone!!!
  */
-static int  hmR0VmxExportGuestXcptIntercepts(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+static void hmR0VmxExportGuestXcptIntercepts(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
@@
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
     }
-    return VINF_SUCCESS;
 }

@@ -4994 +4990 @@ (hmR0VmxExportGuestRip: now returns void)
  * Exports the guest's RIP into the guest-state area in the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu   The cross context virtual CPU structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int  hmR0VmxExportGuestRip(PVMCPUCC pVCpu)
+static void hmR0VmxExportGuestRip(PVMCPUCC pVCpu)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
@@
         Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
     }
-    return VINF_SUCCESS;
 }

@@ -5018 +5012 @@ (hmR0VmxExportGuestRsp: now returns void)
  * Exports the guest's RSP into the guest-state area in the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu   The cross context virtual CPU structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int  hmR0VmxExportGuestRsp(PVMCPUCC pVCpu)
+static void hmR0VmxExportGuestRsp(PVMCPUCC pVCpu)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
@@
         Log4Func(("rsp=%#RX64\n", pVCpu->cpum.GstCtx.rsp));
     }
-    return VINF_SUCCESS;
 }

@@ -5042 +5034 @@ (hmR0VmxExportGuestRflags: now returns void)
  * Exports the guest's RFLAGS into the guest-state area in the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pVmxTransient   The VMX-transient structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int  hmR0VmxExportGuestRflags(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+static void hmR0VmxExportGuestRflags(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
@@
         Log4Func(("eflags=%#RX32\n", fEFlags.u32));
     }
-    return VINF_SUCCESS;
 }

@@ -9087 +9077 @@ (full export path: drop rc plumbing for the now-void exporters)
     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);

-    rc = hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient);
-    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
-
-    rc = hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient);
-    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
-
-    rc = hmR0VmxExportGuestRip(pVCpu);
-    rc |= hmR0VmxExportGuestRsp(pVCpu);
-    rc |= hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
-    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
+    hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient);
+    hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient);
+    hmR0VmxExportGuestRip(pVCpu);
+    hmR0VmxExportGuestRsp(pVCpu);
+    hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);

     rc = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);

@@ -9141 +9126 @@ (shared-state export: simplified RFLAGS re-export)
     /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
     if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
-    {
-        rc = hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
-        AssertRC(rc);
-    }
+        hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
 }

@@ -9182 +9164 @@ (minimal export fast path: now covers RIP/RSP/RFLAGS/HWVIRT, not just RIP)
     /*
-     * For many exits it's only RIP that changes and hence try to export it first
+     * For many exits it's only RIP/RSP/RFLAGS that changes and hence try to export it first
      * without going through a lot of change flag checks.
      */
-    VBOXSTRICTRC rcStrict;
-    uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
-    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
-    if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP)
-    {
-        rcStrict = hmR0VmxExportGuestRip(pVCpu);
-        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-        { /* likely */}
-        else
-            AssertMsgFailedReturn(("Failed to export guest RIP! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
+    VBOXSTRICTRC   rcStrict;
+    uint64_t const fCtxMask     = HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE;
+    uint64_t const fMinimalMask = HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT;
+    uint64_t const fCtxChanged  = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
+
+    /* If only RIP/RSP/RFLAGS/HWVIRT changed, export only those (quicker, happens more often). */
+    if (   (fCtxChanged & fMinimalMask)
+        && !(fCtxChanged & (fCtxMask & ~fMinimalMask)))
+    {
+        hmR0VmxExportGuestRip(pVCpu);
+        hmR0VmxExportGuestRsp(pVCpu);
+        hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
+        rcStrict = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
     }
-    else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
+    /* If anything else also changed, go through the full export routine and export as required. */
+    else if (fCtxChanged & fCtxMask)
     {
         rcStrict = hmR0VmxExportGuestState(pVCpu, pVmxTransient);
@@
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
     }
-    else
-        rcStrict = VINF_SUCCESS;
+    /* else: Nothing changed, nothing to load here. */

 #ifdef VBOX_STRICT
     /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
     fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
-    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
     AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)),
               ("fCtxChanged=%#RX64\n", fCtxChanged));

@@ -14842 +14826 @@ (RDMSR exit: RAX/RDX no longer flagged as changed)
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
     if (rcStrict == VINF_SUCCESS)
-        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
-                                                 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX);
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     else if (rcStrict == VINF_IEM_RAISED_XCPT)
     {
Note: See TracChangeset for help on using the changeset viewer.