- Timestamp: Apr 11, 2013 8:08:37 PM
- File: 1 edited
Legend:
- Unmodified (no prefix)
- Added (+)
- Removed (-)
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
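This changeset removes the redundant PVM parameter from the guest-state saving helpers in HMVMXR0.cpp: every helper already receives the VMCPU, from which the VM can be reached, so the extra argument only cluttered the call sites. A minimal sketch of the new convention, not taken verbatim from the source (the wrapper function is hypothetical; CTX_SUFF() and the hm.s.fNestedPaging field appear in the hunks below):

    /* Hypothetical caller illustrating the new convention. */
    static void hmR0VmxSketchUse(PVMCPU pVCpu)
    {
        /* In ring-0 code CTX_SUFF(pVM) selects the ring-0 VM pointer member. */
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->hm.s.fNestedPaging)
        {
            /* ... only the few paths that genuinely need the VM look it up ... */
        }
    }

Call sites shrink accordingly: hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx) becomes hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx).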
r45487 → r45496

  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestCR0(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestCR4(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestRip(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RIP)
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestRsp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RSP)
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestRflags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RFLAGS)
…
     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     {
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
         Assert(pVM->hm.s.vmx.pRealModeTSS);
         Log(("Saving real-mode RFLAGS VT-x view=%#RX64\n", pMixedCtx->rflags.u64));
…
  * guest-CPU context.
  */
-static int hmR0VmxSaveGuestGprs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+static int hmR0VmxSaveGuestGprs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-    int rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRsp(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     return rc;
 }
…
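All of the register-saving helpers above share a lazy read-back scheme: a VMCS field is read at most once per VM-exit, guarded by a VMX_UPDATED_GUEST_* bit in fUpdatedGuestState. A sketch of the shape, assuming the VMXReadVmcsGCPtr() accessor and the VMX_VMCS_GUEST_RSP field name (the guard flags come from the hunks above):

    DECLINLINE(int) hmR0VmxSaveGuestRspSketch(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    {
        int rc = VINF_SUCCESS;
        if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RSP))
        {
            RTGCUINTREG uVal = 0;
            rc = VMXReadVmcsGCPtr(VMX_VMCS_GUEST_RSP, &uVal);   /* assumed accessor/field names */
            AssertRCReturn(rc, rc);
            pMixedCtx->rsp = uVal;
            pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_RSP;
        }
        return rc;   /* repeated calls within one VM-exit are now cheap no-ops */
    }

Because of this guard, hmR0VmxSaveGuestState() can simply call every helper in turn, and functions such as hmR0VmxSaveGuestSegmentRegs() can re-request CR0 without issuing a second VMREAD.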
  * from the guest-state area in the VMCS.
  *
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(void) hmR0VmxSaveGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(void) hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     uint32_t uIntrState = 0;
…
     Assert(   uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
            || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
-    rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);    /* for hmR0VmxLoadGuestIntrState(). */
+    rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);         /* for hmR0VmxLoadGuestIntrState(). */
     AssertRC(rc);
     EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestActivityState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestSysenterMsrs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestFSBaseMsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     RTGCUINTREG uVal = 0;
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestGSBaseMsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     RTGCUINTREG uVal = 0;
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestControlRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     RTGCUINTREG uVal = 0;
…

     /* Guest CR0. Guest FPU. */
-    rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);

     /* Guest CR4. */
-    rc |= hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR3))
     {
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
         if (   pVM->hm.s.fNestedPaging
             && CPUMIsGuestPagingEnabledEx(pMixedCtx))
…

     /* We require EFER to check PAE mode. */
-    rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);

     /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxSaveGuestSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
 #ifdef VMX_USE_CACHED_VMCS_ACCESSES
…
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_SEGMENT_REGS))
     {
-        rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
+        rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
         rc |= VMXLOCAL_READ_SEG(CS, cs);
         rc |= VMXLOCAL_READ_SEG(SS, ss);
…
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_TR))
     {
-        rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
+        rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);

         /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestDebugRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+DECLINLINE(int) hmR0VmxSaveGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     int rc = VINF_SUCCESS;
…
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
…
  * @remarks No-long-jump zone!!!
5350 5337 */ 5351 DECLINLINE(int) hmR0VmxSaveGuestApicState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)5338 DECLINLINE(int) hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5352 5339 { 5353 5340 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */ … … 5362 5349 * 5363 5350 * @returns VBox status code. 5364 * @param pVM Pointer to the VM.5365 5351 * @param pVCpu Pointer to the VMCPU. 5366 5352 * @param pMixedCtx Pointer to the guest-CPU context. The data may be … … 5368 5354 * before using them. 5369 5355 */ 5370 static int hmR0VmxSaveGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5371 { 5372 Assert(pVM); 5356 static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5357 { 5373 5358 Assert(pVCpu); 5374 5359 Assert(pMixedCtx); … … 5379 5364 VMMRZCallRing3Disable(pVCpu); 5380 5365 5381 int rc = hmR0VmxSaveGuestGprs(pV M, pVCpu, pMixedCtx);5382 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGprs failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5383 5384 rc = hmR0VmxSaveGuestControlRegs(pV M, pVCpu, pMixedCtx);5385 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5386 5387 rc = hmR0VmxSaveGuestSegmentRegs(pV M, pVCpu, pMixedCtx);5388 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5389 5390 rc = hmR0VmxSaveGuestDebugRegs(pV M, pVCpu, pMixedCtx);5391 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5392 5393 rc = hmR0VmxSaveGuestSysenterMsrs(pV M, pVCpu, pMixedCtx);5394 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5395 5396 rc = hmR0VmxSaveGuestFSBaseMsr(pV M, pVCpu, pMixedCtx);5397 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5398 5399 rc = hmR0VmxSaveGuestGSBaseMsr(pV M, pVCpu, pMixedCtx);5400 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5401 5402 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pV M, pVCpu, pMixedCtx);5403 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5404 5405 rc = hmR0VmxSaveGuestActivityState(pV M, pVCpu, pMixedCtx);5406 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5407 5408 rc = hmR0VmxSaveGuestApicState(pV M, pVCpu, pMixedCtx);5409 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pV M=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);5366 int rc = hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx); 5367 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGprs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 5368 5369 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 5370 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 5371 5372 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 5373 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 5374 5375 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx); 5376 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc); 5377 5378 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx); 5379 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! 
     {
         /* We need the control registers now, make sure the guest-CPU context is updated. */
-        rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+        rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
         AssertRCReturn(rc, rc);
…
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));

-    int rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == VMX_UPDATED_GUEST_ALL);
     AssertRC(rc);
…
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

-    /* We want to see what the guest-state was before VM-entry, don't resync here, as we will never continue guest execution.*/
-    if (rcExit == VERR_VMX_INVALID_GUEST_STATE)
+    if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
+    {
+        /* We want to see what the guest-state was before VM-entry, don't resync here, as we won't continue guest execution. */
         return;
+    }
+    else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
+    {
+        VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
+        pVCpu->hm.s.vmx.lasterror.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
+        pVCpu->hm.s.vmx.lasterror.idEnteredCpu    = pVCpu->hm.s.idEnteredCpu;
+        pVCpu->hm.s.vmx.lasterror.idCurrentCpu    = RTMpCpuId();
+        return;
+    }

     /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
…
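The new VERR_VMX_INVALID_VMCS_PTR branch above records the active VMCS physical address, the VMCS revision, and the entered/current CPU IDs before bailing out. A sketch of how such a capture might be reported afterwards; the wrapper and log format are illustrative, only the lasterror fields come from the hunk above:

    /* Hypothetical diagnostic dump; not part of this changeset. */
    static void hmR0VmxSketchLogVmcsPtrError(PVMCPU pVCpu)
    {
        LogRel(("Invalid VMCS pointer: phys=%#RX64 rev=%#RX32 entered-cpu=%u current-cpu=%u\n",
                pVCpu->hm.s.vmx.lasterror.u64VMCSPhys,
                pVCpu->hm.s.vmx.lasterror.u32VMCSRevision,
                pVCpu->hm.s.vmx.lasterror.idEnteredCpu,
                pVCpu->hm.s.vmx.lasterror.idCurrentCpu));
    }

Capturing both CPU IDs on the failing path matters because the VMCS may have been entered on one CPU and found invalid on another; comparing idEnteredCpu with idCurrentCpu makes such migrations visible.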
     /* We need the guests's RFLAGS for sure from this point on, make sure it is updated. */
-    int rc = hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     /* We require CR0 to check if the guest is in real-mode. */
-    int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     /* Save the required guest state bits from the VMCS. */
-    rc = hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     /* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */
-    rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
     AssertRC(rc);
…
     if (pVM->hm.s.fTPRPatchingActive)
     {
-        int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
+        int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
         AssertRC(rc);
         pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);   /* MSR_K8_LSTAR contains the guest TPR. */
…
     {
         /* Update the guest interruptibility-state from the VMCS. */
-        hmR0VmxSaveGuestIntrState(pVM, pVCpu, pMixedCtx);
+        hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);

         /*
…

+/**
+ * Advances the guest RIP after reading it from the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu           Pointer to the VMCPU.
+ * @param   pMixedCtx       Pointer to the guest-CPU context. The data maybe
+ *                          out-of-sync. Make sure to update the required fields
+ *                          before using them.
+ * @param   pVmxTransient   Pointer to the VMX transient structure.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
+{
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
+    rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+    AssertRCReturn(rc, rc);
+
+    pMixedCtx->rip += pVmxTransient->cbInstr;
+    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
+    return rc;
+}
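hmR0VmxAdvanceGuestRip() folds a sequence that was duplicated across the VM-exit handlers (read the exit instruction length, sync RIP, bump it past the exiting instruction, mark the context dirty) into a single call. A condensed before/after sketch; the handler shape and names follow the hunks on this page, with bodies abbreviated:

    /* Before r45496: each handler open-coded the RIP advance. */
    static int hmR0VmxExitWbinvdBefore(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    {
        int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
        rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
        AssertRCReturn(rc, rc);
        pMixedCtx->rip += pVmxTransient->cbInstr;               /* skip the exiting instruction */
        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;   /* RIP must be reloaded on re-entry */
        return VINF_SUCCESS;
    }

    /* After r45496: one helper call does all of the above. */
    static int hmR0VmxExitWbinvdAfter(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    {
        return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    }

The WBINVD and INVD hunks below are the smallest real instances of this rewrite; the CPUID, RDTSC, RDTSCP, RDPMC, INVLPG, MONITOR, MWAIT, RDMSR and WRMSR handlers follow the same pattern.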

 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
…
         default:
         {
-            rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
+            rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
             AssertRCReturn(rc, rc);
…
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-    rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
-
-    pMixedCtx->rip += pVmxTransient->cbInstr;
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
-
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
-    return VINF_SUCCESS;
+    return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
 }
…
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-    rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
-
-    pMixedCtx->rip += pVmxTransient->cbInstr;
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
-
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
-    return VINF_SUCCESS;
+    return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
 }
…
     if (RT_LIKELY(rc == VINF_SUCCESS))
     {
-        rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        AssertRCReturn(rc, rc);
+        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
         Assert(pVmxTransient->cbInstr == 2);
-
-        Log(("hmR0VmxExitCpuid: RIP=%#RX64\n", pMixedCtx->rip));
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     }
     else
…
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);    /** @todo review if CR4 is really required by EM. */
+    int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);         /** @todo review if CR4 is really required by EM. */
     AssertRCReturn(rc, rc);
…
     if (RT_LIKELY(rc == VINF_SUCCESS))
     {
-        rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
+        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
         Assert(pVmxTransient->cbInstr == 2);
-
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
-
         /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
…
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);    /** @todo review if CR4 is really required by EM. */
-    rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);     /* For MSR_K8_TSC_AUX */
+    int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);         /** @todo review if CR4 is really required by EM. */
+    rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);          /* For MSR_K8_TSC_AUX */
     AssertRCReturn(rc, rc);
…
     if (RT_LIKELY(rc == VINF_SUCCESS))
     {
-        rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
+        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
         Assert(pVmxTransient->cbInstr == 3);
-
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
-
         /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
…
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);    /** @todo review if CR4 is really required by EM. */
-    rc |= hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);       /** @todo review if CR0 is really required by EM. */
+    int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);         /** @todo review if CR4 is really required by EM. */
+    rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);            /** @todo review if CR0 is really required by EM. */
     AssertRCReturn(rc, rc);
…
     if (RT_LIKELY(rc == VINF_SUCCESS))
     {
-        rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
+        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
         Assert(pVmxTransient->cbInstr == 2);
-
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     }
     else
…
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
-    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     rc = VBOXSTRICTRC_VAL(rc2);
     if (RT_LIKELY(rc == VINF_SUCCESS))
-    {
-        rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
-
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
-    }
+        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     else
     {
…
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);

     rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
     if (RT_LIKELY(rc == VINF_SUCCESS))
-    {
-        rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
-
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
-    }
+        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     else
     {
…
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
         || rc == VINF_EM_HALT))
     {
-        int rc3 = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        rc3 |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
+        int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
         AssertRCReturn(rc3, rc3);
-
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;

         if (   rc == VINF_EM_HALT
…
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT);
-    int rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
     rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
-    rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
     /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
-    int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     if (RT_LIKELY(rc == VINF_SUCCESS))
     {
-        rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
-
+        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
         Assert(pVmxTransient->cbInstr == 2);
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     }
     return rc;
…
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-    AssertRCReturn(rc, rc);
-    Assert(pVmxTransient->cbInstr == 2);
-
+    int rc = VINF_SUCCESS;
     /* If TPR patching is active, LSTAR holds the guest TPR, writes to it must be propagated to the APIC. */
     if (   pVM->hm.s.fTPRPatchingActive
…
     }

-    rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
-    pMixedCtx->rip += pVmxTransient->cbInstr;
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
+    rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
+    Assert(pVmxTransient->cbInstr == 2);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
     return VINF_SUCCESS;
…
     /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
-    rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     if (RT_LIKELY(rc == VINF_SUCCESS))
     {
-        rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        AssertRCReturn(rc, rc);
-
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
+        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);

         /* If this is an X2APIC WRMSR access, update the APIC state as well. */
…
 #if 0
     /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
Sync entire state */ 7659 rc = hmR0VmxSaveGuestState(pV M, pVCpu, pMixedCtx);7609 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 7660 7610 #else 7661 rc = hmR0VmxSaveGuestGprs(pV M, pVCpu, pMixedCtx);7662 rc |= hmR0VmxSaveGuestControlRegs(pV M, pVCpu, pMixedCtx);7663 rc |= hmR0VmxSaveGuestSegmentRegs(pV M, pVCpu, pMixedCtx);7611 rc = hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx); 7612 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 7613 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 7664 7614 #endif 7665 7615 AssertRCReturn(rc, rc); … … 7675 7625 Log(("CR0 write rc=%d\n", rc)); 7676 7626 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 7627 break; 7628 case 2: /* C2 **/ 7629 /* Nothing to do here, CR2 it's not part of the VMCS. */ 7677 7630 break; 7678 7631 case 3: /* CR3 */ … … 7702 7655 { 7703 7656 /* EMInterpretCRxRead() requires EFER MSR, CS. */ 7704 rc = hmR0VmxSaveGuestSegmentRegs(pV M, pVCpu, pMixedCtx);7657 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 7705 7658 AssertRCReturn(rc, rc); 7706 7659 Assert( !pVM->hm.s.fNestedPaging … … 7723 7676 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */ 7724 7677 { 7725 rc = hmR0VmxSaveGuestCR0(pV M, pVCpu, pMixedCtx);7678 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 7726 7679 AssertRCReturn(rc, rc); 7727 7680 rc = EMInterpretCLTS(pVM, pVCpu); … … 7735 7688 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */ 7736 7689 { 7737 rc = hmR0VmxSaveGuestCR0(pV M, pVCpu, pMixedCtx);7690 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 7738 7691 AssertRCReturn(rc, rc); 7739 7692 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification)); … … 7756 7709 if (RT_SUCCESS(rc)) 7757 7710 { 7758 int rc2 = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx); 7759 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient); 7711 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient); 7760 7712 AssertRCReturn(rc2, rc2); 7761 pMixedCtx->rip += pVmxTransient->cbInstr;7762 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;7763 7713 } 7764 7714 … … 7777 7727 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 7778 7728 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient); 7779 rc |= hmR0VmxSaveGuestRip(pV M, pVCpu, pMixedCtx);7780 rc |= hmR0VmxSaveGuestRflags(pV M, pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */7781 rc |= hmR0VmxSaveGuestControlRegs(pV M, pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */7782 rc |= hmR0VmxSaveGuestSegmentRegs(pV M, pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */7729 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 7730 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */ 7731 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */ 7732 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */ 7783 7733 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */ 7784 7734 AssertRCReturn(rc, rc); … … 7866 7816 if (RT_LIKELY(rc == VINF_SUCCESS)) 7867 7817 { 7868 rc = hmR0VmxSaveGuestDebugRegs(pV M, pVCpu, pMixedCtx); /* For DR7. */7818 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx); /* For DR7. 
 #if 0
     /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
      *        just sync the whole thing. */
-    rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
 #else
     /* Aggressive state sync. for now. */
-    rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
 #endif
     AssertRCReturn(rc, rc);
…
      */
     rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     if (RT_SUCCESS(rc))
     {
-        int rc2 = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
+        int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
         AssertRCReturn(rc2, rc2);
-        pMixedCtx->rip += pVmxTransient->cbInstr;
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     }
     return rc;
…
 #if 0
-    rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);      /** @todo Can we do better? */
+    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);           /** @todo Can we do better? */
 #else
     /* Aggressive state sync. for now. */
-    rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
 #endif
     AssertRCReturn(rc, rc);
…
     rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
 #if 0
-    rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);     /** @todo Can we do better? */
+    rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);          /** @todo Can we do better? */
 #else
     /* Aggressive state sync. for now. */
-    rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
 #endif
     AssertRCReturn(rc, rc);
…
     VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();

-    int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
…
     /** @todo Try optimize this by not saving the entire guest state unless
      *        really needed. */
-    int rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
…
     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
     /* We require CR0 and EFER. EFER is always up-to-date. */
-    int rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
+    int rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
         rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
         rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-        rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
+        rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
         Log(("#GP Gst: RIP %#RX64\n", pMixedCtx->rip));
         rc |= hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
…
     /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
-    rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
…
 #ifdef VBOX_HM_WITH_GUEST_PATCHING
-    rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
     /* Shortcut for APIC TPR access, only for 32-bit guests. */
…
         && GCPhys == GCPhysApicBase)
     {
-        rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
+        rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
         AssertRCReturn(rc, rc);
…
         TRPMSetErrorCode(pVCpu, pVmxTransient->uExitIntrErrorCode);

-    rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
+    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);