Changeset 59149 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Dec 16, 2015 11:12:58 AM (9 years ago)
- File: 1 edited

trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r59141 → r59149)

@@ -1257 +1257 @@
     /* Update number of guest MSRs to load/store across the world-switch. */
-    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);    AssertRCReturn(rc, rc);
-    rc     = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);    AssertRCReturn(rc, rc);
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
+    rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);

     /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);         AssertRCReturn(rc, rc);
+    rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);
+    AssertRCReturn(rc, rc);

     /* Update the VCPU's copy of the MSR count. */

@@ -2485 +2486 @@
         val |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;    /* Enable pause-loop exiting. */

-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
-        AssertRCReturn(rc, rc);
-
-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
+        rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
+        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
         AssertRCReturn(rc, rc);
     }

@@ -2536 +2535 @@
 #if 0
     /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);               AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);                AssertRCReturn(rc, rc);
+    rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
+    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);

     /*
      …
      * We thus use the exception bitmap to control it rather than use both.
      */
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);           AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);          AssertRCReturn(rc, rc);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);

     /** @todo Explore possibility of using IO-bitmaps. */
     /* All IO & IOIO instructions cause VM-exits. */
-    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);               AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);               AssertRCReturn(rc, rc);
+    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
+    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);

     /* Initialize the MSR-bitmap area. */
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);           AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);           AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);            AssertRCReturn(rc, rc);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
+    AssertRCReturn(rc, rc);
 #endif
@@ -2561 +2561 @@
     Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
     Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));    /* Lower 4 bits MBZ. */
-    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
-    AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
+    rc  = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
+    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
     AssertRCReturn(rc, rc);

@@ -2578 +2577 @@
 #if 0
     /* Setup debug controls */
-    rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);     /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
-    AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
+    rc  = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);    /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
+    rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
     AssertRCReturn(rc, rc);
 #endif

@@ -2924 +2922 @@
     /* Write these host selector fields into the host-state area in the VMCS. */
-    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);      AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);      AssertRCReturn(rc, rc);
+    rc  = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);
+    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);
 #if HC_ARCH_BITS == 64
-    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);      AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);      AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);      AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);      AssertRCReturn(rc, rc);
+    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);
+    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);
+    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);
+    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);
 #else
     NOREF(uSelDS);
     …
     NOREF(uSelGS);
 #endif
-    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);      AssertRCReturn(rc, rc);
+    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);
+    AssertRCReturn(rc, rc);

     /*

@@ -2948 +2947 @@
     ASMGetGDTR(&Gdtr);
     ASMGetIDTR(&Idtr);
-    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);      AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);      AssertRCReturn(rc, rc);
+    rc  = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
+    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
+    AssertRCReturn(rc, rc);

 #if HC_ARCH_BITS == 64

@@ -3030 +3030 @@
         uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
         uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
-        rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);          AssertRCReturn(rc, rc);
-        rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);          AssertRCReturn(rc, rc);
+        rc  = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
+        rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
+        AssertRCReturn(rc, rc);

         /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
@@ -3070 +3071 @@
      * Host Sysenter MSRs.
      */
-    rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
-    AssertRCReturn(rc, rc);
+    rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
 #if HC_ARCH_BITS == 32
-    rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
-    AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
+    rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
+    rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
 #else
-    rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
-    AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
+    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
+    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
 #endif
     AssertRCReturn(rc, rc);

@@ -3598 +3596 @@
 {
     int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
-    rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
-    rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
+    rc |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
+    rc |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
     return rc;

@@ -3855 +3851 @@
         if (CPUMIsGuestInPAEModeEx(pMixedCtx))
         {
-            rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);                         AssertRCReturn(rc, rc);
-            rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);    AssertRCReturn(rc, rc);
-            rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);    AssertRCReturn(rc, rc);
-            rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);    AssertRCReturn(rc, rc);
-            rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);    AssertRCReturn(rc, rc);
+            rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
+            AssertRCReturn(rc, rc);
+            rc  = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
+            rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
+            rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
+            rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
+            AssertRCReturn(rc, rc);
         }

@@ -4345 +4343 @@
 {
     int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel);       /* 16-bit guest selector field. */
-    AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit);    /* 32-bit guest segment limit field. */
-    AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);    /* Natural width guest segment base field.*/
+    rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit);   /* 32-bit guest segment limit field. */
+    rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);   /* Natural width guest segment base field.*/
     AssertRCReturn(rc, rc);
@@ -4517 +4513 @@
               || (u32AccessRights & RT_BIT(15)));                 /* Granularity MB1. */

-    rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel);                     AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);                   AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);                     AssertRCReturn(rc, rc);
-    rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);    AssertRCReturn(rc, rc);
+    rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
+    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
+    AssertRCReturn(rc, rc);

     HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);

@@ -4531 +4528 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
     {
-        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);    AssertRCReturn(rc, rc);
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);      AssertRCReturn(rc, rc);
+        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);
+        AssertRCReturn(rc, rc);

         /* Validate. */

@@ -4553 +4551 @@
             u32Access = pMixedCtx->ldtr.Attr.u;

-        rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel);          AssertRCReturn(rc, rc);
-        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);     AssertRCReturn(rc, rc);
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);       AssertRCReturn(rc, rc);
-        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);            AssertRCReturn(rc, rc);
+        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel);
+        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
+        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
+        AssertRCReturn(rc, rc);

         /* Validate. */

@@ -4582 +4581 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
     {
-        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);    AssertRCReturn(rc, rc);
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);      AssertRCReturn(rc, rc);
+        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);
+        AssertRCReturn(rc, rc);

         /* Validate. */
@@ -4630 +4630 @@
     if (pVM->hm.s.fAllow64BitGuests)
     {
-        int rc = VINF_SUCCESS;
-        rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL);
+        int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL);
         rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false, NULL);
         rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false, NULL);
         rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
         AssertRCReturn(rc, rc);
 # ifdef LOG_ENABLED

@@ -5625 +5624 @@
     uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);

-    int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
-    AssertRCReturn(rc2, rc2);
-    rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
-    AssertRCReturn(rc2, rc2);
+    int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);      AssertRCReturn(rc2, rc2);
+    rc2     = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);           AssertRCReturn(rc2, rc2);

     VBOXSTRICTRC rcStrict = VINF_SUCCESS;

@@ -5828 +5825 @@
     {
         uint32_t uVal = 0;
-        int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
-        AssertRCReturn(rc, rc);
-
         uint32_t uShadow = 0;
-        rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
+        int rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
+        rc     |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
         AssertRCReturn(rc, rc);

@@ -5866 +5861 @@
         uint32_t uVal = 0;
         uint32_t uShadow = 0;
-        rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
-        AssertRCReturn(rc, rc);
-        rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
+        rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
+        rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
         AssertRCReturn(rc, rc);
@@ -6240 +6234 @@
         if (CPUMIsGuestInPAEModeEx(pMixedCtx))      /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
         {
-            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);    AssertRCReturn(rc, rc);
-            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);    AssertRCReturn(rc, rc);
-            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);    AssertRCReturn(rc, rc);
-            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);    AssertRCReturn(rc, rc);
+            rc  = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
+            rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
+            rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
+            rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
+            AssertRCReturn(rc, rc);

             if (VMMRZCallRing3IsEnabled(pVCpu))

@@ -6396 +6391 @@
     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
     {
-        int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);    AssertRCReturn(rc, rc);
-        rc = VMXLOCAL_READ_SEG(CS, cs);                    AssertRCReturn(rc, rc);
-        rc = VMXLOCAL_READ_SEG(SS, ss);                    AssertRCReturn(rc, rc);
-        rc = VMXLOCAL_READ_SEG(DS, ds);                    AssertRCReturn(rc, rc);
-        rc = VMXLOCAL_READ_SEG(ES, es);                    AssertRCReturn(rc, rc);
-        rc = VMXLOCAL_READ_SEG(FS, fs);                    AssertRCReturn(rc, rc);
-        rc = VMXLOCAL_READ_SEG(GS, gs);                    AssertRCReturn(rc, rc);
+        int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
+        AssertRCReturn(rc, rc);
+
+        rc  = VMXLOCAL_READ_SEG(CS, cs);
+        rc |= VMXLOCAL_READ_SEG(SS, ss);
+        rc |= VMXLOCAL_READ_SEG(DS, ds);
+        rc |= VMXLOCAL_READ_SEG(ES, es);
+        rc |= VMXLOCAL_READ_SEG(FS, fs);
+        rc |= VMXLOCAL_READ_SEG(GS, gs);
+        AssertRCReturn(rc, rc);

         /* Restore segment attributes for real-on-v86 mode hack. */

@@ -6450 +6448 @@
     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
     {
-        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);        AssertRCReturn(rc, rc);
-        rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);       AssertRCReturn(rc, rc);
+        rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
+        rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);      AssertRCReturn(rc, rc);
         pMixedCtx->gdtr.pGdt  = u64Val;
         pMixedCtx->gdtr.cbGdt = u32Val;

@@ -6460 +6458 @@
     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
     {
-        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);        AssertRCReturn(rc, rc);
-        rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);       AssertRCReturn(rc, rc);
+        rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
+        rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);      AssertRCReturn(rc, rc);
         pMixedCtx->idtr.pIdt  = u64Val;
         pMixedCtx->idtr.cbIdt = u32Val;

@@ -11330 +11328 @@
 #if 0
     int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
-    AssertRCReturn(rc, rc);
 #else
     /* Aggressive state sync. for now. */
*/ … … 11599 11596 11600 11597 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11601 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);11602 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);11598 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/); 11599 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); 11603 11600 AssertRCReturn(rc, rc); 11604 11601 … … 11970 11967 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2); 11971 11968 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 11972 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);11969 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11973 11970 AssertRCReturn(rc, rc); 11974 11971 … … 11977 11974 PVM pVM = pVCpu->CTX_SUFF(pVM); 11978 11975 VBOXSTRICTRC rcStrict; 11979 rc 11976 rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/); 11980 11977 switch (uAccessType) 11981 11978 { … … 12409 12406 /* Aggressive state sync. for now. */ 12410 12407 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 12411 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);12412 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);12408 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 12409 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 12413 12410 #endif 12414 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);12411 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 12415 12412 AssertRCReturn(rc, rc); 12416 12413