- Timestamp: Jun 21, 2018 4:02:03 PM
- Location: trunk
- Files: 24 edited
trunk/include/VBox/vmm/cpum.h
r72522 r72643 1376 1376 * @param pCtx Current CPU context. 1377 1377 */ 1378 DECLINLINE(bool) CPUMIsGuestInLongModeEx(PC PUMCTX pCtx)1378 DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx) 1379 1379 { 1380 1380 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA; … … 1452 1452 if (!pVmcb) 1453 1453 return false; 1454 if ( !pCtx->hwvirt.svm.fHMCachedVmcb)1455 return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl &fIntercept);1456 return HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,fIntercept);1454 if (HMHasGuestSvmVmcbCached(pVCpu)) 1455 return HMIsGuestSvmCtrlInterceptSet(pVCpu, fIntercept); 1456 return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fIntercept); 1457 1457 } 1458 1458 … … 1471 1471 if (!pVmcb) 1472 1472 return false; 1473 if ( !pCtx->hwvirt.svm.fHMCachedVmcb)1474 return RT_BOOL(pVmcb->ctrl.u16InterceptRdCRx & (UINT16_C(1) << uCr));1475 return HMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr);1473 if (HMHasGuestSvmVmcbCached(pVCpu)) 1474 return HMIsGuestSvmReadCRxInterceptSet(pVCpu, uCr); 1475 return RT_BOOL(pVmcb->ctrl.u16InterceptRdCRx & (UINT16_C(1) << uCr)); 1476 1476 } 1477 1477 … … 1490 1490 if (!pVmcb) 1491 1491 return false; 1492 if ( !pCtx->hwvirt.svm.fHMCachedVmcb)1493 return RT_BOOL(pVmcb->ctrl.u16InterceptWrCRx & (UINT16_C(1) << uCr));1494 return HMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr);1492 if (HMHasGuestSvmVmcbCached(pVCpu)) 1493 return HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr); 1494 return RT_BOOL(pVmcb->ctrl.u16InterceptWrCRx & (UINT16_C(1) << uCr)); 1495 1495 } 1496 1496 … … 1509 1509 if (!pVmcb) 1510 1510 return false; 1511 if ( !pCtx->hwvirt.svm.fHMCachedVmcb)1512 return RT_BOOL(pVmcb->ctrl.u16InterceptRdDRx & (UINT16_C(1) << uDr));1513 return HMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr);1511 if (HMHasGuestSvmVmcbCached(pVCpu)) 1512 return HMIsGuestSvmReadDRxInterceptSet(pVCpu, uDr); 1513 return RT_BOOL(pVmcb->ctrl.u16InterceptRdDRx & (UINT16_C(1) << uDr)); 1514 1514 } 1515 1515 … … 1528 1528 if (!pVmcb) 1529 1529 return false; 1530 if ( !pCtx->hwvirt.svm.fHMCachedVmcb)1531 return RT_BOOL(pVmcb->ctrl.u16InterceptWrDRx & (UINT16_C(1) << uDr));1532 return HMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr);1530 if (HMHasGuestSvmVmcbCached(pVCpu)) 1531 return HMIsGuestSvmWriteDRxInterceptSet(pVCpu, uDr); 1532 return RT_BOOL(pVmcb->ctrl.u16InterceptWrDRx & (UINT16_C(1) << uDr)); 1533 1533 } 1534 1534 … … 1547 1547 if (!pVmcb) 1548 1548 return false; 1549 if ( !pCtx->hwvirt.svm.fHMCachedVmcb)1550 return RT_BOOL(pVmcb->ctrl.u32InterceptXcpt & (UINT32_C(1) << uVector));1551 return HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector);1549 if (HMHasGuestSvmVmcbCached(pVCpu)) 1550 return HMIsGuestSvmXcptInterceptSet(pVCpu, uVector); 1551 return RT_BOOL(pVmcb->ctrl.u32InterceptXcpt & (UINT32_C(1) << uVector)); 1552 1552 } 1553 1553 … … 1564 1564 { 1565 1565 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 1566 Assert(pVmcb); 1567 if (!pCtx->hwvirt.svm.fHMCachedVmcb) 1568 return pVmcb->ctrl.IntCtrl.n.u1VIntrMasking; 1569 return HMIsGuestSvmVirtIntrMasking(pVCpu, pCtx); 1566 if (!pVmcb) 1567 return false; 1568 if (HMHasGuestSvmVmcbCached(pVCpu)) 1569 return HMIsGuestSvmVirtIntrMasking(pVCpu); 1570 return pVmcb->ctrl.IntCtrl.n.u1VIntrMasking; 1570 1571 } 1571 1572 … … 1582 1583 { 1583 1584 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 1584 Assert(pVmcb); 1585 if (!pCtx->hwvirt.svm.fHMCachedVmcb) 1586 return pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging; 1587 return HMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx); 1585 if (!pVmcb) 1586 return false; 1587 if 
(HMHasGuestSvmVmcbCached(pVCpu)) 1588 return HMIsGuestSvmNestedPagingEnabled(pVCpu); 1589 return pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging; 1588 1590 } 1589 1591 … … 1600 1602 { 1601 1603 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 1602 Assert(pVmcb); 1603 if (!pCtx->hwvirt.svm.fHMCachedVmcb) 1604 return pVmcb->ctrl.u16PauseFilterCount; 1605 return HMGetGuestSvmPauseFilterCount(pVCpu, pCtx); 1604 if (!pVmcb) 1605 return false; 1606 if (HMHasGuestSvmVmcbCached(pVCpu)) 1607 return HMGetGuestSvmPauseFilterCount(pVCpu); 1608 return pVmcb->ctrl.u16PauseFilterCount; 1606 1609 } 1607 1610 -
trunk/include/VBox/vmm/cpum.mac
r71833 -> r72643

      .hwvirt.svm.cPauseFilterThreshold    resw         1
      .hwvirt.svm.fInterceptEvents         resb         1
-     .hwvirt.svm.fHMCachedVmcb            resb         1
      alignb 8
      .hwvirt.svm.pvMsrBitmapR0            RTR0PTR_RES  1
trunk/include/VBox/vmm/cpumctx.h
r72510 -> r72643

     /** 0x3c4 - Whether the injected event is subject to event intercepts. */
     bool                fInterceptEvents;
-    /** 0x3c5 - Whether parts of the VMCB are cached (and potentially modified) by HM. */
-    bool                fHMCachedVmcb;
-    /** 0x3c6 - Padding. */
-    bool                afPadding[2];
+    /** 0x3c5 - Padding. */
+    bool                afPadding[3];
     /** 0x3c8 - MSR permission bitmap - R0 ptr. */
     R0PTRTYPE(void *)   pvMsrBitmapR0;
 …
 /** @name CPUMCTX_EXTRN_XXX
- * Used to parts of the CPUM state that is externalized and needs fetching
+ * Used for parts of the CPUM state that is externalized and needs fetching
  * before use.
  *
 …
                   | CPUMCTX_EXTRN_SYSENTER_MSRS | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)

+/** Hardware-virtualization (SVM or VMX) state is kept externally. */
+#define CPUMCTX_EXTRN_HWVIRT                    UINT64_C(0x0000020000000000)
+
 /** Mask of bits the keepers can use for state tracking. */
 #define CPUMCTX_EXTRN_KEEPER_STATE_MASK         UINT64_C(0xffff000000000000)
 …
 #define CPUMCTX_EXTRN_NEM_WIN_MASK              UINT64_C(0x0007000000000000)

+/** HM/SVM: Inhibit maskable interrupts (VMCPU_FF_INHIBIT_INTERRUPTS). */
+#define CPUMCTX_EXTRN_HM_SVM_INT_SHADOW         UINT64_C(0x0001000000000000)
+/** HM/SVM: Nested-guest interrupt pending (VMCPU_FF_INTERRUPT_NESTED_GUEST). */
+#define CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ        UINT64_C(0x0002000000000000)
+/** HM/SVM: Mask. */
+#define CPUMCTX_EXTRN_HM_SVM_MASK               UINT64_C(0x0003000000000000)
+
+/** HM/VMX: Guest-interruptibility state (VMCPU_FF_INHIBIT_INTERRUPTS,
+ *  VMCPU_FF_BLOCK_NMIS). */
+#define CPUMCTX_EXTRN_HM_VMX_INT_STATE          UINT64_C(0x0001000000000000)
+/** HM/VMX: Mask. */
+#define CPUMCTX_EXTRN_HM_VMX_MASK               UINT64_C(0x0001000000000000)
+
 /** All CPUM state bits, not including keeper specific ones. */
-#define CPUMCTX_EXTRN_ALL                       UINT64_C(0x000001fffffffffc)
+#define CPUMCTX_EXTRN_ALL                       UINT64_C(0x000003fffffffffc)
 /** @} */
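To make the new CPUMCTX_EXTRN_HWVIRT bit concrete, below is a minimal sketch of a consumer guarding access to the hardware-virtualization state. The helper name exampleIsGifSet is hypothetical, and using CPUMImportGuestStateOnDemand as the import entry point is an assumption; only the CPUMCTX_EXTRN_* flags and the fExtrn field come from the header above.

/* Hypothetical consumer; assumes VBox/vmm/cpum.h and cpumctx.h. While HM is
 * the state keeper, the hwvirt part of CPUMCTX may still live in the VMCB,
 * so test fExtrn before reading pCtx->hwvirt.fGif and import it if needed. */
static bool exampleIsGifSet(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (pCtx->fExtrn & CPUMCTX_EXTRN_HWVIRT)
    {
        int rc = CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_HWVIRT); /* assumed entry point */
        AssertRCReturn(rc, false);
    }
    return pCtx->hwvirt.fGif;
}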
trunk/include/VBox/vmm/hm.h
r72599 -> r72643

     VMMR0_INT_DECL(void)   HMR0NotifyCpumModifiedHostCr0(PVMCPU pVCpu);
     VMMR0_INT_DECL(bool)   HMR0SuspendPending(void);
+    VMMR0_INT_DECL(int)    HMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
+    VMMR0_INT_DECL(int)    HMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat);

     # if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
trunk/include/VBox/vmm/hm_svm.h
r72065 r72643 1067 1067 /** Cache of the LBR virtualization bit. */ 1068 1068 bool fLbrVirt; 1069 /** Whether the VMCB is cached by HM. */ 1070 bool fCacheValid; 1069 1071 /** Alignment. */ 1070 bool afPadding0[ 5];1072 bool afPadding0[4]; 1071 1073 } SVMNESTEDVMCBCACHE; 1072 1074 #pragma pack() … … 1076 1078 typedef const SVMNESTEDVMCBCACHE *PCSVMNESTEDVMCBCACHE; 1077 1079 AssertCompileSizeAlignment(SVMNESTEDVMCBCACHE, 8); 1078 1079 #ifdef IN_RING01080 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);1081 #endif /* IN_RING0 */1082 1080 1083 1081 /** … … 1140 1138 * Don't add any more functions here unless there is no other option. 1141 1139 */ 1142 VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept); 1143 VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr); 1144 VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr); 1145 VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr); 1146 VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr); 1147 VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector); 1148 VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx); 1149 VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx); 1150 VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx); 1140 VMM_INT_DECL(bool) HMHasGuestSvmVmcbCached(PVMCPU pVCpu); 1141 VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, uint64_t fIntercept); 1142 VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr); 1143 VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr); 1144 VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr); 1145 VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr); 1146 VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, uint8_t uVector); 1147 VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu); 1148 VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu); 1149 VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu); 1150 1151 1151 /** @} */ 1152 1152 -
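Because the HM query helpers above lost their pCtx parameter and now assert that the VMCB cache is valid, callers outside HM are expected to guard them with HMHasGuestSvmVmcbCached() and otherwise read the guest VMCB directly, just like the cpum.h inline wrappers earlier in this changeset. A minimal sketch follows; the function name is hypothetical, all other identifiers are from hm_svm.h and cpumctx.h.

static bool exampleIsVmrunIntercepted(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    if (!pVmcb)
        return false;                    /* No nested-guest VMCB mapped. */
    if (HMHasGuestSvmVmcbCached(pVCpu))  /* HM may have modified the VMCB, ask its cache. */
        return HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMRUN);
    return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN); /* Otherwise read the VMCB. */
}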
trunk/include/VBox/vmm/hm_vmx.h
r69107 -> r72643

 }

-#ifdef IN_RING0
-VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
-VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys);
-#endif /* IN_RING0 */
-
 /** @} */
trunk/include/VBox/vmm/iem.h
r72592 -> r72643

                                            | CPUMCTX_EXTRN_DR7 /* for memory breakpoints */ )

+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+/** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecSvmVmexit().
+ * IEM will ASSUME the caller has ensured these are already present. */
+# define IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK (  CPUMCTX_EXTRN_RSP \
+                                            | CPUMCTX_EXTRN_RAX \
+                                            | CPUMCTX_EXTRN_RIP \
+                                            | CPUMCTX_EXTRN_RFLAGS \
+                                            | CPUMCTX_EXTRN_CS \
+                                            | CPUMCTX_EXTRN_SS \
+                                            | CPUMCTX_EXTRN_DS \
+                                            | CPUMCTX_EXTRN_ES \
+                                            | CPUMCTX_EXTRN_GDTR \
+                                            | CPUMCTX_EXTRN_IDTR \
+                                            | CPUMCTX_EXTRN_CR_MASK \
+                                            | CPUMCTX_EXTRN_EFER \
+                                            | CPUMCTX_EXTRN_DR6 \
+                                            | CPUMCTX_EXTRN_DR7 \
+                                            | CPUMCTX_EXTRN_OTHER_MSRS \
+                                            | CPUMCTX_EXTRN_HWVIRT)
+#endif

 VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu);
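The new mask is a contract rather than an import: IEM assumes the caller has already pulled these fields into CPUMCTX before calling IEMExecSvmVmexit(). A sketch of the caller side in ring-0 HM follows; the handler itself is hypothetical, but it uses the HMSVM_CPUMCTX_IMPORT_STATE macro that this changeset adds to HMSVMR0.cpp further below.

static VBOXSTRICTRC exampleHandleTripleFault(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /* Import exactly what IEM assumes is already present in CPUMCTX... */
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
    /* ...then let IEM perform the nested-guest #VMEXIT. */
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
}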
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r72488 r72643 2450 2450 * RPL = CPL. Weird. 2451 2451 */ 2452 Assert(!(pVCpu->cpum.s.Guest.fExtrn & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS))); 2452 2453 uint32_t uCpl; 2453 2454 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE) … … 2766 2767 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 2767 2768 { 2768 if (! pCtx->hwvirt.svm.fHMCachedVmcb)2769 if (!HMHasGuestSvmVmcbCached(pVCpu)) 2769 2770 { 2770 2771 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); … … 2807 2808 } 2808 2809 2809 case CPUMCTX_EXTRN_KEEPER_HM: /** @todo make HM use CPUMCTX_EXTRN_XXX. */ 2810 case CPUMCTX_EXTRN_KEEPER_HM: 2811 { 2812 #ifdef IN_RING0 2813 int rc = HMR0ImportStateOnDemand(pVCpu, &pVCpu->cpum.s.Guest, fExtrnImport); 2814 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc)); 2815 return rc; 2816 #else 2817 return VINF_SUCCESS; 2818 #endif 2819 } 2810 2820 default: 2811 2821 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2); -
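Two caller-visible effects of this hunk: CPUMGetGuestCPL() now asserts that CR0, RFLAGS and SS are not externalized, and the CPUMCTX_EXTRN_KEEPER_HM case forwards on-demand imports to HMR0ImportStateOnDemand() in ring-0. A sketch of a call site that satisfies the new assertion; the helper name is hypothetical, and CPUMImportGuestStateOnDemand as the enclosing import API is an assumption since the function name is cut off in the hunk above.

static uint32_t exampleGetCpl(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    uint64_t const fNeeded = CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS;
    if (pCtx->fExtrn & fNeeded)
    {
        /* In ring-0 this lands in the CPUMCTX_EXTRN_KEEPER_HM case shown above. */
        int rc = CPUMImportGuestStateOnDemand(pVCpu, fNeeded); /* assumed entry point */
        AssertRCReturn(rc, 0);
    }
    return CPUMGetGuestCPL(pVCpu);
}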
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
r72462 r72643 82 82 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual); 83 83 #ifdef IN_RING0 84 PVM pVM = pVCpu->CTX_SUFF(pVM); 85 if (pVM->hm.s.vmx.fSupported) 86 return VMXR0InvalidatePage(pVM, pVCpu, GCVirt); 87 88 Assert(pVM->hm.s.svm.fSupported); 89 return SVMR0InvalidatePage(pVM, pVCpu, GCVirt); 90 84 return HMR0InvalidatePage(pVCpu, GCVirt); 91 85 #else 92 86 hmQueueInvlPage(pVCpu, GCVirt); … … 288 282 return VINF_SUCCESS; 289 283 290 #ifdef IN_RING0 291 if (pVM->hm.s.vmx.fSupported) 292 { 293 VMCPUID idThisCpu = VMMGetCpuId(pVM); 294 295 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++) 296 { 297 PVMCPU pVCpu = &pVM->aCpus[idCpu]; 298 299 if (idThisCpu == idCpu) 300 { 301 /** @todo r=ramshankar: Intel does not support flushing by guest physical 302 * address either. See comment in VMXR0InvalidatePhysPage(). Fix this. */ 303 VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys); 304 } 305 else 306 { 307 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 308 hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/); 309 } 310 } 311 return VINF_SUCCESS; 312 } 313 314 /* AMD-V doesn't support invalidation with guest physical addresses; see 315 comment in SVMR0InvalidatePhysPage. */ 316 Assert(pVM->hm.s.svm.fSupported); 317 #else 318 NOREF(GCPhys); 319 #endif 320 321 HMFlushTLBOnAllVCpus(pVM); 322 return VINF_SUCCESS; 284 /* 285 * AMD-V: Doesn't support invalidation with guest physical addresses. 286 * 287 * VT-x: Doesn't support invalidation with guest physical addresses. 288 * INVVPID instruction takes only a linear address while invept only flushes by EPT 289 * not individual addresses. 290 * 291 * We update the force flag and flush before the next VM-entry, see @bugref{6568}. 292 */ 293 RT_NOREF(GCPhys); 294 /** @todo Remove or figure out to way to update the Phys STAT counter. */ 295 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */ 296 return HMFlushTLBOnAllVCpus(pVM); 323 297 } 324 298 -
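The behavioral point of this hunk: invalidation by guest-physical address no longer attempts a targeted flush on either AMD-V or VT-x; it simply schedules a full TLB flush on all VCPUs before the next VM-entry. A caller-side sketch follows; the wrapper is hypothetical, and the enclosing function in the hunk appears to be HMInvalidatePhysPage, which is an inference since its name is cut off above.

static int exampleDropGuestPhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    /* Effectively HMFlushTLBOnAllVCpus(pVM): sets VMCPU_FF_TLB_FLUSH on every VCPU. */
    int rc = HMInvalidatePhysPage(pVM, GCPhys);
    AssertRC(rc);
    return rc;
}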
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r72462 r72643 130 130 VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx) 131 131 { 132 if (pCtx->hwvirt.svm.fHMCachedVmcb) 133 { 134 PSVMVMCBCTRL pVmcbNstGstCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl; 135 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 136 132 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 133 if (pVmcbNstGstCache->fCacheValid) 134 { 137 135 /* 138 136 * Restore fields as our own code might look at the VMCB controls as part … … 141 139 * by a physical CPU on #VMEXIT. 142 140 */ 141 PSVMVMCBCTRL pVmcbNstGstCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl; 143 142 pVmcbNstGstCtrl->u16InterceptRdCRx = pVmcbNstGstCache->u16InterceptRdCRx; 144 143 pVmcbNstGstCtrl->u16InterceptWrCRx = pVmcbNstGstCache->u16InterceptWrCRx; … … 153 152 pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVmcbNstGstCache->fNestedPaging; 154 153 pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcbNstGstCache->fLbrVirt; 155 p Ctx->hwvirt.svm.fHMCachedVmcb= false;154 pVmcbNstGstCache->fCacheValid = false; 156 155 } 157 156 … … 166 165 * change here. 167 166 */ 167 /** @todo Only signal state needed for VM-exit (e.g. skip 168 * LDTR, TR etc., see IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK. 169 * Do this while extending HM_CHANGED_xxx flags. See 170 * todo in hmR0SvmHandleExitNested(). */ 168 171 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 169 172 } … … 209 212 { 210 213 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 211 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); 212 Assert(pCtx->hwvirt.svm.fHMCachedVmcb); 213 NOREF(pCtx); 214 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 214 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx); 215 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 216 Assert(pVmcbNstGstCache->fCacheValid); 215 217 return uTicks + pVmcbNstGstCache->u64TSCOffset; 216 218 } … … 401 403 402 404 /** 405 * Returns whether HM has cached the nested-guest VMCB. 406 * 407 * If the VMCB is cached by HM, it means HM may have potentially modified the 408 * VMCB for execution using hardware-assisted SVM. 409 * 410 * @returns true if HM has cached the nested-guest VMCB, false otherwise. 411 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 412 */ 413 VMM_INT_DECL(bool) HMHasGuestSvmVmcbCached(PVMCPU pVCpu) 414 { 415 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 416 return pVmcbNstGstCache->fCacheValid; 417 } 418 419 420 /** 403 421 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept 404 422 * active. … … 406 424 * @returns @c true if in intercept is set, @c false otherwise. 407 425 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 408 * @param pCtx Pointer to the context.409 426 * @param fIntercept The SVM control/instruction intercept, see 410 427 * SVM_CTRL_INTERCEPT_*. 411 428 */ 412 VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx,uint64_t fIntercept)413 { 414 Assert( pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);429 VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, uint64_t fIntercept) 430 { 431 Assert(HMHasGuestSvmVmcbCached(pVCpu)); 415 432 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 416 433 return RT_BOOL(pVmcbNstGstCache->u64InterceptCtrl & fIntercept); … … 423 440 * @returns @c true if in intercept is set, @c false otherwise. 424 441 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 
425 * @param pCtx Pointer to the context.426 442 * @param uCr The CR register number (0 to 15). 427 443 */ 428 VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx,uint8_t uCr)444 VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr) 429 445 { 430 446 Assert(uCr < 16); 431 Assert( pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);447 Assert(HMHasGuestSvmVmcbCached(pVCpu)); 432 448 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 433 449 return RT_BOOL(pVmcbNstGstCache->u16InterceptRdCRx & (1 << uCr)); … … 440 456 * @returns @c true if in intercept is set, @c false otherwise. 441 457 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 442 * @param pCtx Pointer to the context.443 458 * @param uCr The CR register number (0 to 15). 444 459 */ 445 VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx,uint8_t uCr)460 VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr) 446 461 { 447 462 Assert(uCr < 16); 448 Assert( pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);463 Assert(HMHasGuestSvmVmcbCached(pVCpu)); 449 464 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 450 465 return RT_BOOL(pVmcbNstGstCache->u16InterceptWrCRx & (1 << uCr)); … … 457 472 * @returns @c true if in intercept is set, @c false otherwise. 458 473 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 459 * @param pCtx Pointer to the context.460 474 * @param uDr The DR register number (0 to 15). 461 475 */ 462 VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx,uint8_t uDr)476 VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr) 463 477 { 464 478 Assert(uDr < 16); 465 Assert( pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);479 Assert(HMHasGuestSvmVmcbCached(pVCpu)); 466 480 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 467 481 return RT_BOOL(pVmcbNstGstCache->u16InterceptRdDRx & (1 << uDr)); … … 474 488 * @returns @c true if in intercept is set, @c false otherwise. 475 489 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 476 * @param pCtx Pointer to the context.477 490 * @param uDr The DR register number (0 to 15). 478 491 */ 479 VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx,uint8_t uDr)492 VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr) 480 493 { 481 494 Assert(uDr < 16); 482 Assert( pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);495 Assert(HMHasGuestSvmVmcbCached(pVCpu)); 483 496 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 484 497 return RT_BOOL(pVmcbNstGstCache->u16InterceptWrDRx & (1 << uDr)); … … 491 504 * @returns true if in intercept is active, false otherwise. 492 505 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 493 * @param pCtx Pointer to the context.494 506 * @param uVector The exception / interrupt vector. 
495 507 */ 496 VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx,uint8_t uVector)508 VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, uint8_t uVector) 497 509 { 498 510 Assert(uVector < 32); 499 Assert( pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);511 Assert(HMHasGuestSvmVmcbCached(pVCpu)); 500 512 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 501 513 return RT_BOOL(pVmcbNstGstCache->u32InterceptXcpt & (1 << uVector)); … … 508 520 * @returns true if virtual-interrupts are masked, @c false otherwise. 509 521 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 510 * @param pCtx Pointer to the context. 511 */ 512 VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx) 513 { 514 Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx); 522 */ 523 VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu) 524 { 525 Assert(HMHasGuestSvmVmcbCached(pVCpu)); 515 526 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 516 527 return pVmcbNstGstCache->fVIntrMasking; … … 523 534 * @returns true if nested-paging is enabled, @c false otherwise. 524 535 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 525 * @param pCtx Pointer to the context. 526 */ 527 VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx) 528 { 529 Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx); 536 */ 537 VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu) 538 { 539 Assert(HMHasGuestSvmVmcbCached(pVCpu)); 530 540 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 531 541 return pVmcbNstGstCache->fNestedPaging; … … 538 548 * @returns The pause-filter count. 539 549 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 540 * @param pCtx Pointer to the context. 541 */ 542 VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx) 543 { 544 Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx); 550 */ 551 VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu) 552 { 553 Assert(HMHasGuestSvmVmcbCached(pVCpu)); 545 554 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 546 555 return pVmcbNstGstCache->u16PauseFilterCount; -
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r72642 -> r72643

 VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
 {
-    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
     VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     if (pVCpu->iem.s.cActiveMappings)
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
r72518 r72643 143 143 || uExitCode == SVM_EXIT_INVALID) 144 144 { 145 LogFlow(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,146 pVCpu->cpum.GstCtx. rip, uExitCode, uExitInfo1, uExitInfo2));145 LogFlow(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", 146 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, uExitInfo1, uExitInfo2)); 147 147 148 148 /* … … 824 824 * below. */ 825 825 LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 vec=%#x type=%d uErr=%u cr2=%#RX64 cr3=%#RX64 efer=%#RX64\n", 826 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uVector, enmType, uErrorCode, pVCpu->cpum.GstCtx.cr2, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.msrEFER)); 826 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uVector, enmType, uErrorCode, pVCpu->cpum.GstCtx.cr2, 827 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.msrEFER)); 827 828 828 829 /* … … 840 841 else 841 842 LogFlow(("iemSvmVmrun: Entering nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n", 842 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER, pVCpu->cpum.GstCtx.rflags.u64)); 843 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, 844 pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER, pVCpu->cpum.GstCtx.rflags.u64)); 843 845 844 846 LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict))); … … 1264 1266 { 1265 1267 LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode)); 1268 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR 1269 | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS); 1270 1266 1271 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs); 1267 1272 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs); … … 1400 1405 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter) 1401 1406 { 1407 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT); 1408 1402 1409 /* TSC based pause-filter thresholding. */ 1403 1410 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold -
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r72208 r72643 96 96 DECLR0CALLBACKMEMBER(int, pfnInitVM, (PVM pVM)); 97 97 DECLR0CALLBACKMEMBER(int, pfnTermVM, (PVM pVM)); 98 DECLR0CALLBACKMEMBER(int, pfnSetupVM ,(PVM pVM));98 DECLR0CALLBACKMEMBER(int, pfnSetupVM, (PVM pVM)); 99 99 /** @} */ 100 100 … … 1644 1644 1645 1645 /** 1646 * Invalidates a guest page from the host TLB. 1647 * 1648 * @param pVCpu The cross context virtual CPU structure. 1649 * @param GCVirt Page to invalidate. 1650 */ 1651 VMMR0_INT_DECL(int) HMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt) 1652 { 1653 PVM pVM = pVCpu->CTX_SUFF(pVM); 1654 if (pVM->hm.s.vmx.fSupported) 1655 return VMXR0InvalidatePage(pVM, pVCpu, GCVirt); 1656 return SVMR0InvalidatePage(pVM, pVCpu, GCVirt); 1657 } 1658 1659 1660 /** 1646 1661 * Returns the cpu structure for the current cpu. 1647 1662 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!). … … 1679 1694 return; 1680 1695 } 1696 1697 1698 /** 1699 * Interface for importing state on demand (used by IEM). 1700 * 1701 * @returns VBox status code. 1702 * @param pVCpu The cross context CPU structure. 1703 * @param pCtx The target CPU context. 1704 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 1705 */ 1706 VMMR0_INT_DECL(int) HMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 1707 { 1708 /** @todo Intel. */ 1709 #if 0 1710 if (pVCpu->CTX_SUFF(pVM).hm.s.vmx.fSupported) 1711 return VMXR0ImportStateOnDemand(pVCpu, pCtx, fWhat); 1712 #endif 1713 return SVMR0ImportStateOnDemand(pVCpu, pCtx, fWhat); 1714 } 1715 1681 1716 1682 1717 #ifdef VBOX_WITH_RAW_MODE -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r72639 r72643 80 80 #define HMSVM_EXIT_DECL static int 81 81 82 /** 83 * Subset of the guest-CPU state that is kept by SVM R0 code while executing the 84 * guest using hardware-assisted SVM. 85 * 86 * This excludes state like TSC AUX, GPRs (other than RSP, RAX) which are always 87 * are swapped and restored across the world-switch and also registers like 88 * EFER, PAT MSR etc. which cannot be modified by the guest without causing a 89 * \#VMEXIT. 90 */ 91 #define HMSVM_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \ 92 | CPUMCTX_EXTRN_RFLAGS \ 93 | CPUMCTX_EXTRN_RAX \ 94 | CPUMCTX_EXTRN_RSP \ 95 | CPUMCTX_EXTRN_SREG_MASK \ 96 | CPUMCTX_EXTRN_CR0 \ 97 | CPUMCTX_EXTRN_CR2 \ 98 | CPUMCTX_EXTRN_CR3 \ 99 | CPUMCTX_EXTRN_TABLE_MASK \ 100 | CPUMCTX_EXTRN_DR6 \ 101 | CPUMCTX_EXTRN_DR7 \ 102 | CPUMCTX_EXTRN_KERNEL_GS_BASE \ 103 | CPUMCTX_EXTRN_SYSCALL_MSRS \ 104 | CPUMCTX_EXTRN_SYSENTER_MSRS \ 105 | CPUMCTX_EXTRN_HWVIRT \ 106 | CPUMCTX_EXTRN_HM_SVM_MASK) 107 108 /** Macro for importing guest state from the VMCB back into CPUMCTX. */ 109 #define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_pCtx, a_fWhat) \ 110 do { \ 111 hmR0SvmImportGuestState((a_pVCpu), (a_pCtx), (a_fWhat)); \ 112 } while (0) 113 114 /** Assert that the required state bits are fetched. */ 115 #define HMSVM_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \ 116 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \ 117 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz))) 118 82 119 /** Macro for checking and returning from the using function for 83 120 * \#VMEXIT intercepts that maybe caused during delivering of another … … 92 129 else if ( rc == VINF_EM_RESET \ 93 130 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \ 131 { \ 132 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); \ 94 133 return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_SHUTDOWN, 0, 0)); \ 134 } \ 95 135 else \ 96 136 return rc; \ … … 108 148 #endif 109 149 110 /** 111 * Updates interrupt shadow for the current RIP. 112 */ 150 /** Macro which updates interrupt shadow for the current RIP. */ 113 151 #define HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx) \ 114 152 do { \ … … 154 192 /** Validate segment descriptor granularity bit. */ 155 193 #ifdef VBOX_STRICT 156 # define HMSVM_ASSERT_SEG_GRANULARITY( reg) \157 AssertMsg( ! pMixedCtx->reg.Attr.n.u1Present \158 || ( pMixedCtx->reg.Attr.n.u1Granularity \159 ? ( pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \160 : pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \161 ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \162 pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))194 # define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) \ 195 AssertMsg( !(a_pCtx)->reg.Attr.n.u1Present \ 196 || ( (a_pCtx)->reg.Attr.n.u1Granularity \ 197 ? ((a_pCtx)->reg.u32Limit & 0xfff) == 0xfff \ 198 : (a_pCtx)->reg.u32Limit <= UINT32_C(0xfffff)), \ 199 ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", (a_pCtx)->reg.u32Limit, \ 200 (a_pCtx)->reg.Attr.u, (a_pCtx)->reg.u64Base)) 163 201 #else 164 # define HMSVM_ASSERT_SEG_GRANULARITY( reg)do { } while (0)202 # define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) do { } while (0) 165 203 #endif 166 204 … … 320 358 * @returns VBox status code. 321 359 * @param pVCpu The cross context virtual CPU structure. 322 * @param p MixedCtxPointer to the guest-CPU context.360 * @param pCtx Pointer to the guest-CPU context. 323 361 * @param pSvmTransient Pointer to the SVM-transient structure. 
324 362 */ … … 329 367 * Internal Functions * 330 368 *********************************************************************************************************************************/ 331 static void hmR0SvmSetMsrPermission(PC PUMCTX pCtx, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,369 static void hmR0SvmSetMsrPermission(PCCPUMCTX pCtx, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead, 332 370 SVMMSREXITWRITE enmWrite); 333 371 static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu); 334 static void hmR0SvmLeave(PVMCPU pVCpu); 372 static void hmR0SvmLeave(PVMCPU pVCpu, bool fImportState); 373 static void hmR0SvmImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat); 374 335 375 336 376 /** @name \#VMEXIT handlers. … … 385 425 /** @} */ 386 426 387 static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX p MixedCtx, PSVMTRANSIENT pSvmTransient);427 static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient); 388 428 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 389 429 static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient); … … 402 442 403 443 #ifdef VBOX_STRICT 404 # define HMSVM_LOG_CS RT_BIT_32(0) 405 # define HMSVM_LOG_SS RT_BIT_32(1) 406 # define HMSVM_LOG_FS RT_BIT_32(2) 407 # define HMSVM_LOG_GS RT_BIT_32(3) 408 # define HMSVM_LOG_LBR RT_BIT_32(4) 409 # define HMSVM_LOG_ALL ( HMSVM_LOG_CS \ 444 # define HMSVM_LOG_RBP_RSP RT_BIT_32(0) 445 # define HMSVM_LOG_CR_REGS RT_BIT_32(1) 446 # define HMSVM_LOG_CS RT_BIT_32(2) 447 # define HMSVM_LOG_SS RT_BIT_32(3) 448 # define HMSVM_LOG_FS RT_BIT_32(4) 449 # define HMSVM_LOG_GS RT_BIT_32(5) 450 # define HMSVM_LOG_LBR RT_BIT_32(6) 451 # define HMSVM_LOG_ALL ( HMSVM_LOG_RBP_RSP \ 452 | HMSVM_LOG_CR_REGS \ 453 | HMSVM_LOG_CS \ 410 454 | HMSVM_LOG_SS \ 411 455 | HMSVM_LOG_FS \ … … 421 465 * @param pszPrefix Log prefix. 422 466 * @param fFlags Log flags, see HMSVM_LOG_XXX. 423 * @param uVerbose The verbosity level, currently unused.424 */ 425 static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, PC PUMCTX pCtx, const char *pszPrefix, uint32_t fFlags,467 * @param uVerboses The verbosity level, currently unused. 
468 */ 469 static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, PCCPUMCTX pCtx, const char *pszPrefix, uint32_t fFlags, 426 470 uint8_t uVerbose) 427 471 { 428 472 RT_NOREF2(pVCpu, uVerbose); 429 473 430 Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip, 431 pCtx->rflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4)); 432 Log4(("%s: rsp=%#RX64 rbp=%#RX64 rdi=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp, pCtx->rdi)); 474 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS); 475 Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u)); 476 477 if (fFlags & HMSVM_LOG_RBP_RSP) 478 { 479 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RBP); 480 Log4(("%s: rsp=%#RX64 rbp=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp)); 481 } 482 483 if (fFlags & HMSVM_LOG_CR_REGS) 484 { 485 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4); 486 Log4(("%s: cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cr0, pCtx->cr3, pCtx->cr4)); 487 } 488 433 489 if (fFlags & HMSVM_LOG_CS) 434 490 { 491 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS); 435 492 Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base, 436 493 pCtx->cs.u32Limit, pCtx->cs.Attr.u)); … … 438 495 if (fFlags & HMSVM_LOG_SS) 439 496 { 497 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS); 440 498 Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base, 441 499 pCtx->ss.u32Limit, pCtx->ss.Attr.u)); … … 443 501 if (fFlags & HMSVM_LOG_FS) 444 502 { 503 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS); 445 504 Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base, 446 505 pCtx->fs.u32Limit, pCtx->fs.Attr.u)); … … 448 507 if (fFlags & HMSVM_LOG_GS) 449 508 { 509 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS); 450 510 Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base, 451 511 pCtx->gs.u32Limit, pCtx->gs.Attr.u)); … … 753 813 * @param pCtx Pointer to the guest-CPU context. 754 814 */ 755 DECLINLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPU pVCpu, PC PUMCTX pCtx)815 DECLINLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPU pVCpu, PCCPUMCTX pCtx) 756 816 { 757 817 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 827 887 * caller needs to take care of this. 828 888 */ 829 static void hmR0SvmSetMsrPermission(PC PUMCTX pCtx, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,889 static void hmR0SvmSetMsrPermission(PCCPUMCTX pCtx, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead, 830 890 SVMMSREXITWRITE enmWrite) 831 891 { … … 1093 1153 * @returns Pointer to the nested-guest VMCB cache. 1094 1154 * @param pVCpu The cross context virtual CPU structure. 1095 * @param pCtx Pointer to the guest-CPU context. 1096 */ 1097 DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPU pVCpu, PCPUMCTX pCtx) 1155 */ 1156 DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPU pVCpu) 1098 1157 { 1099 1158 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1100 Assert(p Ctx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx);1159 Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid); 1101 1160 return &pVCpu->hm.s.svm.NstGstVmcbCache; 1102 1161 #else 1103 RT_NOREF 2(pVCpu, pCtx);1162 RT_NOREF(pVCpu); 1104 1163 return NULL; 1105 1164 #endif … … 1154 1213 * @param pHostCpu Pointer to the HM host-CPU info. 
1155 1214 */ 1156 static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PC PUMCTX pCtx, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu)1215 static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu) 1157 1216 { 1158 1217 #ifndef VBOX_WITH_NESTED_HWVIRT_SVM … … 1394 1453 * are not intercepting it. 1395 1454 */ 1396 DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPU pVCpu, PC PUMCTX pCtx, PSVMVMCB pVmcb, uint8_t uXcpt)1455 DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb, uint8_t uXcpt) 1397 1456 { 1398 1457 Assert(uXcpt != X86_XCPT_DB); … … 1406 1465 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 1407 1466 { 1408 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu , pCtx);1467 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu); 1409 1468 fRemove = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(uXcpt)); 1410 1469 } … … 1453 1512 * are not intercepting it. 1454 1513 */ 1455 DECLINLINE(bool) hmR0SvmClearCtrlIntercept(PVMCPU pVCpu, PC PUMCTX pCtx, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)1514 DECLINLINE(bool) hmR0SvmClearCtrlIntercept(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb, uint64_t fCtrlIntercept) 1456 1515 { 1457 1516 if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept) … … 1462 1521 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 1463 1522 { 1464 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu , pCtx);1523 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu); 1465 1524 fRemove = !(pVmcbNstGstCache->u64InterceptCtrl & fCtrlIntercept); 1466 1525 } … … 1493 1552 * @remarks No-long-jump zone!!! 1494 1553 */ 1495 static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PC PUMCTX pCtx)1554 static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 1496 1555 { 1497 1556 /* The guest FPU is now always pre-loaded before executing guest code, see @bugref{7243#c101}. */ … … 1542 1601 { 1543 1602 /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */ 1544 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu , pCtx);1603 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu); 1545 1604 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(0)) 1546 1605 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(0)); … … 1575 1634 * @remarks No-long-jump zone!!! 1576 1635 */ 1577 static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PC PUMCTX pCtx)1636 static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 1578 1637 { 1579 1638 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 1671 1730 { 1672 1731 /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */ 1673 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu , pCtx);1732 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu); 1674 1733 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(4)) 1675 1734 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(4)); … … 1704 1763 * @remarks No-long-jump zone!!! 1705 1764 */ 1706 static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PC PUMCTX pCtx)1765 static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 1707 1766 { 1708 1767 /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */ … … 1764 1823 * @remarks No-long-jump zone!!! 
1765 1824 */ 1766 static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PC PUMCTX pCtx)1825 static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 1767 1826 { 1768 1827 /* Guest Sysenter MSRs. */ 1769 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs; 1770 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip; 1771 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp; 1828 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR)) 1829 { 1830 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs; 1831 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); 1832 } 1833 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR)) 1834 { 1835 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip; 1836 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 1837 } 1838 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR)) 1839 { 1840 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp; 1841 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 1842 } 1772 1843 1773 1844 /* … … 1787 1858 { 1788 1859 /* Load these always as the guest may modify FS/GS base using MSRs in 64-bit mode which we don't intercept. */ 1789 pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;1790 pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;1791 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;1860 //pVmcb->guest.FS.u64Base = pCtx->fs.u64Base; 1861 //pVmcb->guest.GS.u64Base = pCtx->gs.u64Base; 1862 //pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG; 1792 1863 } 1793 1864 else … … 1801 1872 } 1802 1873 1803 /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might1804 * be writable in 32-bit mode. Clarify with AMD spec.*/1874 /** @todo HM_CHANGED_GUEST_SYSCALL_MSRS, 1875 * HM_CHANGED_GUEST_KERNEL_GS_BASE */ 1805 1876 pVmcb->guest.u64STAR = pCtx->msrSTAR; 1806 1877 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR; … … 1837 1908 * @remarks Requires EFLAGS to be up-to-date in the VMCB! 1838 1909 */ 1839 static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PC PUMCTX pCtx)1910 static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 1840 1911 { 1841 1912 bool fInterceptMovDRx = false; … … 2005 2076 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context. 2006 2077 */ 2007 static void hmR0SvmLoadGuestHwvirtStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PC PUMCTX pCtx)2078 static void hmR0SvmLoadGuestHwvirtStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCCPUMCTX pCtx) 2008 2079 { 2009 2080 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_HWVIRT)) … … 2021 2092 uint16_t const uGuestPauseFilterCount = pVM->hm.s.svm.cPauseFilter; 2022 2093 uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks; 2023 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_PAUSE))2094 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE)) 2024 2095 { 2025 2096 pVmcbNstGstCtrl->u16PauseFilterCount = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount); … … 2046 2117 * @param pCtx Pointer to the guest-CPU context. 2047 2118 */ 2048 static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PC PUMCTX pCtx)2119 static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 2049 2120 { 2050 2121 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE)) … … 2066 2137 pVCpu->hm.s.svm.fSyncVTpr = false; 2067 2138 2068 /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. 
*/ 2069 if (pVM->hm.s.fTPRPatchingActive) 2070 { 2071 pCtx->msrLSTAR = u8Tpr; 2139 if (!pVM->hm.s.fTPRPatchingActive) 2140 { 2141 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */ 2142 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4); 2143 2144 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */ 2145 if (fPendingIntr) 2146 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8); 2147 else 2148 { 2149 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8); 2150 pVCpu->hm.s.svm.fSyncVTpr = true; 2151 } 2152 2153 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL); 2154 } 2155 else 2156 { 2157 /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */ 2158 pVmcb->guest.u64LSTAR = u8Tpr; 2072 2159 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap; 2073 2160 … … 2082 2169 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM; 2083 2170 } 2084 else2085 {2086 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */2087 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);2088 2089 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */2090 if (fPendingIntr)2091 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);2092 else2093 {2094 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);2095 pVCpu->hm.s.svm.fSyncVTpr = true;2096 }2097 2098 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);2099 }2100 2171 } 2101 2172 … … 2113 2184 * @param pCtx Pointer to the guest-CPU context. 2114 2185 */ 2115 static void hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PC PUMCTX pCtx)2186 static void hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 2116 2187 { 2117 2188 /* If we modify intercepts from here, please check & adjust hmR0SvmLoadGuestXcptInterceptsNested() … … 2150 2221 * @param pCtx Pointer to the nested-guest-CPU context. 2151 2222 */ 2152 static void hmR0SvmMergeVmcbCtrlsNested(PVMCPU pVCpu, PC PUMCTX pCtx)2223 static void hmR0SvmMergeVmcbCtrlsNested(PVMCPU pVCpu, PCCPUMCTX pCtx) 2153 2224 { 2154 2225 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 2300 2371 if (!pVCpu->hm.s.fLeaveDone) 2301 2372 { 2302 hmR0SvmLeave(pVCpu );2373 hmR0SvmLeave(pVCpu, false /* fImportState */); 2303 2374 pVCpu->hm.s.fLeaveDone = true; 2304 2375 } … … 2377 2448 * @remarks No-long-jump zone!!! 2378 2449 */ 2379 static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PC PUMCTX pCtx)2450 static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx) 2380 2451 { 2381 2452 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx); … … 2405 2476 #endif 2406 2477 2478 /* hmR0SvmLoadGuestApicState() must be called -after- hmR0SvmLoadGuestMsrs() as we 2479 may overwrite LSTAR MSR in the VMCB in the case of TPR patching. */ 2407 2480 rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx); 2408 2481 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); … … 2451 2524 * @param pCtx Pointer to the nested-guest-CPU context. 2452 2525 */ 2453 static void hmR0SvmMergeMsrpmNested(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PC PUMCTX pCtx)2526 static void hmR0SvmMergeMsrpmNested(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCCPUMCTX pCtx) 2454 2527 { 2455 2528 uint64_t const *pu64GstMsrpm = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap; … … 2473 2546 * @sa HMSvmNstGstVmExitNotify. 
2474 2547 */ 2475 static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu, PC PUMCTX pCtx)2548 static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu, PCCPUMCTX pCtx) 2476 2549 { 2477 2550 /* … … 2482 2555 * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT". 2483 2556 */ 2484 bool const fWasCached = pCtx->hwvirt.svm.fHMCachedVmcb; 2557 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 2558 bool const fWasCached = pVmcbNstGstCache->fCacheValid; 2485 2559 if (!fWasCached) 2486 2560 { 2487 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 2488 PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl; 2489 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 2490 2561 PCSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 2562 PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl; 2491 2563 pVmcbNstGstCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx; 2492 2564 pVmcbNstGstCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx; … … 2501 2573 pVmcbNstGstCache->fNestedPaging = pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging; 2502 2574 pVmcbNstGstCache->fLbrVirt = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt; 2503 p Ctx->hwvirt.svm.fHMCachedVmcb= true;2575 pVmcbNstGstCache->fCacheValid = true; 2504 2576 Log4(("hmR0SvmCacheVmcbNested: Cached VMCB fields\n")); 2505 2577 } … … 2519 2591 * @param pCtx Pointer to the nested-guest-CPU context. 2520 2592 */ 2521 static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu, PC PUMCTX pCtx)2593 static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu, PCCPUMCTX pCtx) 2522 2594 { 2523 2595 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); … … 2591 2663 * @remarks No-long-jump zone!!! 2592 2664 */ 2593 static int hmR0SvmLoadGuestStateNested(PVMCPU pVCpu, PC PUMCTX pCtx)2665 static int hmR0SvmLoadGuestStateNested(PVMCPU pVCpu, PCCPUMCTX pCtx) 2594 2666 { 2595 2667 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x); … … 2664 2736 * @remarks No-long-jump zone!!! 2665 2737 */ 2666 static void hmR0SvmLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PC PUMCTX pCtx)2738 static void hmR0SvmLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 2667 2739 { 2668 2740 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 2699 2771 2700 2772 /** 2773 * Worker for SVMR0ImportStateOnDemand. 2774 * 2775 * @param pVCpu The cross context virtual CPU structure. 2776 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context. 2777 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 2778 */ 2779 static void hmR0SvmImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 2780 { 2781 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 2782 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest; 2783 PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl; 2784 2785 Log4(("hmR0SvmImportGuestState: fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat)); 2786 if (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL) 2787 { 2788 fWhat &= pCtx->fExtrn; 2789 2790 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2791 if (fWhat & CPUMCTX_EXTRN_HWVIRT) 2792 { 2793 if ( !CPUMIsGuestInSvmNestedHwVirtMode(pCtx) 2794 && pVmcbCtrl->IntCtrl.n.u1VGifEnable) 2795 { 2796 /* We don't yet support passing VGIF feature to the guest. 
*/ 2797 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fVGif); 2798 pCtx->hwvirt.fGif = pVmcbCtrl->IntCtrl.n.u1VGif; 2799 } 2800 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HWVIRT); 2801 } 2802 2803 if (fWhat & CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ) 2804 { 2805 if ( !pVmcbCtrl->IntCtrl.n.u1VIrqPending 2806 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)) 2807 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST); 2808 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ); 2809 } 2810 #else 2811 ASMAtomicUoAndU64(&pCtx->fExtrn, ~(CPUMCTX_EXTRN_HWVIRT | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ)); 2812 #endif 2813 2814 if (fWhat & CPUMCTX_EXTRN_HM_SVM_INT_SHADOW) 2815 { 2816 if (pVmcbCtrl->IntShadow.n.u1IntShadow) 2817 EMSetInhibitInterruptsPC(pVCpu, pVmcbGuest->u64RIP); 2818 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 2819 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 2820 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_SVM_INT_SHADOW); 2821 } 2822 2823 if (fWhat & CPUMCTX_EXTRN_RIP) 2824 { 2825 pCtx->rip = pVmcbGuest->u64RIP; 2826 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RIP); 2827 } 2828 2829 if (fWhat & CPUMCTX_EXTRN_RFLAGS) 2830 { 2831 pCtx->eflags.u32 = pVmcbGuest->u64RFlags; 2832 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RFLAGS); 2833 } 2834 2835 if (fWhat & CPUMCTX_EXTRN_RSP) 2836 { 2837 pCtx->rsp = pVmcbGuest->u64RSP; 2838 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RSP); 2839 } 2840 2841 if (fWhat & CPUMCTX_EXTRN_RAX) 2842 { 2843 pCtx->rax = pVmcbGuest->u64RAX; 2844 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RAX); 2845 } 2846 2847 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 2848 { 2849 if (fWhat & CPUMCTX_EXTRN_CS) 2850 { 2851 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, CS, cs); 2852 /* 2853 * Correct the hidden CS granularity bit. Haven't seen it being wrong in any other 2854 * register (yet). 2855 */ 2856 /** @todo SELM might need to be fixed as it too should not care about the 2857 * granularity bit. See @bugref{6785}. */ 2858 if ( !pCtx->cs.Attr.n.u1Granularity 2859 && pCtx->cs.Attr.n.u1Present 2860 && pCtx->cs.u32Limit > UINT32_C(0xfffff)) 2861 { 2862 Assert((pCtx->cs.u32Limit & 0xfff) == 0xfff); 2863 pCtx->cs.Attr.n.u1Granularity = 1; 2864 } 2865 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, cs); 2866 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CS); 2867 } 2868 if (fWhat & CPUMCTX_EXTRN_SS) 2869 { 2870 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, SS, ss); 2871 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ss); 2872 /* 2873 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and uses that 2874 * and thus it's possible that when the CPL changes during guest execution that the SS DPL 2875 * isn't updated by AMD-V. Observed on some AMD Fusion CPUs with 64-bit guests. 2876 * See AMD spec. 15.5.1 "Basic operation". 
2877 */ 2878 Assert(!(pVmcbGuest->u8CPL & ~0x3)); 2879 uint8_t const uCpl = pVmcbGuest->u8CPL; 2880 if (pCtx->ss.Attr.n.u2Dpl != uCpl) 2881 pCtx->ss.Attr.n.u2Dpl = uCpl & 0x3; 2882 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SS); 2883 } 2884 if (fWhat & CPUMCTX_EXTRN_DS) 2885 { 2886 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, DS, ds); 2887 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ds); 2888 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DS); 2889 } 2890 if (fWhat & CPUMCTX_EXTRN_ES) 2891 { 2892 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, ES, es); 2893 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, es); 2894 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_ES); 2895 } 2896 if (fWhat & CPUMCTX_EXTRN_FS) 2897 { 2898 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, FS, fs); 2899 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, fs); 2900 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_FS); 2901 } 2902 if (fWhat & CPUMCTX_EXTRN_GS) 2903 { 2904 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, GS, gs); 2905 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, gs); 2906 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GS); 2907 } 2908 } 2909 2910 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK) 2911 { 2912 if (fWhat & CPUMCTX_EXTRN_TR) 2913 { 2914 /* 2915 * Fixup TR attributes so it's compatible with Intel. Important when saved-states 2916 * are used between Intel and AMD, see @bugref{6208#c39}. 2917 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode. 2918 */ 2919 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, TR, tr); 2920 if (pCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) 2921 { 2922 if ( pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL 2923 || CPUMIsGuestInLongModeEx(pCtx)) 2924 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY; 2925 else if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL) 2926 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY; 2927 } 2928 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_TR); 2929 } 2930 2931 if (fWhat & CPUMCTX_EXTRN_LDTR) 2932 { 2933 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, LDTR, ldtr); 2934 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_LDTR); 2935 } 2936 2937 if (fWhat & CPUMCTX_EXTRN_GDTR) 2938 { 2939 pCtx->gdtr.cbGdt = pVmcbGuest->GDTR.u32Limit; 2940 pCtx->gdtr.pGdt = pVmcbGuest->GDTR.u64Base; 2941 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GDTR); 2942 } 2943 2944 if (fWhat & CPUMCTX_EXTRN_IDTR) 2945 { 2946 pCtx->idtr.cbIdt = pVmcbGuest->IDTR.u32Limit; 2947 pCtx->idtr.pIdt = pVmcbGuest->IDTR.u64Base; 2948 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_IDTR); 2949 } 2950 } 2951 2952 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 2953 { 2954 pCtx->msrSTAR = pVmcbGuest->u64STAR; 2955 pCtx->msrLSTAR = pVmcbGuest->u64LSTAR; 2956 pCtx->msrCSTAR = pVmcbGuest->u64CSTAR; 2957 pCtx->msrSFMASK = pVmcbGuest->u64SFMASK; 2958 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSCALL_MSRS); 2959 } 2960 2961 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 2962 { 2963 pCtx->SysEnter.cs = pVmcbGuest->u64SysEnterCS; 2964 pCtx->SysEnter.eip = pVmcbGuest->u64SysEnterEIP; 2965 pCtx->SysEnter.esp = pVmcbGuest->u64SysEnterESP; 2966 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSENTER_MSRS); 2967 } 2968 2969 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 2970 { 2971 pCtx->msrKERNELGSBASE = pVmcbGuest->u64KernelGSBase; 2972 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KERNEL_GS_BASE); 2973 } 2974 2975 if (fWhat & CPUMCTX_EXTRN_DR_MASK) 2976 { 2977 if (fWhat & CPUMCTX_EXTRN_DR6) 2978 { 2979 if (!pVCpu->hm.s.fUsingHyperDR7) 2980 pCtx->dr[6] = pVmcbGuest->u64DR6; 2981 else 2982 
CPUMSetHyperDR6(pVCpu, pVmcbGuest->u64DR6); 2983 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR6); 2984 } 2985 2986 if (fWhat & CPUMCTX_EXTRN_DR7) 2987 { 2988 if (!pVCpu->hm.s.fUsingHyperDR7) 2989 pCtx->dr[7] = pVmcbGuest->u64DR7; 2990 else 2991 Assert(pVmcbGuest->u64DR7 == CPUMGetHyperDR7(pVCpu)); 2992 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR7); 2993 } 2994 } 2995 2996 if (fWhat & CPUMCTX_EXTRN_CR_MASK) 2997 { 2998 if (fWhat & CPUMCTX_EXTRN_CR0) 2999 { 3000 /* We intercept changes to all CR0 bits except maybe TS & MP bits. */ 3001 uint64_t const uCr0 = (pCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP)) 3002 | (pVmcbGuest->u64CR0 & (X86_CR0_TS | X86_CR0_MP)); 3003 CPUMSetGuestCR0(pVCpu, uCr0); 3004 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR0); 3005 } 3006 3007 if (fWhat & CPUMCTX_EXTRN_CR2) 3008 { 3009 pCtx->cr2 = pVmcbGuest->u64CR2; 3010 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR2); 3011 } 3012 3013 if (fWhat & CPUMCTX_EXTRN_CR3) 3014 { 3015 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging 3016 && pCtx->cr3 != pVmcbGuest->u64CR3) 3017 { 3018 CPUMSetGuestCR3(pVCpu, pVmcbGuest->u64CR3); 3019 if (VMMRZCallRing3IsEnabled(pVCpu)) 3020 { 3021 Log4(("hmR0SvmImportGuestState: Calling PGMUpdateCR3\n")); 3022 PGMUpdateCR3(pVCpu, pVmcbGuest->u64CR3); 3023 } 3024 else 3025 { 3026 Log4(("hmR0SvmImportGuestState: Setting VMCPU_FF_HM_UPDATE_CR3\n")); 3027 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3); 3028 } 3029 } 3030 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR3); 3031 } 3032 3033 /* Changes to CR4 are always intercepted. */ 3034 } 3035 3036 /* If everything has been imported, clear the HM keeper bit. */ 3037 if (!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL)) 3038 { 3039 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KEEPER_HM); 3040 Assert(!pCtx->fExtrn); 3041 } 3042 } 3043 else 3044 Assert(!pCtx->fExtrn); 3045 3046 /* 3047 * Honor any pending CR3 updates. 3048 * 3049 * Consider this scenario: #VMEXIT -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp 3050 * -> hmR0SvmCallRing3Callback() -> VMMRZCallRing3Disable() -> hmR0SvmImportGuestState() 3051 * -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp -> continue with #VMEXIT 3052 * handling -> hmR0SvmImportGuestState() and here we are. 3053 * 3054 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be 3055 * up-to-date and thus any CR3-saves -before- the VM-exit (longjmp) would've postponed the CR3 3056 * update via the force-flag and cleared CR3 from fExtrn. Any SVM R0 VM-exit handler that requests 3057 * CR3 to be saved will end up here and we call PGMUpdateCR3(). 3058 * 3059 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again, 3060 * and does not process force-flag like regular exits to ring-3 either, we cover for it here. 3061 */ 3062 if ( VMMRZCallRing3IsEnabled(pVCpu) 3063 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 3064 { 3065 Assert(pCtx->cr3 == pVmcbGuest->u64CR3); 3066 PGMUpdateCR3(pVCpu, pCtx->cr3); 3067 } 3068 } 3069 3070 3071 /** 2701 3072 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU 2702 3073 * context. … … 2706 3077 * 2707 3078 * @returns VBox status code. 2708 * @param pVCpu The cross context virtual CPU structure. 2709 * @param pMixedCtx Pointer to the guest-CPU or nested-guest-CPU 2710 * context. The data may be out-of-sync. Make sure to 2711 * update the required fields before using them. 2712 * @param pVmcb Pointer to the VM control block. 
2713 */ 2714 static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PCSVMVMCB pVmcb) 3079 * @param pVCpu The cross context virtual CPU structure. 3080 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context. The 3081 * data may be out-of-sync. Make sure to update the required 3082 * fields before using them. 3083 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 3084 */ 3085 VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 3086 { 3087 hmR0SvmImportGuestState(pVCpu, pCtx, fWhat); 3088 return VINF_SUCCESS; 3089 } 3090 3091 3092 /** 3093 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU 3094 * context. 3095 * 3096 * Currently there is no residual state left in the CPU that is not updated in the 3097 * VMCB. 3098 * 3099 * @returns VBox status code. 3100 * @param pVCpu The cross context virtual CPU structure. 3101 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context. The 3102 * data may be out-of-sync. Make sure to update the required 3103 * fields before using them. 3104 * @param pVmcb Pointer to the VM control block. 3105 */ 3106 static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb) 2715 3107 { 2716 3108 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 2717 3109 2718 pMixedCtx->rip = pVmcb->guest.u64RIP;2719 pMixedCtx->rsp = pVmcb->guest.u64RSP;2720 pMixedCtx->eflags.u32 = pVmcb->guest.u64RFlags;2721 pMixedCtx->rax = pVmcb->guest.u64RAX;2722 2723 PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;2724 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM2725 if (!CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx))2726 {2727 if (pVmcbCtrl->IntCtrl.n.u1VGifEnable)2728 {2729 /*2730 * Guest Virtual GIF (Global Interrupt Flag).2731 * We don't yet support passing VGIF feature to the guest.2732 */2733 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fVGif);2734 pMixedCtx->hwvirt.fGif = pVmcbCtrl->IntCtrl.n.u1VGif;2735 }2736 }2737 else2738 {2739 /*2740 * Nested-guest interrupt pending.2741 * Sync nested-guest's V_IRQ and its force-flag.2742 */2743 if ( !pVmcbCtrl->IntCtrl.n.u1VIrqPending2744 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))2745 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);2746 }2747 #endif2748 2749 3110 /* 2750 * Guest interrupt shadow. 3111 * Always import the following: 3112 * 3113 * - RIP, RFLAGS, int. shadow, GIF: we need them when as we evaluate 3114 * injecting events before re-entering guest execution. 3115 * 3116 * - GPRS: Only RAX, RSP are in the VMCB. All the other GPRs are swapped 3117 * by the assembly switcher code. Import these two always just to simplify 3118 * assumptions on GPRs. 3119 * 3120 * - SREG: We load them all together so we have to save all of them. 3121 * 3122 * - KERNEL_GS_BASE, SYSCALL MSRS: We don't have a HM_CHANGED_GUEST flag 3123 * for it yet 2751 3124 */ 2752 if (pVmcbCtrl->IntShadow.n.u1IntShadow) 2753 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip); 2754 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 2755 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 2756 2757 /* 2758 * Guest control registers: CR0, CR2, CR3 (handled at the end). 2759 * Accesses to other control registers are always intercepted. 2760 */ 2761 pMixedCtx->cr2 = pVmcb->guest.u64CR2; 2762 2763 /* If we're not intercepting changes to CR0 TS & MP bits, sync those bits here. 
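
Both the on-demand CR0 import above and the CR0 sync in the save path that continues below merge the register the same way: every intercepted bit keeps the value HM last wrote, and only the non-intercepted TS and MP bits are taken from what the CPU wrote back to the VMCB. A small self-contained sketch of that merge, using plain constants in place of the X86_CR0_* definitions:

#include <stdint.h>
#include <assert.h>

#define CR0_MP UINT64_C(0x00000002)   /* bit 1: monitor coprocessor */
#define CR0_TS UINT64_C(0x00000008)   /* bit 3: task switched */

/* Keep every intercepted bit from the cached value; take only the
   non-intercepted TS/MP bits from the value the CPU left in the VMCB. */
static uint64_t mergeCr0(uint64_t uCachedCr0, uint64_t uVmcbCr0)
{
    return (uCachedCr0 & ~(CR0_TS | CR0_MP))
         | (uVmcbCr0   &  (CR0_TS | CR0_MP));
}

int main(void)
{
    uint64_t const uCached = 0x80000031;          /* some cached CR0, TS and MP clear */
    uint64_t const uVmcb   = uCached | CR0_TS;    /* TS flipped while the guest ran */
    assert(mergeCr0(uCached, uVmcb) == (uCached | CR0_TS));
    return 0;
}
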
*/ 2764 if (!(pVmcbCtrl->u16InterceptWrCRx & RT_BIT(0))) 2765 { 2766 pMixedCtx->cr0 = (pMixedCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP)) 2767 | (pVmcb->guest.u64CR0 & (X86_CR0_TS | X86_CR0_MP)); 2768 } 2769 2770 /* 2771 * Guest MSRs. 2772 */ 2773 pMixedCtx->msrSTAR = pVmcb->guest.u64STAR; /* legacy syscall eip, cs & ss */ 2774 pMixedCtx->msrLSTAR = pVmcb->guest.u64LSTAR; /* 64-bit mode syscall rip */ 2775 pMixedCtx->msrCSTAR = pVmcb->guest.u64CSTAR; /* compatibility mode syscall rip */ 2776 pMixedCtx->msrSFMASK = pVmcb->guest.u64SFMASK; /* syscall flag mask */ 2777 pMixedCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase; /* swapgs exchange value */ 2778 pMixedCtx->SysEnter.cs = pVmcb->guest.u64SysEnterCS; 2779 pMixedCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP; 2780 pMixedCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP; 2781 2782 /* 2783 * Guest segment registers (includes FS, GS base MSRs for 64-bit guests). 2784 */ 2785 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, CS, cs); 2786 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, SS, ss); 2787 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, DS, ds); 2788 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, ES, es); 2789 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, FS, fs); 2790 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, GS, gs); 2791 2792 /* 2793 * Correct the hidden CS granularity bit. Haven't seen it being wrong in any other 2794 * register (yet). 2795 */ 2796 /** @todo SELM might need to be fixed as it too should not care about the 2797 * granularity bit. See @bugref{6785}. */ 2798 if ( !pMixedCtx->cs.Attr.n.u1Granularity 2799 && pMixedCtx->cs.Attr.n.u1Present 2800 && pMixedCtx->cs.u32Limit > UINT32_C(0xfffff)) 2801 { 2802 Assert((pMixedCtx->cs.u32Limit & 0xfff) == 0xfff); 2803 pMixedCtx->cs.Attr.n.u1Granularity = 1; 2804 } 2805 2806 HMSVM_ASSERT_SEG_GRANULARITY(cs); 2807 HMSVM_ASSERT_SEG_GRANULARITY(ss); 2808 HMSVM_ASSERT_SEG_GRANULARITY(ds); 2809 HMSVM_ASSERT_SEG_GRANULARITY(es); 2810 HMSVM_ASSERT_SEG_GRANULARITY(fs); 2811 HMSVM_ASSERT_SEG_GRANULARITY(gs); 2812 2813 /* 2814 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and uses that 2815 * and thus it's possible that when the CPL changes during guest execution that the SS DPL 2816 * isn't updated by AMD-V. Observed on some AMD Fusion CPUs with 64-bit guests. 2817 * See AMD spec. 15.5.1 "Basic operation". 2818 */ 2819 Assert(!(pVmcb->guest.u8CPL & ~0x3)); 2820 uint8_t const uCpl = pVmcb->guest.u8CPL; 2821 if (pMixedCtx->ss.Attr.n.u2Dpl != uCpl) 2822 { 2823 Log4(("hmR0SvmSaveGuestState: CPL differs. SS.DPL=%u, CPL=%u, overwriting SS.DPL!\n", pMixedCtx->ss.Attr.n.u2Dpl, uCpl)); 2824 pMixedCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3; 2825 } 2826 2827 /* 2828 * Guest TR. 2829 * Fixup TR attributes so it's compatible with Intel. Important when saved-states are used 2830 * between Intel and AMD. See @bugref{6208#c39}. 2831 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode. 
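
The TR fixup that follows (and its twin in the on-demand import path earlier) normalizes an "available" TSS type to "busy" so a saved state produced on AMD hardware also passes the stricter consistency checks applied when it is restored on the other vendor. A standalone sketch of that decision; the type values follow the x86 system-descriptor encoding, everything else is illustrative:

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define SEL_TYPE_SYS_286_TSS_AVAIL  0x1
#define SEL_TYPE_SYS_286_TSS_BUSY   0x3
#define SEL_TYPE_SYS_386_TSS_AVAIL  0x9
#define SEL_TYPE_SYS_386_TSS_BUSY   0xB

/* Normalize the hidden TR type; in long mode the TSS must be the 32/64-bit kind. */
static uint8_t fixupTrType(uint8_t uType, bool fLongMode)
{
    if (uType == SEL_TYPE_SYS_386_TSS_BUSY)
        return uType;                                   /* already what we want */
    if (uType == SEL_TYPE_SYS_386_TSS_AVAIL || fLongMode)
        return SEL_TYPE_SYS_386_TSS_BUSY;
    if (uType == SEL_TYPE_SYS_286_TSS_AVAIL)
        return SEL_TYPE_SYS_286_TSS_BUSY;
    return uType;                                       /* leave anything else alone */
}

int main(void)
{
    assert(fixupTrType(SEL_TYPE_SYS_386_TSS_AVAIL, false) == SEL_TYPE_SYS_386_TSS_BUSY);
    assert(fixupTrType(SEL_TYPE_SYS_286_TSS_AVAIL, false) == SEL_TYPE_SYS_286_TSS_BUSY);
    assert(fixupTrType(SEL_TYPE_SYS_286_TSS_AVAIL, true)  == SEL_TYPE_SYS_386_TSS_BUSY);
    return 0;
}
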
2832 */ 2833 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, TR, tr); 2834 if (pMixedCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) 2835 { 2836 if ( pMixedCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL 2837 || CPUMIsGuestInLongModeEx(pMixedCtx)) 2838 pMixedCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY; 2839 else if (pMixedCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL) 2840 pMixedCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY; 2841 } 2842 2843 /* 2844 * Guest Descriptor-Table registers (GDTR, IDTR, LDTR). 2845 */ 2846 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, LDTR, ldtr); 2847 pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit; 2848 pMixedCtx->gdtr.pGdt = pVmcb->guest.GDTR.u64Base; 2849 2850 pMixedCtx->idtr.cbIdt = pVmcb->guest.IDTR.u32Limit; 2851 pMixedCtx->idtr.pIdt = pVmcb->guest.IDTR.u64Base; 2852 2853 /* 2854 * Guest Debug registers. 2855 */ 2856 if (!pVCpu->hm.s.fUsingHyperDR7) 2857 { 2858 pMixedCtx->dr[6] = pVmcb->guest.u64DR6; 2859 pMixedCtx->dr[7] = pVmcb->guest.u64DR7; 2860 } 2861 else 2862 { 2863 Assert(pVmcb->guest.u64DR7 == CPUMGetHyperDR7(pVCpu)); 2864 CPUMSetHyperDR6(pVCpu, pVmcb->guest.u64DR6); 2865 } 2866 2867 /* 2868 * With Nested Paging, CR3 changes are not intercepted. Therefore, sync. it now. 2869 * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3. 2870 */ 2871 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging 2872 && pMixedCtx->cr3 != pVmcb->guest.u64CR3) 2873 { 2874 CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3); 2875 PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3); 2876 } 2877 2878 #ifdef VBOX_STRICT 2879 if (CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx)) 2880 hmR0SvmLogState(pVCpu, pVmcb, pMixedCtx, "hmR0SvmSaveGuestStateNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */); 3125 /** @todo Extend HM_CHANGED_GUEST_xxx so that we avoid saving segment 3126 * registers, kernel GS base and other MSRs each time. */ 3127 hmR0SvmImportGuestState(pVCpu, pCtx, CPUMCTX_EXTRN_RIP 3128 | CPUMCTX_EXTRN_SYSCALL_MSRS 3129 | CPUMCTX_EXTRN_KERNEL_GS_BASE 3130 | CPUMCTX_EXTRN_RFLAGS 3131 | CPUMCTX_EXTRN_RAX 3132 | CPUMCTX_EXTRN_SREG_MASK 3133 | CPUMCTX_EXTRN_RSP 3134 | CPUMCTX_EXTRN_HWVIRT 3135 | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW 3136 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ); 3137 3138 #ifdef DEBUG_ramshankar 3139 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 3140 { 3141 hmR0SvmImportGuestState(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 3142 hmR0SvmLogState(pVCpu, pVmcb, pCtx, "hmR0SvmSaveGuestStateNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */); 3143 } 3144 #else 3145 RT_NOREF(pVmcb); 2881 3146 #endif 2882 3147 } … … 2887 3152 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V. 2888 3153 * 2889 * @param pVCpu The cross context virtual CPU structure. 3154 * @param pVCpu The cross context virtual CPU structure. 3155 * @param fImportState Whether to import the guest state from the VMCB back 3156 * to the guest-CPU context. 2890 3157 * 2891 3158 * @remarks No-long-jmp zone!!! 2892 3159 */ 2893 static void hmR0SvmLeave(PVMCPU pVCpu )3160 static void hmR0SvmLeave(PVMCPU pVCpu, bool fImportState) 2894 3161 { 2895 3162 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 2902 3169 */ 2903 3170 3171 /* Save the guest state if necessary. 
*/ 3172 if (fImportState) 3173 hmR0SvmImportGuestState(pVCpu, &pVCpu->cpum.GstCtx, HMSVM_CPUMCTX_EXTRN_ALL); 3174 2904 3175 /* Restore host FPU state if necessary and resync on next R0 reentry .*/ 2905 if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu)) 2906 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); /** @todo r=ramshankar: This shouldn't be necessary, it's set in HMR0EnterCpu. */ 3176 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu); 2907 3177 2908 3178 /* … … 2917 3187 } 2918 3188 #endif 2919 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */)) 2920 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);/** @todo r=ramshankar: This shouldn't be necessary, it's set in HMR0EnterCpu. */ 3189 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */); 2921 3190 2922 3191 Assert(!CPUMIsHyperDebugStateActive(pVCpu)); … … 2936 3205 * Leaves the AMD-V session. 2937 3206 * 3207 * Only used while returning to ring-3 either due to longjump or exits to 3208 * ring-3. 3209 * 2938 3210 * @returns VBox status code. 2939 3211 * @param pVCpu The cross context virtual CPU structure. … … 2949 3221 if (!pVCpu->hm.s.fLeaveDone) 2950 3222 { 2951 hmR0SvmLeave(pVCpu );3223 hmR0SvmLeave(pVCpu, true /* fImportState */); 2952 3224 pVCpu->hm.s.fLeaveDone = true; 2953 3225 } … … 3129 3401 * @remarks No-long-jump zone!!! 3130 3402 */ 3131 static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PC PUMCTX pCtx, PSVMVMCB pVmcb)3403 static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb) 3132 3404 { 3133 3405 /* … … 3249 3521 3250 3522 /* Update CR2 of the guest. */ 3523 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR2); 3251 3524 if (pCtx->cr2 != uFaultAddress) 3252 3525 { … … 3300 3573 * @param pVCpu The cross context virtual CPU structure. 3301 3574 * @param pVmcb Pointer to the guest VM control block. 3302 * @param pCtx Pointer to the guest-CPU context.3303 3575 * @param pEvent Pointer to the event. 3304 3576 * … … 3306 3578 * @remarks Requires CR0! 3307 3579 */ 3308 DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent) 3309 { 3310 NOREF(pVCpu); NOREF(pCtx); 3311 3580 DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMEVENT pEvent) 3581 { 3312 3582 Assert(!pVmcb->ctrl.EventInject.n.u1Valid); 3313 3583 pVmcb->ctrl.EventInject.u = pEvent->u; … … 3442 3712 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag. 3443 3713 */ 3444 DECLINLINE(bool) hmR0SvmIsIntrShadowActive(PVMCPU pVCpu, PC PUMCTX pCtx)3714 DECLINLINE(bool) hmR0SvmIsIntrShadowActive(PVMCPU pVCpu, PCCPUMCTX pCtx) 3445 3715 { 3446 3716 /* … … 3550 3820 { 3551 3821 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx); 3822 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT 3823 | CPUMCTX_EXTRN_RFLAGS 3824 | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW 3825 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ); 3552 3826 3553 3827 Assert(!pVCpu->hm.s.Event.fPending); … … 3560 3834 bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS); 3561 3835 3562 Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool Intr. 
pending=%RTbool NMI pending=%RTbool\n",3836 Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntrPending=%RTbool fNmiPending=%RTbool\n", 3563 3837 fVirtualGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC), 3564 3838 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))); … … 3580 3854 { 3581 3855 Log4(("Intercepting NMI -> #VMEXIT\n")); 3856 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); 3582 3857 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0); 3583 3858 } … … 3620 3895 { 3621 3896 Log4(("Intercepting INTR -> #VMEXIT\n")); 3897 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); 3622 3898 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0); 3623 3899 } … … 3666 3942 { 3667 3943 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx); 3944 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT 3945 | CPUMCTX_EXTRN_RFLAGS 3946 | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW); 3947 3668 3948 Assert(!pVCpu->hm.s.Event.fPending); 3669 3949 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); … … 3764 4044 * prematurely. 3765 4045 */ 3766 static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PC PUMCTX pCtx, PSVMVMCB pVmcb)4046 static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb) 3767 4047 { 3768 4048 Assert(!TRPMHasTrap(pVCpu)); … … 3825 4105 */ 3826 4106 Log4(("Injecting pending HM event\n")); 3827 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx,&Event);4107 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, &Event); 3828 4108 pVCpu->hm.s.Event.fPending = false; 3829 4109 … … 3869 4149 HMSVM_ASSERT_PREEMPT_SAFE(); 3870 4150 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx); 4151 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 4152 3871 4153 PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; 3872 3873 4154 if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE) 3874 4155 { … … 4027 4308 { 4028 4309 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 4029 4030 /* On AMD-V we don't need to update CR3, PAE PDPES lazily. See hmR0SvmSaveGuestState(). */ 4310 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 4031 4311 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)); 4032 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 4312 4313 /* Could happen as a result of longjump. */ 4314 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 4315 { 4316 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CR3)); 4317 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu)); 4318 } 4033 4319 4034 4320 /* Update pending interrupts into the APIC's IRR. */ … … 4157 4443 4158 4444 /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware-assisted SVM. */ 4159 Assert(p Ctx->hwvirt.svm.fHMCachedVmcb);4445 Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid); 4160 4446 4161 4447 /* … … 4260 4546 if (pVCpu->hm.s.svm.fSyncVTpr) 4261 4547 { 4548 PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; 4262 4549 if (pVM->hm.s.fTPRPatchingActive) 4263 pSvmTransient->u8GuestTpr = p Ctx->msrLSTAR;4550 pSvmTransient->u8GuestTpr = pVmcb->guest.u64LSTAR; 4264 4551 else 4265 {4266 PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;4267 4552 pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR; 4268 }4269 4553 } 4270 4554 … … 4323 4607 * @remarks No-long-jump zone!!! 
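
The pre-run code above snapshots the guest TPR from one of two shadows: the low byte of the LSTAR MSR when the 32-bit TPR-patching trick is active, or the VMCB's V_TPR field otherwise, and the post-run side compares against the same source to decide whether the APIC needs updating. A compact model of that selection; all names here are invented, not the VirtualBox structures.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the two places a guest's TPR can be shadowed. */
typedef struct
{
    uint64_t u64LSTAR;   /* patched 32-bit guests park the TPR in LSTAR's low byte */
    uint8_t  u8VTPR;     /* otherwise the VMCB's virtual TPR field tracks CR8 */
} TPRSHADOW;

static uint8_t readShadowedTpr(const TPRSHADOW *pShadow, bool fTprPatchingActive)
{
    return fTprPatchingActive ? (uint8_t)(pShadow->u64LSTAR & 0xff) : pShadow->u8VTPR;
}

/* After running the guest: push the value to the (here: pretend) APIC only if it changed. */
static void syncTprToApic(const TPRSHADOW *pShadow, bool fPatching, uint8_t uTprBefore)
{
    uint8_t const uTprNow = readShadowedTpr(pShadow, fPatching);
    if (uTprNow != uTprBefore)
        printf("APIC TPR update: %#x -> %#x\n", uTprBefore, uTprNow);
}

int main(void)
{
    TPRSHADOW Shadow = { /* u64LSTAR */ 0x0, /* u8VTPR */ 0x2 };
    bool const fPatching = true;                               /* pretend the TPR patch is active */
    uint8_t const uBefore = readShadowedTpr(&Shadow, fPatching);
    Shadow.u64LSTAR = (Shadow.u64LSTAR & ~UINT64_C(0xff)) | 0x8;   /* guest raised its TPR */
    syncTprToApic(&Shadow, fPatching, uBefore);
    return 0;
}
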
4324 4608 */ 4325 static void hmR0SvmPreRunGuestCommitted(PVMCPU pVCpu, PC PUMCTX pCtx, PSVMTRANSIENT pSvmTransient)4609 static void hmR0SvmPreRunGuestCommitted(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 4326 4610 { 4327 4611 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); … … 4455 4739 DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 4456 4740 { 4741 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */ 4742 pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM; 4743 4457 4744 /* 4458 4745 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations … … 4471 4758 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4472 4759 /** 4473 * Undoes the TSC offset applied for an SVM nested-guest and returns the TSC4474 * value for the guest.4475 *4476 * @returns The TSC offset after undoing any nested-guest TSC offset.4477 * @param pVCpu The cross context virtual CPU structure of the calling EMT.4478 * @param uTicks The nested-guest TSC.4479 *4480 * @note If you make any changes to this function, please check if4481 * hmR0SvmNstGstUndoTscOffset() needs adjusting.4482 *4483 * @sa HMSvmNstGstApplyTscOffset().4484 */4485 DECLINLINE(uint64_t) hmR0SvmNstGstUndoTscOffset(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uTicks)4486 {4487 Assert(pCtx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx);4488 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;4489 return uTicks - pVmcbNstGstCache->u64TSCOffset;4490 }4491 4492 4493 /**4494 4760 * Wrapper for running the nested-guest code in AMD-V. 4495 4761 * … … 4503 4769 DECLINLINE(int) hmR0SvmRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 4504 4770 { 4771 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */ 4772 pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM; 4773 4505 4774 /* 4506 4775 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations … … 4515 4784 #endif 4516 4785 } 4786 4787 4788 /** 4789 * Undoes the TSC offset applied for an SVM nested-guest and returns the TSC 4790 * value for the guest. 4791 * 4792 * @returns The TSC offset after undoing any nested-guest TSC offset. 4793 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 4794 * @param uTicks The nested-guest TSC. 4795 * 4796 * @note If you make any changes to this function, please check if 4797 * hmR0SvmNstGstUndoTscOffset() needs adjusting. 4798 * 4799 * @sa HMSvmNstGstApplyTscOffset(). 4800 */ 4801 DECLINLINE(uint64_t) hmR0SvmNstGstUndoTscOffset(PVMCPU pVCpu, uint64_t uTicks) 4802 { 4803 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 4804 Assert(pVmcbNstGstCache->fCacheValid); 4805 return uTicks - pVmcbNstGstCache->u64TSCOffset; 4806 } 4517 4807 #endif 4518 4808 … … 4522 4812 * 4523 4813 * @param pVCpu The cross context virtual CPU structure. 4524 * @param p MixedCtx Pointer to the guest-CPU context. The data maybe4814 * @param pCtx Pointer to the guest-CPU context. The data maybe 4525 4815 * out-of-sync. Make sure to update the required fields 4526 4816 * before using them. … … 4532 4822 * unconditionally when it is safe to do so. 
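
hmR0SvmNstGstUndoTscOffset above turns a TSC value that already includes the nested-guest's offset back into the outer guest's view by subtracting the offset cached at VMRUN time; the arithmetic is simply the inverse of how the value was produced. A sketch under that assumption, with invented helper names:

#include <stdint.h>
#include <assert.h>

/* What the CPU effectively computes while the nested-guest runs:
   observed TSC = host TSC + guest offset + nested-guest offset. */
static uint64_t tscSeenByNestedGuest(uint64_t uHostTsc, uint64_t offGuest, uint64_t offNstGst)
{
    return uHostTsc + offGuest + offNstGst;
}

/* Undo only the nested-guest contribution (the cached u64TSCOffset) so the
   timestamp can be fed to code that thinks in outer-guest time. */
static uint64_t undoNestedTscOffset(uint64_t uTicks, uint64_t offNstGstCached)
{
    return uTicks - offNstGstCached;
}

int main(void)
{
    uint64_t const uHost = 1000000, offGuest = 500, offNst = 42;
    uint64_t const uSeen = tscSeenByNestedGuest(uHost, offGuest, offNst);
    assert(undoNestedTscOffset(uSeen, offNst) == uHost + offGuest);
    return 0;
}
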
4533 4823 */ 4534 static void hmR0SvmPostRunGuest(PVMCPU pVCpu, PCPUMCTX p MixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)4824 static void hmR0SvmPostRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun) 4535 4825 { 4536 4826 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); … … 4552 4842 { 4553 4843 /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMSvmNstGstVmExitNotify(). */ 4554 uint64_t const uGstTsc = hmR0SvmNstGstUndoTscOffset(pVCpu, pMixedCtx,uHostTsc + pVmcbCtrl->u64TSCOffset);4844 uint64_t const uGstTsc = hmR0SvmNstGstUndoTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset); 4555 4845 TMCpuTickSetLastSeen(pVCpu, uGstTsc); 4556 4846 } … … 4581 4871 } 4582 4872 4583 pSvmTransient->u64ExitCode = pVmcbCtrl->u64ExitCode;/* Save the #VMEXIT reason. */4873 pSvmTransient->u64ExitCode = pVmcbCtrl->u64ExitCode; /* Save the #VMEXIT reason. */ 4584 4874 pVmcbCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */ 4585 4875 pSvmTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */ 4586 4876 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */ 4587 4877 4588 hmR0SvmSaveGuestState(pVCpu, p MixedCtx, pVmcb);/* Save the guest state from the VMCB to the guest-CPU context. */4878 hmR0SvmSaveGuestState(pVCpu, pCtx, pVmcb); /* Save the guest state from the VMCB to the guest-CPU context. */ 4589 4879 4590 4880 if ( pSvmTransient->u64ExitCode != SVM_EXIT_INVALID … … 4594 4884 /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */ 4595 4885 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive 4596 && (p MixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)4597 { 4598 int rc = APICSetTpr(pVCpu, p MixedCtx->msrLSTAR & 0xff);4886 && (pVmcb->guest.u64LSTAR & 0xff) != pSvmTransient->u8GuestTpr) 4887 { 4888 int rc = APICSetTpr(pVCpu, pVmcb->guest.u64LSTAR & 0xff); 4599 4889 AssertRC(rc); 4600 4890 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE); … … 4609 4899 } 4610 4900 4901 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 4611 4902 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK), 4612 p MixedCtx->cs.u64Base + pMixedCtx->rip, uHostTsc);4903 pCtx->cs.u64Base + pCtx->rip, uHostTsc); 4613 4904 } 4614 4905 … … 4858 5149 4859 5150 /* Invalid nested-guest state. Cause a #VMEXIT but assert on strict builds. */ 5151 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 4860 5152 AssertMsgFailed(("Invalid nested-guest state. rc=%Rrc u64ExitCode=%#RX64\n", rc, SvmTransient.u64ExitCode)); 4861 5153 rc = VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0, 0)); … … 4979 5271 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX); 4980 5272 4981 #define HM_SVM_VMEXIT_NESTED(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \ 4982 VBOXSTRICTRC_TODO(IEMExecSvmVmexit(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)) 5273 /** @todo Use IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK instead of 5274 * HMSVM_CPUMCTX_EXTRN_ALL below. See todo in 5275 * HMSvmNstGstVmExitNotify(). 
*/ 5276 #define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_pCtx, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \ 5277 do { \ 5278 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); \ 5279 return VBOXSTRICTRC_TODO(IEMExecSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2))); \ 5280 } while (0) 4983 5281 4984 5282 /* … … 4986 5284 * by the nested-guest. If it isn't, it should be handled by the (outer) guest. 4987 5285 */ 4988 PSVMVMCB pVmcbNstGst= pCtx->hwvirt.svm.CTX_SUFF(pVmcb);4989 PSVMVMCBCTRL pVmcbNstGstCtrl= &pVmcbNstGst->ctrl;4990 uint64_t const uExitCode= pVmcbNstGstCtrl->u64ExitCode;4991 uint64_t const uExitInfo1= pVmcbNstGstCtrl->u64ExitInfo1;4992 uint64_t const uExitInfo2= pVmcbNstGstCtrl->u64ExitInfo2;5286 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 5287 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl; 5288 uint64_t const uExitCode = pVmcbNstGstCtrl->u64ExitCode; 5289 uint64_t const uExitInfo1 = pVmcbNstGstCtrl->u64ExitInfo1; 5290 uint64_t const uExitInfo2 = pVmcbNstGstCtrl->u64ExitInfo2; 4993 5291 4994 5292 Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode); … … 4997 5295 case SVM_EXIT_CPUID: 4998 5296 { 4999 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_CPUID))5000 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5297 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CPUID)) 5298 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5001 5299 return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient); 5002 5300 } … … 5004 5302 case SVM_EXIT_RDTSC: 5005 5303 { 5006 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_RDTSC))5007 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5304 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSC)) 5305 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5008 5306 return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient); 5009 5307 } … … 5011 5309 case SVM_EXIT_RDTSCP: 5012 5310 { 5013 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_RDTSCP))5014 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5311 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP)) 5312 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5015 5313 return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient); 5016 5314 } … … 5018 5316 case SVM_EXIT_MONITOR: 5019 5317 { 5020 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_MONITOR))5021 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5318 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MONITOR)) 5319 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5022 5320 return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient); 5023 5321 } … … 5025 5323 case SVM_EXIT_MWAIT: 5026 5324 { 5027 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_MWAIT))5028 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5325 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MWAIT)) 5326 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5029 5327 return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient); 5030 5328 } … … 5032 5330 case SVM_EXIT_HLT: 5033 5331 { 5034 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_HLT))5035 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5332 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_HLT)) 5333 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, 
uExitCode, uExitInfo1, uExitInfo2); 5036 5334 return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient); 5037 5335 } … … 5039 5337 case SVM_EXIT_MSR: 5040 5338 { 5041 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_MSR_PROT))5339 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT)) 5042 5340 { 5043 5341 uint32_t const idMsr = pCtx->ecx; … … 5058 5356 || (fInterceptRead && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ)) 5059 5357 { 5060 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5358 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5061 5359 } 5062 5360 } … … 5068 5366 */ 5069 5367 Assert(rc == VERR_OUT_OF_RANGE); 5070 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5368 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5071 5369 } 5072 5370 } … … 5076 5374 case SVM_EXIT_IOIO: 5077 5375 { 5078 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_IOIO_PROT))5376 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT)) 5079 5377 { 5080 5378 void *pvIoBitmap = pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap); … … 5083 5381 bool const fIntercept = hmR0SvmIsIoInterceptActive(pvIoBitmap, &IoExitInfo); 5084 5382 if (fIntercept) 5085 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5383 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5086 5384 } 5087 5385 return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient); … … 5097 5395 5098 5396 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */ 5099 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx,X86_XCPT_PF))5100 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, u32ErrCode, uFaultAddress);5397 if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF)) 5398 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, u32ErrCode, uFaultAddress); 5101 5399 5102 5400 /* If the nested-guest is not intercepting #PFs, forward the #PF to the guest. 
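
The SVM_EXIT_MSR case above derives its fInterceptRead/fInterceptWrite decision from the nested-guest's MSR permission bitmap, where each covered MSR gets a pair of adjacent bits, one for reads and one for writes. The sketch below is a simplified, self-contained model of such a 2-bits-per-entry lookup over a single flat MSR range; the real map covers several discontiguous ranges, so this is not the VMCB layout and the names are invented.

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>

#define MSRPM_SIZE 0x800                 /* 2 KB: room for 0x2000 MSRs at 2 bits each */

/* Bit pair for MSR 'idMsr': even bit = intercept reads, odd bit = intercept writes. */
static void msrpmOffsetAndBit(uint32_t idMsr, uint32_t *poffByte, uint8_t *piBit)
{
    uint32_t const iPair = idMsr * 2;
    *poffByte = iPair / 8;
    *piBit    = (uint8_t)(iPair % 8);
}

static void msrpmSetIntercept(uint8_t *pbMsrpm, uint32_t idMsr, bool fRead, bool fWrite)
{
    uint32_t offByte; uint8_t iBit;
    msrpmOffsetAndBit(idMsr, &offByte, &iBit);
    if (fRead)  pbMsrpm[offByte] |= (uint8_t)(1 << iBit);
    if (fWrite) pbMsrpm[offByte] |= (uint8_t)(1 << (iBit + 1));
}

static bool msrpmIsWriteIntercepted(const uint8_t *pbMsrpm, uint32_t idMsr)
{
    uint32_t offByte; uint8_t iBit;
    msrpmOffsetAndBit(idMsr, &offByte, &iBit);
    return (pbMsrpm[offByte] >> (iBit + 1)) & 1;
}

int main(void)
{
    uint8_t abMsrpm[MSRPM_SIZE];
    memset(abMsrpm, 0, sizeof(abMsrpm));
    msrpmSetIntercept(abMsrpm, 0x176 /* some MSR index */, false /*fRead*/, true /*fWrite*/);
    assert( msrpmIsWriteIntercepted(abMsrpm, 0x176));
    assert(!msrpmIsWriteIntercepted(abMsrpm, 0x175));
    return 0;
}
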
*/ 5401 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR2); 5103 5402 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress); 5104 5403 return VINF_SUCCESS; … … 5109 5408 case SVM_EXIT_XCPT_UD: 5110 5409 { 5111 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx,X86_XCPT_UD))5112 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5410 if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_UD)) 5411 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5113 5412 hmR0SvmSetPendingXcptUD(pVCpu); 5114 5413 return VINF_SUCCESS; … … 5117 5416 case SVM_EXIT_XCPT_MF: 5118 5417 { 5119 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx,X86_XCPT_MF))5120 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5418 if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_MF)) 5419 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5121 5420 return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient); 5122 5421 } … … 5124 5423 case SVM_EXIT_XCPT_DB: 5125 5424 { 5126 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx,X86_XCPT_DB))5127 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5425 if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_DB)) 5426 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5128 5427 return hmR0SvmNestedExitXcptDB(pVCpu, pCtx, pSvmTransient); 5129 5428 } … … 5131 5430 case SVM_EXIT_XCPT_AC: 5132 5431 { 5133 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx,X86_XCPT_AC))5134 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5432 if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_AC)) 5433 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5135 5434 return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient); 5136 5435 } … … 5138 5437 case SVM_EXIT_XCPT_BP: 5139 5438 { 5140 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx,X86_XCPT_BP))5141 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5439 if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_BP)) 5440 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5142 5441 return hmR0SvmNestedExitXcptBP(pVCpu, pCtx, pSvmTransient); 5143 5442 } … … 5148 5447 { 5149 5448 uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0; 5150 if (HMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx,uCr))5151 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5449 if (HMIsGuestSvmReadCRxInterceptSet(pVCpu, uCr)) 5450 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5152 5451 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient); 5153 5452 } … … 5155 5454 case SVM_EXIT_CR0_SEL_WRITE: 5156 5455 { 5157 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))5158 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5456 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE)) 5457 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5159 5458 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient); 5160 5459 } … … 5168 5467 Log4(("hmR0SvmHandleExitNested: Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2)); 5169 5468 5170 if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx,uCr))5171 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5469 if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr)) 5470 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5172 5471 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient); 5173 5472 } … … 5175 5474 case 
SVM_EXIT_PAUSE: 5176 5475 { 5177 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_PAUSE))5178 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5476 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE)) 5477 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5179 5478 return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient); 5180 5479 } … … 5182 5481 case SVM_EXIT_VINTR: 5183 5482 { 5184 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_VINTR))5185 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5483 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VINTR)) 5484 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5186 5485 return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient); 5187 5486 } … … 5189 5488 case SVM_EXIT_INTR: 5190 5489 case SVM_EXIT_NMI: 5191 case SVM_EXIT_XCPT_NMI: /* Shouldn't ever happen, SVM_EXIT_NMI is used instead. */5192 5490 case SVM_EXIT_SMI: 5491 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */ 5193 5492 { 5194 5493 /* … … 5204 5503 case SVM_EXIT_FERR_FREEZE: 5205 5504 { 5206 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_FERR_FREEZE))5207 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5505 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_FERR_FREEZE)) 5506 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5208 5507 return hmR0SvmExitFerrFreeze(pVCpu, pCtx, pSvmTransient); 5209 5508 } … … 5211 5510 case SVM_EXIT_INVLPG: 5212 5511 { 5213 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_INVLPG))5214 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5512 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPG)) 5513 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5215 5514 return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient); 5216 5515 } … … 5218 5517 case SVM_EXIT_WBINVD: 5219 5518 { 5220 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_WBINVD))5221 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5519 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_WBINVD)) 5520 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5222 5521 return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient); 5223 5522 } … … 5225 5524 case SVM_EXIT_INVD: 5226 5525 { 5227 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_INVD))5228 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5526 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVD)) 5527 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5229 5528 return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient); 5230 5529 } … … 5232 5531 case SVM_EXIT_RDPMC: 5233 5532 { 5234 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_RDPMC))5235 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5533 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDPMC)) 5534 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5236 5535 return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient); 5237 5536 } … … 5247 5546 { 5248 5547 uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0; 5249 if (HMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx,uDr))5250 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5548 if (HMIsGuestSvmReadDRxInterceptSet(pVCpu, uDr)) 5549 
NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5251 5550 return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient); 5252 5551 } … … 5258 5557 { 5259 5558 uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0; 5260 if (HMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx,uDr))5261 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5559 if (HMIsGuestSvmWriteDRxInterceptSet(pVCpu, uDr)) 5560 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5262 5561 return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient); 5263 5562 } … … 5288 5587 { 5289 5588 uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0; 5290 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx,uVector))5291 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5589 if (HMIsGuestSvmXcptInterceptSet(pVCpu, uVector)) 5590 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5292 5591 return hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient); 5293 5592 } … … 5295 5594 case SVM_EXIT_XSETBV: 5296 5595 { 5297 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_XSETBV))5298 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5596 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_XSETBV)) 5597 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5299 5598 return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient); 5300 5599 } … … 5302 5601 case SVM_EXIT_TASK_SWITCH: 5303 5602 { 5304 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_TASK_SWITCH))5305 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5603 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH)) 5604 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5306 5605 return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient); 5307 5606 } … … 5309 5608 case SVM_EXIT_IRET: 5310 5609 { 5311 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_IRET))5312 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5610 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IRET)) 5611 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5313 5612 return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient); 5314 5613 } … … 5316 5615 case SVM_EXIT_SHUTDOWN: 5317 5616 { 5318 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_SHUTDOWN))5319 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5617 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN)) 5618 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5320 5619 return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient); 5321 5620 } … … 5323 5622 case SVM_EXIT_VMMCALL: 5324 5623 { 5325 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_VMMCALL))5326 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5624 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL)) 5625 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5327 5626 return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient); 5328 5627 } … … 5330 5629 case SVM_EXIT_CLGI: 5331 5630 { 5332 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_CLGI))5333 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5631 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CLGI)) 5632 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5334 5633 return hmR0SvmExitClgi(pVCpu, pCtx, 
pSvmTransient); 5335 5634 } … … 5337 5636 case SVM_EXIT_STGI: 5338 5637 { 5339 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_STGI))5340 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5638 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_STGI)) 5639 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5341 5640 return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient); 5342 5641 } … … 5344 5643 case SVM_EXIT_VMLOAD: 5345 5644 { 5346 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_VMLOAD))5347 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5645 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD)) 5646 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5348 5647 return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient); 5349 5648 } … … 5351 5650 case SVM_EXIT_VMSAVE: 5352 5651 { 5353 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_VMSAVE))5354 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5652 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE)) 5653 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5355 5654 return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient); 5356 5655 } … … 5358 5657 case SVM_EXIT_INVLPGA: 5359 5658 { 5360 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_INVLPGA))5361 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5659 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA)) 5660 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5362 5661 return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient); 5363 5662 } … … 5365 5664 case SVM_EXIT_VMRUN: 5366 5665 { 5367 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_VMRUN))5368 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5666 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMRUN)) 5667 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5369 5668 return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient); 5370 5669 } … … 5372 5671 case SVM_EXIT_RSM: 5373 5672 { 5374 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_RSM))5375 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5673 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RSM)) 5674 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5376 5675 hmR0SvmSetPendingXcptUD(pVCpu); 5377 5676 return VINF_SUCCESS; … … 5380 5679 case SVM_EXIT_SKINIT: 5381 5680 { 5382 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx,SVM_CTRL_INTERCEPT_SKINIT))5383 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);5681 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SKINIT)) 5682 NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); 5384 5683 hmR0SvmSetPendingXcptUD(pVCpu); 5385 5684 return VINF_SUCCESS; … … 5406 5705 /* not reached */ 5407 5706 5408 #undef HM_SVM_VMEXIT_NESTED5707 #undef NST_GST_VMEXIT_CALL_RET 5409 5708 } 5410 5709 #endif … … 5424 5723 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX); 5425 5724 5725 #ifdef DEBUG_ramshankar 5726 # define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) \ 5727 do { \ 5728 if ((a_fDbg) == 1) \ 5729 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); \ 5730 int rc = a_CallExpr; \ 5731 /* if ((a_fDbg) == 1) */ \ 5732 /* HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); */ \ 5733 return rc; \ 5734 } while (0) 
5735 #else 5736 # define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) return a_CallExpr 5737 #endif 5738 5426 5739 /* 5427 * The ordering of the case labels is based on most-frequently-occurring #VMEXITs for most guests under5428 * normal workloads (for some definition of "normal").5740 * The ordering of the case labels is based on most-frequently-occurring #VMEXITs 5741 * for most guests under normal workloads (for some definition of "normal"). 5429 5742 */ 5430 5743 uint64_t const uExitCode = pSvmTransient->u64ExitCode; 5431 5744 switch (uExitCode) 5432 5745 { 5433 case SVM_EXIT_NPF: 5434 return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient); 5435 5436 case SVM_EXIT_IOIO: 5437 return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient); 5438 5439 case SVM_EXIT_RDTSC: 5440 return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient); 5441 5442 case SVM_EXIT_RDTSCP: 5443 return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient); 5444 5445 case SVM_EXIT_CPUID: 5446 return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient); 5447 5448 case SVM_EXIT_XCPT_14: /* X86_XCPT_PF */ 5449 return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient); 5450 5451 case SVM_EXIT_XCPT_6: /* X86_XCPT_UD */ 5452 return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient); 5453 5454 case SVM_EXIT_XCPT_16: /* X86_XCPT_MF */ 5455 return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient); 5456 5457 case SVM_EXIT_XCPT_1: /* X86_XCPT_DB */ 5458 return hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient); 5459 5460 case SVM_EXIT_XCPT_17: /* X86_XCPT_AC */ 5461 return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient); 5462 5463 case SVM_EXIT_XCPT_3: /* X86_XCPT_BP */ 5464 return hmR0SvmExitXcptBP(pVCpu, pCtx, pSvmTransient); 5465 5466 case SVM_EXIT_MONITOR: 5467 return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient); 5468 5469 case SVM_EXIT_MWAIT: 5470 return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient); 5471 5472 case SVM_EXIT_HLT: 5473 return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient); 5746 case SVM_EXIT_NPF: VMEXIT_CALL_RET(0, hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient)); 5747 case SVM_EXIT_IOIO: VMEXIT_CALL_RET(0, hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient)); 5748 case SVM_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient)); 5749 case SVM_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient)); 5750 case SVM_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient)); 5751 case SVM_EXIT_XCPT_PF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient)); 5752 case SVM_EXIT_MSR: VMEXIT_CALL_RET(0, hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient)); 5753 case SVM_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient)); 5754 case SVM_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient)); 5755 case SVM_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient)); 5756 5757 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */ 5758 case SVM_EXIT_INTR: 5759 case SVM_EXIT_NMI: VMEXIT_CALL_RET(0, hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient)); 5474 5760 5475 5761 case SVM_EXIT_READ_CR0: 5476 5762 case SVM_EXIT_READ_CR3: 5477 case SVM_EXIT_READ_CR4: 5478 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient); 5763 case SVM_EXIT_READ_CR4: VMEXIT_CALL_RET(0, hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient)); 5479 5764 5480 5765 case SVM_EXIT_CR0_SEL_WRITE: … … 5482 5767 case SVM_EXIT_WRITE_CR3: 5483 5768 case SVM_EXIT_WRITE_CR4: 5484 case SVM_EXIT_WRITE_CR8: 5485 { 5486 uint8_t const uCr = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 
0 : uExitCode - SVM_EXIT_WRITE_CR0; 5487 Log4(("hmR0SvmHandleExit: Write CR%u\n", uCr)); NOREF(uCr); 5488 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient); 5489 } 5490 5491 case SVM_EXIT_PAUSE: 5492 return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient); 5493 5494 case SVM_EXIT_VMMCALL: 5495 return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient); 5496 5497 case SVM_EXIT_VINTR: 5498 return hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient); 5499 5500 case SVM_EXIT_FERR_FREEZE: 5501 return hmR0SvmExitFerrFreeze(pVCpu, pCtx, pSvmTransient); 5502 5503 case SVM_EXIT_INTR: 5504 case SVM_EXIT_NMI: 5505 case SVM_EXIT_XCPT_NMI: /* Shouldn't ever happen, SVM_EXIT_NMI is used instead. */ 5506 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient); 5507 5508 case SVM_EXIT_MSR: 5509 return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient); 5510 5511 case SVM_EXIT_INVLPG: 5512 return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient); 5513 5514 case SVM_EXIT_WBINVD: 5515 return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient); 5516 5517 case SVM_EXIT_INVD: 5518 return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient); 5519 5520 case SVM_EXIT_RDPMC: 5521 return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient); 5769 case SVM_EXIT_WRITE_CR8: VMEXIT_CALL_RET(0, hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient)); 5770 5771 case SVM_EXIT_VINTR: VMEXIT_CALL_RET(0, hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient)); 5772 case SVM_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient)); 5773 case SVM_EXIT_VMMCALL: VMEXIT_CALL_RET(0, hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient)); 5774 case SVM_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient)); 5775 case SVM_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient)); 5776 case SVM_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient)); 5777 case SVM_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient)); 5778 case SVM_EXIT_IRET: VMEXIT_CALL_RET(0, hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient)); 5779 case SVM_EXIT_XCPT_UD: VMEXIT_CALL_RET(0, hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient)); 5780 case SVM_EXIT_XCPT_MF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient)); 5781 case SVM_EXIT_XCPT_DB: VMEXIT_CALL_RET(0, hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient)); 5782 case SVM_EXIT_XCPT_AC: VMEXIT_CALL_RET(0, hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient)); 5783 case SVM_EXIT_XCPT_BP: VMEXIT_CALL_RET(0, hmR0SvmExitXcptBP(pVCpu, pCtx, pSvmTransient)); 5784 case SVM_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient)); 5785 case SVM_EXIT_FERR_FREEZE: VMEXIT_CALL_RET(0, hmR0SvmExitFerrFreeze(pVCpu, pCtx, pSvmTransient)); 5522 5786 5523 5787 default: … … 5529 5793 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: 5530 5794 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15: 5531 return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);5795 VMEXIT_CALL_RET(0, hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient)); 5532 5796 5533 5797 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3: … … 5535 5799 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: 5536 5800 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15: 5537 return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient); 5538 5539 case SVM_EXIT_XSETBV: 5540 return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient); 5541 5542 case SVM_EXIT_TASK_SWITCH: 5543 return 
hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient); 5544 5545 case SVM_EXIT_IRET: 5546 return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient); 5547 5548 case SVM_EXIT_SHUTDOWN: 5549 return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient); 5801 VMEXIT_CALL_RET(0, hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient)); 5802 5803 case SVM_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient)); 5804 case SVM_EXIT_SHUTDOWN: VMEXIT_CALL_RET(0, hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient)); 5550 5805 5551 5806 case SVM_EXIT_SMI: … … 5556 5811 * If it ever does, we want to know about it so log the exit code and bail. 5557 5812 */ 5558 return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);5813 VMEXIT_CALL_RET(0, hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient)); 5559 5814 } 5560 5815 5561 5816 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 5562 case SVM_EXIT_CLGI: return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);5563 case SVM_EXIT_STGI: return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);5564 case SVM_EXIT_VMLOAD: return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient);5565 case SVM_EXIT_VMSAVE: return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);5566 case SVM_EXIT_INVLPGA: return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient);5567 case SVM_EXIT_VMRUN: return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient);5817 case SVM_EXIT_CLGI: VMEXIT_CALL_RET(0, hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient)); 5818 case SVM_EXIT_STGI: VMEXIT_CALL_RET(0, hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient)); 5819 case SVM_EXIT_VMLOAD: VMEXIT_CALL_RET(0, hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient)); 5820 case SVM_EXIT_VMSAVE: VMEXIT_CALL_RET(0, hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient)); 5821 case SVM_EXIT_INVLPGA: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient)); 5822 case SVM_EXIT_VMRUN: VMEXIT_CALL_RET(0, hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient)); 5568 5823 #else 5569 5824 case SVM_EXIT_CLGI: … … 5582 5837 5583 5838 #ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS 5584 case SVM_EXIT_XCPT_ 0: /* #DE */5585 /* SVM_EXIT_XCPT_ 1: */ /* #DB -Handled above. */5586 /* SVM_EXIT_XCPT_ 2: */ /* #NMI -Handled above. */5587 /* SVM_EXIT_XCPT_ 3: */ /* #BP -Handled above. */5588 case SVM_EXIT_XCPT_ 4: /* #OF */5589 case SVM_EXIT_XCPT_ 5: /* #BR */5590 /* SVM_EXIT_XCPT_ 6: */ /* #UD -Handled above. */5591 case SVM_EXIT_XCPT_ 7: /* #NM */5592 case SVM_EXIT_XCPT_ 8: /* #DF */5593 case SVM_EXIT_XCPT_ 9: /* #CO_SEG_OVERRUN */5594 case SVM_EXIT_XCPT_ 10: /* #TS */5595 case SVM_EXIT_XCPT_ 11: /* #NP */5596 case SVM_EXIT_XCPT_ 12: /* #SS */5597 case SVM_EXIT_XCPT_ 13: /* #GP */5598 /* SVM_EXIT_XCPT_ 14: */ /* #PF - Handled above.*/5599 case SVM_EXIT_XCPT_15: /* Reserved.*/5600 /* SVM_EXIT_XCPT_ 16: */ /* #MF -Handled above. */5601 /* SVM_EXIT_XCPT_ 17: */ /* #AC -Handled above. */5602 case SVM_EXIT_XCPT_ 18: /* #MC */5603 case SVM_EXIT_XCPT_ 19: /* #XF */5839 case SVM_EXIT_XCPT_DE: 5840 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */ 5841 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */ 5842 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */ 5843 case SVM_EXIT_XCPT_OF: 5844 case SVM_EXIT_XCPT_BR: 5845 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */ 5846 case SVM_EXIT_XCPT_NM: 5847 case SVM_EXIT_XCPT_DF: 5848 case SVM_EXIT_XCPT_CO_SEG_OVERRUN: 5849 case SVM_EXIT_XCPT_TS: 5850 case SVM_EXIT_XCPT_NP: 5851 case SVM_EXIT_XCPT_SS: 5852 case SVM_EXIT_XCPT_GP: 5853 /* SVM_EXIT_XCPT_PF: */ 5854 case SVM_EXIT_XCPT_15: /* Reserved. */ 5855 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */ 5856 /* SVM_EXIT_XCPT_AC: */ /* Handled above. 
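
The exception cases in this hunk were renamed from raw numbers to mnemonics, but the scheme is unchanged: the exception exit codes form a contiguous block, one per vector, which is why a generic handler can recover the vector by subtracting the block's base (as the nested-guest code does with SVM_EXIT_XCPT_0). A tiny model of that mapping; the base value used here is illustrative, not the architectural constant.

#include <stdint.h>
#include <assert.h>

#define EXIT_XCPT_BASE 0x40u     /* hypothetical start of the per-vector exit-code block */
#define XCPT_UD        6u        /* #UD vector */
#define XCPT_PF        14u       /* #PF vector */

static uint32_t exitCodeForXcpt(uint8_t uVector) { return EXIT_XCPT_BASE + uVector; }
static uint8_t  xcptForExitCode(uint32_t uExit)  { return (uint8_t)(uExit - EXIT_XCPT_BASE); }

int main(void)
{
    /* A generic exception handler range-checks the exit code, then switches on the vector. */
    uint32_t const uExit = exitCodeForXcpt(XCPT_PF);
    if (uExit >= EXIT_XCPT_BASE && uExit <= EXIT_XCPT_BASE + 31u)
        assert(xcptForExitCode(uExit) == XCPT_PF);
    assert(xcptForExitCode(exitCodeForXcpt(XCPT_UD)) == XCPT_UD);
    return 0;
}
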
*/ 5857 case SVM_EXIT_XCPT_MC: 5858 case SVM_EXIT_XCPT_XF: 5604 5859 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23: 5605 5860 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27: 5606 5861 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31: 5607 return hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient);5862 VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient)); 5608 5863 #endif /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */ 5609 5864 … … 5618 5873 } 5619 5874 /* not reached */ 5875 #undef VMEXIT_CALL_RET 5620 5876 } 5621 5877 … … 5828 6084 int rc = VINF_SUCCESS; 5829 6085 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 6086 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR2); 5830 6087 5831 6088 Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n", … … 6120 6377 Assert(pVmcb); 6121 6378 Assert(pVmcb->ctrl.u64NextRIP); 6379 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP)); 6122 6380 AssertRelease(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb); /* temporary, remove later */ 6123 6381 pCtx->rip = pVmcb->ctrl.u64NextRIP; … … 6238 6496 { 6239 6497 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6498 6240 6499 PVM pVM = pVCpu->CTX_SUFF(pVM); 6241 6500 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx)); … … 6261 6520 { 6262 6521 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6522 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 6263 6523 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 2)); 6264 6524 if (rcStrict == VINF_SUCCESS) … … 6279 6539 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6280 6540 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3)); 6541 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 6281 6542 if (rcStrict == VINF_SUCCESS) 6282 6543 pSvmTransient->fUpdateTscOffsetting = true; … … 6295 6556 { 6296 6557 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6558 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR0 6559 | CPUMCTX_EXTRN_CR4 6560 | CPUMCTX_EXTRN_SS); 6561 6297 6562 int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 6298 6563 if (RT_LIKELY(rc == VINF_SUCCESS)) … … 6326 6591 && fSupportsNextRipSave) 6327 6592 { 6593 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 6328 6594 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 6329 6595 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip; … … 6334 6600 } 6335 6601 6602 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 6336 6603 int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, pCtx); /* Updates RIP if successful. 
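
Several handlers above compute the intercepted instruction's length as "next RIP minus current RIP" when the CPU's NRIP-save feature filled in u64NextRIP, and otherwise fall back to a supplied likely length or full decoding. A sketch of that helper pattern with invented types (this is a model, not hmR0SvmGetInstrLengthHwAssist itself):

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

/* Hypothetical slice of the exit state: current RIP plus the next-RIP value
   the hardware optionally records on an intercept. */
typedef struct { uint64_t rip; uint64_t nextRip; bool fNRipSave; } EXITSTATE;

static uint8_t instrLengthHwAssist(const EXITSTATE *pState, uint8_t cbLikely)
{
    if (pState->fNRipSave)
    {
        uint64_t const cb = pState->nextRip - pState->rip;
        if (cb > 0 && cb <= 15)            /* x86 instructions never exceed 15 bytes */
            return (uint8_t)cb;
    }
    return cbLikely;                       /* no usable NRIP: trust the caller's estimate */
}

static void advanceRip(EXITSTATE *pState, uint8_t cbInstr)
{
    pState->rip += cbInstr;                /* skip the emulated instruction */
}

int main(void)
{
    EXITSTATE St = { 0x401000, 0x401002, true };   /* e.g. a 2-byte RDTSC */
    uint8_t const cb = instrLengthHwAssist(&St, 2);
    assert(cb == 2);
    advanceRip(&St, cb);
    assert(St.rip == 0x401002);
    return 0;
}
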
*/ 6337 6604 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER); … … 6364 6631 { 6365 6632 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6633 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR0 6634 | CPUMCTX_EXTRN_SS); 6635 6366 6636 int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 6367 6637 if (RT_LIKELY(rc == VINF_SUCCESS)) … … 6386 6656 { 6387 6657 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6658 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR0 6659 | CPUMCTX_EXTRN_SS); 6660 6388 6661 VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 6389 6662 int rc = VBOXSTRICTRC_VAL(rc2); … … 6419 6692 { 6420 6693 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6694 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 6421 6695 return VINF_EM_RESET; 6422 6696 } … … 6430 6704 RT_NOREF(pCtx); 6431 6705 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 6706 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 6432 6707 AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pSvmTransient->u64ExitCode, 6433 6708 pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2)); … … 6457 6732 if (fMovCRx) 6458 6733 { 6734 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 6459 6735 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip; 6460 6736 uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0; … … 6467 6743 } 6468 6744 6745 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 6469 6746 VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */); 6470 6747 int rc = VBOXSTRICTRC_VAL(rc2); … … 6499 6776 if (fMovCRx) 6500 6777 { 6778 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 6501 6779 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip; 6502 6780 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER; … … 6510 6788 if (!fDecodedInstr) 6511 6789 { 6790 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 6512 6791 Log4(("hmR0SvmExitWriteCRx: iCrReg=%#x\n", iCrReg)); 6513 6792 rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(pCtx), NULL); … … 6521 6800 switch (iCrReg) 6522 6801 { 6523 case 0: /* CR0. */ 6524 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 6525 break; 6526 6527 case 3: /* CR3. */ 6528 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3); 6529 break; 6530 6531 case 4: /* CR4. */ 6532 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4); 6533 break; 6534 6535 case 8: /* CR8 (TPR). */ 6536 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE); 6537 break; 6538 6802 case 0: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); break; 6803 case 2: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR2); break; 6804 case 3: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3); break; 6805 case 4: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4); break; 6806 case 8: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE); break; 6539 6807 default: 6808 { 6540 6809 AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. 
u64ExitCode=%#RX64 %#x\n", 6541 6810 pSvmTransient->u64ExitCode, iCrReg)); 6542 6811 break; 6812 } 6543 6813 } 6544 6814 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict); … … 6557 6827 { 6558 6828 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6829 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR0 6830 | CPUMCTX_EXTRN_RFLAGS 6831 | CPUMCTX_EXTRN_SS 6832 | CPUMCTX_EXTRN_ALL_MSRS); 6833 6559 6834 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 6560 6835 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 6598 6873 else 6599 6874 { 6875 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 6600 6876 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */)); 6601 6877 if (RT_LIKELY(rc == VINF_SUCCESS)) … … 6656 6932 else 6657 6933 { 6934 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 6658 6935 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0)); 6659 6936 if (RT_UNLIKELY(rc != VINF_SUCCESS)) … … 6678 6955 { 6679 6956 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6957 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 6958 6680 6959 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 6681 6960 … … 6760 7039 { 6761 7040 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7041 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 6762 7042 6763 7043 /** @todo decode assists... */ … … 6781 7061 { 6782 7062 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7063 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK); 6783 7064 6784 7065 /* I/O operation lookup arrays. */ … … 6907 7188 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the 6908 7189 * execution engines about whether hyper BPs and such are pending. */ 7190 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_DR7); 6909 7191 uint32_t const uDr7 = pCtx->dr[7]; 6910 7192 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK) … … 6973 7255 { 6974 7256 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7257 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 7258 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 6975 7259 6976 7260 PVM pVM = pVCpu->CTX_SUFF(pVM); 6977 7261 Assert(pVM->hm.s.fNestedPaging); 6978 6979 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();6980 7262 6981 7263 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */ … … 7103 7385 { 7104 7386 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7105 7106 7387 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7107 7388 … … 7135 7416 { 7136 7417 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7418 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 7419 7137 7420 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall); 7138 7421 … … 7189 7472 { 7190 7473 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7474 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR0); 7191 7475 Assert(!(pCtx->cr0 & X86_CR0_NE)); 7192 7476 … … 7223 7507 { 7224 7508 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7225 7509 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 7226 7510 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7227 7511 … … 7329 7613 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. 
*/ 7330 7614 if ( CPUMIsGuestInSvmNestedHwVirtMode(pCtx) 7331 && HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx,X86_XCPT_PF))7615 && HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF)) 7332 7616 return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_XCPT_PF, uErrCode, uFaultAddress)); 7333 7617 #endif … … 7369 7653 if (pVCpu->hm.s.fGIMTrapXcptUD) 7370 7654 { 7655 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 7371 7656 uint8_t cbInstr = 0; 7372 7657 VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, pCtx, NULL /* pDis */, &cbInstr); … … 7405 7690 { 7406 7691 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7692 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 7407 7693 7408 7694 /* Paranoia; Ensure we cannot be called as a result of event delivery. */ … … 7442 7728 { 7443 7729 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7444 7445 /* If this #DB is the result of delivering an event, go back to the interpreter. */ 7730 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 7446 7731 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7732 7447 7733 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending)) 7448 7734 { … … 7495 7781 { 7496 7782 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7497 7498 7783 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7499 7784 … … 7516 7801 { 7517 7802 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7518 7803 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 7519 7804 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7520 7805 … … 7542 7827 { 7543 7828 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7544 7545 7829 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7546 7830 … … 7586 7870 { 7587 7871 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7872 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK 7873 | CPUMCTX_EXTRN_HWVIRT); 7588 7874 7589 7875 #ifdef VBOX_STRICT … … 7608 7894 { 7609 7895 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7896 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK 7897 | CPUMCTX_EXTRN_HWVIRT); 7610 7898 7611 7899 /* … … 7629 7917 { 7630 7918 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7919 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK 7920 | CPUMCTX_EXTRN_FS 7921 | CPUMCTX_EXTRN_GS 7922 | CPUMCTX_EXTRN_TR 7923 | CPUMCTX_EXTRN_LDTR 7924 | CPUMCTX_EXTRN_KERNEL_GS_BASE 7925 | CPUMCTX_EXTRN_SYSCALL_MSRS 7926 | CPUMCTX_EXTRN_SYSENTER_MSRS); 7631 7927 7632 7928 #ifdef VBOX_STRICT … … 7644 7940 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS 7645 7941 | HM_CHANGED_GUEST_TR 7646 | HM_CHANGED_GUEST_LDTR); 7942 | HM_CHANGED_GUEST_LDTR 7943 | HM_CHANGED_GUEST_SYSENTER_CS_MSR 7944 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR 7945 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 7647 7946 } 7648 7947 return VBOXSTRICTRC_VAL(rcStrict); … … 7656 7955 { 7657 7956 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7957 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 7658 7958 7659 7959 #ifdef VBOX_STRICT … … 7676 7976 { 7677 7977 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7978 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 7979 7678 7980 /** @todo Stat. */ 7679 7981 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpga); */ … … 7690 7992 { 7691 7993 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7994 /** @todo Only save and reload what VMRUN changes (e.g. skip LDTR, TR etc). */ 7995 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 7692 7996 7693 7997 VBOXSTRICTRC rcStrict; … … 7711 8015 { 7712 8016 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7713 7714 /* If this #DB is the result of delivering an event, go back to the interpreter. 
*/7715 8017 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 8018 7716 8019 if (pVCpu->hm.s.Event.fPending) 7717 8020 { … … 7732 8035 { 7733 8036 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7734 7735 8037 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7736 8038 -
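
The HMSVMR0.cpp hunks above all apply one pattern: each #VMEXIT handler now states, via HMSVM_CPUMCTX_IMPORT_STATE and a CPUMCTX_EXTRN_* mask, exactly which pieces of guest state it needs, so only that state is pulled out of the VMCB before the handler's interpretation logic runs. The sketch below is a minimal, self-contained model of that idea; the types, flag names and ImportState() helper are invented for illustration and are not the real VirtualBox structures.

    #include <stdint.h>
    #include <stdio.h>

    /* Invented, simplified stand-ins -- not the real CPUMCTX/VMCB layouts. */
    #define EXTRN_RIP  UINT64_C(0x01)   /* field still lives in the hardware VMCB */
    #define EXTRN_CR0  UINT64_C(0x02)
    #define EXTRN_SS   UINT64_C(0x04)
    #define EXTRN_ALL  (EXTRN_RIP | EXTRN_CR0 | EXTRN_SS)

    typedef struct { uint64_t rip, cr0, ss; } VMCB;          /* hardware-owned copy */
    typedef struct { uint64_t fExtrn, rip, cr0, ss; } CTX;   /* software context    */

    /* Copy only the still-external fields the caller asked for, then clear
       the corresponding "external" bits so later imports become no-ops. */
    static void ImportState(CTX *pCtx, const VMCB *pVmcb, uint64_t fWhat)
    {
        fWhat &= pCtx->fExtrn;                 /* skip what is already imported */
        if (fWhat & EXTRN_RIP) pCtx->rip = pVmcb->rip;
        if (fWhat & EXTRN_CR0) pCtx->cr0 = pVmcb->cr0;
        if (fWhat & EXTRN_SS)  pCtx->ss  = pVmcb->ss;
        pCtx->fExtrn &= ~fWhat;
    }

    /* An exit handler pulls in just what it needs before interpreting. */
    static void ExitRdpmcLikeHandler(CTX *pCtx, const VMCB *pVmcb)
    {
        ImportState(pCtx, pVmcb, EXTRN_CR0 | EXTRN_SS);
        printf("cr0=%#llx ss=%#llx (rip still external: %s)\n",
               (unsigned long long)pCtx->cr0, (unsigned long long)pCtx->ss,
               (pCtx->fExtrn & EXTRN_RIP) ? "yes" : "no");
    }

    int main(void)
    {
        VMCB vmcb = { 0x1000, 0x80000011, 0x18 };
        CTX  ctx  = { EXTRN_ALL, 0, 0, 0 };
        ExitRdpmcLikeHandler(&ctx, &vmcb);
        return 0;
    }

The key property is that the "external" bits are cleared as fields are imported, so importing the same field twice costs nothing and full imports (the HMSVM_CPUMCTX_EXTRN_ALL case) remain available for handlers that fall back to the generic interpreter.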
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
r69474 r72643 38 38 #ifdef IN_RING0 39 39 40 VMMR0DECL(int) SVMR0GlobalInit(void);41 VMMR0DECL(void) SVMR0GlobalTerm(void);42 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);43 VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);44 VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem,45 void *pvArg);46 VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);47 VMMR0DECL(int) SVMR0InitVM(PVM pVM);48 VMMR0DECL(int) SVMR0TermVM(PVM pVM);49 VMMR0DECL(int) SVMR0SetupVM(PVM pVM);40 VMMR0DECL(int) SVMR0GlobalInit(void); 41 VMMR0DECL(void) SVMR0GlobalTerm(void); 42 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu); 43 VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit); 44 VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, 45 bool fEnabledBySystem, void *pvArg); 46 VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys); 47 VMMR0DECL(int) SVMR0InitVM(PVM pVM); 48 VMMR0DECL(int) SVMR0TermVM(PVM pVM); 49 VMMR0DECL(int) SVMR0SetupVM(PVM pVM); 50 50 VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 51 VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu); 51 VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu); 52 VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat); 53 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt); 52 54 53 55 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 54 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);55 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,56 uint32_t *paParam);56 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu); 57 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam, 58 uint32_t *paParam); 57 59 #endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) */ 58 60 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72642 r72643 131 131 * are used. Maybe later this can be extended (i.e. Nested Virtualization). 132 132 */ 133 #define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)134 #define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)135 #define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)133 #define HMVMX_VMCS_STATE_CLEAR RT_BIT(0) 134 #define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1) 135 #define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2) 136 136 /** @} */ 137 138 /** 139 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the 140 * guest using hardware-assisted VMX. 141 * 142 * This excludes state like GPRs (other than RSP) which are always are 143 * swapped and restored across the world-switch and also registers like EFER, 144 * MSR which cannot be modified by the guest without causing a VM-exit. 145 */ 146 #define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \ 147 | CPUMCTX_EXTRN_RFLAGS \ 148 | CPUMCTX_EXTRN_SREG_MASK \ 149 | CPUMCTX_EXTRN_TABLE_MASK \ 150 | CPUMCTX_EXTRN_SYSENTER_MSRS \ 151 | CPUMCTX_EXTRN_SYSCALL_MSRS \ 152 | CPUMCTX_EXTRN_KERNEL_GS_BASE \ 153 | CPUMCTX_EXTRN_TSC_AUX \ 154 | CPUMCTX_EXTRN_OTHER_MSRS \ 155 | CPUMCTX_EXTRN_CR0 \ 156 | CPUMCTX_EXTRN_CR3 \ 157 | CPUMCTX_EXTRN_CR4 \ 158 | CPUMCTX_EXTRN_DR7) 137 159 138 160 /** … … 191 213 return VERR_VMX_UNEXPECTED_EXIT; \ 192 214 } while (0) 215 216 /** Macro for saving segment registers from VMCS into the guest-CPU 217 * context. */ 218 #ifdef VMX_USE_CACHED_VMCS_ACCESSES 219 # define HMVMX_SAVE_SREG(Sel, a_pCtxSelReg) \ 220 hmR0VmxSaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \ 221 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg)) 222 #else 223 # define HMVMX_SAVE_SREG(Sel, a_pCtxSelReg) \ 224 hmR0VmxSaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \ 225 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg)) 226 #endif 193 227 194 228 … … 1929 1963 1930 1964 /** 1931 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,1932 * otherwise there is nothing really to invalidate.1933 *1934 * @returns VBox status code.1935 * @param pVM The cross context VM structure.1936 * @param pVCpu The cross context virtual CPU structure.1937 * @param GCPhys Guest physical address of the page to invalidate.1938 */1939 VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)1940 {1941 NOREF(pVM); NOREF(GCPhys);1942 LogFlowFunc(("%RGp\n", GCPhys));1943 1944 /*1945 * We cannot flush a page by guest-physical address. invvpid takes only a linear address while invept only flushes1946 * by EPT not individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().1947 * This function might be called in a loop. This should cause a flush-by-EPT if EPT is in use. See @bugref{6568}.1948 */1949 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);1950 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);1951 return VINF_SUCCESS;1952 }1953 1954 1955 /**1956 1965 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the 1957 1966 * case where neither EPT nor VPID is supported by the CPU. … … 6747 6756 6748 6757 /** 6749 * Reads a guest segment register from the current VMCS into the guest-CPU6758 * Saves a guest segment register from the current VMCS into the guest-CPU 6750 6759 * context. 6751 6760 * … … 6759 6768 * 6760 6769 * @remarks No-long-jump zone!!! 6761 * @remarks Never call this function directly!!! 
Use the VMXLOCAL_READ_SEG()6762 * macro as that takes care of whether to read from the VMCS cache or6763 * not.6764 */ 6765 DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,6770 * @remarks Never call this function directly!!! Use the 6771 * HMVMX_SAVE_SREG() macro as that takes care of whether to read 6772 * from the VMCS cache or not. 6773 */ 6774 static int hmR0VmxSaveSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess, 6766 6775 PCPUMSELREG pSelReg) 6767 6776 { … … 6824 6833 } 6825 6834 6826 6827 #ifdef VMX_USE_CACHED_VMCS_ACCESSES6828 # define VMXLOCAL_READ_SEG(Sel, CtxSel) \6829 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \6830 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)6831 #else6832 # define VMXLOCAL_READ_SEG(Sel, CtxSel) \6833 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \6834 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)6835 #endif6836 6837 6838 6835 /** 6839 6836 * Saves the guest segment registers from the current VMCS into the guest-CPU … … 6853 6850 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS)) 6854 6851 { 6852 /** @todo r=ramshankar: Why do we save CR0 here? */ 6855 6853 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS)); 6856 6854 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 6857 6855 AssertRCReturn(rc, rc); 6858 6856 6859 rc = VMXLOCAL_READ_SEG(CS,cs);6860 rc |= VMXLOCAL_READ_SEG(SS,ss);6861 rc |= VMXLOCAL_READ_SEG(DS,ds);6862 rc |= VMXLOCAL_READ_SEG(ES,es);6863 rc |= VMXLOCAL_READ_SEG(FS,fs);6864 rc |= VMXLOCAL_READ_SEG(GS,gs);6857 rc = HMVMX_SAVE_SREG(CS, &pMixedCtx->cs); 6858 rc |= HMVMX_SAVE_SREG(SS, &pMixedCtx->ss); 6859 rc |= HMVMX_SAVE_SREG(DS, &pMixedCtx->ds); 6860 rc |= HMVMX_SAVE_SREG(ES, &pMixedCtx->es); 6861 rc |= HMVMX_SAVE_SREG(FS, &pMixedCtx->fs); 6862 rc |= HMVMX_SAVE_SREG(GS, &pMixedCtx->gs); 6865 6863 AssertRCReturn(rc, rc); 6866 6864 … … 6916 6914 { 6917 6915 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR)); 6918 rc = VMXLOCAL_READ_SEG(LDTR,ldtr);6916 rc = HMVMX_SAVE_SREG(LDTR, &pMixedCtx->ldtr); 6919 6917 AssertRCReturn(rc, rc); 6920 6918 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR); … … 6955 6953 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6956 6954 { 6957 rc = VMXLOCAL_READ_SEG(TR,tr);6955 rc = HMVMX_SAVE_SREG(TR, &pMixedCtx->tr); 6958 6956 AssertRCReturn(rc, rc); 6959 6957 } … … 6962 6960 return rc; 6963 6961 } 6964 6965 #undef VMXLOCAL_READ_SEG6966 6962 6967 6963 … … 7014 7010 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE); 7015 7011 return VINF_SUCCESS; 7012 } 7013 7014 7015 /** 7016 * Worker for VMXR0ImportStateOnDemand. 7017 * 7018 * @returns VBox status code. 7019 * @param pVCpu The cross context virtual CPU structure. 7020 * @param pCtx Pointer to the guest-CPU context. 7021 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 7022 */ 7023 static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 7024 { 7025 int rc = VINF_SUCCESS; 7026 PVM pVM = pVCpu->CTX_SUFF(pVM); 7027 uint64_t u64Val; 7028 uint32_t u32Val; 7029 uint32_t u32Shadow; 7030 7031 /* 7032 * Though we can longjmp to ring-3 due to log-flushes here and get re-invoked 7033 * on the ring-3 callback path, there is no real need to. 
7034 */ 7035 if (VMMRZCallRing3IsEnabled(pVCpu)) 7036 VMMR0LogFlushDisable(pVCpu); 7037 else 7038 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 7039 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat)); 7040 7041 /* 7042 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback() 7043 * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp 7044 * -> continue with VM-exit handling -> hmR0VmxImportGuestState() and here we are. 7045 * 7046 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus 7047 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that 7048 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should 7049 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not! 7050 * 7051 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here. 7052 */ 7053 if (VMMRZCallRing3IsEnabled(pVCpu)) 7054 { 7055 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 7056 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu)); 7057 7058 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)) 7059 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); 7060 7061 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)); 7062 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 7063 7064 VMMR0LogFlushEnable(pVCpu); 7065 } 7066 7067 Assert(!(fWhat & CPUMCTX_EXTRN_KEEPER_HM)); 7068 fWhat &= pCtx->fExtrn; 7069 7070 /* If there is nothing more to import, bail early. */ 7071 if (!(fWhat & HMVMX_CPUMCTX_EXTRN_ALL)) 7072 return VINF_SUCCESS; 7073 7074 /* RIP required while saving interruptibility-state below, see EMSetInhibitInterruptsPC(). */ 7075 if (fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_HM_VMX_INT_STATE)) 7076 { 7077 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); 7078 AssertRCReturn(rc, rc); 7079 pCtx->rip = u64Val; 7080 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RIP); 7081 } 7082 7083 /* RFLAGS and interruptibility-state required while re-evaluating interrupt injection, see hmR0VmxGetGuestIntrState(). */ 7084 if (fWhat & (CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_HM_VMX_INT_STATE)) 7085 { 7086 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RFLAGS, &u64Val); 7087 AssertRCReturn(rc, rc); 7088 pCtx->eflags.u32 = u64Val; 7089 /* Restore eflags for real-on-v86-mode hack. 
*/ 7090 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 7091 { 7092 Assert(pVM->hm.s.vmx.pRealModeTSS); 7093 pCtx->eflags.Bits.u1VM = 0; 7094 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL; 7095 } 7096 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RFLAGS); 7097 } 7098 7099 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE) 7100 { 7101 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val); 7102 AssertRCReturn(rc, rc); 7103 if (!u32Val) 7104 { 7105 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 7106 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 7107 7108 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7109 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 7110 } 7111 else 7112 { 7113 if (u32Val & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS 7114 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)) 7115 { 7116 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 7117 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); 7118 } 7119 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 7120 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 7121 7122 if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) 7123 { 7124 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7125 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 7126 } 7127 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7128 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 7129 } 7130 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_VMX_INT_STATE); 7131 } 7132 7133 if (fWhat & CPUMCTX_EXTRN_RSP) 7134 { 7135 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); 7136 AssertRCReturn(rc, rc); 7137 pCtx->rsp = u64Val; 7138 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RSP); 7139 } 7140 7141 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 7142 { 7143 if (fWhat & CPUMCTX_EXTRN_CS) 7144 { 7145 rc = HMVMX_SAVE_SREG(CS, &pCtx->cs); 7146 AssertRCReturn(rc, rc); 7147 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 7148 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u; 7149 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CS); 7150 } 7151 if (fWhat & CPUMCTX_EXTRN_SS) 7152 { 7153 rc = HMVMX_SAVE_SREG(SS, &pCtx->ss); 7154 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 7155 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u; 7156 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SS); 7157 } 7158 if (fWhat & CPUMCTX_EXTRN_DS) 7159 { 7160 rc = HMVMX_SAVE_SREG(DS, &pCtx->ds); 7161 AssertRCReturn(rc, rc); 7162 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 7163 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u; 7164 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DS); 7165 } 7166 if (fWhat & CPUMCTX_EXTRN_ES) 7167 { 7168 rc = HMVMX_SAVE_SREG(ES, &pCtx->es); 7169 AssertRCReturn(rc, rc); 7170 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 7171 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u; 7172 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_ES); 7173 } 7174 if (fWhat & CPUMCTX_EXTRN_FS) 7175 { 7176 rc = HMVMX_SAVE_SREG(FS, &pCtx->fs); 7177 AssertRCReturn(rc, rc); 7178 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 7179 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u; 7180 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_FS); 7181 } 7182 if (fWhat & CPUMCTX_EXTRN_GS) 7183 { 7184 rc = HMVMX_SAVE_SREG(GS, &pCtx->gs); 7185 AssertRCReturn(rc, rc); 7186 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 7187 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u; 7188 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GS); 7189 } 7190 } 7191 7192 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK) 7193 { 
7194 if (fWhat & CPUMCTX_EXTRN_LDTR) 7195 { 7196 rc = HMVMX_SAVE_SREG(LDTR, &pCtx->ldtr); 7197 AssertRCReturn(rc, rc); 7198 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_LDTR); 7199 } 7200 7201 if (fWhat & CPUMCTX_EXTRN_GDTR) 7202 { 7203 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); 7204 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); 7205 AssertRCReturn(rc, rc); 7206 pCtx->gdtr.pGdt = u64Val; 7207 pCtx->gdtr.cbGdt = u32Val; 7208 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GDTR); 7209 } 7210 7211 /* Guest IDTR. */ 7212 if (fWhat & CPUMCTX_EXTRN_IDTR) 7213 { 7214 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); 7215 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); 7216 AssertRCReturn(rc, rc); 7217 pCtx->idtr.pIdt = u64Val; 7218 pCtx->idtr.cbIdt = u32Val; 7219 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_IDTR); 7220 } 7221 7222 /* Guest TR. */ 7223 if (fWhat & CPUMCTX_EXTRN_TR) 7224 { 7225 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */ 7226 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 7227 { 7228 rc = HMVMX_SAVE_SREG(TR, &pCtx->tr); 7229 AssertRCReturn(rc, rc); 7230 } 7231 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_TR); 7232 } 7233 } 7234 7235 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 7236 { 7237 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); 7238 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); 7239 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); 7240 pCtx->SysEnter.cs = u32Val; 7241 AssertRCReturn(rc, rc); 7242 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSENTER_MSRS); 7243 } 7244 7245 #if HC_ARCH_BITS == 64 7246 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 7247 { 7248 if ( pVM->hm.s.fAllow64BitGuests 7249 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)) 7250 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); 7251 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KERNEL_GS_BASE); 7252 } 7253 7254 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 7255 { 7256 if ( pVM->hm.s.fAllow64BitGuests 7257 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)) 7258 { 7259 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); 7260 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR); 7261 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK); 7262 } 7263 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSCALL_MSRS); 7264 } 7265 #endif 7266 7267 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)) 7268 #if HC_ARCH_BITS == 32 7269 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS)) 7270 #endif 7271 ) 7272 { 7273 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 7274 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs; 7275 for (uint32_t i = 0; i < cMsrs; i++, pMsr++) 7276 { 7277 switch (pMsr->u32Msr) 7278 { 7279 #if HC_ARCH_BITS == 32 7280 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break; 7281 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break; 7282 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break; 7283 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value; break; 7284 #endif 7285 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break; 7286 case MSR_K8_TSC_AUX: 7287 { 7288 /* CPUMSetGuestTscAux alters fExtrn without using atomics, so disable preemption temporarily. 
*/ 7289 HM_DISABLE_PREEMPT(); 7290 CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); 7291 HM_RESTORE_PREEMPT(); 7292 break; 7293 } 7294 default: 7295 { 7296 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs)); 7297 pVCpu->hm.s.u32HMError = pMsr->u32Msr; 7298 return VERR_HM_UNEXPECTED_LD_ST_MSR; 7299 } 7300 } 7301 } 7302 ASMAtomicUoAndU64(&pCtx->fExtrn, ~( CPUMCTX_EXTRN_TSC_AUX 7303 | CPUMCTX_EXTRN_OTHER_MSRS 7304 #if HC_ARCH_BITS == 32 7305 | CPUMCTX_EXTRN_KERNEL_GS_BASE 7306 | CPUMCTX_EXTRN_SYSCALL_MSRS 7307 #endif 7308 )); 7309 } 7310 7311 if (fWhat & CPUMCTX_EXTRN_DR7) 7312 { 7313 if (!pVCpu->hm.s.fUsingHyperDR7) 7314 { 7315 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */ 7316 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); 7317 AssertRCReturn(rc, rc); 7318 pCtx->dr[7] = u32Val; 7319 } 7320 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR7); 7321 } 7322 7323 if (fWhat & CPUMCTX_EXTRN_CR_MASK) 7324 { 7325 /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */ 7326 if (fWhat & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3)) 7327 { 7328 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); 7329 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow); 7330 AssertRCReturn(rc, rc); 7331 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR0Mask) 7332 | (u32Shadow & pVCpu->hm.s.vmx.u32CR0Mask); 7333 CPUMSetGuestCR0(pVCpu, u32Val); 7334 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR0); 7335 } 7336 7337 /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). */ 7338 if (fWhat & (CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3)) 7339 { 7340 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val); 7341 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow); 7342 AssertRCReturn(rc, rc); 7343 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR4Mask) 7344 | (u32Shadow & pVCpu->hm.s.vmx.u32CR4Mask); 7345 CPUMSetGuestCR4(pVCpu, u32Val); 7346 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR4); 7347 } 7348 7349 if (fWhat & CPUMCTX_EXTRN_CR3) 7350 { 7351 if ( pVM->hm.s.vmx.fUnrestrictedGuest 7352 || ( pVM->hm.s.fNestedPaging 7353 && CPUMIsGuestPagingEnabledEx(pCtx))) 7354 { 7355 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val); 7356 if (pCtx->cr3 != u64Val) 7357 { 7358 CPUMSetGuestCR3(pVCpu, u64Val); 7359 if (VMMRZCallRing3IsEnabled(pVCpu)) 7360 { 7361 PGMUpdateCR3(pVCpu, u64Val); 7362 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)); 7363 } 7364 else 7365 { 7366 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/ 7367 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3); 7368 } 7369 } 7370 7371 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */ 7372 if (CPUMIsGuestInPAEModeEx(pCtx)) 7373 { 7374 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); 7375 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); 7376 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); 7377 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); 7378 AssertRCReturn(rc, rc); 7379 7380 if (VMMRZCallRing3IsEnabled(pVCpu)) 7381 { 7382 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); 7383 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 7384 } 7385 else 7386 { 7387 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). 
*/ 7388 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES); 7389 } 7390 } 7391 } 7392 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR3); 7393 } 7394 } 7395 7396 /* If everything has been imported, clear the HM keeper bit. */ 7397 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL)) 7398 { 7399 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KEEPER_HM); 7400 Assert(!pCtx->fExtrn); 7401 } 7402 7403 return VINF_SUCCESS; 7404 } 7405 7406 7407 /** 7408 * Saves the guest state from the VMCS into the guest-CPU context. 7409 * 7410 * @returns VBox status code. 7411 * @param pVCpu The cross context virtual CPU structure. 7412 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context. 7413 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 7414 */ 7415 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 7416 { 7417 return hmR0VmxImportGuestState(pVCpu, pCtx, fWhat); 7016 7418 } 7017 7419 … … 7236 7638 AssertRCReturn(rc3, rc3); 7237 7639 7640 /** @todo r=ramshankar: VMCPU_FF_HM_UPDATE_CR3 and VMCPU_FF_HM_UPDATE_PAE_PDPES 7641 * are not part of VMCPU_FF_HP_R0_PRE_HM_MASK. Hence, the two if 7642 * statements below won't ever be entered. Consider removing it or 7643 * determine if it is necessary to add these flags to VMCPU_FF_HP_R0_PRE_HM_MASK. */ 7238 7644 /* Pending HM CR3 sync. */ 7239 7645 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) … … 10818 11224 { 10819 11225 # ifdef DEBUG_ramshankar 10820 # define RETURN_EXIT_CALL(a_CallExpr) \11226 # define VMEXIT_CALL_RET(a_CallExpr) \ 10821 11227 do { \ 10822 11228 int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); \ … … 10826 11232 } while (0) 10827 11233 # else 10828 # define RETURN_EXIT_CALL(a_CallExpr) return a_CallExpr11234 # define VMEXIT_CALL_RET(a_CallExpr) return a_CallExpr 10829 11235 # endif 10830 11236 switch (rcReason) 10831 11237 { 10832 case VMX_EXIT_EPT_MISCONFIG: RETURN_EXIT_CALL(hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));10833 case VMX_EXIT_EPT_VIOLATION: RETURN_EXIT_CALL(hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));10834 case VMX_EXIT_IO_INSTR: RETURN_EXIT_CALL(hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));10835 case VMX_EXIT_CPUID: RETURN_EXIT_CALL(hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));10836 case VMX_EXIT_RDTSC: RETURN_EXIT_CALL(hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));10837 case VMX_EXIT_RDTSCP: RETURN_EXIT_CALL(hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));10838 case VMX_EXIT_APIC_ACCESS: RETURN_EXIT_CALL(hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));10839 case VMX_EXIT_XCPT_OR_NMI: RETURN_EXIT_CALL(hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));10840 case VMX_EXIT_MOV_CRX: RETURN_EXIT_CALL(hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));10841 case VMX_EXIT_EXT_INT: RETURN_EXIT_CALL(hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));10842 case VMX_EXIT_INT_WINDOW: RETURN_EXIT_CALL(hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));10843 case VMX_EXIT_MWAIT: RETURN_EXIT_CALL(hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient));10844 case VMX_EXIT_MONITOR: RETURN_EXIT_CALL(hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));10845 case VMX_EXIT_TASK_SWITCH: RETURN_EXIT_CALL(hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));10846 case VMX_EXIT_PREEMPT_TIMER: RETURN_EXIT_CALL(hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));10847 case VMX_EXIT_RDMSR: RETURN_EXIT_CALL(hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));10848 case VMX_EXIT_WRMSR: 
RETURN_EXIT_CALL(hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));10849 case VMX_EXIT_MOV_DRX: RETURN_EXIT_CALL(hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));10850 case VMX_EXIT_TPR_BELOW_THRESHOLD: RETURN_EXIT_CALL(hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));10851 case VMX_EXIT_HLT: RETURN_EXIT_CALL(hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));10852 case VMX_EXIT_INVD: RETURN_EXIT_CALL(hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));10853 case VMX_EXIT_INVLPG: RETURN_EXIT_CALL(hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));10854 case VMX_EXIT_RSM: RETURN_EXIT_CALL(hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));10855 case VMX_EXIT_MTF: RETURN_EXIT_CALL(hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));10856 case VMX_EXIT_PAUSE: RETURN_EXIT_CALL(hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));10857 case VMX_EXIT_XDTR_ACCESS: RETURN_EXIT_CALL(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));10858 case VMX_EXIT_TR_ACCESS: RETURN_EXIT_CALL(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));10859 case VMX_EXIT_WBINVD: RETURN_EXIT_CALL(hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));10860 case VMX_EXIT_XSETBV: RETURN_EXIT_CALL(hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));10861 case VMX_EXIT_RDRAND: RETURN_EXIT_CALL(hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));10862 case VMX_EXIT_INVPCID: RETURN_EXIT_CALL(hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));10863 case VMX_EXIT_GETSEC: RETURN_EXIT_CALL(hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));10864 case VMX_EXIT_RDPMC: RETURN_EXIT_CALL(hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));10865 case VMX_EXIT_VMCALL: RETURN_EXIT_CALL(hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));11238 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient)); 11239 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient)); 11240 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient)); 11241 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient)); 11242 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient)); 11243 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient)); 11244 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient)); 11245 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient)); 11246 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient)); 11247 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient)); 11248 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient)); 11249 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient)); 11250 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient)); 11251 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient)); 11252 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient)); 11253 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient)); 11254 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient)); 11255 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient)); 
11256 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient)); 11257 case VMX_EXIT_HLT: VMEXIT_CALL_RET(hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient)); 11258 case VMX_EXIT_INVD: VMEXIT_CALL_RET(hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient)); 11259 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient)); 11260 case VMX_EXIT_RSM: VMEXIT_CALL_RET(hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient)); 11261 case VMX_EXIT_MTF: VMEXIT_CALL_RET(hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient)); 11262 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient)); 11263 case VMX_EXIT_XDTR_ACCESS: VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient)); 11264 case VMX_EXIT_TR_ACCESS: VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient)); 11265 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient)); 11266 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient)); 11267 case VMX_EXIT_RDRAND: VMEXIT_CALL_RET(hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient)); 11268 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient)); 11269 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient)); 11270 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient)); 11271 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient)); 10866 11272 10867 11273 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); … … 10896 11302 return hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient); 10897 11303 } 10898 #undef RETURN_EXIT_CALL11304 #undef VMEXIT_CALL_RET 10899 11305 } 10900 11306 #endif /* !HMVMX_USE_FUNCTION_TABLE */ -
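
Besides adding the VMX counterpart of the on-demand import (hmR0VmxImportGuestState / VMXR0ImportStateOnDemand), the HMVMXR0.cpp hunks promote the old local VMXLOCAL_READ_SEG macro to a file-wide HMVMX_SAVE_SREG macro that token-pastes the segment name into the VMCS field constants and switches to a cached base-field index when VMX_USE_CACHED_VMCS_ACCESSES is defined. The fragment below models only that token-pasting trick; the field IDs, the reader stub and the USE_CACHED_FIELDS option are invented for illustration.

    #include <stdio.h>

    /* Made-up field IDs standing in for the real VMX_VMCS_* constants. */
    #define FIELD_GUEST_CS_SEL             0x0802
    #define FIELD_GUEST_CS_LIMIT           0x4802
    #define FIELD_GUEST_CS_BASE            0x6808
    #define FIELD_GUEST_CS_BASE_CACHE_IDX  0
    #define FIELD_GUEST_SS_SEL             0x0804
    #define FIELD_GUEST_SS_LIMIT           0x4804
    #define FIELD_GUEST_SS_BASE            0x680a
    #define FIELD_GUEST_SS_BASE_CACHE_IDX  1

    static int ReadSeg(unsigned idxSel, unsigned idxLimit, unsigned idxBase)
    {
        printf("sel field %#x, limit field %#x, base field %#x\n", idxSel, idxLimit, idxBase);
        return 0;
    }

    /* One macro expands to the right trio of field IDs for any segment name;
       a compile-time option swaps in the cached base index, mirroring the
       VMX_USE_CACHED_VMCS_ACCESSES split. */
    #ifdef USE_CACHED_FIELDS
    # define SAVE_SREG(Sel) \
        ReadSeg(FIELD_GUEST_##Sel##_SEL, FIELD_GUEST_##Sel##_LIMIT, FIELD_GUEST_##Sel##_BASE_CACHE_IDX)
    #else
    # define SAVE_SREG(Sel) \
        ReadSeg(FIELD_GUEST_##Sel##_SEL, FIELD_GUEST_##Sel##_LIMIT, FIELD_GUEST_##Sel##_BASE)
    #endif

    int main(void)
    {
        SAVE_SREG(CS);
        SAVE_SREG(SS);
        return 0;
    }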
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r69474 r72643 29 29 #ifdef IN_RING0 30 30 31 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu); 32 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit); 33 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem, 34 void *pvMsrs); 35 VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys); 36 VMMR0DECL(int) VMXR0GlobalInit(void); 37 VMMR0DECL(void) VMXR0GlobalTerm(void); 38 VMMR0DECL(int) VMXR0InitVM(PVM pVM); 39 VMMR0DECL(int) VMXR0TermVM(PVM pVM); 40 VMMR0DECL(int) VMXR0SetupVM(PVM pVM); 41 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu); 31 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu); 32 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit); 33 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, 34 bool fEnabledBySystem, void *pvMsrs); 35 VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys); 36 VMMR0DECL(int) VMXR0GlobalInit(void); 37 VMMR0DECL(void) VMXR0GlobalTerm(void); 38 VMMR0DECL(int) VMXR0InitVM(PVM pVM); 39 VMMR0DECL(int) VMXR0TermVM(PVM pVM); 40 VMMR0DECL(int) VMXR0SetupVM(PVM pVM); 41 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu); 42 VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt); 43 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat); 42 44 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 43 DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); 44 DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); 45 45 DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); 46 DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); 46 47 47 48 # if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 48 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);49 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,50 uint32_t *paParam);49 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); 50 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam, 51 uint32_t *paParam); 51 52 # endif 52 53 -
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r72178 r72643 1433 1433 SSMR3PutU16(pSSM, pGstCtx->hwvirt.svm.cPauseFilterThreshold); 1434 1434 SSMR3PutBool(pSSM, pGstCtx->hwvirt.svm.fInterceptEvents); 1435 SSMR3PutBool(pSSM, pGstCtx->hwvirt.svm.fHMCachedVmcb);1436 1435 SSMR3PutStructEx(pSSM, &pGstCtx->hwvirt.svm.HostState, sizeof(pGstCtx->hwvirt.svm.HostState), 0 /* fFlags */, 1437 1436 g_aSvmHwvirtHostState, NULL /* pvUser */); … … 1674 1673 SSMR3GetU16(pSSM, &pGstCtx->hwvirt.svm.cPauseFilterThreshold); 1675 1674 SSMR3GetBool(pSSM, &pGstCtx->hwvirt.svm.fInterceptEvents); 1676 SSMR3GetBool(pSSM, &pGstCtx->hwvirt.svm.fHMCachedVmcb);1677 1675 SSMR3GetStructEx(pSSM, &pGstCtx->hwvirt.svm.HostState, sizeof(pGstCtx->hwvirt.svm.HostState), 1678 1676 0 /* fFlags */, g_aSvmHwvirtHostState, NULL /* pvUser */); -
trunk/src/VBox/VMM/VMMR3/EM.cpp
r72642 r72643 1886 1886 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR)) 1887 1887 { 1888 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); 1888 1889 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0); 1889 1890 if (RT_SUCCESS(rcStrict)) … … 1904 1905 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */ 1905 1906 /** @todo this really isn't nice, should properly handle this */ 1907 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK); 1906 1908 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT); 1907 1909 Assert(rc != VINF_PGM_CHANGE_MODE); … … 1926 1928 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR)) 1927 1929 { 1930 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); 1928 1931 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0); 1929 1932 if (RT_SUCCESS(rcStrict)) -
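
The EM.cpp hunks insert CPUM_IMPORT_EXTRN_RET calls so the required exit/injection state is imported before IEMExecSvmVmexit or TRPMR3InjectEvent runs; the _RET suffix follows the usual VirtualBox convention of returning from the calling function if the wrapped operation fails. A generic, stand-alone model of such a bail-out macro (all names invented) is:

    #include <stdio.h>

    #define MY_SUCCESS 0
    #define MY_FAILURE (-1)

    /* Evaluate a status-returning expression; on failure, return that status
       from the *calling* function.  This is the shape of the *_RET macros. */
    #define IMPORT_STATE_RET(a_Expr) \
        do { int rcTmp_ = (a_Expr); if (rcTmp_ != MY_SUCCESS) return rcTmp_; } while (0)

    static int ImportStuff(int fFail)
    {
        return fFail ? MY_FAILURE : MY_SUCCESS;
    }

    static int InjectInterrupt(int fFailImport)
    {
        IMPORT_STATE_RET(ImportStuff(fFailImport));  /* early-out on error */
        printf("state imported, event injected\n");
        return MY_SUCCESS;
    }

    int main(void)
    {
        printf("ok path: %d\n", InjectInterrupt(0));
        printf("error path: %d\n", InjectInterrupt(1));
        return 0;
    }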
trunk/src/VBox/VMM/VMMR3/HM.cpp
r72598 r72643 3491 3491 { 3492 3492 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVM->aCpus[i].hm.s.svm.NstGstVmcbCache; 3493 rc = SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdCRx); 3493 rc = SSMR3PutBool(pSSM, pVmcbNstGstCache->fCacheValid); 3494 rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdCRx); 3494 3495 rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptWrCRx); 3495 3496 rc |= SSMR3PutU16(pSSM, pVmcbNstGstCache->u16InterceptRdDRx); … … 3576 3577 { 3577 3578 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVM->aCpus[i].hm.s.svm.NstGstVmcbCache; 3578 rc = SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdCRx); 3579 rc = SSMR3GetBool(pSSM, &pVmcbNstGstCache->fCacheValid); 3580 rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdCRx); 3579 3581 rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptWrCRx); 3580 3582 rc |= SSMR3GetU16(pSSM, &pVmcbNstGstCache->u16InterceptRdDRx); … … 3687 3689 3688 3690 /** 3689 * Displays the guest VM-exit history.3691 * Displays HM info. 3690 3692 * 3691 3693 * @param pVM The cross context VM structure. … … 3761 3763 && pVM->cpum.ro.GuestFeatures.fSvm) 3762 3764 { 3763 PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);3764 3765 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache; 3765 3766 pHlp->pfnPrintf(pHlp, "CPU[%u]: HM SVM nested-guest VMCB cache\n", pVCpu->idCpu); 3766 pHlp->pfnPrintf(pHlp, " f HMCachedVmcb = %#RTbool\n", pCtx->hwvirt.svm.fHMCachedVmcb);3767 pHlp->pfnPrintf(pHlp, " fCacheValid = %#RTbool\n", pVmcbNstGstCache->fCacheValid); 3767 3768 pHlp->pfnPrintf(pHlp, " u16InterceptRdCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptRdCRx); 3768 3769 pHlp->pfnPrintf(pHlp, " u16InterceptWrCRx = %#RX16\n", pVmcbNstGstCache->u16InterceptWrCRx); -
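
In HM.cpp the nested-guest VMCB cache gains a fCacheValid flag that is now written and read back as the first field of the per-CPU cache block, and the HM info handler prints it in place of the removed fHMCachedVmcb. Saved-state code like this only works if the Put* and Get* sequences stay in lock-step; the toy serializer below (stream format and field names invented) shows the symmetry that has to be preserved.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct { uint8_t buf[64]; size_t off; } STREAM;

    static void     PutBool(STREAM *pS, bool f)     { pS->buf[pS->off++] = (uint8_t)f; }
    static void     PutU16 (STREAM *pS, uint16_t u) { memcpy(&pS->buf[pS->off], &u, sizeof(u)); pS->off += sizeof(u); }
    static bool     GetBool(STREAM *pS)             { return pS->buf[pS->off++] != 0; }
    static uint16_t GetU16 (STREAM *pS)             { uint16_t u; memcpy(&u, &pS->buf[pS->off], sizeof(u)); pS->off += sizeof(u); return u; }

    typedef struct { bool fCacheValid; uint16_t u16InterceptRdCRx; } CACHE;

    /* Save and load must touch the fields in exactly the same order. */
    static void SaveCache(STREAM *pS, const CACHE *pC)
    {
        PutBool(pS, pC->fCacheValid);
        PutU16 (pS, pC->u16InterceptRdCRx);
    }

    static void LoadCache(STREAM *pS, CACHE *pC)
    {
        pC->fCacheValid       = GetBool(pS);
        pC->u16InterceptRdCRx = GetU16(pS);
    }

    int main(void)
    {
        STREAM s   = { {0}, 0 };
        CACHE  src = { true, 0x1234 };
        CACHE  dst = { false, 0 };

        SaveCache(&s, &src);
        s.off = 0;                         /* "rewind" before loading */
        LoadCache(&s, &dst);

        printf("fCacheValid=%d u16InterceptRdCRx=%#x\n", dst.fCacheValid, (unsigned)dst.u16InterceptRdCRx);
        return 0;
    }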
trunk/src/VBox/VMM/include/CPUMInternal.mac
r71833 r72643 243 243 .Guest.hwvirt.svm.cPauseFilterThreshold resw 1 244 244 .Guest.hwvirt.svm.fInterceptEvents resb 1 245 .Guest.hwvirt.svm.fHMCachedVmcb resb 1246 245 alignb 8 247 246 .Guest.hwvirt.svm.pvMsrBitmapR0 RTR0PTR_RES 1 … … 531 530 .Hyper.hwvirt.svm.cPauseFilterThreshold resw 1 532 531 .Hyper.hwvirt.svm.fInterceptEvents resb 1 533 .Hyper.hwvirt.svm.fHMCachedVmcb resb 1534 532 alignb 8 535 533 .Hyper.hwvirt.svm.pvMsrBitmapR0 RTR0PTR_RES 1 -
trunk/src/VBox/VMM/include/HMInternal.h
r72560 r72643 143 143 * @{ 144 144 */ 145 #if 0 146 #define HM_CHANGED_HOST_CONTEXT UINT64_C(0x0000000000000001) 147 #define HM_CHANGED_GUEST_RIP UINT64_C(0x0000000000000004) 148 #define HM_CHANGED_GUEST_RFLAGS UINT64_C(0x0000000000000008) 149 #define HM_CHANGED_GUEST_RAX UINT64_C(0x0000000000000010) 150 #define HM_CHANGED_GUEST_RCX UINT64_C(0x0000000000000020) 151 #define HM_CHANGED_GUEST_RDX UINT64_C(0x0000000000000040) 152 #define HM_CHANGED_GUEST_RBX UINT64_C(0x0000000000000080) 153 #define HM_CHANGED_GUEST_RSP UINT64_C(0x0000000000000100) 154 #define HM_CHANGED_GUEST_RBP UINT64_C(0x0000000000000200) 155 #define HM_CHANGED_GUEST_RSI UINT64_C(0x0000000000000400) 156 #define HM_CHANGED_GUEST_RDI UINT64_C(0x0000000000000800) 157 #define HM_CHANGED_GUEST_R8_R15 UINT64_C(0x0000000000001000) 158 #define HM_CHANGED_GUEST_GPRS_MASK UINT64_C(0x0000000000001ff0) 159 #define HM_CHANGED_GUEST_ES UINT64_C(0x0000000000002000) 160 #define HM_CHANGED_GUEST_CS UINT64_C(0x0000000000004000) 161 #define HM_CHANGED_GUEST_SS UINT64_C(0x0000000000008000) 162 #define HM_CHANGED_GUEST_DS UINT64_C(0x0000000000010000) 163 #define HM_CHANGED_GUEST_FS UINT64_C(0x0000000000020000) 164 #define HM_CHANGED_GUEST_GS UINT64_C(0x0000000000040000) 165 #define HM_CHANGED_GUEST_SREG_MASK UINT64_C(0x000000000007e000) 166 #define HM_CHANGED_GUEST_GDTR UINT64_C(0x0000000000080000) 167 #define HM_CHANGED_GUEST_IDTR UINT64_C(0x0000000000100000) 168 #define HM_CHANGED_GUEST_LDTR UINT64_C(0x0000000000200000) 169 #define HM_CHANGED_GUEST_TR UINT64_C(0x0000000000400000) 170 #define HM_CHANGED_GUEST_TABLE_MASK UINT64_C(0x0000000000780000) 171 #define HM_CHANGED_GUEST_CR0 UINT64_C(0x0000000000800000) 172 #define HM_CHANGED_GUEST_CR2 UINT64_C(0x0000000001000000) 173 #define HM_CHANGED_GUEST_CR3 UINT64_C(0x0000000002000000) 174 #define HM_CHANGED_GUEST_CR4 UINT64_C(0x0000000004000000) 175 #define HM_CHANGED_GUEST_CR_MASK UINT64_C(0x0000000007800000) 176 #define HM_CHANGED_GUEST_APIC_TPR UINT64_C(0x0000000008000000) 177 #define HM_CHANGED_GUEST_EFER UINT64_C(0x0000000010000000) 178 #define HM_CHANGED_GUEST_DR0_DR3 UINT64_C(0x0000000020000000) 179 #define HM_CHANGED_GUEST_DR6 UINT64_C(0x0000000040000000) 180 #define HM_CHANGED_GUEST_DR7 UINT64_C(0x0000000080000000) 181 #define HM_CHANGED_GUEST_DR_MASK UINT64_C(0x00000000e0000000) 182 #define HM_CHANGED_GUEST_X87 UINT64_C(0x0000000100000000) 183 #define HM_CHANGED_GUEST_SSE_AVX UINT64_C(0x0000000200000000) 184 #define HM_CHANGED_GUEST_OTHER_XSAVE UINT64_C(0x0000000400000000) 185 #define HM_CHANGED_GUEST_XCRx UINT64_C(0x0000000800000000) 186 #define HM_CHANGED_GUEST_KERNEL_GS_BASE UINT64_C(0x0000001000000000) 187 #define HM_CHANGED_GUEST_SYSCALL_MSRS UINT64_C(0x0000002000000000) 188 #define HM_CHANGED_GUEST_SYSENTER_MSRS UINT64_C(0x0000004000000000) 189 #define HM_CHANGED_GUEST_TSC_AUX UINT64_C(0x0000008000000000) 190 #define HM_CHANGED_GUEST_OTHER_MSRS UINT64_C(0x0000010000000000) 191 #define HM_CHANGED_GUEST_ALL_MSRS ( HM_CHANGED_GUEST_EFER \ 192 | HM_CHANGED_GUEST_KERNEL_GS_BASE \ 193 | HM_CHANGED_GUEST_SYSCALL_MSRS \ 194 | HM_CHANGED_GUEST_SYSENTER_MSRS \ 195 | HM_CHANGED_GUEST_TSC_AUX \ 196 | HM_CHANGED_GUEST_OTHER_MSRS) 197 #define HM_CHANGED_GUEST_HWVIRT UINT64_C(0x0000020000000000) 198 #define HM_CHANGED_GUEST_CONTEXT UINT64_C(0x000003fffffffffc) 199 200 #define HM_CHANGED_KEEPER_STATE_MASK UINT64_C(0xffff000000000000) 201 202 #define HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS UINT64_C(0x0001000000000000) 203 #define HM_CHANGED_VMX_GUEST_AUTO_MSRS UINT64_C(0x0002000000000000) 204 #define 
HM_CHANGED_VMX_GUEST_LAZY_MSRS UINT64_C(0x0004000000000000) 205 #define HM_CHANGED_VMX_ENTRY_CTLS UINT64_C(0x0008000000000000) 206 #define HM_CHANGED_VMX_EXIT_CTLS UINT64_C(0x0010000000000000) 207 #define HM_CHANGED_VMX_MASK UINT64_C(0x001f000000000000) 208 209 #define HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS UINT64_C(0x0001000000000000) 210 #define HM_CHANGED_SVM_MASK UINT64_C(0x0001000000000000) 211 212 #define HM_CHANGED_HOST_GUEST_SHARED_STATE ( HM_CHANGED_GUEST_CR0 \ 213 | HM_CHANGED_GUEST_DR_MASK \ 214 | HM_CHANGED_VMX_GUEST_LAZY_MSRS) 215 216 #define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_CONTEXT 217 | HM_CHANGED_KEEPER_STATE_MASK) 218 #endif 219 145 220 #define HM_CHANGED_GUEST_CR0 RT_BIT(0) /* Shared */ 146 221 #define HM_CHANGED_GUEST_CR3 RT_BIT(1) -
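
The HMInternal.h hunk parks a proposed re-layout of the HM_CHANGED_* dirty-state flags in an #if 0 block: individual 64-bit bits plus group masks such as HM_CHANGED_GUEST_GPRS_MASK and HM_CHANGED_GUEST_ALL_MSRS that are simply the OR of their members. The short example below (flag names and values invented) shows how such group masks can be built and sanity-checked at compile time.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented per-register dirty bits. */
    #define CHANGED_RIP     UINT64_C(0x0001)
    #define CHANGED_RSP     UINT64_C(0x0002)
    #define CHANGED_RFLAGS  UINT64_C(0x0004)
    #define CHANGED_CR0     UINT64_C(0x0008)
    #define CHANGED_CR3     UINT64_C(0x0010)

    /* Group masks are the OR of their members, so a whole register class can
       be marked dirty in one operation. */
    #define CHANGED_GPRS_MASK  (CHANGED_RIP | CHANGED_RSP | CHANGED_RFLAGS)
    #define CHANGED_CR_MASK    (CHANGED_CR0 | CHANGED_CR3)

    /* Catch accidental overlap between groups at compile time (C11). */
    static_assert((CHANGED_GPRS_MASK & CHANGED_CR_MASK) == 0, "dirty-flag groups overlap");

    int main(void)
    {
        uint64_t fChanged = 0;

        fChanged |= CHANGED_CR_MASK;                         /* all control regs dirty */
        printf("CR0 dirty: %d, RIP dirty: %d\n",
               (fChanged & CHANGED_CR0) != 0, (fChanged & CHANGED_RIP) != 0);
        return 0;
    }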
trunk/src/VBox/VMM/include/IEMInternal.h
r72496 r72643 639 639 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero. 640 640 */ 641 #define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) Assert(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz))) 641 #define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \ 642 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, \ 643 (a_fExtrnMbz))) 642 644 643 645 /** @def IEM_CTX_IMPORT_RET -
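
IEMInternal.h upgrades IEM_CTX_ASSERT from a bare Assert to an AssertMsg that reports both fExtrn and the must-be-zero mask when it fires, so a missing state import is immediately identifiable from the log. A plain-C analogue of a message-carrying assert of this kind (macro name invented) could look like:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Assert that none of the must-be-zero bits are set; if any are, print
       both masks and the offending bits before aborting, so the failure is
       self-describing. */
    #define CTX_ASSERT_NONE_SET(a_fExtrn, a_fExtrnMbz) \
        do { \
            uint64_t fBad_ = (uint64_t)(a_fExtrn) & (uint64_t)(a_fExtrnMbz); \
            if (fBad_) \
            { \
                fprintf(stderr, "fExtrn=%#llx fExtrnMbz=%#llx offending=%#llx\n", \
                        (unsigned long long)(a_fExtrn), \
                        (unsigned long long)(a_fExtrnMbz), \
                        (unsigned long long)fBad_); \
                abort(); \
            } \
        } while (0)

    int main(void)
    {
        uint64_t fExtrn = 0x5;                /* bits 0 and 2 still external */

        CTX_ASSERT_NONE_SET(fExtrn, 0x2);     /* passes: bit 1 is not set    */
        printf("assertion passed\n");
        /* CTX_ASSERT_NONE_SET(fExtrn, 0x4); would report and abort here.    */
        return 0;
    }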
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r72634 r72643 141 141 GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilterThreshold); 142 142 GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fInterceptEvents); 143 GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fHMCachedVmcb);144 143 GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR0); 145 144 GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR3);