Changeset 79345 in vbox
- Timestamp: Jun 26, 2019 9:09:46 AM (6 years ago)
- Location: trunk
- Files: 6 edited
trunk/include/VBox/vmm/hm.h
(r78869 -> r79345)

@@ -182 +182 @@
 VMM_INT_DECL(bool)  HMIsNestedPagingActive(PVM pVM);
 VMM_INT_DECL(bool)  HMIsMsrBitmapActive(PVM pVM);
-# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-VMM_INT_DECL(void)  HMNotifyVmxNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx);
+# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+VMM_INT_DECL(void)  HMNotifyVmxNstGstVmexit(PVMCPU pVCpu);
+VMM_INT_DECL(void)  HMNotifyVmxNstGstCurrentVmcsChanged(PVMCPU pVCpu);
 # endif
 /** @} */
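The two declarations above are the whole of the new notification interface: the VM-exit hook loses its now-unused pCtx parameter, and a second hook is added for current-VMCS changes. A rough sketch of the intended call pattern follows (a hypothetical IEM-side call site; GCPhysVmcs is an assumed field name, and the actual IEM integration is not visible in this changeset's diff):

/* Hypothetical call site, e.g. in IEM's VMPTRLD emulation: after the guest
   hypervisor's current-VMCS pointer changes outside VMX R0 code, tell HM so
   it re-syncs the shadow VMCS on the next VM-entry. */
static void iemSketchOnVmptrld(PVMCPU pVCpu, RTGCPHYS GCPhysNewVmcs)
{
    pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = GCPhysNewVmcs; /* assumed field name */
    HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);               /* new hook from this changeset */
}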
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
(r79166 -> r79345)

@@ -1232 +1232 @@
  *
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
  *
  * @remarks Can be called from ring-0 as well as ring-3.
  */
-VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    NOREF(pCtx);
-
+VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu)
+{
     /*
      * Transitions to ring-3 flag a full CPU-state change except if we transition to ring-3
@@ -1249 +1246 @@
      * for the nested-guest from ring-3.
      *
-     * Flag reloading of just the guest-CPU state is -not- sufficient since HM also needs
-     * to reload related state with VM-entry/VM-exit controls and so on. Flag reloading
-     * the entire state.
+     * Signalling reload of just the guest-CPU state that changed with the VM-exit is -not-
+     * sufficient since HM also needs to reload state related to VM-entry/VM-exit controls
+     * etc. So signal reloading of the entire state. It does not seem worth making this any
+     * more fine grained at the moment.
      */
     CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ALL);
@@ -1258 +1256 @@
     /*
      * Make sure we need to merge the guest VMCS controls with the nested-guest
-     * VMCS controls on the next nested-guest VM
+     * VMCS controls on the next nested-guest VM-entry.
      */
     pVCpu->hm.s.vmx.fMergedNstGstCtls = false;
 }
+
+
+/**
+ * Notification callback for when the guest hypervisor's current VMCS is loaded or
+ * changed outside VMX R0 code (e.g. in IEM).
+ *
+ * This need -not- be called for modifications to the guest hypervisor's current
+ * VMCS when the guest is in VMX non-root mode as VMCS shadowing is not applicable
+ * there.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ *
+ * @remarks Can be called from ring-0 as well as ring-3.
+ */
+VMM_INT_DECL(void) HMNotifyVmxNstGstCurrentVmcsChanged(PVMCPU pVCpu)
+{
+    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_HWVIRT);
+    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
+
+    /*
+     * Make sure we need to copy the guest hypervisor's current VMCS into the shadow VMCS
+     * on the next guest VM-entry.
+     */
+    pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = false;
+}
+
 # endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
 #endif /* IN_RC */
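Both notifications follow the same lazy invalidation pattern: the notifier only marks state dirty, and the expensive re-sync happens on the next VM-entry in ring-0. A minimal sketch of that protocol, using the names from this changeset (the consumer is a simplified stand-in for hmR0VmxExportGuestHwvirtState in HMVMXR0.cpp, not the real function):

/* Producer (IEM/ring-3) marks the hardware-virtualization state stale. */
void ProducerOnVmcsChange(PVMCPU pVCpu)
{
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
    pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = false;  /* force a re-copy */
}

/* Consumer (ring-0 VM-entry path) re-syncs only when the flag says so. */
int ConsumerOnVmEntry(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
{
    if (!pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs)
    {
        int rc = hmR0VmxCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
        if (RT_FAILURE(rc))
            return rc;
        pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = true;
    }
    return VINF_SUCCESS;
}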
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
(r79197 -> r79345)

@@ -1172 +1172 @@
     if (pVM->hm.s.vmx.fSupported)
     {
-        pVM->hm.s.vmx.fUsePreemptTimer &= g_HmR0.hwvirt.u.vmx.fUsePreemptTimer; /* Can be overridden by CFGM see HMR3Init(). */
+        pVM->hm.s.vmx.fUsePreemptTimer &= g_HmR0.hwvirt.u.vmx.fUsePreemptTimer; /* Can be overridden by CFGM in HMR3Init(). */
         pVM->hm.s.vmx.cPreemptTimerShift = g_HmR0.hwvirt.u.vmx.cPreemptTimerShift;
         pVM->hm.s.vmx.u64HostCr4 = g_HmR0.hwvirt.u.vmx.u64HostCr4;
@@ -1178 +1178 @@
         pVM->hm.s.vmx.u64HostSmmMonitorCtl = g_HmR0.hwvirt.u.vmx.u64HostSmmMonitorCtl;
         HMGetVmxMsrsFromHwvirtMsrs(&g_HmR0.hwvirt.Msrs, &pVM->hm.s.vmx.Msrs);
+
+        /* Enable VPID if supported and configured. */
+        if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VPID)
+            pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid; /* Can be overridden by CFGM in HMR3Init(). */
+
+        /* Use VMCS shadowing if supported. */
+        Assert(!pVM->hm.s.vmx.fUseVmcsShadowing);
+        if (   pVM->cpum.ro.GuestFeatures.fVmx
+            && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
+            pVM->hm.s.vmx.fUseVmcsShadowing = true;
+
+        /* Use the VMCS controls for swapping the EFER MSR if supported. */
+        Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
+#if HC_ARCH_BITS == 64
+        if (   (pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
+            && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_LOAD_EFER_MSR)
+            && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_SAVE_EFER_MSR))
+            pVM->hm.s.vmx.fSupportsVmcsEfer = true;
+#endif
+
+#if 0
+        /* Enable APIC register virtualization and virtual-interrupt delivery if supported. */
+        if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT)
+            && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY))
+            pVM->hm.s.fVirtApicRegs = true;
+
+        /* Enable posted-interrupt processing if supported. */
+        /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
+         *        here. */
+        if (   (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT)
+            && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT))
+            pVM->hm.s.fPostedIntrs = true;
+#endif
     }
     else if (pVM->hm.s.svm.fSupported)
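The feature checks moved into ring-0 init above all follow the same pattern: a control may be enabled only if its bit is set in the allowed-1 half of the corresponding VMX capability MSR. A self-contained sketch of that check (plain integers instead of VBox's Msrs structures; names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* For VMX capability MSRs such as IA32_VMX_PROCBASED_CTLS2, bits 31:0 report
   the allowed-0 settings and bits 63:32 the allowed-1 settings; a secondary
   control is usable only if its bit may be 1. */
static bool vmxIsCtls2Allowed(uint64_t uProcCtls2Msr, uint32_t fCtl)
{
    uint32_t const fAllowed1 = (uint32_t)(uProcCtls2Msr >> 32); /* may-be-1 bits */
    return (fAllowed1 & fCtl) != 0;
}
/* e.g. fUseVmcsShadowing = guest-exposes-VMX
                         && vmxIsCtls2Allowed(uMsr, VMX_PROC_CTLS2_VMCS_SHADOWING); */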
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r79232 -> r79345)

@@ -105 +105 @@
                                       | CPUMCTX_EXTRN_CR4 \
                                       | CPUMCTX_EXTRN_DR7 \
+                                      | CPUMCTX_EXTRN_HWVIRT \
                                       | CPUMCTX_EXTRN_HM_VMX_MASK)

@@ -355 +356 @@
 #endif

-static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
+static int hmR0VmxImportGuestState(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
 static void hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);

@@ -479 +480 @@
 static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
 static VBOXSTRICTRC hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
-static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst);
-static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr);
-static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
-static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
+static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst);
+static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr);
+static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
+static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
 static VBOXSTRICTRC hmR0VmxExitHostNmi(PVMCPU pVCpu);
 /** @} */

@@ -1217 +1218 @@
  * @returns VBox status code.
  * @param   pVmcsInfo   The VMCS info. object.
+ *
+ * @remarks Can be called with interrupts disabled.
  */
 static int hmR0VmxLoadVmcs(PVMXVMCSINFO pVmcsInfo)
 {
-    Assert(pVmcsInfo->HCPhysVmcs);
+    Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

-    if (pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_CLEAR)
-    {
-        int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);
-        if (RT_SUCCESS(rc))
-        {
-            pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
-            return VINF_SUCCESS;
-        }
-        return rc;
-    }
-    return VERR_VMX_INVALID_VMCS_LAUNCH_STATE;
+    int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);
+    if (RT_SUCCESS(rc))
+        pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
+    return rc;
 }

@@ -1242 +1238 @@
  * @returns VBox status code.
  * @param   pVmcsInfo   The VMCS info. object.
+ *
+ * @remarks Can be called with interrupts disabled.
  */
 static int hmR0VmxClearVmcs(PVMXVMCSINFO pVmcsInfo)
 {
-    Assert(pVmcsInfo->HCPhysVmcs);
+    Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

@@ -1256 +1254 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-#if 0
 /**
  * Loads the shadow VMCS specified by the VMCS info. object.
  *
  * @returns VBox status code.
  * @param   pVmcsInfo   The VMCS info. object.
+ *
+ * @remarks Can be called with interrupts disabled.
  */
 static int hmR0VmxLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
 {
-    Assert(pVmcsInfo->HCPhysShadowVmcs);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-
-    if (pVmcsInfo->fShadowVmcsState & VMX_V_VMCS_LAUNCH_STATE_CLEAR)
-    {
-        int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
-        if (RT_SUCCESS(rc))
-        {
-            pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_ACTIVE;
-            return VINF_SUCCESS;
-        }
-        return rc;
-    }
-    return VERR_VMX_INVALID_VMCS_LAUNCH_STATE;
-}
-#endif
+    Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
+
+    int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
+    if (RT_SUCCESS(rc))
+        pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
+    return rc;
+}

@@ -1288 +1279 @@
  * @returns VBox status code.
  * @param   pVmcsInfo   The VMCS info. object.
+ *
+ * @remarks Can be called with interrupts disabled.
  */
 static int hmR0VmxClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
 {
-    Assert(pVmcsInfo->HCPhysShadowVmcs);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);

     int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);

@@ -1320 +1313 @@
     int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
     if (RT_SUCCESS(rc))
-    { /* likely */ }
+    {
+        /*
+         * The shadow VMCS, if any, would not be active at this point since we
+         * would have cleared it while importing the virtual hardware-virtualization
+         * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
+         * clear the shadow VMCS here, just assert for safety.
+         */
+        Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
+    }
     else
         return rc;

@@ -1655 +1656 @@
     return rc;
 }

-
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-/**
- * Initializes the shadow VMCS.
- *
- * This builds an array (for use later while executing a nested-guest) of VMCS
- * fields to copy into the shadow VMCS.
- *
- * @param   pVM     The cross context VM structure.
- */
-static void hmR0VmxInitShadowVmcsFieldsArray(PVM pVM)
-{
-    uint32_t const cVmcsFields = RT_ELEMENTS(g_aVmcsFields);
-    for (uint32_t i = 0; i < cVmcsFields; i++)
-    {
-        /*
-         * If the VMCS field depends on a CPU feature that is not exposed to the guest,
-         * we must not include it in the shadow VMCS fields array. Guests attempting to
-         * VMREAD/VMWRITE such VMCS fields would cause a VM-exit and we shall emulate
-         * the required behavior.
-         */
-        uint32_t const uVmcsField      = g_aVmcsFields[i];
-        bool const     fVmcsFieldValid = CPUMIsGuestVmxVmcsFieldValid(pVM, uVmcsField);
-        if (fVmcsFieldValid)
-        {
-            pVM->hm.s.vmx.paShadowVmcsFields[i] = uVmcsField;
-            ++pVM->hm.s.vmx.cShadowVmcsFields;
-        }
-    }
-}
-
-
-/**
- * Initializes the VMREAD/VMWRITE bitmaps.
- *
- * @param   pVM     The cross context VM structure.
- */
-static void hmR0VmxInitVmreadVmwriteBitmaps(PVM pVM)
-{
-    /*
-     * By default, ensure guest attempts to access any VMCS fields cause VM-exits.
-     */
-    uint32_t const cbBitmap        = X86_PAGE_4K_SIZE;
-    uint8_t       *pbVmreadBitmap  = (uint8_t *)pVM->hm.s.vmx.pvVmreadBitmap;
-    uint8_t       *pbVmwriteBitmap = (uint8_t *)pVM->hm.s.vmx.pvVmwriteBitmap;
-    ASMMemFill32(pbVmreadBitmap, cbBitmap, UINT32_C(0xffffffff));
-    ASMMemFill32(pbVmwriteBitmap, cbBitmap, UINT32_C(0xffffffff));
-
-    uint32_t const *paShadowVmcsFields = pVM->hm.s.vmx.paShadowVmcsFields;
-    uint32_t const  cShadowVmcsFields  = pVM->hm.s.vmx.cShadowVmcsFields;
-
-    /*
-     * Initialize the VMREAD bitmap.
-     * All valid guest VMCS fields (read-only and read-write) can be accessed
-     * using VMREAD without causing a VM-exit.
-     */
-    for (uint32_t i = 0; i < cShadowVmcsFields; i++)
-    {
-        uint32_t const uVmcsField = paShadowVmcsFields[i];
-        Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
-        uint8_t *pbField = pbVmreadBitmap + (uVmcsField >> 3);
-        ASMBitClear(pbField, uVmcsField & 7);
-    }
-
-    /*
-     * Initialize the VMWRITE bitmap.
-     * Allow the guest to write to read-only guest VMCS fields only if the
-     * host CPU supports it, otherwise it would cause a VMWRITE instruction error.
-     */
-    bool const fHasVmwriteAll = RT_BOOL(pVM->hm.s.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL);
-    for (uint32_t i = 0; i < cShadowVmcsFields; i++)
-    {
-        uint32_t const uVmcsField = paShadowVmcsFields[i];
-        if (   fHasVmwriteAll
-            || !HMVmxIsVmcsFieldReadOnly(uVmcsField))
-        {
-            Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
-            uint8_t *pbField = pbVmwriteBitmap + (uVmcsField >> 3);
-            ASMBitClear(pbField, uVmcsField & 7);
-        }
-    }
-}
-#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */


@@ -1827 +1744 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-    if (   pVM->cpum.ro.GuestFeatures.fVmx
-        && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
+    if (pVM->hm.s.vmx.fUseVmcsShadowing)
         hmR0VmxPageFree(&pVmcsInfo->hMemObjShadowVmcs, &pVmcsInfo->pvShadowVmcs, &pVmcsInfo->HCPhysShadowVmcs);
 #endif

@@ -1862 +1778 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-    /* Allocate the shadow VMCS if supported by the CPU. */
-    if (   pVM->cpum.ro.GuestFeatures.fVmx
-        && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
+    if (pVM->hm.s.vmx.fUseVmcsShadowing)
         rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjShadowVmcs, &pVmcsInfo->pvShadowVmcs, &pVmcsInfo->HCPhysShadowVmcs);
 #endif

@@ -1959 +1873 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-    if (   pVM->cpum.ro.GuestFeatures.fVmx
-        && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
+    if (pVM->hm.s.vmx.fUseVmcsShadowing)
     {
         RTMemFree(pVM->hm.s.vmx.paShadowVmcsFields);
+        RTMemFree(pVM->hm.s.vmx.paShadowVmcsRoFields);
         hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjVmreadBitmap, &pVM->hm.s.vmx.pvVmreadBitmap, &pVM->hm.s.vmx.HCPhysVmreadBitmap);
         hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjVmwriteBitmap, &pVM->hm.s.vmx.pvVmwriteBitmap, &pVM->hm.s.vmx.HCPhysVmwriteBitmap);

@@ -2056 +1970 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-    /* Allocate the shadow VMCS fields array, VMREAD, VMWRITE bitmaps if VMCS shadowing supported by the CPU. */
-    if (   pVM->cpum.ro.GuestFeatures.fVmx
-        && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
-    {
-        pVM->hm.s.vmx.paShadowVmcsFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
-        if (RT_LIKELY(pVM->hm.s.vmx.paShadowVmcsFields))
+    /* Allocate the shadow VMCS fields array, VMREAD, VMWRITE bitmaps. */
+    if (pVM->hm.s.vmx.fUseVmcsShadowing)
+    {
+        Assert(!pVM->hm.s.vmx.cShadowVmcsFields);
+        Assert(!pVM->hm.s.vmx.cShadowVmcsRoFields);
+        pVM->hm.s.vmx.paShadowVmcsFields   = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
+        pVM->hm.s.vmx.paShadowVmcsRoFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
+        if (RT_LIKELY(   pVM->hm.s.vmx.paShadowVmcsFields
+                      && pVM->hm.s.vmx.paShadowVmcsRoFields))
         {
             rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjVmreadBitmap, &pVM->hm.s.vmx.pvVmreadBitmap,
                                    &pVM->hm.s.vmx.HCPhysVmreadBitmap);
             if (RT_SUCCESS(rc))
             {
                 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjVmwriteBitmap, &pVM->hm.s.vmx.pvVmwriteBitmap,
                                        &pVM->hm.s.vmx.HCPhysVmwriteBitmap);
-                if (RT_SUCCESS(rc))
-                {
-                    hmR0VmxInitShadowVmcsFieldsArray(pVM);
-                    hmR0VmxInitVmreadVmwriteBitmaps(pVM);
-                }
             }
         }

@@ -3376 +3288 @@


+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/**
+ * Sets up the shadow VMCS fields arrays.
+ *
+ * This function builds arrays of VMCS fields to sync the shadow VMCS later while
+ * executing the guest.
+ *
+ * @returns VBox status code.
+ * @param   pVM     The cross context VM structure.
+ */
+static int hmR0VmxSetupShadowVmcsFieldsArrays(PVM pVM)
+{
+    /*
+     * Paranoia. Ensure we haven't exposed the VMWRITE-All VMX feature to the guest
+     * when the host does not support it.
+     */
+    bool const fGstVmwriteAll = pVM->cpum.ro.GuestFeatures.fVmxVmwriteAll;
+    if (   !fGstVmwriteAll
+        || (pVM->hm.s.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL))
+    { /* likely */ }
+    else
+    {
+        LogRelFunc(("VMX VMWRITE-All feature exposed to the guest but host CPU does not support it!\n"));
+        pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_GST_HOST_VMWRITE_ALL;
+        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+    }
+
+    uint32_t const cVmcsFields = RT_ELEMENTS(g_aVmcsFields);
+    uint32_t       cRwFields   = 0;
+    uint32_t       cRoFields   = 0;
+    for (uint32_t i = 0; i < cVmcsFields; i++)
+    {
+        VMXVMCSFIELD VmcsField;
+        VmcsField.u = g_aVmcsFields[i];
+
+        /*
+         * We will be writing "FULL" (64-bit) fields while syncing the shadow VMCS.
+         * Therefore, "HIGH" (32-bit portion of 64-bit) fields must not be included
+         * in the shadow VMCS fields array as they would be redundant.
+         *
+         * If the VMCS field depends on a CPU feature that is not exposed to the guest,
+         * we must not include it in the shadow VMCS fields array. Guests attempting to
+         * VMREAD/VMWRITE such VMCS fields would cause a VM-exit and we shall emulate
+         * the required behavior.
+         */
+        if (   VmcsField.n.fAccessType == VMX_VMCSFIELD_ACCESS_FULL
+            && CPUMIsGuestVmxVmcsFieldValid(pVM, VmcsField.u))
+        {
+            /*
+             * Read-only fields are placed in a separate array so that while syncing shadow
+             * VMCS fields later (which is more performance critical) we can avoid branches.
+             *
+             * However, if the guest can write to all fields (including read-only fields),
+             * we treat it as a read/write field. Otherwise, writing to these fields would
+             * cause a VMWRITE instruction error while syncing the shadow VMCS.
+             */
+            if (   fGstVmwriteAll
+                || !HMVmxIsVmcsFieldReadOnly(VmcsField.u))
+                pVM->hm.s.vmx.paShadowVmcsFields[cRwFields++] = VmcsField.u;
+            else
+                pVM->hm.s.vmx.paShadowVmcsRoFields[cRoFields++] = VmcsField.u;
+        }
+    }
+
+    /* Update the counts. */
+    pVM->hm.s.vmx.cShadowVmcsFields   = cRwFields;
+    pVM->hm.s.vmx.cShadowVmcsRoFields = cRoFields;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets up the VMREAD and VMWRITE bitmaps.
+ *
+ * @param   pVM     The cross context VM structure.
+ */
+static void hmR0VmxSetupVmreadVmwriteBitmaps(PVM pVM)
+{
+    /*
+     * By default, ensure guest attempts to access any VMCS fields cause VM-exits.
+     */
+    uint32_t const cbBitmap        = X86_PAGE_4K_SIZE;
+    uint8_t       *pbVmreadBitmap  = (uint8_t *)pVM->hm.s.vmx.pvVmreadBitmap;
+    uint8_t       *pbVmwriteBitmap = (uint8_t *)pVM->hm.s.vmx.pvVmwriteBitmap;
+    ASMMemFill32(pbVmreadBitmap, cbBitmap, UINT32_C(0xffffffff));
+    ASMMemFill32(pbVmwriteBitmap, cbBitmap, UINT32_C(0xffffffff));
+
+    /*
+     * Skip intercepting VMREAD/VMWRITE to guest read/write fields in the
+     * VMREAD and VMWRITE bitmaps.
+     */
+    {
+        uint32_t const *paShadowVmcsFields = pVM->hm.s.vmx.paShadowVmcsFields;
+        uint32_t const  cShadowVmcsFields  = pVM->hm.s.vmx.cShadowVmcsFields;
+        for (uint32_t i = 0; i < cShadowVmcsFields; i++)
+        {
+            uint32_t const uVmcsField = paShadowVmcsFields[i];
+            Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
+            Assert(uVmcsField >> 3 < cbBitmap);
+            ASMBitClear(pbVmreadBitmap  + (uVmcsField >> 3), uVmcsField & 7);
+            ASMBitClear(pbVmwriteBitmap + (uVmcsField >> 3), uVmcsField & 7);
+        }
+    }
+
+    /*
+     * Skip intercepting VMREAD for guest read-only fields in the VMREAD bitmap
+     * if the host supports VMWRITE to all supported VMCS fields.
+     */
+    if (pVM->hm.s.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL)
+    {
+        uint32_t const *paShadowVmcsRoFields = pVM->hm.s.vmx.paShadowVmcsRoFields;
+        uint32_t const  cShadowVmcsRoFields  = pVM->hm.s.vmx.cShadowVmcsRoFields;
+        for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
+        {
+            uint32_t const uVmcsField = paShadowVmcsRoFields[i];
+            Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
+            Assert(uVmcsField >> 3 < cbBitmap);
+            ASMBitClear(pbVmreadBitmap + (uVmcsField >> 3), uVmcsField & 7);
+        }
+    }
+}
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
+
+
 /**
  * Sets up the virtual-APIC page address for the VMCS.

@@ -3425 +3461 @@

-/**
- * Sets up the VMCS link pointer for the VMCS.
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/**
+ * Sets up the VMREAD bitmap address for the VMCS.
  *
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pVmcsInfo   The VMCS info. object.
- */
-DECLINLINE(int) hmR0VmxSetupVmcsLinkPtr(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
-{
-    NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
-    uint64_t const u64VmcsLinkPtr = pVmcsInfo->u64VmcsLinkPtr;
-    Assert(u64VmcsLinkPtr == UINT64_C(0xffffffffffffffff)); /* Bits 63:0 MB1. */
-    return VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, u64VmcsLinkPtr);
-}
+ */
+DECLINLINE(int) hmR0VmxSetupVmcsVmreadBitmapAddr(PVMCPU pVCpu)
+{
+    RTHCPHYS const HCPhysVmreadBitmap = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.HCPhysVmreadBitmap;
+    Assert(HCPhysVmreadBitmap != NIL_RTHCPHYS);
+    Assert(!(HCPhysVmreadBitmap & 0xfff)); /* Bits 11:0 MBZ. */
+    return VMXWriteVmcs64(VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL, HCPhysVmreadBitmap);
+}
+
+
+/**
+ * Sets up the VMWRITE bitmap address for the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+DECLINLINE(int) hmR0VmxSetupVmcsVmwriteBitmapAddr(PVMCPU pVCpu)
+{
+    RTHCPHYS const HCPhysVmwriteBitmap = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.HCPhysVmwriteBitmap;
+    Assert(HCPhysVmwriteBitmap != NIL_RTHCPHYS);
+    Assert(!(HCPhysVmwriteBitmap & 0xfff)); /* Bits 11:0 MBZ. */
+    return VMXWriteVmcs64(VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL, HCPhysVmwriteBitmap);
+}
+#endif

@@ -3624 +3676 @@
         fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;

-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-#if 0
-    /* Enable VMCS shadowing if supported by the hardware and VMX is exposed to the guest. */
-    if (   pVM->cpum.ro.GuestFeatures.fVmx
-        && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
-        fVal |= VMX_PROC_CTLS2_VMCS_SHADOWING;
-#endif
-#endif
-
 #if 0
     if (pVM->hm.s.fVirtApicRegs)

@@ -3807 +3850 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pVmcsInfo   The VMCS info. object.
- *
- * @remarks Must be called after secondary processor-based VM-execution controls
- *          have been initialized!
  */
 static int hmR0VmxSetupVmcsMiscCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
 {
-    /* Set the VMCS link pointer in the VMCS. */
-    int rc = hmR0VmxSetupVmcsLinkPtr(pVCpu, pVmcsInfo);
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUseVmcsShadowing)
+    {
+        int rc = hmR0VmxSetupVmcsVmreadBitmapAddr(pVCpu);
+        rc    |= hmR0VmxSetupVmcsVmwriteBitmapAddr(pVCpu);
+        if (RT_SUCCESS(rc))
+        { /* likely */ }
+        else
+        {
+            LogRelFunc(("Failed to setup VMREAD/VMWRITE bitmap addresses. rc=%Rrc\n", rc));
+            return rc;
+        }
+    }
+#endif
+
+    int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
     if (RT_SUCCESS(rc))
     {
-        /* Set the auto-load/store MSR area addresses in the VMCS. */
         rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVCpu, pVmcsInfo);
         if (RT_SUCCESS(rc))
         {
-            /* Set the CR0/CR4 guest/host mask. */
             uint64_t const u64Cr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
             uint64_t const u64Cr4Mask = hmR0VmxGetFixedCr4Mask(pVCpu);

@@ -3890 +3942 @@
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    int rc = hmR0VmxSetupVmcsLinkPtr(pVCpu, pVmcsInfo);
+    int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
     if (RT_SUCCESS(rc))
     {

@@ -3967 +4019 @@
 {
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-    /* If VMCS shadowing is used, initialize the shadow VMCS. */
-    if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
+    /*
+     * If a shadow VMCS is allocated for the VMCS info. object, initialize the
+     * VMCS revision ID and shadow VMCS indicator bit. Also, clear the VMCS
+     * making it fit for use when VMCS shadowing is later enabled.
+     */
+    if (pVmcsInfo->pvShadowVmcs)
     {
-        Assert(pVmcsInfo->pvShadowVmcs);
         VMXVMCSREVID VmcsRevId;
         VmcsRevId.u = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);

@@ -4220 +4275 @@
     if (RT_FAILURE(rc))
     {
-        LogRelFunc(("hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
+        LogRelFunc(("Failed to setup tagged TLB. rc=%Rrc\n", rc));
         return rc;
     }

-    /* Check if we can use the VMCS controls for swapping the EFER MSR. */
-    Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
-#if HC_ARCH_BITS == 64
-    if (   (pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
-        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_LOAD_EFER_MSR)
-        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_SAVE_EFER_MSR))
-        pVM->hm.s.vmx.fSupportsVmcsEfer = true;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    /* Setup the shadow VMCS fields array and VMREAD/VMWRITE bitmaps. */
+    if (pVM->hm.s.vmx.fUseVmcsShadowing)
+    {
+        rc = hmR0VmxSetupShadowVmcsFieldsArrays(pVM);
+        if (RT_SUCCESS(rc))
+            hmR0VmxSetupVmreadVmwriteBitmaps(pVM);
+        else
+        {
+            LogRelFunc(("Failed to setup shadow VMCS fields arrays. rc=%Rrc\n", rc));
+            return rc;
+        }
+    }
 #endif

@@ -5345 +5406 @@


+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/**
+ * Copies the nested-guest VMCS to the shadow VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pVmcsInfo   The VMCS info. object.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static int hmR0VmxCopyNstGstToShadowVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+    PVM        pVM         = pVCpu->CTX_SUFF(pVM);
+    PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+
+    /*
+     * Disable interrupts so we don't get preempted while the shadow VMCS is the
+     * current VMCS, as we may try saving guest lazy MSRs.
+     *
+     * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
+     * calling the import VMCS code which is currently performing the guest MSR reads
+     * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
+     * and the rest of the VMX leave session machinery.
+     */
+    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
+
+    int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Copy all guest read/write VMCS fields.
+         *
+         * We don't check for VMWRITE failures here for performance reasons and
+         * because they are not expected to fail, barring irrecoverable conditions
+         * like hardware errors.
+         */
+        uint32_t const cShadowVmcsFields = pVM->hm.s.vmx.cShadowVmcsFields;
+        for (uint32_t i = 0; i < cShadowVmcsFields; i++)
+        {
+            uint64_t       u64Val;
+            uint32_t const uVmcsField = pVM->hm.s.vmx.paShadowVmcsFields[i];
+            IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
+            VMXWriteVmcs64(uVmcsField, u64Val);
+        }
+
+        /*
+         * If the host CPU supports writing all VMCS fields, copy the guest read-only
+         * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
+         */
+        if (pVM->hm.s.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL)
+        {
+            uint32_t const cShadowVmcsRoFields = pVM->hm.s.vmx.cShadowVmcsRoFields;
+            for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
+            {
+                uint64_t       u64Val;
+                uint32_t const uVmcsField = pVM->hm.s.vmx.paShadowVmcsRoFields[i];
+                IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
+                VMXWriteVmcs64(uVmcsField, u64Val);
+            }
+        }
+
+        rc  = hmR0VmxClearShadowVmcs(pVmcsInfo);
+        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
+    }
+
+    ASMSetFlags(fEFlags);
+    return rc;
+}
+
+
+/**
+ * Copies the shadow VMCS to the nested-guest VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pVmcsInfo   The VMCS info. object.
+ *
+ * @remarks Called with interrupts disabled.
+ */
+static int hmR0VmxCopyShadowToNstGstVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    PVM       pVM         = pVCpu->CTX_SUFF(pVM);
+    PVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+
+    int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
+    if (RT_SUCCESS(rc))
+    {
+        /*
+         * Copy guest read/write fields from the shadow VMCS.
+         * Guest read-only fields cannot be modified, so no need to copy them.
+         *
+         * We don't check for VMREAD failures here for performance reasons and
+         * because they are not expected to fail, barring irrecoverable conditions
+         * like hardware errors.
+         */
+        uint32_t const cShadowVmcsFields = pVM->hm.s.vmx.cShadowVmcsFields;
+        for (uint32_t i = 0; i < cShadowVmcsFields; i++)
+        {
+            uint64_t       u64Val;
+            uint32_t const uVmcsField = pVM->hm.s.vmx.paShadowVmcsFields[i];
+            VMXReadVmcs64(uVmcsField, &u64Val);
+            IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
+        }
+
+        rc  = hmR0VmxClearShadowVmcs(pVmcsInfo);
+        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
+    }
+    return rc;
+}
+
+
+/**
+ * Enables VMCS shadowing for the given VMCS info. object.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pVmcsInfo   The VMCS info. object.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxEnableVmcsShadowing(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+    NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
+
+    uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
+    if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
+    {
+        Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
+        uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
+        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2);
+        rc    |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs);
+        AssertRC(rc);
+
+        pVmcsInfo->u32ProcCtls2   = uProcCtls2;
+        pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
+        Log4Func(("Enabled\n"));
+    }
+}
+
+
+/**
+ * Disables VMCS shadowing for the given VMCS info. object.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pVmcsInfo   The VMCS info. object.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxDisableVmcsShadowing(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
+{
+    NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
+
+    /*
+     * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
+     * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
+     * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
+     * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
+     *
+     * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
+     * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
+     */
+    uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
+    if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
+    {
+        uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
+        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2);
+        rc    |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
+        AssertRC(rc);
+
+        pVmcsInfo->u32ProcCtls2   = uProcCtls2;
+        pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
+        Log4Func(("Disabled\n"));
+    }
+}
+#endif
+
+
+/**
+ * Exports the guest hardware-virtualization state.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pVmxTransient   The VMX-transient structure.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static int hmR0VmxExportGuestHwvirtState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_HWVIRT)
+    {
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+        /*
+         * Check if the VMX feature is exposed to the guest and if the host CPU supports
+         * VMCS shadowing.
+         */
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUseVmcsShadowing)
+        {
+            /*
+             * If the guest hypervisor has loaded a current VMCS and is in VMX root mode,
+             * copy the guest hypervisor's current VMCS into the shadow VMCS and enable
+             * VMCS shadowing to skip intercepting some or all VMREAD/VMWRITE VM-exits.
+             *
+             * We check for VMX root mode here in case the guest executes VMXOFF without
+             * clearing the current VMCS pointer and our VMXOFF instruction emulation does
+             * not clear the current VMCS pointer.
+             */
+            PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+            if (   CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx)
+                && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
+                && CPUMIsGuestVmxCurrentVmcsValid(pVCpu, &pVCpu->cpum.GstCtx))
+            {
+                /* Paranoia. */
+                Assert(!pVmxTransient->fIsNestedGuest);
+
+                /*
+                 * For performance reasons, also check if the guest hypervisor's current VMCS
+                 * was newly loaded or modified before copying it to the shadow VMCS.
+                 */
+                if (!pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs)
+                {
+                    int rc = hmR0VmxCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
+                    AssertRCReturn(rc, rc);
+                    pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = true;
+                }
+                hmR0VmxEnableVmcsShadowing(pVCpu, pVmcsInfo);
+            }
+            else
+                hmR0VmxDisableVmcsShadowing(pVCpu, pVmcsInfo);
+        }
+#else
+        NOREF(pVmxTransient);
+#endif
+        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT);
+    }
+    return VINF_SUCCESS;
+}
+
+
 /**
  * Exports the guest CR0 control register into the guest-state area in the VMCS.

@@ -5964 +6263 @@
  *          segments.
  */
-static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
+static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
 {
     /*

@@ -7985 +8284 @@
  * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
  */
-static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
+static int hmR0VmxImportGuestState(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
 {
 #define VMXLOCAL_BREAK_RC(a_rc) \

@@ -8300 +8599 @@
             }
         }
+    }

 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    if (fWhat & CPUMCTX_EXTRN_HWVIRT)
+    {
+        if (   (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
+            && !CPUMIsGuestInVmxNonRootMode(pCtx))
+        {
+            Assert(CPUMIsGuestInVmxRootMode(pCtx));
+            rc = hmR0VmxCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
+            VMXLOCAL_BREAK_RC(rc);
+        }
+
 # if 0
     /** @todo NSTVMX: We handle most of these fields individually by passing it to IEM

@@ -8312 +8622 @@
      * the guest-CPU state when executing a nested-guest.
      */
-    if (   (fWhat & CPUMCTX_EXTRN_HWVIRT)
-        && CPUMIsGuestInVmxNonRootMode(pCtx))
+    if (CPUMIsGuestInVmxNonRootMode(pCtx))
     {
         PVMXVVMCS pGstVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);

@@ -8361 +8670 @@
     }
 # endif
+    }
 #endif
-    }
 } while (0);

@@ -8434 +8743 @@
     AssertPtr(pVCpu);
-    PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
+    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     return hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fWhat);

@@ -8848 +9157 @@
     int rc = hmR0VmxClearVmcs(pVmcsInfo);
     AssertRCReturn(rc, rc);
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    /*
+     * A valid shadow VMCS is made active as part of VM-entry. It is necessary to
+     * clear a shadow VMCS before allowing that VMCS to become active on another
+     * logical processor. We may or may not be importing guest state which clears
+     * it, so cover for it here.
+     *
+     * See Intel spec. 24.11.1 "Software Use of Virtual-Machine Control Structures".
+     */
+    if (   pVmcsInfo->pvShadowVmcs
+        && pVmcsInfo->fShadowVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
+    {
+        rc = hmR0VmxClearShadowVmcs(pVmcsInfo);
+        AssertRCReturn(rc, rc);
+    }
+#endif

     Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));

@@ -9083 +9409 @@
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);

-    /* Clear the current VMCS data back to memory. */
+    /* Clear the current VMCS data back to memory (shadow VMCS if any would have been
+       cleared as part of importing the guest state above). */
     hmR0VmxClearVmcs(pVmcsInfo);

@@ -9233 +9560 @@
     /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
                                                       | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);

@@ -9876 +10203 @@
     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);

+    rc = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);
+    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
+
     /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
     ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(  (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)

@@ -9888 +10218 @@
                                                   | HM_CHANGED_GUEST_TSC_AUX
                                                   | HM_CHANGED_GUEST_OTHER_MSRS
-                                                  | HM_CHANGED_GUEST_HWVIRT /* More accurate PLE handling someday? */
                                                   | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));

@@ -11085 +11414 @@
      * If any new events (interrupts/NMI) are pending currently, we try to set up the
      * guest to cause a VM-exit the next time they are ready to receive the event.
+     *
+     * With nested-guests, evaluating pending events may cause VM-exits.
      */
     if (TRPMHasTrap(pVCpu))

@@ -11111 +11442 @@
      * also result in triple-faulting the VM.
      *
-     * The above does not apply when executing a nested-guest (since unrestricted guest execution
-     * is a requirement) regardless doing it avoid duplicating code elsewhere.
+     * With nested-guests, the above does not apply since unrestricted guest execution is a
+     * requirement. Regardless, we do this here to avoid duplicating code elsewhere.
      */
     rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping);

@@ -11164 +11495 @@
      * If we are injecting events to a real-on-v86 mode guest, we would have updated RIP and some segment
-     * registers. Hence, loading of the guest state needs to be done -after- injection of events.
+     * registers. Hence, exporting of the guest state needs to be done -after- injection of events.
      */
     rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pVmxTransient);

@@ -11657 +11988 @@
     Assert(pcLoops);
     Assert(*pcLoops <= cMaxResumeLoops);
+    Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));

     VMXTRANSIENT VmxTransient;

@@ -13145 +13477 @@
     do { \
         if (a_fSave != 0) \
-            hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); \
+            hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
        VBOXSTRICTRC rcStrict = a_CallExpr; \
        if (a_fSave != 0) \

@@ -13697 +14029 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     AssertRCReturn(rc, rc);

@@ -13914 +14246 @@
     /*
      * Get the state we need and update the exit history entry.
      */
-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);

@@ -13966 +14298 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
     AssertRCReturn(rc, rc);

@@ -13985 +14317 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);

@@ -14015 +14347 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);

@@ -14045 +14377 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
                                                      | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);

@@ -14076 +14408 @@
     if (EMAreHypercallInstructionsEnabled(pVCpu))
     {
-        PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
         int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
                                                          | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);

@@ -14118 +14450 @@
     Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);

@@ -14147 +14479 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);

@@ -14172 +14504 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);

@@ -14259 +14591 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);

@@ -14292 +14624 @@
 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
 {
-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     AssertRCReturn(rc, rc);

@@ -14437 +14769 @@
      *  MSRs required.  That would require changes to IEM and possibly CPUM too.
      *  (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
-    PCVMXVMCSINFO  pVmcsInfo = pVmxTransient->pVmcsInfo;
-    uint32_t const idMsr     = pVCpu->cpum.GstCtx.ecx;
-    uint64_t       fImport   = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
+    PVMXVMCSINFO   pVmcsInfo = pVmxTransient->pVmcsInfo;
+    uint32_t const idMsr     = pVCpu->cpum.GstCtx.ecx;
+    uint64_t       fImport   = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
     switch (idMsr)
     {

@@ -14517 +14849 @@
     }

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);

@@ -14685 +15017 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);

-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);

@@ -14838 +15170 @@
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);

@@ -15336 +15668 @@
     RTGCPHYS GCPhys;
-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);

@@ -15415 +15747 @@
     RTGCPHYS GCPhys;
-    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
+    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
     rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);

@@ -15913 +16245 @@
  * VM-exit helper for LMSW.
  */
-static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw,
-                                    RTGCPTR GCPtrEffDst)
+static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
 {
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);

@@ -15939 +16270 @@
  * VM-exit helper for CLTS.
  */
-static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
+static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
 {
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);

@@ -15964 +16295 @@
  * VM-exit helper for MOV from CRx (CRx read).
  */
-static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
+static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
 {
     Assert(iCrReg < 16);

@@ -15998 +16329 @@
  * VM-exit helper for MOV to CRx (CRx write).
  */
-static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
+static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
 {
     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);

@@ -16094 +16425 @@
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
+                                                                 | CPUMCTX_EXTRN_HWVIRT
                                                                  | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);

@@ -16139 +16471 @@
     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     {
-        rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+        if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
+            rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
     }
     Assert(rcStrict != VINF_IEM_RAISED_XCPT);

@@ -16156 +16489 @@
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
+                                                                 | CPUMCTX_EXTRN_HWVIRT
                                                                  | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);

@@ -16192 +16526 @@
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
+                                                                 | CPUMCTX_EXTRN_HWVIRT
                                                                  | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);

@@ -16228 +16563 @@
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
+                                                                 | CPUMCTX_EXTRN_HWVIRT
                                                                  | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);

@@ -16274 +16610 @@
     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     {
-        rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+        if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
+            rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
     }
     Assert(rcStrict != VINF_IEM_RAISED_XCPT);

@@ -16289 +16626 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

+    /*
+     * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since
+     * our HM hook that gets invoked when IEM's VMWRITE instruction emulation
+     * modifies the current VMCS signals re-loading the entire shadow VMCS, we
+     * should also save the entire shadow VMCS here.
+     */
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
+                                                                 | CPUMCTX_EXTRN_HWVIRT
                                                                  | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);

@@ -16307 +16651 @@
     HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);

+    /** @todo NSTVMX: Remove later. */
+    Log4Func(("VMWRITE: %#x\n", pVCpu->cpum.GstCtx.aGRegs[ExitInfo.InstrInfo.VmreadVmwrite.iReg2].u32));
+
     VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
     if (RT_LIKELY(rcStrict == VINF_SUCCESS))

@@ -16328 +16675 @@
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
+                                                                 | CPUMCTX_EXTRN_HWVIRT
                                                                  | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
     AssertRCReturn(rc, rc);

@@ -16354 +16702 @@
     int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
+                                                                 | CPUMCTX_EXTRN_HWVIRT
                                                                  | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
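The VMREAD/VMWRITE bitmaps set up above are 4 KB pages in which bit N governs VMCS field encoding N: a set bit forces a VM-exit on VMREAD/VMWRITE of that field, while a clear bit lets the CPU satisfy the access from the shadow VMCS. A standalone sketch of the addressing arithmetic used by hmR0VmxSetupVmreadVmwriteBitmaps (illustrative names, not VBox code):

#include <stdint.h>

/* Bit N of the bitmap corresponds to VMCS field encoding N (the low 15 bits
   of the encoding, so 32768 bits fit exactly in one 4K page). */
static void BitmapIntercept(uint8_t *pbBitmap, uint32_t uVmcsField, int fIntercept)
{
    uint32_t const idxByte = uVmcsField >> 3;       /* e.g. field 0x4002 -> byte 0x800 */
    uint8_t  const fMask   = 1u << (uVmcsField & 7); /* e.g. field 0x4002 -> bit 2 */
    if (fIntercept)
        pbBitmap[idxByte] |= fMask;   /* VMREAD/VMWRITE of this field causes a VM-exit */
    else
        pbBitmap[idxByte] &= ~fMask;  /* access is served by the shadow VMCS */
}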
trunk/src/VBox/VMM/VMMR3/HM.cpp
(r79125 -> r79345)

@@ -1597 +1597 @@

     /*
-     * Enable VPID if configured and supported.
-     */
-    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VPID)
-        pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid;
-
-#if 0
-    /*
-     * Enable APIC register virtualization and virtual-interrupt delivery if supported.
-     */
-    if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT)
-        && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY))
-        pVM->hm.s.fVirtApicRegs = true;
-
-    /*
-     * Enable posted-interrupt processing if supported.
-     */
-    /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
-     *        here. */
-    if (   (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT)
-        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT))
-        pVM->hm.s.fPostedIntrs = true;
-#endif
-
-    /*
      * Disallow RDTSCP in the guest if there is no secondary process-based VM execution controls as otherwise
      * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced

@@ -1771 +1747 @@
     Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);

-    if (pVM->hm.s.fVirtApicRegs)
-        LogRel(("HM: Enabled APIC-register virtualization support\n"));
-
-    if (pVM->hm.s.fPostedIntrs)
-        LogRel(("HM: Enabled posted-interrupt processing support\n"));
-
     if (pVM->hm.s.vmx.fVpid)
     {

@@ -1798 +1768 @@
     else
         LogRel(("HM: Disabled VMX-preemption timer\n"));
+
+    if (pVM->hm.s.fVirtApicRegs)
+        LogRel(("HM: Enabled APIC-register virtualization support\n"));
+
+    if (pVM->hm.s.fPostedIntrs)
+        LogRel(("HM: Enabled posted-interrupt processing support\n"));
+
+    if (pVM->hm.s.vmx.fUseVmcsShadowing)
+        LogRel(("HM: Enabled VMCS shadowing support\n"));

     return VINF_SUCCESS;
trunk/src/VBox/VMM/include/HMInternal.h
(r79222 -> r79345)

@@ -595 +595 @@
     /** Whether the CPU supports VMCS fields for swapping EFER. */
     bool                        fSupportsVmcsEfer;
-    uint8_t                     u8Alignment2[7];
+    /** Whether to use VMCS shadowing. */
+    bool                        fUseVmcsShadowing;
+    uint8_t                     u8Alignment2[6];

     /** VMX MSR values. */

@@ -603 +605 @@
     RTHCPHYS                    HCPhysVmxEnableError;

-    /** Pointer to the shadow VMCS fields array. */
+    /** Pointer to the shadow VMCS read-only fields array. */
+    R0PTRTYPE(uint32_t *)       paShadowVmcsRoFields;
+    /** Pointer to the shadow VMCS read/write fields array. */
     R0PTRTYPE(uint32_t *)       paShadowVmcsFields;
-    RTR0PTR                     pvR0Alignment1;
-    /** Number of elements in the shadow VMCS fields array. */
+    /** Number of elements in the shadow VMCS read-only fields array. */
+    uint32_t                    cShadowVmcsRoFields;
+    /** Number of elements in the shadow VMCS read-write fields array. */
     uint32_t                    cShadowVmcsFields;
-    uint32_t                    u32Alignemnt0;
 } vmx;

@@ -984 +988 @@
      *  nested-guest VMCS controls. */
     bool                        fMergedNstGstCtls;
+    /** Whether the nested-guest VMCS has been copied to the shadow VMCS. */
+    bool                        fCopiedNstGstToShadowVmcs;
     /** Alignment. */
-    bool                        afAlignment0[6];
+    bool                        afAlignment0[5];
     /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
     uint64_t                    u64GstMsrApicBase;
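The padding adjustments above (u8Alignment2 shrinking from 7 to 6 bytes, afAlignment0 from 6 to 5) keep the structure offsets and size stable when the new booleans are inserted. A simplified, assumed layout illustrating the idea (not the real HMInternal.h structures):

#include <stdbool.h>
#include <stdint.h>

struct Sketch
{
    bool     fMergedNstGstCtls;
    bool     fCopiedNstGstToShadowVmcs; /* new member added here... */
    bool     afAlignment0[5];           /* ...so the filler drops from 6 to 5 */
    uint64_t u64GstMsrApicBase;         /* stays 8-byte aligned at offset 8 */
};
_Static_assert(sizeof(struct Sketch) == 16, "layout unchanged by the new bool");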