Changeset 43387 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Sep 21, 2012 9:40:25 AM
- svn:sync-xref-src-repo-rev: 80859
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 7 edited, 3 moved
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r42464 → r43387. Only the HWACCM → HM rename touches this file:

- Line 28:  #include <VBox/vmm/hwaccm.h>               →  #include <VBox/vmm/hm.h>
- Line 409: HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);    →  HMR0SaveFPUState(pVM, pVCpu, pCtx);
- Line 495: HWACCMR0SaveDebugState(pVM, pVCpu, pCtx);  →  HMR0SaveDebugState(pVM, pVCpu, pCtx);
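For context, both renamed calls sit in CPUMR0's guest FPU and debug-state save paths. The sketch below is reconstructed from the hunk context shown in the changeset and is not a verbatim copy of the file; the surrounding variables (pVM, pVCpu, pCtx, fDR6) are the ones appearing in those hunks.

    /* Guest FPU state: saved through the HM 64-bit handler, then the host FPU state is restored. */
    if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
    {
        HMR0SaveFPUState(pVM, pVCpu, pCtx);          /* was HWACCMR0SaveFPUState() */
        cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
    }

    /* Guest debug state: same pattern; DR6 is preserved when it was already up to date. */
    uint64_t dr6 = pCtx->dr[6];
    HMR0SaveDebugState(pVM, pVCpu, pCtx);            /* was HWACCMR0SaveDebugState() */
    if (!fDR6)
        pCtx->dr[6] = dr6;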
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r43373 → r43387. The HWACCM → HM rename is applied throughout this file: the log group becomes LOG_GROUP_HM; the includes change to <VBox/vmm/hm.h>, "HMInternal.h", <VBox/vmm/hm_vmx.h> and <VBox/vmm/hm_svm.h>; per-VM and per-VCPU members move from pVM->hwaccm.s.* / pVCpu->hwaccm.s.* to pVM->hm.s.* / pVCpu->hm.s.*; the status codes become VERR_HM_UNKNOWN_CPU, VERR_HM_NO_CPUID and VERR_HM_SUSPEND_PENDING; the context flags become HM_CHANGED_ALL, HM_CHANGED_GUEST_CR0 and HM_CHANGED_HOST_CONTEXT. Renamed entry points in the first part of the file:

- HWACCMR0Init / HWACCMR0Term                    →  HMR0Init / HMR0Term
- HWACCMR0EnableAllCpus                          →  HMR0EnableAllCpus (RTOnce worker hmR0EnableAllCpuOnce; comments in the power callback and VT-x/AMD-V init workers follow)
- HWACCMR0InitVM / HWACCMR0TermVM                →  HMR0InitVM / HMR0TermVM
- HWACCMR0SetupVM                                →  HMR0SetupVM
- HWACCMR0Enter                                  →  HMR0Enter
- HWACCMR0GetCurrentCpu (internal uses)          →  HMR0GetCurrentCpu

These hunks carry no functional change beyond the rename: HMR0InitVM still copies the global capability state (fSupported, preemption timer, VMX/SVM MSRs, CPUID features, uMaxASID, lLastError) into pVM->hm.s, still defaults cMaxResumeLoops to 1024 (8192 with VBOX_WITH_VMMR0_DISABLE_PREEMPTION) and still resets idEnteredCpu, idLastCpu and uCurrentASID per VCPU; HMR0SetupVM still marks all guest state dirty with HM_CHANGED_ALL; HMR0Enter still records the entered CPU, forces HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT and derives the 32/64-bit register mask from EFER.LMA.
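As a reading aid, this is roughly what the renamed HMR0Enter() does per the hunks above. It is an abridged sketch, not the full function: the fInUse bookkeeping, the local VT-x/AMD-V enabling and the VMX/SVM enter call are left out.

    VMMR0DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
    {
        RTCPUID idCpu = RTMpCpuId();

        /* Refuse to enter a session once HM has been disabled in preparation of a suspend. */
        AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);

        /* Remember which host CPU owns the VMCS/VMCB for this EMT. */
        pVCpu->hm.s.idEnteredCpu = idCpu;

        /* Always reload the host context and the guest CR0 on entry. */
        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT;

        /* The register mask depends on whether the guest runs in long mode. */
        PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
        if (pCtx->msrEFER & MSR_K6_EFER_LMA)
            pVM->hm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
        else
            pVM->hm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);

        /* ... enable VT-x/AMD-V locally if required and call the VMX/SVM enter code ... */
        return VINF_SUCCESS;
    }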
The same rename continues through the rest of the file:

- HWACCMR0Leave                                       →  HMR0Leave (counterpart of HMR0Enter; still saves a dirty guest FPU state and flags HM_CHANGED_GUEST_CR0, flushes the recompiler TLB under nested paging, clears idEnteredCpu, and resets idLastCpu/uCurrentASID to force a TLB flush on the next entry)
- HWACCMR0RunGuestCode                                →  HMR0RunGuestCode
- HWACCMR0SaveFPUState / HWACCMR0SaveDebugState       →  HMR0SaveFPUState / HMR0SaveDebugState (dispatch pVM->hm.s.pfnSaveGuestFPU64 / pfnSaveGuestDebug64 through the VMX or SVM 64-bit handler)
- HWACCMR0TestSwitcher3264                            →  HMR0TestSwitcher3264
- HWACCMR0SuspendPending                              →  HMR0SuspendPending
- HWACCMR0GetCurrentCpu / HWACCMR0GetCurrentCpuEx     →  HMR0GetCurrentCpu / HMR0GetCurrentCpuEx
- HWACCMR0SavePendingIOPortRead / ...Write            →  HMR0SavePendingIOPortRead / ...Write (enum values HWACCMPENDINGIO_PORT_READ/WRITE become HMPENDINGIO_PORT_READ/WRITE)
- HWACCMR0DumpDescriptor / HWACCMDumpRegs             →  HMR0DumpDescriptor / HMDumpRegs

Two functions change beyond the rename:

- HMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled) gains the enmSwitcher parameter instead of querying VMMGetSwitcher(pVM) itself. After checking that the switcher type is one that needs VT-x handling, it now treats the SUPR0EnableVTx case first: when g_HvmR0.vmx.fUsingSUPR0EnableVTx is set it lets the host suspend VT-x via SUPR0SuspendVTxOnCpu() regardless of whether VT-x is currently in use; otherwise it returns early when VT-x is not enabled or only local init is used, and only then disables VT-x on the current CPU.
- HMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled) now returns void instead of int. When VT-x was disabled it either calls SUPR0ResumeVTxOnCpu(fVTxDisabled) (SUPR0EnableVTx case) or re-enables VT-x itself with VMXR0EnableCpu() on the current CPU's VMXON page.
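A hypothetical caller-side sketch (not part of the changeset) of how the reworked switcher bracket is meant to be used; the function names and semantics come from the new signatures above, while the surrounding variables (pVM, enmSwitcher) and control flow are illustrative only.

    bool fVTxDisabled = false;

    /* May suspend VT-x on this CPU (via SUPR0SuspendVTxOnCpu() when the host provides
       the VT-x init API) or disable it directly when global init is in effect. */
    int rc = HMR0EnterSwitcher(pVM, enmSwitcher, &fVTxDisabled);
    if (RT_SUCCESS(rc))
    {
        /* ... run the 32 <-> 64-bit world switcher ... */

        /* No status code any more: re-enables VT-x only if it was disabled above. */
        HMR0LeaveSwitcher(pVM, fVTxDisabled);
    }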
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r43373 → r43387. Rename-only changes to the assembly wrappers:

- %include "VBox/vmm/hwacc_vmx.mac" → "VBox/vmm/hm_vmx.mac"; %include "HWACCMInternal.mac" → "HMInternal.mac"; both %include "HWACCMR0Mixed.mac" lines (32-bit and hybrid 64-bit kernel builds) become "HMR0Mixed.mac".
- The darwin workaround define HWACCM_64_BIT_USE_NULL_SEL becomes HM_64_BIT_USE_NULL_SEL, and all %ifndef uses in the MYPUSHSEGS64 / MYPOPSEGS64 macros follow.
- The procedures hwaccmR0Get64bitGDTRandIDTR and hwaccmR0Get64bitCR3 become hmR0Get64bitGDTRandIDTR and hmR0Get64bitCR3.
- The XMM-saving wrappers hwaccmR0VMXStartVMWrapXMM and hwaccmR0SVMRunWrapXMM become hmR0VMXStartVMWrapXMM and hmR0SVMRunWrapXMM; their callback types in the @cproto comments, PFNHWACCMVMXSTARTVM and PFNHWACCMSVMVMRUN, become PFNHMVMXSTARTVM and PFNHMSVMVMRUN.
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac
r43373 → r43387. Comment-only change: the file header now reads "HMR0Mixed.mac - Stuff that darwin needs to build two versions of" and "Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined" (previously HWACCMR0Mixed.mac / HWACCMR0A.asm).
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r43353 → r43387. The rename is applied to the AMD-V backend as well (LOG_GROUP_HM, <VBox/vmm/hm.h>, "HMInternal.h", <VBox/vmm/hm_svm.h>, pVM->hm.s.* / pVCpu->hm.s.* members, HM_CHANGED_* flags, VM_FF_HM_TO_R3_MASK / VMCPU_FF_HM_TO_R3_MASK). Hunks in this part of the file:

- hmR0SvmEnableCpu: the ignore-VMCB-in-use workaround is now keyed off pVM->hm.s.svm.fIgnoreInUseError.
- SVMR0InitVM: allocates the 12 KB IO bitmap (pVM->hm.s.svm.pMemObjIOBitmap / pIOBitmap / pIOBitmapPhys) and fills it so all port accesses are intercepted; sets pVM->hm.s.svm.fAlwaysFlushTLB on CPUs affected by AMD erratum 170; per VCPU it allocates the host-state page (pVCpu->hm.s.svm.pMemObjVMCBHost / pVMCBHost / pVMCBHostPhys), the VMCB page (pMemObjVMCB / pVMCB / pVMCBPhys) and the 8 KB MSR bitmap (pMemObjMSRBitmap / pMSRBitmap / pMSRBitmapPhys), zeroing the pages and defaulting the MSR bitmap to intercept every access.
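All of these buffers follow the same ring-0 allocation pattern. A minimal sketch, using the per-VCPU MSR bitmap as the example, with the field names as renamed above and the error handling trimmed:

    /* Two contiguous, non-executable ring-0 pages for the MSR permission bitmap. */
    int rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;

    /* Keep both the ring-0 mapping and the physical address (the VMCB wants the latter). */
    pVCpu->hm.s.svm.pMSRBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.pMemObjMSRBitmap);
    pVCpu->hm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.pMemObjMSRBitmap, 0);

    /* Default to intercepting every MSR access; hmR0SvmSetMSRPermission() opens holes later. */
    ASMMemFill32(pVCpu->hm.s.svm.pMSRBitmap, 2 << PAGE_SHIFT, 0xffffffff);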
- SVMR0TermVM: frees the per-VCPU host-state page, VMCB and MSR bitmap and the per-VM IO bitmap, resetting the corresponding hm.s.svm members.
- SVMR0SetupVM: per VCPU, validates pVCpu->hm.s.svm.pVMCB, programs the intercept masks, points the VMCB at pVM->hm.s.svm.pIOBitmapPhys and pVCpu->hm.s.svm.pMSRBitmapPhys, leaves LBR virtualization off, and adds the extra CR3 read/write intercepts when pVM->hm.s.fNestedPaging is not in use.
- hmR0SvmSetMSRPermission: computes the bit position inside pVCpu->hm.s.svm.pMSRBitmap (the range selection starts with ulMSR <= 0x00001FFF).
- hmR0SvmInjectEvent and interrupt dispatch: the injected-IRQ counters move to pVCpu->hm.s.paStatInjectedIrqsR0; a prematurely interrupted injection is replayed from pVCpu->hm.s.Event (StatIntReinject); interrupt races bump StatSwitchGuestIrq and freshly dispatched external interrupts bump StatIntInject.
- SVMR0LoadGuestState: each guest-state group is written to the VMCB only when the matching dirty flag is set in pVCpu->hm.s.fContextUseFlags (HM_CHANGED_GUEST_SEGMENT_REGS, _LDTR, _TR, _GDTR, _IDTR, _CR0, _CR3, _CR4, _DEBUG). CR0 handling keeps the old-style #MF override via pVCpu->hm.s.fFPUOldStyleOverride, CR3 comes from PGM (or the nested-paging shadow CR3), CR4 is derived from pVCpu->hm.s.enmShadowMode when nested paging is off, armed guest DRx registers disable the DRx move intercepts (StatDRxArmed), long-mode guests select SVMR0VMRun64 / SVMR0VMSwitcherRun64 and 32-bit guests select SVMR0VMRun for pVCpu->hm.s.svm.pfnVMRun, and RDTSC(P) is either offset (StatTSCOffset) or intercepted (StatTSCIntercept / StatTSCInterceptOverFlow). The function ends by clearing HM_CHANGED_ALL_GUEST.
- hmR0SvmSetupTLB: forces a flush and a new ASID when the VCPU was rescheduled to another host CPU or the CPU's flush count changed, honours pVM->hm.s.svm.fAlwaysFlushTLB, wraps pCpu->uCurrentASID at pVM->hm.s.uMaxASID, prefers SVM_TLB_FLUSH_SINGLE_CONTEXT when the flush-by-ASID feature is present, replays queued pages from pVCpu->hm.s.TlbShootdown, writes the ASID into the VMCB and updates the flush statistics.
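A condensed sketch of the ASID bookkeeping described above, reconstructed from the hunks; the flush-by-ASID and TLB-shootdown branches are omitted, so this shows only the fallback full-flush path.

    bool fNewASID = false;
    if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu         /* rescheduled to another host CPU */
        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)  /* that CPU flushed its TLB since we last ran there */
    {
        pVCpu->hm.s.fForceTLBFlush = true;
        fNewASID = true;
    }

    if (pVCpu->hm.s.fForceTLBFlush && fNewASID)
    {
        if (++pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
        {
            pCpu->uCurrentASID = 1;                       /* ASID 0 belongs to the host: wrap and flush */
            pCpu->cTLBFlushes++;
            pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
        }
        pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
        pVCpu->hm.s.cTLBFlushes  = pCpu->cTLBFlushes;
    }
    pVCpu->hm.s.fForceTLBFlush = false;

    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentASID;  /* tell the VMCB which ASID to run with */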
- SVMR0RunGuestCode (resume loop): starts the StatEntry/StatExit1/StatExit2 profiling and fetches pVCpu->hm.s.svm.pVMCB; on each iteration it asserts that no suspend is pending (HMR0SuspendPending), bails out with VINF_EM_RAW_INTERRUPT once cResume exceeds pVM->hm.s.cMaxResumeLoops (StatExitMaxResume), returns to ring-3 when VM_FF_HM_TO_R3_MASK / VMCPU_FF_HM_TO_R3_MASK or related force-flags are pending (StatSwitchToR3), leaves early when host preemption is pending (StatExitPreemptPending), and refreshes the guest TPR cache, via CR8 or, with pVM->hm.s.fTPRPatchingActive, via the LSTAR MSR patch, when pVM->hm.s.fHasIoApic is set.
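The shape of that loop after the rename, as a heavily abridged sketch: statistics, TPR handling and most of the force-flag checks are left out, and the omitted middle is only hinted at.

    ResumeExecution:
        Assert(!HMR0SuspendPending());

        /* Safety net: looping on VMRUN for too long without returning to ring-3 hurts the host. */
        if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops))
        {
            rc = VINF_EM_RAW_INTERRUPT;
            goto end;
        }

        /* Let ring-3 service pending requests, page-pool flushes, DMA and similar work. */
        if (   VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST)
            || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
        {
            rc = VINF_EM_RAW_TO_R3;
            goto end;
        }

        /* ... inject events, load guest state, set up the TLB, execute VMRUN, handle the #VMEXIT ... */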
*/ 1401 pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->h waccm.s.fNestedPaging;1401 pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging; 1402 1402 1403 1403 #ifdef LOG_ENABLED 1404 pCpu = H WACCMR0GetCurrentCpu();1405 if (pVCpu->h waccm.s.idLastCpu != pCpu->idCpu)1406 LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->h waccm.s.idLastCpu, pCpu->idCpu));1407 else if (pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)1408 LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->h waccm.s.cTLBFlushes, pCpu->cTLBFlushes));1404 pCpu = HMR0GetCurrentCpu(); 1405 if (pVCpu->hm.s.idLastCpu != pCpu->idCpu) 1406 LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu)); 1407 else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 1408 LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes)); 1409 1409 else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH)) 1410 1410 LogFlow(("Manual TLB flush\n")); … … 1438 1438 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); 1439 1439 #endif 1440 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);1440 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x); 1441 1441 1442 1442 /* Setup TLB control and ASID in the VMCB. */ … … 1444 1444 1445 1445 /* In case we execute a goto ResumeExecution later on. */ 1446 pVCpu->h waccm.s.fResumeVM = true;1447 pVCpu->h waccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;1448 1449 Assert(sizeof(pVCpu->h waccm.s.svm.pVMCBPhys) == 8);1446 pVCpu->hm.s.fResumeVM = true; 1447 pVCpu->hm.s.fForceTLBFlush = pVM->hm.s.svm.fAlwaysFlushTLB; 1448 1449 Assert(sizeof(pVCpu->hm.s.svm.pVMCBPhys) == 8); 1450 1450 Assert(pVMCB->ctrl.IntCtrl.n.u1VIrqMasking); 1451 Assert(pVMCB->ctrl.u64IOPMPhysAddr == pVM->h waccm.s.svm.pIOBitmapPhys);1452 Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->h waccm.s.svm.pMSRBitmapPhys);1451 Assert(pVMCB->ctrl.u64IOPMPhysAddr == pVM->hm.s.svm.pIOBitmapPhys); 1452 Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.pMSRBitmapPhys); 1453 1453 Assert(pVMCB->ctrl.u64LBRVirt == 0); 1454 1454 … … 1462 1462 * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}. 
1463 1463 */ 1464 u32HostExtFeatures = pVM->h waccm.s.cpuid.u32AMDFeatureEDX;1464 u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX; 1465 1465 if ( (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP) 1466 1466 && !(pVMCB->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP)) 1467 1467 { 1468 pVCpu->h waccm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);1468 pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX); 1469 1469 uint64_t u64GuestTSCAux = 0; 1470 1470 rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux); … … 1474 1474 1475 1475 #ifdef VBOX_WITH_KERNEL_USING_XMM 1476 h waccmR0SVMRunWrapXMM(pVCpu->hwaccm.s.svm.pVMCBHostPhys, pVCpu->hwaccm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu,1477 pVCpu->h waccm.s.svm.pfnVMRun);1476 hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.pVMCBHostPhys, pVCpu->hm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu, 1477 pVCpu->hm.s.svm.pfnVMRun); 1478 1478 #else 1479 pVCpu->h waccm.s.svm.pfnVMRun(pVCpu->hwaccm.s.svm.pVMCBHostPhys, pVCpu->hwaccm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu);1480 #endif 1481 ASMAtomicWriteBool(&pVCpu->h waccm.s.fCheckedTLBFlush, false);1482 ASMAtomicIncU32(&pVCpu->h waccm.s.cWorldSwitchExits);1479 pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.pVMCBHostPhys, pVCpu->hm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu); 1480 #endif 1481 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); 1482 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); 1483 1483 /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */ 1484 1484 if (!(pVMCB->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC)) … … 1486 1486 /* Restore host's TSC_AUX. */ 1487 1487 if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP) 1488 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->h waccm.s.u64HostTSCAux);1488 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux); 1489 1489 1490 1490 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + … … 1493 1493 TMNotifyEndOfExecution(pVCpu); 1494 1494 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED); 1495 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatInGC, &pVCpu->hwaccm.s.StatExit1, x);1495 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x); 1496 1496 ASMSetFlags(uOldEFlags); 1497 1497 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION … … 1510 1510 if (RT_UNLIKELY(exitCode == (uint64_t)SVM_EXIT_INVALID)) /* Invalid guest state. */ 1511 1511 { 1512 H WACCMDumpRegs(pVM, pVCpu, pCtx);1512 HMDumpRegs(pVM, pVCpu, pCtx); 1513 1513 #ifdef DEBUG 1514 1514 Log(("ctrl.u16InterceptRdCRx %x\n", pVMCB->ctrl.u16InterceptRdCRx)); … … 1713 1713 * unless in the nested paging case where CR3 can be changed by the guest. 1714 1714 */ 1715 if ( pVM->h waccm.s.fNestedPaging1715 if ( pVM->hm.s.fNestedPaging 1716 1716 && pCtx->cr3 != pVMCB->guest.u64CR3) 1717 1717 { … … 1740 1740 1741 1741 /* Check if an injected event was interrupted prematurely. */ 1742 pVCpu->h waccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];1742 pVCpu->hm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0]; 1743 1743 if ( pVMCB->ctrl.ExitIntInfo.n.u1Valid 1744 1744 /* we don't care about 'int xx' as the instruction will be restarted. 
*/ 1745 1745 && pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT) 1746 1746 { 1747 Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->h waccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));1747 Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode)); 1748 1748 1749 1749 #ifdef LOG_ENABLED 1750 1750 SVM_EVENT Event; 1751 Event.au64[0] = pVCpu->h waccm.s.Event.intInfo;1751 Event.au64[0] = pVCpu->hm.s.Event.intInfo; 1752 1752 1753 1753 if ( exitCode == SVM_EXIT_EXCEPTION_E … … 1758 1758 #endif 1759 1759 1760 pVCpu->h waccm.s.Event.fPending = true;1760 pVCpu->hm.s.Event.fPending = true; 1761 1761 /* Error code present? (redundant) */ 1762 1762 if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid) 1763 pVCpu->h waccm.s.Event.errCode = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;1763 pVCpu->hm.s.Event.errCode = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode; 1764 1764 else 1765 pVCpu->h waccm.s.Event.errCode = 0;1765 pVCpu->hm.s.Event.errCode = 0; 1766 1766 } 1767 1767 #ifdef VBOX_WITH_STATISTICS 1768 1768 if (exitCode == SVM_EXIT_NPF) 1769 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitReasonNPF);1769 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNPF); 1770 1770 else 1771 STAM_COUNTER_INC(&pVCpu->h waccm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);1771 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]); 1772 1772 #endif 1773 1773 … … 1775 1775 if (fSyncTPR) 1776 1776 { 1777 if (pVM->h waccm.s.fTPRPatchingActive)1777 if (pVM->hm.s.fTPRPatchingActive) 1778 1778 { 1779 1779 if ((pCtx->msrLSTAR & 0xff) != u8LastTPR) … … 1804 1804 pVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX); 1805 1805 #endif 1806 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x);1806 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x); 1807 1807 1808 1808 /* Deal with the reason of the VM-exit. */ … … 1827 1827 case X86_XCPT_DB: 1828 1828 { 1829 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestDB);1829 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); 1830 1830 1831 1831 /* Note that we don't support guest and host-initiated debugging at the same time. */ … … 1861 1861 { 1862 1862 Assert(CPUMIsGuestFPUStateActive(pVCpu)); 1863 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowNM);1863 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM); 1864 1864 1865 1865 /* Continue execution. */ 1866 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;1866 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 1867 1867 1868 1868 goto ResumeExecution; … … 1870 1870 1871 1871 Log(("Forward #NM fault to the guest\n")); 1872 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestNM);1872 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM); 1873 1873 1874 1874 Event.au64[0] = 0; … … 1887 1887 1888 1888 #ifdef VBOX_ALWAYS_TRAP_PF 1889 if (pVM->h waccm.s.fNestedPaging)1889 if (pVM->hm.s.fNestedPaging) 1890 1890 { 1891 1891 /* … … 1894 1894 Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, 1895 1895 uFaultAddress, errCode, (RTGCPTR)pCtx->rsp)); 1896 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestPF);1896 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); 1897 1897 1898 1898 /* Now we must update CR2. 
*/ … … 1910 1910 } 1911 1911 #endif 1912 Assert(!pVM->h waccm.s.fNestedPaging);1913 1914 #ifdef VBOX_H WACCM_WITH_GUEST_PATCHING1912 Assert(!pVM->hm.s.fNestedPaging); 1913 1914 #ifdef VBOX_HM_WITH_GUEST_PATCHING 1915 1915 /* Shortcut for APIC TPR reads and writes; 32 bits guests only */ 1916 if ( pVM->h waccm.s.fTRPPatchingAllowed1916 if ( pVM->hm.s.fTRPPatchingAllowed 1917 1917 && (uFaultAddress & 0xfff) == 0x080 1918 1918 && !(errCode & X86_TRAP_PF_P) /* not present */ 1919 1919 && CPUMGetGuestCPL(pVCpu) == 0 1920 1920 && !CPUMIsGuestInLongModeEx(pCtx) 1921 && pVM->h waccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))1921 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches)) 1922 1922 { 1923 1923 RTGCPHYS GCPhysApicBase, GCPhys; … … 1930 1930 { 1931 1931 /* Only attempt to patch the instruction once. */ 1932 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);1932 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 1933 1933 if (!pPatch) 1934 1934 { 1935 rc = VINF_EM_H WACCM_PATCH_TPR_INSTR;1935 rc = VINF_EM_HM_PATCH_TPR_INSTR; 1936 1936 break; 1937 1937 } … … 1953 1953 /* We've successfully synced our shadow pages, so let's just continue execution. */ 1954 1954 Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode)); 1955 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowPF);1955 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); 1956 1956 1957 1957 TRPMResetTrap(pVCpu); … … 1964 1964 */ 1965 1965 Log2(("Forward page fault to the guest\n")); 1966 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestPF);1966 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); 1967 1967 /* The error code might have been changed. */ 1968 1968 errCode = TRPMGetErrorCode(pVCpu); … … 1994 1994 case X86_XCPT_MF: /* Floating point exception. 
*/ 1995 1995 { 1996 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestMF);1996 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); 1997 1997 if (!(pCtx->cr0 & X86_CR0_NE)) 1998 1998 { … … 2029 2029 { 2030 2030 case X86_XCPT_GP: 2031 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestGP);2031 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP); 2032 2032 Event.n.u1ErrorCodeValid = 1; 2033 2033 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */ … … 2037 2037 break; 2038 2038 case X86_XCPT_DE: 2039 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestDE);2039 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); 2040 2040 break; 2041 2041 case X86_XCPT_UD: 2042 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestUD);2042 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); 2043 2043 break; 2044 2044 case X86_XCPT_SS: 2045 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestSS);2045 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); 2046 2046 Event.n.u1ErrorCodeValid = 1; 2047 2047 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */ 2048 2048 break; 2049 2049 case X86_XCPT_NP: 2050 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestNP);2050 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); 2051 2051 Event.n.u1ErrorCodeValid = 1; 2052 2052 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */ … … 2074 2074 PGMMODE enmShwPagingMode; 2075 2075 2076 Assert(pVM->h waccm.s.fNestedPaging);2076 Assert(pVM->hm.s.fNestedPaging); 2077 2077 LogFlow(("Nested page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode)); 2078 2078 2079 #ifdef VBOX_H WACCM_WITH_GUEST_PATCHING2079 #ifdef VBOX_HM_WITH_GUEST_PATCHING 2080 2080 /* Shortcut for APIC TPR reads and writes; 32 bits guests only */ 2081 if ( pVM->h waccm.s.fTRPPatchingAllowed2081 if ( pVM->hm.s.fTRPPatchingAllowed 2082 2082 && (GCPhysFault & PAGE_OFFSET_MASK) == 0x080 2083 2083 && ( !(errCode & X86_TRAP_PF_P) /* not present */ … … 2085 2085 && CPUMGetGuestCPL(pVCpu) == 0 2086 2086 && !CPUMIsGuestInLongModeEx(pCtx) 2087 && pVM->h waccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))2087 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches)) 2088 2088 { 2089 2089 RTGCPHYS GCPhysApicBase; … … 2094 2094 { 2095 2095 /* Only attempt to patch the instruction once. */ 2096 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);2096 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 2097 2097 if (!pPatch) 2098 2098 { 2099 rc = VINF_EM_H WACCM_PATCH_TPR_INSTR;2099 rc = VINF_EM_HM_PATCH_TPR_INSTR; 2100 2100 break; 2101 2101 } … … 2153 2153 /* We've successfully synced our shadow pages, so let's just continue execution. */ 2154 2154 Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode)); 2155 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowPF);2155 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); 2156 2156 2157 2157 TRPMResetTrap(pVCpu); … … 2186 2186 case SVM_EXIT_WBINVD: 2187 2187 case SVM_EXIT_INVD: /* Guest software attempted to execute INVD. */ 2188 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInvd);2188 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd); 2189 2189 /* Skip instruction and continue directly. */ 2190 2190 pCtx->rip += 2; /* Note! hardcoded opcode size! 
*/ … … 2195 2195 { 2196 2196 Log2(("SVM: Cpuid at %RGv for %x\n", (RTGCPTR)pCtx->rip, pCtx->eax)); 2197 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCpuid);2197 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid); 2198 2198 rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2199 2199 if (rc == VINF_SUCCESS) … … 2211 2211 { 2212 2212 Log2(("SVM: Rdtsc\n")); 2213 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdtsc);2213 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc); 2214 2214 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2215 2215 if (rc == VINF_SUCCESS) … … 2226 2226 { 2227 2227 Log2(("SVM: Rdpmc %x\n", pCtx->ecx)); 2228 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdpmc);2228 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc); 2229 2229 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2230 2230 if (rc == VINF_SUCCESS) … … 2241 2241 { 2242 2242 Log2(("SVM: Rdtscp\n")); 2243 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdtscp);2243 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp); 2244 2244 rc = EMInterpretRdtscp(pVM, pVCpu, pCtx); 2245 2245 if (rc == VINF_SUCCESS) … … 2257 2257 { 2258 2258 Log2(("SVM: invlpg\n")); 2259 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInvlpg);2260 2261 Assert(!pVM->h waccm.s.fNestedPaging);2259 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg); 2260 2261 Assert(!pVM->hm.s.fNestedPaging); 2262 2262 2263 2263 /* Truly a pita. Why can't SVM give the same information as VT-x? */ … … 2265 2265 if (rc == VINF_SUCCESS) 2266 2266 { 2267 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushPageInvlpg);2267 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageInvlpg); 2268 2268 goto ResumeExecution; /* eip already updated */ 2269 2269 } … … 2277 2277 { 2278 2278 Log2(("SVM: %RGv mov cr%d, \n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_CR0)); 2279 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]);2279 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]); 2280 2280 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0); 2281 2281 … … 2283 2283 { 2284 2284 case 0: 2285 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;2285 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 2286 2286 break; 2287 2287 case 2: 2288 2288 break; 2289 2289 case 3: 2290 Assert(!pVM->h waccm.s.fNestedPaging);2291 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;2290 Assert(!pVM->hm.s.fNestedPaging); 2291 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3; 2292 2292 break; 2293 2293 case 4: 2294 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;2294 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4; 2295 2295 break; 2296 2296 case 8: … … 2315 2315 { 2316 2316 Log2(("SVM: %RGv mov x, cr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_CR0)); 2317 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]);2317 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]); 2318 2318 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0); 2319 2319 if (rc == VINF_SUCCESS) … … 2333 2333 { 2334 2334 Log2(("SVM: %RGv mov dr%d, x\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_DR0)); 2335 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxWrite);2335 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 2336 2336 2337 2337 if ( !DBGFIsStepping(pVCpu) 2338 2338 && !CPUMIsHyperDebugStateActive(pVCpu)) 2339 2339 { 2340 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxContextSwitch);2340 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch); 2341 2341 2342 2342 /* Disable drx move intercepts. 
*/ … … 2354 2354 { 2355 2355 /* EIP has been updated already. */ 2356 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;2356 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 2357 2357 2358 2358 /* Only resume if successful. */ … … 2369 2369 { 2370 2370 Log2(("SVM: %RGv mov x, dr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_DR0)); 2371 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxRead);2371 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 2372 2372 2373 2373 if (!DBGFIsStepping(pVCpu)) 2374 2374 { 2375 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxContextSwitch);2375 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch); 2376 2376 2377 2377 /* Disable DRx move intercepts. */ … … 2415 2415 { 2416 2416 /* ins/outs */ 2417 PDISCPUSTATE pDis = &pVCpu->h waccm.s.DisState;2417 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 2418 2418 2419 2419 /* Disassemble manually to deal with segment prefixes. */ … … 2424 2424 { 2425 2425 Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize)); 2426 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOStringWrite);2426 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite); 2427 2427 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix, 2428 2428 (DISCPUMODE)pDis->uAddrMode, uIOSize); … … 2431 2431 { 2432 2432 Log2(("IOMInterpretINSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize)); 2433 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOStringRead);2433 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead); 2434 2434 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix, 2435 2435 (DISCPUMODE)pDis->uAddrMode, uIOSize); … … 2448 2448 Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, 2449 2449 uIOSize)); 2450 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOWrite);2450 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite); 2451 2451 rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize); 2452 2452 if (rc == VINF_IOM_R3_IOPORT_WRITE) 2453 2453 { 2454 H WACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,2454 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, 2455 2455 uAndVal, uIOSize); 2456 2456 } … … 2460 2460 uint32_t u32Val = 0; 2461 2461 2462 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIORead);2462 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead); 2463 2463 rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize); 2464 2464 if (IOM_SUCCESS(rc)) … … 2471 2471 else if (rc == VINF_IOM_R3_IOPORT_READ) 2472 2472 { 2473 H WACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,2473 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, 2474 2474 uAndVal, uIOSize); 2475 2475 } … … 2493 2493 static uint32_t const aIOSize[4] = { 1, 2, 0, 4 }; 2494 2494 2495 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxIOCheck);2495 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIOCheck); 2496 2496 for (unsigned i = 0; i < 4; i++) 2497 2497 { … … 2566 2566 case SVM_EXIT_HLT: 2567 2567 /* Check if external interrupts are pending; if so, don't switch back. 
*/ 2568 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitHlt);2568 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 2569 2569 pCtx->rip++; /* skip hlt */ 2570 2570 if (EMShouldContinueAfterHalt(pVCpu, pCtx)) … … 2576 2576 case SVM_EXIT_MWAIT_UNCOND: 2577 2577 Log2(("SVM: mwait\n")); 2578 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMwait);2578 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait); 2579 2579 rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2580 2580 if ( rc == VINF_EM_HALT … … 2598 2598 Log2(("SVM: monitor\n")); 2599 2599 2600 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMonitor);2600 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor); 2601 2601 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2602 2602 if (rc == VINF_SUCCESS) … … 2644 2644 { 2645 2645 /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */ 2646 if ( pVM->h waccm.s.fTPRPatchingActive2646 if ( pVM->hm.s.fTPRPatchingActive 2647 2647 && pCtx->ecx == MSR_K8_LSTAR 2648 2648 && pVMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */) … … 2668 2668 * so we play safe by completely disassembling the instruction. 2669 2669 */ 2670 STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->h waccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);2670 STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr); 2671 2671 Log(("SVM: %s\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr")); 2672 2672 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0); … … 2685 2685 Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pVMCB->ctrl.u64ExitInfo2)); 2686 2686 if ( !(pVMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP)) 2687 && pVCpu->h waccm.s.Event.fPending)2687 && pVCpu->hm.s.Event.fPending) 2688 2688 { 2689 2689 SVM_EVENT Event; 2690 Event.au64[0] = pVCpu->h waccm.s.Event.intInfo;2690 Event.au64[0] = pVCpu->hm.s.Event.intInfo; 2691 2691 2692 2692 /* Caused by an injected interrupt. */ 2693 pVCpu->h waccm.s.Event.fPending = false;2693 pVCpu->hm.s.Event.fPending = false; 2694 2694 switch (Event.n.u3Type) 2695 2695 { … … 2758 2758 if (exitCode == SVM_EXIT_INTR) 2759 2759 { 2760 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatPendingHostIrq);2760 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq); 2761 2761 /* On the next entry we'll only sync the host context. */ 2762 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;2762 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT; 2763 2763 } 2764 2764 else … … 2767 2767 /** @todo we can do better than this */ 2768 2768 /* Not in the VINF_PGM_CHANGE_MODE though! 
*/ 2769 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;2769 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL; 2770 2770 } 2771 2771 … … 2783 2783 #endif 2784 2784 2785 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2, x);2786 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit1, x);2787 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatEntry, x);2785 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x); 2786 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x); 2787 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x); 2788 2788 return VBOXSTRICTRC_TODO(rc); 2789 2789 } … … 2809 2809 uint8_t u8Tpr; 2810 2810 2811 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);2811 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 2812 2812 if (!pPatch) 2813 2813 break; … … 2815 2815 switch (pPatch->enmType) 2816 2816 { 2817 case H WACCMTPRINSTR_READ:2817 case HMTPRINSTR_READ: 2818 2818 /* TPR caching in CR8 */ 2819 2819 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending); … … 2827 2827 break; 2828 2828 2829 case H WACCMTPRINSTR_WRITE_REG:2830 case H WACCMTPRINSTR_WRITE_IMM:2829 case HMTPRINSTR_WRITE_REG: 2830 case HMTPRINSTR_WRITE_IMM: 2831 2831 /* Fetch the new TPR value */ 2832 if (pPatch->enmType == H WACCMTPRINSTR_WRITE_REG)2832 if (pPatch->enmType == HMTPRINSTR_WRITE_REG) 2833 2833 { 2834 2834 uint32_t val; … … 2865 2865 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu) 2866 2866 { 2867 Assert(pVM->h waccm.s.svm.fSupported);2868 2869 LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->h waccm.s.idLastCpu, pVCpu->hwaccm.s.uCurrentASID));2870 pVCpu->h waccm.s.fResumeVM = false;2867 Assert(pVM->hm.s.svm.fSupported); 2868 2869 LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentASID)); 2870 pVCpu->hm.s.fResumeVM = false; 2871 2871 2872 2872 /* Force to reload LDTR, so we'll execute VMLoad to load additional guest state. */ 2873 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;2873 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_LDTR; 2874 2874 2875 2875 return VINF_SUCCESS; … … 2887 2887 VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2888 2888 { 2889 SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->h waccm.s.svm.pVMCB;2890 2891 Assert(pVM->h waccm.s.svm.fSupported);2889 SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB; 2890 2891 Assert(pVM->hm.s.svm.fSupported); 2892 2892 2893 2893 #ifdef DEBUG … … 2908 2908 2909 2909 /* Resync the debug registers the next time. */ 2910 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;2910 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 2911 2911 } 2912 2912 else … … 2979 2979 if (CPUMGetGuestCodeBits(pVCpu) != 16) 2980 2980 { 2981 PDISSTATE pDis = &pVCpu->h waccm.s.DisState;2981 PDISSTATE pDis = &pVCpu->hm.s.DisState; 2982 2982 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL); 2983 2983 if (RT_SUCCESS(rc) && pDis->pCurInstr->uOpcode == OP_INVLPG) … … 3003 3003 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt) 3004 3004 { 3005 bool fFlushPending = pVM->h waccm.s.svm.fAlwaysFlushTLB | VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH);3005 bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB | VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH); 3006 3006 3007 3007 /* Skip it if a TLB flush is already pending. 
*/ … … 3012 3012 Log2(("SVMR0InvalidatePage %RGv\n", GCVirt)); 3013 3013 AssertReturn(pVM, VERR_INVALID_PARAMETER); 3014 Assert(pVM->h waccm.s.svm.fSupported);3015 3016 pVMCB = (SVM_VMCB *)pVCpu->h waccm.s.svm.pVMCB;3014 Assert(pVM->hm.s.svm.fSupported); 3015 3016 pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB; 3017 3017 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB); 3018 3018 … … 3040 3040 VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys) 3041 3041 { 3042 Assert(pVM->h waccm.s.fNestedPaging);3042 Assert(pVM->hm.s.fNestedPaging); 3043 3043 /* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */ 3044 3044 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 3045 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushTLBInvlpga);3045 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBInvlpga); 3046 3046 return VINF_SUCCESS; 3047 3047 } … … 3069 3069 aParam[3] = (uint32_t)(pVMCBPhys >> 32); /* Param 2: pVMCBPhys - Hi. */ 3070 3070 3071 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->h waccm.s.pfnSVMGCVMRun64, 4, &aParam[0]);3071 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSVMGCVMRun64, 4, &aParam[0]); 3072 3072 } 3073 3073 … … 3105 3105 CPUMPushHyper(pVCpu, paParam[i]); 3106 3106 3107 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatWorldSwitch3264, z);3107 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z); 3108 3108 /* Call switcher. */ 3109 rc = pVM->h waccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));3110 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatWorldSwitch3264, z);3109 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum)); 3110 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z); 3111 3111 3112 3112 ASMSetFlags(uOldEFlags); -
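Note on the SVM ring-0 (SVMR0*) hunks above: the substance of the change is the mechanical HWACCM -> HM rename. Per-VM state moves from pVM->hwaccm.s to pVM->hm.s, per-VCPU state from pVCpu->hwaccm.s to pVCpu->hm.s, and the HWACCM_CHANGED_*, VERR_HWACCM_* and HWACCMR0* names drop the WACC infix. The sketch below is illustrative only and is not part of the changeset; the helper function is hypothetical, while the headers, structure members, flags and macros it uses all appear verbatim in the hunks above.

/* Illustrative sketch: how an SVM ring-0 call site reads after r43387. */
#include <VBox/vmm/hm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>

static void exampleMarkGuestCr0Dirty(PVM pVM, PVMCPU pVCpu)
{
    /* Before r43387: pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0; */
    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;   /* re-sync CR0 into the VMCB on the next entry */

    /* Before r43387: pVM->hwaccm.s.fNestedPaging, VERR_HWACCM_*, HWACCMR0*() */
    if (pVM->hm.s.fNestedPaging)                             /* feature flags keep their names, only the prefix changes */
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);             /* queue a full TLB flush, as SVMR0InvalidatePhysPage does */
}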
trunk/src/VBox/VMM/VMMR0/HWSVMR0.h
r43307 r43387 24 24 #include <VBox/vmm/stam.h> 25 25 #include <VBox/dis.h> 26 #include <VBox/vmm/h waccm.h>26 #include <VBox/vmm/hm.h> 27 27 #include <VBox/vmm/pgm.h> 28 #include <VBox/vmm/h wacc_svm.h>28 #include <VBox/vmm/hm_svm.h> 29 29 30 30 RT_C_DECLS_BEGIN -
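For reference, the HWSVMR0.h hunk above only touches the public header names; reconstructed from its context lines, the include block after r43387 reads as follows (the header file itself keeps its old name):

/* HWSVMR0.h includes after r43387, reconstructed from the hunk above. */
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>        /* was <VBox/vmm/hwaccm.h> */
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/hm_svm.h>    /* was <VBox/vmm/hwacc_svm.h> */

RT_C_DECLS_BEGIN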
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r43379 r43387 20 20 * Header Files * 21 21 *******************************************************************************/ 22 #define LOG_GROUP LOG_GROUP_H WACCM22 #define LOG_GROUP LOG_GROUP_HM 23 23 #include <iprt/asm-amd64-x86.h> 24 #include <VBox/vmm/h waccm.h>24 #include <VBox/vmm/hm.h> 25 25 #include <VBox/vmm/pgm.h> 26 26 #include <VBox/vmm/dbgf.h> … … 32 32 #endif 33 33 #include <VBox/vmm/tm.h> 34 #include "H WACCMInternal.h"34 #include "HMInternal.h" 35 35 #include <VBox/vmm/vm.h> 36 36 #include <VBox/vmm/pdmapi.h> … … 70 70 71 71 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 72 /** See H WACCMR0A.asm. */72 /** See HMR0A.asm. */ 73 73 extern "C" uint32_t g_fVMXIs64bitHost; 74 74 #endif … … 90 90 91 91 /** 92 * Updates error from VMCS to H WACCMCPU's lasterror record.92 * Updates error from VMCS to HMCPU's lasterror record. 93 93 * 94 94 * @param pVM Pointer to the VM. … … 103 103 104 104 VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError); 105 pVCpu->h waccm.s.vmx.lasterror.ulInstrError = instrError;106 } 107 pVM->h waccm.s.lLastError = rc;105 pVCpu->hm.s.vmx.lasterror.ulInstrError = instrError; 106 } 107 pVM->hm.s.lLastError = rc; 108 108 } 109 109 … … 130 130 { 131 131 /* Set revision dword at the beginning of the VMXON structure. */ 132 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->h waccm.s.vmx.msr.vmx_basic_info);132 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info); 133 133 } 134 134 … … 165 165 */ 166 166 if ( pVM 167 && pVM->h waccm.s.vmx.fVPID168 && (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS))167 && pVM->hm.s.vmx.fVPID 168 && (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)) 169 169 { 170 170 hmR0VmxFlushVPID(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */); … … 224 224 #endif 225 225 226 pVM->h waccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;227 228 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)226 pVM->hm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ; 227 228 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 229 229 { 230 230 /* Allocate one page for the APIC physical page (serves for filtering accesses). 
*/ 231 rc = RTR0MemObjAllocCont(&pVM->h waccm.s.vmx.pMemObjAPIC, PAGE_SIZE, false /* executable R0 mapping */);231 rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.pMemObjAPIC, PAGE_SIZE, false /* executable R0 mapping */); 232 232 AssertRC(rc); 233 233 if (RT_FAILURE(rc)) 234 234 return rc; 235 235 236 pVM->h waccm.s.vmx.pAPIC = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjAPIC);237 pVM->h waccm.s.vmx.pAPICPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjAPIC, 0);238 ASMMemZero32(pVM->h waccm.s.vmx.pAPIC, PAGE_SIZE);236 pVM->hm.s.vmx.pAPIC = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.pMemObjAPIC); 237 pVM->hm.s.vmx.pAPICPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.pMemObjAPIC, 0); 238 ASMMemZero32(pVM->hm.s.vmx.pAPIC, PAGE_SIZE); 239 239 } 240 240 else 241 241 { 242 pVM->h waccm.s.vmx.pMemObjAPIC = 0;243 pVM->h waccm.s.vmx.pAPIC = 0;244 pVM->h waccm.s.vmx.pAPICPhys = 0;242 pVM->hm.s.vmx.pMemObjAPIC = 0; 243 pVM->hm.s.vmx.pAPIC = 0; 244 pVM->hm.s.vmx.pAPICPhys = 0; 245 245 } 246 246 247 247 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 248 248 { 249 rc = RTR0MemObjAllocCont(&pVM->h waccm.s.vmx.pMemObjScratch, PAGE_SIZE, false /* executable R0 mapping */);249 rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.pMemObjScratch, PAGE_SIZE, false /* executable R0 mapping */); 250 250 AssertRC(rc); 251 251 if (RT_FAILURE(rc)) 252 252 return rc; 253 253 254 pVM->h waccm.s.vmx.pScratch = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjScratch);255 pVM->h waccm.s.vmx.pScratchPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjScratch, 0);256 257 ASMMemZero32(pVM->h waccm.s.vmx.pScratch, PAGE_SIZE);258 strcpy((char *)pVM->h waccm.s.vmx.pScratch, "SCRATCH Magic");259 *(uint64_t *)(pVM->h waccm.s.vmx.pScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);254 pVM->hm.s.vmx.pScratch = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.pMemObjScratch); 255 pVM->hm.s.vmx.pScratchPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.pMemObjScratch, 0); 256 257 ASMMemZero32(pVM->hm.s.vmx.pScratch, PAGE_SIZE); 258 strcpy((char *)pVM->hm.s.vmx.pScratch, "SCRATCH Magic"); 259 *(uint64_t *)(pVM->hm.s.vmx.pScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF); 260 260 } 261 261 #endif … … 266 266 PVMCPU pVCpu = &pVM->aCpus[i]; 267 267 268 pVCpu->h waccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;268 pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ; 269 269 270 270 /* Allocate one page for the VM control structure (VMCS). */ 271 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.hMemObjVMCS, PAGE_SIZE, false /* executable R0 mapping */);271 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVMCS, PAGE_SIZE, false /* executable R0 mapping */); 272 272 AssertRC(rc); 273 273 if (RT_FAILURE(rc)) 274 274 return rc; 275 275 276 pVCpu->h waccm.s.vmx.pvVMCS = RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVMCS);277 pVCpu->h waccm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVMCS, 0);278 ASMMemZeroPage(pVCpu->h waccm.s.vmx.pvVMCS);279 280 pVCpu->h waccm.s.vmx.cr0_mask = 0;281 pVCpu->h waccm.s.vmx.cr4_mask = 0;276 pVCpu->hm.s.vmx.pvVMCS = RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVMCS); 277 pVCpu->hm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVMCS, 0); 278 ASMMemZeroPage(pVCpu->hm.s.vmx.pvVMCS); 279 280 pVCpu->hm.s.vmx.cr0_mask = 0; 281 pVCpu->hm.s.vmx.cr4_mask = 0; 282 282 283 283 /* Allocate one page for the virtual APIC page for TPR caching. 
*/ 284 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.hMemObjVAPIC, PAGE_SIZE, false /* executable R0 mapping */);284 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVAPIC, PAGE_SIZE, false /* executable R0 mapping */); 285 285 AssertRC(rc); 286 286 if (RT_FAILURE(rc)) 287 287 return rc; 288 288 289 pVCpu->h waccm.s.vmx.pbVAPIC = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVAPIC);290 pVCpu->h waccm.s.vmx.HCPhysVAPIC = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVAPIC, 0);291 ASMMemZeroPage(pVCpu->h waccm.s.vmx.pbVAPIC);289 pVCpu->hm.s.vmx.pbVAPIC = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVAPIC); 290 pVCpu->hm.s.vmx.HCPhysVAPIC = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVAPIC, 0); 291 ASMMemZeroPage(pVCpu->hm.s.vmx.pbVAPIC); 292 292 293 293 /* Allocate the MSR bitmap if this feature is supported. */ 294 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)295 { 296 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.pMemObjMSRBitmap, PAGE_SIZE, false /* executable R0 mapping */);294 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 295 { 296 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjMSRBitmap, PAGE_SIZE, false /* executable R0 mapping */); 297 297 AssertRC(rc); 298 298 if (RT_FAILURE(rc)) 299 299 return rc; 300 300 301 pVCpu->h waccm.s.vmx.pMSRBitmap = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap);302 pVCpu->h waccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 0);303 memset(pVCpu->h waccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);301 pVCpu->hm.s.vmx.pMSRBitmap = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjMSRBitmap); 302 pVCpu->hm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjMSRBitmap, 0); 303 memset(pVCpu->hm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE); 304 304 } 305 305 306 306 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 307 307 /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */ 308 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.pMemObjGuestMSR, PAGE_SIZE, false /* executable R0 mapping */);308 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjGuestMSR, PAGE_SIZE, false /* executable R0 mapping */); 309 309 AssertRC(rc); 310 310 if (RT_FAILURE(rc)) 311 311 return rc; 312 312 313 pVCpu->h waccm.s.vmx.pGuestMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR);314 pVCpu->h waccm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 0);315 Assert(!(pVCpu->h waccm.s.vmx.pGuestMSRPhys & 0xf));316 memset(pVCpu->h waccm.s.vmx.pGuestMSR, 0, PAGE_SIZE);313 pVCpu->hm.s.vmx.pGuestMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjGuestMSR); 314 pVCpu->hm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjGuestMSR, 0); 315 Assert(!(pVCpu->hm.s.vmx.pGuestMSRPhys & 0xf)); 316 memset(pVCpu->hm.s.vmx.pGuestMSR, 0, PAGE_SIZE); 317 317 318 318 /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). 
*/ 319 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.pMemObjHostMSR, PAGE_SIZE, false /* executable R0 mapping */);319 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjHostMSR, PAGE_SIZE, false /* executable R0 mapping */); 320 320 AssertRC(rc); 321 321 if (RT_FAILURE(rc)) 322 322 return rc; 323 323 324 pVCpu->h waccm.s.vmx.pHostMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjHostMSR);325 pVCpu->h waccm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 0);326 Assert(!(pVCpu->h waccm.s.vmx.pHostMSRPhys & 0xf));327 memset(pVCpu->h waccm.s.vmx.pHostMSR, 0, PAGE_SIZE);324 pVCpu->hm.s.vmx.pHostMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjHostMSR); 325 pVCpu->hm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjHostMSR, 0); 326 Assert(!(pVCpu->hm.s.vmx.pHostMSRPhys & 0xf)); 327 memset(pVCpu->hm.s.vmx.pHostMSR, 0, PAGE_SIZE); 328 328 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 329 329 330 330 /* Current guest paging mode. */ 331 pVCpu->h waccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;331 pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL; 332 332 333 333 #ifdef LOG_ENABLED 334 SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->h waccm.s.vmx.pvVMCS, (uint32_t)pVCpu->hwaccm.s.vmx.HCPhysVMCS);334 SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hm.s.vmx.pvVMCS, (uint32_t)pVCpu->hm.s.vmx.HCPhysVMCS); 335 335 #endif 336 336 } … … 352 352 PVMCPU pVCpu = &pVM->aCpus[i]; 353 353 354 if (pVCpu->h waccm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ)355 { 356 RTR0MemObjFree(pVCpu->h waccm.s.vmx.hMemObjVMCS, false);357 pVCpu->h waccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;358 pVCpu->h waccm.s.vmx.pvVMCS = 0;359 pVCpu->h waccm.s.vmx.HCPhysVMCS = 0;360 } 361 if (pVCpu->h waccm.s.vmx.hMemObjVAPIC != NIL_RTR0MEMOBJ)362 { 363 RTR0MemObjFree(pVCpu->h waccm.s.vmx.hMemObjVAPIC, false);364 pVCpu->h waccm.s.vmx.hMemObjVAPIC = NIL_RTR0MEMOBJ;365 pVCpu->h waccm.s.vmx.pbVAPIC = 0;366 pVCpu->h waccm.s.vmx.HCPhysVAPIC = 0;367 } 368 if (pVCpu->h waccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)369 { 370 RTR0MemObjFree(pVCpu->h waccm.s.vmx.pMemObjMSRBitmap, false);371 pVCpu->h waccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;372 pVCpu->h waccm.s.vmx.pMSRBitmap = 0;373 pVCpu->h waccm.s.vmx.pMSRBitmapPhys = 0;354 if (pVCpu->hm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ) 355 { 356 RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVMCS, false); 357 pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ; 358 pVCpu->hm.s.vmx.pvVMCS = 0; 359 pVCpu->hm.s.vmx.HCPhysVMCS = 0; 360 } 361 if (pVCpu->hm.s.vmx.hMemObjVAPIC != NIL_RTR0MEMOBJ) 362 { 363 RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVAPIC, false); 364 pVCpu->hm.s.vmx.hMemObjVAPIC = NIL_RTR0MEMOBJ; 365 pVCpu->hm.s.vmx.pbVAPIC = 0; 366 pVCpu->hm.s.vmx.HCPhysVAPIC = 0; 367 } 368 if (pVCpu->hm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ) 369 { 370 RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjMSRBitmap, false); 371 pVCpu->hm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ; 372 pVCpu->hm.s.vmx.pMSRBitmap = 0; 373 pVCpu->hm.s.vmx.pMSRBitmapPhys = 0; 374 374 } 375 375 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 376 if (pVCpu->h waccm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)377 { 378 RTR0MemObjFree(pVCpu->h waccm.s.vmx.pMemObjHostMSR, false);379 pVCpu->h waccm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ;380 pVCpu->h waccm.s.vmx.pHostMSR = 0;381 pVCpu->h waccm.s.vmx.pHostMSRPhys = 0;382 } 383 if (pVCpu->h waccm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ)384 { 385 RTR0MemObjFree(pVCpu->h waccm.s.vmx.pMemObjGuestMSR, false);386 pVCpu->h waccm.s.vmx.pMemObjGuestMSR = 
NIL_RTR0MEMOBJ;387 pVCpu->h waccm.s.vmx.pGuestMSR = 0;388 pVCpu->h waccm.s.vmx.pGuestMSRPhys = 0;376 if (pVCpu->hm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ) 377 { 378 RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjHostMSR, false); 379 pVCpu->hm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ; 380 pVCpu->hm.s.vmx.pHostMSR = 0; 381 pVCpu->hm.s.vmx.pHostMSRPhys = 0; 382 } 383 if (pVCpu->hm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ) 384 { 385 RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjGuestMSR, false); 386 pVCpu->hm.s.vmx.pMemObjGuestMSR = NIL_RTR0MEMOBJ; 387 pVCpu->hm.s.vmx.pGuestMSR = 0; 388 pVCpu->hm.s.vmx.pGuestMSRPhys = 0; 389 389 } 390 390 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 391 391 } 392 if (pVM->h waccm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)393 { 394 RTR0MemObjFree(pVM->h waccm.s.vmx.pMemObjAPIC, false);395 pVM->h waccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;396 pVM->h waccm.s.vmx.pAPIC = 0;397 pVM->h waccm.s.vmx.pAPICPhys = 0;392 if (pVM->hm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ) 393 { 394 RTR0MemObjFree(pVM->hm.s.vmx.pMemObjAPIC, false); 395 pVM->hm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ; 396 pVM->hm.s.vmx.pAPIC = 0; 397 pVM->hm.s.vmx.pAPICPhys = 0; 398 398 } 399 399 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 400 if (pVM->h waccm.s.vmx.pMemObjScratch != NIL_RTR0MEMOBJ)401 { 402 ASMMemZero32(pVM->h waccm.s.vmx.pScratch, PAGE_SIZE);403 RTR0MemObjFree(pVM->h waccm.s.vmx.pMemObjScratch, false);404 pVM->h waccm.s.vmx.pMemObjScratch = NIL_RTR0MEMOBJ;405 pVM->h waccm.s.vmx.pScratch = 0;406 pVM->h waccm.s.vmx.pScratchPhys = 0;400 if (pVM->hm.s.vmx.pMemObjScratch != NIL_RTR0MEMOBJ) 401 { 402 ASMMemZero32(pVM->hm.s.vmx.pScratch, PAGE_SIZE); 403 RTR0MemObjFree(pVM->hm.s.vmx.pMemObjScratch, false); 404 pVM->hm.s.vmx.pMemObjScratch = NIL_RTR0MEMOBJ; 405 pVM->hm.s.vmx.pScratch = 0; 406 pVM->hm.s.vmx.pScratchPhys = 0; 407 407 } 408 408 #endif … … 424 424 AssertReturn(pVM, VERR_INVALID_PARAMETER); 425 425 426 /* Initialize these always, see h waccmR3InitFinalizeR0().*/427 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NONE;428 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE;426 /* Initialize these always, see hmR3InitFinalizeR0().*/ 427 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NONE; 428 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE; 429 429 430 430 /* Determine optimal flush type for EPT. */ 431 if (pVM->h waccm.s.fNestedPaging)432 { 433 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)434 { 435 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)436 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT;437 else if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)438 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS;431 if (pVM->hm.s.fNestedPaging) 432 { 433 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT) 434 { 435 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT) 436 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT; 437 else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS) 438 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS; 439 439 else 440 440 { … … 443 443 * We cannot ignore EPT at this point as we've already setup Unrestricted Guest execution. 444 444 */ 445 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;445 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED; 446 446 return VERR_VMX_GENERIC; 447 447 } … … 452 452 * Should never really happen. 
EPT is supported but INVEPT instruction is not supported. 453 453 */ 454 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;454 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED; 455 455 return VERR_VMX_GENERIC; 456 456 } … … 458 458 459 459 /* Determine optimal flush type for VPID. */ 460 if (pVM->h waccm.s.vmx.fVPID)461 { 462 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)463 { 464 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)465 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT;466 else if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)467 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS;460 if (pVM->hm.s.vmx.fVPID) 461 { 462 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID) 463 { 464 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT) 465 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT; 466 else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS) 467 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS; 468 468 else 469 469 { … … 472 472 * We do not handle other flush type combinations, ignore VPID capabilities. 473 473 */ 474 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)474 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR) 475 475 Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_INDIV_ADDR supported. Ignoring VPID.\n")); 476 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)476 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS) 477 477 Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n")); 478 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;479 pVM->h waccm.s.vmx.fVPID = false;478 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED; 479 pVM->hm.s.vmx.fVPID = false; 480 480 } 481 481 } … … 487 487 */ 488 488 Log(("VMXR0SetupVM: VPID supported without INVEPT support. Ignoring VPID.\n")); 489 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;490 pVM->h waccm.s.vmx.fVPID = false;489 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED; 490 pVM->hm.s.vmx.fVPID = false; 491 491 } 492 492 } … … 496 496 PVMCPU pVCpu = &pVM->aCpus[i]; 497 497 498 AssertPtr(pVCpu->h waccm.s.vmx.pvVMCS);498 AssertPtr(pVCpu->hm.s.vmx.pvVMCS); 499 499 500 500 /* Set revision dword at the beginning of the VMCS structure. */ 501 *(uint32_t *)pVCpu->h waccm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);501 *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info); 502 502 503 503 /* 504 504 * Clear and activate the VMCS. 505 505 */ 506 Log(("HCPhysVMCS = %RHp\n", pVCpu->h waccm.s.vmx.HCPhysVMCS));507 rc = VMXClearVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);506 Log(("HCPhysVMCS = %RHp\n", pVCpu->hm.s.vmx.HCPhysVMCS)); 507 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 508 508 if (RT_FAILURE(rc)) 509 509 goto vmx_end; 510 510 511 rc = VMXActivateVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);511 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 512 512 if (RT_FAILURE(rc)) 513 513 goto vmx_end; … … 517 517 * Set required bits to one and zero according to the MSR capabilities. 
518 518 */ 519 val = pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;519 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; 520 520 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT /* External interrupts */ 521 521 | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT; /* Non-maskable interrupts */ … … 524 524 * Enable the VMX preemption timer. 525 525 */ 526 if (pVM->h waccm.s.vmx.fUsePreemptTimer)526 if (pVM->hm.s.vmx.fUsePreemptTimer) 527 527 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER; 528 val &= pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;528 val &= pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; 529 529 530 530 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, val); … … 535 535 * Set required bits to one and zero according to the MSR capabilities. 536 536 */ 537 val = pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;537 val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; 538 538 /* Program which event cause VM-exits and which features we want to use. */ 539 539 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT … … 547 547 548 548 /* Without nested paging we should intercept invlpg and cr3 mov instructions. */ 549 if (!pVM->h waccm.s.fNestedPaging)549 if (!pVM->hm.s.fNestedPaging) 550 550 { 551 551 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT … … 558 558 * failure with an invalid control fields error. (combined with some other exit reasons) 559 559 */ 560 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)560 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 561 561 { 562 562 /* CR8 reads from the APIC shadow page; writes cause an exit is they lower the TPR below the threshold */ 563 563 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW; 564 Assert(pVM->h waccm.s.vmx.pAPIC);564 Assert(pVM->hm.s.vmx.pAPIC); 565 565 } 566 566 else … … 568 568 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT; 569 569 570 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)571 { 572 Assert(pVCpu->h waccm.s.vmx.pMSRBitmapPhys);570 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 571 { 572 Assert(pVCpu->hm.s.vmx.pMSRBitmapPhys); 573 573 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS; 574 574 } … … 579 579 /* Mask away the bits that the CPU doesn't support */ 580 580 /** @todo make sure they don't conflict with the above requirements. */ 581 val &= pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;582 pVCpu->h waccm.s.vmx.proc_ctls = val;581 val &= pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; 582 pVCpu->hm.s.vmx.proc_ctls = val; 583 583 584 584 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, val); 585 585 AssertRC(rc); 586 586 587 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)587 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 588 588 { 589 589 /* … … 591 591 * Set required bits to one and zero according to the MSR capabilities. 
592 592 */ 593 val = pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;593 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; 594 594 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; 595 595 596 if (pVM->h waccm.s.fNestedPaging)596 if (pVM->hm.s.fNestedPaging) 597 597 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; 598 598 599 if (pVM->h waccm.s.vmx.fVPID)599 if (pVM->hm.s.vmx.fVPID) 600 600 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; 601 601 602 if (pVM->h waccm.s.fHasIoApic)602 if (pVM->hm.s.fHasIoApic) 603 603 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; 604 604 605 if (pVM->h waccm.s.vmx.fUnrestrictedGuest)605 if (pVM->hm.s.vmx.fUnrestrictedGuest) 606 606 val |= VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE; 607 607 608 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)608 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 609 609 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; 610 610 611 611 /* Mask away the bits that the CPU doesn't support */ 612 612 /** @todo make sure they don't conflict with the above requirements. */ 613 val &= pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;614 pVCpu->h waccm.s.vmx.proc_ctls2 = val;613 val &= pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; 614 pVCpu->hm.s.vmx.proc_ctls2 = val; 615 615 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2, val); 616 616 AssertRC(rc); … … 656 656 * Set the MSR bitmap address. 657 657 */ 658 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)659 { 660 Assert(pVCpu->h waccm.s.vmx.pMSRBitmapPhys);661 662 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->h waccm.s.vmx.pMSRBitmapPhys);658 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 659 { 660 Assert(pVCpu->hm.s.vmx.pMSRBitmapPhys); 661 662 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.pMSRBitmapPhys); 663 663 AssertRC(rc); 664 664 … … 676 676 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true); 677 677 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true); 678 if (pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)678 if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 679 679 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_TSC_AUX, true, true); 680 680 } … … 684 684 * Set the guest & host MSR load/store physical addresses. 
685 685 */ 686 Assert(pVCpu->h waccm.s.vmx.pGuestMSRPhys);687 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->h waccm.s.vmx.pGuestMSRPhys);686 Assert(pVCpu->hm.s.vmx.pGuestMSRPhys); 687 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.pGuestMSRPhys); 688 688 AssertRC(rc); 689 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->h waccm.s.vmx.pGuestMSRPhys);689 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.pGuestMSRPhys); 690 690 AssertRC(rc); 691 Assert(pVCpu->h waccm.s.vmx.pHostMSRPhys);692 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->h waccm.s.vmx.pHostMSRPhys);691 Assert(pVCpu->hm.s.vmx.pHostMSRPhys); 692 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.pHostMSRPhys); 693 693 AssertRC(rc); 694 694 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ … … 701 701 AssertRC(rc); 702 702 703 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)704 { 705 Assert(pVM->h waccm.s.vmx.pMemObjAPIC);703 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 704 { 705 Assert(pVM->hm.s.vmx.pMemObjAPIC); 706 706 /* Optional */ 707 707 rc = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, 0); 708 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->h waccm.s.vmx.HCPhysVAPIC);709 710 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)711 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->h waccm.s.vmx.pAPICPhys);708 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVAPIC); 709 710 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 711 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.pAPICPhys); 712 712 713 713 AssertRC(rc); … … 722 722 * VMCS data back to memory. 723 723 */ 724 rc = VMXClearVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);724 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 725 725 AssertRC(rc); 726 726 … … 728 728 * Configure the VMCS read cache. 729 729 */ 730 PVMCSCACHE pCache = &pVCpu->h waccm.s.vmx.VMCSCache;730 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; 731 731 732 732 VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_RIP); … … 769 769 VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_RO_IDT_ERRCODE); 770 770 771 if (pVM->h waccm.s.fNestedPaging)771 if (pVM->hm.s.fNestedPaging) 772 772 { 773 773 VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_CR3); … … 782 782 * Setup the right TLB function based on CPU capabilities. 
783 783 */ 784 if (pVM->h waccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID)785 pVM->h waccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;786 else if (pVM->h waccm.s.fNestedPaging)787 pVM->h waccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT;788 else if (pVM->h waccm.s.vmx.fVPID)789 pVM->h waccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID;784 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID) 785 pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth; 786 else if (pVM->hm.s.fNestedPaging) 787 pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT; 788 else if (pVM->hm.s.vmx.fVPID) 789 pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID; 790 790 else 791 pVM->h waccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy;791 pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy; 792 792 793 793 vmx_end: … … 808 808 { 809 809 unsigned ulBit; 810 uint8_t *pMSRBitmap = (uint8_t *)pVCpu->h waccm.s.vmx.pMSRBitmap;810 uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hm.s.vmx.pMSRBitmap; 811 811 812 812 /* … … 867 867 868 868 #ifdef VBOX_WITH_STATISTICS 869 STAM_COUNTER_INC(&pVCpu->h waccm.s.paStatInjectedIrqsR0[iGate & MASK_INJECT_IRQ_STAT]);869 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[iGate & MASK_INJECT_IRQ_STAT]); 870 870 #endif 871 871 … … 892 892 893 893 if ( CPUMIsGuestInRealModeEx(pCtx) 894 && pVM->h waccm.s.vmx.pRealModeTSS)894 && pVM->hm.s.vmx.pRealModeTSS) 895 895 { 896 896 RTGCPHYS GCPhysHandler; … … 967 967 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC); 968 968 969 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS;969 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; 970 970 return VINF_SUCCESS; 971 971 } … … 998 998 * Dispatch any pending interrupts (injected before, but a VM exit occurred prematurely). 
999 999 */ 1000 if (pVCpu->h waccm.s.Event.fPending)1001 { 1002 Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->h waccm.s.Event.intInfo,1003 pVCpu->h waccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));1004 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatIntReinject);1005 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->h waccm.s.Event.intInfo, 0, pVCpu->hwaccm.s.Event.errCode);1000 if (pVCpu->hm.s.Event.fPending) 1001 { 1002 Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.intInfo, 1003 pVCpu->hm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2)); 1004 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject); 1005 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->hm.s.Event.intInfo, 0, pVCpu->hm.s.Event.errCode); 1006 1006 AssertRC(rc); 1007 1007 1008 pVCpu->h waccm.s.Event.fPending = false;1008 pVCpu->hm.s.Event.fPending = false; 1009 1009 return VINF_SUCCESS; 1010 1010 } … … 1040 1040 if (!(pCtx->eflags.u32 & X86_EFL_IF)) 1041 1041 { 1042 if (!(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT))1042 if (!(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)) 1043 1043 { 1044 1044 LogFlow(("Enable irq window exit!\n")); 1045 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;1046 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);1045 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT; 1046 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 1047 1047 AssertRC(rc); 1048 1048 } … … 1065 1065 /* Can only happen in rare cases where a pending interrupt is cleared behind our back */ 1066 1066 Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC))); 1067 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatSwitchGuestIrq);1067 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq); 1068 1068 /* Just continue */ 1069 1069 } … … 1142 1142 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 1143 1143 1144 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatIntInject);1144 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject); 1145 1145 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo, 0, errCode); 1146 1146 AssertRC(rc); … … 1166 1166 * Host CPU Context. 1167 1167 */ 1168 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)1168 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT) 1169 1169 { 1170 1170 RTIDTR idtr; … … 1185 1185 if (VMX_IS_64BIT_HOST_MODE()) 1186 1186 { 1187 cr3 = h waccmR0Get64bitCR3();1187 cr3 = hmR0Get64bitCR3(); 1188 1188 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_CR3, cr3); 1189 1189 } … … 1250 1250 { 1251 1251 X86XDTR64 gdtr64, idtr64; 1252 h waccmR0Get64bitGDTRandIDTR(&gdtr64, &idtr64);1252 hmR0Get64bitGDTRandIDTR(&gdtr64, &idtr64); 1253 1253 rc = VMXWriteVMCS64(VMX_VMCS_HOST_GDTR_BASE, gdtr64.uAddr); 1254 1254 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_IDTR_BASE, gdtr64.uAddr); … … 1356 1356 * the world switch back to the host. 
1357 1357 */ 1358 PVMXMSR pMsr = (PVMXMSR)pVCpu->h waccm.s.vmx.pHostMSR;1358 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pHostMSR; 1359 1359 unsigned idxMsr = 0; 1360 1360 … … 1404 1404 # endif 1405 1405 1406 if (pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)1406 if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 1407 1407 { 1408 1408 pMsr->u32IndexMSR = MSR_K8_TSC_AUX; … … 1418 1418 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 1419 1419 1420 pVCpu->h waccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;1420 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT; 1421 1421 } 1422 1422 return rc; … … 1515 1515 */ 1516 1516 /** @todo NP state won't change so maybe we should build the initial trap mask up front? */ 1517 if (!pVM->h waccm.s.fNestedPaging)1517 if (!pVM->hm.s.fNestedPaging) 1518 1518 u32TrapMask |= RT_BIT(X86_XCPT_PF); 1519 1519 … … 1531 1531 /** @todo Despite the claim to intercept everything, with NP we do not intercept #PF. Should we? */ 1532 1532 if ( CPUMIsGuestInRealModeEx(pCtx) 1533 && pVM->h waccm.s.vmx.pRealModeTSS)1533 && pVM->hm.s.vmx.pRealModeTSS) 1534 1534 { 1535 1535 u32TrapMask |= RT_BIT(X86_XCPT_DE) … … 1572 1572 X86EFLAGS eflags; 1573 1573 1574 Assert(!(pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_ALL_GUEST));1574 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)); 1575 1575 1576 1576 /* … … 1592 1592 */ 1593 1593 if ( CPUMIsGuestInRealModeEx(pCtx) 1594 && pVM->h waccm.s.vmx.pRealModeTSS)1595 { 1596 pVCpu->h waccm.s.vmx.RealMode.eflags = eflags;1594 && pVM->hm.s.vmx.pRealModeTSS) 1595 { 1596 pVCpu->hm.s.vmx.RealMode.eflags = eflags; 1597 1597 1598 1598 eflags.Bits.u1VM = 1; … … 1623 1623 * Set required bits to one and zero according to the MSR capabilities. 1624 1624 */ 1625 val = pVM->h waccm.s.vmx.msr.vmx_entry.n.disallowed0;1625 val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; 1626 1626 1627 1627 /* … … 1638 1638 * Mask away the bits that the CPU doesn't support. 1639 1639 */ 1640 val &= pVM->h waccm.s.vmx.msr.vmx_entry.n.allowed1;1640 val &= pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; 1641 1641 rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val); 1642 1642 AssertRC(rc); … … 1646 1646 * Set required bits to one and zero according to the MSR capabilities. 1647 1647 */ 1648 val = pVM->h waccm.s.vmx.msr.vmx_exit.n.disallowed0;1648 val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; 1649 1649 1650 1650 /* … … 1664 1664 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)); 1665 1665 #endif 1666 val &= pVM->h waccm.s.vmx.msr.vmx_exit.n.allowed1;1666 val &= pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; 1667 1667 1668 1668 /* … … 1675 1675 * Guest CPU context: ES, CS, SS, DS, FS, GS. 1676 1676 */ 1677 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)1678 { 1679 if (pVM->h waccm.s.vmx.pRealModeTSS)1677 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS) 1678 { 1679 if (pVM->hm.s.vmx.pRealModeTSS) 1680 1680 { 1681 1681 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu); 1682 if (pVCpu->h waccm.s.vmx.enmLastSeenGuestMode != enmGuestMode)1682 if (pVCpu->hm.s.vmx.enmLastSeenGuestMode != enmGuestMode) 1683 1683 { 1684 1684 /* 1685 1685 * Correct weird requirements for switching to protected mode. 
1686 1686 */ 1687 if ( pVCpu->h waccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL1687 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL 1688 1688 && enmGuestMode >= PGMMODE_PROTECTED) 1689 1689 { … … 1708 1708 pCtx->ss.Attr.n.u2Dpl = 0; 1709 1709 } 1710 pVCpu->h waccm.s.vmx.enmLastSeenGuestMode = enmGuestMode;1710 pVCpu->hm.s.vmx.enmLastSeenGuestMode = enmGuestMode; 1711 1711 } 1712 1712 else if ( CPUMIsGuestInRealModeEx(pCtx) … … 1741 1741 * Guest CPU context: LDTR. 1742 1742 */ 1743 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)1743 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR) 1744 1744 { 1745 1745 if (pCtx->ldtr.Sel == 0) … … 1764 1764 * Guest CPU context: TR. 1765 1765 */ 1766 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)1766 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR) 1767 1767 { 1768 1768 /* … … 1771 1771 */ 1772 1772 if ( CPUMIsGuestInRealModeEx(pCtx) 1773 && pVM->h waccm.s.vmx.pRealModeTSS)1773 && pVM->hm.s.vmx.pRealModeTSS) 1774 1774 { 1775 1775 RTGCPHYS GCPhys; 1776 1776 1777 1777 /* We convert it here every time as PCI regions could be reconfigured. */ 1778 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->h waccm.s.vmx.pRealModeTSS, &GCPhys);1778 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys); 1779 1779 AssertRC(rc); 1780 1780 1781 1781 rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_TR, 0); 1782 rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT, H WACCM_VTX_TSS_SIZE);1782 rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT, HM_VTX_TSS_SIZE); 1783 1783 rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_TR_BASE, GCPhys /* phys = virt in this mode */); 1784 1784 … … 1817 1817 * Guest CPU context: GDTR. 1818 1818 */ 1819 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)1819 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR) 1820 1820 { 1821 1821 rc = VMXWriteVMCS(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); … … 1827 1827 * Guest CPU context: IDTR. 1828 1828 */ 1829 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)1829 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR) 1830 1830 { 1831 1831 rc = VMXWriteVMCS(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); … … 1837 1837 * Sysenter MSRs. 1838 1838 */ 1839 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR)1839 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR) 1840 1840 { 1841 1841 rc = VMXWriteVMCS(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs); … … 1848 1848 * Guest CPU context: Control registers. 1849 1849 */ 1850 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)1850 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0) 1851 1851 { 1852 1852 val = pCtx->cr0; … … 1867 1867 } 1868 1868 /* Protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */ 1869 if (!pVM->h waccm.s.vmx.fUnrestrictedGuest)1869 if (!pVM->hm.s.vmx.fUnrestrictedGuest) 1870 1870 val |= X86_CR0_PE | X86_CR0_PG; 1871 1871 1872 if (pVM->h waccm.s.fNestedPaging)1872 if (pVM->hm.s.fNestedPaging) 1873 1873 { 1874 1874 if (CPUMIsGuestInPagedProtectedModeEx(pCtx)) 1875 1875 { 1876 1876 /* Disable CR3 read/write monitoring as we don't need it for EPT. 
*/ 1877 pVCpu->h waccm.s.vmx.proc_ctls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT1877 pVCpu->hm.s.vmx.proc_ctls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT 1878 1878 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT); 1879 1879 } … … 1881 1881 { 1882 1882 /* Reenable CR3 read/write monitoring as our identity mapped page table is active. */ 1883 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT1883 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT 1884 1884 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT; 1885 1885 } 1886 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);1886 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 1887 1887 AssertRC(rc); 1888 1888 } … … 1915 1915 val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_MP; 1916 1916 1917 pVCpu->h waccm.s.vmx.cr0_mask = val;1917 pVCpu->hm.s.vmx.cr0_mask = val; 1918 1918 1919 1919 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR0_MASK, val); … … 1922 1922 } 1923 1923 1924 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)1924 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4) 1925 1925 { 1926 1926 rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, pCtx->cr4); 1927 1927 Log2(("Guest CR4-shadow %08x\n", pCtx->cr4)); 1928 1928 /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */ 1929 val = pCtx->cr4 | (uint32_t)pVM->h waccm.s.vmx.msr.vmx_cr4_fixed0;1930 1931 if (!pVM->h waccm.s.fNestedPaging)1932 { 1933 switch (pVCpu->h waccm.s.enmShadowMode)1929 val = pCtx->cr4 | (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0; 1930 1931 if (!pVM->hm.s.fNestedPaging) 1932 { 1933 switch (pVCpu->hm.s.enmShadowMode) 1934 1934 { 1935 1935 case PGMMODE_REAL: /* Real mode -> emulated using v86 mode */ … … 1959 1959 } 1960 1960 else if ( !CPUMIsGuestInPagedProtectedModeEx(pCtx) 1961 && !pVM->h waccm.s.vmx.fUnrestrictedGuest)1961 && !pVM->hm.s.vmx.fUnrestrictedGuest) 1962 1962 { 1963 1963 /* We use 4 MB pages in our identity mapping page table for real and protected mode without paging. */ … … 1971 1971 */ 1972 1972 if ( CPUMIsGuestInRealModeEx(pCtx) 1973 && pVM->h waccm.s.vmx.pRealModeTSS)1973 && pVM->hm.s.vmx.pRealModeTSS) 1974 1974 { 1975 1975 val &= ~X86_CR4_VME; … … 1988 1988 | X86_CR4_PSE 1989 1989 | X86_CR4_VMXE; 1990 pVCpu->h waccm.s.vmx.cr4_mask = val;1990 pVCpu->hm.s.vmx.cr4_mask = val; 1991 1991 1992 1992 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR4_MASK, val); … … 1997 1997 #if 0 1998 1998 /* Enable single stepping if requested and CPU supports it. 
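The CR3 handling above composes the EPT pointer by ORing the memory type and page-walk length into the low bits of the PML4 physical address. A rough sketch of that composition (constants per the Intel SDM, macro names are placeholders):

    #include <stdint.h>

    #define EPT_MEMTYPE_WB              6u   /* bits 2:0: write-back paging-structure accesses */
    #define EPT_PAGE_WALK_LENGTH_4      3u   /* bits 5:3: (page-walk length - 1), 4 levels     */
    #define EPT_PAGE_WALK_LENGTH_SHIFT  3u

    static uint64_t eptpCompose(uint64_t HCPhysPml4)
    {
        /* The PML4 table is 4 KB aligned, so the low 12 bits are free for flags. */
        return (HCPhysPml4 & ~(uint64_t)0xfff)
             | EPT_MEMTYPE_WB
             | (EPT_PAGE_WALK_LENGTH_4 << EPT_PAGE_WALK_LENGTH_SHIFT);
    }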
*/ 1999 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)1999 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG) 2000 2000 if (DBGFIsStepping(pVCpu)) 2001 2001 { 2002 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;2003 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2002 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG; 2003 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2004 2004 AssertRC(rc); 2005 2005 } 2006 2006 #endif 2007 2007 2008 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)2009 { 2010 if (pVM->h waccm.s.fNestedPaging)2008 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3) 2009 { 2010 if (pVM->hm.s.fNestedPaging) 2011 2011 { 2012 2012 Assert(PGMGetHyperCR3(pVCpu)); 2013 pVCpu->h waccm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu);2014 2015 Assert(!(pVCpu->h waccm.s.vmx.GCPhysEPTP & 0xfff));2013 pVCpu->hm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu); 2014 2015 Assert(!(pVCpu->hm.s.vmx.GCPhysEPTP & 0xfff)); 2016 2016 /** @todo Check the IA32_VMX_EPT_VPID_CAP MSR for other supported memory types. */ 2017 pVCpu->h waccm.s.vmx.GCPhysEPTP |= VMX_EPT_MEMTYPE_WB2017 pVCpu->hm.s.vmx.GCPhysEPTP |= VMX_EPT_MEMTYPE_WB 2018 2018 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT); 2019 2019 2020 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->h waccm.s.vmx.GCPhysEPTP);2020 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.GCPhysEPTP); 2021 2021 AssertRC(rc); 2022 2022 2023 2023 if ( !CPUMIsGuestInPagedProtectedModeEx(pCtx) 2024 && !pVM->h waccm.s.vmx.fUnrestrictedGuest)2024 && !pVM->hm.s.vmx.fUnrestrictedGuest) 2025 2025 { 2026 2026 RTGCPHYS GCPhys; 2027 2027 2028 2028 /* We convert it here every time as PCI regions could be reconfigured. */ 2029 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->h waccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);2030 AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->h waccm.s.vmx.pNonPagingModeEPTPageTable));2029 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys); 2030 AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->hm.s.vmx.pNonPagingModeEPTPageTable)); 2031 2031 2032 2032 /* … … 2058 2058 * Guest CPU context: Debug registers. 2059 2059 */ 2060 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)2060 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG) 2061 2061 { 2062 2062 pCtx->dr[6] |= X86_DR6_INIT_VAL; /* set all reserved bits to 1. */ … … 2094 2094 && !DBGFIsStepping(pVCpu)) 2095 2095 { 2096 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxArmed);2096 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed); 2097 2097 2098 2098 /* Disable DRx move intercepts. 
*/ 2099 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;2100 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2099 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT; 2100 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2101 2101 AssertRC(rc); 2102 2102 … … 2123 2123 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 2124 2124 #elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 2125 pVCpu->h waccm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;2125 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64; 2126 2126 #else 2127 2127 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 2128 if (!pVM->h waccm.s.fAllow64BitGuests)2128 if (!pVM->hm.s.fAllow64BitGuests) 2129 2129 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 2130 2130 # endif 2131 pVCpu->h waccm.s.vmx.pfnStartVM = VMXR0StartVM64;2132 #endif 2133 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR)2131 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64; 2132 #endif 2133 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR) 2134 2134 { 2135 2135 /* Update these as wrmsr might have changed them. */ … … 2142 2142 else 2143 2143 { 2144 pVCpu->h waccm.s.vmx.pfnStartVM = VMXR0StartVM32;2144 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32; 2145 2145 } 2146 2146 … … 2152 2152 * during VM-entry and restored into the VM-exit store area during VM-exit. 2153 2153 */ 2154 PVMXMSR pMsr = (PVMXMSR)pVCpu->h waccm.s.vmx.pGuestMSR;2154 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pGuestMSR; 2155 2155 unsigned idxMsr = 0; 2156 2156 … … 2196 2196 } 2197 2197 2198 if ( pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP2198 if ( pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP 2199 2199 && (u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)) 2200 2200 { … … 2206 2206 } 2207 2207 2208 pVCpu->h waccm.s.vmx.cCachedMSRs = idxMsr;2208 pVCpu->hm.s.vmx.cCachedMSRs = idxMsr; 2209 2209 2210 2210 rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr); … … 2216 2216 2217 2217 bool fOffsettedTsc; 2218 if (pVM->h waccm.s.vmx.fUsePreemptTimer)2219 { 2220 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->h waccm.s.vmx.u64TSCOffset);2218 if (pVM->hm.s.vmx.fUsePreemptTimer) 2219 { 2220 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset); 2221 2221 2222 2222 /* Make sure the returned values have sane upper and lower boundaries. 
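Further down, the preemption-timer path converts "TSC ticks until the next deadline" into the 32-bit VMX preemption timer value. As a reminder of the arithmetic involved (illustrative only): the timer counts down at the TSC rate divided by 2^N, where N is the shift reported by IA32_VMX_MISC (cPreemptTimerShift in this code).

    #include <stdint.h>

    static uint32_t preemptTimerFromTscTicks(uint64_t cTscTicks, unsigned cShift)
    {
        uint64_t cTimerTicks = cTscTicks >> cShift;   /* TSC ticks -> timer ticks */
        if (cTimerTicks > UINT32_MAX - 16)            /* the VMCS field is 32 bits wide */
            cTimerTicks = UINT32_MAX - 16;
        return (uint32_t)cTimerTicks;
    }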
*/ … … 2226 2226 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */ 2227 2227 2228 cTicksToDeadline >>= pVM->h waccm.s.vmx.cPreemptTimerShift;2228 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift; 2229 2229 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16); 2230 2230 rc = VMXWriteVMCS(VMX_VMCS32_GUEST_PREEMPTION_TIMER_VALUE, cPreemptionTickCount); … … 2232 2232 } 2233 2233 else 2234 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->h waccm.s.vmx.u64TSCOffset);2234 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset); 2235 2235 2236 2236 if (fOffsettedTsc) 2237 2237 { 2238 2238 uint64_t u64CurTSC = ASMReadTSC(); 2239 if (u64CurTSC + pVCpu->h waccm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))2239 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu)) 2240 2240 { 2241 2241 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */ 2242 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->h waccm.s.vmx.u64TSCOffset);2242 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); 2243 2243 AssertRC(rc); 2244 2244 2245 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;2246 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2245 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT; 2246 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2247 2247 AssertRC(rc); 2248 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTSCOffset);2248 STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCOffset); 2249 2249 } 2250 2250 else … … 2252 2252 /* Fall back to rdtsc, rdtscp emulation as we would otherwise pass decreasing tsc values to the guest. 
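The offsetting decision just above boils down to a monotonicity check: the guest reads hostTSC + offset, and that sum must never fall behind the last TSC value the guest has already seen, otherwise RDTSC has to be intercepted and emulated. A minimal sketch of the predicate, with illustrative names:

    #include <stdint.h>
    #include <stdbool.h>

    static bool canUseTscOffsetting(uint64_t uHostTsc, uint64_t uTscOffset,
                                    uint64_t uLastSeenByGuest)
    {
        /* Guest-visible TSC must not appear to go backwards. */
        return uHostTsc + uTscOffset >= uLastSeenByGuest;
    }

When the predicate fails, the code falls back to setting RDTSC_EXIT in the processor-based controls, as shown in the hunk.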
*/ 2253 2253 LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, 2254 pVCpu->h waccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset,2255 TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->h waccm.s.vmx.u64TSCOffset,2254 pVCpu->hm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset, 2255 TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hm.s.vmx.u64TSCOffset, 2256 2256 TMCpuTickGet(pVCpu))); 2257 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;2258 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2257 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT; 2258 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2259 2259 AssertRC(rc); 2260 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTSCInterceptOverFlow);2260 STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow); 2261 2261 } 2262 2262 } 2263 2263 else 2264 2264 { 2265 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;2266 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2265 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT; 2266 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2267 2267 AssertRC(rc); 2268 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTSCIntercept);2268 STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCIntercept); 2269 2269 } 2270 2270 2271 2271 /* Done with the major changes */ 2272 pVCpu->h waccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;2272 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST; 2273 2273 2274 2274 /* Minimal guest state update (ESP, EIP, EFLAGS mostly) */ … … 2318 2318 VMXReadCachedVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow); 2319 2319 VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR0, &val); 2320 val = (valShadow & pVCpu->h waccm.s.vmx.cr0_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr0_mask);2320 val = (valShadow & pVCpu->hm.s.vmx.cr0_mask) | (val & ~pVCpu->hm.s.vmx.cr0_mask); 2321 2321 CPUMSetGuestCR0(pVCpu, val); 2322 2322 2323 2323 VMXReadCachedVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow); 2324 2324 VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR4, &val); 2325 val = (valShadow & pVCpu->h waccm.s.vmx.cr4_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr4_mask);2325 val = (valShadow & pVCpu->hm.s.vmx.cr4_mask) | (val & ~pVCpu->hm.s.vmx.cr4_mask); 2326 2326 CPUMSetGuestCR4(pVCpu, val); 2327 2327 … … 2330 2330 * the nested paging case where CR3 & CR4 can be changed by the guest. 2331 2331 */ 2332 if ( pVM->h waccm.s.fNestedPaging2332 if ( pVM->hm.s.fNestedPaging 2333 2333 && CPUMIsGuestInPagedProtectedModeEx(pCtx)) /** @todo check if we will always catch mode switches and such... */ 2334 2334 { 2335 PVMCSCACHE pCache = &pVCpu->h waccm.s.vmx.VMCSCache;2335 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; 2336 2336 2337 2337 /* Can be updated behind our back in the nested paging case. */ … … 2384 2384 /* Real mode emulation using v86 mode. */ 2385 2385 if ( CPUMIsGuestInRealModeEx(pCtx) 2386 && pVM->h waccm.s.vmx.pRealModeTSS)2386 && pVM->hm.s.vmx.pRealModeTSS) 2387 2387 { 2388 2388 /* Hide our emulation flags */ … … 2390 2390 2391 2391 /* Restore original IOPL setting as we always use 0. 
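The guest-state readback above reconstructs CR0/CR4 from the read shadow and the hardware value using the guest/host mask: masked bits are host-owned, so the guest-visible value comes from the shadow; unmasked bits come straight from the register. A one-line sketch of that combine:

    #include <stdint.h>

    static uint64_t crCombineShadow(uint64_t uShadow, uint64_t uHwValue, uint64_t fMask)
    {
        return (uShadow & fMask) | (uHwValue & ~fMask);
    }

The same helper shape would apply to both CR0 and CR4, each with its own mask (cr0_mask / cr4_mask in this file).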
*/ 2392 pCtx->eflags.Bits.u2IOPL = pVCpu->h waccm.s.vmx.RealMode.eflags.Bits.u2IOPL;2392 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL; 2393 2393 2394 2394 /* Force a TR resync every time in case we switch modes. */ 2395 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;2395 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_TR; 2396 2396 } 2397 2397 else … … 2405 2405 * Save the possibly changed MSRs that we automatically restore and save during a world switch. 2406 2406 */ 2407 for (unsigned i = 0; i < pVCpu->h waccm.s.vmx.cCachedMSRs; i++)2408 { 2409 PVMXMSR pMsr = (PVMXMSR)pVCpu->h waccm.s.vmx.pGuestMSR;2407 for (unsigned i = 0; i < pVCpu->hm.s.vmx.cCachedMSRs; i++) 2408 { 2409 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pGuestMSR; 2410 2410 pMsr += i; 2411 2411 … … 2458 2458 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); 2459 2459 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); 2460 pVCpu->h waccm.s.TlbShootdown.cPages = 0;2460 pVCpu->hm.s.TlbShootdown.cPages = 0; 2461 2461 return; 2462 2462 } … … 2473 2473 PHMGLOBLCPUINFO pCpu; 2474 2474 2475 Assert(pVM->h waccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID);2476 2477 pCpu = H WACCMR0GetCurrentCpu();2475 Assert(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID); 2476 2477 pCpu = HMR0GetCurrentCpu(); 2478 2478 2479 2479 /* … … 2484 2484 */ 2485 2485 bool fNewASID = false; 2486 if ( pVCpu->h waccm.s.idLastCpu != pCpu->idCpu2487 || pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)2488 { 2489 pVCpu->h waccm.s.fForceTLBFlush = true;2486 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu 2487 || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 2488 { 2489 pVCpu->hm.s.fForceTLBFlush = true; 2490 2490 fNewASID = true; 2491 2491 } … … 2495 2495 */ 2496 2496 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2497 pVCpu->h waccm.s.fForceTLBFlush = true;2498 2499 pVCpu->h waccm.s.idLastCpu = pCpu->idCpu;2500 2501 if (pVCpu->h waccm.s.fForceTLBFlush)2497 pVCpu->hm.s.fForceTLBFlush = true; 2498 2499 pVCpu->hm.s.idLastCpu = pCpu->idCpu; 2500 2501 if (pVCpu->hm.s.fForceTLBFlush) 2502 2502 { 2503 2503 if (fNewASID) 2504 2504 { 2505 2505 ++pCpu->uCurrentASID; 2506 if (pCpu->uCurrentASID >= pVM->h waccm.s.uMaxASID)2506 if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID) 2507 2507 { 2508 2508 pCpu->uCurrentASID = 1; /* start at 1; host uses 0 */ … … 2511 2511 } 2512 2512 2513 pVCpu->h waccm.s.uCurrentASID = pCpu->uCurrentASID;2513 pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID; 2514 2514 if (pCpu->fFlushASIDBeforeUse) 2515 2515 { 2516 hmR0VmxFlushVPID(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);2516 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */); 2517 2517 #ifdef VBOX_WITH_STATISTICS 2518 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushASID);2518 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID); 2519 2519 #endif 2520 2520 } … … 2522 2522 else 2523 2523 { 2524 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)2524 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT) 2525 2525 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */); 2526 2526 else 2527 hmR0VmxFlushEPT(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushEPT);2527 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT); 2528 2528 2529 2529 #ifdef VBOX_WITH_STATISTICS … … 2532 2532 * as ASID flushes too, better than including them under StatFlushTLBWorldSwitch. 
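The tagged-TLB setup shown here (the combined EPT+VPID flavour) assigns a fresh ASID whenever a vCPU lands on a different host CPU or that CPU's flush generation has changed, and wrapping the ASID space forces a new generation. A condensed sketch of that bookkeeping, with illustrative structure and field names:

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct HOSTCPU { uint32_t uCurrentAsid; uint32_t cFlushes; } HOSTCPU;
    typedef struct VCPUTLB { uint32_t idLastHostCpu; uint32_t cFlushes; uint32_t uAsid; } VCPUTLB;

    static bool asidAssign(VCPUTLB *pVCpu, HOSTCPU *pCpu, uint32_t idHostCpu, uint32_t uMaxAsid)
    {
        bool fFlush = pVCpu->idLastHostCpu != idHostCpu
                   || pVCpu->cFlushes      != pCpu->cFlushes;
        pVCpu->idLastHostCpu = idHostCpu;
        if (fFlush)
        {
            if (++pCpu->uCurrentAsid >= uMaxAsid)
            {
                pCpu->uCurrentAsid = 1;   /* ASID 0 is reserved for the host. */
                pCpu->cFlushes++;         /* New generation: all vCPUs reflush. */
            }
            pVCpu->uAsid    = pCpu->uCurrentAsid;
            pVCpu->cFlushes = pCpu->cFlushes;
        }
        return fFlush;                    /* Caller issues INVVPID/INVEPT when true. */
    }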
2533 2533 */ 2534 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushASID);2535 #endif 2536 } 2537 2538 pVCpu->h waccm.s.cTLBFlushes = pCpu->cTLBFlushes;2539 pVCpu->h waccm.s.fForceTLBFlush = false;2534 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID); 2535 #endif 2536 } 2537 2538 pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes; 2539 pVCpu->hm.s.fForceTLBFlush = false; 2540 2540 } 2541 2541 else 2542 2542 { 2543 AssertMsg(pVCpu->h waccm.s.uCurrentASID && pCpu->uCurrentASID,2544 ("h waccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",2545 pVCpu->h waccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,2543 AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID, 2544 ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n", 2545 pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes, 2546 2546 pCpu->uCurrentASID, pCpu->cTLBFlushes)); 2547 2547 2548 2548 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should 2549 * not be executed. See h waccmQueueInvlPage() where it is commented2549 * not be executed. See hmQueueInvlPage() where it is commented 2550 2550 * out. Support individual entry flushing someday. */ 2551 2551 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN)) 2552 2552 { 2553 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTlbShootdown);2553 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown); 2554 2554 2555 2555 /* … … 2557 2557 * as supported by the CPU. 2558 2558 */ 2559 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)2559 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR) 2560 2560 { 2561 for (unsigned i = 0; i < pVCpu->h waccm.s.TlbShootdown.cPages; i++)2562 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->h waccm.s.TlbShootdown.aPages[i]);2561 for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++) 2562 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]); 2563 2563 } 2564 2564 else 2565 hmR0VmxFlushEPT(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushEPT);2565 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT); 2566 2566 } 2567 2567 else 2568 2568 { 2569 2569 #ifdef VBOX_WITH_STATISTICS 2570 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatNoFlushTLBWorldSwitch);2571 #endif 2572 } 2573 } 2574 pVCpu->h waccm.s.TlbShootdown.cPages = 0;2570 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch); 2571 #endif 2572 } 2573 } 2574 pVCpu->hm.s.TlbShootdown.cPages = 0; 2575 2575 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); 2576 2576 2577 AssertMsg(pVCpu->h waccm.s.cTLBFlushes == pCpu->cTLBFlushes,2578 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->h waccm.s.cTLBFlushes, pCpu->cTLBFlushes));2579 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->h waccm.s.uMaxASID,2577 AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes, 2578 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes)); 2579 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID, 2580 2580 ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID)); 2581 AssertMsg(pVCpu->h waccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,2582 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->h waccm.s.uCurrentASID));2581 AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID, 2582 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID)); 2583 2583 2584 2584 /* Update 
VMCS with the VPID. */ 2585 int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->h waccm.s.uCurrentASID);2585 int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID); 2586 2586 AssertRC(rc); 2587 2587 } … … 2599 2599 PHMGLOBLCPUINFO pCpu; 2600 2600 2601 Assert(pVM->h waccm.s.fNestedPaging);2602 Assert(!pVM->h waccm.s.vmx.fVPID);2603 2604 pCpu = H WACCMR0GetCurrentCpu();2601 Assert(pVM->hm.s.fNestedPaging); 2602 Assert(!pVM->hm.s.vmx.fVPID); 2603 2604 pCpu = HMR0GetCurrentCpu(); 2605 2605 2606 2606 /* … … 2609 2609 * A change in the TLB flush count implies the host Cpu is online after a suspend/resume. 2610 2610 */ 2611 if ( pVCpu->h waccm.s.idLastCpu != pCpu->idCpu2612 || pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)2613 { 2614 pVCpu->h waccm.s.fForceTLBFlush = true;2611 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu 2612 || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 2613 { 2614 pVCpu->hm.s.fForceTLBFlush = true; 2615 2615 } 2616 2616 … … 2619 2619 */ 2620 2620 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2621 pVCpu->h waccm.s.fForceTLBFlush = true;2622 2623 pVCpu->h waccm.s.idLastCpu = pCpu->idCpu;2624 pVCpu->h waccm.s.cTLBFlushes = pCpu->cTLBFlushes;2625 2626 if (pVCpu->h waccm.s.fForceTLBFlush)2627 hmR0VmxFlushEPT(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushEPT);2621 pVCpu->hm.s.fForceTLBFlush = true; 2622 2623 pVCpu->hm.s.idLastCpu = pCpu->idCpu; 2624 pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes; 2625 2626 if (pVCpu->hm.s.fForceTLBFlush) 2627 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT); 2628 2628 else 2629 2629 { 2630 2630 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should 2631 * not be executed. See h waccmQueueInvlPage() where it is commented2631 * not be executed. See hmQueueInvlPage() where it is commented 2632 2632 * out. Support individual entry flushing someday. */ 2633 2633 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN)) … … 2636 2636 * We cannot flush individual entries without VPID support. Flush using EPT. 2637 2637 */ 2638 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTlbShootdown);2639 hmR0VmxFlushEPT(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushEPT);2640 } 2641 } 2642 pVCpu->h waccm.s.TlbShootdown.cPages= 0;2638 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown); 2639 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT); 2640 } 2641 } 2642 pVCpu->hm.s.TlbShootdown.cPages= 0; 2643 2643 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); 2644 2644 2645 2645 #ifdef VBOX_WITH_STATISTICS 2646 if (pVCpu->h waccm.s.fForceTLBFlush)2647 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushTLBWorldSwitch);2646 if (pVCpu->hm.s.fForceTLBFlush) 2647 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch); 2648 2648 else 2649 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatNoFlushTLBWorldSwitch);2649 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch); 2650 2650 #endif 2651 2651 } … … 2663 2663 PHMGLOBLCPUINFO pCpu; 2664 2664 2665 Assert(pVM->h waccm.s.vmx.fVPID);2666 Assert(!pVM->h waccm.s.fNestedPaging);2667 2668 pCpu = H WACCMR0GetCurrentCpu();2665 Assert(pVM->hm.s.vmx.fVPID); 2666 Assert(!pVM->hm.s.fNestedPaging); 2667 2668 pCpu = HMR0GetCurrentCpu(); 2669 2669 2670 2670 /* … … 2674 2674 * or the host Cpu is online after a suspend/resume, so we cannot reuse the current ASID anymore. 
2675 2675 */ 2676 if ( pVCpu->h waccm.s.idLastCpu != pCpu->idCpu2677 || pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)2676 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu 2677 || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 2678 2678 { 2679 2679 /* Force a TLB flush on VM entry. */ 2680 pVCpu->h waccm.s.fForceTLBFlush = true;2680 pVCpu->hm.s.fForceTLBFlush = true; 2681 2681 } 2682 2682 … … 2685 2685 */ 2686 2686 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2687 pVCpu->h waccm.s.fForceTLBFlush = true;2688 2689 pVCpu->h waccm.s.idLastCpu = pCpu->idCpu;2690 2691 if (pVCpu->h waccm.s.fForceTLBFlush)2687 pVCpu->hm.s.fForceTLBFlush = true; 2688 2689 pVCpu->hm.s.idLastCpu = pCpu->idCpu; 2690 2691 if (pVCpu->hm.s.fForceTLBFlush) 2692 2692 { 2693 2693 ++pCpu->uCurrentASID; 2694 if (pCpu->uCurrentASID >= pVM->h waccm.s.uMaxASID)2694 if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID) 2695 2695 { 2696 2696 pCpu->uCurrentASID = 1; /* start at 1; host uses 0 */ … … 2699 2699 } 2700 2700 else 2701 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushASID);2702 2703 pVCpu->h waccm.s.fForceTLBFlush = false;2704 pVCpu->h waccm.s.cTLBFlushes = pCpu->cTLBFlushes;2705 pVCpu->h waccm.s.uCurrentASID = pCpu->uCurrentASID;2701 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID); 2702 2703 pVCpu->hm.s.fForceTLBFlush = false; 2704 pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes; 2705 pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID; 2706 2706 if (pCpu->fFlushASIDBeforeUse) 2707 hmR0VmxFlushVPID(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);2707 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */); 2708 2708 } 2709 2709 else 2710 2710 { 2711 AssertMsg(pVCpu->h waccm.s.uCurrentASID && pCpu->uCurrentASID,2712 ("h waccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",2713 pVCpu->h waccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,2711 AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID, 2712 ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n", 2713 pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes, 2714 2714 pCpu->uCurrentASID, pCpu->cTLBFlushes)); 2715 2715 2716 2716 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should 2717 * not be executed. See h waccmQueueInvlPage() where it is commented2717 * not be executed. See hmQueueInvlPage() where it is commented 2718 2718 * out. Support individual entry flushing someday. */ 2719 2719 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN)) … … 2723 2723 * as supported by the CPU. 
2724 2724 */ 2725 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)2725 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR) 2726 2726 { 2727 for (unsigned i = 0; i < pVCpu->h waccm.s.TlbShootdown.cPages; i++)2728 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->h waccm.s.TlbShootdown.aPages[i]);2727 for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++) 2728 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]); 2729 2729 } 2730 2730 else 2731 hmR0VmxFlushVPID(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);2732 } 2733 } 2734 pVCpu->h waccm.s.TlbShootdown.cPages = 0;2731 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */); 2732 } 2733 } 2734 pVCpu->hm.s.TlbShootdown.cPages = 0; 2735 2735 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); 2736 2736 2737 AssertMsg(pVCpu->h waccm.s.cTLBFlushes == pCpu->cTLBFlushes,2738 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->h waccm.s.cTLBFlushes, pCpu->cTLBFlushes));2739 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->h waccm.s.uMaxASID,2737 AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes, 2738 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes)); 2739 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID, 2740 2740 ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID)); 2741 AssertMsg(pVCpu->h waccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,2742 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->h waccm.s.uCurrentASID));2743 2744 int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->h waccm.s.uCurrentASID);2741 AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID, 2742 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID)); 2743 2744 int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID); 2745 2745 AssertRC(rc); 2746 2746 2747 2747 # ifdef VBOX_WITH_STATISTICS 2748 if (pVCpu->h waccm.s.fForceTLBFlush)2749 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushTLBWorldSwitch);2748 if (pVCpu->hm.s.fForceTLBFlush) 2749 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch); 2750 2750 else 2751 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatNoFlushTLBWorldSwitch);2751 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch); 2752 2752 # endif 2753 2753 } … … 2764 2764 VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2765 2765 { 2766 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatEntry, x);2767 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->h waccm.s.StatExit1);2768 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->h waccm.s.StatExit2);2766 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 2767 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1); 2768 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2); 2769 2769 2770 2770 VBOXSTRICTRC rc = VINF_SUCCESS; … … 2789 2789 #endif 2790 2790 2791 Assert(!(pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)2792 || (pVCpu->h waccm.s.vmx.pbVAPIC && pVM->hwaccm.s.vmx.pAPIC));2791 Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 2792 || (pVCpu->hm.s.vmx.pbVAPIC && pVM->hm.s.vmx.pAPIC)); 2793 2793 2794 2794 /* … … 2796 2796 */ 2797 2797 if ( CPUMIsGuestInLongModeEx(pCtx) 2798 || ( (( pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & 
VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)2799 || pVM->h waccm.s.fTRPPatchingAllowed)2800 && pVM->h waccm.s.fHasIoApic)2798 || ( (( pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 2799 || pVM->hm.s.fTRPPatchingAllowed) 2800 && pVM->hm.s.fHasIoApic) 2801 2801 ) 2802 2802 { … … 2807 2807 2808 2808 /* This is not ideal, but if we don't clear the event injection in the VMCS right here, 2809 * we may end up injecting some stale event into a VM, including injecting an event that 2809 * we may end up injecting some stale event into a VM, including injecting an event that 2810 2810 * originated before a VM reset *after* the VM has been reset. See @bugref{6220}. 2811 2811 */ … … 2821 2821 2822 2822 /* allowed zero */ 2823 if ((val2 & pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0)2823 if ((val2 & pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) 2824 2824 Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: zero\n")); 2825 2825 2826 2826 /* allowed one */ 2827 if ((val2 & ~pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0)2827 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0) 2828 2828 Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: one\n")); 2829 2829 … … 2835 2835 * Must be set according to the MSR, but can be cleared if nested paging is used. 2836 2836 */ 2837 if (pVM->h waccm.s.fNestedPaging)2837 if (pVM->hm.s.fNestedPaging) 2838 2838 { 2839 2839 val2 |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT … … 2843 2843 2844 2844 /* allowed zero */ 2845 if ((val2 & pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0)2845 if ((val2 & pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) 2846 2846 Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: zero\n")); 2847 2847 2848 2848 /* allowed one */ 2849 if ((val2 & ~pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0)2849 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0) 2850 2850 Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: one\n")); 2851 2851 … … 2855 2855 2856 2856 /* allowed zero */ 2857 if ((val2 & pVM->h waccm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0)2857 if ((val2 & pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0) 2858 2858 Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: zero\n")); 2859 2859 2860 2860 /* allowed one */ 2861 if ((val2 & ~pVM->h waccm.s.vmx.msr.vmx_entry.n.allowed1) != 0)2861 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_entry.n.allowed1) != 0) 2862 2862 Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: one\n")); 2863 2863 … … 2867 2867 2868 2868 /* allowed zero */ 2869 if ((val2 & pVM->h waccm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0)2869 if ((val2 & pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0) 2870 2870 Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: zero\n")); 2871 2871 2872 2872 /* allowed one */ 2873 if ((val2 & ~pVM->h waccm.s.vmx.msr.vmx_exit.n.allowed1) != 0)2873 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_exit.n.allowed1) != 0) 2874 2874 Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n")); 2875 2875 } … … 2878 2878 2879 2879 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 2880 pVCpu->h waccm.s.vmx.VMCSCache.u64TimeEntry = RTTimeNanoTS();2880 pVCpu->hm.s.vmx.VMCSCache.u64TimeEntry = RTTimeNanoTS(); 2881 2881 #endif 2882 2882 … … 2885 2885 */ 2886 2886 ResumeExecution: 
2887 if (!STAM_REL_PROFILE_ADV_IS_RUNNING(&pVCpu->h waccm.s.StatEntry))2888 STAM_REL_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatExit2, &pVCpu->hwaccm.s.StatEntry, x);2889 AssertMsg(pVCpu->h waccm.s.idEnteredCpu == RTMpCpuId(),2887 if (!STAM_REL_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry)) 2888 STAM_REL_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x); 2889 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), 2890 2890 ("Expected %d, I'm %d; cResume=%d exitReason=%RGv exitQualification=%RGv\n", 2891 (int)pVCpu->h waccm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));2892 Assert(!H WACCMR0SuspendPending());2891 (int)pVCpu->hm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification)); 2892 Assert(!HMR0SuspendPending()); 2893 2893 /* Not allowed to switch modes without reloading the host state (32->64 switcher)!! */ 2894 2894 Assert(fWasInLongMode == CPUMIsGuestInLongModeEx(pCtx)); … … 2897 2897 * Safety precaution; looping for too long here can have a very bad effect on the host. 2898 2898 */ 2899 if (RT_UNLIKELY(++cResume > pVM->h waccm.s.cMaxResumeLoops))2900 { 2901 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMaxResume);2899 if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops)) 2900 { 2901 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume); 2902 2902 rc = VINF_EM_RAW_INTERRUPT; 2903 2903 goto end; … … 2947 2947 * Check for pending actions that force us to go back to ring-3. 2948 2948 */ 2949 if ( VM_FF_ISPENDING(pVM, VM_FF_H WACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)2950 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_H WACCM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))2949 if ( VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA) 2950 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST)) 2951 2951 { 2952 2952 /* Check if a sync operation is pending. */ … … 2967 2967 #endif 2968 2968 { 2969 if ( VM_FF_ISPENDING(pVM, VM_FF_H WACCM_TO_R3_MASK)2970 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_H WACCM_TO_R3_MASK))2969 if ( VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK) 2970 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK)) 2971 2971 { 2972 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatSwitchToR3);2972 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchToR3); 2973 2973 rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3; 2974 2974 goto end; … … 3013 3013 if (RTThreadPreemptIsPending(NIL_RTTHREAD)) 3014 3014 { 3015 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitPreemptPending);3015 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending); 3016 3016 rc = VINF_EM_RAW_INTERRUPT; 3017 3017 goto end; … … 3045 3045 AssertRC(rc2); 3046 3046 /* The TPR can be found at offset 0x80 in the APIC mmio page. 
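The TPR caching set up in the run loop mirrors the guest's task-priority register at offset 0x80 of the virtual-APIC page before entry and pushes a changed value back to the emulated APIC after the exit. A minimal sketch of that round trip; apicSetTpr stands in for the real PDM APIC call and is not part of the source:

    #include <stdint.h>

    #define VAPIC_OFF_TPR 0x80

    static void tprSyncBeforeEntry(uint8_t *pbVApicPage, uint8_t u8Tpr)
    {
        pbVApicPage[VAPIC_OFF_TPR] = u8Tpr;          /* seed the shadow */
    }

    static void tprSyncAfterExit(const uint8_t *pbVApicPage, uint8_t u8TprBefore,
                                 void (*apicSetTpr)(uint8_t))
    {
        uint8_t u8TprNow = pbVApicPage[VAPIC_OFF_TPR];
        if (u8TprNow != u8TprBefore)                 /* guest wrote CR8/TPR without exiting */
            apicSetTpr(u8TprNow);
    }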
*/ 3047 pVCpu->h waccm.s.vmx.pbVAPIC[0x80] = u8LastTPR;3047 pVCpu->hm.s.vmx.pbVAPIC[0x80] = u8LastTPR; 3048 3048 3049 3049 /* … … 3059 3059 AssertRC(VBOXSTRICTRC_VAL(rc)); 3060 3060 3061 if (pVM->h waccm.s.fTPRPatchingActive)3061 if (pVM->hm.s.fTPRPatchingActive) 3062 3062 { 3063 3063 Assert(!CPUMIsGuestInLongModeEx(pCtx)); … … 3083 3083 3084 3084 #ifdef LOG_ENABLED 3085 if ( pVM->h waccm.s.fNestedPaging3086 || pVM->h waccm.s.vmx.fVPID)3087 { 3088 PHMGLOBLCPUINFO pCpu = H WACCMR0GetCurrentCpu();3089 if (pVCpu->h waccm.s.idLastCpu != pCpu->idCpu)3090 { 3091 LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->h waccm.s.idLastCpu,3085 if ( pVM->hm.s.fNestedPaging 3086 || pVM->hm.s.vmx.fVPID) 3087 { 3088 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu(); 3089 if (pVCpu->hm.s.idLastCpu != pCpu->idCpu) 3090 { 3091 LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, 3092 3092 pCpu->idCpu)); 3093 3093 } 3094 else if (pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)3095 { 3096 LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->h waccm.s.cTLBFlushes,3094 else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 3095 { 3096 LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes, 3097 3097 pCpu->cTLBFlushes)); 3098 3098 } … … 3119 3119 * Save the host state first. 3120 3120 */ 3121 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)3121 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT) 3122 3122 { 3123 3123 rc = VMXR0SaveHostState(pVM, pVCpu); … … 3132 3132 * Load the guest state. 3133 3133 */ 3134 if (!pVCpu->h waccm.s.fContextUseFlags)3134 if (!pVCpu->hm.s.fContextUseFlags) 3135 3135 { 3136 3136 VMXR0LoadMinimalGuestState(pVM, pVCpu, pCtx); 3137 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatLoadMinimal);3137 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal); 3138 3138 } 3139 3139 else … … 3145 3145 goto end; 3146 3146 } 3147 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatLoadFull);3147 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull); 3148 3148 } 3149 3149 … … 3163 3163 3164 3164 /* Set TLB flush state as checked until we return from the world switch. */ 3165 ASMAtomicWriteBool(&pVCpu->h waccm.s.fCheckedTLBFlush, true);3165 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); 3166 3166 /* Deal with tagged TLB setup and invalidation. */ 3167 pVM->h waccm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);3167 pVM->hm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu); 3168 3168 3169 3169 /* … … 3180 3180 3181 3181 /* All done! Let's start VM execution. */ 3182 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);3182 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x); 3183 3183 Assert(idCpuCheck == RTMpCpuId()); 3184 3184 3185 3185 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 3186 pVCpu->h waccm.s.vmx.VMCSCache.cResume = cResume;3187 pVCpu->h waccm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS();3186 pVCpu->hm.s.vmx.VMCSCache.cResume = cResume; 3187 pVCpu->hm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS(); 3188 3188 #endif 3189 3189 … … 3191 3191 * Save the current TPR value in the LSTAR MSR so our patches can access it. 
3192 3192 */ 3193 if (pVM->h waccm.s.fTPRPatchingActive)3194 { 3195 Assert(pVM->h waccm.s.fTPRPatchingActive);3193 if (pVM->hm.s.fTPRPatchingActive) 3194 { 3195 Assert(pVM->hm.s.fTPRPatchingActive); 3196 3196 u64OldLSTAR = ASMRdMsr(MSR_K8_LSTAR); 3197 3197 ASMWrMsr(MSR_K8_LSTAR, u8LastTPR); … … 3205 3205 * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}. 3206 3206 */ 3207 if ( (pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)3208 && !(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))3209 { 3210 pVCpu->h waccm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);3207 if ( (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 3208 && !(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)) 3209 { 3210 pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX); 3211 3211 uint64_t u64GuestTSCAux = 0; 3212 3212 rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux); … … 3217 3217 3218 3218 #ifdef VBOX_WITH_KERNEL_USING_XMM 3219 rc = h waccmR0VMXStartVMWrapXMM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hwaccm.s.vmx.pfnStartVM);3219 rc = hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM); 3220 3220 #else 3221 rc = pVCpu->h waccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu);3222 #endif 3223 ASMAtomicWriteBool(&pVCpu->h waccm.s.fCheckedTLBFlush, false);3224 ASMAtomicIncU32(&pVCpu->h waccm.s.cWorldSwitchExits);3221 rc = pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu); 3222 #endif 3223 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); 3224 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); 3225 3225 3226 3226 /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */ 3227 if (!(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))3227 if (!(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)) 3228 3228 { 3229 3229 #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 3230 3230 /* Restore host's TSC_AUX. */ 3231 if (pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)3232 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->h waccm.s.u64HostTSCAux);3231 if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 3232 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux); 3233 3233 #endif 3234 3234 3235 3235 TMCpuTickSetLastSeen(pVCpu, 3236 ASMReadTSC() + pVCpu->h waccm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);3236 ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */); 3237 3237 } 3238 3238 … … 3244 3244 * Restore the host LSTAR MSR if the guest could have changed it. 
3245 3245 */ 3246 if (pVM->h waccm.s.fTPRPatchingActive)3247 { 3248 Assert(pVM->h waccm.s.fTPRPatchingActive);3249 pVCpu->h waccm.s.vmx.pbVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);3246 if (pVM->hm.s.fTPRPatchingActive) 3247 { 3248 Assert(pVM->hm.s.fTPRPatchingActive); 3249 pVCpu->hm.s.vmx.pbVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); 3250 3250 ASMWrMsr(MSR_K8_LSTAR, u64OldLSTAR); 3251 3251 } 3252 3252 3253 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatInGC, &pVCpu->hwaccm.s.StatExit1, x);3253 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x); 3254 3254 ASMSetFlags(uOldEFlags); 3255 3255 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION … … 3257 3257 #endif 3258 3258 3259 AssertMsg(!pVCpu->h waccm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries=%d\n",3260 pVCpu->h waccm.s.vmx.VMCSCache.Write.cValidEntries));3259 AssertMsg(!pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries=%d\n", 3260 pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries)); 3261 3261 3262 3262 /* In case we execute a goto ResumeExecution later on. */ 3263 pVCpu->h waccm.s.fResumeVM = true;3264 pVCpu->h waccm.s.fForceTLBFlush = false;3263 pVCpu->hm.s.fResumeVM = true; 3264 pVCpu->hm.s.fForceTLBFlush = false; 3265 3265 3266 3266 /* … … 3281 3281 /* Investigate why there was a VM-exit. */ 3282 3282 rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason); 3283 STAM_COUNTER_INC(&pVCpu->h waccm.s.paStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);3283 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]); 3284 3284 3285 3285 exitReason &= 0xffff; /* bit 0-15 contain the exit code. */ … … 3311 3311 rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_INFO, &val); 3312 3312 AssertRC(rc2); 3313 pVCpu->h waccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);3314 if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->h waccm.s.Event.intInfo)3313 pVCpu->hm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val); 3314 if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.intInfo) 3315 3315 /* Ignore 'int xx' as they'll be restarted anyway. */ 3316 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->h waccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW3316 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW 3317 3317 /* Ignore software exceptions (such as int3) as they'll reoccur when we restart the instruction anyway. */ 3318 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->h waccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)3319 { 3320 Assert(!pVCpu->h waccm.s.Event.fPending);3321 pVCpu->h waccm.s.Event.fPending = true;3318 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT) 3319 { 3320 Assert(!pVCpu->hm.s.Event.fPending); 3321 pVCpu->hm.s.Event.fPending = true; 3322 3322 /* Error code present? 
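The post-exit code below captures the IDT-vectoring information so that an event whose delivery was interrupted by the VM-exit can be re-injected on the next entry; software interrupts and software exceptions are skipped because the instruction simply re-executes. A minimal sketch of that rule (bit-field constants follow the SDM layout but are illustrative, not the source's macros):

    #include <stdint.h>
    #include <stdbool.h>

    #define VECTORING_INFO_VALID(a)  (((a) >> 31) & 1u)
    #define VECTORING_INFO_TYPE(a)   (((a) >>  8) & 7u)
    #define TYPE_SW_INT              4u
    #define TYPE_SW_XCPT             6u

    typedef struct PENDINGEVENT { bool fPending; uint32_t uIntInfo; uint32_t uErrCode; } PENDINGEVENT;

    static void captureVectoringEvent(PENDINGEVENT *pEvent, uint32_t uIdtInfo, uint32_t uErrCode)
    {
        if (   VECTORING_INFO_VALID(uIdtInfo)
            && VECTORING_INFO_TYPE(uIdtInfo) != TYPE_SW_INT
            && VECTORING_INFO_TYPE(uIdtInfo) != TYPE_SW_XCPT)
        {
            pEvent->fPending = true;      /* re-inject on the next VM-entry */
            pEvent->uIntInfo = uIdtInfo;
            pEvent->uErrCode = uErrCode;  /* only meaningful if the info says so */
        }
    }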
*/ 3323 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->h waccm.s.Event.intInfo))3323 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.intInfo)) 3324 3324 { 3325 3325 rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_ERRCODE, &val); 3326 3326 AssertRC(rc2); 3327 pVCpu->h waccm.s.Event.errCode = val;3327 pVCpu->hm.s.Event.errCode = val; 3328 3328 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv pending error=%RX64\n", 3329 pVCpu->h waccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));3329 pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val)); 3330 3330 } 3331 3331 else 3332 3332 { 3333 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->h waccm.s.Event.intInfo,3333 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hm.s.Event.intInfo, 3334 3334 (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification)); 3335 pVCpu->h waccm.s.Event.errCode = 0;3335 pVCpu->hm.s.Event.errCode = 0; 3336 3336 } 3337 3337 } 3338 3338 #ifdef VBOX_STRICT 3339 else if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->h waccm.s.Event.intInfo)3339 else if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.intInfo) 3340 3340 /* Ignore software exceptions (such as int3) as they're reoccur when we restart the instruction anyway. */ 3341 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->h waccm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)3341 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT) 3342 3342 { 3343 3343 Log(("Ignore pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", 3344 pVCpu->h waccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));3344 pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification)); 3345 3345 } 3346 3346 3347 3347 if (exitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE) 3348 H WACCMDumpRegs(pVM, pVCpu, pCtx);3348 HMDumpRegs(pVM, pVCpu, pCtx); 3349 3349 #endif 3350 3350 … … 3359 3359 */ 3360 3360 if ( fSetupTPRCaching 3361 && u8LastTPR != pVCpu->h waccm.s.vmx.pbVAPIC[0x80])3362 { 3363 rc2 = PDMApicSetTPR(pVCpu, pVCpu->h waccm.s.vmx.pbVAPIC[0x80]);3361 && u8LastTPR != pVCpu->hm.s.vmx.pbVAPIC[0x80]) 3362 { 3363 rc2 = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVAPIC[0x80]); 3364 3364 AssertRC(rc2); 3365 3365 } … … 3369 3369 exitReason, (uint64_t)exitQualification, pCtx->cs.Sel, pCtx->rip, (uint64_t)intInfo); 3370 3370 #endif 3371 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x);3371 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x); 3372 3372 3373 3373 /* Some cases don't need a complete resync of the guest CPU state; handle them here. */ … … 3392 3392 break; 3393 3393 } 3394 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatExit2Sub3, y3);3394 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub3, y3); 3395 3395 switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo)) 3396 3396 { … … 3423 3423 Assert(CPUMIsGuestFPUStateActive(pVCpu)); 3424 3424 3425 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowNM);3425 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM); 3426 3426 3427 3427 /* Continue execution. 
*/ 3428 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;3429 3430 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3428 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 3429 3430 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3431 3431 goto ResumeExecution; 3432 3432 } 3433 3433 3434 3434 Log(("Forward #NM fault to the guest\n")); 3435 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestNM);3435 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM); 3436 3436 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3437 3437 cbInstr, 0); 3438 3438 AssertRC(rc2); 3439 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3439 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3440 3440 goto ResumeExecution; 3441 3441 } … … 3444 3444 { 3445 3445 #ifdef VBOX_ALWAYS_TRAP_PF 3446 if (pVM->h waccm.s.fNestedPaging)3446 if (pVM->hm.s.fNestedPaging) 3447 3447 { 3448 3448 /* … … 3454 3454 Assert(CPUMIsGuestInPagedProtectedModeEx(pCtx)); 3455 3455 3456 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestPF);3456 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); 3457 3457 3458 3458 /* Now we must update CR2. */ … … 3462 3462 AssertRC(rc2); 3463 3463 3464 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3464 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3465 3465 goto ResumeExecution; 3466 3466 } 3467 3467 #else 3468 Assert(!pVM->h waccm.s.fNestedPaging);3469 #endif 3470 3471 #ifdef VBOX_H WACCM_WITH_GUEST_PATCHING3468 Assert(!pVM->hm.s.fNestedPaging); 3469 #endif 3470 3471 #ifdef VBOX_HM_WITH_GUEST_PATCHING 3472 3472 /* Shortcut for APIC TPR reads and writes; 32 bits guests only */ 3473 if ( pVM->h waccm.s.fTRPPatchingAllowed3474 && pVM->h waccm.s.pGuestPatchMem3473 if ( pVM->hm.s.fTRPPatchingAllowed 3474 && pVM->hm.s.pGuestPatchMem 3475 3475 && (exitQualification & 0xfff) == 0x080 3476 3476 && !(errCode & X86_TRAP_PF_P) /* not present */ 3477 3477 && CPUMGetGuestCPL(pVCpu) == 0 3478 3478 && !CPUMIsGuestInLongModeEx(pCtx) 3479 && pVM->h waccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))3479 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches)) 3480 3480 { 3481 3481 RTGCPHYS GCPhysApicBase, GCPhys; … … 3488 3488 { 3489 3489 /* Only attempt to patch the instruction once. */ 3490 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);3490 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 3491 3491 if (!pPatch) 3492 3492 { 3493 rc = VINF_EM_H WACCM_PATCH_TPR_INSTR;3493 rc = VINF_EM_HM_PATCH_TPR_INSTR; 3494 3494 break; 3495 3495 } … … 3508 3508 && !(errCode & X86_TRAP_PF_P) /* not present */ 3509 3509 && fSetupTPRCaching 3510 && (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))3510 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) 3511 3511 { 3512 3512 RTGCPHYS GCPhysApicBase, GCPhys; … … 3519 3519 { 3520 3520 Log(("Enable VT-x virtual APIC access filtering\n")); 3521 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->h waccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);3521 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P); 3522 3522 AssertRC(rc2); 3523 3523 } … … 3531 3531 { /* We've successfully synced our shadow pages, so let's just continue execution. 
*/ 3532 3532 Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, exitQualification ,errCode)); 3533 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowPF);3533 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); 3534 3534 3535 3535 TRPMResetTrap(pVCpu); 3536 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3536 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3537 3537 goto ResumeExecution; 3538 3538 } … … 3544 3544 Log2(("Forward page fault to the guest\n")); 3545 3545 3546 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestPF);3546 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); 3547 3547 /* The error code might have been changed. */ 3548 3548 errCode = TRPMGetErrorCode(pVCpu); … … 3556 3556 AssertRC(rc2); 3557 3557 3558 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3558 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3559 3559 goto ResumeExecution; 3560 3560 } … … 3564 3564 #endif 3565 3565 /* Need to go back to the recompiler to emulate the instruction. */ 3566 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowPFEM);3566 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM); 3567 3567 TRPMResetTrap(pVCpu); 3568 3568 break; … … 3571 3571 case X86_XCPT_MF: /* Floating point exception. */ 3572 3572 { 3573 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestMF);3573 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); 3574 3574 if (!(pCtx->cr0 & X86_CR0_NE)) 3575 3575 { … … 3584 3584 AssertRC(rc2); 3585 3585 3586 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3586 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3587 3587 goto ResumeExecution; 3588 3588 } … … 3602 3602 * 63:15 Reserved (0) 3603 3603 */ 3604 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestDB);3604 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); 3605 3605 3606 3606 /* Note that we don't support guest and host-initiated debugging at the same time. */ … … 3636 3636 AssertRC(rc2); 3637 3637 3638 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3638 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3639 3639 goto ResumeExecution; 3640 3640 } … … 3646 3646 case X86_XCPT_BP: /* Breakpoint. 
*/ 3647 3647 { 3648 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestBP);3648 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); 3649 3649 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 3650 3650 if (rc == VINF_EM_RAW_GUEST_TRAP) … … 3654 3654 cbInstr, errCode); 3655 3655 AssertRC(rc2); 3656 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3656 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3657 3657 goto ResumeExecution; 3658 3658 } 3659 3659 if (rc == VINF_SUCCESS) 3660 3660 { 3661 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3661 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3662 3662 goto ResumeExecution; 3663 3663 } … … 3669 3669 { 3670 3670 uint32_t cbOp; 3671 PDISCPUSTATE pDis = &pVCpu->h waccm.s.DisState;3672 3673 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestGP);3671 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 3672 3673 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP); 3674 3674 #ifdef VBOX_STRICT 3675 3675 if ( !CPUMIsGuestInRealModeEx(pCtx) 3676 || !pVM->h waccm.s.vmx.pRealModeTSS)3676 || !pVM->hm.s.vmx.pRealModeTSS) 3677 3677 { 3678 3678 Log(("Trap %x at %04X:%RGv errorCode=%RGv\n", vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, errCode)); … … 3680 3680 cbInstr, errCode); 3681 3681 AssertRC(rc2); 3682 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3682 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3683 3683 goto ResumeExecution; 3684 3684 } … … 3699 3699 case OP_CLI: 3700 3700 pCtx->eflags.Bits.u1IF = 0; 3701 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCli);3701 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli); 3702 3702 break; 3703 3703 … … 3709 3709 VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 3710 3710 AssertRC(rc2); 3711 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitSti);3711 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti); 3712 3712 break; 3713 3713 … … 3716 3716 rc = VINF_EM_HALT; 3717 3717 pCtx->rip += pDis->cbInstr; 3718 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitHlt);3718 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 3719 3719 break; 3720 3720 … … 3758 3758 pCtx->esp &= uMask; 3759 3759 3760 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitPopf);3760 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf); 3761 3761 break; 3762 3762 } … … 3801 3801 pCtx->esp -= cbParm; 3802 3802 pCtx->esp &= uMask; 3803 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitPushf);3803 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf); 3804 3804 break; 3805 3805 } … … 3839 3839 LogFlow(("iret to %04x:%x\n", pCtx->cs.Sel, pCtx->ip)); 3840 3840 fUpdateRIP = false; 3841 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIret);3841 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret); 3842 3842 break; 3843 3843 } … … 3855 3855 AssertRC(VBOXSTRICTRC_VAL(rc)); 3856 3856 fUpdateRIP = false; 3857 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInt);3857 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt); 3858 3858 break; 3859 3859 } … … 3873 3873 AssertRC(VBOXSTRICTRC_VAL(rc)); 3874 3874 fUpdateRIP = false; 3875 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInt);3875 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt); 3876 3876 } 3877 3877 break; … … 3890 3890 AssertRC(VBOXSTRICTRC_VAL(rc)); 3891 3891 fUpdateRIP = false; 3892 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInt);3892 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt); 3893 3893 break; 3894 3894 } … … 3909 3909 * whole context to be done with it. 3910 3910 */ 3911 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;3911 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL; 3912 3912 3913 3913 /* Only resume if successful. 
*/ 3914 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3914 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3915 3915 goto ResumeExecution; 3916 3916 } … … 3933 3933 switch (vector) 3934 3934 { 3935 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestDE); break;3936 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestUD); break;3937 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestSS); break;3938 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestNP); break;3939 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestXF); break;3935 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break; 3936 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break; 3937 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break; 3938 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break; 3939 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break; 3940 3940 } 3941 3941 … … 3945 3945 AssertRC(rc2); 3946 3946 3947 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3947 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3948 3948 goto ResumeExecution; 3949 3949 } 3950 3950 #endif 3951 3951 default: 3952 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestXcpUnk);3952 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk); 3953 3953 if ( CPUMIsGuestInRealModeEx(pCtx) 3954 && pVM->h waccm.s.vmx.pRealModeTSS)3954 && pVM->hm.s.vmx.pRealModeTSS) 3955 3955 { 3956 3956 Log(("Real Mode Trap %x at %04x:%04X error code %x\n", vector, pCtx->cs.Sel, pCtx->eip, errCode)); … … 3966 3966 } 3967 3967 3968 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3968 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3969 3969 goto ResumeExecution; 3970 3970 } … … 3982 3982 } 3983 3983 3984 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3984 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3985 3985 break; 3986 3986 } … … 3994 3994 RTGCPHYS GCPhys; 3995 3995 3996 Assert(pVM->h waccm.s.fNestedPaging);3996 Assert(pVM->hm.s.fNestedPaging); 3997 3997 3998 3998 rc2 = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys); … … 4017 4017 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */ 4018 4018 && fSetupTPRCaching 4019 && (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))4019 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) 4020 4020 { 4021 4021 RTGCPHYS GCPhysApicBase; … … 4025 4025 { 4026 4026 Log(("Enable VT-x virtual APIC access filtering\n")); 4027 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->h waccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);4027 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P); 4028 4028 AssertRC(rc2); 4029 4029 } … … 4049 4049 /* We've successfully synced our shadow pages, so let's just continue execution. 
*/ 4050 4050 Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, exitQualification , errCode)); 4051 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitReasonNPF);4051 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNPF); 4052 4052 4053 4053 TRPMResetTrap(pVCpu); … … 4068 4068 RTGCPHYS GCPhys; 4069 4069 4070 Assert(pVM->h waccm.s.fNestedPaging);4070 Assert(pVM->hm.s.fNestedPaging); 4071 4071 4072 4072 rc2 = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys); … … 4078 4078 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */ 4079 4079 && fSetupTPRCaching 4080 && (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))4080 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) 4081 4081 { 4082 4082 RTGCPHYS GCPhysApicBase; … … 4086 4086 { 4087 4087 Log(("Enable VT-x virtual APIC access filtering\n")); 4088 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->h waccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);4088 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P); 4089 4089 AssertRC(rc2); 4090 4090 } … … 4116 4116 LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip, 4117 4117 VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF)); 4118 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;4119 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);4118 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT; 4119 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 4120 4120 AssertRC(rc2); 4121 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIrqWindow);4121 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIrqWindow); 4122 4122 goto ResumeExecution; /* we check for pending guest interrupts there */ 4123 4123 4124 4124 case VMX_EXIT_WBINVD: /* 54 Guest software attempted to execute WBINVD. (conditional) */ 4125 4125 case VMX_EXIT_INVD: /* 13 Guest software attempted to execute INVD. (unconditional) */ 4126 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInvd);4126 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd); 4127 4127 /* Skip instruction and continue directly. 
*/ 4128 4128 pCtx->rip += cbInstr; … … 4133 4133 { 4134 4134 Log2(("VMX: Cpuid %x\n", pCtx->eax)); 4135 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCpuid);4135 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid); 4136 4136 rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4137 4137 if (rc == VINF_SUCCESS) … … 4150 4150 { 4151 4151 Log2(("VMX: Rdpmc %x\n", pCtx->ecx)); 4152 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdpmc);4152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc); 4153 4153 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4154 4154 if (rc == VINF_SUCCESS) … … 4166 4166 { 4167 4167 Log2(("VMX: Rdtsc\n")); 4168 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdtsc);4168 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc); 4169 4169 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4170 4170 if (rc == VINF_SUCCESS) … … 4182 4182 { 4183 4183 Log2(("VMX: Rdtscp\n")); 4184 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdtscp);4184 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp); 4185 4185 rc = EMInterpretRdtscp(pVM, pVCpu, pCtx); 4186 4186 if (rc == VINF_SUCCESS) … … 4198 4198 { 4199 4199 Log2(("VMX: invlpg\n")); 4200 Assert(!pVM->h waccm.s.fNestedPaging);4201 4202 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInvlpg);4200 Assert(!pVM->hm.s.fNestedPaging); 4201 4202 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg); 4203 4203 rc = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx), exitQualification); 4204 4204 if (rc == VINF_SUCCESS) … … 4216 4216 Log2(("VMX: monitor\n")); 4217 4217 4218 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMonitor);4218 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor); 4219 4219 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4220 4220 if (rc == VINF_SUCCESS) … … 4230 4230 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */ 4231 4231 /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */ 4232 if ( pVM->h waccm.s.fTPRPatchingActive4232 if ( pVM->hm.s.fTPRPatchingActive 4233 4233 && pCtx->ecx == MSR_K8_LSTAR) 4234 4234 { … … 4249 4249 goto ResumeExecution; 4250 4250 } 4251 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_MSR;4251 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_MSR; 4252 4252 /* no break */ 4253 4253 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */ 4254 4254 { 4255 STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->h waccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);4255 STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr); 4256 4256 4257 4257 /* … … 4274 4274 case VMX_EXIT_CRX_MOVE: /* 28 Control-register accesses. 
*/ 4275 4275 { 4276 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatExit2Sub2, y2);4276 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub2, y2); 4277 4277 4278 4278 switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification)) … … 4281 4281 { 4282 4282 Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))); 4283 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);4283 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]); 4284 4284 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx), 4285 4285 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification), … … 4288 4288 { 4289 4289 case 0: 4290 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;4290 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3; 4291 4291 break; 4292 4292 case 2: 4293 4293 break; 4294 4294 case 3: 4295 Assert(!pVM->h waccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));4296 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;4295 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx)); 4296 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3; 4297 4297 break; 4298 4298 case 4: 4299 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;4299 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4; 4300 4300 break; 4301 4301 case 8: 4302 4302 /* CR8 contains the APIC TPR */ 4303 Assert(!(pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed14303 Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 4304 4304 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)); 4305 4305 break; … … 4315 4315 { 4316 4316 Log2(("VMX: mov x, crx\n")); 4317 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);4318 4319 Assert( !pVM->h waccm.s.fNestedPaging4317 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]); 4318 4319 Assert( !pVM->hm.s.fNestedPaging 4320 4320 || !CPUMIsGuestInPagedProtectedModeEx(pCtx) 4321 4321 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != DISCREG_CR3); … … 4323 4323 /* CR8 reads only cause an exit when the TPR shadow feature isn't present. 
*/ 4324 4324 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8 4325 || !(pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));4325 || !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)); 4326 4326 4327 4327 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx), … … 4334 4334 { 4335 4335 Log2(("VMX: clts\n")); 4336 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCLTS);4336 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCLTS); 4337 4337 rc = EMInterpretCLTS(pVM, pVCpu); 4338 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;4338 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 4339 4339 break; 4340 4340 } … … 4343 4343 { 4344 4344 Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification))); 4345 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitLMSW);4345 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLMSW); 4346 4346 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)); 4347 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;4347 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 4348 4348 break; 4349 4349 } … … 4357 4357 { 4358 4358 /* Only resume if successful. */ 4359 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub2, y2);4359 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2); 4360 4360 goto ResumeExecution; 4361 4361 } 4362 4362 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3); 4363 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub2, y2);4363 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2); 4364 4364 break; 4365 4365 } … … 4371 4371 { 4372 4372 /* Disable DRx move intercepts. */ 4373 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;4374 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);4373 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT; 4374 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 4375 4375 AssertRC(rc2); 4376 4376 … … 4390 4390 4391 4391 #ifdef VBOX_WITH_STATISTICS 4392 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxContextSwitch);4392 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch); 4393 4393 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE) 4394 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxWrite);4394 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 4395 4395 else 4396 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxRead);4396 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 4397 4397 #endif 4398 4398 … … 4406 4406 Log2(("VMX: mov DRx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), 4407 4407 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification))); 4408 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxWrite);4408 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 4409 4409 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx), 4410 4410 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), 4411 4411 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)); 4412 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;4412 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 4413 4413 Log2(("DR7=%08x\n", pCtx->dr[7])); 4414 4414 } … … 4416 4416 { 4417 4417 Log2(("VMX: mov x, DRx\n")); 4418 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxRead);4418 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 4419 4419 rc = 
EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx), 4420 4420 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification), … … 4437 4437 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */ 4438 4438 { 4439 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatExit2Sub1, y1);4439 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub1, y1); 4440 4440 uint32_t uPort; 4441 4441 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification); … … 4451 4451 { 4452 4452 rc = fIOWrite ? VINF_IOM_R3_IOPORT_WRITE : VINF_IOM_R3_IOPORT_READ; 4453 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4453 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4454 4454 break; 4455 4455 } … … 4459 4459 { 4460 4460 /* ins/outs */ 4461 PDISCPUSTATE pDis = &pVCpu->h waccm.s.DisState;4461 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 4462 4462 4463 4463 /* Disassemble manually to deal with segment prefixes. */ … … 4470 4470 { 4471 4471 Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize)); 4472 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOStringWrite);4472 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite); 4473 4473 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize); 4474 4474 } … … 4476 4476 { 4477 4477 Log2(("IOMInterpretINSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize)); 4478 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOStringRead);4478 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead); 4479 4479 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize); 4480 4480 } … … 4492 4492 if (fIOWrite) 4493 4493 { 4494 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOWrite);4494 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite); 4495 4495 rc = IOMIOPortWrite(pVM, uPort, pCtx->eax & uAndVal, cbSize); 4496 4496 if (rc == VINF_IOM_R3_IOPORT_WRITE) 4497 H WACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);4497 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize); 4498 4498 } 4499 4499 else … … 4501 4501 uint32_t u32Val = 0; 4502 4502 4503 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIORead);4503 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead); 4504 4504 rc = IOMIOPortRead(pVM, uPort, &u32Val, cbSize); 4505 4505 if (IOM_SUCCESS(rc)) … … 4510 4510 else 4511 4511 if (rc == VINF_IOM_R3_IOPORT_READ) 4512 H WACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);4512 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize); 4513 4513 } 4514 4514 } … … 4527 4527 if (pCtx->dr[7] & X86_DR7_ENABLED_MASK) 4528 4528 { 4529 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxIOCheck);4529 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIOCheck); 4530 4530 for (unsigned i = 0; i < 4; i++) 4531 4531 { … … 4575 4575 AssertRC(rc2); 4576 4576 4577 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4577 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4578 4578 goto ResumeExecution; 4579 4579 } 4580 4580 } 4581 4581 } 4582 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4582 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4583 4583 goto ResumeExecution; 4584 4584 } 4585 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4585 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4586 4586 break; 4587 4587 } … … 4600 4600 } 4601 4601 #endif 4602 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4602 
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4603 4603 break; 4604 4604 } … … 4686 4686 Log(("VMX_EXIT_TASK_SWITCH: exit=%RX64\n", exitQualification)); 4687 4687 if ( (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(exitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT) 4688 && pVCpu->h waccm.s.Event.fPending)4688 && pVCpu->hm.s.Event.fPending) 4689 4689 { 4690 4690 /* Caused by an injected interrupt. */ 4691 pVCpu->h waccm.s.Event.fPending = false;4692 4693 Log(("VMX_EXIT_TASK_SWITCH: reassert trap %d\n", VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->h waccm.s.Event.intInfo)));4694 Assert(!VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->h waccm.s.Event.intInfo));4695 rc2 = TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->h waccm.s.Event.intInfo), TRPM_HARDWARE_INT);4691 pVCpu->hm.s.Event.fPending = false; 4692 4693 Log(("VMX_EXIT_TASK_SWITCH: reassert trap %d\n", VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.intInfo))); 4694 Assert(!VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.intInfo)); 4695 rc2 = TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.intInfo), TRPM_HARDWARE_INT); 4696 4696 AssertRC(rc2); 4697 4697 } … … 4702 4702 case VMX_EXIT_HLT: /* 12 Guest software attempted to execute HLT. */ 4703 4703 /* Check if external interrupts are pending; if so, don't switch back. */ 4704 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitHlt);4704 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 4705 4705 pCtx->rip++; /* skip hlt */ 4706 4706 if (EMShouldContinueAfterHalt(pVCpu, pCtx)) … … 4712 4712 case VMX_EXIT_MWAIT: /* 36 Guest software executed MWAIT. */ 4713 4713 Log2(("VMX: mwait\n")); 4714 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMwait);4714 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait); 4715 4715 rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4716 4716 if ( rc == VINF_EM_HALT … … 4737 4737 case VMX_EXIT_MTF: /* 37 Exit due to Monitor Trap Flag. */ 4738 4738 LogFlow(("VMX_EXIT_MTF at %RGv\n", (RTGCPTR)pCtx->rip)); 4739 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;4740 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);4739 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG; 4740 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 4741 4741 AssertRC(rc2); 4742 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMTF);4742 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMTF); 4743 4743 #if 0 4744 4744 DBGFDoneStepping(pVCpu); … … 4872 4872 && !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo)) 4873 4873 { 4874 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatPendingHostIrq);4874 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq); 4875 4875 /* On the next entry we'll only sync the host context. */ 4876 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;4876 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT; 4877 4877 } 4878 4878 else … … 4881 4881 /** @todo we can do better than this */ 4882 4882 /* Not in the VINF_PGM_CHANGE_MODE though! */ 4883 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;4883 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL; 4884 4884 } 4885 4885 … … 4890 4890 { 4891 4891 /* Try to extract more information about what might have gone wrong here. 
*/ 4892 VMXGetActivateVMCS(&pVCpu->h waccm.s.vmx.lasterror.u64VMCSPhys);4893 pVCpu->h waccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS;4894 pVCpu->h waccm.s.vmx.lasterror.idEnteredCpu = pVCpu->hwaccm.s.idEnteredCpu;4895 pVCpu->h waccm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId();4892 VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys); 4893 pVCpu->hm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS; 4894 pVCpu->hm.s.vmx.lasterror.idEnteredCpu = pVCpu->hm.s.idEnteredCpu; 4895 pVCpu->hm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId(); 4896 4896 } 4897 4897 … … 4905 4905 #endif 4906 4906 4907 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2, x);4908 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit1, x);4909 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatEntry, x);4907 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x); 4908 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x); 4909 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x); 4910 4910 Log2(("X")); 4911 4911 return VBOXSTRICTRC_TODO(rc); … … 4923 4923 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu) 4924 4924 { 4925 Assert(pVM->h waccm.s.vmx.fSupported);4925 Assert(pVM->hm.s.vmx.fSupported); 4926 4926 NOREF(pCpu); 4927 4927 … … 4934 4934 4935 4935 /* Activate the VMCS. */ 4936 int rc = VMXActivateVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);4936 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 4937 4937 if (RT_FAILURE(rc)) 4938 4938 return rc; 4939 4939 4940 pVCpu->h waccm.s.fResumeVM = false;4940 pVCpu->hm.s.fResumeVM = false; 4941 4941 return VINF_SUCCESS; 4942 4942 } … … 4953 4953 VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 4954 4954 { 4955 Assert(pVM->h waccm.s.vmx.fSupported);4955 Assert(pVM->hm.s.vmx.fSupported); 4956 4956 4957 4957 #ifdef DEBUG … … 4959 4959 { 4960 4960 CPUMR0LoadHostDebugState(pVM, pVCpu); 4961 Assert(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);4961 Assert(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT); 4962 4962 } 4963 4963 else … … 4972 4972 4973 4973 /* Enable DRx move intercepts again. */ 4974 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;4975 int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);4974 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT; 4975 int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 4976 4976 AssertRC(rc); 4977 4977 4978 4978 /* Resync the debug registers the next time. */ 4979 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;4979 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 4980 4980 } 4981 4981 else 4982 Assert(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);4982 Assert(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT); 4983 4983 4984 4984 /* … … 4986 4986 * VMCS data back to memory. 4987 4987 */ 4988 int rc = VMXClearVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);4988 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 4989 4989 AssertRC(rc); 4990 4990 … … 5006 5006 5007 5007 LogFlow(("hmR0VmxFlushEPT %d\n", enmFlush)); 5008 Assert(pVM->h waccm.s.fNestedPaging);5009 descriptor[0] = pVCpu->h waccm.s.vmx.GCPhysEPTP;5008 Assert(pVM->hm.s.fNestedPaging); 5009 descriptor[0] = pVCpu->hm.s.vmx.GCPhysEPTP; 5010 5010 descriptor[1] = 0; /* MBZ. Intel spec. 
33.3 VMX Instructions */ 5011 5011 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]); 5012 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->h waccm.s.vmx.GCPhysEPTP, rc));5012 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->hm.s.vmx.GCPhysEPTP, rc)); 5013 5013 } 5014 5014 … … 5029 5029 uint64_t descriptor[2]; 5030 5030 5031 Assert(pVM->h waccm.s.vmx.fVPID);5031 Assert(pVM->hm.s.vmx.fVPID); 5032 5032 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS) 5033 5033 { … … 5038 5038 { 5039 5039 AssertPtr(pVCpu); 5040 AssertMsg(pVCpu->h waccm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));5041 AssertMsg(pVCpu->h waccm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));5042 descriptor[0] = pVCpu->h waccm.s.uCurrentASID;5040 AssertMsg(pVCpu->hm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID)); 5041 AssertMsg(pVCpu->hm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID)); 5042 descriptor[0] = pVCpu->hm.s.uCurrentASID; 5043 5043 descriptor[1] = GCPtr; 5044 5044 } 5045 5045 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc); 5046 5046 AssertMsg(rc == VINF_SUCCESS, 5047 ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->h waccm.s.uCurrentASID : 0, GCPtr, rc));5047 ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentASID : 0, GCPtr, rc)); 5048 5048 } 5049 5049 … … 5073 5073 * function maybe called in a loop with individual addresses. 5074 5074 */ 5075 if (pVM->h waccm.s.vmx.fVPID)5075 if (pVM->hm.s.vmx.fVPID) 5076 5076 { 5077 5077 /* If we can flush just this page do it, otherwise flush as little as possible. 
*/ 5078 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)5078 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR) 5079 5079 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt); 5080 5080 else 5081 5081 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 5082 5082 } 5083 else if (pVM->h waccm.s.fNestedPaging)5083 else if (pVM->hm.s.fNestedPaging) 5084 5084 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 5085 5085 } … … 5147 5147 Log(("Current stack %08x\n", &rc2)); 5148 5148 5149 pVCpu->h waccm.s.vmx.lasterror.ulInstrError = instrError;5150 pVCpu->h waccm.s.vmx.lasterror.ulExitReason = exitReason;5149 pVCpu->hm.s.vmx.lasterror.ulInstrError = instrError; 5150 pVCpu->hm.s.vmx.lasterror.ulExitReason = exitReason; 5151 5151 5152 5152 #ifdef VBOX_STRICT … … 5183 5183 { 5184 5184 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5185 H WACCMR0DumpDescriptor(pDesc, val, "CS: ");5185 HMR0DumpDescriptor(pDesc, val, "CS: "); 5186 5186 } 5187 5187 … … 5191 5191 { 5192 5192 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5193 H WACCMR0DumpDescriptor(pDesc, val, "DS: ");5193 HMR0DumpDescriptor(pDesc, val, "DS: "); 5194 5194 } 5195 5195 … … 5199 5199 { 5200 5200 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5201 H WACCMR0DumpDescriptor(pDesc, val, "ES: ");5201 HMR0DumpDescriptor(pDesc, val, "ES: "); 5202 5202 } 5203 5203 … … 5207 5207 { 5208 5208 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5209 H WACCMR0DumpDescriptor(pDesc, val, "FS: ");5209 HMR0DumpDescriptor(pDesc, val, "FS: "); 5210 5210 } 5211 5211 … … 5215 5215 { 5216 5216 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5217 H WACCMR0DumpDescriptor(pDesc, val, "GS: ");5217 HMR0DumpDescriptor(pDesc, val, "GS: "); 5218 5218 } 5219 5219 … … 5223 5223 { 5224 5224 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5225 H WACCMR0DumpDescriptor(pDesc, val, "SS: ");5225 HMR0DumpDescriptor(pDesc, val, "SS: "); 5226 5226 } 5227 5227 … … 5231 5231 { 5232 5232 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5233 H WACCMR0DumpDescriptor(pDesc, val, "TR: ");5233 HMR0DumpDescriptor(pDesc, val, "TR: "); 5234 5234 } 5235 5235 … … 5292 5292 int rc; 5293 5293 5294 pCpu = H WACCMR0GetCurrentCpu();5294 pCpu = HMR0GetCurrentCpu(); 5295 5295 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0); 5296 5296 … … 5298 5298 pCache->uPos = 1; 5299 5299 pCache->interPD = PGMGetInterPaeCR3(pVM); 5300 pCache->pSwitcher = (uint64_t)pVM->h waccm.s.pfnHost32ToGuest64R0;5300 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0; 5301 5301 #endif 5302 5302 … … 5313 5313 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */ 5314 5314 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */ 5315 aParam[2] = (uint32_t)(pVCpu->h waccm.s.vmx.HCPhysVMCS); /* Param 2: VMCS physical address - Lo. */5316 aParam[3] = (uint32_t)(pVCpu->h waccm.s.vmx.HCPhysVMCS >> 32); /* Param 2: VMCS physical address - Hi. */5317 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].h waccm.s.vmx.VMCSCache);5315 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS); /* Param 2: VMCS physical address - Lo. */ 5316 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS >> 32); /* Param 2: VMCS physical address - Hi. 
*/ 5317 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache); 5318 5318 aParam[5] = 0; 5319 5319 5320 5320 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 5321 pCtx->dr[4] = pVM->h waccm.s.vmx.pScratchPhys + 16 + 8;5322 *(uint32_t *)(pVM->h waccm.s.vmx.pScratch + 16 + 8) = 1;5323 #endif 5324 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->h waccm.s.pfnVMXGCStartVM64, 6, &aParam[0]);5321 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8; 5322 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1; 5323 #endif 5324 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]); 5325 5325 5326 5326 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 5327 Assert(*(uint32_t *)(pVM->h waccm.s.vmx.pScratch + 16 + 8) == 5);5327 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5); 5328 5328 Assert(pCtx->dr[4] == 10); 5329 *(uint32_t *)(pVM->h waccm.s.vmx.pScratch + 16 + 8) = 0xff;5329 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff; 5330 5330 #endif 5331 5331 5332 5332 #ifdef DEBUG 5333 5333 AssertMsg(pCache->TestIn.HCPhysCpuPage== HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage)); 5334 AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->h waccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,5335 pVCpu->h waccm.s.vmx.HCPhysVMCS));5334 AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->hm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, 5335 pVCpu->hm.s.vmx.HCPhysVMCS)); 5336 5336 AssertMsg(pCache->TestIn.HCPhysVMCS == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, 5337 5337 pCache->TestOut.HCPhysVMCS)); 5338 5338 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache, 5339 5339 pCache->TestOut.pCache)); 5340 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].h waccm.s.vmx.VMCSCache),5341 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].h waccm.s.vmx.VMCSCache)));5340 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache), 5341 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache))); 5342 5342 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx, 5343 5343 pCache->TestOut.pCtx)); … … 5466 5466 RTHCUINTREG uOldEFlags; 5467 5467 5468 AssertReturn(pVM->h waccm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);5468 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER); 5469 5469 Assert(pfnHandler); 5470 Assert(pVCpu->h waccm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField));5471 Assert(pVCpu->h waccm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField));5470 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField)); 5471 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField)); 5472 5472 5473 5473 #ifdef VBOX_STRICT 5474 for (unsigned i=0;i<pVCpu->h waccm.s.vmx.VMCSCache.Write.cValidEntries;i++)5475 Assert(hmR0VmxIsValidWriteField(pVCpu->h waccm.s.vmx.VMCSCache.Write.aField[i]));5476 5477 for (unsigned i=0;i<pVCpu->h waccm.s.vmx.VMCSCache.Read.cValidEntries;i++)5478 Assert(hmR0VmxIsValidReadField(pVCpu->h waccm.s.vmx.VMCSCache.Read.aField[i]));5474 for (unsigned i=0;i<pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries;i++) 5475 
Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i])); 5476 5477 for (unsigned i=0;i<pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries;i++) 5478 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i])); 5479 5479 #endif 5480 5480 … … 5487 5487 #endif 5488 5488 5489 pCpu = H WACCMR0GetCurrentCpu();5489 pCpu = HMR0GetCurrentCpu(); 5490 5490 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0); 5491 5491 5492 5492 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */ 5493 VMXClearVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);5493 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 5494 5494 5495 5495 /* Leave VMX Root Mode. */ … … 5503 5503 CPUMPushHyper(pVCpu, paParam[i]); 5504 5504 5505 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatWorldSwitch3264, z);5505 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z); 5506 5506 5507 5507 /* Call switcher. */ 5508 rc = pVM->h waccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));5509 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatWorldSwitch3264, z);5508 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum)); 5509 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z); 5510 5510 5511 5511 /* Make sure the VMX instructions don't cause #UD faults. */ … … 5521 5521 } 5522 5522 5523 rc2 = VMXActivateVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);5523 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 5524 5524 AssertRC(rc2); 5525 5525 Assert(!(ASMGetFlags() & X86_EFL_IF)); … … 5609 5609 VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val) 5610 5610 { 5611 PVMCSCACHE pCache = &pVCpu->h waccm.s.vmx.VMCSCache;5611 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; 5612 5612 5613 5613 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1, -
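Editor's note: the HWVMXR0.cpp hunk above ends inside VMXWriteCachedVMCSEx(), which queues a VMCS write into pVCpu->hm.s.vmx.VMCSCache instead of issuing VMWRITE directly; the cache is handed to the 32-bit-host-to-64-bit-guest switcher path (see VMXR0Execute64BitsHandler and pfnHost32ToGuest64R0 above), and the code after the world switch asserts that Write.cValidEntries has dropped back to zero. The sketch below only illustrates that deferred-write idea under those assumptions; the DEMO* names and the pfnVmWrite callback are made-up stand-ins, not the real VMCSCACHE layout or the VMX wrapper API.

    /* Minimal sketch of a deferred VMCS write cache (illustrative only). */
    #include <stdint.h>
    #include <assert.h>

    #define DEMO_CACHE_MAX_ENTRY 128

    typedef struct DEMOVMCSWRITECACHE
    {
        uint32_t cValidEntries;                   /* number of queued writes */
        uint32_t aField[DEMO_CACHE_MAX_ENTRY];    /* VMCS field encodings */
        uint64_t aFieldVal[DEMO_CACHE_MAX_ENTRY]; /* values to write */
    } DEMOVMCSWRITECACHE;

    /* Queue a write instead of issuing VMWRITE immediately. */
    static int demoVmxWriteCached(DEMOVMCSWRITECACHE *pCache, uint32_t idxField, uint64_t u64Val)
    {
        assert(pCache->cValidEntries < DEMO_CACHE_MAX_ENTRY);
        pCache->aField[pCache->cValidEntries]    = idxField;
        pCache->aFieldVal[pCache->cValidEntries] = u64Val;
        pCache->cValidEntries++;
        return 0; /* success */
    }

    /* Replay all queued writes once VMWRITE is actually possible (e.g. from the
       64-bit switcher context), then mark the queue as drained. */
    static int demoVmxFlushCachedWrites(DEMOVMCSWRITECACHE *pCache,
                                        int (*pfnVmWrite)(uint32_t idxField, uint64_t u64Val))
    {
        for (uint32_t i = 0; i < pCache->cValidEntries; i++)
        {
            int rc = pfnVmWrite(pCache->aField[i], pCache->aFieldVal[i]);
            if (rc != 0)
                return rc;
        }
        pCache->cValidEntries = 0;
        return 0;
    }

Queuing like this keeps the 32-bit ring-0 code path free of VMCS accesses that only the 64-bit context can perform, at the cost of the extra bookkeeping asserted on above.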
trunk/src/VBox/VMM/VMMR0/HWVMXR0.h
r43307 r43387 24 24 #include <VBox/vmm/stam.h> 25 25 #include <VBox/dis.h> 26 #include <VBox/vmm/h waccm.h>26 #include <VBox/vmm/hm.h> 27 27 #include <VBox/vmm/pgm.h> 28 #include <VBox/vmm/h wacc_vmx.h>28 #include <VBox/vmm/hm_vmx.h> 29 29 30 30 RT_C_DECLS_BEGIN … … 220 220 else \ 221 221 if ( CPUMIsGuestInRealModeEx(pCtx) \ 222 && !pVM->h waccm.s.vmx.fUnrestrictedGuest) \222 && !pVM->hm.s.vmx.fUnrestrictedGuest) \ 223 223 { \ 224 224 /* Must override this or else VT-x will fail with invalid guest state errors. */ \ … … 291 291 { 292 292 Assert(idxCache <= VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX); 293 *pVal = pVCpu->h waccm.s.vmx.VMCSCache.Read.aFieldVal[idxCache];293 *pVal = pVCpu->hm.s.vmx.VMCSCache.Read.aFieldVal[idxCache]; 294 294 return VINF_SUCCESS; 295 295 } -
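Editor's note: VMXReadCachedVMCS() at the end of this header hunk is the read-side counterpart of the write cache: selected read-only fields are captured into VMCSCache.Read.aFieldVal[] once per VM-exit and the exit handlers then fetch them by index instead of repeating VMREAD. A rough usage sketch, again with illustrative stand-in names rather than the real cache definition:

    /* Minimal sketch of a per-exit VMCS read cache (illustrative only). */
    #include <stdint.h>

    #define DEMO_CACHE_IDX_EXIT_REASON  0   /* cache slot indices, made up */
    #define DEMO_CACHE_IDX_EXIT_QUAL    1
    #define DEMO_CACHE_IDX_MAX          2

    typedef struct DEMOVMCSREADCACHE
    {
        uint64_t aFieldVal[DEMO_CACHE_IDX_MAX];
    } DEMOVMCSREADCACHE;

    /* Capture the interesting read-only fields once per VM-exit; pfnVmRead
       stands in for the real VMREAD wrapper and maps a slot to its value. */
    static void demoVmxCaptureExitState(DEMOVMCSREADCACHE *pCache,
                                        uint64_t (*pfnVmRead)(uint32_t idxSlot))
    {
        for (uint32_t i = 0; i < DEMO_CACHE_IDX_MAX; i++)
            pCache->aFieldVal[i] = pfnVmRead(i);
    }

    /* Exit handlers read from memory instead of issuing VMREAD again. */
    static uint64_t demoVmxReadCached(const DEMOVMCSREADCACHE *pCache, uint32_t idxSlot)
    {
        return pCache->aFieldVal[idxSlot];
    }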
trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp
r42222 r43387 28 28 #include <VBox/vmm/vmm.h> 29 29 #include <VBox/vmm/patm.h> 30 #include <VBox/vmm/h waccm.h>30 #include <VBox/vmm/hm.h> 31 31 32 32 #include <VBox/log.h> … … 374 374 PDMDEV_ASSERT_DEVINS(pDevIns); 375 375 LogFlow(("pdmR0DevHlp_GetVM: caller='%p'/%d\n", pDevIns, pDevIns->iInstance)); 376 return H WACCMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0));376 return HMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0)); 377 377 } 378 378 -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r43379 r43387 37 37 #include <VBox/vmm/gmm.h> 38 38 #include <VBox/intnet.h> 39 #include <VBox/vmm/h waccm.h>39 #include <VBox/vmm/hm.h> 40 40 #include <VBox/param.h> 41 41 #include <VBox/err.h> … … 117 117 118 118 /* 119 * Initialize the VMM, GVMM, GMM, H WACCM, PGM (Darwin) and INTNET.119 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET. 120 120 */ 121 121 int rc = vmmInitFormatTypes(); … … 128 128 if (RT_SUCCESS(rc)) 129 129 { 130 rc = H WACCMR0Init();130 rc = HMR0Init(); 131 131 if (RT_SUCCESS(rc)) 132 132 { … … 188 188 else 189 189 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc)); 190 H WACCMR0Term();190 HMR0Term(); 191 191 } 192 192 else 193 LogRel(("ModuleInit: H WACCMR0Init -> %Rrc\n", rc));193 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc)); 194 194 GMMR0Term(); 195 195 } … … 231 231 232 232 /* 233 * PGM (Darwin), H WACCM and PciRaw global cleanup.233 * PGM (Darwin), HM and PciRaw global cleanup. 234 234 */ 235 235 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE … … 240 240 #endif 241 241 PGMDeregisterStringFormatTypes(); 242 H WACCMR0Term();242 HMR0Term(); 243 243 #ifdef VBOX_WITH_TRIPLE_FAULT_HACK 244 244 vmmR0TripleFaultHackTerm(); … … 342 342 { 343 343 /* 344 * Init H WACCM, CPUM and PGM (Darwin only).345 */ 346 rc = H WACCMR0InitVM(pVM);344 * Init HM, CPUM and PGM (Darwin only). 345 */ 346 rc = HMR0InitVM(pVM); 347 347 if (RT_SUCCESS(rc)) 348 348 { … … 370 370 PciRawR0TermVM(pVM); 371 371 #endif 372 H WACCMR0TermVM(pVM);372 HMR0TermVM(pVM); 373 373 } 374 374 } … … 410 410 PGMR0DynMapTermVM(pVM); 411 411 #endif 412 H WACCMR0TermVM(pVM);412 HMR0TermVM(pVM); 413 413 } 414 414 … … 603 603 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest); 604 604 break; 605 case VINF_EM_H WACCM_PATCH_TPR_INSTR:605 case VINF_EM_HM_PATCH_TPR_INSTR: 606 606 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR); 607 607 break; … … 662 662 /* Some safety precautions first. */ 663 663 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 664 if (RT_LIKELY( !pVM->vmm.s.fSwitcherDisabled /* h waccm */664 if (RT_LIKELY( !pVM->vmm.s.fSwitcherDisabled /* hm */ 665 665 && pVM->cCpus == 1 /* !smp */ 666 666 && PGMGetHyperCR3(pVCpu))) … … 683 683 /* We might need to disable VT-x if the active switcher turns off paging. */ 684 684 bool fVTxDisabled; 685 int rc = H WACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);685 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled); 686 686 if (RT_SUCCESS(rc)) 687 687 { … … 705 705 706 706 /* Re-enable VT-x if previously turned off. */ 707 H WACCMR0LeaveSwitcher(pVM, fVTxDisabled);707 HMR0LeaveSwitcher(pVM, fVTxDisabled); 708 708 709 709 if ( rc == VINF_EM_RAW_INTERRUPT … … 770 770 #endif 771 771 int rc; 772 if (!H WACCMR0SuspendPending())772 if (!HMR0SuspendPending()) 773 773 { 774 rc = H WACCMR0Enter(pVM, pVCpu);774 rc = HMR0Enter(pVM, pVCpu); 775 775 if (RT_SUCCESS(rc)) 776 776 { 777 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, H WACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */778 int rc2 = H WACCMR0Leave(pVM, pVCpu);777 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */ 778 int rc2 = HMR0Leave(pVM, pVCpu); 779 779 AssertRC(rc2); 780 780 } … … 962 962 963 963 /* 964 * Attempt to enable h waccmode and check the current setting.964 * Attempt to enable hm mode and check the current setting. 
965 965 */ 966 966 case VMMR0_DO_HWACC_ENABLE: 967 return H WACCMR0EnableAllCpus(pVM);967 return HMR0EnableAllCpus(pVM); 968 968 969 969 /* … … 971 971 */ 972 972 case VMMR0_DO_HWACC_SETUP_VM: 973 return H WACCMR0SetupVM(pVM);973 return HMR0SetupVM(pVM); 974 974 975 975 /* … … 981 981 bool fVTxDisabled; 982 982 983 /* Safety precaution as H WACCM can disable the switcher. */983 /* Safety precaution as HM can disable the switcher. */ 984 984 Assert(!pVM->vmm.s.fSwitcherDisabled); 985 985 if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled)) … … 999 999 1000 1000 /* We might need to disable VT-x if the active switcher turns off paging. */ 1001 rc = H WACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);1001 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled); 1002 1002 if (RT_FAILURE(rc)) 1003 1003 return rc; … … 1006 1006 1007 1007 /* Re-enable VT-x if previously turned off. */ 1008 H WACCMR0LeaveSwitcher(pVM, fVTxDisabled);1008 HMR0LeaveSwitcher(pVM, fVTxDisabled); 1009 1009 1010 1010 /** @todo dispatch interrupts? */ … … 1284 1284 if (idCpu == NIL_VMCPUID) 1285 1285 return VERR_INVALID_CPU_ID; 1286 return H WACCMR0TestSwitcher3264(pVM);1286 return HMR0TestSwitcher3264(pVM); 1287 1287 #endif 1288 1288 default: