Changeset 37320 in vbox for trunk/src/VBox

- Timestamp: Jun 3, 2011, 3:05:36 PM (14 years ago)
- svn:sync-xref-src-repo-rev: 72075
- Location: trunk/src/VBox/VMM
- Files: 7 edited
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
r37319 → r37320

/* $Id$ */
/** @file
- * HWACCM - Host Context Ring0.
+ * Hardware Assisted Virtualization Manager - Host Context Ring-0.
 */
…

Internal Functions: the hwaccmR0 prefix becomes hmR0, and the single vendor-dispatching per-CPU init worker is split in two (the erratum helper loses its forward declaration; it is now defined before use):

-static DECLCALLBACK(void) hwaccmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
-static DECLCALLBACK(void) hwaccmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
-static DECLCALLBACK(void) hwaccmR0InitCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
-static bool               hwaccmR0IsSubjectToVmxPreemptionTimerErratum(void);
-static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
-static DECLCALLBACK(void) hwaccmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData);
+static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hmR0InitIntelCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hmR0InitAmdCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
+static DECLCALLBACK(void) hmR0MpEventCallback(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvData);

A new "Structures and Typedefs" section hosts the first-return-code structure, renamed from HWACCMR0FIRSTRC and moved up from below the globals:

+/**
+ * This is used to manage the status code of a RTMpOnAll in HM.
+ */
+typedef struct HMR0FIRSTRC
+{
+    /** The status code. */
+    int32_t volatile    rc;
+    /** The ID of the CPU reporting the first failure. */
+    RTCPUID volatile    idCpu;
+} HMR0FIRSTRC;
+/** Pointer to a first return code structure. */
+typedef HMR0FIRSTRC *PHMR0FIRSTRC;
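For readers outside the VirtualBox tree: the structure above collects the first failure reported by callbacks that run concurrently on every CPU (via RTMpOnAll). A minimal standalone analog in C11 — the names, the atomics, and the "negative status = failure" convention (VBox's RT_FAILURE) are illustrative, not the VBox implementation, whose helper bodies the diff view collapses:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct FIRSTRC
    {
        _Atomic int32_t  rc;     /* first failure status, 0 = success */
        _Atomic uint32_t idCpu;  /* CPU that reported it */
    } FIRSTRC;

    static void FirstRcInit(FIRSTRC *p)
    {
        atomic_store(&p->rc, 0);
        atomic_store(&p->idCpu, UINT32_MAX);
    }

    /* Called concurrently from every CPU; only the first failure sticks
       (the compare-exchange succeeds exactly once). The idCpu store is
       read only after all callbacks have completed. */
    static void FirstRcSetStatus(FIRSTRC *p, uint32_t idCpu, int32_t rc)
    {
        int32_t expected = 0;
        if (rc < 0 && atomic_compare_exchange_strong(&p->rc, &expected, rc))
            atomic_store(&p->idCpu, idCpu);
    }

    int main(void)
    {
        FIRSTRC FirstRc;
        FirstRcInit(&FirstRc);
        FirstRcSetStatus(&FirstRc, 0, 0);    /* CPU 0: success, ignored */
        FirstRcSetStatus(&FirstRc, 2, -22);  /* CPU 2: first failure, recorded */
        FirstRcSetStatus(&FirstRc, 3, -95);  /* CPU 3: later failure, dropped */
        printf("rc=%d on cpu %u\n", atomic_load(&FirstRc.rc),
               atomic_load(&FirstRc.idCpu));
        return 0;
    }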
The anonymous globals struct gains documentation, the per-CPU info type is renamed (HWACCM_CPUINFO → HMGLOBLCPUINFO), and the struct itself becomes g_HvmR0 instead of HWACCMR0Globals:

-static struct
-{
-    HWACCM_CPUINFO aCpuInfo[RTCPUSET_MAX_CPUS];
-
-    /** Ring 0 handlers for VT-x and AMD-V. */
-    DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu));
+/**
+ * Global data.
+ */
+static struct
+{
+    /** Per CPU globals. */
+    HMGLOBLCPUINFO  aCpuInfo[RTCPUSET_MAX_CPUS];
+
+    /** @name Ring-0 method table for AMD-V and VT-x specific operations.
+     * @{ */
+    DECLR0CALLBACKMEMBER(int, pfnEnterSession,(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu));
     DECLR0CALLBACKMEMBER(int, pfnLeaveSession,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     DECLR0CALLBACKMEMBER(int, pfnSaveHostState,(PVM pVM, PVMCPU pVCpu));
     DECLR0CALLBACKMEMBER(int, pfnLoadGuestState,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
-    DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
-    DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
-    DECLR0CALLBACKMEMBER(int, pfnInitVM, (PVM pVM));
-    DECLR0CALLBACKMEMBER(int, pfnTermVM, (PVM pVM));
-    DECLR0CALLBACKMEMBER(int, pfnSetupVM, (PVM pVM));
+    DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
+    DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
+    DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM));
+    DECLR0CALLBACKMEMBER(int, pfnTermVM,(PVM pVM));
+    DECLR0CALLBACKMEMBER(int, pfnSetupVM,(PVM pVM));
+    /** @} */

     /** Maximum ASID allowed. */
     uint32_t        uMaxASID;

+    /** VT-x data. */
     struct
     {
     …
     } vmx;

+    /** AMD-V information. */
     struct
     {
     …
     /** Serialize initialization in HWACCMR0EnableAllCpus. */
     RTONCE          EnableAllCpusOnce;
-} HWACCMR0Globals;
+} g_HvmR0;

The old definition of the first-return-code structure below the globals is deleted (it moved up, renamed, as shown above):

-/**
- * This is used to manage the status code of a RTMpOnAll in HWACCM.
- */
-typedef struct HWACCMR0FIRSTRC
-{
-    /** The status code. */
-    int32_t volatile rc;
-    /** The ID of the CPU reporting the first failure. */
-    RTCPUID volatile idCpu;
-} HWACCMR0FIRSTRC;
-/** Pointer to a first return code structure. */
-typedef HWACCMR0FIRSTRC *PHWACCMR0FIRSTRC;

The first-return-code helpers are renamed to match (doc comments unchanged, bodies as before):

-static void hwaccmR0FirstRcInit(PHWACCMR0FIRSTRC pFirstRc)
+static void hmR0FirstRcInit(PHMR0FIRSTRC pFirstRc)
 {
     pFirstRc->rc = VINF_SUCCESS;
     …
 }

-static void hwaccmR0FirstRcSetStatus(PHWACCMR0FIRSTRC pFirstRc, int rc)
+static void hmR0FirstRcSetStatus(PHMR0FIRSTRC pFirstRc, int rc)
 {
     if (   RT_FAILURE(rc)
     …
 }

-static int hwaccmR0FirstRcGetStatus(PHWACCMR0FIRSTRC pFirstRc)
+static int hmR0FirstRcGetStatus(PHMR0FIRSTRC pFirstRc)
 {
     return pFirstRc->rc;
 }

-static RTCPUID hwaccmR0FirstRcGetCpuId(PHWACCMR0FIRSTRC pFirstRc)
+static RTCPUID hmR0FirstRcGetCpuId(PHMR0FIRSTRC pFirstRc)
 {
     return pFirstRc->idCpu;
 }
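The g_HvmR0 struct above is a method table: every pfn slot dispatches to the VT-x or AMD-V backend once one is detected, and until then points at a harmless no-op (the hmR0Dummy* stubs that follow). A minimal sketch of the same pattern — VM, HMMETHODS, and all function names here are hypothetical, not VBox types:

    #include <stdio.h>

    typedef struct VM VM;               /* opaque VM handle, illustrative */

    typedef struct HMMETHODS
    {
        int (*pfnInitVM)(VM *pVM);
        int (*pfnTermVM)(VM *pVM);
    } HMMETHODS;

    static int dummyInitVM(VM *pVM) { (void)pVM; return 0; } /* safe no-op default */
    static int dummyTermVM(VM *pVM) { (void)pVM; return 0; }

    static int vmxInitVM(VM *pVM) { (void)pVM; puts("VT-x init"); return 0; }
    static int vmxTermVM(VM *pVM) { (void)pVM; puts("VT-x term"); return 0; }

    /* Placeholders installed up front, so calls are always safe. */
    static HMMETHODS g_Methods = { dummyInitVM, dummyTermVM };

    int main(void)
    {
        int fVmxSupported = 1;          /* pretend detection succeeded */
        if (fVmxSupported)              /* install the real backend once */
        {
            g_Methods.pfnInitVM = vmxInitVM;
            g_Methods.pfnTermVM = vmxTermVM;
        }
        return g_Methods.pfnInitVM(NULL);
    }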
The dummy method-table placeholders in the /** @name Dummy callback handlers @{ */ group are renamed hwaccmR0Dummy* → hmR0Dummy*, with the per-CPU info parameter retyped; all ten keep their trivial bodies:

-static DECLCALLBACK(int) hwaccmR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
+static DECLCALLBACK(int) hmR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
 {
     return VINF_SUCCESS;
 }

Likewise for hmR0DummyLeave, hmR0DummyEnableCpu, hmR0DummyDisableCpu, hmR0DummyInitVM, hmR0DummyTermVM, hmR0DummySetupVM, hmR0DummyRunGuestCode, hmR0DummySaveHostState and hmR0DummyLoadGuestState.

/** @} */

The old monolithic HWACCMR0Init() — "Does global Ring-0 HWACCM initialization (at module init)" — is deleted from this spot. Its roughly 300 lines (globals and placeholder setup, the aCpuInfo size check, the CPUID probing, and both vendor branches) reappear further down, split into hmR0InitIntel() and hmR0InitAmd() plus a slimmer dispatching HWACCMR0Init().
Apart from the HWACCMR0Globals → g_HvmR0 and hwaccmR0* → hmR0* renames, the notable differences between the removed body and the new workers are:

-    /* STandard features. */
(typo fixed to "Standard features." in the new HWACCMR0Init)

-            HWACCMR0Globals.lLastError = RTMpOnAll(hwaccmR0InitCpu, (void *)(uintptr_t)u32VendorEBX, &FirstRc);
(the combined per-CPU worker took the CPUID vendor EBX as its user argument; the new code calls hmR0InitIntelCpu or hmR0InitAmdCpu with the HMR0FIRSTRC pointer as pvUser1 instead)

-                    RTR0MEMOBJ pScatchMemObj;
-                    void      *pvScatchPage;
-                    RTHCPHYS   pScatchPagePhys;
(declarations move to first use; the memory object handle becomes hScatchMemObj, the physical address HCPhysScratchPage, and the scratch page is now cleared with ASMMemZeroPage() rather than memset())

-            rc = RTMpOnAll(hwaccmR0InitCpu, (void *)(uintptr_t)u32VendorEBX, &FirstRc); AssertRC(rc);
(the same worker change on the AMD path)

The hostCR4/hostEFER capture, the scratch-page VMXEnable()/VMXDisable() probe with CR4.VMXE handling, the KVM-leaves-the-CPU-in-VMX-root-mode refusal (VERR_VMX_IN_VMX_ROOT_MODE), the preemption-timer erratum adjustment, the VT-x and AMD-V method-table installs, the HWCR diagnostic read, and the VERR_VMX_NO_VMX / VERR_SVM_NO_SVM / VERR_HWACCM_UNKNOWN_CPU / VERR_HWACCM_NO_CPUID error paths all reappear, unchanged in substance, in the new functions below; the MP/power notification registration and the "module init shall not fail" success return stay in the new HWACCMR0Init.
…

The VMX-preemption timer erratum check is renamed and otherwise kept intact (the diff view collapses its unchanged body):

-static bool hwaccmR0IsSubjectToVmxPreemptionTimerErratum(void)
+static bool hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum(void)
 {
     uint32_t u = ASMCpuId_EAX(1);
     …
 }
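The collapsed body keys off the CPUID(1).EAX processor signature. For reference, a standalone sketch (x86, GCC/Clang, user space) decoding that signature into the display family/model against which such erratum lists are matched; the matched model value below is illustrative only, not the changeset's actual list:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;

        unsigned uStepping  = eax & 0xf;
        unsigned uModel     = (eax >> 4) & 0xf;
        unsigned uFamily    = (eax >> 8) & 0xf;
        unsigned uExtModel  = (eax >> 16) & 0xf;
        unsigned uExtFamily = (eax >> 20) & 0xff;

        /* Convention: the extended fields only apply for family 6/15. */
        unsigned uDispFamily = uFamily == 0xf ? uFamily + uExtFamily : uFamily;
        unsigned uDispModel  = (uFamily == 0x6 || uFamily == 0xf)
                             ? (uExtModel << 4) | uModel : uModel;

        printf("signature=%#x family=%#x model=%#x stepping=%u\n",
               eax, uDispFamily, uDispModel, uStepping);

        /* An erratum list is simply a match on such signatures. */
        if (uDispFamily == 0x6 && uDispModel == 0x1e)
            puts("matches a hypothetical erratum entry");
        return 0;
    }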
The new Intel worker:

/**
 * Intel specific initialization code.
 *
 * @returns VBox status code (will only fail if out of memory).
 */
static int hmR0InitIntel(uint32_t u32FeaturesECX, uint32_t u32FeaturesEDX)
{
    /*
     * Check that all the required VT-x features are present.
     * We also assume all VT-x-enabled CPUs support fxsave/fxrstor.
     */
    if (   (u32FeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
        && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
        && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
       )
    {
        /** @todo move this into a separate function. */
        g_HvmR0.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
         * First try use native kernel API for controlling VT-x.
         * (This is only supported by some Mac OS X kernels atm.)
         */
        int rc = g_HvmR0.lLastError = SUPR0EnableVTx(true /* fEnable */);
        g_HvmR0.vmx.fUsingSUPR0EnableVTx = rc != VERR_NOT_SUPPORTED;
        if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
        {
            AssertMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
            if (RT_SUCCESS(rc))
            {
                g_HvmR0.vmx.fSupported = true;
                rc = SUPR0EnableVTx(false /* fEnable */);
                AssertRC(rc);
            }
        }
        else
        {
            /* We need to check if VT-x has been properly initialized on all
               CPUs. Some BIOSes do a lousy job. */
            HMR0FIRSTRC FirstRc;
            hmR0FirstRcInit(&FirstRc);
            g_HvmR0.lLastError = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
            if (RT_SUCCESS(g_HvmR0.lLastError))
                g_HvmR0.lLastError = hmR0FirstRcGetStatus(&FirstRc);
        }
        if (RT_SUCCESS(g_HvmR0.lLastError))
        {
            /* Reread in case we've changed it. */
            g_HvmR0.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

            if (   (g_HvmR0.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
                ==                                 (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
            {
                /*
                 * Read all relevant MSR.
                 */
                g_HvmR0.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
                g_HvmR0.vmx.msr.vmx_pin_ctls.u  = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
                g_HvmR0.vmx.msr.vmx_proc_ctls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                g_HvmR0.vmx.msr.vmx_exit.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
                g_HvmR0.vmx.msr.vmx_entry.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
                g_HvmR0.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
                g_HvmR0.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
                g_HvmR0.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
                g_HvmR0.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
                g_HvmR0.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                g_HvmR0.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
                g_HvmR0.vmx.hostCR4             = ASMGetCR4();
                g_HvmR0.vmx.hostEFER            = ASMRdMsr(MSR_K6_EFER);
                /* VPID 16 bits ASID. */
                g_HvmR0.uMaxASID                = 0x10000; /* exclusive */

                if (g_HvmR0.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
                {
                    g_HvmR0.vmx.msr.vmx_proc_ctls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
                    if (   g_HvmR0.vmx.msr.vmx_proc_ctls2.n.allowed1
                        & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
                        g_HvmR0.vmx.msr.vmx_eptcaps = ASMRdMsr(MSR_IA32_VMX_EPT_CAPS);
                }

                if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
                {
                    /*
                     * Enter root mode
                     */
                    RTR0MEMOBJ hScatchMemObj;
                    rc = RTR0MemObjAllocCont(&hScatchMemObj, PAGE_SIZE, true /* executable R0 mapping */);
                    if (RT_FAILURE(rc))
                        return rc;

                    void      *pvScatchPage      = RTR0MemObjAddress(hScatchMemObj);
                    RTHCPHYS   HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScatchMemObj, 0);
                    ASMMemZeroPage(pvScatchPage);

                    /* Set revision dword at the beginning of the structure. */
                    *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(g_HvmR0.vmx.msr.vmx_basic_info);

                    /* Make sure we don't get rescheduled to another cpu during this probe. */
                    RTCCUINTREG fFlags = ASMIntDisableFlags();

                    /*
                     * Check CR4.VMXE
                     */
                    g_HvmR0.vmx.hostCR4 = ASMGetCR4();
                    if (!(g_HvmR0.vmx.hostCR4 & X86_CR4_VMXE))
                    {
                        /* In theory this bit could be cleared behind our back. Which would cause
                           #UD faults when we try to execute the VMX instructions... */
                        ASMSetCR4(g_HvmR0.vmx.hostCR4 | X86_CR4_VMXE);
                    }

                    /* Enter VMX Root Mode */
                    rc = VMXEnable(HCPhysScratchPage);
                    if (RT_SUCCESS(rc))
                    {
                        g_HvmR0.vmx.fSupported = true;
                        VMXDisable();
                        /*
                         * Check for the VMX-Preemption Timer and adjust for the "VMX-Preemption
                         * Timer Does Not Count Down at the Rate Specified" erratum.
                         */
                        if (   g_HvmR0.vmx.msr.vmx_pin_ctls.n.allowed1
                            & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
                        {
                            g_HvmR0.vmx.fUsePreemptTimer   = true;
                            g_HvmR0.vmx.cPreemptTimerShift = MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(g_HvmR0.vmx.msr.vmx_misc);
                            if (hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum())
                                g_HvmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
                        }
                    }
                    else
                    {
                        /*
                         * KVM leaves the CPU in VMX root mode. Not only is this not allowed,
                         * it will crash the host when we enter raw mode, because:
                         *
                         *   (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify
                         *       this bit), and
                         *   (b) turning off paging causes a #GP (unavoidable when switching
                         *       from long to 32 bits mode or 32 bits to PAE).
                         *
                         * They should fix their code, but until they do we simply refuse to run.
                         */
                        g_HvmR0.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
                    }

                    /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set
                       if it wasn't so before (some software could incorrectly
                       think it's in VMX mode). */
                    ASMSetCR4(g_HvmR0.vmx.hostCR4);
                    ASMSetFlags(fFlags);

                    RTR0MemObjFree(hScatchMemObj, false);
                }
            }
            else
            {
                AssertFailed(); /* can't hit this case anymore */
                g_HvmR0.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
            }

            /*
             * Install the VT-x methods.
             */
            if (g_HvmR0.vmx.fSupported)
            {
                g_HvmR0.pfnEnterSession   = VMXR0Enter;
                g_HvmR0.pfnLeaveSession   = VMXR0Leave;
                g_HvmR0.pfnSaveHostState  = VMXR0SaveHostState;
                g_HvmR0.pfnLoadGuestState = VMXR0LoadGuestState;
                g_HvmR0.pfnRunGuestCode   = VMXR0RunGuestCode;
                g_HvmR0.pfnEnableCpu      = VMXR0EnableCpu;
                g_HvmR0.pfnDisableCpu     = VMXR0DisableCpu;
                g_HvmR0.pfnInitVM         = VMXR0InitVM;
                g_HvmR0.pfnTermVM         = VMXR0TermVM;
                g_HvmR0.pfnSetupVM        = VMXR0SetupVM;
            }
        }
#ifdef LOG_ENABLED
        else
            SUPR0Printf("hmR0InitIntelCpu failed with rc=%d\n", g_HvmR0.lLastError);
#endif
    }
    else
        g_HvmR0.lLastError = VERR_VMX_NO_VMX;
    return VINF_SUCCESS;
}
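On the preemption-timer shift just stored: IA32_VMX_MISC[4:0] reports the rate relationship between the TSC and the VMX-preemption timer — the timer ticks once per 2^shift TSC cycles, and the erratum workaround above simply forces the shift to 0. A standalone sketch of the arithmetic, assuming an illustrative TSC frequency and shift value (not taken from the changeset):

    #include <stdint.h>
    #include <stdio.h>

    /* Convert a time budget into a preemption-timer value, given the TSC
       frequency and the IA32_VMX_MISC[4:0] shift. */
    static uint32_t PreemptTimerFromNs(uint64_t cNsBudget, uint64_t uTscHz,
                                       unsigned cShift)
    {
        uint64_t cTscTicks   = uTscHz * cNsBudget / UINT64_C(1000000000);
        uint64_t cTimerTicks = cTscTicks >> cShift;
        return cTimerTicks > UINT32_MAX ? UINT32_MAX : (uint32_t)cTimerTicks;
    }

    int main(void)
    {
        /* 2.4 GHz TSC, shift of 5, 1 ms budget (all illustrative). */
        printf("timer=%u\n", PreemptTimerFromNs(1000000, 2400000000ull, 5));
        /* Erratum workaround: shift treated as 0, i.e. the timer is
           assumed to tick at the full TSC rate. */
        printf("timer(workaround)=%u\n",
               PreemptTimerFromNs(1000000, 2400000000ull, 0));
        return 0;
    }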
The new AMD worker:

/**
 * AMD specific initialization code.
 */
static void hmR0InitAmd(uint32_t u32FeaturesEDX)
{
    /*
     * Read all SVM MSRs if SVM is available. (same goes for RDMSR/WRMSR)
     * We also assume all SVM-enabled CPUs support fxsave/fxrstor.
     */
    if (   (g_HvmR0.cpuid.u32AMDFeatureECX & X86_CPUID_AMD_FEATURE_ECX_SVM)
        && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
        && (u32FeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
       )
    {
        g_HvmR0.pfnEnterSession   = SVMR0Enter;
        g_HvmR0.pfnLeaveSession   = SVMR0Leave;
        g_HvmR0.pfnSaveHostState  = SVMR0SaveHostState;
        g_HvmR0.pfnLoadGuestState = SVMR0LoadGuestState;
        g_HvmR0.pfnRunGuestCode   = SVMR0RunGuestCode;
        g_HvmR0.pfnEnableCpu      = SVMR0EnableCpu;
        g_HvmR0.pfnDisableCpu     = SVMR0DisableCpu;
        g_HvmR0.pfnInitVM         = SVMR0InitVM;
        g_HvmR0.pfnTermVM         = SVMR0TermVM;
        g_HvmR0.pfnSetupVM        = SVMR0SetupVM;

        /* Query AMD features. */
        uint32_t u32Dummy;
        ASMCpuId(0x8000000A, &g_HvmR0.svm.u32Rev, &g_HvmR0.uMaxASID,
                 &u32Dummy, &g_HvmR0.svm.u32Features);

        /*
         * We need to check if AMD-V has been properly initialized on all CPUs.
         * Some BIOSes might do a poor job.
         */
        HMR0FIRSTRC FirstRc;
        hmR0FirstRcInit(&FirstRc);
        int rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            rc = hmR0FirstRcGetStatus(&FirstRc);
#ifndef DEBUG_bird
        AssertMsg(rc == VINF_SUCCESS || rc == VERR_SVM_IN_USE,
                  ("hmR0InitAmdCpu failed for cpu %d with rc=%Rrc\n", hmR0FirstRcGetCpuId(&FirstRc), rc));
#endif
        if (RT_SUCCESS(rc))
        {
            /* Read the HWCR msr for diagnostics. */
            g_HvmR0.svm.msrHWCR    = ASMRdMsr(MSR_K8_HWCR);
            g_HvmR0.svm.fSupported = true;
        }
        else
            g_HvmR0.lLastError = rc;
    }
    else
        g_HvmR0.lLastError = VERR_SVM_NO_SVM;
}

The new, slimmer module-init entry point:

/**
 * Does global Ring-0 HM initialization (at module init).
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) HWACCMR0Init(void)
{
    /*
     * Initialize the globals.
     */
    g_HvmR0.fEnabled = false;
    static RTONCE s_OnceInit = RTONCE_INITIALIZER;
    g_HvmR0.EnableAllCpusOnce = s_OnceInit;
    for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
        g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;

    /* Fill in all callbacks with placeholders. */
    g_HvmR0.pfnEnterSession   = hmR0DummyEnter;
    g_HvmR0.pfnLeaveSession   = hmR0DummyLeave;
    g_HvmR0.pfnSaveHostState  = hmR0DummySaveHostState;
    g_HvmR0.pfnLoadGuestState = hmR0DummyLoadGuestState;
    g_HvmR0.pfnRunGuestCode   = hmR0DummyRunGuestCode;
    g_HvmR0.pfnEnableCpu      = hmR0DummyEnableCpu;
    g_HvmR0.pfnDisableCpu     = hmR0DummyDisableCpu;
    g_HvmR0.pfnInitVM         = hmR0DummyInitVM;
    g_HvmR0.pfnTermVM         = hmR0DummyTermVM;
    g_HvmR0.pfnSetupVM        = hmR0DummySetupVM;

    /* Default is global VT-x/AMD-V init */
    g_HvmR0.fGlobalInit = true;

    /*
     * Make sure aCpuInfo is big enough for all the CPUs on this system.
     */
    if (RTMpGetArraySize() > RT_ELEMENTS(g_HvmR0.aCpuInfo))
    {
        LogRel(("HM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_HvmR0.aCpuInfo)));
        return VERR_TOO_MANY_CPUS;
    }

    /*
     * Check for VT-x and AMD-V capabilities
     */
    int rc;
    if (ASMHasCpuId())
    {
        uint32_t u32FeaturesECX, u32FeaturesEDX;
        uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
        uint32_t u32Dummy;

        /* Standard features. */
        ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
        ASMCpuId(1, &u32Dummy, &u32Dummy, &u32FeaturesECX, &u32FeaturesEDX);

        /* Query AMD features. */
        ASMCpuId(0x80000001, &u32Dummy, &u32Dummy,
                 &g_HvmR0.cpuid.u32AMDFeatureECX,
                 &g_HvmR0.cpuid.u32AMDFeatureEDX);

        /* Go to CPU specific initialization code. */
        if (   u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX
            && u32VendorECX == X86_CPUID_VENDOR_INTEL_ECX
            && u32VendorEDX == X86_CPUID_VENDOR_INTEL_EDX)
        {
            rc = hmR0InitIntel(u32FeaturesECX, u32FeaturesEDX);
            if (RT_FAILURE(rc))
                return rc;
        }
        else if (   u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX
                 && u32VendorECX == X86_CPUID_VENDOR_AMD_ECX
                 && u32VendorEDX == X86_CPUID_VENDOR_AMD_EDX)
            hmR0InitAmd(u32FeaturesEDX);
        else
            g_HvmR0.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    }
    else
        g_HvmR0.lLastError = VERR_HWACCM_NO_CPUID;

    /*
     * Register notification callbacks that we can use to disable/enable CPUs
     * when brought offline/online or suspending/resuming.
     */
    if (!g_HvmR0.vmx.fUsingSUPR0EnableVTx)
    {
        rc = RTMpNotificationRegister(hmR0MpEventCallback, NULL);
        AssertRC(rc);

        rc = RTPowerNotificationRegister(hmR0PowerCallback, NULL);
        AssertRC(rc);
    }

    /* We return success here because module init shall not fail if HM
       fails to initialize. */
    return VINF_SUCCESS;
}
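The vendor dispatch above compares the CPUID(0) EBX/ECX/EDX registers against the Intel and AMD vendor constants. A standalone analog (x86, GCC/Clang, user space) that assembles the vendor string — laid out EBX:EDX:ECX, i.e. "GenuineIntel" or "AuthenticAMD" — and branches on it:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned eax, ebx, ecx, edx;
        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 1;

        /* The 12-byte vendor string lives in EBX, EDX, ECX (in that order). */
        char szVendor[13];
        memcpy(szVendor + 0, &ebx, 4);
        memcpy(szVendor + 4, &edx, 4);
        memcpy(szVendor + 8, &ecx, 4);
        szVendor[12] = '\0';

        if (!strcmp(szVendor, "GenuineIntel"))
            puts("dispatch to the Intel/VT-x init path");
        else if (!strcmp(szVendor, "AuthenticAMD"))
            puts("dispatch to the AMD/AMD-V init path");
        else
            printf("unknown vendor '%s'\n", szVendor);
        return 0;
    }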
The termination entry point keeps its logic; the doc comment is completed and the body is retargeted mechanically (HWACCMR0Globals → g_HvmR0, hwaccmR0* → hmR0*, HWACCMR0FIRSTRC → HMR0FIRSTRC):

/**
- * Does global Ring-0 HWACCM termination.
+ * Does global Ring-0 HM termination (at module termination).
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) HWACCMR0Term(void)

On the SUPR0EnableVTx() path it still just disables VT-x and clears the fConfigured flags; otherwise it deregisters the MP/power callbacks and, with global init, runs hmR0DisableCpuCallback on all CPUs before freeing the per-CPU pages. Two one-line comments grow into block comments:

-        /* Only disable VT-x/AMD-V on all CPUs if we enabled it before. */
-        if (HWACCMR0Globals.fGlobalInit)
+        /*
+         * Disable VT-x/AMD-V on all CPUs if we enabled it before.
+         */
+        if (g_HvmR0.fGlobalInit)
 …
-        /* Free the per-cpu pages used for VT-x and AMD-V */
-        for (unsigned i = 0; i < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); i++)
+        /*
+         * Free the per-cpu pages used for VT-x and AMD-V.
+         */
+        for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
         {
-            if (HWACCMR0Globals.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
+            if (g_HvmR0.aCpuInfo[i].hMemObj != NIL_RTR0MEMOBJ)
             {
-                RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].hMemObj, false);
-                HWACCMR0Globals.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
+                RTR0MemObjFree(g_HvmR0.aCpuInfo[i].hMemObj, false);
+                g_HvmR0.aCpuInfo[i].hMemObj = NIL_RTR0MEMOBJ;
             }
         }
 …
The Intel half of the split per-CPU worker:

/**
 * Worker function used by hmR0PowerCallback and HWACCMR0Init to initialize
 * VT-x on a CPU.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Pointer to the first RC structure.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) hmR0InitIntelCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    NOREF(pvUser2);

    /*
     * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
     * Once the lock bit is set, this MSR can no longer be modified.
     */
    uint64_t fFC = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    if (   !(fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
        || (   (fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
            == MSR_IA32_FEATURE_CONTROL_VMXON ) /* Some BIOSes forget to set the locked bit. */
       )
    {
        /* MSR is not yet locked; we can change it ourselves here */
        ASMWrMsr(MSR_IA32_FEATURE_CONTROL,
                 g_HvmR0.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
        fFC = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    }

    int rc;
    if (   (fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
        ==         (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
        rc = VINF_SUCCESS;
    else
        rc = VERR_VMX_MSR_LOCKED_OR_DISABLED;

    hmR0FirstRcSetStatus(pFirstRc, rc);
}
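The decision logic above is self-contained enough to lift out. A standalone sketch of the same IA32_FEATURE_CONTROL classification, using the architectural bit positions (bit 0 = lock, bit 2 = enable VMXON outside SMX); the function and macro names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define FC_LOCK  UINT64_C(0x1)  /* IA32_FEATURE_CONTROL bit 0 */
    #define FC_VMXON UINT64_C(0x4)  /* bit 2: VMXON outside SMX   */

    /* Returns 0 if VMX can be used, 1 if the BIOS locked it off; also
       yields the value the kernel code would write back when unlocked. */
    static int CheckFeatureControl(uint64_t fFC, uint64_t *pfNewFC)
    {
        *pfNewFC = fFC;
        if (   !(fFC & (FC_VMXON | FC_LOCK))
            || ((fFC & (FC_VMXON | FC_LOCK)) == FC_VMXON)) /* lock bit forgotten */
            *pfNewFC = fFC | FC_VMXON | FC_LOCK;           /* set it ourselves */
        return (*pfNewFC & (FC_VMXON | FC_LOCK)) == (FC_VMXON | FC_LOCK) ? 0 : 1;
    }

    int main(void)
    {
        uint64_t fNew;
        printf("unset:      %s\n", CheckFeatureControl(0, &fNew) ? "locked off" : "usable");
        printf("locked off: %s\n", CheckFeatureControl(FC_LOCK, &fNew) ? "locked off" : "usable");
        printf("enabled:    %s\n", CheckFeatureControl(FC_VMXON | FC_LOCK, &fNew) ? "locked off" : "usable");
        return 0;
    }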
The AMD half:

/**
 * Worker function used by hmR0PowerCallback and HWACCMR0Init to initialize
 * AMD-V on a CPU.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Pointer to the first RC structure.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) hmR0InitAmdCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser1;
    Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
    NOREF(pvUser2);

    /* Check if SVM is disabled. */
    int rc;
    uint64_t fVmCr = ASMRdMsr(MSR_K8_VM_CR);
    if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
    {
        /* Turn on SVM in the EFER MSR. */
        uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
        if (fEfer & MSR_K6_EFER_SVME)
            rc = VERR_SVM_IN_USE;
        else
        {
            ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);

            /* Paranoia. */
            fEfer = ASMRdMsr(MSR_K6_EFER);
            if (fEfer & MSR_K6_EFER_SVME)
            {
                /* Restore previous value. */
                ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
                rc = VINF_SUCCESS;
            }
            else
                rc = VERR_SVM_ILLEGAL_EFER_MSR;
        }
    }
    else
        rc = VERR_SVM_DISABLED;

    hmR0FirstRcSetStatus(pFirstRc, rc);
}

The old combined hwaccmR0InitCpu() — which received the vendor EBX via pvUser1, branched on Intel vs. AMD, and ended in an AssertFailed()/VERR_INTERNAL_ERROR_5 fallback for the impossible case — is removed; its two halves are the functions above.
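The SVM classification also reduces to a pure function of two MSR values: VM_CR (MSR C001_0114h, SVMDIS is bit 4) and EFER (MSR C000_0080h, SVME is bit 12). A user-space sketch of just the decision — the real code must rdmsr/wrmsr in ring-0, and the names here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define VM_CR_SVM_DISABLE (UINT64_C(1) << 4)   /* VM_CR.SVMDIS */
    #define EFER_SVME         (UINT64_C(1) << 12)  /* EFER.SVME    */

    static const char *ClassifySvm(uint64_t fVmCr, uint64_t fEfer)
    {
        if (fVmCr & VM_CR_SVM_DISABLE)
            return "SVM disabled (BIOS turned it off)";
        if (fEfer & EFER_SVME)
            return "SVM in use (someone already set EFER.SVME)";
        return "usable (we may set EFER.SVME ourselves)";
    }

    int main(void)
    {
        printf("%s\n", ClassifySvm(0, 0));
        printf("%s\n", ClassifySvm(VM_CR_SVM_DISABLE, 0));
        printf("%s\n", ClassifySvm(0, EFER_SVME));
        return 0;
    }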
hmR0EnableCpu() (ex hwaccmR0EnableCpu) and its RTMpOnAll worker are mechanical renames (PHWACCM_CPUINFO → PHMGLOBLCPUINFO, HWACCMR0Globals → g_HvmR0); the failure message becomes "hmR0EnableCpu failed idCpu=%u.". The worker:

static DECLCALLBACK(void) hmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM          pVM      = (PVM)pvUser1;     /* can be NULL! */
    PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2;
    AssertReturnVoid(g_HvmR0.fGlobalInit);
    hmR0FirstRcSetStatus(pFirstRc, hmR0EnableCpu(pVM, idCpu));
}

hmR0EnableAllCpuOnce() is likewise retargeted at g_HvmR0. On the SUPR0EnableVTx() path it still marks every aCpuInfo entry configured and forces global init:

             /* If the host provides a VT-x init API, then we'll rely on that for global init. */
-            HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
+            g_HvmR0.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;

Otherwise it allocates one executable page per possible CPU (RTR0MemObjAllocCont, zeroed with ASMMemZeroPage) and, for global init, runs the enable callback on all of them:

             HMR0FIRSTRC FirstRc;
             hmR0FirstRcInit(&FirstRc);
             rc = RTMpOnAll(hmR0EnableCpuCallback, (void *)pVM, &FirstRc);
             if (RT_SUCCESS(rc))
                 rc = hmR0FirstRcGetStatus(&FirstRc);
             AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", hmR0FirstRcGetCpuId(&FirstRc), rc));

The public entry point:

     /* Make sure we don't touch hwaccm after we've disabled hwaccm in
        preparation of a suspend. */
-    if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
+    if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
         return VERR_HWACCM_SUSPEND_PENDING;

-    return RTOnce(&HWACCMR0Globals.EnableAllCpusOnce, hwaccmR0EnableAllCpuOnce, pVM, NULL);
+    return RTOnce(&g_HvmR0.EnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM, NULL);
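RTOnce serializes the first-VM initialization: many EMT threads may race into HWACCMR0EnableAllCpus(), but only the first runs the body, and the rest wait for it to finish. A minimal analog with pthread_once (which, unlike RTOnce, passes no user argument and returns no status, so the sketch routes those through globals; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_once_t g_EnableAllCpusOnce = PTHREAD_ONCE_INIT;
    static int g_rcEnable; /* result of the one-time body */

    static void enableAllCpuOnce(void)
    {
        puts("enabling VT-x/AMD-V on every CPU (runs exactly once)");
        g_rcEnable = 0;
    }

    static void *vmThread(void *pv)
    {
        (void)pv;
        pthread_once(&g_EnableAllCpusOnce, enableAllCpuOnce);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, vmThread, NULL);
        pthread_create(&t2, NULL, vmThread, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return g_rcEnable;
    }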
hmR0DisableCpu() and hmR0DisableCpuCallback() (ex hwaccmR0*) mirror the enable pair and are likewise rename-only; the callback again receives the first-RC structure via pvUser2:

static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PHMR0FIRSTRC pFirstRc = (PHMR0FIRSTRC)pvUser2;
    AssertReturnVoid(g_HvmR0.fGlobalInit);
    hmR0FirstRcSetStatus(pFirstRc, hmR0DisableCpu(idCpu));
}
hmR0MpEventCallback() (ex hwaccmR0MpEventCallback) still disables VT-x/AMD-V when a CPU goes offline:

         case RTMPEVENT_OFFLINE:
         {
-            int rc = hwaccmR0DisableCpu(idCpu);
+            int rc = hmR0DisableCpu(idCpu);
             AssertRC(rc);
             break;
         }

hmR0PowerCallback() (ex hwaccmR0PowerCallback): the SUPR0Printf log strings and globals are renamed; on RTPOWEREVENT_SUSPEND it still sets g_HvmR0.fSuspended and, with global init enabled, turns off VT-x/AMD-V on all CPUs through hmR0DisableCpuCallback.
The resume path no longer funnels through the removed combined worker; it dispatches on the detected vendor:

-            /* Reinit the CPUs from scratch as the suspend state might have messed with the MSRs. (lousy BIOSes as usual) */
-            uintptr_t uFirstArg = HWACCMR0Globals.vmx.fSupported ? X86_CPUID_VENDOR_INTEL_EBX : X86_CPUID_VENDOR_AMD_EBX;
-            rc = RTMpOnAll(hwaccmR0InitCpu, (void *)uFirstArg, &FirstRc);
+            /* Reinit the CPUs from scratch as the suspend state might have
+               messed with the MSRs. (lousy BIOSes as usual) */
+            if (g_HvmR0.vmx.fSupported)
+                rc = RTMpOnAll(hmR0InitIntelCpu, &FirstRc, NULL);
+            else
+                rc = RTMpOnAll(hmR0InitAmdCpu, &FirstRc, NULL);
             Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
             if (RT_SUCCESS(rc))
-                rc = hwaccmR0FirstRcGetStatus(&FirstRc);
+                rc = hmR0FirstRcGetStatus(&FirstRc);
 #ifdef LOG_ENABLED
             if (RT_FAILURE(rc))
-                SUPR0Printf("hwaccmR0PowerCallback hwaccmR0InitCpu failed with %Rc\n", rc);
+                SUPR0Printf("hmR0PowerCallback hmR0InitXxxCpu failed with %Rc\n", rc);
 #endif
-            if (HWACCMR0Globals.fGlobalInit)
+            if (g_HvmR0.fGlobalInit)
             {
                 /* Turn VT-x or AMD-V back on on all CPUs. */
-                rc = RTMpOnAll(hwaccmR0EnableCpuCallback, NULL, &FirstRc /* output ignored */);
+                rc = RTMpOnAll(hmR0EnableCpuCallback, NULL, &FirstRc /* output ignored */);
                 Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
             }
 …
     if (enmEvent == RTPOWEREVENT_RESUME)
-        ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, false);
+        ASMAtomicWriteBool(&g_HvmR0.fSuspended, false);
 }
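The overall shape of the callback — block new sessions across a suspend, redo the per-CPU MSR setup on resume — fits in a few lines. A standalone sketch with stubbed-out CPU operations (all names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { POWEREVENT_SUSPEND, POWEREVENT_RESUME } POWEREVENT;

    static bool g_fSuspended;
    static bool g_fEnabled = true;

    static void disableOnAllCpus(void) { puts("  disable VT-x/AMD-V everywhere"); }
    static void reinitOnAllCpus(void)  { puts("  re-run per-CPU MSR init (BIOS may have clobbered it)"); }
    static void enableOnAllCpus(void)  { puts("  re-enable VT-x/AMD-V everywhere"); }

    static void powerCallback(POWEREVENT enmEvent)
    {
        if (enmEvent == POWEREVENT_SUSPEND)
            g_fSuspended = true;   /* set first: blocks new sessions */

        if (g_fEnabled)
        {
            if (enmEvent == POWEREVENT_SUSPEND)
                disableOnAllCpus();
            else
            {
                reinitOnAllCpus();
                enableOnAllCpus();
            }
        }

        if (enmEvent == POWEREVENT_RESUME)
            g_fSuspended = false;  /* cleared last: sessions may resume */
    }

    int main(void)
    {
        puts("suspend:"); powerCallback(POWEREVENT_SUSPEND);
        puts("resume:");  powerCallback(POWEREVENT_RESUME);
        return 0;
    }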
The per-VM init entry point gets an accurate doc comment:

/**
- * Does Ring-0 per VM HWACCM initialization.
- *
- * This is mainly to check that the Host CPU mode is compatible
- * with VMX.
+ * Does Ring-0 per VM HM initialization.
+ *
+ * This will copy HM global into the VM structure and call the CPU specific
+ * init routine which will allocate resources for each virtual CPU and such.
 *
 * @returns VBox status code.
 …

     /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
-    if (ASMAtomicReadBool(&HWACCMR0Globals.fSuspended))
+    if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
         return VERR_HWACCM_SUSPEND_PENDING;

The long run of pVM->hwaccm.s.* = HWACCMR0Globals.* assignments is retargeted wholesale and gains a heading:
    /*
     * Copy globals to the VM structure.
     */
    pVM->hwaccm.s.vmx.fSupported         = g_HvmR0.vmx.fSupported;
    pVM->hwaccm.s.svm.fSupported         = g_HvmR0.svm.fSupported;

    pVM->hwaccm.s.vmx.fUsePreemptTimer   = g_HvmR0.vmx.fUsePreemptTimer;
    pVM->hwaccm.s.vmx.cPreemptTimerShift = g_HvmR0.vmx.cPreemptTimerShift;
    pVM->hwaccm.s.vmx.msr.feature_ctrl   = g_HvmR0.vmx.msr.feature_ctrl;
    pVM->hwaccm.s.vmx.hostCR4            = g_HvmR0.vmx.hostCR4;
    pVM->hwaccm.s.vmx.hostEFER           = g_HvmR0.vmx.hostEFER;
    pVM->hwaccm.s.vmx.msr.vmx_basic_info = g_HvmR0.vmx.msr.vmx_basic_info;
    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls   = g_HvmR0.vmx.msr.vmx_pin_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls  = g_HvmR0.vmx.msr.vmx_proc_ctls;
    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2 = g_HvmR0.vmx.msr.vmx_proc_ctls2;
    pVM->hwaccm.s.vmx.msr.vmx_exit       = g_HvmR0.vmx.msr.vmx_exit;
    pVM->hwaccm.s.vmx.msr.vmx_entry      = g_HvmR0.vmx.msr.vmx_entry;
    pVM->hwaccm.s.vmx.msr.vmx_misc       = g_HvmR0.vmx.msr.vmx_misc;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = g_HvmR0.vmx.msr.vmx_cr0_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = g_HvmR0.vmx.msr.vmx_cr0_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = g_HvmR0.vmx.msr.vmx_cr4_fixed0;
    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = g_HvmR0.vmx.msr.vmx_cr4_fixed1;
    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum  = g_HvmR0.vmx.msr.vmx_vmcs_enum;
    pVM->hwaccm.s.vmx.msr.vmx_eptcaps    = g_HvmR0.vmx.msr.vmx_eptcaps;
    pVM->hwaccm.s.svm.msrHWCR            = g_HvmR0.svm.msrHWCR;
    pVM->hwaccm.s.svm.u32Rev             = g_HvmR0.svm.u32Rev;
    pVM->hwaccm.s.svm.u32Features        = g_HvmR0.svm.u32Features;
    pVM->hwaccm.s.cpuid.u32AMDFeatureECX = g_HvmR0.cpuid.u32AMDFeatureECX;
    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX = g_HvmR0.cpuid.u32AMDFeatureEDX;
    pVM->hwaccm.s.lLastError             = g_HvmR0.lLastError;

    pVM->hwaccm.s.uMaxASID               = g_HvmR0.uMaxASID;
 …
+    /*
+     * Initialize some per CPU fields.
+     */
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
 …

The ad-hoc note at the pfnInitVM call becomes a proper block comment:

+    /*
+     * Call the hardware specific initialization method.
+     *
+     * Note! The fInUse handling here isn't correct as we can we can be
+     *       rescheduled to a different cpu, but the fInUse case is mostly for
+     *       debugging...  Disabling preemption isn't an option when allocating
+     *       memory, so we'll let it slip for now.
+     */
     RTCCUINTREG fFlags = ASMIntDisableFlags();
-    PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
-
-    /* Note: Not correct as we can be rescheduled to a different cpu, but the
-       fInUse case is mostly for debugging. */
+    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
     ASMAtomicWriteBool(&pCpu->fInUse, true);
     ASMSetFlags(fFlags);

-    /* Init a VT-x or AMD-V VM. */
-    int rc = HWACCMR0Globals.pfnInitVM(pVM);
+    int rc = g_HvmR0.pfnInitVM(pVM);

     ASMAtomicWriteBool(&pCpu->fInUse, false);
 …
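The fInUse flag is a debug-only "these per-CPU globals are busy" marker set atomically around the backend call — as the new comment concedes, not watertight, since the thread can be rescheduled to another CPU in between. A minimal C11 sketch of the bracket (names illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool g_fInUse; /* per-CPU in the real code, global here */

    static int initVM(void)
    {
        puts("hardware specific VM init");
        return 0;
    }

    int main(void)
    {
        atomic_store(&g_fInUse, true);   /* enter bracket */
        int rc = initVM();
        atomic_store(&g_fInUse, false);  /* leave bracket */
        return rc;
    }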
*/ 1210 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING); 1211 1212 /* @note Not correct as we can be rescheduled to a different cpu, but the fInUse case is mostly for debugging. */ 1262 /* Make sure we don't touch hm after we've disabled hwaccm in preparation 1263 of a suspend. */ 1264 /** @todo r=bird: This cannot be right, the termination functions are 1265 * just freeing memory and resetting pVM/pVCpu members... 1266 * ==> memory leak. */ 1267 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING); 1268 1269 /* 1270 * Call the hardware specific method. 1271 * 1272 * Note! Not correct as we can be rescheduled to a different cpu, but the 1273 * fInUse case is mostly for debugging. 1274 */ 1213 1275 RTCCUINTREG fFlags = ASMIntDisableFlags(); 1214 PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu(); 1215 1276 PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu(); 1216 1277 ASMAtomicWriteBool(&pCpu->fInUse, true); 1217 1278 ASMSetFlags(fFlags); 1218 1279 1219 /* Terminate a VT-x or AMD-V VM. */ 1220 rc = HWACCMR0Globals.pfnTermVM(pVM); 1280 int rc = g_HvmR0.pfnTermVM(pVM); 1221 1281 1222 1282 ASMAtomicWriteBool(&pCpu->fInUse, false); … … 1226 1286 1227 1287 /** 1228 * Sets up a VT-x or AMD-V session 1288 * Sets up a VT-x or AMD-V session. 1289 * 1290 * This is mostly about setting up the hardware VM state. 1229 1291 * 1230 1292 * @returns VBox status code. … … 1233 1295 VMMR0DECL(int) HWACCMR0SetupVM(PVM pVM) 1234 1296 { 1235 int rc; 1236 RTCPUID idCpu = RTMpCpuId(); 1237 PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu]; 1238 1297 Log(("HWACCMR0SetupVM: %p\n", pVM)); 1239 1298 AssertReturn(pVM, VERR_INVALID_PARAMETER); 1240 1299 1241 /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */ 1242 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING); 1243 1244 #ifdef LOG_ENABLED 1245 SUPR0Printf("HWACCMR0SetupVM: %p\n", pVM); 1246 #endif 1247 1300 /* Make sure we don't touch hwaccm after we've disabled hwaccm in 1301 preparation of a suspend. */ 1302 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING); 1303 1304 1305 /* 1306 * Call the hardware specific setup VM method. This requires the CPU to be 1307 * enabled for AMD-V/VT-x and preemption to be prevented. 1308 */ 1309 RTCCUINTREG fFlags = ASMIntDisableFlags(); 1310 RTCPUID idCpu = RTMpCpuId(); 1311 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu]; 1248 1312 ASMAtomicWriteBool(&pCpu->fInUse, true); 1249 1313 1314 /* On first entry we'll sync everything. */ 1250 1315 for (VMCPUID i = 0; i < pVM->cCpus; i++) 1251 {1252 /* On first entry we'll sync everything. */1253 1316 pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL; 1254 }1255 1317 1256 1318 /* Enable VT-x or AMD-V if local init is required. */ 1257 if (!HWACCMR0Globals.fGlobalInit) 1258 { 1259 rc = hwaccmR0EnableCpu(pVM, idCpu); 1260 AssertRCReturn(rc, rc); 1319 int rc; 1320 if (!g_HvmR0.fGlobalInit) 1321 { 1322 rc = hmR0EnableCpu(pVM, idCpu); 1323 AssertReturnStmt(RT_SUCCESS_NP(rc), ASMSetFlags(fFlags), rc); 1261 1324 } 1262 1325 1263 1326 /* Setup VT-x or AMD-V. */ 1264 rc = HWACCMR0Globals.pfnSetupVM(pVM);1327 rc = g_HvmR0.pfnSetupVM(pVM); 1265 1328 1266 1329 /* Disable VT-x or AMD-V if local init was done before. */ 1267 if (! 
HWACCMR0Globals.fGlobalInit)1268 { 1269 rc = hwaccmR0DisableCpu(idCpu);1270 AssertRC(rc );1330 if (!g_HvmR0.fGlobalInit) 1331 { 1332 int rc2 = hmR0DisableCpu(idCpu); 1333 AssertRC(rc2); 1271 1334 } 1272 1335 1273 1336 ASMAtomicWriteBool(&pCpu->fInUse, false); 1337 ASMSetFlags(fFlags); 1274 1338 1275 1339 return rc; … … 1283 1347 * @param pVM The VM to operate on. 1284 1348 * @param pVCpu VMCPU handle. 1349 * 1350 * @remarks This is called with preemption disabled. 1285 1351 */ 1286 1352 VMMR0DECL(int) HWACCMR0Enter(PVM pVM, PVMCPU pVCpu) 1287 1353 { 1288 PCPUMCTX pCtx;1289 int rc;1290 1354 RTCPUID idCpu = RTMpCpuId(); 1291 PH WACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];1292 1293 /* Make sure we can't enter a session after we've disabled hwaccmin preparation of a suspend. */1294 AssertReturn(!ASMAtomicReadBool(& HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);1355 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu]; 1356 1357 /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */ 1358 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING); 1295 1359 ASMAtomicWriteBool(&pCpu->fInUse, true); 1296 1360 … … 1298 1362 pVCpu->hwaccm.s.idEnteredCpu = idCpu; 1299 1363 1300 pCtx = CPUMQueryGuestCtxPtr(pVCpu);1364 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1301 1365 1302 1366 /* Always load the guest's FPU/XMM state on-demand. */ … … 1315 1379 pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF); 1316 1380 1317 /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */ 1381 /* Enable VT-x or AMD-V if local init is required, or enable if it's a 1382 freshly onlined CPU. */ 1383 int rc; 1318 1384 if ( !pCpu->fConfigured 1319 || ! HWACCMR0Globals.fGlobalInit)1320 { 1321 rc = h waccmR0EnableCpu(pVM, idCpu);1385 || !g_HvmR0.fGlobalInit) 1386 { 1387 rc = hmR0EnableCpu(pVM, idCpu); 1322 1388 AssertRCReturn(rc, rc); 1323 1389 } … … 1327 1393 #endif 1328 1394 1329 rc = HWACCMR0Globals.pfnEnterSession(pVM, pVCpu, pCpu);1395 rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu); 1330 1396 AssertRC(rc); 1331 /* We must save the host context here (VT-x) as we might be rescheduled on a different cpu after a long jump back to ring 3. */ 1332 rc |= HWACCMR0Globals.pfnSaveHostState(pVM, pVCpu); 1397 /* We must save the host context here (VT-x) as we might be rescheduled on 1398 a different cpu after a long jump back to ring 3. */ 1399 rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu); 1333 1400 AssertRC(rc); 1334 rc |= HWACCMR0Globals.pfnLoadGuestState(pVM, pVCpu, pCtx);1401 rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx); 1335 1402 AssertRC(rc); 1336 1403 … … 1340 1407 #endif 1341 1408 1342 /* keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */ 1409 /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness 1410 and ring-3 calls. */ 1343 1411 if (RT_FAILURE(rc)) 1344 1412 pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID; … … 1353 1421 * @param pVM The VM to operate on. 1354 1422 * @param pVCpu VMCPU handle. 1423 * 1424 * @remarks Called with preemption disabled just like HWACCMR0Enter, our 1425 * counterpart. 
1355 1426 */ 1356 1427 VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu) … … 1358 1429 int rc; 1359 1430 RTCPUID idCpu = RTMpCpuId(); 1360 PH WACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];1431 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu]; 1361 1432 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1362 1433 1363 1364 1434 /** @todo r=bird: This can't be entirely right? */ 1365 AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING); 1366 1367 /* Note: It's rather tricky with longjmps done by e.g. Log statements or the page fault handler. 1368 * We must restore the host FPU here to make absolutely sure we don't leave the guest FPU state active 1369 * or trash somebody else's FPU state. 1435 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING); 1436 1437 /* 1438 * Save the guest FPU and XMM state if necessary. 1439 * 1440 * Note! It's rather tricky with longjmps done by e.g. Log statements or 1441 * the page fault handler. We must restore the host FPU here to make 1442 * absolutely sure we don't leave the guest FPU state active or trash 1443 * somebody else's FPU state. 1370 1444 */ 1371 /* Save the guest FPU and XMM state if necessary. */1372 1445 if (CPUMIsGuestFPUStateActive(pVCpu)) 1373 1446 { … … 1379 1452 } 1380 1453 1381 rc = HWACCMR0Globals.pfnLeaveSession(pVM, pVCpu, pCtx);1382 1383 /* We don't pass on invlpg information to the recompiler for nested paging guests, so we must make sure the recompiler flushes its TLB1384 * the next time it executes code.1385 */1454 rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx); 1455 1456 /* We don't pass on invlpg information to the recompiler for nested paging 1457 guests, so we must make sure the recompiler flushes its TLB the next 1458 time it executes code. */ 1386 1459 if ( pVM->hwaccm.s.fNestedPaging 1387 1460 && CPUMIsGuestInPagedProtectedModeEx(pCtx)) 1388 1461 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH); 1389 1462 1390 /* keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */ 1463 /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness 1464 and ring-3 calls. */ 1391 1465 #ifdef RT_STRICT 1392 1466 if (RT_UNLIKELY( pVCpu->hwaccm.s.idEnteredCpu != idCpu … … 1399 1473 pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID; 1400 1474 1401 /* Disable VT-x or AMD-V if local init was done before. */ 1402 if (!HWACCMR0Globals.fGlobalInit) 1403 { 1404 rc = hwaccmR0DisableCpu(idCpu); 1475 /* 1476 * Disable VT-x or AMD-V if local init was done before. 1477 */ 1478 if (!g_HvmR0.fGlobalInit) 1479 { 1480 rc = hmR0DisableCpu(idCpu); 1405 1481 AssertRC(rc); 1406 1482 … … 1415 1491 } 1416 1492 1493 1417 1494 /** 1418 1495 * Runs guest code in a hardware accelerated VM. … … 1420 1497 * @returns VBox status code. 1421 1498 * @param pVM The VM to operate on. 1422 * @param pVCpu VMCPUD id. 1499 * @param pVCpu VMCPUD id. 1500 * 1501 * @remarks Called with preemption disabled and after first having called 1502 * HWACCMR0Enter. 
1423 1503 */ 1424 1504 VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu) 1425 1505 { 1426 1506 #ifdef VBOX_STRICT 1427 PH WACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[RTMpCpuId()];1507 PHMGLOBLCPUINFO pCpu = &g_HvmR0.aCpuInfo[RTMpCpuId()]; 1428 1508 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)); 1429 1509 Assert(pCpu->fConfigured); 1430 AssertReturn(!ASMAtomicReadBool(& HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);1510 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING); 1431 1511 Assert(ASMAtomicReadBool(&pCpu->fInUse) == true); 1432 1512 #endif … … 1436 1516 #endif 1437 1517 1438 int rc = HWACCMR0Globals.pfnRunGuestCode(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));1518 int rc = g_HvmR0.pfnRunGuestCode(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu)); 1439 1519 1440 1520 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE … … 1459 1539 if (pVM->hwaccm.s.vmx.fSupported) 1460 1540 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL); 1461 1462 1541 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL); 1463 1542 } 1543 1464 1544 1465 1545 /** … … 1476 1556 if (pVM->hwaccm.s.vmx.fSupported) 1477 1557 return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL); 1478 1479 1558 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL); 1480 1559 } 1560 1481 1561 1482 1562 /** … … 1489 1569 { 1490 1570 PVMCPU pVCpu = &pVM->aCpus[0]; 1491 CPUMCTX *pCtx;1571 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1492 1572 uint32_t aParam[5] = {0, 1, 2, 3, 4}; 1493 1573 int rc; 1494 1495 pCtx = CPUMQueryGuestCtxPtr(pVCpu);1496 1574 1497 1575 STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z); … … 1501 1579 rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]); 1502 1580 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z); 1581 1503 1582 return rc; 1504 1583 } … … 1511 1590 * @returns Suspend pending or not 1512 1591 */ 1513 VMMR0DECL(bool) HWACCMR0SuspendPending() 1514 { 1515 return ASMAtomicReadBool(&HWACCMR0Globals.fSuspended); 1516 } 1592 VMMR0DECL(bool) HWACCMR0SuspendPending(void) 1593 { 1594 return ASMAtomicReadBool(&g_HvmR0.fSuspended); 1595 } 1596 1517 1597 1518 1598 /** … … 1522 1602 * @returns cpu structure pointer 1523 1603 */ 1524 VMMR0DECL(PH WACCM_CPUINFO) HWACCMR0GetCurrentCpu(void)1604 VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void) 1525 1605 { 1526 1606 RTCPUID idCpu = RTMpCpuId(); 1527 Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo)); 1528 return &HWACCMR0Globals.aCpuInfo[idCpu]; 1529 } 1607 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo)); 1608 return &g_HvmR0.aCpuInfo[idCpu]; 1609 } 1610 1530 1611 1531 1612 /** … … 1536 1617 * @param idCpu id of the VCPU 1537 1618 */ 1538 VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu) 1539 { 1540 return &HWACCMR0Globals.aCpuInfo[idCpu]; 1541 } 1619 VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu) 1620 { 1621 Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo)); 1622 return &g_HvmR0.aCpuInfo[idCpu]; 1623 } 1624 1542 1625 1543 1626 /** … … 1562 1645 } 1563 1646 1647 1564 1648 /** 1565 1649 * Save a pending IO write. … … 1597 1681 *pfVTxDisabled = false; 1598 1682 1599 if ( ! HWACCMR0Globals.fEnabled1600 || ! HWACCMR0Globals.vmx.fSupported /* no such issues with AMD-V */1601 || ! 
HWACCMR0Globals.fGlobalInit /* Local init implies the CPU is currently not in VMX root mode. */)1683 if ( !g_HvmR0.fEnabled 1684 || !g_HvmR0.vmx.fSupported /* no such issues with AMD-V */ 1685 || !g_HvmR0.fGlobalInit /* Local init implies the CPU is currently not in VMX root mode. */) 1602 1686 return VINF_SUCCESS; /* nothing to do */ 1603 1687 1604 switch (VMMGetSwitcher(pVM))1688 switch (VMMGetSwitcher(pVM)) 1605 1689 { 1606 1690 case VMMSWITCHER_32_TO_32: … … 1619 1703 } 1620 1704 1621 PH WACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();1705 PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu(); 1622 1706 AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_INTERNAL_ERROR); 1623 1707 … … 1628 1712 } 1629 1713 1714 1630 1715 /** 1631 1716 * Raw-mode switcher hook - re-enable VT-x if was active *and* the current … … 1643 1728 return VINF_SUCCESS; /* nothing to do */ 1644 1729 1645 Assert( HWACCMR0Globals.fEnabled);1646 Assert( HWACCMR0Globals.vmx.fSupported);1647 Assert( HWACCMR0Globals.fGlobalInit);1648 1649 PH WACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();1730 Assert(g_HvmR0.fEnabled); 1731 Assert(g_HvmR0.vmx.fSupported); 1732 Assert(g_HvmR0.fGlobalInit); 1733 1734 PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu(); 1650 1735 AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_INTERNAL_ERROR); 1651 1736 … … 1656 1741 1657 1742 #ifdef VBOX_STRICT 1743 1658 1744 /** 1659 1745 * Dumps a descriptor. … … 1778 1864 } 1779 1865 1866 1780 1867 /** 1781 1868 * Formats a full register dump. … … 1924 2011 1925 2012 } 2013 1926 2014 #endif /* VBOX_STRICT */ 1927 2015 -
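A detail that is easy to miss in the HWACCMR0SetupVM hunks above: the function now disables interrupts itself and has to restore EFLAGS on every exit path, which is why the failure branch switches from AssertRCReturn to AssertReturnStmt (the latter executes a cleanup statement before the early return). Below is a minimal sketch of that pattern, using real IPRT macros but an invented worker function (exampleHardwareSpecificWork is hypothetical, not VirtualBox code):

    #include <iprt/asm-amd64-x86.h> /* ASMIntDisableFlags, ASMSetFlags */
    #include <iprt/assert.h>        /* AssertReturnStmt */
    #include <iprt/err.h>           /* VINF_SUCCESS, RT_SUCCESS */

    /* Hypothetical stand-in for the hardware specific setup call. */
    static int exampleHardwareSpecificWork(void)
    {
        return VINF_SUCCESS;
    }

    static int exampleSetupWithInterruptsDisabled(void)
    {
        /* Save the interrupt flag and disable interrupts so we cannot be
           rescheduled to another CPU while touching per-CPU state. */
        RTCCUINTREG fFlags = ASMIntDisableFlags();

        int rc = exampleHardwareSpecificWork();

        /* On failure, AssertReturnStmt runs its statement (restoring
           EFLAGS.IF) before returning rc, so no exit path leaks state. */
        AssertReturnStmt(RT_SUCCESS(rc), ASMSetFlags(fFlags), rc);

        ASMSetFlags(fFlags); /* Restore interrupts on the success path too. */
        return rc;
    }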
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r37319 r37320
…
  * @param   HCPhysCpuPage   Physical address of the global cpu page.
  */
-VMMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
…
  * @param   HCPhysCpuPage   Physical address of the global cpu page.
  */
-VMMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
…
     unsigned cResume = 0;
     uint8_t u8LastTPR;
-    PHWACCM_CPUINFO pCpu = 0;
+    PHMGLOBLCPUINFO pCpu = 0;
     RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
 #ifdef VBOX_STRICT
…
  * @param   pCpu    CPU info struct
  */
-VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
+VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
 {
     Assert(pVM->hwaccm.s.svm.fSupported);
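For context on what SVMR0EnableCpu does with the page it receives: enabling AMD-V on a host CPU is, per the AMD architecture manual, a matter of setting the SVME bit in EFER and pointing the VM_HSAVE_PA MSR at a host state-save page. The sketch below shows only that general recipe; the constants are spelled out locally and the function is illustrative, not the VirtualBox implementation:

    #include <iprt/asm-amd64-x86.h> /* ASMRdMsr, ASMWrMsr */
    #include <iprt/cdefs.h>         /* RT_BIT_64 */
    #include <iprt/err.h>           /* VINF_SUCCESS */
    #include <iprt/types.h>         /* RTHCPHYS */

    #define EXAMPLE_MSR_EFER         UINT32_C(0xc0000080) /* extended feature enable register */
    #define EXAMPLE_EFER_SVME        RT_BIT_64(12)        /* SVM enable bit in EFER */
    #define EXAMPLE_MSR_VM_HSAVE_PA  UINT32_C(0xc0010117) /* host state-save area address */

    static int exampleSvmEnableCpu(RTHCPHYS HCPhysCpuPage)
    {
        /* Turn on SVM in EFER... */
        uint64_t uEfer = ASMRdMsr(EXAMPLE_MSR_EFER);
        ASMWrMsr(EXAMPLE_MSR_EFER, uEfer | EXAMPLE_EFER_SVME);

        /* ...and tell the CPU where to save host state across VMRUN; this
           is the role of the page handed to SVMR0EnableCpu. */
        ASMWrMsr(EXAMPLE_MSR_VM_HSAVE_PA, HCPhysCpuPage);
        return VINF_SUCCESS;
    }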
trunk/src/VBox/VMM/VMMR0/HWSVMR0.h
r37319 r37320
…
  * @param   pCpu    CPU info struct
  */
-VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu);
+VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);
 
 /**
…
  * @param   pPageCpuPhys    Physical address of the global cpu page
  */
-VMMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage);
+VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage);
 
 /**
…
  * @param   pPageCpuPhys    Physical address of the global cpu page
  */
-VMMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
+VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 
 /**
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r37319 r37320
…
  * @param   HCPhysCpuPage   Physical address of the global cpu page.
  */
-VMMR0DECL(int) VMXR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
…
  * @param   HCPhysCpuPage   Physical address of the global cpu page.
  */
-VMMR0DECL(int) VMXR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
…
 static void vmxR0SetupTLBEPT(PVM pVM, PVMCPU pVCpu)
 {
-    PHWACCM_CPUINFO pCpu;
+    PHMGLOBLCPUINFO pCpu;
 
     Assert(pVM->hwaccm.s.fNestedPaging);
…
 static void vmxR0SetupTLBVPID(PVM pVM, PVMCPU pVCpu)
 {
-    PHWACCM_CPUINFO pCpu;
+    PHMGLOBLCPUINFO pCpu;
 
     Assert(pVM->hwaccm.s.vmx.fVPID);
…
        )
     {
-        PHWACCM_CPUINFO pCpu;
+        PHMGLOBLCPUINFO pCpu;
 
         pCpu = HWACCMR0GetCurrentCpu();
…
  * @param   pCpu    CPU info struct
  */
-VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu)
+VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
 {
     Assert(pVM->hwaccm.s.vmx.fSupported);
…
 {
     uint32_t aParam[6];
-    PHWACCM_CPUINFO pCpu;
+    PHMGLOBLCPUINFO pCpu;
     RTHCPHYS HCPhysCpuPage;
     int rc;
…
 {
     int rc, rc2;
-    PHWACCM_CPUINFO pCpu;
+    PHMGLOBLCPUINFO pCpu;
     RTHCPHYS HCPhysCpuPage;
     RTHCUINTREG uOldEFlags;
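The two TLB-setup helpers renamed above are where the per-CPU fields uCurrentASID and cTLBFlushes earn their keep: when a vCPU is scheduled onto a host CPU whose flush generation no longer matches the one the vCPU last saw, it takes a fresh ASID/VPID instead of flushing the whole TLB. Here is a self-contained sketch of that bookkeeping with invented types; it illustrates the idea only and is not the VMX code above:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct ExampleCpuInfo
    {
        uint32_t uCurrentASID;   /* last ASID handed out on this host CPU */
        uint32_t cTLBFlushes;    /* bumped whenever this CPU's TLB is purged */
    } ExampleCpuInfo;

    typedef struct ExampleVCpu
    {
        uint32_t uCurrentASID;   /* ASID this vCPU currently runs with */
        uint32_t cTLBFlushes;    /* flush generation last seen by this vCPU */
        bool     fForceTLBFlush; /* explicit flush request */
    } ExampleVCpu;

    static void exampleAssignAsid(ExampleVCpu *pVCpu, ExampleCpuInfo *pCpu, uint32_t uMaxASID)
    {
        /* Different flush generation or an explicit request: take the next
           ASID so stale translations tagged with the old one cannot hit. */
        if (   pVCpu->cTLBFlushes != pCpu->cTLBFlushes
            || pVCpu->fForceTLBFlush)
        {
            if (++pCpu->uCurrentASID >= uMaxASID)
            {
                /* Out of ASIDs: a real implementation flushes the TLB here
                   and starts a new generation before reusing ASIDs from 1
                   (ASID 0 stays reserved for the host). */
                pCpu->cTLBFlushes++;
                pCpu->uCurrentASID = 1;
            }
            pVCpu->uCurrentASID   = pCpu->uCurrentASID;
            pVCpu->cTLBFlushes    = pCpu->cTLBFlushes;
            pVCpu->fForceTLBFlush = false;
        }
    }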
trunk/src/VBox/VMM/VMMR0/HWVMXR0.h
r35346 r37320
…
  * @param   pCpu    CPU info struct
  */
-VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu);
+VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);
 
 /**
…
  * @param   pPageCpuPhys    Physical address of the global cpu page
  */
-VMMR0DECL(int) VMXR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
+VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 
 /**
…
  * @param   pPageCpuPhys    Physical address of the global cpu page
  */
-VMMR0DECL(int) VMXR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
+VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 
 /**
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r37228 r37320
…
      */
     case VMMR0_DO_HWACC_SETUP_VM:
-    {
-        RTCCUINTREG fFlags = ASMIntDisableFlags();
-        int rc = HWACCMR0SetupVM(pVM);
-        ASMSetFlags(fFlags);
-        return rc;
-    }
+        return HWACCMR0SetupVM(pVM);
 
     /*
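This dispatcher hunk is the caller-side half of the HWACCMR0SetupVM change earlier in the changeset: the callee now saves and restores EFLAGS.IF itself, so keeping the wrapper here would merely nest the same bracket twice. Nesting such brackets is harmless but redundant, as this illustrative sketch (not VirtualBox code) shows:

    #include <iprt/asm-amd64-x86.h> /* ASMIntDisableFlags, ASMSetFlags */

    static void exampleNestedFlagBrackets(void)
    {
        RTCCUINTREG fOuter = ASMIntDisableFlags(); /* IF was 1: saved, now clear */
        RTCCUINTREG fInner = ASMIntDisableFlags(); /* IF already 0: saved as 0 */

        ASMSetFlags(fInner); /* restores IF=0, no visible change */
        ASMSetFlags(fOuter); /* restores IF=1, interrupts back on */
    }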
trunk/src/VBox/VMM/include/HWACCMInternal.h
r37319 r37320
 /* $Id$ */
 /** @file
- * HWACCM - Internal header file.
+ * HM - Internal header file.
  */
 
 /*
- * Copyright (C) 2006-2007 Oracle Corporation
+ * Copyright (C) 2006-2011 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
…
 #define HWACCM_VTX_TOTAL_DEVHEAP_MEM (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)
 
-/* Enable for TPR guest patching. */
+/** Enable for TPR guest patching. */
 #define VBOX_HWACCM_WITH_GUEST_PATCHING
 
…
  */
 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
-# define HWACCM_SSM_VERSION                5
-# define HWACCM_SSM_VERSION_NO_PATCHING    4
+# define HWACCM_SSM_VERSION                 5
+# define HWACCM_SSM_VERSION_NO_PATCHING     4
 #else
-# define HWACCM_SSM_VERSION                4
-# define HWACCM_SSM_VERSION_NO_PATCHING    4
+# define HWACCM_SSM_VERSION                 4
+# define HWACCM_SSM_VERSION_NO_PATCHING     4
 #endif
 #define HWACCM_SSM_VERSION_2_0_X            3
…
  * Global per-cpu information. (host)
  */
-typedef struct
+typedef struct HMGLOBLCPUINFO
 {
     /** The CPU ID. */
…
     /** The memory object */
     RTR0MEMOBJ hMemObj;
-    /* Current ASID (AMD-V)/VPID (Intel)*/
+    /** Current ASID (AMD-V) / VPID (Intel). */
     uint32_t uCurrentASID;
-    /* TLB flush count */
+    /** TLB flush count. */
     uint32_t cTLBFlushes;
 
-    /* Set the first time a cpu is used to make sure we start with a clean TLB. */
+    /** Set the first time a cpu is used to make sure we start with a clean TLB. */
     bool fFlushTLB;
 
…
     /** In use by our code. (for power suspend) */
     volatile bool fInUse;
-} HWACCM_CPUINFO;
-typedef HWACCM_CPUINFO *PHWACCM_CPUINFO;
+} HMGLOBLCPUINFO;
+/** Pointer to the per-cpu global information. */
+typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
 
 typedef enum
…
 #ifdef IN_RING0
 
-VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu(void);
-VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);
+VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void);
+VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);
 
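One practical benefit of the struct change above is that the type now has a tag (typedef struct HMGLOBLCPUINFO { ... }) rather than being anonymous, so other headers can forward-declare it and traffic in pointers without pulling in HWACCMInternal.h. A generic C illustration; the consumer prototype and the FWD-suffixed pointer name are made up for the example:

    /* In some other header: the full definition is not needed. */
    struct HMGLOBLCPUINFO;
    typedef struct HMGLOBLCPUINFO *PHMGLOBLCPUINFOFWD;

    /* Prototypes can use the pointer type alone; only code that actually
       dereferences the struct must include the defining header. */
    int exampleConsumesCpuInfo(PHMGLOBLCPUINFOFWD pCpu);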