- Timestamp: Aug 30, 2013 10:05:31 PM (11 years ago)
- Location: trunk/src/VBox
- Files: 4 edited
trunk/src/VBox/HostDrivers/Support/SUPDrv.c (r48152 → r48208)

@@ -3367 +3367 @@

 /**
- * Quries the AMD-V and VT-x capabilities of the calling CPU.
+ * Queries the AMD-V and VT-x capabilities of the calling CPU.
  *
  * @returns VBox status code.
@@ -3419 +3419 @@
         )
     {
-        VMX_CAPABILITY vtCaps;
+        VMXCAPABILITY  vtCaps;

         *pfCaps |= SUPVTCAPS_VT_X;
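For context, the function touched here reports the hardware-virtualization capabilities of the calling CPU. A minimal, hypothetical user-space sketch of the CPUID-level part of such a probe is below; the flag names are invented, not the real SUPVTCAPS_* values, and the real ring-0 code additionally checks state such as the IA32_FEATURE_CONTROL MSR before reporting VT-x as usable:

#include <stdint.h>
#include <cpuid.h>            /* GCC/Clang wrapper around the CPUID instruction */

#define CAP_VT_X   UINT32_C(0x1)   /* illustrative flags, not SUPVTCAPS_* */
#define CAP_AMD_V  UINT32_C(0x2)

static uint32_t probeVtCaps(void)
{
    uint32_t fCaps = 0;
    unsigned a, b, c, d;

    /* Intel VT-x: CPUID.1:ECX.VMX (bit 5). */
    if (__get_cpuid(1, &a, &b, &c, &d) && (c & (1u << 5)))
        fCaps |= CAP_VT_X;

    /* AMD-V: CPUID.0x80000001:ECX.SVM (bit 2). */
    if (__get_cpuid(0x80000001, &a, &b, &c, &d) && (c & (1u << 2)))
        fCaps |= CAP_AMD_V;

    return fCaps;
}

Checking the SVM bit without first verifying an AMD vendor string is harmless in practice, since that bit is reserved as zero on Intel parts.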
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp (r47456 → r48208)

@@ -1074 +1074 @@
     GVMM_GET_VALID_INSTANCE(pGVMM, VERR_GVMM_INSTANCE);

-
     /*
      * Validate the VM structure, state and caller.
@@ -1080 +1079 @@
     AssertPtrReturn(pVM, VERR_INVALID_POINTER);
     AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
-    AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);
+    AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState),
+                    VERR_WRONG_ORDER);

     uint32_t hGVM = pVM->hSelf;
@@ -1090 +1090 @@

     RTPROCESS      ProcId = RTProcSelf();
-    RTNATIVETHREAD hSelf = RTThreadNativeSelf();
+    RTNATIVETHREAD hSelf  = RTThreadNativeSelf();
     AssertReturn(   (   pHandle->hEMT0  == hSelf
                      && pHandle->ProcId == ProcId)
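This hunk is mostly line wrapping and whitespace, but the surrounding assertions show a standard ring-0 pattern: validate a caller-supplied VM pointer (non-NULL, page aligned, state within a sane range) before trusting it. A rough sketch of the same idea under invented names:

#include <stdint.h>

#define MY_PAGE_OFFSET_MASK 0xFFFu      /* assuming 4 KiB pages */

enum MyVmState { MYVM_CREATING = 1, MYVM_RUNNING, MYVM_TERMINATED };

struct MyVM { enum MyVmState enmState; /* ... */ };

static int myValidateVM(const struct MyVM *pVM)
{
    if (!pVM)                                     /* AssertPtrReturn        */
        return -1;
    if ((uintptr_t)pVM & MY_PAGE_OFFSET_MASK)     /* must be page aligned   */
        return -1;
    if (   pVM->enmState < MYVM_CREATING          /* AssertMsgReturn range  */
        || pVM->enmState > MYVM_TERMINATED)
        return -2;
    return 0;
}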
trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r48153 → r48208)

@@ -114 +114 @@

     /** Host CR4 value (set by ring-0 VMX init) */
-    uint64_t                        hostCR4;
+    uint64_t                        u64HostCr4;

     /** Host EFER value (set by ring-0 VMX init) */
-    uint64_t                        hostEFER;
+    uint64_t                        u64HostEfer;

     /** VMX MSR values */
     struct
     {
-        uint64_t                    feature_ctrl;
-        uint64_t                    vmx_basic_info;
-        VMX_CAPABILITY              vmx_pin_ctls;
-        VMX_CAPABILITY              vmx_proc_ctls;
-        VMX_CAPABILITY              vmx_proc_ctls2;
-        VMX_CAPABILITY              vmx_exit;
-        VMX_CAPABILITY              vmx_entry;
-        uint64_t                    vmx_misc;
-        uint64_t                    vmx_cr0_fixed0;
-        uint64_t                    vmx_cr0_fixed1;
-        uint64_t                    vmx_cr4_fixed0;
-        uint64_t                    vmx_cr4_fixed1;
-        uint64_t                    vmx_vmcs_enum;
-        uint64_t                    vmx_vmfunc;
-        uint64_t                    vmx_ept_vpid_caps;
+        uint64_t                    u64FeatureCtrl;
+        uint64_t                    u64BasicInfo;
+        VMXCAPABILITY               vmxPinCtls;
+        VMXCAPABILITY               vmxProcCtls;
+        VMXCAPABILITY               vmxProcCtls2;
+        VMXCAPABILITY               vmxExit;
+        VMXCAPABILITY               vmxEntry;
+        uint64_t                    u64Misc;
+        uint64_t                    u64Cr0Fixed0;
+        uint64_t                    u64Cr0Fixed1;
+        uint64_t                    u64Cr4Fixed0;
+        uint64_t                    u64Cr4Fixed1;
+        uint64_t                    u64VmcsEnum;
+        uint64_t                    u64Vmfunc;
+        uint64_t                    u64EptVpidCaps;
     } msr;
     /* Last instruction error */
@@ -158 +158 @@
     } svm;
     /** Saved error from detection */
-    int32_t         lLastError;
+    int32_t                         lLastError;

     struct
@@ -172 +172 @@
      * actions when the host is being suspended to speed up the suspending and
      * avoid trouble. */
-    volatile bool   fSuspended;
+    volatile bool                   fSuspended;

     /** Whether we've already initialized all CPUs.
@@ -371 +371 @@
    {
        /** @todo move this into a separate function. */
-       g_HvmR0.vmx.msr.feature_ctrl   = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+       g_HvmR0.vmx.msr.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

        /*
@@ -402 +402 @@
        {
            /* Reread in case we've changed it. */
-           g_HvmR0.vmx.msr.feature_ctrl   = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
-
-           if (   (g_HvmR0.vmx.msr.feature_ctrl   & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
-               ==                                   (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+           g_HvmR0.vmx.msr.u64FeatureCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+
+           if (   (g_HvmR0.vmx.msr.u64FeatureCtrl & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+               ==                                   (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
            {
                /*
-                * Read all relevant MSR.
+                * Read all relevant MSRs.
                 */
-               g_HvmR0.vmx.msr.vmx_basic_info  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
-               g_HvmR0.vmx.msr.vmx_pin_ctls.u  = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
-               g_HvmR0.vmx.msr.vmx_proc_ctls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
-               g_HvmR0.vmx.msr.vmx_exit.u      = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
-               g_HvmR0.vmx.msr.vmx_entry.u     = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
-               g_HvmR0.vmx.msr.vmx_misc        = ASMRdMsr(MSR_IA32_VMX_MISC);
-               g_HvmR0.vmx.msr.vmx_cr0_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
-               g_HvmR0.vmx.msr.vmx_cr0_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
-               g_HvmR0.vmx.msr.vmx_cr4_fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
-               g_HvmR0.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
-               g_HvmR0.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
-               g_HvmR0.vmx.hostCR4             = ASMGetCR4();
-               g_HvmR0.vmx.hostEFER            = ASMRdMsr(MSR_K6_EFER);
+               g_HvmR0.vmx.msr.u64BasicInfo  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
+               g_HvmR0.vmx.msr.vmxPinCtls.u  = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
+               g_HvmR0.vmx.msr.vmxProcCtls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
+               g_HvmR0.vmx.msr.vmxExit.u     = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
+               g_HvmR0.vmx.msr.vmxEntry.u    = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
+               g_HvmR0.vmx.msr.u64Misc       = ASMRdMsr(MSR_IA32_VMX_MISC);
+               g_HvmR0.vmx.msr.u64Cr0Fixed0  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
+               g_HvmR0.vmx.msr.u64Cr0Fixed1  = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
+               g_HvmR0.vmx.msr.u64Cr4Fixed0  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
+               g_HvmR0.vmx.msr.u64Cr4Fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
+               g_HvmR0.vmx.msr.u64VmcsEnum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
+               g_HvmR0.vmx.u64HostCr4        = ASMGetCR4();
+               g_HvmR0.vmx.u64HostEfer       = ASMRdMsr(MSR_K6_EFER);
                /* VPID 16 bits ASID. */
                g_HvmR0.uMaxAsid = 0x10000; /* exclusive */

-               if (g_HvmR0.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
+               if (g_HvmR0.vmx.msr.vmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
                {
-                   g_HvmR0.vmx.msr.vmx_proc_ctls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
-                   if (   g_HvmR0.vmx.msr.vmx_proc_ctls2.n.allowed1
-                       & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
-                   {
-                       g_HvmR0.vmx.msr.vmx_ept_vpid_caps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
-                   }
-
-                   if (g_HvmR0.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC)
-                       g_HvmR0.vmx.msr.vmx_vmfunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
+                   g_HvmR0.vmx.msr.vmxProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
+                   if (g_HvmR0.vmx.msr.vmxProcCtls2.n.allowed1 & (VMX_VMCS_CTRL_PROC_EXEC2_EPT | VMX_VMCS_CTRL_PROC_EXEC2_VPID))
+                       g_HvmR0.vmx.msr.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
+
+                   if (g_HvmR0.vmx.msr.vmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC)
+                       g_HvmR0.vmx.msr.u64Vmfunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
                }
@@ -457 +454 @@

                /* Set revision dword at the beginning of the structure. */
-               *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(g_HvmR0.vmx.msr.vmx_basic_info);
+               *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(g_HvmR0.vmx.msr.u64BasicInfo);

                /* Make sure we don't get rescheduled to another cpu during this probe. */
@@ -465 +462 @@
                 * Check CR4.VMXE
                 */
-               g_HvmR0.vmx.hostCR4 = ASMGetCR4();
-               if (!(g_HvmR0.vmx.hostCR4 & X86_CR4_VMXE))
+               g_HvmR0.vmx.u64HostCr4 = ASMGetCR4();
+               if (!(g_HvmR0.vmx.u64HostCr4 & X86_CR4_VMXE))
                {
                    /* In theory this bit could be cleared behind our back. Which would cause
                       #UD faults when we try to execute the VMX instructions... */
-                   ASMSetCR4(g_HvmR0.vmx.hostCR4 | X86_CR4_VMXE);
+                   ASMSetCR4(g_HvmR0.vmx.u64HostCr4 | X86_CR4_VMXE);
                }
@@ -503 +500 @@
                   if it wasn't so before (some software could incorrectly
                   think it's in VMX mode). */
-               ASMSetCR4(g_HvmR0.vmx.hostCR4);
+               ASMSetCR4(g_HvmR0.vmx.u64HostCr4);
                ASMSetFlags(fFlags);
@@ -539 +536 @@
         * Timer Does Not Count Down at the Rate Specified" erratum.
         */
-       if (g_HvmR0.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
+       if (g_HvmR0.vmx.msr.vmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
        {
            g_HvmR0.vmx.fUsePreemptTimer   = true;
-           g_HvmR0.vmx.cPreemptTimerShift = MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(g_HvmR0.vmx.msr.vmx_misc);
+           g_HvmR0.vmx.cPreemptTimerShift = MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(g_HvmR0.vmx.msr.u64Misc);
            if (hmR0InitIntelIsSubjectToVmxPreemptionTimerErratum())
                g_HvmR0.vmx.cPreemptTimerShift = 0; /* This is about right most of the time here. */
@@ -781 +778 @@
        HMR0FIRSTRC FirstRc;
        hmR0FirstRcInit(&FirstRc);
-       rc = RTMpOnAll(hmR0DisableCpuCallback, NULL, &FirstRc);
+       rc = RTMpOnAll(hmR0DisableCpuCallback, NULL /* pvUser 1 */, &FirstRc);
        Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
        if (RT_SUCCESS(rc))
@@ -842 +839 @@
        /* MSR is not yet locked; we can change it ourselves here. */
        ASMWrMsr(MSR_IA32_FEATURE_CONTROL,
-                g_HvmR0.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
+                g_HvmR0.vmx.msr.u64FeatureCtrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
        fFC = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    }

    int rc;
-   if (   (fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
-       ==        (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+   if ((fFC & (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+           == (MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK))
+   {
        rc = VINF_SUCCESS;
+   }
    else
        rc = VERR_VMX_MSR_LOCKED_OR_DISABLED;
@@ -907 +906 @@
 *
 * @returns VBox status code.
- * @param   pVM     Pointer to the VM (can be 0).
+ * @param   pVM     Pointer to the VM (can be NULL).
 * @param   idCpu   The identifier for the CPU the function is called on.
 */
@@ -914 +913 @@
    PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];

-   Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
+   Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */
    Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
    Assert(!pCpu->fConfigured);
@@ -924 +923 @@
    int rc;
    if (g_HvmR0.vmx.fSupported && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
-       rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL, NIL_RTHCPHYS, true);
+       rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true);
    else
    {
@@ -945 +944 @@
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
- * @param   pvUser1     The 1st user argument.
+ * @param   pvUser1     Opaque pointer to the VM (can be NULL!).
 * @param   pvUser2     The 2nd user argument.
 */
@@ -1106 +1105 @@
/**
 * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
- * is to be called on the target cpus.
+ * is to be called on the target CPUs.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     The 1st user argument.
- * @param   pvUser2     The 2nd user argument.
+ * @param   pvUser2     Opaque pointer to the FirstRc.
 */
static DECLCALLBACK(void) hmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
@@ -1183 +1182 @@
    {
        /* Turn off VT-x or AMD-V on all CPUs. */
-       rc = RTMpOnAll(hmR0DisableCpuCallback, NULL, &FirstRc);
+       rc = RTMpOnAll(hmR0DisableCpuCallback, NULL /* pvUser 1 */, &FirstRc);
        Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
    }
@@ -1206 +1205 @@
    {
        /* Turn VT-x or AMD-V back on on all CPUs. */
-       rc = RTMpOnAll(hmR0EnableCpuCallback, NULL, &FirstRc /* output ignored */);
+       rc = RTMpOnAll(hmR0EnableCpuCallback, NULL /* pVM */, &FirstRc /* output ignored */);
        Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
    }
@@ -1242 +1241 @@
     * Copy globals to the VM structure.
     */
+   /** @todo r=ramshankar: Why do we do this for MSRs? We never change them in the
+    *        per-VM structures anyway... */
    pVM->hm.s.vmx.fSupported            = g_HvmR0.vmx.fSupported;
    pVM->hm.s.svm.fSupported            = g_HvmR0.svm.fSupported;
@@ -1247 +1248 @@
    pVM->hm.s.vmx.fUsePreemptTimer      = g_HvmR0.vmx.fUsePreemptTimer;
    pVM->hm.s.vmx.cPreemptTimerShift    = g_HvmR0.vmx.cPreemptTimerShift;
-   pVM->hm.s.vmx.msr.feature_ctrl      = g_HvmR0.vmx.msr.feature_ctrl;
-   pVM->hm.s.vmx.hostCR4               = g_HvmR0.vmx.hostCR4;
-   pVM->hm.s.vmx.hostEFER              = g_HvmR0.vmx.hostEFER;
-   pVM->hm.s.vmx.msr.vmx_basic_info    = g_HvmR0.vmx.msr.vmx_basic_info;
-   pVM->hm.s.vmx.msr.vmx_pin_ctls      = g_HvmR0.vmx.msr.vmx_pin_ctls;
-   pVM->hm.s.vmx.msr.vmx_proc_ctls     = g_HvmR0.vmx.msr.vmx_proc_ctls;
-   pVM->hm.s.vmx.msr.vmx_proc_ctls2    = g_HvmR0.vmx.msr.vmx_proc_ctls2;
-   pVM->hm.s.vmx.msr.vmx_exit          = g_HvmR0.vmx.msr.vmx_exit;
-   pVM->hm.s.vmx.msr.vmx_entry         = g_HvmR0.vmx.msr.vmx_entry;
-   pVM->hm.s.vmx.msr.vmx_misc          = g_HvmR0.vmx.msr.vmx_misc;
-   pVM->hm.s.vmx.msr.vmx_cr0_fixed0    = g_HvmR0.vmx.msr.vmx_cr0_fixed0;
-   pVM->hm.s.vmx.msr.vmx_cr0_fixed1    = g_HvmR0.vmx.msr.vmx_cr0_fixed1;
-   pVM->hm.s.vmx.msr.vmx_cr4_fixed0    = g_HvmR0.vmx.msr.vmx_cr4_fixed0;
-   pVM->hm.s.vmx.msr.vmx_cr4_fixed1    = g_HvmR0.vmx.msr.vmx_cr4_fixed1;
-   pVM->hm.s.vmx.msr.vmx_vmcs_enum     = g_HvmR0.vmx.msr.vmx_vmcs_enum;
-   pVM->hm.s.vmx.msr.vmx_vmfunc        = g_HvmR0.vmx.msr.vmx_vmfunc;
-   pVM->hm.s.vmx.msr.vmx_ept_vpid_caps = g_HvmR0.vmx.msr.vmx_ept_vpid_caps;
+   pVM->hm.s.vmx.msr.feature_ctrl      = g_HvmR0.vmx.msr.u64FeatureCtrl;
+   pVM->hm.s.vmx.hostCR4               = g_HvmR0.vmx.u64HostCr4;
+   pVM->hm.s.vmx.hostEFER              = g_HvmR0.vmx.u64HostEfer;
+   pVM->hm.s.vmx.msr.vmx_basic_info    = g_HvmR0.vmx.msr.u64BasicInfo;
+   pVM->hm.s.vmx.msr.vmx_pin_ctls      = g_HvmR0.vmx.msr.vmxPinCtls;
+   pVM->hm.s.vmx.msr.vmx_proc_ctls     = g_HvmR0.vmx.msr.vmxProcCtls;
+   pVM->hm.s.vmx.msr.vmx_proc_ctls2    = g_HvmR0.vmx.msr.vmxProcCtls2;
+   pVM->hm.s.vmx.msr.vmx_exit          = g_HvmR0.vmx.msr.vmxExit;
+   pVM->hm.s.vmx.msr.vmx_entry         = g_HvmR0.vmx.msr.vmxEntry;
+   pVM->hm.s.vmx.msr.vmx_misc          = g_HvmR0.vmx.msr.u64Misc;
+   pVM->hm.s.vmx.msr.vmx_cr0_fixed0    = g_HvmR0.vmx.msr.u64Cr0Fixed0;
+   pVM->hm.s.vmx.msr.vmx_cr0_fixed1    = g_HvmR0.vmx.msr.u64Cr0Fixed1;
+   pVM->hm.s.vmx.msr.vmx_cr4_fixed0    = g_HvmR0.vmx.msr.u64Cr4Fixed0;
+   pVM->hm.s.vmx.msr.vmx_cr4_fixed1    = g_HvmR0.vmx.msr.u64Cr4Fixed1;
+   pVM->hm.s.vmx.msr.vmx_vmcs_enum     = g_HvmR0.vmx.msr.u64VmcsEnum;
+   pVM->hm.s.vmx.msr.vmx_vmfunc        = g_HvmR0.vmx.msr.u64Vmfunc;
+   pVM->hm.s.vmx.msr.vmx_ept_vpid_caps = g_HvmR0.vmx.msr.u64EptVpidCaps;
    pVM->hm.s.svm.msrHwcr               = g_HvmR0.svm.msrHwcr;
    pVM->hm.s.svm.u32Rev                = g_HvmR0.svm.u32Rev;
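Most of the churn above is mechanical renaming, but it surrounds one load-bearing protocol: VMXON only works if IA32_FEATURE_CONTROL has the relevant enable bit set, and once firmware sets the lock bit the OS can no longer change the MSR until reset. That is why the code writes the enable and lock bits itself while the MSR is still unlocked, then rereads it to verify. A self-contained, hedged sketch of that dance; the constants are from the Intel SDM (bit 2 is VMXON-outside-SMX, which appears to be what VBox's MSR_IA32_FEATURE_CONTROL_VMXON corresponds to), and the rdmsr64/wrmsr64 wrappers are illustrative stand-ins for ASMRdMsr/ASMWrMsr that only work at ring 0:

#include <stdint.h>

#define MSR_IA32_FEATURE_CONTROL    0x3A
#define FC_LOCK                     (1ULL << 0)   /* write-once until reset */
#define FC_VMXON_OUTSIDE_SMX        (1ULL << 2)   /* plain VMXON allowed    */

/* Ring-0 only: these fault with #GP in user mode. */
static inline uint64_t rdmsr64(uint32_t idMsr)
{
    uint32_t lo, hi;
    __asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(idMsr));
    return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr64(uint32_t idMsr, uint64_t u64)
{
    __asm__ __volatile__("wrmsr" : : "c"(idMsr), "a"((uint32_t)u64), "d"((uint32_t)(u64 >> 32)));
}

/* Returns 0 if VMXON is (now) permitted, -1 if the BIOS locked it off. */
static int tryEnableVmxon(void)
{
    uint64_t fc = rdmsr64(MSR_IA32_FEATURE_CONTROL);
    if (!(fc & FC_LOCK))
    {
        /* MSR not yet locked: enable VMXON ourselves and lock it, like the
           hmR0InitIntel path in the diff above. */
        wrmsr64(MSR_IA32_FEATURE_CONTROL, fc | FC_VMXON_OUTSIDE_SMX | FC_LOCK);
        fc = rdmsr64(MSR_IA32_FEATURE_CONTROL);   /* reread to verify */
    }
    return (fc & (FC_VMXON_OUTSIDE_SMX | FC_LOCK)) == (FC_VMXON_OUTSIDE_SMX | FC_LOCK) ? 0 : -1;
}

As a side note on the preemption-timer hunk: the cPreemptTimerShift value comes from IA32_VMX_MISC bits 4:0, which express the timer rate as the TSC divided by a power of two.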
trunk/src/VBox/VMM/include/HMInternal.h (r48130 → r48208)

@@ -364 +364 @@
        uint64_t                    feature_ctrl;
        uint64_t                    vmx_basic_info;
-       VMX_CAPABILITY              vmx_pin_ctls;
-       VMX_CAPABILITY              vmx_proc_ctls;
-       VMX_CAPABILITY              vmx_proc_ctls2;
-       VMX_CAPABILITY              vmx_exit;
-       VMX_CAPABILITY              vmx_entry;
+       VMXCAPABILITY               vmx_pin_ctls;
+       VMXCAPABILITY               vmx_proc_ctls;
+       VMXCAPABILITY               vmx_proc_ctls2;
+       VMXCAPABILITY               vmx_exit;
+       VMXCAPABILITY               vmx_entry;
        uint64_t                    vmx_misc;
        uint64_t                    vmx_cr0_fixed0;
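The VMX_CAPABILITY to VMXCAPABILITY rename is purely cosmetic, but the type is worth a note since the HMR0.cpp hunks test .n.allowed1 on it. For the VMX control capability MSRs (IA32_VMX_PINBASED_CTLS and friends), the Intel SDM defines the low 32 bits as the allowed 0-settings (a control must be 1 if its bit is set there) and the high 32 bits as the allowed 1-settings (a control may be 1 only if its bit is set there). A hedged sketch of such a union; only .u and .n.allowed1 are confirmed by this diff, the other member name is assumed:

#include <stdint.h>

typedef union MYVMXCAPABILITY
{
    struct
    {
        uint32_t disallowed0;   /* low dword:  controls that must be 1 */
        uint32_t allowed1;      /* high dword: controls that may be 1  */
    } n;
    uint64_t u;                 /* raw MSR value, e.g. IA32_VMX_PROCBASED_CTLS */
} MYVMXCAPABILITY;

/* A feature is usable only if its control bit is allowed to be set: */
static int mayUseCtrl(MYVMXCAPABILITY caps, uint32_t fCtrlBit)
{
    return (caps.n.allowed1 & fCtrlBit) != 0;
}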