Changeset 48621 in vbox for trunk/src/VBox/VMM
- Timestamp: Sep 21, 2013 12:23:40 PM (11 years ago)
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 3 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r48591 r48621 1284 1284 * Call the hardware specific initialization method. 1285 1285 */ 1286 RTCCUINTREG fFlags = ASMIntDisableFlags(); 1287 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu(); 1288 ASMSetFlags(fFlags); 1289 1290 int rc = g_HvmR0.pfnInitVM(pVM); 1291 return rc; 1286 return g_HvmR0.pfnInitVM(pVM); 1292 1287 } 1293 1288 … … 1304 1299 AssertReturn(pVM, VERR_INVALID_PARAMETER); 1305 1300 1306 /* Make sure we don't touch HM after we've disabled HM in preparation1307 of a suspend. */1308 /** @todo r=bird: This cannot be right, the termination functions are1309 * just freeing memory and resetting pVM/pVCpu members...1310 * ==> memory leak. */1311 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);1312 1313 1301 /* 1314 1302 * Call the hardware specific method. 1303 * 1304 * Note! We might be preparing for a suspend, so the pfnTermVM() functions should probably not 1305 * mess with VT-x/AMD-V features on the CPU, currently all they do is free memory so this is safe. 1315 1306 */ 1316 RTCCUINTREG fFlags = ASMIntDisableFlags(); 1317 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu(); 1318 ASMSetFlags(fFlags); 1319 1320 int rc = g_HvmR0.pfnTermVM(pVM); 1321 return rc; 1307 return g_HvmR0.pfnTermVM(pVM); 1322 1308 } 1323 1309 … … 1340 1326 AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING); 1341 1327 1342 /*1343 * Call the hardware specific setup VM method. This requires the CPU to be1344 * enabled for AMD-V/VT-x and preemption to be prevented.1345 */1346 RTCCUINTREG fFlags = ASMIntDisableFlags();1347 RTCPUID idCpu = RTMpCpuId();1348 PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];1349 1350 1328 /* On first entry we'll sync everything. */ 1351 1329 for (VMCPUID i = 0; i < pVM->cCpus; i++) 1352 1330 VMCPU_HMCF_RESET_TO(&pVM->aCpus[i], HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST); 1353 1331 1332 /* 1333 * Call the hardware specific setup VM method. 
This requires the CPU to be 1334 * enabled for AMD-V/VT-x and preemption to be prevented. 1335 */ 1336 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 1337 RTThreadPreemptDisable(&PreemptState); 1338 RTCPUID idCpu = RTMpCpuId(); 1339 PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu]; 1340 1354 1341 /* Enable VT-x or AMD-V if local init is required. */ 1355 1342 int rc; … … 1357 1344 { 1358 1345 rc = hmR0EnableCpu(pVM, idCpu); 1359 AssertR eturnStmt(RT_SUCCESS_NP(rc), ASMSetFlags(fFlags), rc);1346 AssertRCReturnStmt(rc, RTThreadPreemptRestore(&PreemptState), rc); 1360 1347 } 1361 1348 … … 1370 1357 } 1371 1358 1372 ASMSetFlags(fFlags);1359 RTThreadPreemptRestore(&PreemptState); 1373 1360 return rc; 1374 1361 } -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r48571 r48621 321 321 void *pvArg) 322 322 { 323 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 323 324 AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER); 324 325 AssertReturn( HCPhysCpuPage … … 328 329 NOREF(fEnabledByHost); 329 330 331 /* Paranoid: Disable interrupt as, in theory, interrupt handlers might mess with EFER. */ 332 RTCCUINTREG uEflags = ASMIntDisableFlags(); 333 330 334 /* 331 335 * We must turn on AMD-V and setup the host state physical address, as those MSRs are per CPU. … … 342 346 343 347 if (!pCpu->fIgnoreAMDVInUseError) 348 { 349 ASMSetFlags(uEflags); 344 350 return VERR_SVM_IN_USE; 351 } 345 352 } 346 353 … … 350 357 /* Write the physical page address where the CPU will store the host state while executing the VM. */ 351 358 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage); 359 360 /* Restore interrupts. */ 361 ASMSetFlags(uEflags); 352 362 353 363 /* … … 378 388 VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage) 379 389 { 390 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 380 391 AssertReturn( HCPhysCpuPage 381 392 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER); … … 383 394 NOREF(pCpu); 384 395 396 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */ 397 RTCCUINTREG uEflags = ASMIntDisableFlags(); 398 385 399 /* Turn off AMD-V in the EFER MSR. */ 386 400 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER); … … 389 403 /* Invalidate host state physical address. */ 390 404 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0); 405 406 /* Restore interrupts. 
*/ 407 ASMSetFlags(uEflags); 391 408 392 409 return VINF_SUCCESS; … … 641 658 VMMR0DECL(int) SVMR0SetupVM(PVM pVM) 642 659 { 643 int rc = VINF_SUCCESS; 644 660 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 645 661 AssertReturn(pVM, VERR_INVALID_PARAMETER); 646 662 Assert(pVM->hm.s.svm.fSupported); … … 651 667 PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb; 652 668 653 AssertMsgReturn(pVmcb, ("Invalid pVmcb \n"), VERR_SVM_INVALID_PVMCB);669 AssertMsgReturn(pVmcb, ("Invalid pVmcb for vcpu[%u]\n", i), VERR_SVM_INVALID_PVMCB); 654 670 655 671 /* Trap exceptions unconditionally (debug purposes). */ … … 770 786 } 771 787 772 return rc;788 return VINF_SUCCESS; 773 789 } 774 790 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r48570 r48621 740 740 } 741 741 742 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */ 743 RTCCUINTREG uEflags = ASMIntDisableFlags(); 744 742 745 /* Enable the VMX bit in CR4 if necessary. */ 743 746 RTCCUINTREG uCr4 = ASMGetCR4(); … … 750 753 ASMSetCR4(uCr4); 751 754 755 /* Restore interrupts. */ 756 ASMSetFlags(uEflags); 752 757 return rc; 753 758 } … … 762 767 { 763 768 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 769 770 /* Paranoid: Disable interrupts as, in theory, interrupts handlers might mess with CR4. */ 771 RTCCUINTREG uEflags = ASMIntDisableFlags(); 764 772 765 773 /* If we're for some reason not in VMX root mode, then don't leave it. */ 766 774 RTCCUINTREG uHostCR4 = ASMGetCR4(); 775 776 int rc; 767 777 if (uHostCR4 & X86_CR4_VMXE) 768 778 { … … 770 780 VMXDisable(); 771 781 ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE); 772 return VINF_SUCCESS; 773 } 774 775 return VERR_VMX_NOT_IN_VMX_ROOT_MODE; 782 rc = VINF_SUCCESS; 783 } 784 else 785 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE; 786 787 /* Restore interrupts. */ 788 ASMSetFlags(uEflags); 789 return rc; 776 790 } 777 791
Note: See TracChangeset for help on using the changeset viewer.