VirtualBox

Changeset 48621 in vbox for trunk/src/VBox/VMM


Timestamp: Sep 21, 2013 12:23:40 PM
Author: vboxsync
Message: VMM/VMMR0: Don't disable interrupts for a long time when disabling preemption is sufficient. Other minor fixes.
Location: trunk/src/VBox/VMM/VMMR0
Files: 3 edited
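
In practice the pattern looks like this: in HMR0SetupVM below, a long interrupts-off section becomes a preemption-disabled one (staying on one host CPU is all that is needed), while the init/term paths simply drop a needless interrupts-off block. A minimal before/after sketch using the IPRT APIs from the diff; doPerCpuSetup is a hypothetical stand-in for the real per-CPU work:

    #include <iprt/asm-amd64-x86.h> /* ASMIntDisableFlags, ASMSetFlags */
    #include <iprt/err.h>           /* VINF_SUCCESS */
    #include <iprt/mp.h>            /* RTMpCpuId */
    #include <iprt/thread.h>        /* RTThreadPreemptDisable, RTThreadPreemptRestore */

    static int doPerCpuSetup(RTCPUID idCpu) /* hypothetical stand-in for the real work */
    {
        NOREF(idCpu);
        return VINF_SUCCESS;
    }

    /* Before: interrupts are masked for the whole (potentially long) operation. */
    static int setupWithInterruptsDisabled(void)
    {
        RTCCUINTREG fFlags = ASMIntDisableFlags(); /* no interrupts, no migration */
        int rc = doPerCpuSetup(RTMpCpuId());
        ASMSetFlags(fFlags);
        return rc;
    }

    /* After: only preemption is disabled; the CPU keeps servicing interrupts. */
    static int setupWithPreemptionDisabled(void)
    {
        RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
        RTThreadPreemptDisable(&PreemptState); /* pins us to the current CPU */
        int rc = doPerCpuSetup(RTMpCpuId());   /* CPU id stays valid: no preemption */
        RTThreadPreemptRestore(&PreemptState);
        return rc;
    }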

Legend: context lines are unprefixed; removed lines are prefixed with -, added lines with +.
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r48591 → r48621)

    @@ -1284,10 +1284,5 @@
          * Call the hardware specific initialization method.
          */
    -    RTCCUINTREG      fFlags = ASMIntDisableFlags();
    -    PHMGLOBALCPUINFO pCpu   = HMR0GetCurrentCpu();
    -    ASMSetFlags(fFlags);
    -
    -    int rc = g_HvmR0.pfnInitVM(pVM);
    -    return rc;
    +    return g_HvmR0.pfnInitVM(pVM);
     }

    @@ -1304,20 +1299,11 @@
         AssertReturn(pVM, VERR_INVALID_PARAMETER);

    -    /* Make sure we don't touch HM after we've disabled HM in preparation
    -       of a suspend. */
    -    /** @todo r=bird: This cannot be right, the termination functions are
    -     *        just freeing memory and resetting pVM/pVCpu members...
    -     *  ==> memory leak. */
    -    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
    -
         /*
          * Call the hardware specific method.
    +     *
    +     * Note! We might be preparing for a suspend, so the pfnTermVM() functions should probably not
    +     * mess with VT-x/AMD-V features on the CPU, currently all they do is free memory so this is safe.
          */
    -    RTCCUINTREG      fFlags = ASMIntDisableFlags();
    -    PHMGLOBALCPUINFO pCpu   = HMR0GetCurrentCpu();
    -    ASMSetFlags(fFlags);
    -
    -    int rc = g_HvmR0.pfnTermVM(pVM);
    -    return rc;
    +    return g_HvmR0.pfnTermVM(pVM);
     }

    @@ -1340,16 +1326,17 @@
         AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);

    -    /*
    -     * Call the hardware specific setup VM method.  This requires the CPU to be
    -     * enabled for AMD-V/VT-x and preemption to be prevented.
    -     */
    -    RTCCUINTREG      fFlags = ASMIntDisableFlags();
    -    RTCPUID          idCpu  = RTMpCpuId();
    -    PHMGLOBALCPUINFO pCpu   = &g_HvmR0.aCpuInfo[idCpu];
    -
         /* On first entry we'll sync everything. */
         for (VMCPUID i = 0; i < pVM->cCpus; i++)
             VMCPU_HMCF_RESET_TO(&pVM->aCpus[i], HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);

    +    /*
    +     * Call the hardware specific setup VM method. This requires the CPU to be
    +     * enabled for AMD-V/VT-x and preemption to be prevented.
    +     */
    +    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    +    RTThreadPreemptDisable(&PreemptState);
    +    RTCPUID          idCpu  = RTMpCpuId();
    +    PHMGLOBALCPUINFO pCpu   = &g_HvmR0.aCpuInfo[idCpu];
    +
         /* Enable VT-x or AMD-V if local init is required. */
         int rc;

    @@ -1357,5 +1344,5 @@
         {
             rc = hmR0EnableCpu(pVM, idCpu);
    -        AssertReturnStmt(RT_SUCCESS_NP(rc), ASMSetFlags(fFlags), rc);
    +        AssertRCReturnStmt(rc, RTThreadPreemptRestore(&PreemptState), rc);
         }

    @@ -1370,5 +1357,5 @@
         }

    -    ASMSetFlags(fFlags);
    +    RTThreadPreemptRestore(&PreemptState);
         return rc;
     }

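One detail from the SetupVM hunk: AssertRCReturnStmt runs its middle statement before returning on failure, which is how the error path undoes RTThreadPreemptDisable in a single line. A small sketch of the idiom (doStep and runPinnedSketch are hypothetical):

    #include <iprt/assert.h> /* AssertRCReturnStmt */
    #include <iprt/err.h>    /* VINF_SUCCESS */
    #include <iprt/thread.h> /* RTThreadPreempt* APIs */

    static int doStep(void) /* hypothetical fallible step */
    {
        return VINF_SUCCESS;
    }

    static int runPinnedSketch(void)
    {
        RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
        RTThreadPreemptDisable(&PreemptState);

        int rc = doStep();
        /* On failure: assert, execute the cleanup statement, then return rc. */
        AssertRCReturnStmt(rc, RTThreadPreemptRestore(&PreemptState), rc);

        RTThreadPreemptRestore(&PreemptState);
        return rc;
    }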
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r48571 → r48621)

    @@ -321,4 +321,5 @@
                                   void *pvArg)
     {
    +    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
         AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER);
         AssertReturn(   HCPhysCpuPage

    @@ -328,4 +329,7 @@
         NOREF(fEnabledByHost);

    +    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
    +    RTCCUINTREG uEflags = ASMIntDisableFlags();
    +
         /*
          * We must turn on AMD-V and setup the host state physical address, as those MSRs are per CPU.

    @@ -342,5 +346,8 @@

             if (!pCpu->fIgnoreAMDVInUseError)
    +        {
    +            ASMSetFlags(uEflags);
                 return VERR_SVM_IN_USE;
    +        }
         }

    @@ -350,4 +357,7 @@
         /* Write the physical page address where the CPU will store the host state while executing the VM. */
         ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
    +
    +    /* Restore interrupts. */
    +    ASMSetFlags(uEflags);

         /*

    @@ -378,4 +388,5 @@
     VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     {
    +    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
         AssertReturn(   HCPhysCpuPage
                      && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);

    @@ -383,4 +394,7 @@
         NOREF(pCpu);

    +    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
    +    RTCCUINTREG uEflags = ASMIntDisableFlags();
    +
         /* Turn off AMD-V in the EFER MSR. */
         uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);

    @@ -389,4 +403,7 @@
         /* Invalidate host state physical address. */
         ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
    +
    +    /* Restore interrupts. */
    +    ASMSetFlags(uEflags);

         return VINF_SUCCESS;

    @@ -641,6 +658,5 @@
     VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
     {
    -    int rc = VINF_SUCCESS;
    -
    +    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
         AssertReturn(pVM, VERR_INVALID_PARAMETER);
         Assert(pVM->hm.s.svm.fSupported);

    @@ -651,5 +667,5 @@
             PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;

    -        AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
    +        AssertMsgReturn(pVmcb, ("Invalid pVmcb for vcpu[%u]\n", i), VERR_SVM_INVALID_PVMCB);

             /* Trap exceptions unconditionally (debug purposes). */

    @@ -770,5 +786,5 @@
         }

    -    return rc;
    +    return VINF_SUCCESS;
     }

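Condensing the SVMR0EnableCpu hunks into one standalone sketch (the fIgnoreAMDVInUseError handling and surrounding checks are elided, and svmEnableCpuSketch is a hypothetical name; the MSR constants and error codes are the real VBox definitions): interrupts are masked only around the EFER and host-state MSR writes, and every exit path, including the early VERR_SVM_IN_USE return, restores EFLAGS first.

    #include <iprt/asm-amd64-x86.h> /* ASMIntDisableFlags, ASMSetFlags, ASMRdMsr, ASMWrMsr */
    #include <iprt/x86.h>           /* MSR_K6_EFER, MSR_K6_EFER_SVME, MSR_K8_VM_HSAVE_PA */
    #include <VBox/err.h>           /* VERR_SVM_IN_USE, VINF_SUCCESS */

    static int svmEnableCpuSketch(RTHCPHYS HCPhysCpuPage)
    {
        RTCCUINTREG uEflags = ASMIntDisableFlags();     /* short window only */

        uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
        if (u64HostEfer & MSR_K6_EFER_SVME)
        {
            ASMSetFlags(uEflags);                       /* restore before the early return */
            return VERR_SVM_IN_USE;
        }
        ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
        ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);    /* per-CPU host-state save area */

        ASMSetFlags(uEflags);                           /* ... and on the success path */
        return VINF_SUCCESS;
    }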
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r48570 → r48621)

    @@ -740,4 +740,7 @@
         }

    +    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
    +    RTCCUINTREG uEflags = ASMIntDisableFlags();
    +
         /* Enable the VMX bit in CR4 if necessary. */
         RTCCUINTREG uCr4 = ASMGetCR4();

    @@ -750,4 +753,6 @@
             ASMSetCR4(uCr4);

    +    /* Restore interrupts. */
    +    ASMSetFlags(uEflags);
         return rc;
     }

    @@ -762,7 +767,12 @@
     {
         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    +
    +    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
    +    RTCCUINTREG uEflags = ASMIntDisableFlags();

         /* If we're for some reason not in VMX root mode, then don't leave it. */
         RTCCUINTREG uHostCR4 = ASMGetCR4();
    +
    +    int rc;
         if (uHostCR4 & X86_CR4_VMXE)
         {

    @@ -770,8 +780,12 @@
             VMXDisable();
             ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
    -        return VINF_SUCCESS;
    -    }
    -
    -    return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
    +        rc = VINF_SUCCESS;
    +    }
    +    else
    +        rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
    +
    +    /* Restore interrupts. */
    +    ASMSetFlags(uEflags);
    +    return rc;
     }

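The Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)) lines added across all three files spell out the resulting contract: the caller pins the thread by disabling preemption, and the per-CPU enable/disable routines mask interrupts themselves, only around the actual CR4/MSR manipulation. A sketch of that division of labor (both function names are hypothetical):

    #include <iprt/asm-amd64-x86.h> /* ASMIntDisableFlags, ASMSetFlags */
    #include <iprt/assert.h>        /* Assert */
    #include <iprt/thread.h>        /* RTThreadPreempt* APIs, NIL_RTTHREAD */

    /* Callee side: may assume it cannot be rescheduled; masks interrupts briefly. */
    static void vmxPerCpuWorkSketch(void)
    {
        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); /* caller must have pinned us */

        RTCCUINTREG uEflags = ASMIntDisableFlags();      /* short window for CR4/MSR work */
        /* ... read/modify CR4, EFER, etc. here ... */
        ASMSetFlags(uEflags);
    }

    /* Caller side: disables preemption only; interrupts stay enabled in between. */
    static void vmxCallerSketch(void)
    {
        RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
        RTThreadPreemptDisable(&PreemptState);
        vmxPerCpuWorkSketch();                           /* runs on one fixed CPU */
        RTThreadPreemptRestore(&PreemptState);
    }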