VirtualBox Changeset r47989

Timestamp: Aug 22, 2013 1:56:52 PM
Author: vboxsync
Message:

VMM/VMMR0: Preemption hooks implemented and enabled for Solaris and Linux hosts.

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r47959)
+++ trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r47989)
@@ -475,5 +475,9 @@
                     }
 
-                    /* Enter VMX Root Mode */
+                    /*
+                     * The only way of checking if we're in VMX root mode or not is to try and enter it.
+                     * There is no instruction or control bit that tells us if we're in VMX root mode.
+                     * Therefore, try and enter VMX root mode here.
+                     */
                     rc = VMXEnable(HCPhysScratchPage);
                     if (RT_SUCCESS(rc))
@@ -989,6 +993,7 @@
     {
         Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
-        g_HvmR0.aCpuInfo[i].fConfigured = false;
-        g_HvmR0.aCpuInfo[i].cTlbFlushes = 0;
+        g_HvmR0.aCpuInfo[i].fConfigured  = false;
+        g_HvmR0.aCpuInfo[i].cTlbFlushes  = 0;
+        g_HvmR0.aCpuInfo[i].uCurrentAsid = 0;
     }
 
@@ -1012,5 +1017,5 @@
          * We're doing the job ourselves.
          */
-        /* Allocate one page per cpu for the global vt-x and amd-v pages */
+        /* Allocate one page per cpu for the global VT-x and AMD-V pages */
         for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
         {
@@ -1030,5 +1035,6 @@
     }
 
-    if (RT_SUCCESS(rc) && g_HvmR0.fGlobalInit)
+    if (   RT_SUCCESS(rc)
+        && g_HvmR0.fGlobalInit)
     {
         /* First time, so initialize each cpu/core. */
@@ -1411,9 +1417,6 @@
 
     /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
-    if (   !pCpu->fConfigured
-        || !g_HvmR0.fGlobalInit)
-    {
+    if (!pCpu->fConfigured)
         hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
-    }
 
     /* Reload host-context (back from ring-3/migrated CPUs), reload guest CR0 (for FPU bits). */
@@ -1455,14 +1458,14 @@
 
     int rc  = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
-    AssertRC(rc);
+    AssertMsgRC(rc, ("pfnEnterSession failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
 
     /* Load the host as we may be resuming code after a longjmp and quite
        possibly be scheduled on a different CPU. */
     rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
-    AssertRC(rc);
+    AssertMsgRC(rc, ("pfnSaveHostState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
 
     /** @todo This is not needed to be done here anymore, can fix/optimize later. */
     rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
-    AssertRC(rc);
+    AssertMsgRC(rc, ("pfnLoadGuestState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
 
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
@@ -1489,13 +1492,18 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
-    if (!g_HvmR0.fGlobalInit)
-    {
-        RTCPUID idCpu = RTMpCpuId();
+    RTCPUID          idCpu = RTMpCpuId();
+    PHMGLOBALCPUINFO pCpu  = &g_HvmR0.aCpuInfo[idCpu];
+
+    if (   !g_HvmR0.fGlobalInit
+        && pCpu->fConfigured)
+    {
         int rc = hmR0DisableCpu(idCpu);
         AssertRCReturn(rc, rc);
+        Assert(!pCpu->fConfigured);
     }
 
     /* Reset these to force a TLB flush for the next entry. */
     pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
+    pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
     pVCpu->hm.s.uCurrentAsid = 0;
     VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
@@ -1520,27 +1528,43 @@
     AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
 
-    PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
+    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     AssertPtr(pCtx);
 
+    bool fDisabledPreempt = false;
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
+    {
+        Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
+        RTThreadPreemptDisable(&PreemptState);
+        fDisabledPreempt = true;
+    }
+
     int rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
 
-    /*
-     * When thread-context hooks are not used, leave HM context and if necessary disable HM on the CPU.
-     * When thread-context hooks -are- used, this work would be done in the VT-x and AMD-V thread-context callback.
-     */
     if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     {
-        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-        RTCPUID idCpu = RTMpCpuId();
-
         /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
            and ring-3 calls when thread-context hooks are not supported. */
+        RTCPUID idCpu = RTMpCpuId();
         AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
                       || RT_FAILURE_NP(rc), ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
                       rc = VERR_HM_WRONG_CPU_1);
-
+    }
+
+    /* Leave HM context, takes care of local init (term). */
+    if (RT_SUCCESS(rc))
+    {
         rc = HMR0LeaveEx(pVCpu);
         AssertRCReturn(rc, rc);
     }
+
+    /* Deregister hook now that we've left HM context before re-enabling preemption. */
+    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
+     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
+    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+        VMMR0ThreadCtxHooksDeregister(pVCpu);
+
+    if (fDisabledPreempt)
+        RTThreadPreemptRestore(&PreemptState);
 
     /* Guest FPU and debug state shouldn't be active now, it's likely that we're going back to ring-3. */
@@ -1548,5 +1572,4 @@
     Assert(!CPUMIsGuestDebugStateActive(pVCpu));
 
-    pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
     return rc;
 }
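
The heart of the change is the last two hunks: HMR0Leave no longer assumes the caller has already disabled preemption. It disables preemption itself when entered preemptible, leaves the HM session, deregisters the thread-context (preemption) hook once HM context has been left, and only then restores preemption. Below is a minimal C++ sketch of that disable/work/deregister/restore pattern, not the actual VirtualBox code: hmSketchLeaveSession() is a hypothetical stand-in for g_HvmR0.pfnLeaveSession plus HMR0LeaveEx, the hook deregistration is only indicated by a comment, and the IPRT preemption APIs used are ring-0 only, so the sketch builds only against the IPRT headers in a VirtualBox source tree.

#include <iprt/thread.h>   /* RTTHREADPREEMPTSTATE, RTThreadPreemptIsEnabled/Disable/Restore */
#include <iprt/err.h>      /* VINF_SUCCESS */

/* Hypothetical stand-in for the real session-leave work (g_HvmR0.pfnLeaveSession
   followed by HMR0LeaveEx in the changeset above); it exists only for this sketch. */
static int hmSketchLeaveSession(void)
{
    return VINF_SUCCESS;
}

static int hmSketchLeave(void)
{
    bool                 fDisabledPreempt = false;
    RTTHREADPREEMPTSTATE PreemptState     = RTTHREADPREEMPTSTATE_INITIALIZER;

    /* If we were entered with preemption enabled, switch it off so this thread
       cannot be rescheduled to another CPU while it still owns per-CPU HM state. */
    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        RTThreadPreemptDisable(&PreemptState);
        fDisabledPreempt = true;
    }

    int rc = hmSketchLeaveSession();

    /* In the real HMR0Leave the thread-context (preemption) hook is deregistered
       here: after HM context has been left, but before preemption is re-enabled. */

    if (fDisabledPreempt)
        RTThreadPreemptRestore(&PreemptState);
    return rc;
}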