VirtualBox Changeset r18927

Timestamp: Apr 16, 2009 11:41:38 AM (16 years ago)
Author:    vboxsync
Message:   Big step to separate VMM data structures for guest SMP. (pgm, em)
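
The diff below applies one recurring pattern: paging state that is really per virtual CPU (the StatRZGuestCR3WriteConflict counter, the shadow paging mode) moves from the shared VM structure into the per-VCPU structure, the shared sync-flags field is renamed to fGlobalSyncFlags, and the touched PGM/EM functions gain an explicit PVMCPU parameter. The sketch below only illustrates that split; the struct layouts, the flag value, and the function pgmPoolExampleConflict are simplified stand-ins, not the real VBox definitions.

    /*
     * Minimal sketch of the per-VCPU split (illustrative stand-ins only).
     */
    #include <stdint.h>

    typedef struct PGMCPU           /* per-VCPU paging state */
    {
        uint64_t StatRZGuestCR3WriteConflict;   /* statistic moved off the VM */
        int      enmShadowMode;                 /* shadow paging mode is per CPU */
    } PGMCPU;

    typedef struct VMCPU
    {
        struct { PGMCPU s; } pgm;
    } VMCPU, *PVMCPU;

    typedef struct PGM              /* state shared by all VCPUs */
    {
        uint32_t fGlobalSyncFlags;              /* renamed from fSyncFlags */
    } PGM;

    typedef struct VM
    {
        uint32_t cCPUs;
        struct { PGM s; } pgm;
        VMCPU    aCpus[1];
    } VM, *PVM;

    #define PGM_SYNC_CLEAR_PGM_POOL 0x01        /* illustrative flag value */

    /* Before this change the function took only PVM and bumped a counter in
     * pVM->pgm.s; now the caller resolves the VCPU (e.g. with VMMGetCpu) and
     * passes it in, so the counter lands on the CPU that took the fault. */
    static void pgmPoolExampleConflict(PVM pVM, PVMCPU pVCpu)
    {
        pVCpu->pgm.s.StatRZGuestCR3WriteConflict++;             /* per-CPU */
        pVM->pgm.s.fGlobalSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL; /* still global */
    }

Code paths that cannot be made per-CPU yet are guarded in the diff with Assert(pVM->cCPUs == 1) and @todo markers, so single-CPU behaviour is unchanged while the SMP split proceeds.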

File: 1 edited

Legend: each diff line shows a change marker, the r18666 line number, the r18927 line number, and the source text.
  (blank)  unmodified context
  +        added in r18927
  -        removed from r18666
  …        unchanged lines elided
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

r18666 → r18927

   229  229   * for mapping conflicts and set the SyncCR3 FF if found.
   230  230   *
+       231   * @param   pVCpu       VMCPU handle
   231  232   * @param   pPool       The pool.
   232  233   * @param   pPage       The head page.
  …
   237  238   *                      This need not be specified if the caller knows we won't do cross entry accesses.
   238  239   */
-  239       void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, PDISCPUSTATE pCpu)
+       240  void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, PDISCPUSTATE pCpu)
   240  241  {
   241  242      Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
  …
   437  438                      Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
   438  439                      VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
-  439                           STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+       440                      STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
   440  441                      LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
   441  442                      break;
  …
   469  470                          {
   470  471                              Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
-  471                                   STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+       472                              STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
   472  473                              VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
   473  474                              LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
  …
   515  516                      Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
   516  517                      VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
-  517                           STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+       518                      STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
   518  519                      LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
   519  520                      break;
  …
   553  554                          Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
   554  555                          VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
-  555                               STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+       556                          STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
   556  557                          LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
   557  558                          break;
  …
   592  593                      {
   593  594                          Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
-  594                               STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+       595                          STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
   595  596                          VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
   596  597                          LogFlow(("pgmPoolMonitorChainChanging: Detected pdpt conflict at iShw=%#x!\n", iShw));
  …
   624  625                              {
   625  626                                  Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
-  626                                       STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+       627                                  STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
   627  628                                  VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
   628  629                                  LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
  …
   888  889   * @returns VBox status code suitable for scheduling.
   889  890   * @param   pVM         The VM handle.
+       891   * @param   pVCpu       The VMCPU handle.
   890  892   * @param   pPool       The pool.
   891  893   * @param   pPage       The pool page (head).
  …
   895  897   * @param   pvFault     The fault address.
   896  898   */
-  897       static int pgmPoolAccessHandlerFlush(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
+       899  static int pgmPoolAccessHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
   898  900                                       PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
   899  901  {
  …
   907  909       */
   908  910      uint32_t cbWritten;
-  909           int rc2 = EMInterpretInstructionCPU(pVM, pCpu, pRegFrame, pvFault, &cbWritten);
+       911      int rc2 = EMInterpretInstructionCPU(pVM, pVCpu, pCpu, pRegFrame, pvFault, &cbWritten);
   910  912      if (RT_SUCCESS(rc2))
   911  913          pRegFrame->rip += pCpu->opsize;
  …
   971  973       * write situation, meaning that it's safe to write here.
   972  974       */
-  973       #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
   974  975      PVMCPU      pVCpu = VMMGetCpu(pPool->CTX_SUFF(pVM));
-  975       #endif
   976  976      RTGCUINTPTR pu32 = (RTGCUINTPTR)pvFault;
   977  977      while (pRegFrame->ecx)
  …
   979  979  #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
   980  980          uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
-  981               pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, (RTGCPTR)pu32, NULL);
+       981          pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, NULL);
   982  982          PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
   983  983  #else
-  984               pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, (RTGCPTR)pu32, NULL);
+       984          pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, NULL);
   985  985  #endif
   986  986  #ifdef IN_RC
  …
  1011 1011   * @returns VBox status code suitable for scheduling.
  1012 1012   * @param   pVM         The VM handle.
+      1013   * @param   pVCpu       The VMCPU handle.
  1013 1014   * @param   pPool       The pool.
  1014 1015   * @param   pPage       The pool page (head).
  …
  1018 1019   * @param   pvFault     The fault address.
  1019 1020   */
- 1020       DECLINLINE(int) pgmPoolAccessHandlerSimple(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
+      1021  DECLINLINE(int) pgmPoolAccessHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
  1021 1022                                             PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
  1022 1023  {
  …
  1033 1034       */
  1034 1035  #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
- 1035           PVMCPU      pVCpu = VMMGetCpu(pPool->CTX_SUFF(pVM));
  1036 1036      uint32_t    iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
- 1037           pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, pvFault, pCpu);
+      1037      pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, pCpu);
  1038 1038      PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
  1039 1039  #else
- 1040           pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, pvFault, pCpu);
+      1040      pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, pCpu);
  1041 1041  #endif
  1042 1042
  …
  1045 1045       */
  1046 1046      uint32_t cb;
- 1047           int rc = EMInterpretInstructionCPU(pVM, pCpu, pRegFrame, pvFault, &cb);
+      1047      int rc = EMInterpretInstructionCPU(pVM, pVCpu, pCpu, pRegFrame, pvFault, &cb);
  1048 1048      if (RT_SUCCESS(rc))
  1049 1049          pRegFrame->rip += pCpu->opsize;
  …
  1093 1093      PPGMPOOL        pPool = pVM->pgm.s.CTX_SUFF(pPool);
  1094 1094      PPGMPOOLPAGE    pPage = (PPGMPOOLPAGE)pvUser;
+      1095      PVMCPU          pVCpu = VMMGetCpu(pVM);
+      1096
  1095 1097      LogFlow(("pgmPoolAccessHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault));
  1096 1098
  …
  1105 1107       */
  1106 1108      DISCPUSTATE Cpu;
- 1107           int rc = EMInterpretDisasOne(pVM, pRegFrame, &Cpu, NULL);
+      1109      int rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, &Cpu, NULL);
  1108 1110      AssertRCReturn(rc, rc);
  1109 1111
  …
  1123 1125          if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
  1124 1126          {
- 1125                    rc = pgmPoolAccessHandlerSimple(pVM, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
+      1127               rc = pgmPoolAccessHandlerSimple(pVM, pVCpu, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
  1126 1128               STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);
  1127 1129               return rc;
  …
  1133 1135           */
  1134 1136          if (    Cpu.pCurInstr->opcode == OP_STOSWD
- 1135                   &&  CPUMGetGuestCPL(pVM, pRegFrame) == 0
+      1137              &&  CPUMGetGuestCPL(pVCpu, pRegFrame) == 0
  1136 1138              &&  pRegFrame->ecx <= 0x20
  1137 1139              &&  pRegFrame->ecx * 4 <= PAGE_SIZE - ((uintptr_t)pvFault & PAGE_OFFSET_MASK)
  …
  1164 1166       * the reuse detection must be fixed.
  1165 1167       */
- 1166           rc = pgmPoolAccessHandlerFlush(pVM, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
+      1168      rc = pgmPoolAccessHandlerFlush(pVM, pVCpu, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
  1167 1169      if (rc == VINF_EM_RAW_EMULATE_INSTR && fReused)
  1168 1170          rc = VINF_SUCCESS;
  …
  1720 1722           * the heap size should suffice. */
  1721 1723          AssertFatalRC(rc);
- 1722               Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
+      1724          Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
  1723 1725      }
  1724 1726      pPage->fMonitored = true;
  …
  1817 1819          rc = PGMHandlerPhysicalDeregister(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1));
  1818 1820          AssertFatalRC(rc);
- 1819               AssertMsg(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3),
- 1820                         ("%#x %#x\n", pVM->pgm.s.fSyncFlags, pVM->fForcedActions));
+      1821          AssertMsg(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3),
+      1822                    ("%#x %#x\n", pVM->pgm.s.fGlobalSyncFlags, pVM->fForcedActions));
  1821 1823      }
  1822 1824      pPage->fMonitored = false;
  …
  2068 2070       * sometimes refered to as a 'lightweight flush'.
  2069 2071       */
- 2070           if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
+      2072      if (!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
  2071 2073          pgmPoolMonitorModifiedClearAll(pVM);
  2072 2074      else
  2073 2075      {
  2074 2076  # ifdef IN_RING3 /* Don't flush in ring-0 or raw mode, it's taking too long. */
- 2075               pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;
+      2077          /** @todo SMP support! */
+      2078          Assert(pVM->cCPUs == 1);
+      2079          pVM->pgm.s.fGlobalSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;
  2076 2080          pgmPoolClearAll(pVM);
  2077 2081  # else  /* !IN_RING3 */
  …
  2709 2713      if (rc == VINF_PGM_GCPHYS_ALIASED)
  2710 2714      {
- 2711               pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+      2715          pVM->pgm.s.fGlobalSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
  2712 2716          VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
  2713 2717          rc = VINF_PGM_SYNC_CR3;
  …
  3811 3815      }
  3812 3816
+      3817      /* @todo Need to synchronize this across all VCPUs! */
+      3818      Assert(pVM->cCPUs == 1);
+      3819      PVMCPU pVCpu = &pVM->aCpus[0];  /* to get it compiled... */
+      3820
  3813 3821      /* Unmap the old CR3 value before flushing everything. */
- 3814           int rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
+      3822      int rc = PGM_BTH_PFN(UnmapCR3, pVCpu)(pVM, pVCpu);
  3815 3823      AssertRC(rc);
  3816 3824
  3817 3825      /* Exit the current shadow paging mode as well; nested paging and EPT use a root CR3 which will get flushed here. */
- 3818           rc = PGM_SHW_PFN(Exit, pVM)(pVM);
+      3826      rc = PGM_SHW_PFN(Exit, pVCpu)(pVM, pVCpu);
  3819 3827      AssertRC(rc);
  3820 3828
  …
  3966 3974       * EPT use a root CR3 which will get flushed here.
  3967 3975       */
- 3968           pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
- 3969           rc = PGMR3ChangeMode(pVM, PGMGetGuestMode(pVM));
+      3976      pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
+      3977      rc = PGMR3ChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu));
  3970 3978      AssertRC(rc);
  3971 3979
  …
  4022 4030                    || pPage->enmKind == PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD
  4023 4031                    || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
- 4024                         ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(pPool->CTX_SUFF(pVM)), pPage->Core.Key, pPage->enmKind));
+      4032                    ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pPool->CTX_SUFF(pVM))), pPage->Core.Key, pPage->enmKind));
  4025 4033          Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
  4026 4034          return VINF_SUCCESS;
  …
  4199 4207      /** @todo CSAM/PGMPrefetchPage messes up here during CSAMR3CheckGates
  4200 4208       *  (TRPMR3SyncIDT) because of FF priority. Try fix that?
- 4201            *  Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)); */
+      4209       *  Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)); */
  4202 4210
  4203 4211  #ifdef PGMPOOL_WITH_CACHE
  …