VirtualBox

Changeset 104548 in vbox


Timestamp: May 8, 2024 12:26:12 PM
Author:    vboxsync
Message:   VMM/PGM: Some EMT asserting and spaces. bugref:10687
Location:  trunk/src/VBox/VMM/VMMAll
Files:     3 edited

Legend:

    (no prefix)  Unmodified
    +            Added
    -            Removed
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r103417 → r104548

      * Gets effective Guest OS page information.
      *
    - * When GCPtr is in a big page, the function will return as if it was a normal
    - * 4KB page. If the need for distinguishing between big and normal page becomes
    - * necessary at a later point, a PGMGstGetPage() will be created for that
    - * purpose.
    - *
      * @returns VBox status code.
      * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
      * @param   GCPtr       Guest Context virtual address of the page.
      * @param   pWalk       Where to store the page walk information.
    + * @thread  EMT(pVCpu)
      */
     VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
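
    This hunk only touches the doc comment: the big-page note is dropped and a @thread tag is added,
    recording that PGMGstGetPage may only be called on the emulation thread (EMT) bound to pVCpu.
    Below is a minimal, self-contained sketch of that documentation convention; the helper name and
    parameter are hypothetical and serve only to show how the @thread tag names the one thread that
    is allowed to call a function.

    #include <stdio.h>

    /**
     * Hypothetical per-vCPU helper, shown only to illustrate the @thread tag.
     *
     * @returns 0 on success.
     * @param   idCpu   Index of the virtual CPU whose state is inspected.
     * @thread  EMT(idCpu)
     */
    static int exampleGetGuestPageInfo(unsigned idCpu)
    {
        printf("walking guest page tables for vCPU %u\n", idCpu);
        return 0;
    }

    int main(void)
    {
        return exampleGetGuestPageInfo(0);
    }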
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r98103 → r104548

         int rc;

    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     /** @def PGM_GST_SLAT_WALK
      * Macro to perform guest second-level address translation (EPT or Nested).
    …
      * @param   a_GCPhysOut     Where to store the guest-physical address (result).
      */
    -# define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
    +#  define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
         do { \
             if ((a_pVCpu)->pgm.s.enmGuestSlatMode == PGMSLAT_EPT) \
    …
             } \
         } while (0)
    -#endif
    +# endif

         /*
    …
              */
             RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
    -#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +#  ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
             PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
    -#endif
    +#  endif
             rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
             if (RT_SUCCESS(rc)) { /* probable */ }
    …
             if (RT_SUCCESS(rc)) { /* probable */ }
             else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
    -#endif
    +# endif
         }
         {
    …
             else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);

    -# if PGM_GST_TYPE == PGM_TYPE_AMD64
    +#  if PGM_GST_TYPE == PGM_TYPE_AMD64
             fEffective &= (Pdpe.u & (  X86_PDPE_P   | X86_PDPE_RW  | X86_PDPE_US
                                      | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A));
             fEffective |= Pdpe.u & X86_PDPE_LM_NX;
    -# else
    +#  else
             /*
              * NX in the legacy-mode PAE PDPE is reserved. The valid check above ensures the NX bit is not set.
    …
             fEffective = X86_PDPE_P | X86_PDPE_RW  | X86_PDPE_US | X86_PDPE_A
                        | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
    -# endif
    +#  endif
             pWalk->fEffective = fEffective;

    …
              */
             RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
    -# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    +#  ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
             PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
    -# endif
    +#  endif
             rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
             if (RT_SUCCESS(rc)) { /* probable */ }
    …
             return rc;
         }
    -#  endif
    +# endif

         /*
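
    These hunks are the "spaces" half of the commit message: each preprocessor directive gains one
    space after the '#' per level of conditional nesting, so the structure of nested #ifdef blocks is
    visible at a glance. A standalone sketch of the convention, with made-up macro names:

    #include <stdio.h>

    /* Hypothetical feature switches, for illustration only. */
    #define OUTER_FEATURE 1
    #define INNER_FEATURE 1

    /* One space after '#' per nesting level, mirroring the style applied above. */
    #ifdef OUTER_FEATURE
    # ifdef INNER_FEATURE
    #  define GREETING "both features enabled"
    # else
    #  define GREETING "outer feature only"
    # endif
    #else
    # define GREETING "no features"
    #endif

    int main(void)
    {
        printf("%s\n", GREETING);
        return 0;
    }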
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r104256 → r104548

      * @remark  Avoid calling this API from within critical sections (other than
      *          the PGM one) because of the deadlock risk.
    - * @thread  EMT
    + * @thread  EMT(pVCpu)
      */
     VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
    …
      * @param   GCPtr       The guest pointer to convert.
      * @param   pGCPhys     Where to store the GC physical address.
    + * @thread  EMT(pVCpu)
      */
     VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
     {
    +    VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
         PGMPTWALK Walk;
         int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
    …
      * @param   GCPtr       The guest pointer to convert.
      * @param   pHCPhys     Where to store the HC physical address.
    + * @thread  EMT(pVCpu)
      */
     VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
     {
    +    VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
         PVMCC     pVM = pVCpu->CTX_SUFF(pVM);
         PGMPTWALK Walk;
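
    The added VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM)) statements are the "EMT asserting" half of the
    change: they check at runtime that whoever calls these per-vCPU conversion helpers is actually
    running on that vCPU's emulation thread (EMT). The real macro lives in the VMM headers and is
    not reproduced here; the following is only a simplified, self-contained sketch of the same idea
    using pthreads, with made-up names.

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Simplified stand-in for a per-vCPU structure: the real PVMCPUCC records,
     * among much else, which native thread serves as the vCPU's EMT. */
    typedef struct EXAMPLECPU
    {
        pthread_t hEmt;   /* native handle of the emulation thread bound to this vCPU */
    } EXAMPLECPU;

    /* Rough equivalent of an EMT assertion: the caller must be running on the
     * thread registered as this vCPU's EMT. */
    #define EXAMPLE_ASSERT_EMT(a_pCpu) \
        assert(pthread_equal(pthread_self(), (a_pCpu)->hEmt))

    /* A helper that, like the functions patched above, refuses (in asserting
     * builds) to be called from the wrong thread. */
    static void exampleTouchPerCpuPagingState(EXAMPLECPU *pCpu)
    {
        EXAMPLE_ASSERT_EMT(pCpu);
        /* ... per-vCPU page-walk state can be used safely here ... */
    }

    int main(void)
    {
        EXAMPLECPU Cpu = { pthread_self() };   /* pretend the current thread is the EMT */
        exampleTouchPerCpuPagingState(&Cpu);   /* passes: we are on the registered thread */
        printf("EMT assertion passed\n");
        return 0;
    }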
