VirtualBox

Changeset 73261 in vbox for trunk/src/VBox/VMM


Timestamp: Jul 20, 2018 11:00:53 AM
Author:    vboxsync
Message:   PGM: Moving guest and shadow mode Enter and Exit functions to PGMAll. bugref:9044
Location:  trunk/src/VBox/VMM
Files:     6 edited
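In outline, the diffs below drop the ring-3-only #ifdef around each mode's Enter and Exit table entries (and the NULL placeholders those entries had in ring-0 and raw-mode builds), leaving only Relocate guarded by IN_RING3, and move the Enter/Exit bodies into the all-context PGMAll headers. A minimal, hypothetical C sketch of that table pattern, using simplified stand-ins rather than the actual VMM types and entry points:

    /* Hypothetical sketch only: stand-ins for the PGM mode-data tables, not the real VMM code. */
    #include <stdio.h>

    typedef struct MODEDATA
    {
        int (*pfnEnter)(int iCpu);     /* after this changeset: set in every context */
        int (*pfnExit)(int iCpu);
    #ifdef IN_RING3
        int (*pfnRelocate)(int iCpu);  /* still ring-3 only */
    #endif
    } MODEDATA;

    static int demoEnter(int iCpu) { printf("enter, cpu %d\n", iCpu); return 0; }
    static int demoExit(int iCpu)  { printf("exit, cpu %d\n", iCpu);  return 0; }

    /* No "NULL, NULL, NULL" placeholders are needed for ring-0/raw-mode builds any more. */
    static const MODEDATA g_aModeData[] =
    {
        { demoEnter, demoExit },
    };

    int main(void)
    {
        g_aModeData[0].pfnEnter(0);
        return g_aModeData[0].pfnExit(0);
    }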

Legend:

    ' '  unmodified line
    '+'  added line
    '-'  removed line
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r73253 → r73261

@@ -533 +533 @@
         PGM_GST_NAME_REAL(ModifyPage),
         PGM_GST_NAME_REAL(GetPDE),
-#ifdef IN_RING3
         PGM_GST_NAME_REAL(Enter),
         PGM_GST_NAME_REAL(Exit),
+#ifdef IN_RING3
         PGM_GST_NAME_REAL(Relocate),
-#else
-        NULL, NULL, NULL,
 #endif
     },

@@ -546 +544 @@
         PGM_GST_NAME_PROT(ModifyPage),
         PGM_GST_NAME_PROT(GetPDE),
-#ifdef IN_RING3
         PGM_GST_NAME_PROT(Enter),
         PGM_GST_NAME_PROT(Exit),
+#ifdef IN_RING3
         PGM_GST_NAME_PROT(Relocate),
-#else
-        NULL, NULL, NULL,
 #endif
     },

@@ -559 +555 @@
         PGM_GST_NAME_32BIT(ModifyPage),
         PGM_GST_NAME_32BIT(GetPDE),
-#ifdef IN_RING3
         PGM_GST_NAME_32BIT(Enter),
         PGM_GST_NAME_32BIT(Exit),
+#ifdef IN_RING3
         PGM_GST_NAME_32BIT(Relocate),
-#else
-        NULL, NULL, NULL,
 #endif
     },

@@ -572 +566 @@
         PGM_GST_NAME_PAE(ModifyPage),
         PGM_GST_NAME_PAE(GetPDE),
-#ifdef IN_RING3
         PGM_GST_NAME_PAE(Enter),
         PGM_GST_NAME_PAE(Exit),
+#ifdef IN_RING3
         PGM_GST_NAME_PAE(Relocate),
-#else
-        NULL, NULL, NULL,
 #endif
     },

@@ -586 +578 @@
         PGM_GST_NAME_AMD64(ModifyPage),
         PGM_GST_NAME_AMD64(GetPDE),
-# ifdef IN_RING3
         PGM_GST_NAME_AMD64(Enter),
         PGM_GST_NAME_AMD64(Exit),
+# ifdef IN_RING3
         PGM_GST_NAME_AMD64(Relocate),
-# else
-        NULL, NULL, NULL,
 # endif
     },

@@ -610 +600 @@
         PGM_SHW_NAME_32BIT(GetPage),
         PGM_SHW_NAME_32BIT(ModifyPage),
-#ifdef IN_RING3
         PGM_SHW_NAME_32BIT(Enter),
         PGM_SHW_NAME_32BIT(Exit),
+#ifdef IN_RING3
         PGM_SHW_NAME_32BIT(Relocate),
-#else
-        NULL, NULL, NULL,
 #endif
     },

@@ -622 +610 @@
         PGM_SHW_NAME_PAE(GetPage),
         PGM_SHW_NAME_PAE(ModifyPage),
-#ifdef IN_RING3
         PGM_SHW_NAME_PAE(Enter),
         PGM_SHW_NAME_PAE(Exit),
+#ifdef IN_RING3
         PGM_SHW_NAME_PAE(Relocate),
-#else
-        NULL, NULL, NULL,
 #endif
     },

@@ -635 +621 @@
         PGM_SHW_NAME_AMD64(GetPage),
         PGM_SHW_NAME_AMD64(ModifyPage),
-# ifdef IN_RING3
         PGM_SHW_NAME_AMD64(Enter),
         PGM_SHW_NAME_AMD64(Exit),
+# ifdef IN_RING3
         PGM_SHW_NAME_AMD64(Relocate),
-# else
-        NULL, NULL, NULL,
 # endif
     },

@@ -647 +631 @@
         PGM_SHW_NAME_NESTED_32BIT(GetPage),
         PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
-# ifdef IN_RING3
         PGM_SHW_NAME_NESTED_32BIT(Enter),
         PGM_SHW_NAME_NESTED_32BIT(Exit),
+# ifdef IN_RING3
         PGM_SHW_NAME_NESTED_32BIT(Relocate),
-# else
-        NULL, NULL, NULL,
 # endif
     },

@@ -659 +641 @@
         PGM_SHW_NAME_NESTED_PAE(GetPage),
         PGM_SHW_NAME_NESTED_PAE(ModifyPage),
-# ifdef IN_RING3
         PGM_SHW_NAME_NESTED_PAE(Enter),
         PGM_SHW_NAME_NESTED_PAE(Exit),
+# ifdef IN_RING3
         PGM_SHW_NAME_NESTED_PAE(Relocate),
-# else
-        NULL, NULL, NULL,
 # endif
     },

@@ -671 +651 @@
         PGM_SHW_NAME_NESTED_AMD64(GetPage),
         PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
-# ifdef IN_RING3
         PGM_SHW_NAME_NESTED_AMD64(Enter),
         PGM_SHW_NAME_NESTED_AMD64(Exit),
+# ifdef IN_RING3
         PGM_SHW_NAME_NESTED_AMD64(Relocate),
-# else
-        NULL, NULL, NULL,
 # endif
     },

@@ -683 +661 @@
         PGM_SHW_NAME_EPT(GetPage),
         PGM_SHW_NAME_EPT(ModifyPage),
-# ifdef IN_RING3
         PGM_SHW_NAME_EPT(Enter),
         PGM_SHW_NAME_EPT(Exit),
+# ifdef IN_RING3
         PGM_SHW_NAME_EPT(Relocate),
-# else
-        NULL, NULL, NULL,
 # endif
     },
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r73199 → r73261

@@ -37 +37 @@
 #endif
 RT_C_DECLS_END
+
+
+/**
+ * Enters the guest mode.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   GCPhysCR3   The physical address from the CR3 register.
+ */
+PGM_GST_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
+{
+    /*
+     * Map and monitor CR3
+     */
+    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
+    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
+    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
+    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
+}
+
+
+/**
+ * Exits the guest mode.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+PGM_GST_DECL(int, Exit)(PVMCPU pVCpu)
+{
+    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
+    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
+    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
+    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
+}
 
 
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    r73246 → r73261

@@ -176 +176 @@
 PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
 PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
-#ifdef IN_RING3  /* for now */
 PGM_SHW_DECL(int, Enter)(PVMCPU pVCpu, bool fIs64BitsPagingMode);
 PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu);
+#ifdef IN_RING3
 PGM_SHW_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
 #endif
 RT_C_DECLS_END
 
+
+/**
+ * Enters the shadow mode.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu                   The cross context virtual CPU structure.
+ * @param   fIs64BitsPagingMode     New shadow paging mode is for 64 bits? (only relevant for 64 bits guests on a 32 bits AMD-V nested paging host)
+ */
+PGM_SHW_DECL(int, Enter)(PVMCPU pVCpu, bool fIs64BitsPagingMode)
+{
+#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
+
+# if PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) && HC_ARCH_BITS == 32
+    /* Must distinguish between 32 and 64 bits guest paging modes as we'll use
+       a different shadow paging root/mode in both cases. */
+    RTGCPHYS     GCPhysCR3 = (fIs64BitsPagingMode) ? RT_BIT_64(63) : RT_BIT_64(62);
+# else
+    RTGCPHYS     GCPhysCR3 = RT_BIT_64(63); NOREF(fIs64BitsPagingMode);
+# endif
+    PPGMPOOLPAGE pNewShwPageCR3;
+    PVM          pVM       = pVCpu->CTX_SUFF(pVM);
+
+    Assert((HMIsNestedPagingActive(pVM) || VM_IS_NEM_ENABLED(pVM)) == pVM->pgm.s.fNestedPaging);
+    Assert(pVM->pgm.s.fNestedPaging);
+    Assert(!pVCpu->pgm.s.pShwPageCR3R3);
+
+    pgmLock(pVM);
+
+    int rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_ROOT_NESTED, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
+                          NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
+                          &pNewShwPageCR3);
+    AssertFatalRC(rc);
+
+    pVCpu->pgm.s.pShwPageCR3R3 = (R3PTRTYPE(PPGMPOOLPAGE))MMHyperCCToR3(pVM, pNewShwPageCR3);
+    pVCpu->pgm.s.pShwPageCR3RC = (RCPTRTYPE(PPGMPOOLPAGE))MMHyperCCToRC(pVM, pNewShwPageCR3);
+    pVCpu->pgm.s.pShwPageCR3R0 = (R0PTRTYPE(PPGMPOOLPAGE))MMHyperCCToR0(pVM, pNewShwPageCR3);
+
+    pgmUnlock(pVM);
+
+    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key));
+#else
+    NOREF(pVCpu); NOREF(fIs64BitsPagingMode);
+#endif
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Exits the shadow mode.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu)
+{
+#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
+    {
+        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+
+        pgmLock(pVM);
+
+        /* Do *not* unlock this page as we have two of them floating around in the 32-bit host & 64-bit guest case.
+         * We currently assert when you try to free one of them; don't bother to really allow this.
+         *
+         * Note that this is two nested paging root pages max. This isn't a leak. They are reused.
+         */
+        /* pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); */
+
+        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), NIL_PGMPOOL_IDX, UINT32_MAX);
+        pVCpu->pgm.s.pShwPageCR3R3 = 0;
+        pVCpu->pgm.s.pShwPageCR3R0 = 0;
+        pVCpu->pgm.s.pShwPageCR3RC = 0;
+
+        pgmUnlock(pVM);
+
+        Log(("Leave nested shadow paging mode\n"));
+    }
+#else
+    RT_NOREF_PV(pVCpu);
+#endif
+    return VINF_SUCCESS;
+}
 
 
  • trunk/src/VBox/VMM/VMMR3/PGMGst.h

    r73250 → r73261

@@ -5 +5 @@
 
 /*
- * Copyright (C) 2006-2017 Oracle Corporation
+ * Copyright (C) 2006-2018 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as

@@ -17 +17 @@
 
 
-/*******************************************************************************
-*   Internal Functions                                                         *
-*******************************************************************************/
+/*********************************************************************************************************************************
+*   Internal Functions                                                                                                           *
+*********************************************************************************************************************************/
 RT_C_DECLS_BEGIN
-/* r3 */
+/* all */
 PGM_GST_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
-PGM_GST_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
 PGM_GST_DECL(int, Exit)(PVMCPU pVCpu);
-
-/* all */
 PGM_GST_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
 PGM_GST_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
 PGM_GST_DECL(int, GetPDE)(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE);
+
+/* r3 */
+PGM_GST_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
 RT_C_DECLS_END
-
-
-/**
- * Enters the guest mode.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   GCPhysCR3   The physical address from the CR3 register.
- */
-PGM_GST_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
-{
-    /*
-     * Map and monitor CR3
-     */
-    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
-    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
-    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
-    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
-}
 
 

@@ -72 +53 @@
 }
 
-
-/**
- * Exits the guest mode.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-PGM_GST_DECL(int, Exit)(PVMCPU pVCpu)
-{
-    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
-    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
-    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
-    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
-}
-
  • trunk/src/VBox/VMM/VMMR3/PGMShw.h

    r73246 → r73261

@@ -5 +5 @@
 
 /*
- * Copyright (C) 2006-2017 Oracle Corporation
+ * Copyright (C) 2006-2018 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as

@@ -16 +16 @@
  */
 
-/*******************************************************************************
-*   Defined Constants And Macros                                               *
-*******************************************************************************/
+/*********************************************************************************************************************************
+*   Defined Constants And Macros                                                                                                 *
+*********************************************************************************************************************************/
 #undef SHWPT
 #undef PSHWPT

@@ -106 +106 @@
 
 
-/*******************************************************************************
-*   Internal Functions                                                         *
-*******************************************************************************/
+/*********************************************************************************************************************************
+*   Internal Functions                                                                                                           *
+*********************************************************************************************************************************/
 RT_C_DECLS_BEGIN
-/* r3 */
+/* all */
 PGM_SHW_DECL(int, Enter)(PVMCPU pVCpu, bool fIs64BitsPagingMode);
-PGM_SHW_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
 PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu);
-
-/* all */
 PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
 PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
+
+/* r3 */
+PGM_SHW_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
 RT_C_DECLS_END
-
-
-/**
- * Enters the shadow mode.
- *
- * @returns VBox status code.
- * @param   pVCpu                   The cross context virtual CPU structure.
- * @param   fIs64BitsPagingMode     New shadow paging mode is for 64 bits? (only relevant for 64 bits guests on a 32 bits AMD-V nested paging host)
- */
-PGM_SHW_DECL(int, Enter)(PVMCPU pVCpu, bool fIs64BitsPagingMode)
-{
-#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
-
-# if PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) && HC_ARCH_BITS == 32
-    /* Must distinguish between 32 and 64 bits guest paging modes as we'll use
-       a different shadow paging root/mode in both cases. */
-    RTGCPHYS     GCPhysCR3 = (fIs64BitsPagingMode) ? RT_BIT_64(63) : RT_BIT_64(62);
-# else
-    RTGCPHYS     GCPhysCR3 = RT_BIT_64(63); NOREF(fIs64BitsPagingMode);
-# endif
-    PPGMPOOLPAGE pNewShwPageCR3;
-    PVM          pVM       = pVCpu->pVMR3;
-
-    Assert((HMIsNestedPagingActive(pVM) || VM_IS_NEM_ENABLED(pVM)) == pVM->pgm.s.fNestedPaging);
-    Assert(pVM->pgm.s.fNestedPaging);
-    Assert(!pVCpu->pgm.s.pShwPageCR3R3);
-
-    pgmLock(pVM);
-
-    int rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_ROOT_NESTED, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
-                          NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
-                          &pNewShwPageCR3);
-    AssertFatalRC(rc);
-
-    pVCpu->pgm.s.pShwPageCR3R3 = pNewShwPageCR3;
-
-    pVCpu->pgm.s.pShwPageCR3RC = MMHyperCCToRC(pVM, pVCpu->pgm.s.pShwPageCR3R3);
-    pVCpu->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVCpu->pgm.s.pShwPageCR3R3);
-
-    pgmUnlock(pVM);
-
-    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key));
-#else
-    NOREF(pVCpu); NOREF(fIs64BitsPagingMode);
-#endif
-    return VINF_SUCCESS;
-}
 
 

@@ -181 +134 @@
 }
 
-
-/**
- * Exits the shadow mode.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu)
-{
-#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
-    PVM pVM = pVCpu->pVMR3;
-    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
-    {
-        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-
-        pgmLock(pVM);
-
-        /* Do *not* unlock this page as we have two of them floating around in the 32-bit host & 64-bit guest case.
-         * We currently assert when you try to free one of them; don't bother to really allow this.
-         *
-         * Note that this is two nested paging root pages max. This isn't a leak. They are reused.
-         */
-        /* pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); */
-
-        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), NIL_PGMPOOL_IDX, UINT32_MAX);
-        pVCpu->pgm.s.pShwPageCR3R3 = 0;
-        pVCpu->pgm.s.pShwPageCR3R0 = 0;
-        pVCpu->pgm.s.pShwPageCR3RC = 0;
-
-        pgmUnlock(pVM);
-
-        Log(("Leave nested shadow paging mode\n"));
-    }
-#else
-    RT_NOREF_PV(pVCpu);
-#endif
-    return VINF_SUCCESS;
-}
-
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r73250 → r73261

@@ -3131 +3131 @@
     DECLCALLBACKMEMBER(int,         pfnEnter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
     DECLCALLBACKMEMBER(int,         pfnExit)(PVMCPU pVCpu);
+#ifdef IN_RING3
     DECLCALLBACKMEMBER(int,         pfnRelocate)(PVMCPU pVCpu, RTGCPTR offDelta); /**< Only in ring-3. */
+#endif
 } PGMMODEDATAGST;
 