VirtualBox

Changeset 92186 in vbox for trunk


Ignore:
Timestamp:
Nov 3, 2021 8:31:27 AM (3 years ago)
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:10092 Separate the guest's second-level address translation into a separate enum and adjust templates accordingly, plus other related bits.

Location:
trunk
Files:
1 added
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/pgm.h

    r92164 r92186  
    274274} PGMMODE;
    275275
     276/**
     277 * Second level address translation mode.
     278 */
     279typedef enum PGMSLAT
     280{
     281    /** The usual invalid value. */
     282    PGMSLAT_INVALID = 0,
     283    /** No second level translation. */
     284    PGMSLAT_DIRECT,
     285    /** Intel Extended Page Tables (EPT). */
     286    PGMSLAT_EPT,
     287    /** AMD-V Nested Paging 32-bit. */
     288    PGMSLAT_32BIT,
     289    /** AMD-V Nested Paging PAE. */
     290    PGMSLAT_PAE,
     291    /** AMD-V Nested Paging 64-bit. */
     292    PGMSLAT_AMD64,
     293    /** 32bit hackishness. */
     294    PGMSLAT_32BIT_HACK = 0x7fffffff
     295} PGMSLAT;
     296
    276297/** Macro for checking if the guest is using paging.
    277298 * @param enmMode   PGMMODE_*.
     
    351372VMMDECL(PGMMODE)    PGMGetHostMode(PVM pVM);
    352373VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode);
     374#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     375VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode);
     376#endif
    353377VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu);
    354378VMM_INT_DECL(void)  PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe);
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r92046 r92186  
    5050DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
    5151DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
     52#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     53static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALKGST pWalk);
     54#endif
    5255static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
    5356static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
     57
     58
     59#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     60/* Guest - EPT SLAT is identical for all guest paging modes. */
     61# define PGM_SLAT_TYPE               PGM_SLAT_TYPE_EPT
     62# define PGM_GST_TYPE                PGM_TYPE_EPT
     63# include "PGMGstDefs.h"
     64# include "PGMAllGstSlatEpt.h"
     65# undef PGM_GST_TYPE
     66#endif
    5467
    5568
     
    489502#endif /* VBOX_WITH_64_BITS_GUESTS */
    490503
    491 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    492 /* Guest - EPT mode */
    493 # define PGM_GST_TYPE               PGM_TYPE_EPT
    494 # define PGM_GST_NAME(name)         PGM_GST_NAME_EPT(name)
    495 # define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_EPT(name)
    496 # define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
    497 # include "PGMGstDefs.h"
    498 # include "PGMAllGst.h"
    499 # include "PGMAllBth.h"
    500 # undef BTH_PGMPOOLKIND_PT_FOR_PT
    501 # undef PGM_BTH_NAME
    502 # undef PGM_GST_TYPE
    503 # undef PGM_GST_NAME
    504 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
    505 
    506504#undef PGM_SHW_TYPE
    507505#undef PGM_SHW_NAME
     
    629627# endif
    630628    },
    631 #endif
    632 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    633     { UINT32_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_NESTED_32BIT */
    634     { UINT32_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_NESTED_PAE   */
    635     { UINT32_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_NESTED_AMD64 */
    636     {
    637         PGM_TYPE_EPT,
    638         PGM_GST_NAME_EPT(GetPage),
    639         PGM_GST_NAME_EPT(ModifyPage),
    640         PGM_GST_NAME_EPT(Enter),
    641         PGM_GST_NAME_EPT(Exit),
    642 # ifdef IN_RING3
    643         PGM_GST_NAME_EPT(Relocate),
    644 # endif
    645     }
    646629#endif
    647630};
     
    893876    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE   - illegal */
    894877    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
    895 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    896     PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_EPT, PGM_BTH_NAME_EPT_EPT),
    897 #else
    898878    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT          - illegal */
    899 #endif
    900879    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE         - illegal */
    901880
     
    19661945            return VERR_PGM_NOT_USED_IN_MODE;
    19671946
    1968 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    19691947        case PGMMODE_EPT:
    1970             pWalk->enmType = PGMPTWALKGSTTYPE_EPT;
    1971             return PGM_GST_NAME_EPT(Walk)(pVCpu, GCPtr, &pWalk->u.Ept);
    1972 #else
    1973         case PGMMODE_EPT:
    1974 #endif
    19751948        case PGMMODE_NESTED_32BIT:
    19761949        case PGMMODE_NESTED_PAE:
     
    19821955    }
    19831956}
     1957
     1958
     1959#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     1960/**
     1961 * Performs a guest second-level address translation (SLAT).
     1962 *
     1963 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
     1964 * function.
     1965 *
     1966 * @returns VBox status code.
     1967 * @retval  VINF_SUCCESS on success.
     1968 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
     1969 * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
     1970 *          not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
     1971 *
     1972 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
     1973 * @param   GCPhysNested        The nested-guest physical address being translated
     1974 *                              (input).
     1975 * @param   fIsLinearAddrValid  Whether the linear address in @a GCPtrNested is
     1976 *                              valid. This indicates the SLAT is being
     1977 *                              performed while translating a nested-guest linear address.
     1978 * @param   GCPtrNested         The nested-guest virtual address that initiated the
     1979 *                              SLAT. If none, pass NIL_RTGCPTR.
     1980 * @param   pWalk               Where to return the walk result. This is valid for
     1981 *                              some error codes as well.
     1982 */
     1983static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
     1984                          PPGMPTWALKGST pWalk)
     1985{
     1986    Assert(pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT);
     1987    switch (pVCpu->pgm.s.enmGuestSlatMode)
     1988    {
     1989        case PGMSLAT_EPT:
     1990            pWalk->enmType = PGMPTWALKGSTTYPE_EPT;
     1991            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, &pWalk->u.Ept);
     1992
     1993        default:
     1994            AssertFailed();
     1995            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
     1996            return VERR_PGM_NOT_USED_IN_MODE;
     1997    }
     1998}
     1999#endif
    19842000
    19852001
     
    29062922     */
    29072923    PGMMODE enmGuestMode;
    2908 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2909     if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
    2910         enmGuestMode = PGMMODE_EPT;
    2911     else
    2912 #endif
    29132924    if (cr0 & X86_CR0_PG)
    29142925    {
     
    31163127            }
    31173128            break;
    3118 
    3119 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    3120         case PGMMODE_EPT:
    3121             /* Nested paging is a requirement for nested VT-x. */
    3122             Assert(enmHostMode == PGMMODE_EPT);
    3123             break;
    3124 #endif
    31253129
    31263130        default:
     
    33403344    }
    33413345
     3346#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     3347    /* Update the guest SLAT mode if it's a nested-guest. */
     3348    if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
     3349    {
     3350        if (PGMMODE_WITH_PAGING(enmGuestMode))
     3351            pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
     3352        else
     3353            pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
     3354    }
     3355    else
     3356        Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
     3357#endif
     3358
    33423359    /* Enter the new guest mode.  */
    33433360    pVCpu->pgm.s.enmGuestMode = enmGuestMode;
     
    34843501
    34853502
     3503#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     3504/**
     3505 * Gets the SLAT mode name.
     3506 *
     3507 * @returns The read-only SLAT mode descriptive string.
     3508 * @param   enmSlatMode     The SLAT mode value.
     3509 */
     3510VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
     3511{
     3512    switch (enmSlatMode)
     3513    {
     3514        case PGMSLAT_DIRECT:        return "Direct";
     3515        case PGMSLAT_EPT:           return "EPT";
     3516        case PGMSLAT_32BIT:         return "32-bit";
     3517        case PGMSLAT_PAE:           return "PAE";
     3518        case PGMSLAT_AMD64:         return "AMD64";
     3519        default:                    return "Unknown";
     3520    }
     3521}
     3522#endif
     3523
     3524
    34863525/**
    34873526 * Gets the physical address represented in the guest CR3 as PGM sees it.
     
    38393878VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
    38403879{
    3841     pVCpu->pgm.s.uEptPtr = uEptPtr;
    3842 }
    3843 
     3880    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     3881    PGM_LOCK_VOID(pVM);
     3882    if (pVCpu->pgm.s.uEptPtr != uEptPtr)
     3883    {
     3884        pVCpu->pgm.s.uEptPtr = uEptPtr;
     3885        pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
     3886        pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
     3887    }
     3888    PGM_UNLOCK(pVM);
     3889}
     3890
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r92076 r92186  
    2323#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    2424 || PGM_GST_TYPE == PGM_TYPE_PAE \
    25  || PGM_GST_TYPE == PGM_TYPE_EPT \
    2625 || PGM_GST_TYPE == PGM_TYPE_AMD64
    2726DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
     
    7473#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    7574 || PGM_GST_TYPE == PGM_TYPE_PAE \
    76  || PGM_GST_TYPE == PGM_TYPE_EPT \
    7775 || PGM_GST_TYPE == PGM_TYPE_AMD64
    7876
     
    117115{
    118116    int rc;
     117
     118#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     119# define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
     120    do { \
     121        if ((a_pVCpu)->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT) \
     122        { \
     123            PGMPTWALKGST SlatWalk; \
     124            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk); \
     125            if (RT_SUCCESS(rcX)) \
     126                (a_GCPhysOut) = SlatWalk.u.Core.GCPhys; \
     127            else \
     128            { \
     129                (a_pWalk)->Core = SlatWalk.u.Core; \
     130                return rcX; \
     131            } \
     132        } \
     133    } while (0)
     134#endif
    119135
    120136    /*
     
    137153# if PGM_GST_TYPE == PGM_TYPE_AMD64
    138154        /*
    139          * The PMLE4.
     155         * The PML4 table.
    140156         */
    141157        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4);
     
    158174
    159175        /*
    160          * The PDPE.
     176         * The PDPT.
    161177         */
    162         rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pWalk->pPdpt);
     178        RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
     179#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     180        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
     181#endif
     182        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pWalk->pPdpt);
    163183        if (RT_SUCCESS(rc)) { /* probable */ }
    164184        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
     
    168188        if (RT_SUCCESS(rc)) { /* probable */ }
    169189        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
    170 
    171 # elif PGM_GST_TYPE == PGM_TYPE_EPT
    172         rc = pgmGstGetEptPML4PtrEx(pVCpu, &pWalk->pPml4);
    173         if (RT_SUCCESS(rc)) { /* probable */ }
    174         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);
    175 
    176         PEPTPML4E pPml4e;
    177         pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
    178         EPTPML4E  Pml4e;
    179         pWalk->Pml4e.u = Pml4e.u = pPml4e->u;
    180 
    181         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
    182         else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);
    183 
    184         if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
    185         else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);
    186 
    187         Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
    188         uint64_t const fEptAttrs     = Pml4e.u & EPT_PML4E_ATTR_MASK;
    189         uint8_t const fExecute       = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    190         uint8_t const fRead          = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
    191         uint8_t const fWrite         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    192         uint8_t const fAccessed      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    193         uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    194         pWalk->Core.fEffective = fEffective = RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
    195                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fRead & fWrite)
    196                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
    197                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
    198                                             | fEffectiveEpt;
    199 
    200         rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pWalk->pPdpt);
    201         if (RT_SUCCESS(rc)) { /* probable */ }
    202         else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
    203 # endif
     190#endif
    204191    }
    205192    {
     
    226213
    227214        /*
    228          * The PDE.
     215         * The PD.
    229216         */
    230         rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pWalk->pPd);
     217        RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
     218# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     219        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
     220# endif
     221        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pWalk->pPd);
    231222        if (RT_SUCCESS(rc)) { /* probable */ }
    232223        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
     224
    233225# elif PGM_GST_TYPE == PGM_TYPE_32BIT
    234226        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd);
    235227        if (RT_SUCCESS(rc)) { /* probable */ }
    236228        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
    237 
    238 # elif PGM_GST_TYPE == PGM_TYPE_EPT
    239         PEPTPDPTE pPdpte;
    240         pWalk->pPdpte = pPdpte = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
    241         EPTPDPTE  Pdpte;
    242         pWalk->Pdpte.u = Pdpte.u = pPdpte->u;
    243 
    244         if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ }
    245         else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);
    246 
    247         /* The order of the following 2 "if" statements matter. */
    248         if (GST_IS_PDPE_VALID(pVCpu, Pdpte))
    249         {
    250             uint64_t const fEptAttrs = Pdpte.u & EPT_PDPTE_ATTR_MASK;
    251             uint8_t const fExecute   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    252             uint8_t const fWrite     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    253             uint8_t const fAccessed  = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    254             uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    255             pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
    256                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
    257                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
    258                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
    259                                                  | fEffectiveEpt;
    260         }
    261         else if (GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte))
    262         {
    263             uint64_t const fEptAttrs  = Pdpte.u & EPT_PDPTE1G_ATTR_MASK;
    264             uint8_t const fExecute    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    265             uint8_t const fWrite      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    266             uint8_t const fAccessed   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    267             uint8_t const fDirty      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
    268             uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    269             pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,       fExecute)
    270                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW,      fWrite)
    271                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US,      1)
    272                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,       fAccessed)
    273                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D,       fDirty)
    274                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
    275                                                  | fEffectiveEpt;
    276             pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    277             pWalk->Core.fEffectiveUS = true;
    278             pWalk->Core.fEffectiveNX = !fExecute;
    279             pWalk->Core.fGigantPage  = true;
    280             pWalk->Core.fSucceeded   = true;
    281             pWalk->Core.GCPhys       = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)
    282                                      | (GCPtr & GST_GIGANT_PAGE_OFFSET_MASK);
    283             PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
    284             return VINF_SUCCESS;
    285         }
    286         else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);
    287229# endif
    288230    }
     
    302244             * We're done.
    303245             */
    304 # if PGM_GST_TYPE == PGM_TYPE_EPT
    305             uint64_t const fEptAttrs  = Pde.u & EPT_PDE2M_ATTR_MASK;
    306             uint8_t const fExecute    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    307             uint8_t const fWrite      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    308             uint8_t const fAccessed   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    309             uint8_t const fDirty      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
    310             uint32_t fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    311             pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,       fExecute)
    312                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW,      fWrite)
    313                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US,      1)
    314                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,       fAccessed)
    315                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D,       fDirty)
    316                                                  | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
    317                                                  | fEffectiveEpt;
     246# if PGM_GST_TYPE == PGM_TYPE_32BIT
     247            fEffective &= Pde.u & (X86_PDE4M_RW  | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
     248# else
     249            fEffective &= ((uint32_t)Pde.u & (X86_PDE4M_RW  | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A))
     250                        | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */;
     251# endif
     252            fEffective |= (uint32_t)Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
     253            fEffective |= (uint32_t)(Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
     254            pWalk->Core.fEffective = fEffective;
     255
    318256            pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    319             pWalk->Core.fEffectiveUS = true;
    320             pWalk->Core.fEffectiveNX = !fExecute;
    321 # else
    322 #  if PGM_GST_TYPE == PGM_TYPE_32BIT
    323              fEffective &= Pde.u & (X86_PDE4M_RW  | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
    324 #  else
    325              fEffective &= ((uint32_t)Pde.u & (X86_PDE4M_RW  | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A))
    326                          | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */;
    327 #  endif
    328              fEffective |= (uint32_t)Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
    329              fEffective |= (uint32_t)(Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
    330              pWalk->Core.fEffective = fEffective;
    331 
    332              pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    333              pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
    334 #  if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
    335              pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
    336 #  else
    337              pWalk->Core.fEffectiveNX = false;
    338 #  endif
     257            pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
     258# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
     259            pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
     260# else
     261            pWalk->Core.fEffectiveNX = false;
    339262# endif
    340263            pWalk->Core.fBigPage     = true;
    341264            pWalk->Core.fSucceeded   = true;
    342265
    343             pWalk->Core.GCPhys       = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
    344                                      | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
     266            RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
     267                               | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
     268# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     269            PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk);
     270# endif
     271            pWalk->Core.GCPhys       = GCPhysPde;
    345272            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
    346273            return VINF_SUCCESS;
     
    349276        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
    350277            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
    351 # if  PGM_GST_TYPE == PGM_TYPE_EPT
    352         uint64_t const fEptAttrs = Pde.u & EPT_PDE_ATTR_MASK;
    353         uint8_t const fExecute   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    354         uint8_t const fWrite     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    355         uint8_t const fAccessed  = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    356         uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    357         pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
    358                                              | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
    359                                              | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
    360                                              | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
    361                                              | fEffectiveEpt;
    362 # elif PGM_GST_TYPE == PGM_TYPE_32BIT
     278# if PGM_GST_TYPE == PGM_TYPE_32BIT
    363279        pWalk->Core.fEffective = fEffective &= Pde.u & (X86_PDE_RW  | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
    364280# else
     
    368284
    369285        /*
    370          * The PTE.
     286         * The PT.
    371287         */
    372         rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt);
     288        RTGCPHYS GCPhysPt = GST_GET_PDE_GCPHYS(Pde);
     289# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     290        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk);
     291# endif
     292        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pWalk->pPt);
    373293        if (RT_SUCCESS(rc)) { /* probable */ }
    374294        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
     
    389309         * We're done.
    390310         */
    391 # if PGM_GST_TYPE == PGM_TYPE_EPT
    392         uint64_t const fEptAttrs  = Pte.u & EPT_PTE_ATTR_MASK;
    393         uint8_t const fExecute    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
    394         uint8_t const fWrite      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
    395         uint8_t const fAccessed   = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
    396         uint8_t const fDirty      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
    397         uint32_t fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
    398         pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,       fExecute)
    399                                              | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW,      fWrite)
    400                                              | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US,      1)
    401                                              | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,       fAccessed)
    402                                              | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D,       fDirty)
    403                                              | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
    404                                              | fEffectiveEpt;
    405         pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    406         pWalk->Core.fEffectiveUS = true;
    407         pWalk->Core.fEffectiveNX = !fExecute;
    408 # else
    409 #  if PGM_GST_TYPE == PGM_TYPE_32BIT
     311# if PGM_GST_TYPE == PGM_TYPE_32BIT
    410312        fEffective &= Pte.u & (X86_PTE_RW  | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
    411 #  else
     313# else
    412314        fEffective &= ((uint32_t)Pte.u & (X86_PTE_RW  | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A))
    413                     | ((uint32_t)(Pte.u >> 63) ^ 1) /*NX */;
    414 #  endif
     315                   | ((uint32_t)(Pte.u >> 63) ^ 1) /*NX */;
     316# endif
    415317        fEffective |= (uint32_t)Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
    416318        pWalk->Core.fEffective = fEffective;
    417319
    418320        pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
    419 #  if PGM_GST_TYPE == PGM_TYPE_EPT
    420         pWalk->Core.fEffectiveUS = true;
    421 #  else
    422321        pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
    423 #  endif
    424 #  if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
     322# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
    425323        pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
    426 #  else
     324# else
    427325        pWalk->Core.fEffectiveNX = false;
    428 #  endif
    429326# endif
    430327        pWalk->Core.fSucceeded   = true;
    431         pWalk->Core.GCPhys       = GST_GET_PDE_GCPHYS(Pte)      /** @todo Shouldn't this be PTE_GCPHYS? */
    432                                  | (GCPtr & PAGE_OFFSET_MASK);
     328
     329        RTGCPHYS GCPhysPte = GST_GET_PDE_GCPHYS(Pte)        /** @todo This should be GST_GET_PTE_GCPHYS. */
     330                           | (GCPtr & PAGE_OFFSET_MASK);
     331# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     332        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk);
     333# endif
     334        pWalk->Core.GCPhys       = GCPhysPte;
    433335        return VINF_SUCCESS;
    434336    }
    435337}
    436338
    437 #endif /* 32BIT, PAE, EPT, AMD64 */
     339#endif /* 32BIT, PAE, AMD64 */
    438340
    439341/**
     
    468370#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
    469371   || PGM_GST_TYPE == PGM_TYPE_PAE \
    470    || PGM_GST_TYPE == PGM_TYPE_EPT \
    471372   || PGM_GST_TYPE == PGM_TYPE_AMD64
    472373
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r92177 r92186  
    798798        pPGM->enmShadowMode     = PGMMODE_INVALID;
    799799        pPGM->enmGuestMode      = PGMMODE_INVALID;
     800        pPGM->enmGuestSlatMode  = PGMSLAT_INVALID;
    800801        pPGM->idxGuestModeData  = UINT8_MAX;
    801802        pPGM->idxShadowModeData = UINT8_MAX;
     
    10761077        pVCpu->pgm.s.enmShadowMode     = PGMMODE_INVALID;
    10771078        pVCpu->pgm.s.enmGuestMode      = PGMMODE_INVALID;
     1079        pVCpu->pgm.s.enmGuestSlatMode  = PGMSLAT_INVALID;
    10781080        pVCpu->pgm.s.idxGuestModeData  = UINT8_MAX;
    10791081        pVCpu->pgm.s.idxShadowModeData = UINT8_MAX;
     
    19871989    /* print info. */
    19881990    if (fGuest)
     1991    {
    19891992        pHlp->pfnPrintf(pHlp, "Guest paging mode (VCPU #%u):  %s (changed %RU64 times), A20 %s (changed %RU64 times)\n",
    19901993                        pVCpu->idCpu, PGMGetModeName(pVCpu->pgm.s.enmGuestMode), pVCpu->pgm.s.cGuestModeChanges.c,
    19911994                        pVCpu->pgm.s.fA20Enabled ? "enabled" : "disabled", pVCpu->pgm.s.cA20Changes.c);
     1995#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     1996        if (pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID)
     1997            pHlp->pfnPrintf(pHlp, "Guest SLAT mode (VCPU #%u): %s\n", pVCpu->idCpu,
     1998                            PGMGetSlatModeName(pVCpu->pgm.s.enmGuestSlatMode));
     1999#endif
     2000    }
    19922001    if (fShadow)
    19932002        pHlp->pfnPrintf(pHlp, "Shadow paging mode (VCPU #%u): %s\n", pVCpu->idCpu, PGMGetModeName(pVCpu->pgm.s.enmShadowMode));
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r92177 r92186  
    199199#define PGM_TYPE_END                    (PGM_TYPE_NONE + 1)
    200200#define PGM_TYPE_FIRST_SHADOW           PGM_TYPE_32BIT /**< The first type used by shadow paging. */
     201/** @} */
     202
     203/** @name Defines used to indicate the second-level
     204 * address translation (SLAT) modes in the templates.
     205 * @{ */
     206#define PGM_SLAT_TYPE_EPT               (PGM_TYPE_END + 1)
     207#define PGM_SLAT_TYPE_32BIT             (PGM_TYPE_END + 2)
     208#define PGM_SLAT_TYPE_PAE               (PGM_TYPE_END + 3)
     209#define PGM_SLAT_TYPE_AMD64             (PGM_TYPE_END + 4)
    201210/** @} */
    202211
     
    23312340    RTGCPTR         GCPtr;
    23322341
     2342    /** The nested-guest physical address that is being resolved if this is a
     2343     *  second-level walk (input).
     2344     *  @remarks only valid if fIsSlat is set. */
     2345    RTGCPHYS        GCPhysNested;
     2346
    23332347    /** The guest physical address that is the result of the walk.
    23342348     * @remarks only valid if fSucceeded is set. */
     
    23372351    /** Set if the walk succeeded, i.e. GCPhys is valid.
    23382352    bool            fSucceeded;
     2353    /** Whether this is a second-level translation. */
     2354    bool            fIsSlat;
     2355    /** Whether the linear address (GCPtr) is valid and thus the cause for the
     2356     *  second-level translation. */
     2357    bool            fIsLinearAddrValid;
    23392358    /** The level the problem arose at.
    23402359     * PTE is level 1, PDE is level 2, PDPE is level 3, PML4 is level 4, CR3 is
     
    23572376    /** The effective X86_PTE_NX flag for the address. */
    23582377    bool            fEffectiveNX;
    2359     bool            afPadding1[2];
    23602378    /** Effective flags thus far: RW, US, PWT, PCD, A, ~NX >> 63.
    23612379     * The NX bit is inverted and shifted down 63 places to bit 0. */
     
    25132531typedef PGMPTWALKGSTAMD64 const *PCPGMPTWALKGSTAMD64;
    25142532
    2515 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    25162533/**
    25172534 * Guest page table walk for the EPT mode.
     
    25422559/** Pointer to a const EPT guest page table walk. */
    25432560typedef PGMPTWALKGSTEPT const *PCPGMPTWALKGSTEPT;
    2544 #endif
    25452561
    25462562/**
     
    26252641         * convention). */
    26262642        PGMPTWALKGST32BIT   Legacy;
    2627 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2628         /** The page walker for EPT. */
     2643        /** The page walker for EPT (SLAT). */
    26292644        PGMPTWALKGSTEPT     Ept;
    2630 #endif
    26312645    } u;
    26322646    /** Indicates which part of the union is valid. */
     
    26692683#define PGM_GST_NAME_RC_AMD64_STR(name)                 "pgmRCGstAMD64" #name
    26702684#define PGM_GST_NAME_R0_AMD64_STR(name)                 "pgmR0GstAMD64" #name
    2671 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2672 # define PGM_GST_NAME_EPT(name)                         PGM_CTX(pgm,GstEPT##name)
    2673 # define PGM_GST_NAME_RC_EPT_STR(name)                  "pgmRCGstEPT" #name
    2674 # define PGM_GST_NAME_R0_EPT_STR(name)                  "pgmR0GstEPT" #name
    2675 #endif
    26762685#define PGM_GST_DECL(type, name)                        PGM_CTX_DECL(type) PGM_GST_NAME(name)
     2686
     2687#define PGM_GST_SLAT_NAME_EPT(name)                     PGM_CTX(pgm,GstSlatEpt##name)
     2688#define PGM_GST_SLAT_NAME_RC_EPT_STR(name)              "pgmRCGstSlatEpt" #name
     2689#define PGM_GST_SLAT_NAME_R0_EPT_STR(name)              "pgmR0GstSlatEpt" #name
     2690#define PGM_GST_SLAT_DECL(type, name)                   PGM_CTX_DECL(type) PGM_GST_SLAT_NAME(name)
    26772691
    26782692#define PGM_SHW_NAME_32BIT(name)                        PGM_CTX(pgm,Shw32Bit##name)
     
    27322746#define PGM_BTH_NAME_EPT_PAE(name)                      PGM_CTX(pgm,BthEPTPAE##name)
    27332747#define PGM_BTH_NAME_EPT_AMD64(name)                    PGM_CTX(pgm,BthEPTAMD64##name)
    2734 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2735 # define PGM_BTH_NAME_EPT_EPT(name)                     PGM_CTX(pgm,BthEPTEPT##name)
    2736 #endif
    27372748#define PGM_BTH_NAME_NONE_REAL(name)                    PGM_CTX(pgm,BthNoneReal##name)
    27382749#define PGM_BTH_NAME_NONE_PROT(name)                    PGM_CTX(pgm,BthNoneProt##name)
     
    27692780#define PGM_BTH_NAME_RC_EPT_PAE_STR(name)               "pgmRCBthEPTPAE" #name
    27702781#define PGM_BTH_NAME_RC_EPT_AMD64_STR(name)             "pgmRCBthEPTAMD64" #name
    2771 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2772 # define PGM_BTH_NAME_RC_EPT_EPT_STR(name)              "pgmRCBthEPTEPT" #name
    2773 #endif
    27742782
    27752783#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)            "pgmR0Bth32BitReal" #name
     
    28022810#define PGM_BTH_NAME_R0_EPT_PAE_STR(name)               "pgmR0BthEPTPAE" #name
    28032811#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name)             "pgmR0BthEPTAMD64" #name
    2804 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2805 # define PGM_BTH_NAME_R0_EPT_EPT_STR(name)              "pgmR0BthEPTEPT" #name
    2806 #endif
    28072812
    28082813#define PGM_BTH_DECL(type, name)        PGM_CTX_DECL(type) PGM_BTH_NAME(name)
     
    28272832
    28282833/** The length of g_aPgmGuestModeData. */
    2829 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2830 # define PGM_GUEST_MODE_DATA_ARRAY_SIZE     (PGM_TYPE_EPT + 1)
    2831 #elif defined(VBOX_WITH_64_BITS_GUESTS)
     2834#if VBOX_WITH_64_BITS_GUESTS
    28322835# define PGM_GUEST_MODE_DATA_ARRAY_SIZE     (PGM_TYPE_AMD64 + 1)
    28332836#else
     
    35173520    /** The guest paging mode. */
    35183521    PGMMODE                         enmGuestMode;
     3522    /** The guest second level address translation mode. */
     3523    PGMSLAT                         enmGuestSlatMode;
    35193524    /** Guest mode data table index (PGM_TYPE_XXX). */
    35203525    uint8_t volatile                idxGuestModeData;
     
    35243529    uint8_t volatile                idxBothModeData;
    35253530    /** Alignment padding. */
    3526     uint8_t                         abPadding[5];
     3531    uint8_t                         abPadding[1];
    35273532
    35283533    /** The current physical address represented in the guest CR3 register. */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette