VirtualBox

Changeset 92200 in vbox for trunk


Timestamp: Nov 3, 2021 9:46:36 PM
Author: vboxsync
Message: VMM/GVMM,VMM: Make it possible for known worker threads to enter critical sections in ring-0. Added a couple of helpers for safely signalling event semaphores. bugref:10093 bugref:6695
Location: trunk
Files: 5 edited

  • trunk/include/VBox/vmm/gvmm.h

    r91287 → r92200
    170170typedef FNGVMMR0ENUMCALLBACK *PFNGVMMR0ENUMCALLBACK;
    171171
     172/**
     173 * Worker thread IDs.
     174 */
     175typedef enum GVMMWORKERTHREAD
     176{
     177    /** The usual invalid zero value. */
     178    GVMMWORKERTHREAD_INVALID = 0,
     179    /** PGM handy page allocator thread. */
     180    GVMMWORKERTHREAD_PGM_ALLOCATOR,
     181    /** End of valid worker thread values. */
     182    GVMMWORKERTHREAD_END,
     183    /** Make sure the type size is 32 bits. */
     184    GVMMWORKERTHREAD_32_BIT_HACK = 0x7fffffff
     185} GVMMWORKERTHREAD;
    172186
    173187GVMMR0DECL(int)     GVMMR0Init(void);
     
    183197GVMMR0DECL(int)     GVMMR0RegisterVCpu(PGVM pGVM, VMCPUID idCpu);
    184198GVMMR0DECL(int)     GVMMR0DeregisterVCpu(PGVM pGVM, VMCPUID idCpu);
     199GVMMR0DECL(int)     GVMMR0RegisterWorkerThread(PGVM pGVM, GVMMWORKERTHREAD enmWorker, uintptr_t hThreadR3);
     200GVMMR0DECL(int)     GVMMR0DeregisterWorkerThread(PGVM pGVM, GVMMWORKERTHREAD enmWorker);
    185201GVMMR0DECL(PGVM)    GVMMR0ByHandle(uint32_t hGVM);
    186202GVMMR0DECL(int)     GVMMR0ValidateGVM(PGVM pGVM);
    187203GVMMR0DECL(int)     GVMMR0ValidateGVMandEMT(PGVM pGVM, VMCPUID idCpu);
     204GVMMR0DECL(int)     GVMMR0ValidateGVMandEMTorWorker(PGVM pGVM, VMCPUID idCpu, GVMMWORKERTHREAD enmWorker);
    188205GVMMR0DECL(PVMCC)   GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT);
    189206GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByEMT(RTNATIVETHREAD hEMT);
    190207GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByGVMandEMT(PGVM pGVM, RTNATIVETHREAD hEMT);
     208GVMMR0DECL(RTNATIVETHREAD) GVMMR0GetRing3ThreadForSelf(PGVM pGVM);
    191209GVMMR0DECL(RTHCPHYS) GVMMR0ConvertGVMPtr2HCPhys(PGVM pGVM, void *pv);
    192210GVMMR0DECL(int)     GVMMR0SchedHalt(PGVM pGVM, PGVMCPU pGVCpu, uint64_t u64ExpireGipTime);
     
    229247
    230248/**
     249 * Request packet for calling GVMMR0RegisterWorkerThread.
     250 */
     251typedef struct GVMMREGISTERWORKERTHREADREQ
     252{
     253    /** The request header. */
     254    SUPVMMR0REQHDR  Hdr;
     255    /** Ring-3 native thread handle of the caller. (IN)   */
     256    RTNATIVETHREAD  hNativeThreadR3;
     257} GVMMREGISTERWORKERTHREADREQ;
     258/** Pointer to a GVMMR0RegisterWorkerThread request packet. */
     259typedef GVMMREGISTERWORKERTHREADREQ *PGVMMREGISTERWORKERTHREADREQ;
     260
     261
     262/**
    231263 * Request buffer for GVMMR0SchedWakeUpAndPokeCpusReq / VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS.
    232264 * @see GVMMR0SchedWakeUpAndPokeCpus.
     
    286318
    287319
     320#ifdef IN_RING3
     321VMMR3_INT_DECL(int)  GVMMR3RegisterWorkerThread(PVM pVM, GVMMWORKERTHREAD enmWorker);
     322VMMR3_INT_DECL(int)  GVMMR3DeregisterWorkerThread(PVM pVM, GVMMWORKERTHREAD enmWorker);
     323#endif
     324
     325
    288326/** @} */
    289327
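
The ring-3 wrappers GVMMR3RegisterWorkerThread / GVMMR3DeregisterWorkerThread added above are how a non-EMT VMM thread announces itself before relying on the new ring-0 facilities. Below is a minimal sketch of how such a worker (for example the PGM handy page allocator slot, GVMMWORKERTHREAD_PGM_ALLOCATOR) might bracket its lifetime with these calls; the thread function name and its body are hypothetical, only the GVMM calls and the enum value come from this changeset:

    /* Hypothetical ring-3 worker thread body (assumes the VMM ring-3 build
     * environment).  Only GVMMR3RegisterWorkerThread(),
     * GVMMR3DeregisterWorkerThread() and GVMMWORKERTHREAD_PGM_ALLOCATOR are
     * taken from this changeset; everything else is illustrative. */
    #include <VBox/vmm/vm.h>
    #include <VBox/vmm/gvmm.h>
    #include <iprt/assert.h>
    #include <iprt/thread.h>

    static DECLCALLBACK(int) pgmR3SketchAllocatorThread(RTTHREAD hThreadSelf, void *pvUser)
    {
        PVM pVM = (PVM)pvUser;
        RT_NOREF(hThreadSelf);

        /* Tell GVMM which worker slot this thread occupies, so ring-0 can later
           recognize it (critical sections, GVMMR0GetRing3ThreadForSelf, ...). */
        int rc = GVMMR3RegisterWorkerThread(pVM, GVMMWORKERTHREAD_PGM_ALLOCATOR);
        AssertRCReturn(rc, rc);

        /* ... the actual allocation work would run here ... */

        /* Deregistration marks the slot as destroyed; it cannot be reused. */
        return GVMMR3DeregisterWorkerThread(pVM, GVMMWORKERTHREAD_PGM_ALLOCATOR);
    }

Ring-0 records both the ring-0 and ring-3 native handles for the slot, which is what later lets GVMMR0GetRing3ThreadForSelf translate the worker's ring-0 identity back to its ring-3 handle.
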
  • trunk/include/VBox/vmm/vmm.h

    r91245 → r92200
    252252    /** Call GVMMR0DeregisterVCpu(). */
    253253    VMMR0_DO_GVMM_DEREGISTER_VMCPU,
     254    /** Call GVMMR0RegisterWorkerThread(). */
     255    VMMR0_DO_GVMM_REGISTER_WORKER_THREAD,
     256    /** Call GVMMR0DeregisterWorkerThread(). */
     257    VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD,
    254258    /** Call GVMMR0SchedHalt(). */
    255259    VMMR0_DO_GVMM_SCHED_HALT,
     
    513517                                            PVMMR0EMTBLOCKCTX pCtx);
    514518VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx);
     519VMMR0_INT_DECL(int)  VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout);
     520VMMR0_INT_DECL(int)  VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent);
     521VMMR0_INT_DECL(int)  VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent);
     522
     523/** @name VMMR0EMTWAIT_F_XXX - flags for VMMR0EmtWaitEventInner and friends.
     524 * @{ */
     525/** Try suppress VERR_INTERRUPTED for a little while (~10 sec). */
     526#define VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED     RT_BIT_32(0)
     527/** @} */
     528#endif /* IN_RING0 */
     529
    515530VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu);
    516 #endif /* IN_RING0 */
    517 
    518531/** @} */
    519532
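
The two new VMMR0_DO_GVMM_*_WORKER_THREAD operations are the dispatch points the ring-3 wrappers presumably go through. A hedged sketch of what such a call could look like with the GVMMREGISTERWORKERTHREADREQ packet defined in gvmm.h above; the transport via SUPR3CallVMMR0Ex, VMCC_GET_VMR0_FOR_CALL and the NIL_VMCPUID argument are assumptions, not shown in this changeset:

    /* Hypothetical ring-3 dispatch of VMMR0_DO_GVMM_REGISTER_WORKER_THREAD.
     * The request packet layout comes from gvmm.h; the call path via
     * SUPR3CallVMMR0Ex() and the idCpu value are assumptions. */
    #include <VBox/vmm/vm.h>
    #include <VBox/vmm/gvmm.h>
    #include <VBox/vmm/vmm.h>
    #include <VBox/sup.h>
    #include <iprt/thread.h>

    static int gvmmR3SketchRegisterWorker(PVM pVM, GVMMWORKERTHREAD enmWorker)
    {
        GVMMREGISTERWORKERTHREADREQ Req;
        Req.Hdr.u32Magic    = SUPVMMR0REQHDR_MAGIC;
        Req.Hdr.cbReq       = sizeof(Req);
        Req.hNativeThreadR3 = RTThreadNativeSelf(); /* cross-checked against the ring-0 self */
        return SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID,
                                VMMR0_DO_GVMM_REGISTER_WORKER_THREAD,
                                (uint64_t)enmWorker, &Req.Hdr);
    }

Deregistration would use VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD with only u64Arg set, since that operation takes no request packet (see the VMMR0.cpp hunk below).
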
  • trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp

    r91287 → r92200
    15921592
    15931593/**
     1594 * Registers the caller as a given worker thread.
     1595 *
     1596 * This enables the thread to enter critical sections in ring-0.
     1597 *
     1598 * @returns VBox status code.
     1599 * @param   pGVM            The global (ring-0) VM structure.
     1600 * @param   enmWorker       The worker thread this is supposed to be.
     1601 * @param   hNativeSelfR3   The ring-3 native thread handle of the caller.
     1602 */
     1603GVMMR0DECL(int) GVMMR0RegisterWorkerThread(PGVM pGVM, GVMMWORKERTHREAD enmWorker, RTNATIVETHREAD hNativeSelfR3)
     1604{
     1605    /*
     1606     * Validate input.
     1607     */
     1608    AssertReturn(enmWorker > GVMMWORKERTHREAD_INVALID && enmWorker < GVMMWORKERTHREAD_END, VERR_INVALID_PARAMETER);
     1609    AssertReturn(hNativeSelfR3 != NIL_RTNATIVETHREAD, VERR_INVALID_HANDLE);
     1610    RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
     1611    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
     1612    PGVMM pGVMM;
     1613    int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
     1614    AssertRCReturn(rc, rc);
     1615    AssertReturn(pGVM->enmVMState < VMSTATE_DESTROYING, VERR_VM_INVALID_VM_STATE);
     1616
     1617    /*
     1618     * Grab the big lock and check the VM state again.
     1619     */
     1620    uint32_t const hSelf = pGVM->hSelf;
     1621    gvmmR0CreateDestroyLock(pGVMM); /** @todo per-VM lock? */
     1622    if (   hSelf < RT_ELEMENTS(pGVMM->aHandles)
     1623        && pGVMM->aHandles[hSelf].pvObj != NULL
     1624        && pGVMM->aHandles[hSelf].pGVM  == pGVM
     1625        && pGVMM->aHandles[hSelf].ProcId == RTProcSelf())
     1626    {
     1627        if (pGVM->enmVMState < VMSTATE_DESTROYING)
     1628        {
     1629            /*
     1630             * Check that the thread isn't an EMT or serving in some other worker capacity.
     1631             */
     1632            for (VMCPUID iCpu = 0; iCpu < pGVM->cCpus; iCpu++)
     1633                AssertBreakStmt(pGVM->aCpus[iCpu].hEMT != hNativeSelf, rc = VERR_INVALID_PARAMETER);
     1634            for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->gvmm.s.aWorkerThreads); idx++)
     1635                AssertBreakStmt(idx == (size_t)enmWorker || pGVM->gvmm.s.aWorkerThreads[idx].hNativeThread != hNativeSelf,
     1636                                rc = VERR_INVALID_PARAMETER);
     1637            if (RT_SUCCESS(rc))
     1638            {
     1639                /*
     1640                 * Do the registration.
     1641                 */
     1642                if (   pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread   == NIL_RTNATIVETHREAD
     1643                    && pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 == NIL_RTNATIVETHREAD)
     1644                {
     1645                    pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread   = hNativeSelf;
     1646                    pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 = hNativeSelfR3;
     1647                    rc = VINF_SUCCESS;
     1648                }
     1649                else if (   pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread   == hNativeSelf
     1650                         && pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 == hNativeSelfR3)
     1651                    rc = VERR_ALREADY_EXISTS;
     1652                else
     1653                    rc = VERR_RESOURCE_BUSY;
     1654            }
     1655        }
     1656        else
     1657            rc = VERR_VM_INVALID_VM_STATE;
     1658    }
     1659    else
     1660        rc = VERR_INVALID_VM_HANDLE;
     1661    gvmmR0CreateDestroyUnlock(pGVMM);
     1662    return rc;
     1663}
     1664
     1665
     1666/**
     1667 * Deregisters a worker thread (the caller).
     1668 *
     1669 * The worker thread cannot be re-created and re-registered; instead, the given
     1670 * @a enmWorker slot becomes permanently invalid.
     1671 *
     1672 * @returns VBox status code.
     1673 * @param   pGVM            The global (ring-0) VM structure.
     1674 * @param   enmWorker       The worker thread this is supposed to be.
     1675 */
     1676GVMMR0DECL(int)  GVMMR0DeregisterWorkerThread(PGVM pGVM, GVMMWORKERTHREAD enmWorker)
     1677{
     1678    /*
     1679     * Validate input.
     1680     */
     1681    AssertReturn(enmWorker > GVMMWORKERTHREAD_INVALID && enmWorker < GVMMWORKERTHREAD_END, VERR_INVALID_PARAMETER);
     1682    RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
     1683    AssertReturn(hNativeThread != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR_3);
     1684    PGVMM pGVMM;
     1685    int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /*fTakeUsedLock*/);
     1686    AssertRCReturn(rc, rc);
     1687
     1688    /*
     1689     * Grab the big lock and check the VM state again.
     1690     */
     1691    uint32_t const hSelf = pGVM->hSelf;
     1692    gvmmR0CreateDestroyLock(pGVMM); /** @todo per-VM lock? */
     1693    if (   hSelf < RT_ELEMENTS(pGVMM->aHandles)
     1694        && pGVMM->aHandles[hSelf].pvObj != NULL
     1695        && pGVMM->aHandles[hSelf].pGVM  == pGVM
     1696        && pGVMM->aHandles[hSelf].ProcId == RTProcSelf())
     1697    {
     1698        /*
     1699         * Do the deregistration.
     1700         * This will prevent any other thread from registering as this worker later.
     1701         */
     1702        if (pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread == hNativeThread)
     1703        {
     1704            pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread   = GVMM_RTNATIVETHREAD_DESTROYED;
     1705            pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 = GVMM_RTNATIVETHREAD_DESTROYED;
     1706            rc = VINF_SUCCESS;
     1707        }
     1708        else if (   pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThread   == GVMM_RTNATIVETHREAD_DESTROYED
     1709                 && pGVM->gvmm.s.aWorkerThreads[enmWorker].hNativeThreadR3 == GVMM_RTNATIVETHREAD_DESTROYED)
     1710            rc = VINF_SUCCESS;
     1711        else
     1712            rc = VERR_NOT_OWNER;
     1713    }
     1714    else
     1715        rc = VERR_INVALID_VM_HANDLE;
     1716    gvmmR0CreateDestroyUnlock(pGVMM);
     1717    return rc;
     1718}
     1719
     1720
     1721/**
    15941722 * Lookup a GVM structure by its handle.
    15951723 *
     
    19252053    /*
    19262054     * Find the matching hash table entry.
     2055     * See similar code in GVMMR0GetRing3ThreadForSelf.
    19272056     */
    19282057    uint32_t idxHash = GVMM_EMT_HASH_1(hEMT);
     
    19632092    Assert(pGVCpu->gvmm.s.idxEmtHash == idxHash);
    19642093    return pGVCpu;
     2094}
     2095
     2096
     2097/**
     2098 * Get the native ring-3 thread handle for the caller.
     2099 *
     2100 * This works for EMTs and registered workers.
     2101 *
     2102 * @returns ring-3 native thread handle or NIL_RTNATIVETHREAD.
     2103 * @param   pGVM    The global (ring-0) VM structure.
     2104 */
     2105GVMMR0DECL(RTNATIVETHREAD) GVMMR0GetRing3ThreadForSelf(PGVM pGVM)
     2106{
     2107    /*
     2108     * Validate input.
     2109     */
     2110    AssertPtr(pGVM);
     2111    AssertReturn(pGVM->u32Magic == GVM_MAGIC, NIL_RTNATIVETHREAD);
     2112    RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
     2113    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, NIL_RTNATIVETHREAD);
     2114
     2115    /*
     2116     * Find the matching hash table entry.
     2117     * See similar code in GVMMR0GetGVCpuByGVMandEMT.
     2118     */
     2119    uint32_t idxHash = GVMM_EMT_HASH_1(hNativeSelf);
     2120    if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hNativeSelf)
     2121    { /* likely */ }
     2122    else
     2123    {
     2124#ifdef VBOX_STRICT
     2125        unsigned       cCollisions = 0;
     2126#endif
     2127        uint32_t const idxHash2    = GVMM_EMT_HASH_2(hNativeSelf);
     2128        for (;;)
     2129        {
     2130            Assert(cCollisions++ < GVMM_EMT_HASH_SIZE);
     2131            idxHash = (idxHash + idxHash2) % GVMM_EMT_HASH_SIZE;
     2132            if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hNativeSelf)
     2133                break;
     2134            if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == NIL_RTNATIVETHREAD)
     2135            {
     2136#ifdef VBOX_STRICT
     2137                uint32_t idxCpu = pGVM->cCpus;
     2138                AssertStmt(idxCpu < VMM_MAX_CPU_COUNT, idxCpu = VMM_MAX_CPU_COUNT);
     2139                while (idxCpu-- > 0)
     2140                    Assert(pGVM->aCpus[idxCpu].hNativeThreadR0 != hNativeSelf);
     2141#endif
     2142
     2143                /*
     2144                 * Not an EMT, so see if it's a worker thread.
     2145                 */
     2146                size_t idx = RT_ELEMENTS(pGVM->gvmm.s.aWorkerThreads);
     2147                while (--idx > GVMMWORKERTHREAD_INVALID)
     2148                    if (pGVM->gvmm.s.aWorkerThreads[idx].hNativeThread == hNativeSelf)
     2149                        return pGVM->gvmm.s.aWorkerThreads[idx].hNativeThreadR3;
     2150
     2151                return NIL_RTNATIVETHREAD;
     2152            }
     2153        }
     2154    }
     2155
     2156    /*
     2157     * Validate the VCpu number and translate it into a pointer.
     2158     */
     2159    VMCPUID const idCpu = pGVM->gvmm.s.aEmtHash[idxHash].idVCpu;
     2160    AssertReturn(idCpu < pGVM->cCpus, NIL_RTNATIVETHREAD);
     2161    PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
     2162    Assert(pGVCpu->hNativeThreadR0   == hNativeSelf);
     2163    Assert(pGVCpu->gvmm.s.idxEmtHash == idxHash);
     2164    return pGVCpu->hNativeThread;
    19652165}
    19662166
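
GVMMR0GetRing3ThreadForSelf resolves the caller through the EMT hash table first (open addressing with GVMM_EMT_HASH_1 giving the initial slot and GVMM_EMT_HASH_2 the probe step) and only then scans the small worker-thread table. For ring-0 code that wants to run on behalf of such a worker, GVMMR0ValidateGVMandEMTorWorker is the gatekeeper. Below is a hedged sketch of a caller that validates itself before touching per-VM state; the function name, the idCpu handling and everything after the validation are assumptions, only the GVMM calls and the worker enum come from this changeset:

    /* Hypothetical ring-0 service entered either by an EMT or by the
     * registered PGM allocator worker.  How GVMMR0ValidateGVMandEMTorWorker()
     * treats idCpu for a worker caller is not shown here and is assumed. */
    #include <VBox/vmm/gvm.h>
    #include <VBox/vmm/gvmm.h>
    #include <VBox/err.h>

    static int pgmR0SketchWorkerEntry(PGVM pGVM, VMCPUID idCpu)
    {
        int rc = GVMMR0ValidateGVMandEMTorWorker(pGVM, idCpu, GVMMWORKERTHREAD_PGM_ALLOCATOR);
        if (RT_FAILURE(rc))
            return rc;

        /* The caller is now known to GVMM, so ring-0 code can map it back to a
           ring-3 thread handle if it needs one, e.g.: */
        RTNATIVETHREAD hNativeR3 = GVMMR0GetRing3ThreadForSelf(pGVM);
        RT_NOREF(hNativeR3);

        /* ... protected work (critical sections etc.) would go here ... */
        return VINF_SUCCESS;
    }
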
  • trunk/src/VBox/VMM/VMMR0/GVMMR0Internal.h

    r90597 → r92200
    107107    bool                afPadding[6];
    108108
     109    /** Worker thread registrations. */
     110    struct
     111    {
     112        /** The native ring-0 thread handle. */
     113        RTNATIVETHREAD  hNativeThread;
     114        /** The native ring-3 thread handle. */
     115        RTNATIVETHREAD  hNativeThreadR3;
     116    } aWorkerThreads[GVMMWORKERTHREAD_END];
     117
    109118    /** EMT lookup hash table. */
    110119    GVMMEMTHASHENTRY    aEmtHash[GVMM_EMT_HASH_SIZE];
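
The new aWorkerThreads[] array pairs a ring-0 and a ring-3 native handle per worker slot. GVMMR0RegisterWorkerThread treats NIL_RTNATIVETHREAD as "slot free", so per-VM initialization presumably clears the array to NIL; a minimal sketch of that assumption (the function name is hypothetical, the real initializer is not part of this excerpt):

    /* Hedged sketch: initialise the worker-thread slots to NIL so that the
     * "slot is free" check in GVMMR0RegisterWorkerThread() works.  The real
     * per-VM init code is not shown in this changeset. */
    #include <iprt/types.h>
    #include <VBox/vmm/gvm.h>

    static void gvmmR0SketchInitWorkerThreads(PGVM pGVM)
    {
        for (size_t idx = 0; idx < RT_ELEMENTS(pGVM->gvmm.s.aWorkerThreads); idx++)
        {
            pGVM->gvmm.s.aWorkerThreads[idx].hNativeThread   = NIL_RTNATIVETHREAD;
            pGVM->gvmm.s.aWorkerThreads[idx].hNativeThreadR3 = NIL_RTNATIVETHREAD;
        }
    }
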
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r91819 → r92200
    17401740            break;
    17411741
     1742        case VMMR0_DO_GVMM_REGISTER_WORKER_THREAD:
     1743            if (pGVM != NULL && pReqHdr && pReqHdr->cbReq == sizeof(GVMMREGISTERWORKERTHREADREQ))
     1744                rc = GVMMR0RegisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg,
     1745                                                ((PGVMMREGISTERWORKERTHREADREQ)(pReqHdr))->hNativeThreadR3);
     1746            else
     1747                rc = VERR_INVALID_PARAMETER;
     1748            break;
     1749
     1750        case VMMR0_DO_GVMM_DEREGISTER_WORKER_THREAD:
     1751            if (pGVM != NULL)
     1752                rc = GVMMR0DeregisterWorkerThread(pGVM, (GVMMWORKERTHREAD)(unsigned)u64Arg);
     1753            else
     1754                rc = VERR_INVALID_PARAMETER;
     1755            break;
     1756
    17421757        case VMMR0_DO_GVMM_SCHED_HALT:
    17431758            if (pReqHdr)
     
    25922607}
    25932608
    2594 /** @name VMMR0EMTWAIT_F_XXX - flags for VMMR0EmtWaitEventInner and friends.
    2595  * @{ */
    2596 /** Try suppress VERR_INTERRUPTED for a little while (~10 sec). */
    2597 #define VMMR0EMTWAIT_F_TRY_SUPPRESS_INTERRUPTED     RT_BIT_32(0)
    2598 /** @} */
    25992609
    26002610/**
     
    26112621 * @param   cMsTimeout  The timeout or RT_INDEFINITE_WAIT.
    26122622 */
    2613 VMMR0DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
     2623VMMR0_INT_DECL(int) VMMR0EmtWaitEventInner(PGVMCPU pGVCpu, uint32_t fFlags, RTSEMEVENT hEvent, RTMSINTERVAL cMsTimeout)
    26142624{
    26152625    AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
     
    26782688    }
    26792689    /* not reached */
     2690}
     2691
     2692
     2693/**
     2694 * Helper for signalling an SUPSEMEVENT.
     2695 *
     2696 * This may temporarily leave the HM context if the host requires that for
     2697 * signalling SUPSEMEVENT objects.
     2698 *
     2699 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
     2700 * @param   pGVM        The ring-0 VM structure.
     2701 * @param   pGVCpu      The ring-0 virtual CPU structure.
     2702 * @param   hEvent      The event to signal.
     2703 */
     2704VMMR0_INT_DECL(int) VMMR0EmtSignalSupEvent(PGVM pGVM, PGVMCPU pGVCpu, SUPSEMEVENT hEvent)
     2705{
     2706    AssertReturn(pGVCpu->hEMT == RTThreadNativeSelf(), VERR_VM_THREAD_NOT_EMT);
     2707    if (RTSemEventIsSignalSafe())
     2708        return SUPSemEventSignal(pGVM->pSession, hEvent);
     2709
     2710    VMMR0EMTBLOCKCTX Ctx;
     2711    int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
     2712    if (RT_SUCCESS(rc))
     2713    {
     2714        rc = SUPSemEventSignal(pGVM->pSession, hEvent);
     2715        VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
     2716    }
     2717    return rc;
     2718}
     2719
     2720
     2721/**
     2722 * Helper for signalling an SUPSEMEVENT, variant supporting non-EMTs.
     2723 *
     2724 * This may temporarily leave the HM context if the host requires that for
     2725 * signalling SUPSEMEVENT objects.
     2726 *
     2727 * @returns VBox status code (see VMMR0EmtPrepareToBlock)
     2728 * @param   pGVM        The ring-0 VM structure.
     2729 * @param   hEvent      The event to signal.
     2730 */
     2731VMMR0_INT_DECL(int) VMMR0EmtSignalSupEventByGVM(PGVM pGVM, SUPSEMEVENT hEvent)
     2732{
     2733    if (!RTSemEventIsSignalSafe())
     2734    {
     2735        PGVMCPU pGVCpu = GVMMR0GetGVCpuByGVMandEMT(pGVM, NIL_RTNATIVETHREAD);
     2736        if (pGVCpu)
     2737        {
     2738            VMMR0EMTBLOCKCTX Ctx;
     2739            int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, (void *)(uintptr_t)hEvent, &Ctx);
     2740            if (RT_SUCCESS(rc))
     2741            {
     2742                rc = SUPSemEventSignal(pGVM->pSession, hEvent);
     2743                VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
     2744            }
     2745            return rc;
     2746        }
     2747    }
     2748    return SUPSemEventSignal(pGVM->pSession, hEvent);
    26802749}
    26812750
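
The two signalling helpers exist because SUPSemEventSignal may sleep on hosts where RTSemEventIsSignalSafe() returns false, and an EMT in the HM/no-preemption context must leave that context first (VMMR0EmtPrepareToBlock / VMMR0EmtResumeAfterBlocking) before doing anything that can sleep. A hedged usage sketch; the surrounding function and the origin of the event handle are hypothetical, only VMMR0EmtSignalSupEventByGVM comes from this changeset:

    /* Hypothetical ring-0 callback that may run on an EMT or on some other
     * thread (e.g. the PGM allocator worker).  Only
     * VMMR0EmtSignalSupEventByGVM() is taken from this changeset. */
    #include <VBox/vmm/gvm.h>
    #include <VBox/vmm/vmm.h>
    #include <VBox/sup.h>

    static int someR0SketchWakeRing3Waiter(PGVM pGVM, SUPSEMEVENT hEvtRing3Waiter)
    {
        /* The ByGVM variant looks up the calling EMT itself; when signalling
           isn't safe it does the prepare-to-block/resume dance for EMTs, and
           for non-EMT callers it falls back to plain SUPSemEventSignal(). */
        return VMMR0EmtSignalSupEventByGVM(pGVM, hEvtRing3Waiter);
    }
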