VirtualBox

Changeset 40966 in vbox for trunk/src/VBox/Runtime/r0drv


Ignore:
Timestamp:
Apr 17, 2012 4:43:28 PM (13 years ago)
Author:
vboxsync
Message:

Runtime/r0drv/solaris: Dissolve VBI into IPRT.

Location:
trunk/src/VBox/Runtime/r0drv/solaris
Files:
1 added
1 deleted
16 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c

    r29281 r40966  
    4545    if (pch[cb] != '\0')
    4646        AssertBreakpoint();
    47     if (    !g_frtSolarisSplSetsEIF
     47    if (    !g_frtSolSplSetsEIF
    4848#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    4949        ||  ASMIntAreEnabled()
  • trunk/src/VBox/Runtime/r0drv/solaris/dbg-r0drv-solaris.c

    r40855 r40966  
    9494            *ppCTF = ctf_modopen(((modctl_t *)*ppMod)->mod_mp, &err);
    9595            mutex_exit(&mod_lock);
     96            mod_release_mod(*ppMod);
    9697
    9798            if (*ppCTF)
     
    102103                rc = VERR_INTERNAL_ERROR_3;
    103104            }
    104 
    105             mod_release_mod(*ppMod);
    106105        }
    107106        else
     
    133132
    134133    ctf_close(pCTF);
    135     mod_release_mod(pMod);
    136134}
    137135
  • trunk/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c

    r36555 r40966  
    3232#include "internal/iprt.h"
    3333
     34#include <iprt/assert.h>
    3435#include <iprt/err.h>
    3536#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     
    4243*   Global Variables                                                           *
    4344*******************************************************************************/
     45/** Kernel debug info handle. */
     46RTDBGKRNLINFO               g_hKrnlDbgInfo;
    4447/** Indicates that the spl routines (and therefore a bunch of other ones too)
    4548 * will set EFLAGS::IF and break code that disables interrupts.  */
    46 bool g_frtSolarisSplSetsEIF = false;
    47 
     49bool g_frtSolSplSetsEIF                                    = false;
    4850/** timeout_generic address. */
    4951PFNSOL_timeout_generic      g_pfnrtR0Sol_timeout_generic   = NULL;
     
    5254/** cyclic_reprogram address. */
    5355PFNSOL_cyclic_reprogram     g_pfnrtR0Sol_cyclic_reprogram  = NULL;
    54 
     56/** Whether to use the kernel page freelist. */
     57bool                        g_frtSolUseKflt                = false;
     58/** Whether we've completed R0 initialization. */
     59bool                        g_frtSolInitDone               = false;
     60/** Whether to use old-style xc_call interface. */
     61bool                        g_frtSolOldIPI                 = false;
     62/** Whether to use old-style xc_call interface using one ulong_t as the CPU set
     63 *  representation. */
     64bool                        g_frtSolOldIPIUlong            = false;
     65/** The xc_call callout table structure. */
     66RTR0FNSOLXCCALL             g_rtSolXcCall;
     67/** Thread preemption offset. */
     68size_t                      g_offrtSolThreadPreempt;
     69/** Host scheduler preemption offset. */
     70size_t                      g_offrtSolCpuPreempt;
     71/** Host scheduler force preemption offset. */
     72size_t                      g_offrtSolCpuForceKernelPreempt;
     73/* Resolve using dl_lookup (remove if no longer relevant for supported S10 versions) */
     74extern void contig_free(void *addr, size_t size);
     75#pragma weak contig_free
     76/** contig_free address. */
     77PFNSOL_contig_free          g_pfnrtR0Sol_contig_free       = contig_free;
    5578
    5679DECLHIDDEN(int) rtR0InitNative(void)
    5780{
    5881    /*
    59      * Initialize vbi (keeping it separate for now)
     82     * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging.
    6083     */
    61     int rc = vbi_init();
    62     if (!rc)
     84    int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */);
     85    if (RT_SUCCESS(rc))
    6386    {
    6487#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     
    7093        int iOld = splr(DISP_LEVEL);
    7194        if (ASMIntAreEnabled())
    72             g_frtSolarisSplSetsEIF = true;
     95            g_frtSolSplSetsEIF = true;
    7396        splx(iOld);
    7497        if (ASMIntAreEnabled())
    75             g_frtSolarisSplSetsEIF = true;
     98            g_frtSolSplSetsEIF = true;
    7699        ASMSetFlags(uOldFlags);
    77100#else
     
    80103
    81104        /*
    82          * Dynamically resolve new symbols we want to use.
    83          */
    84         g_pfnrtR0Sol_timeout_generic    = (PFNSOL_timeout_generic  )kobj_getsymvalue("timeout_generic",   1);
    85         g_pfnrtR0Sol_untimeout_generic  = (PFNSOL_untimeout_generic)kobj_getsymvalue("untimeout_generic", 1);
     105         * Mandatory: Preemption offsets.
     106         */
     107        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt);
     108        if (RT_FAILURE(rc))
     109        {
     110            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n");
     111            goto errorbail;
     112        }
     113
     114        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt);
     115        if (RT_FAILURE(rc))
     116        {
     117            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n");
     118            goto errorbail;
     119        }
     120
     121        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt);
     122        if (RT_FAILURE(rc))
     123        {
     124            cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n");
     125            goto errorbail;
     126        }
     127        cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx\n",    g_offrtSolCpuPreempt);
     128        cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx\n",  g_offrtSolCpuForceKernelPreempt);
     129        cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx\n", g_offrtSolThreadPreempt);
     130
     131        /*
     132         * Mandatory: CPU cross call infrastructure. Refer the-solaris-kernel.h for details.
     133         */
     134        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */);
     135        if (RT_SUCCESS(rc))
     136        {
     137            if (ncpus > IPRT_SOL_NCPUS)
     138            {
     139                cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS);
     140                rc = VERR_NOT_SUPPORTED;
     141                goto errorbail;
     142            }
     143            g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call;
     144        }
     145        else
     146        {
     147            g_frtSolOldIPI = true;
     148            g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call;
     149            if (max_cpuid + 1 == sizeof(ulong_t) * 8)
     150            {
     151                g_frtSolOldIPIUlong = true;
     152                g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call;
     153            }
     154            else if (max_cpuid + 1 != IPRT_SOL_NCPUS)
     155            {
     156                cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid, IPRT_SOL_NCPUS);
     157                rc = VERR_NOT_SUPPORTED;
     158                goto errorbail;
     159            }
     160        }
     161
     162        /*
     163         * Optional: Timeout hooks.
     164         */
     165        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic", (void **)&g_pfnrtR0Sol_timeout_generic);
     166        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic", (void **)&g_pfnrtR0Sol_untimeout_generic);
    86167        if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL))
    87168        {
     
    92173            g_pfnrtR0Sol_untimeout_generic = NULL;
    93174        }
    94 
    95         g_pfnrtR0Sol_cyclic_reprogram   = (PFNSOL_cyclic_reprogram )kobj_getsymvalue("cyclic_reprogram",  1);
    96 
    97 
     175        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram", (void **)&g_pfnrtR0Sol_cyclic_reprogram);
     176
     177        /*
     178         * Optional: Kernel page freelist (kflt)
     179         *
     180         * Only applicable to 64-bit Solaris kernels. Use kflt flags to get pages from kernel page freelists
     181         * while allocating physical pages, once the userpages are exhausted. snv_161+, see @bugref{5632}.
     182         */
     183        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "kflt_init", NULL /* ppvSymbol */);
     184        if (RT_SUCCESS(rc))
     185        {
     186            int *pKfltDisable = NULL;
     187            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "kflt_disable", (void **)&pKfltDisable);
     188            if (RT_SUCCESS(rc) && pKfltDisable && *pKfltDisable == 0)
     189                g_frtSolUseKflt = true;
     190        }
     191
     192        /*
     193         * Weak binding failures: contig_free
     194         */
     195        if (g_pfnrtR0Sol_contig_free == NULL)
     196        {
     197            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free", (void **)&g_pfnrtR0Sol_contig_free);
     198            if (RT_FAILURE(rc))
     199            {
     200                cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n");
     201                goto errorbail;
     202            }
     203        }
     204
     205        g_frtSolInitDone = true;
    98206        return VINF_SUCCESS;
    99207    }
    100     cmn_err(CE_NOTE, "vbi_init failed. rc=%d\n", rc);
    101     return VERR_GENERAL_FAILURE;
     208    else
     209    {
     210        cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc);
     211        return rc;
     212    }
     213
     214errorbail:
     215    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
     216    return rc;
    102217}
    103218
     
    105220DECLHIDDEN(void) rtR0TermNative(void)
    106221{
     222    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
     223    g_frtSolInitDone = false;
    107224}
    108225
  • trunk/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h

    r36392 r40966  
    229229    PRTR0SEMSOLWAIT pWait   = (PRTR0SEMSOLWAIT)pvUser;
    230230    kthread_t      *pThread = pWait->pThread;
    231     kmutex_t       *pMtx    = (kmutex_t *)ASMAtomicReadPtr(&pWait->pvMtx);
     231    kmutex_t       *pMtx    = (kmutex_t *)ASMAtomicReadPtr((void * volatile *)&pWait->pvMtx);
    232232    if (VALID_PTR(pMtx))
    233233    {
     
    487487}
    488488
    489 #endif
     489#endif /* ___r0drv_solaris_semeventwait_r0drv_solaris_h */
     490
  • trunk/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c

    r36190 r40966  
    147147
    148148/**
    149  * Worker for rtSemMutexSolarisRequest that handles the case where we go to sleep.
     149 * Worker for rtSemMutexSolRequest that handles the case where we go to sleep.
    150150 *
    151151 * @returns VINF_SUCCESS, VERR_INTERRUPTED, or VERR_SEM_DESTROYED.
     
    157157 * @remarks This needs to be called with the mutex object held!
    158158 */
    159 static int rtSemMutexSolarisRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
     159static int rtSemMutexSolRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
    160160                                       bool fInterruptible)
    161161{
     
    254254 * Internal worker.
    255255 */
    256 DECLINLINE(int) rtSemMutexSolarisRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
     256DECLINLINE(int) rtSemMutexSolRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
    257257{
    258258    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
     
    296296     */
    297297    else
    298         rc = rtSemMutexSolarisRequestSleep(pThis, cMillies, fInterruptible);
     298        rc = rtSemMutexSolRequestSleep(pThis, cMillies, fInterruptible);
    299299
    300300    mutex_exit(&pThis->Mtx);
     
    305305RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
    306306{
    307     return rtSemMutexSolarisRequest(hMutexSem, cMillies, false /*fInterruptible*/);
     307    return rtSemMutexSolRequest(hMutexSem, cMillies, false /*fInterruptible*/);
    308308}
    309309
     
    317317RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
    318318{
    319     return rtSemMutexSolarisRequest(hMutexSem, cMillies, true /*fInterruptible*/);
     319    return rtSemMutexSolRequest(hMutexSem, cMillies, true /*fInterruptible*/);
    320320}
    321321
  • trunk/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h

    r40695 r40966  
    5757#include <sys/ctf_api.h>
    5858#include <sys/modctl.h>
    59 #include "vbi.h"
    6059
    6160#undef u /* /usr/include/sys/user.h:249:1 is where this is defined to (curproc->p_user). very cool. */
     
    6362#include <iprt/cdefs.h>
    6463#include <iprt/types.h>
     64#include <iprt/dbg.h>
    6565
    6666RT_C_DECLS_BEGIN
    6767
     68/* IPRT functions. */
     69DECLHIDDEN(void *)   rtR0SolMemAlloc(uint64_t cbPhysHi, uint64_t *puPhys, size_t cb, uint64_t cbAlign, bool fContig);
     70DECLHIDDEN(void)     rtR0SolMemFree(void *pv, size_t cb);
     71
     72
     73/* Solaris functions. */
    6874typedef callout_id_t (*PFNSOL_timeout_generic)(int type, void (*func)(void *),
    6975                                               void *arg, hrtime_t expiration,
     7076                                               hrtime_t resolution, int flags);
    71 typedef hrtime_t    (*PFNSOL_untimeout_generic)(callout_id_t id, int nowait);
    72 typedef int         (*PFNSOL_cyclic_reprogram)(cyclic_id_t id, hrtime_t expiration);
    73 
     77typedef hrtime_t     (*PFNSOL_untimeout_generic)(callout_id_t id, int nowait);
     78typedef int          (*PFNSOL_cyclic_reprogram)(cyclic_id_t id, hrtime_t expiration);
     79typedef void         (*PFNSOL_contig_free)(void *addr, size_t size);
    7480
    7581/* IPRT globals. */
    76 extern bool                     g_frtSolarisSplSetsEIF;
     82extern bool                     g_frtSolSplSetsEIF;
    7783extern struct ddi_dma_attr      g_SolarisX86PhysMemLimits;
    78 extern RTCPUSET                 g_rtMpSolarisCpuSet;
     84extern RTCPUSET                 g_rtMpSolCpuSet;
    7985extern PFNSOL_timeout_generic   g_pfnrtR0Sol_timeout_generic;
    8086extern PFNSOL_untimeout_generic g_pfnrtR0Sol_untimeout_generic;
    8187extern PFNSOL_cyclic_reprogram  g_pfnrtR0Sol_cyclic_reprogram;
     88extern PFNSOL_contig_free       g_pfnrtR0Sol_contig_free;
     89extern bool                     g_frtSolUseKflt;
     90extern size_t                   g_offrtSolThreadPreempt;
     91extern size_t                   g_offrtSolCpuPreempt;
     92extern size_t                   g_offrtSolCpuForceKernelPreempt;
     93extern bool                     g_frtSolInitDone;
     94extern RTDBGKRNLINFO            g_hKrnlDbgInfo;
     95
     96/*
     97 * Workarounds for running on old versions of solaris with different cross call
     98 * interfaces. If we find xc_init_cpu() in the kernel, then just use the
     99 * defined interfaces for xc_call() from the include file where the xc_call()
     100 * interfaces just takes a pointer to a ulong_t array. The array must be long
     101 * enough to hold "ncpus" bits at runtime.
      102 *
     103 * The reason for the hacks is that using the type "cpuset_t" is pretty much
     104 * impossible from code built outside the Solaris source repository that wants
     105 * to run on multiple releases of Solaris.
     106 *
     107 * For old style xc_call()s, 32 bit solaris and older 64 bit versions use
     108 * "ulong_t" as cpuset_t.
     109 *
     110 * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
     111 * where "x" depends on NCPU.
     112 *
     113 * We detect the difference in 64 bit support by checking the kernel value of
     114 * max_cpuid, which always holds the compiled value of NCPU - 1.
     115 *
     116 * If Solaris increases NCPU to more than 256, VBox will continue to work on
     117 * all versions of Solaris as long as the number of installed CPUs in the
      118 * machine is <= IPRT_SOL_NCPUS. If IPRT_SOL_NCPUS is increased, this
     119 * code has to be re-written some to provide compatibility with older Solaris
     120 * which expects cpuset_t to be based on NCPU==256 -- or we discontinue
     121 * support of old Nevada/S10.
     122 */
     123#define IPRT_SOL_NCPUS          256
     124#define IPRT_SOL_SET_WORDS      (IPRT_SOL_NCPUS / (sizeof(ulong_t) * 8))
     125#define IPRT_SOL_X_CALL_HIPRI   (2) /* for Old Solaris interface */
     126typedef struct RTSOLCPUSET
     127{
     128    ulong_t                     auCpus[IPRT_SOL_SET_WORDS];
     129} RTSOLCPUSET;
     130typedef RTSOLCPUSET *PRTSOLCPUSET;
     131
     132/* Avoid warnings even if it means more typing... */
     133typedef struct RTR0FNSOLXCCALL
     134{
     135    union
     136    {
     137        void *(*pfnSol_xc_call)          (xc_arg_t, xc_arg_t, xc_arg_t, ulong_t *, xc_func_t);
     138        void *(*pfnSol_xc_call_old)      (xc_arg_t, xc_arg_t, xc_arg_t, int, RTSOLCPUSET, xc_func_t);
     139        void *(*pfnSol_xc_call_old_ulong)(xc_arg_t, xc_arg_t, xc_arg_t, int, ulong_t, xc_func_t);
     140    } u;
     141} RTR0FNSOLXCCALL;
     142typedef RTR0FNSOLXCCALL *PRTR0FNSOLXCCALL;
     143
     144extern RTR0FNSOLXCCALL          g_rtSolXcCall;
     145extern bool                     g_frtSolOldIPI;
     146extern bool                     g_frtSolOldIPIUlong;
     147
    82148
    83149/* Solaris globals. */
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/RTMpPokeCpu-r0drv-solaris.c

    r29300 r40966  
    4444{
    4545    RT_ASSERT_INTS_ON();
    46     vbi_poke_cpu(idCpu);
     46    if (idCpu < ncpus)
     47        poke_cpu(idCpu);
    4748    return VINF_SUCCESS;
    4849}
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/alloc-r0drv-solaris.c

    r36555 r40966  
    4141
    4242
     43/*******************************************************************************
     44*   Structures and Typedefs                                                    *
     45*******************************************************************************/
     46static ddi_dma_attr_t s_rtR0SolDmaAttr =
     47{
     48    DMA_ATTR_V0,                /* Version Number */
     49    (uint64_t)0,                /* Lower limit */
     50    (uint64_t)0,                /* High limit */
     51    (uint64_t)0xffffffff,       /* Counter limit */
     52    (uint64_t)PAGESIZE,         /* Alignment */
     53    (uint64_t)PAGESIZE,         /* Burst size */
     54    (uint64_t)PAGESIZE,         /* Effective DMA size */
     55    (uint64_t)0xffffffff,       /* Max DMA xfer size */
     56    (uint64_t)0xffffffff,       /* Segment boundary */
     57    1,                          /* Scatter-gather list length (1 for contiguous) */
     58    1,                          /* Device granularity */
     59    0                           /* Bus-specific flags */
     60};
     61
     62extern void *contig_alloc(size_t cb, ddi_dma_attr_t *pDmaAttr, size_t uAlign, int fCanSleep);
     63
    4364
    4465/**
     
    5576        AssertReturn(!(fFlags & RTMEMHDR_FLAG_ANY_CTX), NULL);
    5677        cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE) - sizeof(*pHdr);
    57         pHdr = (PRTMEMHDR)vbi_text_alloc(cbAllocated + sizeof(*pHdr));
     78        pHdr = (PRTMEMHDR)segkmem_alloc(heaptext_arena, cbAllocated + sizeof(*pHdr), KM_SLEEP);
    5879    }
    5980    else
     
    90111#ifdef RT_ARCH_AMD64
    91112    if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC)
    92         vbi_text_free(pHdr, pHdr->cb + sizeof(*pHdr));
     113        segkmem_free(heaptext_arena, pHdr, pHdr->cb + sizeof(*pHdr));
    93114    else
    94115#endif
     
    97118
    98119
     120/**
     121 * Allocates physical memory which satisfy the given constraints.
     122 *
     123 * @param   uPhysHi        The upper physical address limit (inclusive).
     124 * @param   puPhys         Where to store the physical address of the allocated
     125 *                         memory. Optional, can be NULL.
     126 * @param   cb             Size of allocation.
     127 * @param   uAlignment     Alignment.
     128 * @param   fContig        Whether the memory must be physically contiguous or
     129 *                         not.
     130 *
     131 * @returns Virtual address of allocated memory block or NULL if allocation
     132 *        failed.
     133 */
     134DECLHIDDEN(void *) rtR0SolMemAlloc(uint64_t uPhysHi, uint64_t *puPhys, size_t cb, uint64_t uAlignment, bool fContig)
     135{
     136    if ((cb & PAGEOFFSET) != 0)
     137        return NULL;
     138
     139    size_t cPages = (cb + PAGESIZE - 1) >> PAGESHIFT;
     140    if (!cPages)
     141        return NULL;
     142
     143    ddi_dma_attr_t DmaAttr = s_rtR0SolDmaAttr;
     144    DmaAttr.dma_attr_addr_hi    = uPhysHi;
     145    DmaAttr.dma_attr_align      = uAlignment;
     146    if (!fContig)
     147        DmaAttr.dma_attr_sgllen = cPages > INT_MAX ? INT_MAX - 1 : cPages;
     148    else
     149        AssertRelease(DmaAttr.dma_attr_sgllen == 1);
     150
     151    void *pvMem = contig_alloc(cb, &DmaAttr, PAGESIZE, 1 /* can sleep */);
     152    if (!pvMem)
     153    {
     154        LogRel(("rtR0SolMemAlloc failed. cb=%u Align=%u fContig=%d\n", (unsigned)cb, (unsigned)uAlignment, fContig));
     155        return NULL;
     156    }
     157
     158    pfn_t PageFrameNum = hat_getpfnum(kas.a_hat, (caddr_t)pvMem);
     159    AssertRelease(PageFrameNum != PFN_INVALID);
     160    if (puPhys)
     161        *puPhys = (uint64_t)PageFrameNum << PAGESHIFT;
     162
     163    return pvMem;
     164}
     165
     166
     167/**
     168 * Frees memory allocated using rtR0SolMemAlloc().
     169 *
     170 * @param   pv         The memory to free.
     171 * @param   cb         Size of the memory block
     172 */
     173DECLHIDDEN(void) rtR0SolMemFree(void *pv, size_t cb)
     174{
     175    if (RT_LIKELY(pv))
     176        g_pfnrtR0Sol_contig_free(pv, cb);
     177}
     178
     179
    99180RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
    100181{
     
    104185
    105186    /* Allocate physically contiguous (< 4GB) page-aligned memory. */
    106     uint64_t physAddr = _4G -1;
    107     caddr_t virtAddr  = vbi_contig_alloc(&physAddr, cb);
    108     if (virtAddr == NULL)
    109     {
    110         LogRel(("vbi_contig_alloc failed to allocate %u bytes\n", cb));
    111         return NULL;
    112     }
    113 
    114     Assert(physAddr < _4G);
    115     *pPhys = physAddr;
    116     return virtAddr;
     187    uint64_t uPhys;
     188    void *pvMem = rtR0SolMemAlloc((uint64_t)_4G - 1, &uPhys, cb, PAGESIZE, true);
     189    if (RT_UNLIKELY(!pvMem))
     190    {
     191        LogRel(("RTMemContAlloc failed to allocate %u bytes\n", cb));
     192        return NULL;
     193    }
     194
     195    Assert(uPhys < _4G);
     196    *pPhys = uPhys;
     197    return pvMem;
    117198}
    118199
     
    121202{
    122203    RT_ASSERT_PREEMPTIBLE();
    123     if (pv)
    124         vbi_contig_free(pv, cb);
    125 }
    126 
     204    rtR0SolMemFree(pv, cb);
     205}
     206
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c

    r37281 r40966  
    4141#include <iprt/process.h>
    4242#include "internal/memobj.h"
     43#include "memobj-r0drv-solaris.h"
     44
     45#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)
     46static vnode_t                  s_PageVnode;
    4347
    4448/*******************************************************************************
     
    4852 * The Solaris version of the memory object structure.
    4953 */
    50 typedef struct RTR0MEMOBJSOLARIS
     54typedef struct RTR0MEMOBJSOL
    5155{
    5256    /** The core structure. */
     
    6165     *  allocation. */
    6266    bool                fLargePage;
    63 } RTR0MEMOBJSOLARIS, *PRTR0MEMOBJSOLARIS;
    64 
     67} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;
     68
     69
     70/**
     71 * Returns the physical address for a virtual address.
     72 *
     73 * @param pv        The virtual address.
     74 *
     75 * @returns The physical address corresponding to @a pv.
     76 */
     77static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
     78{
     79    struct hat *pHat         = NULL;
     80    pfn_t       PageFrameNum = 0;
     81    uintptr_t   uVirtAddr    = (uintptr_t)pv;
     82
     83    if (SOL_IS_KRNL_ADDR(pv))
     84        pHat = kas.a_hat;
     85    else
     86    {
     87        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
     88        AssertRelease(pProcess);
     89        pHat = pProcess->p_as->a_hat;
     90    }
     91
     92    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
     93    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
     94    return (((uint64_t)PageFrameNum << PAGESHIFT) | (uVirtAddr & PAGEOFFSET));
     95}
     96
     97
     98/**
     99 * Returns the physical address of a page from an array of pages.
     100 *
     101 * @param ppPages       The array of pages.
     102 * @param iPage         Index of the page in the array to get the physical
     103 *                      address.
     104 *
     105 * @returns Physical address of specific page within the list of pages specified
     106 *         in @a ppPages.
     107 */
     108static inline uint64_t rtR0MemObjSolPageToPhys(page_t **ppPages, size_t iPage)
     109{
     110    pfn_t PageFrameNum = page_pptonum(ppPages[iPage]);
     111    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPageToPhys failed. ppPages=%p iPage=%u\n", ppPages, iPage));
     112    return (uint64_t)PageFrameNum << PAGESHIFT;
     113}
     114
     115
     116/**
     117 * Retreives a free page from the kernel freelist.
     118 *
     119 * @param virtAddr       The virtual address to which this page maybe mapped in
     120 *                       the future.
     121 * @param cbPage         The size of the page.
     122 *
     123 * @returns Pointer to the allocated page, NULL on failure.
     124 */
     125static page_t *rtR0MemObjSolPageFromFreelist(caddr_t virtAddr, size_t cbPage)
     126{
     127    seg_t KernelSeg;
     128    KernelSeg.s_as = &kas;
     129    page_t *pPage = page_get_freelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
     130                                      cbPage, 0 /* flags */, NULL /* NUMA group */);
     131    if (   !pPage
     132        && g_frtSolUseKflt)
     133    {
     134        pPage = page_get_freelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
     135                                  cbPage, 0x200 /* PG_KFLT */, NULL /* NUMA group */);
     136    }
     137    return pPage;
     138}
     139
     140
     141/**
     142 * Retrieves a free page from the kernel cachelist.
     143 *
     144 * @param virtAddr      The virtual address to which this page maybe mapped in
     145 *                      the future.
     146 * @param cbPage        The size of the page.
     147 *
     148 * @return Pointer to the allocated page, NULL on failure.
     149 */
     150static page_t *rtR0MemObjSolPageFromCachelist(caddr_t virtAddr, size_t cbPage)
     151{
     152    seg_t KernelSeg;
     153    KernelSeg.s_as = &kas;
     154    page_t *pPage = page_get_cachelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
     155                                       0 /* flags */, NULL /* NUMA group */);
     156    if (   !pPage
     157        && g_frtSolUseKflt)
     158    {
     159        pPage = page_get_cachelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
     160                                   0x200 /* PG_KFLT */, NULL /* NUMA group */);
     161    }
     162
     163    /*
     164     * Remove association with the vnode for pages from the cachelist.
     165     */
     166    if (!PP_ISAGED(pPage))
     167        page_hashout(pPage, NULL /* mutex */);
     168
     169    return pPage;
     170}
     171
     172
     173/**
     174 * Allocates physical non-contiguous memory.
     175 *
     176 * @param uPhysHi   The upper physical address limit (inclusive).
     177 * @param puPhys    Where to store the physical address of first page. Optional,
     178 *                  can be NULL.
     179 * @param cb        The size of the allocation.
     180 *
     181 * @return Array of allocated pages, NULL on failure.
     182 */
     183static page_t **rtR0MemObjSolPagesAlloc(uint64_t uPhysHi, uint64_t *puPhys, size_t cb)
     184{
     185    /** @todo We need to satisfy the upper physical address constraint */
     186
     187    /*
     188     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     189     * The cachelist is not really free pages but when memory is exhausted they'll be moved to the
     190     * free lists, it's the total of the free+cache list that we see on the 'free' column in vmstat.
     191     *
     192     * Reserve available memory for pages and create the pages.
     193     */
     194    pgcnt_t cPages = (cb + PAGESIZE - 1) >> PAGESHIFT;
     195    int rc = page_resv(cPages, KM_NOSLEEP);
     196    if (rc)
     197    {
     198        rc = page_create_wait(cPages, 0 /* flags */);
     199        if (rc)
     200        {
     201            size_t   cbPages = cPages * sizeof(page_t *);
     202            page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
     203            if (RT_LIKELY(ppPages))
     204            {
     205                /*
     206                 * Get pages from kseg, the 'virtAddr' here is only for colouring but unfortunately
     207                 * we don't yet have the 'virtAddr' to which this memory may be mapped.
     208                 */
     209                caddr_t virtAddr = NULL;
     210                for (size_t i = 0; i < cPages; i++, virtAddr += PAGESIZE)
     211                {
     212                    /*
     213                     * Get a page from the freelist or cachelist.
     214                     */
     215                    page_t *pPage = rtR0MemObjSolPageFromFreelist(virtAddr, PAGESIZE);
     216                    if (!pPage)
     217                        pPage = rtR0MemObjSolPageFromCachelist(virtAddr, PAGESIZE);
     218                    if (RT_UNLIKELY(!pPage))
     219                    {
     220                        /*
     221                         * No more pages found, release was grabbed so far.
     222                         */
     223                        page_create_putback(cPages - i);
     224                        while (--i >= 0)
     225                            page_free(ppPages[i], 0 /* don't need page, move to tail of pagelist */);
     226                        kmem_free(ppPages, cbPages);
     227                        page_unresv(cPages);
     228                        return NULL;
     229                    }
     230
     231                    PP_CLRFREE(pPage);      /* Page is no longer free */
     232                    PP_CLRAGED(pPage);      /* Page is not hashed in */
     233                    ppPages[i] = pPage;
     234                }
     235
     236                /*
     237                 * We now have the pages locked exclusively, before they are mapped in
     238                 * we must downgrade the lock.
     239                 */
     240                if (puPhys)
     241                    *puPhys = (uint64_t)page_pptonum(ppPages[0]) << PAGESHIFT;
     242                return ppPages;
     243            }
     244
     245            page_create_putback(cPages);
     246        }
     247
     248        page_unresv(cPages);
     249    }
     250
     251    return NULL;
     252}
     253
     254
     255/**
     256 * Prepares pages allocated by rtR0MemObjSolPagesAlloc for mapping.
     257 *
     258 * @param    ppPages    Pointer to the page list.
     259 * @param    cb         Size of the allocation.
     260 * @param    auPhys     Where to store the physical address of the premapped
     261 *                      pages.
     262 * @param    cPages     The number of pages (entries) in @a auPhys.
     263 *
     264 * @returns IPRT status code.
     265 */
     266static int rtR0MemObjSolPagesPreMap(page_t **ppPages, size_t cb, uint64_t auPhys[], size_t cPages)
     267{
     268    AssertPtrReturn(ppPages, VERR_INVALID_PARAMETER);
     269    AssertPtrReturn(auPhys, VERR_INVALID_PARAMETER);
     270
     271    for (size_t iPage = 0; iPage < cPages; iPage++)
     272    {
     273        /*
     274         * Prepare pages for mapping into kernel/user-space. Downgrade the
     275         * exclusive page lock to a shared lock if necessary.
     276         */
     277        if (page_tryupgrade(ppPages[iPage]) == 1)
     278            page_downgrade(ppPages[iPage]);
     279
     280        auPhys[iPage] = rtR0MemObjSolPageToPhys(ppPages, iPage);
     281    }
     282
     283    return VINF_SUCCESS;
     284}
     285
     286
/**
 * Frees pages allocated by rtR0MemObjSolPagesAlloc.
 *
 * @param ppPages       Pointer to the page list.
 * @param cb            Size (in bytes) of the original allocation the page
 *                      list covers.
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    /* Recompute the page count and page-array size exactly as the allocator
       did, so kmem_free() and page_unresv() get matching quantities. */
    size_t cPages  = (cb + PAGESIZE - 1) >> PAGESHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         *  We need to exclusive lock the pages before freeing them.
         */
        int rc = page_tryupgrade(ppPages[iPage]);
        if (!rc)
        {
            /* Upgrading the shared lock failed: drop it and spin until the
               exclusive lock is acquired. */
            page_unlock(ppPages[iPage]);
            while (!page_lock(ppPages[iPage], SE_EXCL, NULL /* mutex */, P_RECLAIM))
            {
                /* nothing */;
            }
        }
        page_free(ppPages[iPage], 0 /* don't need page, move to tail of pagelist */);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}
     316
     317
     318/**
     319 * Allocates a large page to cover the required allocation size.
     320 *
     321 * @param puPhys        Where to store the physical address of the allocated
     322 *                      page. Optional, can be NULL.
     323 * @param cb            Size of the allocation.
     324 *
     325 * @returns Pointer to the allocated large page, NULL on failure.
     326 */
     327static page_t *rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cb)
     328{
     329    /*
     330     * Reserve available memory and create the sub-pages.
     331     */
     332    const pgcnt_t cPages = cb >> PAGESHIFT;
     333    int rc = page_resv(cPages, KM_NOSLEEP);
     334    if (rc)
     335    {
     336        rc = page_create_wait(cPages, 0 /* flags */);
     337        if (rc)
     338        {
     339            /*
     340             * Get a page off the free list. We set virtAddr to 0 since we don't know where
     341             * the memory is going to be mapped.
     342             */
     343            seg_t KernelSeg;
     344            caddr_t virtAddr  = NULL;
     345            KernelSeg.s_as    = &kas;
     346            page_t *pRootPage = rtR0MemObjSolPageFromFreelist(virtAddr, cb);
     347            if (pRootPage)
     348            {
     349                AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx cPages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));
     350
     351                /*
     352                 * Mark all the sub-pages as non-free and not-hashed-in.
     353                 * It is paramount that we destroy the list (before freeing it).
     354                 */
     355                page_t *pPageList = pRootPage;
     356                for (size_t iPage = 0; iPage < cPages; iPage++)
     357                {
     358                    page_t *pPage = pPageList;
     359                    AssertPtr(pPage);
     360                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
     361                        ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
     362                    page_sub(&pPageList, pPage);
     363
     364                    /*
     365                     * Ensure page is now be free and the page size-code must match that of the root page.
     366                     */
     367                    AssertMsg(PP_ISFREE(pPage), ("%p\n", pPage));
     368                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("%p - %d expected %d \n", pPage, pPage->p_szc, pRootPage->p_szc));
     369
     370                    PP_CLRFREE(pPage);      /* Page no longer free */
     371                    PP_CLRAGED(pPage);      /* Page no longer hashed-in */
     372                }
     373
     374                uint64_t uPhys = (uint64_t)page_pptonum(pRootPage) << PAGESHIFT;
     375                AssertMsg(!(uPhys & (cb - 1)), ("%llx %zx\n", uPhys, cb));
     376                if (puPhys)
     377                    *puPhys = uPhys;
     378
     379                return pRootPage;
     380            }
     381
     382            page_create_putback(cPages);
     383        }
     384
     385        page_unresv(cPages);
     386    }
     387
     388    return NULL;
     389}
     390
/**
 * Prepares the large page allocated by rtR0MemObjSolLargePageAlloc to be mapped.
 *
 * @param    pRootPage      Pointer to the root page.
 * @param    cb             Size of the allocation.
 *
 * @returns IPRT status code (currently always VINF_SUCCESS).
 */
static int rtR0MemObjSolLargePagePreMap(page_t *pRootPage, size_t cb)
{
    const pgcnt_t cPages = cb >> PAGESHIFT;

    Assert(page_get_pagecnt(pRootPage->p_szc) == cPages);
    AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx npages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));

    /*
     * Downgrade each sub-page from exclusive to shared locking.
     * NOTE(review): the original comment gave no reason ("otherweise we
     * cannot <you go figure>"); presumably shared locks are required before
     * the pages can be mapped -- confirm against the mapping code.
     */
    for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
    {
        page_t *pPage = page_nextn(pRootPage, iPage);
        AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
            ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));

        /* Only downgrade when the exclusive lock is actually held/regained. */
        if (page_tryupgrade(pPage) == 1)
            page_downgrade(pPage);
        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));
    }

    return VINF_SUCCESS;
}
     424
     425
     426/**
     427 * Frees the page allocated by rtR0MemObjSolLargePageAlloc.
     428 *
     429 * @param    pRootPage      Pointer to the root page.
     430 * @param    cb             Allocated size.
     431 */
     432static void rtR0MemObjSolLargePageFree(page_t *pRootPage, size_t cb)
     433{
     434    pgcnt_t cPages = cb >> PAGESHIFT;
     435
     436    Assert(page_get_pagecnt(pRootPage->p_szc) == cPages);
     437    AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx cPages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));
     438
     439    /*
     440     * We need to exclusively lock the sub-pages before freeing the large one.
     441     */
     442    for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
     443    {
     444        page_t *pPage = page_nextn(pRootPage, iPage);
     445        AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
     446                  ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
     447        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));
     448
     449        int rc = page_tryupgrade(pPage);
     450        if (!rc)
     451        {
     452            page_unlock(pPage);
     453            while (!page_lock(pPage, SE_EXCL, NULL /* mutex */, P_RECLAIM))
     454            {
     455                /* nothing */;
     456            }
     457        }
     458    }
     459
     460    /*
     461     * Free the large page and unreserve the memory.
     462     */
     463    page_free_pages(pRootPage);
     464    page_unresv(cPages);
     465
     466}
     467
     468
     469/**
     470 * Unmaps kernel/user-space mapped memory.
     471 *
     472 * @param    pv         Pointer to the mapped memory block.
     473 * @param    cb         Size of the memory block.
     474 */
     475static void rtR0MemObjSolUnmap(void *pv, size_t cb)
     476{
     477    if (SOL_IS_KRNL_ADDR(pv))
     478    {
     479        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
     480        vmem_free(heap_arena, pv, cb);
     481    }
     482    else
     483    {
     484        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
     485        AssertPtr(pAddrSpace);
     486        as_rangelock(pAddrSpace);
     487        as_unmap(pAddrSpace, pv, cb);
     488        as_rangeunlock(pAddrSpace);
     489    }
     490}
     491
     492/**
     493 * Lock down memory mappings for a virtual address.
     494 *
     495 * @param    pv             Pointer to the memory to lock down.
     496 * @param    cb             Size of the memory block.
     497 * @param    fAccess        Page access rights (S_READ, S_WRITE, S_EXEC)
     498 *
     499 * @returns IPRT status code.
     500 */
     501static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
     502{
     503    /*
     504     * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
     505     */
     506    if (!SOL_IS_KRNL_ADDR(pv))
     507    {
     508        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
     509        AssertPtr(pProc);
     510        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
     511        if (rc)
     512        {
     513            LogRel(("rtR0MemObjSolLock failed for pv=%pv cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
     514            return VERR_LOCK_FAILED;
     515        }
     516    }
     517    return VINF_SUCCESS;
     518}
     519
     520
     521/**
     522 * Unlock memory mappings for a virtual address.
     523 *
     524 * @param    pv             Pointer to the locked memory.
     525 * @param    cb             Size of the memory block.
     526 * @param    fPageAccess    Page access rights (S_READ, S_WRITE, S_EXEC).
     527 */
     528static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
     529{
     530    if (!SOL_IS_KRNL_ADDR(pv))
     531    {
     532        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
     533        AssertPtr(pProcess);
     534        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
     535    }
     536}
     537
     538
     539/**
     540 * Maps a list of physical pages into user address space.
     541 *
     542 * @param    pVirtAddr      Where to store the virtual address of the mapping.
     543 * @param    fPageAccess    Page access rights (PROT_READ, PROT_WRITE,
     544 *                          PROT_EXEC)
     545 * @param    paPhysAddrs    Array of physical addresses to pages.
     546 * @param    cb             Size of memory being mapped.
     547 *
     548 * @returns IPRT status code.
     549 */
     550static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb)
     551{
     552    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
     553    int rc = VERR_INTERNAL_ERROR;
     554    SEGVBOX_CRARGS Args;
     555
     556    Args.paPhysAddrs = paPhysAddrs;
     557    Args.fPageAccess = fPageAccess;
     558
     559    as_rangelock(pAddrSpace);
     560    map_addr(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
     561    if (*pVirtAddr != NULL)
     562        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
     563    else
     564        rc = ENOMEM;
     565    as_rangeunlock(pAddrSpace);
     566
     567    return RTErrConvertFromErrno(rc);
     568}
    65569
    66570
    67571DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
    68572{
    69     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)pMem;
     573    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
    70574
    71575    switch (pMemSolaris->Core.enmType)
    72576    {
    73577        case RTR0MEMOBJTYPE_LOW:
    74             vbi_lowmem_free(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
     578            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
    75579            break;
    76580
    77581        case RTR0MEMOBJTYPE_PHYS:
    78             if (!pMemSolaris->Core.u.Phys.fAllocated)
    79             {   /* nothing to do here */;   }
    80             else if (pMemSolaris->fLargePage)
    81                 vbi_large_page_free(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
    82             else
    83                 vbi_phys_free(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
     582            if (pMemSolaris->Core.u.Phys.fAllocated)
     583            {
     584                if (pMemSolaris->fLargePage)
     585                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
     586                else
     587                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
     588            }
    84589            break;
    85590
    86591        case RTR0MEMOBJTYPE_PHYS_NC:
    87             vbi_pages_free(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
     592            rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
    88593            break;
    89594
     
    93598
    94599        case RTR0MEMOBJTYPE_LOCK:
    95             vbi_unlock_va(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess, pMemSolaris->pvHandle);
     600            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
    96601            break;
    97602
    98603        case RTR0MEMOBJTYPE_MAPPING:
    99             vbi_unmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
     604            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
    100605            break;
    101606
     
    122627{
    123628    /* Create the object. */
    124     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    125     if (!pMemSolaris)
     629    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
     630    if (RT_UNLIKELY(!pMemSolaris))
    126631        return VERR_NO_MEMORY;
    127632
    128     void *virtAddr = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
    129     if (!virtAddr)
     633    void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
     634    if (RT_UNLIKELY(!pvMem))
    130635    {
    131636        rtR0MemObjDelete(&pMemSolaris->Core);
     
    133638    }
    134639
    135     pMemSolaris->Core.pv  = virtAddr;
     640    pMemSolaris->Core.pv  = pvMem;
    136641    pMemSolaris->pvHandle = NULL;
    137642    *ppMem = &pMemSolaris->Core;
     
    145650
    146651    /* Create the object */
    147     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
     652    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
    148653    if (!pMemSolaris)
    149654        return VERR_NO_MEMORY;
    150655
    151656    /* Allocate physically low page-aligned memory. */
    152     uint64_t physAddr = _4G - 1;
    153     caddr_t virtAddr  = vbi_lowmem_alloc(physAddr, cb);
    154     if (virtAddr == NULL)
     657    uint64_t uPhysHi = _4G - 1;
     658    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGESIZE, false /* fContig */);
     659    if (RT_UNLIKELY(!pvMem))
    155660    {
    156661        rtR0MemObjDelete(&pMemSolaris->Core);
    157662        return VERR_NO_LOW_MEMORY;
    158663    }
    159     pMemSolaris->Core.pv = virtAddr;
     664    pMemSolaris->Core.pv = pvMem;
    160665    pMemSolaris->pvHandle = NULL;
    161666    *ppMem = &pMemSolaris->Core;
     
    174679{
    175680#if HC_ARCH_BITS == 64
    176     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
    177     if (!pMemSolaris)
     681    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
     682    if (RT_UNLIKELY(!pMemSolaris))
    178683        return VERR_NO_MEMORY;
    179684
    180     uint64_t PhysAddr = PhysHighest;
    181     void *pvPages = vbi_pages_alloc(&PhysAddr, cb);
     685    uint64_t PhysAddr = UINT64_MAX;
     686    void *pvPages = rtR0MemObjSolPagesAlloc((uint64_t)PhysHighest, &PhysAddr, cb);
    182687    if (!pvPages)
    183688    {
    184         LogRel(("rtR0MemObjNativeAllocPhysNC: vbi_pages_alloc failed.\n"));
     689        LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
    185690        rtR0MemObjDelete(&pMemSolaris->Core);
    186691        return VERR_NO_MEMORY;
     
    189694    pMemSolaris->pvHandle  = pvPages;
    190695
     696    Assert(PhysAddr != UINT64_MAX);
    191697    Assert(!(PhysAddr & PAGE_OFFSET_MASK));
    192698    *ppMem = &pMemSolaris->Core;
     
    203709    AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
    204710
    205     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    206     if (!pMemSolaris)
     711    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
     712    if (RT_UNLIKELY(!pMemSolaris))
    207713        return VERR_NO_MEMORY;
    208714
     
    228734         * Allocate one large page.
    229735         */
    230         void *pvPages = vbi_large_page_alloc(&PhysAddr, cb);
    231         if (pvPages)
     736        cmn_err(CE_NOTE,  "calling rtR0MemObjSolLargePageAlloc\n");
     737        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
     738        if (RT_LIKELY(pvPages))
    232739        {
    233740            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
     
    247754         * Allocate physically contiguous memory aligned as specified.
    248755         */
     756        cmn_err(CE_NOTE,  "rtR0MemObjNativeAllocPhys->rtR0SolMemAlloc\n");
    249757        AssertCompile(NIL_RTHCPHYS == UINT64_MAX);
    250758        PhysAddr = PhysHighest;
    251         caddr_t pvMem = vbi_phys_alloc(&PhysAddr, cb, uAlignment, 1 /* contiguous */);
     759        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
    252760        if (RT_LIKELY(pvMem))
    253761        {
     
    276784
    277785    /* Create the object. */
    278     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
     786    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    279787    if (!pMemSolaris)
    280788        return VERR_NO_MEMORY;
     
    296804
    297805    /* Create the locking object */
    298     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
     806    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    299807    if (!pMemSolaris)
    300808        return VERR_NO_MEMORY;
     
    306814    if (fAccess & RTMEM_PROT_EXEC)
    307815        fPageAccess = S_EXEC;
    308     void *pvPageList = NULL;
    309     int rc = vbi_lock_va((caddr_t)R3Ptr, cb, fPageAccess, &pvPageList);
    310     if (rc != 0)
    311     {
    312         LogRel(("rtR0MemObjNativeLockUser: vbi_lock_va failed rc=%d\n", rc));
     816    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
     817    if (RT_FAILURE(rc))
     818    {
     819        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
    313820        rtR0MemObjDelete(&pMemSolaris->Core);
    314         return VERR_LOCK_FAILED;
     821        return rc;
    315822    }
    316823
    317824    /* Fill in the object attributes and return successfully. */
    318825    pMemSolaris->Core.u.Lock.R0Process  = R0Process;
    319     pMemSolaris->pvHandle               = pvPageList;
     826    pMemSolaris->pvHandle               = NULL;
    320827    pMemSolaris->fAccess                = fPageAccess;
    321828    *ppMem = &pMemSolaris->Core;
     
    328835    NOREF(fAccess);
    329836
    330     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
     837    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
    331838    if (!pMemSolaris)
    332839        return VERR_NO_MEMORY;
     
    338845    if (fAccess & RTMEM_PROT_EXEC)
    339846        fPageAccess = S_EXEC;
    340     void *pvPageList = NULL;
    341     int rc = vbi_lock_va((caddr_t)pv, cb, fPageAccess, &pvPageList);
    342     if (rc != 0)
    343     {
    344         LogRel(("rtR0MemObjNativeLockKernel: vbi_lock_va failed rc=%d\n", rc));
     847    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
     848    if (RT_FAILURE(rc))
     849    {
     850        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
    345851        rtR0MemObjDelete(&pMemSolaris->Core);
    346         return VERR_LOCK_FAILED;
     852        return rc;
    347853    }
    348854
    349855    /* Fill in the object attributes and return successfully. */
    350856    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    351     pMemSolaris->pvHandle = pvPageList;
    352     pMemSolaris->fAccess = fPageAccess;
     857    pMemSolaris->pvHandle              = NULL;
     858    pMemSolaris->fAccess               = fPageAccess;
    353859    *ppMem = &pMemSolaris->Core;
    354860    return VINF_SUCCESS;
     
    358864DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
    359865{
    360     PRTR0MEMOBJSOLARIS  pMemSolaris;
     866    PRTR0MEMOBJSOL  pMemSolaris;
    361867
    362868    /*
    363869     * Use xalloc.
    364870     */
    365     void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /*phase*/, 0 /*nocross*/,
    366                            NULL /*minaddr*/, NULL /*maxaddr*/, VM_SLEEP);
     871    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
     872                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    367873    if (RT_UNLIKELY(!pv))
    368874        return VERR_NO_MEMORY;
    369875
    370876    /* Create the object. */
    371     pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
     877    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
    372878    if (!pMemSolaris)
    373879    {
     
    411917     * Get parameters from the source object.
    412918     */
    413     PRTR0MEMOBJSOLARIS  pMemToMapSolaris = (PRTR0MEMOBJSOLARIS)pMemToMap;
    414     void               *pv               = pMemToMapSolaris->Core.pv;
    415     size_t              cb               = pMemToMapSolaris->Core.cb;
    416     pgcnt_t             cPages           = cb >> PAGE_SHIFT;
     919    PRTR0MEMOBJSOL  pMemToMapSolaris     = (PRTR0MEMOBJSOL)pMemToMap;
     920    void           *pv                   = pMemToMapSolaris->Core.pv;
     921    size_t          cb                   = pMemToMapSolaris->Core.cb;
     922    size_t          cPages               = cb >> PAGE_SHIFT;
    417923
    418924    /*
    419925     * Create the mapping object
    420926     */
    421     PRTR0MEMOBJSOLARIS pMemSolaris;
    422     pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
     927    PRTR0MEMOBJSOL pMemSolaris;
     928    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
    423929    if (RT_UNLIKELY(!pMemSolaris))
    424930        return VERR_NO_MEMORY;
     
    432938         */
    433939        if (pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC)
    434             rc = vbi_pages_premap(pMemToMapSolaris->pvHandle, cb, paPhysAddrs);
     940            rc = rtR0MemObjSolPagesPreMap(pMemToMapSolaris->pvHandle, cb, paPhysAddrs, cPages);
    435941        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
    436942                 && pMemToMapSolaris->fLargePage)
     
    439945            for (pgcnt_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
    440946                paPhysAddrs[iPage] = Phys;
    441             rc = vbi_large_page_premap(pMemToMapSolaris->pvHandle, cb);
     947            rc = rtR0MemObjSolLargePagePreMap(pMemToMapSolaris->pvHandle, cb);
    442948        }
    443949        else
    444950        {
    445             /* Have kernel mapping, just translate virtual to physical. */
     951            /*
     952             * Have kernel mapping, just translate virtual to physical.
     953             */
    446954            AssertPtr(pv);
    447             rc = 0;
    448             for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
     955            rc = VINF_SUCCESS;
     956            for (size_t iPage = 0; iPage < cPages; iPage++)
    449957            {
    450                 paPhysAddrs[iPage] = vbi_va_to_pa(pv);
     958                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pv);
    451959                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
    452960                {
    453961                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
    454                     rc = -1;
     962                    rc = VERR_MAP_FAILED;
    455963                    break;
    456964                }
     
    458966            }
    459967        }
    460         if (!rc)
    461         {
     968        if (RT_SUCCESS(rc))
     969        {
     970            unsigned fPageAccess = PROT_READ;
     971            if (fProt & RTMEM_PROT_WRITE)
     972                fPageAccess |= PROT_WRITE;
     973            if (fProt & RTMEM_PROT_EXEC)
     974                fPageAccess |= PROT_EXEC;
     975
    462976            /*
    463977             * Perform the actual mapping.
    464978             */
    465979            caddr_t UserAddr = NULL;
    466             rc = vbi_user_map(&UserAddr, fProt, paPhysAddrs, cb);
    467             if (!rc)
     980            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb);
     981            if (RT_SUCCESS(rc))
    468982            {
    469983                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
     
    475989            }
    476990
    477             LogRel(("rtR0MemObjNativeMapUser: vbi_user_map failed.\n"));
    478         }
     991            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
     992        }
     993
    479994        rc = VERR_MAP_FAILED;
    480995        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
     
    4991014DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
    5001015{
    501     PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)pMem;
     1016    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
    5021017
    5031018    switch (pMemSolaris->Core.enmType)
     
    5071022            {
    5081023                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
    509                 return vbi_va_to_pa(pb);
     1024                return rtR0MemObjSolVirtToPhys(pb);
    5101025            }
    511             return vbi_page_to_pa(pMemSolaris->pvHandle, iPage);
     1026            return rtR0MemObjSolPageToPhys(pMemSolaris->pvHandle, iPage);
    5121027
    5131028        case RTR0MEMOBJTYPE_PAGE:
     
    5161031        {
    5171032            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
    518             return vbi_va_to_pa(pb);
     1033            return rtR0MemObjSolVirtToPhys(pb);
    5191034        }
    5201035
    5211036        /*
    522          * Although mapping can be handled by vbi_va_to_pa(offset) like the above case,
     1037         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
    5231038         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
    5241039         */
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/mp-r0drv-solaris.c

    r40216 r40966  
    3333#include <iprt/mp.h>
    3434#include <iprt/cpuset.h>
     35#include <iprt/thread.h>
    3536
    3637#include <iprt/asm.h>
     
    4142#include "r0drv/mp-r0drv.h"
    4243
     44typedef int FNRTMPSOLWORKER(void *pvUser1, void *pvUser2, void *pvUser3);
     45typedef FNRTMPSOLWORKER *PFNRTMPSOLWORKER;
    4346
    4447
     
    5154RTDECL(RTCPUID) RTMpCpuId(void)
    5255{
    53     return vbi_cpu_id();
     56    return CPU->cpu_id;
    5457}
    5558
     
    5760RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
    5861{
    59     return idCpu < RTCPUSET_MAX_CPUS && idCpu < vbi_cpu_maxcount() ? idCpu : -1;
     62    return idCpu < RTCPUSET_MAX_CPUS && idCpu <= max_cpuid ? idCpu : -1;
    6063}
    6164
     
    6366RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
    6467{
    65     return (unsigned)iCpu < vbi_cpu_maxcount() ? iCpu : NIL_RTCPUID;
     68    return (unsigned)iCpu <= max_cpuid ? iCpu : NIL_RTCPUID;
    6669}
    6770
     
    6972RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
    7073{
    71     return vbi_max_cpu_id();
     74    return max_cpuid;
    7275}
    7376
     
    7881     * We cannot query CPU status recursively, check cpu member from cached set.
    7982     */
    80     if (idCpu >= vbi_cpu_count())
     83    if (idCpu >= ncpus)
    8184        return false;
    8285
    83     return RTCpuSetIsMember(&g_rtMpSolarisCpuSet, idCpu);
    84 
    85 #if 0
    86     return idCpu < vbi_cpu_count() && vbi_cpu_online(idCpu);
    87 #endif
     86    return RTCpuSetIsMember(&g_rtMpSolCpuSet, idCpu);
    8887}
    8988
     
    9190RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
    9291{
    93     return idCpu < vbi_cpu_count();
     92    return idCpu < ncpus;
    9493}
    9594
     
    113112RTDECL(RTCPUID) RTMpGetCount(void)
    114113{
    115     return vbi_cpu_count();
     114    return ncpus;
    116115}
    117116
     
    122121     * We cannot query CPU status recursively, return the cached set.
    123122     */
    124     *pSet = g_rtMpSolarisCpuSet;
     123    *pSet = g_rtMpSolCpuSet;
    125124    return pSet;
    126125}
     
    135134
    136135
     136/**
     137 * Wrapper to Solaris IPI infrastructure.
     138 *
     139 * @param    pCpuSet        Pointer to Solaris CPU set.
     140 * @param    pfnSolWorker     Function to execute on target CPU(s).
     141 * @param     pArgs            Pointer to RTMPARGS to pass to @a pfnSolWorker.
     142 *
     143 * @remarks Does not return a value; the function is declared void, so any
     144 *          Solaris cross-call error is not propagated to the caller.
     144 */
     145static void rtMpSolCrossCall(PRTSOLCPUSET pCpuSet, PFNRTMPSOLWORKER pfnSolWorker, PRTMPARGS pArgs)
     146{
     147    AssertPtrReturnVoid(pCpuSet);
     148    AssertPtrReturnVoid(pfnSolWorker);
     149    AssertPtrReturnVoid(pCpuSet);
     150
     151    if (g_frtSolOldIPI)
     152    {
     153        if (g_frtSolOldIPIUlong)
     154        {
     155            g_rtSolXcCall.u.pfnSol_xc_call_old_ulong((xc_arg_t)pArgs,          /* Arg to IPI function */
     156                                                     0,                        /* Arg2, ignored */
     157                                                     0,                        /* Arg3, ignored */
     158                                                     IPRT_SOL_X_CALL_HIPRI,    /* IPI priority */
     159                                                     pCpuSet->auCpus[0],       /* Target CPU(s) */
     160                                                     (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
     161        }
     162        else
     163        {
     164            g_rtSolXcCall.u.pfnSol_xc_call_old((xc_arg_t)pArgs,          /* Arg to IPI function */
     165                                               0,                        /* Arg2, ignored */
     166                                               0,                        /* Arg3, ignored */
     167                                               IPRT_SOL_X_CALL_HIPRI,    /* IPI priority */
     168                                               *pCpuSet,                 /* Target CPU set */
     169                                               (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
     170        }
     171    }
     172    else
     173    {
     174        g_rtSolXcCall.u.pfnSol_xc_call((xc_arg_t)pArgs,          /* Arg to IPI function */
     175                                       0,                        /* Arg2 */
     176                                       0,                        /* Arg3 */
     177                                       &pCpuSet->auCpus[0],      /* Target CPU set */
     178                                       (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
     179    }
     180}
     181
    137182
    138183/**
     
    144189 * @param   uIgnored2   Ignored.
    145190 */
    146 static int rtmpOnAllSolarisWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
     191static int rtMpSolOnAllCpuWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
    147192{
    148193    PRTMPARGS pArgs = (PRTMPARGS)(uArg);
     
    174219    Args.cHits = 0;
    175220
    176     vbi_preempt_disable();
    177 
    178     vbi_execute_on_all(rtmpOnAllSolarisWrapper, &Args);
    179 
    180     vbi_preempt_enable();
     221    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     222    RTThreadPreemptDisable(&PreemptState);
     223
     224    RTSOLCPUSET CpuSet;
     225    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
     226        CpuSet.auCpus[i] = (ulong_t)-1L;
     227
     228    rtMpSolCrossCall(&CpuSet, rtMpSolOnAllCpuWrapper, &Args);
     229
     230    RTThreadPreemptRestore(&PreemptState);
    181231
    182232    return VINF_SUCCESS;
     
    192242 * @param   uIgnored2   Ignored.
    193243 */
    194 static int rtmpOnOthersSolarisWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
     244static int rtMpSolOnOtherCpusWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
    195245{
    196246    PRTMPARGS pArgs = (PRTMPARGS)(uArg);
     
    210260    RTMPARGS Args;
    211261    RT_ASSERT_INTS_ON();
    212 
    213     /* The caller is supposed to have disabled preemption, but take no chances. */
    214     vbi_preempt_disable();
    215262
    216263    Args.pfnWorker = pfnWorker;
     
    220267    Args.cHits = 0;
    221268
    222     vbi_execute_on_others(rtmpOnOthersSolarisWrapper, &Args);
    223 
    224     vbi_preempt_enable();
     269    /* The caller is supposed to have disabled preemption, but take no chances. */
     270    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     271    RTThreadPreemptDisable(&PreemptState);
     272
     273    RTSOLCPUSET CpuSet;
     274    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
     275        CpuSet.auCpus[0] = (ulong_t)-1L;
     276    BT_CLEAR(CpuSet.auCpus, RTMpCpuId());
     277
     278    rtMpSolCrossCall(&CpuSet, rtMpSolOnOtherCpusWrapper, &Args);
     279
     280    RTThreadPreemptRestore(&PreemptState);
    225281
    226282    return VINF_SUCCESS;
     
    232288 * for the RTMpOnSpecific API.
    233289 *
    234  *
    235290 * @param   uArgs       Pointer to the RTMPARGS package.
    236291 * @param   uIgnored1   Ignored.
    237292 * @param   uIgnored2   Ignored.
    238  */
    239 static int rtmpOnSpecificSolarisWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
     293 *
     294 * @returns Solaris error code.
     295 */
     296static int rtMpSolOnSpecificCpuWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
    240297{
    241298    PRTMPARGS pArgs = (PRTMPARGS)(uArg);
     
    257314    RT_ASSERT_INTS_ON();
    258315
    259     if (idCpu >= vbi_cpu_count())
     316    if (idCpu >= ncpus)
    260317        return VERR_CPU_NOT_FOUND;
    261318
     
    269326    Args.cHits = 0;
    270327
    271     vbi_preempt_disable();
    272 
    273     vbi_execute_on_one(rtmpOnSpecificSolarisWrapper, &Args, idCpu);
    274 
    275     vbi_preempt_enable();
     328    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     329    RTThreadPreemptDisable(&PreemptState);
     330
     331    RTSOLCPUSET CpuSet;
     332    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
     333        CpuSet.auCpus[i] = 0;
     334    BT_SET(CpuSet.auCpus, idCpu);
     335
     336    rtMpSolCrossCall(&CpuSet, rtMpSolOnSpecificCpuWrapper, &Args);
     337
     338    RTThreadPreemptRestore(&PreemptState);
    276339
    277340    Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c

    r40227 r40966  
    4242*   Global Variables                                                           *
    4343*******************************************************************************/
    44 /** CPU watch callback handle. */
    45 static vbi_cpu_watch_t *g_hVbiCpuWatch = NULL;
     44/** Whether CPUs are being watched or not. */
     45static volatile bool g_fSolCpuWatch = false;
    4646/** Set of online cpus that is maintained by the MP callback.
    4747 * This avoids locking issues querying the set from the kernel as well as
    4848 * eliminating any uncertainty regarding the online status during the
    4949 * callback. */
    50 RTCPUSET g_rtMpSolarisCpuSet;
     50RTCPUSET g_rtMpSolCpuSet;
     51
     52/**
     53 * Internal Solaris representation for watching CPUs.
     54 */
     55typedef struct RTMPSOLWATCHCPUS
     56{
     57    /** Function pointer to Mp worker. */
     58    PFNRTMPWORKER   pfnWorker;
     59    /** Argument to pass to the Mp worker. */
     60    void           *pvArg;
     61} RTMPSOLWATCHCPUS;
     62typedef RTMPSOLWATCHCPUS *PRTMPSOLWATCHCPUS;
    5163
    5264
    53 static void rtMpNotificationSolarisOnCurrentCpu(void *pvArgs, void *uIgnored1, void *uIgnored2)
     65/**
     66 * PFNRTMPWORKER worker for executing Mp events on the target CPU.
     67 *
     68 * @param    idCpu          The current CPU Id.
     69 * @param    pvArg          Opaque pointer to event type (online/offline).
     70 * @param    pvIgnored1     Ignored.
     71 */
     72static void rtMpNotificationSolOnCurrentCpu(RTCPUID idCpu, void *pvArg, void *pvIgnored1)
    5473{
    55     NOREF(uIgnored1);
    56     NOREF(uIgnored2);
     74    NOREF(pvIgnored1);
     75    NOREF(idCpu);
    5776
    58     PRTMPARGS pArgs = (PRTMPARGS)(pvArgs);
     77    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    5978    AssertRelease(pArgs && pArgs->idCpu == RTMpCpuId());
    60     Assert(pArgs->pvUser2);
     79    Assert(pArgs->pvUser1);
    6180    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    6281
    63     int online = *(int *)pArgs->pvUser2;
    64     if (online)
     82    RTMPEVENT enmMpEvent = *(RTMPEVENT *)pArgs->pvUser1;
     83    rtMpNotificationDoCallbacks(enmMpEvent, pArgs->idCpu);
     84}
     85
     86
     87/**
     88 * Solaris callback function for Mp event notification.
     89 *
     90 * @param    CpuState   The current event/state of the CPU.
     91 * @param    iCpu       The CPU this event is for.
     92 * @param    pvArg      Ignored.
     93 *
     94 * @remarks This function assumes index == RTCPUID.
     95 * @returns Solaris error code.
     96 */
     97static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
     98{
     99    RTMPEVENT enmMpEvent;
     100
     101    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     102    RTThreadPreemptDisable(&PreemptState);
     103
     104    /*
     105     * Update our CPU set structures first regardless of whether we've been
     106     * scheduled on the right CPU or not, this is just atomic accounting.
     107     */
     108    if (CpuState == CPU_ON)
    65109    {
    66         RTCpuSetAdd(&g_rtMpSolarisCpuSet, pArgs->idCpu);
    67         rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, pArgs->idCpu);
     110        enmMpEvent = RTMPEVENT_ONLINE;
     111        RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
     112    }
     113    else if (CpuState == CPU_OFF)
     114    {
     115        enmMpEvent = RTMPEVENT_OFFLINE;
     116        RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
     117    }
     118    else
     119        return 0;
     120
     121    /*
     122     * Since we don't absolutely need to do CPU bound code in any of the CPU offline
     123     * notification hooks, run it on the current CPU. Scheduling a callback to execute
     124     * on the CPU going offline at this point is too late and will not work reliably.
     125     */
     126    bool fRunningOnTargetCpu = iCpu == RTMpCpuId();
     127    if (   fRunningOnTargetCpu == true
     128        || enmMpEvent == RTMPEVENT_OFFLINE)
     129    {
     130        rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
    68131    }
    69132    else
    70133    {
    71         RTCpuSetDel(&g_rtMpSolarisCpuSet, pArgs->idCpu);
    72         rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, pArgs->idCpu);
    73     }
    74 }
    75 
    76 
    77 static void rtMpNotificationSolarisCallback(void *pvUser, int iCpu, int online)
    78 {
    79     vbi_preempt_disable();
    80 
    81     RTMPARGS Args;
    82     RT_ZERO(Args);
    83     Args.pvUser1 = pvUser;
    84     Args.pvUser2 = &online;
    85     Args.idCpu   = iCpu;
    86 
    87     /*
    88      * If we're not on the target CPU, schedule (synchronous) the event notification callback
    89      * to run on the target CPU i.e. the one pertaining to the MP event.
    90      */
    91     bool fRunningOnTargetCpu = iCpu == RTMpCpuId();      /* ASSUMES iCpu == RTCPUID */
    92     if (fRunningOnTargetCpu)
    93         rtMpNotificationSolarisOnCurrentCpu(&Args, NULL /* pvIgnored1 */, NULL /* pvIgnored2 */);
    94     else
    95     {
    96         if (online)
    97             vbi_execute_on_one(rtMpNotificationSolarisOnCurrentCpu, &Args, iCpu);
    98         else
    99         {
    100             /*
    101              * Since we don't absolutely need to do CPU bound code in any of the CPU offline
    102              * notification hooks, run it on the current CPU. Scheduling a callback to execute
    103              * on the CPU going offline at this point is too late and will not work reliably.
    104              */
    105             RTCpuSetDel(&g_rtMpSolarisCpuSet, iCpu);
    106             rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, iCpu);
    107         }
     134        /*
     135         * We're not on the target CPU; synchronously schedule the event notification
     136         * callback to run on the target CPU, i.e. the CPU that was brought online.
     137         */
     138        RTMPARGS Args;
     139        RT_ZERO(Args);
     140        Args.pvUser1 = &enmMpEvent;
     141        Args.pvUser2 = NULL;
     142        Args.idCpu   = iCpu;
     143        RTMpOnSpecific(iCpu, rtMpNotificationSolOnCurrentCpu, &Args, NULL /* pvIgnored1 */);
    108144    }
    109145
    110     vbi_preempt_enable();
     146    RTThreadPreemptRestore(&PreemptState);
     147
     148    NOREF(pvArg);
     149    return 0;
    111150}
    112151
     
    114153DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
    115154{
    116     if (g_hVbiCpuWatch != NULL)
     155    if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
    117156        return VERR_WRONG_ORDER;
    118157
    119158    /*
    120      * Register the callback building the online cpu set as we
    121      * do so (current_too = 1).
     159     * Register the callback building the online cpu set as we do so.
    122160     */
    123     RTCpuSetEmpty(&g_rtMpSolarisCpuSet);
    124     g_hVbiCpuWatch = vbi_watch_cpus(rtMpNotificationSolarisCallback, NULL, 1 /*current_too*/);
     161    RTCpuSetEmpty(&g_rtMpSolCpuSet);
     162
     163    mutex_enter(&cpu_lock);
     164    register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
     165
     166    for (int i = 0; i < (int)RTMpGetCount(); ++i)
     167        if (cpu_is_online(cpu[i]))
     168            rtMpNotificationCpuEvent(CPU_ON, i, NULL /* pvArg */);
     169
     170    ASMAtomicWriteBool(&g_fSolCpuWatch, true);
     171    mutex_exit(&cpu_lock);
    125172
    126173    return VINF_SUCCESS;
     
    130177DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
    131178{
    132     if (g_hVbiCpuWatch != NULL)
    133         vbi_ignore_cpus(g_hVbiCpuWatch);
    134     g_hVbiCpuWatch = NULL;
     179    if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
     180    {
     181        mutex_enter(&cpu_lock);
     182        unregister_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
     183        ASMAtomicWriteBool(&g_fSolCpuWatch, false);
     184        mutex_exit(&cpu_lock);
     185    }
    135186}
    136187
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/process-r0drv-solaris.c

    r28800 r40966  
    4343RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
    4444{
    45     return (RTR0PROCESS)vbi_proc();
     45    proc_t *pProcess = NULL;
     46    drv_getparm(UPROCP, &pProcess);
     47    return (RTR0PROCESS)pProcess;
    4648}
    4749
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread-r0drv-solaris.c

    r39443 r40966  
    4040#include <iprt/mp.h>
    4141
    42 
     42#define SOL_THREAD_PREEMPT       (*((char *)curthread + g_offrtSolThreadPreempt))
     43#define SOL_CPU_RUNRUN           (*((char *)CPU + g_offrtSolCpuPreempt))
     44#define SOL_CPU_KPRUNRUN         (*((char *)CPU + g_offrtSolCpuForceKernelPreempt))
    4345
    4446RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
    4547{
    46     return (RTNATIVETHREAD)vbi_curthread();
     48    return (RTNATIVETHREAD)curthread;
    4749}
    4850
     
    5557    if (!cMillies)
    5658    {
    57         vbi_yield();
     59        RTThreadYield();
    5860        return VINF_SUCCESS;
    5961    }
     
    8486{
    8587    RT_ASSERT_PREEMPTIBLE();
    86     return vbi_yield();
     88
     89    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     90    RTThreadPreemptDisable(&PreemptState);
     91
     92    char cThreadPreempt = SOL_THREAD_PREEMPT;
     93    char cForcePreempt  = SOL_CPU_KPRUNRUN;
     94    bool fWillYield = false;
     95    Assert(cThreadPreempt >= 1);
     96
     97    /*
     98     * If we are the last preemption enabler for this thread and if force
     99     * preemption is set on the CPU, only then we are guaranteed to be preempted.
     100     */
     101    if (cThreadPreempt == 1 && cForcePreempt != 0)
     102        fWillYield = true;
     103
     104    RTThreadPreemptRestore(&PreemptState);
     105    return fWillYield;
    87106}
    88107
     
    91110{
    92111    Assert(hThread == NIL_RTTHREAD);
    93     if (!vbi_is_preempt_enabled())
     112    if (RT_UNLIKELY(g_frtSolInitDone == false))
     113    {
     114        cmn_err(CE_CONT, "!RTThreadPreemptIsEnabled called before RTR0Init!\n");
     115        return true;
     116    }
     117
     118    bool fThreadPreempt = false;
     119    if (SOL_THREAD_PREEMPT == 0)
     120        fThreadPreempt = true;
     121
     122    if (!fThreadPreempt)
    94123        return false;
    95124#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     
    106135{
    107136    Assert(hThread == NIL_RTTHREAD);
    108     return !!vbi_is_preempt_pending();
     137
     138    char cPreempt      = SOL_CPU_RUNRUN;
     139    char cForcePreempt = SOL_CPU_KPRUNRUN;
     140    return (cPreempt != 0 || cForcePreempt != 0);
    109141}
    110142
     
    128160    AssertPtr(pState);
    129161
    130     vbi_preempt_disable();
     162    SOL_THREAD_PREEMPT++;
     163    Assert(SOL_THREAD_PREEMPT >= 1);
    131164
    132165    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
     
    139172    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
    140173
    141     vbi_preempt_enable();
     174    Assert(SOL_THREAD_PREEMPT >= 1);
     175    if (--SOL_THREAD_PREEMPT == 0 && SOL_CPU_RUNRUN != 0)
     176        kpreempt(KPREEMPT_SYNC);
    142177}
    143178
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread2-r0drv-solaris.c

    r36555 r40966  
    3232#include "internal/iprt.h"
    3333#include <iprt/thread.h>
     34#include <iprt/process.h>
    3435
    3536#include <iprt/assert.h>
     
    6768    }
    6869
    69     vbi_set_priority(vbi_curthread(), iPriority);
     70    kthread_t *pCurThread = curthread;
     71    Assert(pCurThread);
     72    thread_lock(pCurThread);
     73    thread_change_pri(pCurThread, iPriority, 0);
     74    thread_unlock(pCurThread);
    7075    return VINF_SUCCESS;
    7176}
     
    96101    PRTTHREADINT pThreadInt = (PRTTHREADINT)pvThreadInt;
    97102
    98     rtThreadMain(pThreadInt, (RTNATIVETHREAD)vbi_curthread(), &pThreadInt->szName[0]);
    99     vbi_thread_exit();
     103    rtThreadMain(pThreadInt, RTThreadNativeSelf(), &pThreadInt->szName[0]);
     104    thread_exit();
    100105}
    101106
     
    103108DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
    104109{
    105     void   *pvKernThread;
    106110    RT_ASSERT_PREEMPTIBLE();
    107 
    108     pvKernThread = vbi_thread_create(rtThreadNativeMain, pThreadInt, sizeof(pThreadInt), minclsyspri);
    109     if (pvKernThread)
     111    kthread_t *pThread = thread_create(NULL,                            /* Stack, use base */
     112                                       0,                               /* Stack size */
     113                                       rtThreadNativeMain,              /* Thread function */
     114                                       pThreadInt,                      /* Function data */
     115                                       sizeof(pThreadInt),              /* Data size*/
     116                                       (proc_t *)RTR0ProcHandleSelf(),  /* Process handle */
     117                                       TS_RUN,                          /* Ready to run */
     118                                       minclsyspri                      /* Priority */
     119                                       );
     120    if (RT_LIKELY(pThread))
    110121    {
    111         *pNativeThread = (RTNATIVETHREAD)pvKernThread;
     122        *pNativeThread = (RTNATIVETHREAD)pThread;
    112123        return VINF_SUCCESS;
    113124    }
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/time-r0drv-solaris.c

    r28800 r40966  
    6161RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
    6262{
    63     return RTTimeSpecSetNano(pTime, vbi_tod());
     63    timestruc_t TimeSpec;
     64
     65    mutex_enter(&tod_lock);
     66    TimeSpec = tod_get();
     67    mutex_exit(&tod_lock);
     68    return RTTimeSpecSetNano(pTime, (uint64_t)TimeSpec.tv_sec * 1000000000 + TimeSpec.tv_nsec);
    6469}
    6570
  • trunk/src/VBox/Runtime/r0drv/solaris/vbi/timer-r0drv-solaris.c

    r37275 r40966  
    4646#include "internal/magics.h"
    4747
     48#define SOL_TIMER_ANY_CPU       (-1)
    4849
    4950/*******************************************************************************
    5051*   Structures and Typedefs                                                    *
    5152*******************************************************************************/
     53/**
     54 * Single-CPU timer handle.
     55 */
     56typedef struct RTR0SINGLETIMERSOL
     57{
     58    /** Cyclic handler. */
     59    cyc_handler_t           hHandler;
     60    /** Cyclic time and interval representation. */
     61    cyc_time_t              hFireTime;
     62    /** Timer ticks. */
     63    uint64_t                u64Tick;
     64} RTR0SINGLETIMERSOL;
     65typedef RTR0SINGLETIMERSOL *PRTR0SINGLETIMERSOL;
     66
     67/**
     68 * Omni-CPU timer handle.
     69 */
     70typedef struct RTR0OMNITIMERSOL
     71{
     72    /** Absolute timestamp of when the timer should fire next. */
     73    uint64_t                u64When;
     74    /** Array of timer ticks per CPU. Reinitialized when a CPU is online'd. */
     75    uint64_t               *au64Ticks;
     76} RTR0OMNITIMERSOL;
     77typedef RTR0OMNITIMERSOL *PRTR0OMNITIMERSOL;
     78
    5279/**
    5380 * The internal representation of a Solaris timer handle.
     
    6188    /** Flag indicating that the timer is suspended. */
    6289    uint8_t volatile        fSuspended;
    63     /** Run on all CPUs if set */
     90    /** Whether the timer must run on all CPUs or not. */
    6491    uint8_t                 fAllCpu;
    6592    /** Whether the timer must run on a specific CPU or not. */
     
    6794    /** The CPU it must run on if fSpecificCpu is set. */
    6895    uint8_t                 iCpu;
    69     /** The nano second interval for repeating timers */
     96    /** The nano second interval for repeating timers. */
    7097    uint64_t                interval;
    71     /** simple Solaris timer handle. */
    72     vbi_stimer_t           *stimer;
    73     /** global Solaris timer handle. */
    74     vbi_gtimer_t           *gtimer;
     98    /** Cyclic timer Id. */
     99    cyclic_id_t             hCyclicId;
     100    /** @todo Make this a union unless we intend to support omni<=>single timers
     101     *        conversions. */
     102    /** Single-CPU timer handle. */
     103    PRTR0SINGLETIMERSOL     pSingleTimer;
     104    /** Omni-CPU timer handle. */
     105    PRTR0OMNITIMERSOL       pOmniTimer;
    75106    /** The user callback. */
    76107    PFNRTTIMER              pfnTimer;
     
    88119    { \
    89120        AssertPtrReturn(pTimer, VERR_INVALID_HANDLE); \
    90         AssertReturn((pTimer)->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE); \
     121        AssertMsgReturn((pTimer)->u32Magic == RTTIMER_MAGIC, ("pTimer=%p u32Magic=%x expected %x\n", (pTimer), (pTimer)->u32Magic, RTTIMER_MAGIC), \
     122            VERR_INVALID_HANDLE); \
    91123    } while (0)
    92124
    93125
    94 /*
    95  * Need a wrapper to get the PRTTIMER passed through
    96  */
    97 static void rtTimerSolarisCallbackWrapper(PRTTIMER pTimer, uint64_t tick)
    98 {
    99     pTimer->pfnTimer(pTimer, pTimer->pvUser, tick);
    100 }
    101 
    102 
     126/**
     127 * Callback wrapper for Omni-CPU and single-CPU timers.
     128 *
     129 * @param    pvArg              Opaque pointer to the timer.
     130 *
     131 * @remarks This will be executed in interrupt context but only at the specified
     132 *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
     133 *          cyclic subsystem here, neither should pfnTimer().
     134 */
     135static void rtTimerSolCallbackWrapper(void *pvArg)
     136{
     137    PRTTIMER pTimer = (PRTTIMER)pvArg;
     138    AssertPtrReturnVoid(pTimer);
     139
     140    if (pTimer->pSingleTimer)
     141    {
     142        uint64_t u64Tick = ++pTimer->pSingleTimer->u64Tick;
     143        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
     144    }
     145    else if (pTimer->pOmniTimer)
     146    {
     147        uint64_t u64Tick = ++pTimer->pOmniTimer->au64Ticks[CPU->cpu_id];
     148        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
     149    }
     150}
     151
     152
     153/**
     154 * Omni-CPU cyclic online event. This is called before the omni cycle begins to
     155 * fire on the specified CPU.
     156 *
     157 * @param    pvArg              Opaque pointer to the timer.
     158 * @param    pCpu               Pointer to the CPU on which it will fire.
     159 * @param    pCyclicHandler     Pointer to a cyclic handler to add to the CPU
     160 *                              specified in @a pCpu.
     161 * @param    pCyclicTime        Pointer to the cyclic time and interval object.
     162 *
     163 * @remarks We -CANNOT- call back into the cyclic subsystem here, we can however
     164 *          block (sleep).
     165 */
     166static void rtTimerSolOmniCpuOnline(void *pvArg, cpu_t *pCpu, cyc_handler_t *pCyclicHandler, cyc_time_t *pCyclicTime)
     167{
     168    PRTTIMER pTimer = (PRTTIMER)pvArg;
     169    AssertPtrReturnVoid(pTimer);
     170    AssertPtrReturnVoid(pCpu);
     171    AssertPtrReturnVoid(pCyclicHandler);
     172    AssertPtrReturnVoid(pCyclicTime);
     173
     174    pTimer->pOmniTimer->au64Ticks[pCpu->cpu_id] = 0;
     175    pCyclicHandler->cyh_func  = rtTimerSolCallbackWrapper;
     176    pCyclicHandler->cyh_arg   = pTimer;
     177    pCyclicHandler->cyh_level = CY_LOCK_LEVEL;
     178
     179    uint64_t u64Now = RTTimeNanoTS();
     180    if (pTimer->pOmniTimer->u64When < u64Now)
     181        pCyclicTime->cyt_when = u64Now + pTimer->interval / 2;
     182    else
     183        pCyclicTime->cyt_when = pTimer->pOmniTimer->u64When;
     184
     185    pCyclicTime->cyt_interval = pTimer->interval;
     186}
    103187
    104188
     
    152236    pTimer->pfnTimer = pfnTimer;
    153237    pTimer->pvUser = pvUser;
    154     pTimer->stimer = NULL;
    155     pTimer->gtimer = NULL;
    156 
     238    pTimer->pSingleTimer = NULL;
     239    pTimer->pOmniTimer = NULL;
     240    pTimer->hCyclicId = CYCLIC_NONE;
     241
     242    cmn_err(CE_NOTE, "Create pTimer->u32Magic=%x RTTIMER_MAGIC=%x\n",  pTimer->u32Magic, RTTIMER_MAGIC);
    157243    *ppTimer = pTimer;
    158244    return VINF_SUCCESS;
     
    179265RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
    180266{
     267    cmn_err(CE_NOTE, "Start pTimer->u32Magic=%x RTTIMER_MAGIC=%x\n",  pTimer->u32Magic, RTTIMER_MAGIC);
    181268    RTTIMER_ASSERT_VALID_RET(pTimer);
    182269    RT_ASSERT_INTS_ON();
     
    185272        return VERR_TIMER_ACTIVE;
    186273
     274    /* One-shot timers are not supported by the cyclic system. */
     275    if (pTimer->interval == 0)
     276        return VERR_NOT_SUPPORTED;
     277
    187278    pTimer->fSuspended = false;
    188279    if (pTimer->fAllCpu)
    189280    {
    190         pTimer->gtimer = vbi_gtimer_begin(rtTimerSolarisCallbackWrapper, pTimer, u64First, pTimer->interval);
    191         if (pTimer->gtimer == NULL)
    192             return VERR_INVALID_PARAMETER;
     281        PRTR0OMNITIMERSOL pOmniTimer = RTMemAllocZ(sizeof(RTR0OMNITIMERSOL));
     282        if (RT_UNLIKELY(!pOmniTimer))
     283            return VERR_NO_MEMORY;
     284
     285        pOmniTimer->au64Ticks = RTMemAllocZ(RTMpGetCount() * sizeof(uint64_t));
     286        if (RT_UNLIKELY(!pOmniTimer->au64Ticks))
     287        {
     288            RTMemFree(pOmniTimer);
     289            return VERR_NO_MEMORY;
     290        }
     291
     292        /*
     293         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
     294         * and from there we setup periodic timers per CPU.
     295         */
     296        pTimer->pOmniTimer = pOmniTimer;
     297        pOmniTimer->u64When     = pTimer->interval + RTTimeNanoTS();
     298
     299        cyc_omni_handler_t hOmni;
     300        hOmni.cyo_online        = rtTimerSolOmniCpuOnline;
     301        hOmni.cyo_offline       = NULL;
     302        hOmni.cyo_arg           = pTimer;
     303
     304        mutex_enter(&cpu_lock);
     305        pTimer->hCyclicId = cyclic_add_omni(&hOmni);
     306        mutex_exit(&cpu_lock);
    193307    }
    194308    else
    195309    {
    196         int iCpu = VBI_ANY_CPU;
     310        int iCpu = SOL_TIMER_ANY_CPU;
    197311        if (pTimer->fSpecificCpu)
     312        {
    198313            iCpu = pTimer->iCpu;
    199         pTimer->stimer = vbi_stimer_begin(rtTimerSolarisCallbackWrapper, pTimer, u64First, pTimer->interval, iCpu);
    200         if (pTimer->stimer == NULL)
     314            if (!RTMpIsCpuOnline(iCpu))    /* ASSUMES: index == cpuid */
     315                return VERR_CPU_OFFLINE;
     316        }
     317
     318        PRTR0SINGLETIMERSOL pSingleTimer = RTMemAllocZ(sizeof(RTR0SINGLETIMERSOL));
     319        if (RT_UNLIKELY(!pSingleTimer))
     320            return VERR_NO_MEMORY;
     321
     322        pTimer->pSingleTimer = pSingleTimer;
     323        pSingleTimer->hHandler.cyh_func  = rtTimerSolCallbackWrapper;
     324        pSingleTimer->hHandler.cyh_arg   = pTimer;
     325        pSingleTimer->hHandler.cyh_level = CY_LOCK_LEVEL;
     326
     327        mutex_enter(&cpu_lock);
     328        if (iCpu != SOL_TIMER_ANY_CPU && !cpu_is_online(cpu[iCpu]))
    201329        {
    202             if (iCpu != VBI_ANY_CPU)
    203                 return VERR_CPU_OFFLINE;
    204             return VERR_INVALID_PARAMETER;
     330            mutex_exit(&cpu_lock);
     331            RTMemFree(pSingleTimer);
     332            pTimer->pSingleTimer = NULL;
     333            return VERR_CPU_OFFLINE;
    205334        }
     335
     336        pSingleTimer->hFireTime.cyt_when = u64First + RTTimeNanoTS();
     337        if (pTimer->interval == 0)
     338        {
     339            /* @todo use gethrtime_max instead of LLONG_MAX? */
     340            AssertCompileSize(pSingleTimer->hFireTime.cyt_interval, sizeof(long long));
     341            pSingleTimer->hFireTime.cyt_interval = LLONG_MAX - pSingleTimer->hFireTime.cyt_when;
     342        }
     343        else
     344            pSingleTimer->hFireTime.cyt_interval = pTimer->interval;
     345
     346        pTimer->hCyclicId = cyclic_add(&pSingleTimer->hHandler, &pSingleTimer->hFireTime);
     347        if (iCpu != SOL_TIMER_ANY_CPU)
     348            cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);
     349
     350        mutex_exit(&cpu_lock);
    206351    }
    207352
     
    219364
    220365    pTimer->fSuspended = true;
    221     if (pTimer->stimer)
    222     {
    223         vbi_stimer_end(pTimer->stimer);
    224         pTimer->stimer = NULL;
    225     }
    226     else if (pTimer->gtimer)
    227     {
    228         vbi_gtimer_end(pTimer->gtimer);
    229         pTimer->gtimer = NULL;
     366    if (pTimer->pSingleTimer)
     367    {
     368        mutex_enter(&cpu_lock);
     369        cyclic_remove(pTimer->hCyclicId);
     370        mutex_exit(&cpu_lock);
     371        RTMemFree(pTimer->pSingleTimer);
     372    }
     373    else if (pTimer->pOmniTimer)
     374    {
     375        mutex_enter(&cpu_lock);
     376        cyclic_remove(pTimer->hCyclicId);
     377        mutex_exit(&cpu_lock);
     378        RTMemFree(pTimer->pOmniTimer->au64Ticks);
     379        RTMemFree(pTimer->pOmniTimer);
    230380    }
    231381
     
    234384
    235385
    236 
    237386RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
    238387{
     
    247396RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
    248397{
    249     return vbi_timer_granularity();
     398    return nsec_per_tick;
    250399}
    251400
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette