Changeset 40966 in vbox for trunk/src/VBox/Runtime/r0drv
- Timestamp:
- Apr 17, 2012 4:43:28 PM
- Location:
- trunk/src/VBox/Runtime/r0drv/solaris
- Files:
  - 1 added
  - 1 deleted
  - 16 edited
trunk/src/VBox/Runtime/r0drv/solaris/RTLogWriteDebugger-r0drv-solaris.c
r29281 → r40966

     if (pch[cb] != '\0')
         AssertBreakpoint();
-    if (    !g_frtSolarisSplSetsEIF
+    if (    !g_frtSolSplSetsEIF
 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
          || ASMIntAreEnabled()
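Note on the guard above: writing to the kernel debugger from a context that has interrupts disabled can hang the box, so the write is skipped unless interrupts are enabled or the spl routines are known to re-enable them anyway. A stand-in sketch of the pattern, assuming a hypothetical helper name and using cmn_err() in place of the file's actual debugger output call:

    #include <sys/cmn_err.h>
    #include <iprt/asm.h>

    extern bool g_frtSolSplSetsEIF;    /* set up by rtR0InitNative() */

    static void sketchWriteDebugger(const char *pch)
    {
        if (    !g_frtSolSplSetsEIF    /* spl won't re-enable interrupts behind our back */
    #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
             || ASMIntAreEnabled()     /* or interrupts are enabled right now */
    #endif
           )
            cmn_err(CE_CONT, "%s", pch);
    }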
trunk/src/VBox/Runtime/r0drv/solaris/dbg-r0drv-solaris.c
r40855 → r40966

     *ppCTF = ctf_modopen(((modctl_t *)*ppMod)->mod_mp, &err);
     mutex_exit(&mod_lock);
+    mod_release_mod(*ppMod);
 
     if (*ppCTF)
…
         rc = VERR_INTERNAL_ERROR_3;
     }
-
-    mod_release_mod(*ppMod);
 }
 else
…
 
     ctf_close(pCTF);
-    mod_release_mod(pMod);
 }
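The point of this change is ownership discipline: the module hold taken for the CTF lookup only needs to outlive ctf_modopen(), so it is now released once, immediately after the open, instead of separately on the success and failure paths. A condensed sketch of the resulting shape (the surrounding lookup code is hypothetical):

    modctl_t *pMod = mod_hold_by_name(pszModule);      /* take the hold */
    if (pMod)
    {
        int err;
        mutex_enter(&mod_lock);
        ctf_file_t *pCTF = ctf_modopen(pMod->mod_mp, &err);
        mutex_exit(&mod_lock);
        mod_release_mod(pMod);                         /* single release point */

        if (pCTF)
        {
            /* ... query the CTF data, then ctf_close(pCTF) ... */
        }
    }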
trunk/src/VBox/Runtime/r0drv/solaris/initterm-r0drv-solaris.c
r36555 → r40966

 #include "internal/iprt.h"
 
+#include <iprt/assert.h>
 #include <iprt/err.h>
 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
…
 *   Global Variables                                                          *
 *******************************************************************************/
+/** Kernel debug info handle. */
+RTDBGKRNLINFO                   g_hKrnlDbgInfo;
 /** Indicates that the spl routines (and therefore a bunch of other ones too)
  * will set EFLAGS::IF and break code that disables interrupts. */
-bool                            g_frtSolarisSplSetsEIF = false;
-
+bool                            g_frtSolSplSetsEIF = false;
 /** timeout_generic address. */
 PFNSOL_timeout_generic          g_pfnrtR0Sol_timeout_generic = NULL;
…
 /** cyclic_reprogram address. */
 PFNSOL_cyclic_reprogram         g_pfnrtR0Sol_cyclic_reprogram = NULL;
-
+/** Whether to use the kernel page freelist. */
+bool                            g_frtSolUseKflt = false;
+/** Whether we've completed R0 initialization. */
+bool                            g_frtSolInitDone = false;
+/** Whether to use old-style xc_call interface. */
+bool                            g_frtSolOldIPI = false;
+/** Whether to use old-style xc_call interface using one ulong_t as the CPU set
+ *  representation. */
+bool                            g_frtSolOldIPIUlong = false;
+/** The xc_call callout table structure. */
+RTR0FNSOLXCCALL                 g_rtSolXcCall;
+/** Thread preemption offset. */
+size_t                          g_offrtSolThreadPreempt;
+/** Host scheduler preemption offset. */
+size_t                          g_offrtSolCpuPreempt;
+/** Host scheduler force preemption offset. */
+size_t                          g_offrtSolCpuForceKernelPreempt;
+/* Resolve using dl_lookup (remove if no longer relevant for supported S10 versions) */
+extern void contig_free(void *addr, size_t size);
+#pragma weak contig_free
+/** contig_free address. */
+PFNSOL_contig_free              g_pfnrtR0Sol_contig_free = contig_free;
 
 DECLHIDDEN(int) rtR0InitNative(void)
 {
     /*
-     * Initialize vbi (keeping it separate for now)
+     * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging.
      */
-    int rc = vbi_init();
-    if (!rc)
+    int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */);
+    if (RT_SUCCESS(rc))
     {
 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
…
         int iOld = splr(DISP_LEVEL);
         if (ASMIntAreEnabled())
-            g_frtSolarisSplSetsEIF = true;
+            g_frtSolSplSetsEIF = true;
         splx(iOld);
         if (ASMIntAreEnabled())
-            g_frtSolarisSplSetsEIF = true;
+            g_frtSolSplSetsEIF = true;
         ASMSetFlags(uOldFlags);
 #else
…
 
         /*
-         * Dynamically resolve new symbols we want to use.
-         */
-        g_pfnrtR0Sol_timeout_generic   = (PFNSOL_timeout_generic  )kobj_getsymvalue("timeout_generic",   1);
-        g_pfnrtR0Sol_untimeout_generic = (PFNSOL_untimeout_generic)kobj_getsymvalue("untimeout_generic", 1);
+         * Mandatory: Preemption offsets.
+         */
+        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt);
+        if (RT_FAILURE(rc))
+        {
+            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n");
+            goto errorbail;
+        }
+
+        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt);
+        if (RT_FAILURE(rc))
+        {
+            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n");
+            goto errorbail;
+        }
+
+        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt);
+        if (RT_FAILURE(rc))
+        {
+            cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n");
+            goto errorbail;
+        }
+        cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx\n", g_offrtSolCpuPreempt);
+        cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx\n", g_offrtSolCpuForceKernelPreempt);
+        cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx\n", g_offrtSolThreadPreempt);
+
+        /*
+         * Mandatory: CPU cross call infrastructure. Refer the-solaris-kernel.h for details.
+         */
+        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */);
+        if (RT_SUCCESS(rc))
+        {
+            if (ncpus > IPRT_SOL_NCPUS)
+            {
+                cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS);
+                rc = VERR_NOT_SUPPORTED;
+                goto errorbail;
+            }
+            g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call;
+        }
+        else
+        {
+            g_frtSolOldIPI = true;
+            g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call;
+            if (max_cpuid + 1 == sizeof(ulong_t) * 8)
+            {
+                g_frtSolOldIPIUlong = true;
+                g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call;
+            }
+            else if (max_cpuid + 1 != IPRT_SOL_NCPUS)
+            {
+                cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid, IPRT_SOL_NCPUS);
+                rc = VERR_NOT_SUPPORTED;
+                goto errorbail;
+            }
+        }
+
+        /*
+         * Optional: Timeout hooks.
+         */
+        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic",   (void **)&g_pfnrtR0Sol_timeout_generic);
+        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic", (void **)&g_pfnrtR0Sol_untimeout_generic);
         if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL))
         {
…
             g_pfnrtR0Sol_untimeout_generic = NULL;
         }
-
-        g_pfnrtR0Sol_cyclic_reprogram = (PFNSOL_cyclic_reprogram )kobj_getsymvalue("cyclic_reprogram", 1);
-
-
+        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram", (void **)&g_pfnrtR0Sol_cyclic_reprogram);
+
+        /*
+         * Optional: Kernel page freelist (kflt)
+         *
+         * Only applicable to 64-bit Solaris kernels. Use kflt flags to get pages from kernel page freelists
+         * while allocating physical pages, once the userpages are exhausted. snv_161+, see @bugref{5632}.
+         */
+        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "kflt_init", NULL /* ppvSymbol */);
+        if (RT_SUCCESS(rc))
+        {
+            int *pKfltDisable = NULL;
+            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "kflt_disable", (void **)&pKfltDisable);
+            if (RT_SUCCESS(rc) && pKfltDisable && *pKfltDisable == 0)
+                g_frtSolUseKflt = true;
+        }
+
+        /*
+         * Weak binding failures: contig_free
+         */
+        if (g_pfnrtR0Sol_contig_free == NULL)
+        {
+            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free", (void **)&g_pfnrtR0Sol_contig_free);
+            if (RT_FAILURE(rc))
+            {
+                cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n");
+                goto errorbail;
+            }
+        }
+
+        g_frtSolInitDone = true;
         return VINF_SUCCESS;
     }
-    cmn_err(CE_NOTE, "vbi_init failed. rc=%d\n", rc);
-    return VERR_GENERAL_FAILURE;
+    else
+    {
+        cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc);
+        return rc;
+    }
+
+errorbail:
+    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
+    return rc;
 }
…
 DECLHIDDEN(void) rtR0TermNative(void)
 {
+    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
+    g_frtSolInitDone = false;
 }
trunk/src/VBox/Runtime/r0drv/solaris/semeventwait-r0drv-solaris.h
r36392 → r40966

     PRTR0SEMSOLWAIT pWait   = (PRTR0SEMSOLWAIT)pvUser;
     kthread_t      *pThread = pWait->pThread;
-    kmutex_t       *pMtx    = (kmutex_t *)ASMAtomicReadPtr(&pWait->pvMtx);
+    kmutex_t       *pMtx    = (kmutex_t *)ASMAtomicReadPtr((void * volatile *)&pWait->pvMtx);
     if (VALID_PTR(pMtx))
     {
…
 }
 
-#endif
+#endif /* ___r0drv_solaris_semeventwait_r0drv_solaris_h */
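Why the cast: ASMAtomicReadPtr() takes a 'void * volatile *' operand, so the explicit cast makes the call site match that prototype exactly. A minimal sketch of the idiom, assuming a shared pointer written by one context and read by another:

    void * volatile g_pvShared;

    static void *sketchReadShared(void)
    {
        /* Single atomic read; the cast matches ASMAtomicReadPtr's operand type. */
        return ASMAtomicReadPtr((void * volatile *)&g_pvShared);
    }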
trunk/src/VBox/Runtime/r0drv/solaris/semmutex-r0drv-solaris.c
r36190 → r40966

 /**
- * Worker for rtSemMutexSolarisRequest that handles the case where we go to sleep.
+ * Worker for rtSemMutexSolRequest that handles the case where we go to sleep.
  *
  * @returns VINF_SUCCESS, VERR_INTERRUPTED, or VERR_SEM_DESTROYED.
…
  * @remarks This needs to be called with the mutex object held!
  */
-static int rtSemMutexSolarisRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
+static int rtSemMutexSolRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                      bool fInterruptible)
 {
…
  * Internal worker.
  */
-DECLINLINE(int) rtSemMutexSolarisRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
+DECLINLINE(int) rtSemMutexSolRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
 {
     PRTSEMMUTEXINTERNAL pThis = hMutexSem;
…
      */
     else
-        rc = rtSemMutexSolarisRequestSleep(pThis, cMillies, fInterruptible);
+        rc = rtSemMutexSolRequestSleep(pThis, cMillies, fInterruptible);
 
     mutex_exit(&pThis->Mtx);
…
 RTDECL(int) RTSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
 {
-    return rtSemMutexSolarisRequest(hMutexSem, cMillies, false /*fInterruptible*/);
+    return rtSemMutexSolRequest(hMutexSem, cMillies, false /*fInterruptible*/);
 }
…
 RTDECL(int) RTSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies)
 {
-    return rtSemMutexSolarisRequest(hMutexSem, cMillies, true /*fInterruptible*/);
+    return rtSemMutexSolRequest(hMutexSem, cMillies, true /*fInterruptible*/);
 }
trunk/src/VBox/Runtime/r0drv/solaris/the-solaris-kernel.h
r40695 → r40966

 #include <sys/ctf_api.h>
 #include <sys/modctl.h>
-#include "vbi.h"
 
 #undef u /* /usr/include/sys/user.h:249:1 is where this is defined to (curproc->p_user). very cool. */
…
 #include <iprt/cdefs.h>
 #include <iprt/types.h>
+#include <iprt/dbg.h>
 
 RT_C_DECLS_BEGIN
 
+/* IPRT functions. */
+DECLHIDDEN(void *) rtR0SolMemAlloc(uint64_t cbPhysHi, uint64_t *puPhys, size_t cb, uint64_t cbAlign, bool fContig);
+DECLHIDDEN(void)   rtR0SolMemFree(void *pv, size_t cb);
+
+
+/* Solaris functions. */
 typedef callout_id_t (*PFNSOL_timeout_generic)(int type, void (*func)(void *),
                                                void *arg, hrtime_t expiration,
                                                hrtime_t resultion, int flags);
-typedef hrtime_t (*PFNSOL_untimeout_generic)(callout_id_t id, int nowait);
-typedef int (*PFNSOL_cyclic_reprogram)(cyclic_id_t id, hrtime_t expiration);
+typedef hrtime_t     (*PFNSOL_untimeout_generic)(callout_id_t id, int nowait);
+typedef int          (*PFNSOL_cyclic_reprogram)(cyclic_id_t id, hrtime_t expiration);
+typedef void         (*PFNSOL_contig_free)(void *addr, size_t size);
 
 /* IPRT globals. */
-extern bool g_frtSolarisSplSetsEIF;
+extern bool                     g_frtSolSplSetsEIF;
 extern struct ddi_dma_attr      g_SolarisX86PhysMemLimits;
-extern RTCPUSET                 g_rtMpSolarisCpuSet;
+extern RTCPUSET                 g_rtMpSolCpuSet;
 extern PFNSOL_timeout_generic   g_pfnrtR0Sol_timeout_generic;
 extern PFNSOL_untimeout_generic g_pfnrtR0Sol_untimeout_generic;
 extern PFNSOL_cyclic_reprogram  g_pfnrtR0Sol_cyclic_reprogram;
+extern PFNSOL_contig_free       g_pfnrtR0Sol_contig_free;
+extern bool                     g_frtSolUseKflt;
+extern size_t                   g_offrtSolThreadPreempt;
+extern size_t                   g_offrtSolCpuPreempt;
+extern size_t                   g_offrtSolCpuForceKernelPreempt;
+extern bool                     g_frtSolInitDone;
+extern RTDBGKRNLINFO            g_hKrnlDbgInfo;
+
+/*
+ * Workarounds for running on old versions of solaris with different cross call
+ * interfaces. If we find xc_init_cpu() in the kernel, then just use the
+ * defined interfaces for xc_call() from the include file where the xc_call()
+ * interfaces just takes a pointer to a ulong_t array. The array must be long
+ * enough to hold "ncpus" bits at runtime.
+ *
+ * The reason for the hacks is that using the type "cpuset_t" is pretty much
+ * impossible from code built outside the Solaris source repository that wants
+ * to run on multiple releases of Solaris.
+ *
+ * For old style xc_call()s, 32 bit solaris and older 64 bit versions use
+ * "ulong_t" as cpuset_t.
+ *
+ * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
+ * where "x" depends on NCPU.
+ *
+ * We detect the difference in 64 bit support by checking the kernel value of
+ * max_cpuid, which always holds the compiled value of NCPU - 1.
+ *
+ * If Solaris increases NCPU to more than 256, VBox will continue to work on
+ * all versions of Solaris as long as the number of installed CPUs in the
+ * machine is <= IPRT_SOLARIS_NCPUS. If IPRT_SOLARIS_NCPUS is increased, this
+ * code has to be re-written some to provide compatibility with older Solaris
+ * which expects cpuset_t to be based on NCPU==256 -- or we discontinue
+ * support of old Nevada/S10.
+ */
+#define IPRT_SOL_NCPUS          256
+#define IPRT_SOL_SET_WORDS      (IPRT_SOL_NCPUS / (sizeof(ulong_t) * 8))
+#define IPRT_SOL_X_CALL_HIPRI   (2)    /* for Old Solaris interface */
+typedef struct RTSOLCPUSET
+{
+    ulong_t                     auCpus[IPRT_SOL_SET_WORDS];
+} RTSOLCPUSET;
+typedef RTSOLCPUSET *PRTSOLCPUSET;
+
+/* Avoid warnings even if it means more typing... */
+typedef struct RTR0FNSOLXCCALL
+{
+    union
+    {
+        void *(*pfnSol_xc_call)          (xc_arg_t, xc_arg_t, xc_arg_t, ulong_t *, xc_func_t);
+        void *(*pfnSol_xc_call_old)      (xc_arg_t, xc_arg_t, xc_arg_t, int, RTSOLCPUSET, xc_func_t);
+        void *(*pfnSol_xc_call_old_ulong)(xc_arg_t, xc_arg_t, xc_arg_t, int, ulong_t, xc_func_t);
+    } u;
+} RTR0FNSOLXCCALL;
+typedef RTR0FNSOLXCCALL *PRTR0FNSOLXCCALL;
+
+extern RTR0FNSOLXCCALL          g_rtSolXcCall;
+extern bool                     g_frtSolOldIPI;
+extern bool                     g_frtSolOldIPIUlong;
+
 
 /* Solaris globals. */
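For reference, a condensed sketch of how the union above is meant to be dispatched at runtime; it mirrors the rtMpSolCrossCall() wrapper added to mp-r0drv-solaris.c further down (argument values are illustrative):

    static void sketchCrossCall(PRTSOLCPUSET pCpuSet, xc_func_t pfnWorker, xc_arg_t Arg)
    {
        if (g_frtSolOldIPI)
        {
            if (g_frtSolOldIPIUlong)    /* old kernels where cpuset_t is one ulong_t */
                g_rtSolXcCall.u.pfnSol_xc_call_old_ulong(Arg, 0, 0, IPRT_SOL_X_CALL_HIPRI,
                                                         pCpuSet->auCpus[0], pfnWorker);
            else                        /* old kernels where cpuset_t is a word-array struct */
                g_rtSolXcCall.u.pfnSol_xc_call_old(Arg, 0, 0, IPRT_SOL_X_CALL_HIPRI,
                                                   *pCpuSet, pfnWorker);
        }
        else                            /* new interface: plain ulong_t bit array */
            g_rtSolXcCall.u.pfnSol_xc_call(Arg, 0, 0, &pCpuSet->auCpus[0], pfnWorker);
    }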
trunk/src/VBox/Runtime/r0drv/solaris/vbi/RTMpPokeCpu-r0drv-solaris.c
r29300 → r40966

 {
     RT_ASSERT_INTS_ON();
-    vbi_poke_cpu(idCpu);
+    if (idCpu < ncpus)
+        poke_cpu(idCpu);
     return VINF_SUCCESS;
 }
trunk/src/VBox/Runtime/r0drv/solaris/vbi/alloc-r0drv-solaris.c
r36555 → r40966

 
 
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+static ddi_dma_attr_t s_rtR0SolDmaAttr =
+{
+    DMA_ATTR_V0,                /* Version Number */
+    (uint64_t)0,                /* Lower limit */
+    (uint64_t)0,                /* High limit */
+    (uint64_t)0xffffffff,       /* Counter limit */
+    (uint64_t)PAGESIZE,         /* Alignment */
+    (uint64_t)PAGESIZE,         /* Burst size */
+    (uint64_t)PAGESIZE,         /* Effective DMA size */
+    (uint64_t)0xffffffff,       /* Max DMA xfer size */
+    (uint64_t)0xffffffff,       /* Segment boundary */
+    1,                          /* Scatter-gather list length (1 for contiguous) */
+    1,                          /* Device granularity */
+    0                           /* Bus-specific flags */
+};
+
+extern void *contig_alloc(size_t cb, ddi_dma_attr_t *pDmaAttr, size_t uAlign, int fCanSleep);
+
 
 /**
…
         AssertReturn(!(fFlags & RTMEMHDR_FLAG_ANY_CTX), NULL);
         cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE) - sizeof(*pHdr);
-        pHdr = (PRTMEMHDR)vbi_text_alloc(cbAllocated + sizeof(*pHdr));
+        pHdr = (PRTMEMHDR)segkmem_alloc(heaptext_arena, cbAllocated + sizeof(*pHdr), KM_SLEEP);
     }
     else
…
 #ifdef RT_ARCH_AMD64
     if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC)
-        vbi_text_free(pHdr, pHdr->cb + sizeof(*pHdr));
+        segkmem_free(heaptext_arena, pHdr, pHdr->cb + sizeof(*pHdr));
     else
 #endif
…
 
 
+/**
+ * Allocates physical memory which satisfy the given constraints.
+ *
+ * @param   uPhysHi     The upper physical address limit (inclusive).
+ * @param   puPhys      Where to store the physical address of the allocated
+ *                      memory. Optional, can be NULL.
+ * @param   cb          Size of allocation.
+ * @param   uAlignment  Alignment.
+ * @param   fContig     Whether the memory must be physically contiguous or
+ *                      not.
+ *
+ * @returns Virtual address of allocated memory block or NULL if allocation
+ *          failed.
+ */
+DECLHIDDEN(void *) rtR0SolMemAlloc(uint64_t uPhysHi, uint64_t *puPhys, size_t cb, uint64_t uAlignment, bool fContig)
+{
+    if ((cb & PAGEOFFSET) != 0)
+        return NULL;
+
+    size_t cPages = (cb + PAGESIZE - 1) >> PAGESHIFT;
+    if (!cPages)
+        return NULL;
+
+    ddi_dma_attr_t DmaAttr  = s_rtR0SolDmaAttr;
+    DmaAttr.dma_attr_addr_hi = uPhysHi;
+    DmaAttr.dma_attr_align   = uAlignment;
+    if (!fContig)
+        DmaAttr.dma_attr_sgllen = cPages > INT_MAX ? INT_MAX - 1 : cPages;
+    else
+        AssertRelease(DmaAttr.dma_attr_sgllen == 1);
+
+    void *pvMem = contig_alloc(cb, &DmaAttr, PAGESIZE, 1 /* can sleep */);
+    if (!pvMem)
+    {
+        LogRel(("rtR0SolMemAlloc failed. cb=%u Align=%u fContig=%d\n", (unsigned)cb, (unsigned)uAlignment, fContig));
+        return NULL;
+    }
+
+    pfn_t PageFrameNum = hat_getpfnum(kas.a_hat, (caddr_t)pvMem);
+    AssertRelease(PageFrameNum != PFN_INVALID);
+    if (puPhys)
+        *puPhys = (uint64_t)PageFrameNum << PAGESHIFT;
+
+    return pvMem;
+}
+
+
+/**
+ * Frees memory allocated using rtR0SolMemAlloc().
+ *
+ * @param   pv          The memory to free.
+ * @param   cb          Size of the memory block
+ */
+DECLHIDDEN(void) rtR0SolMemFree(void *pv, size_t cb)
+{
+    if (RT_LIKELY(pv))
+        g_pfnrtR0Sol_contig_free(pv, cb);
+}
+
+
 RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
 {
…
 
     /* Allocate physically contiguous (< 4GB) page-aligned memory. */
-    uint64_t physAddr = _4G - 1;
-    caddr_t virtAddr = vbi_contig_alloc(&physAddr, cb);
-    if (virtAddr == NULL)
-    {
-        LogRel(("vbi_contig_alloc failed to allocate %u bytes\n", cb));
-        return NULL;
-    }
-
-    Assert(physAddr < _4G);
-    *pPhys = physAddr;
-    return virtAddr;
+    uint64_t uPhys;
+    void *pvMem = rtR0SolMemAlloc((uint64_t)_4G - 1, &uPhys, cb, PAGESIZE, true);
+    if (RT_UNLIKELY(!pvMem))
+    {
+        LogRel(("RTMemContAlloc failed to allocate %u bytes\n", cb));
+        return NULL;
+    }
+
+    Assert(uPhys < _4G);
+    *pPhys = uPhys;
+    return pvMem;
 }
…
 {
     RT_ASSERT_PREEMPTIBLE();
-    if (pv)
-        vbi_contig_free(pv, cb);
+    rtR0SolMemFree(pv, cb);
 }
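Caller-side sketch for the two new helpers (the size is illustrative, not from the changeset):

    uint64_t uPhys = 0;
    /* 16 pages anywhere below 4GB, page aligned, physically contiguous. */
    void *pv = rtR0SolMemAlloc((uint64_t)_4G - 1, &uPhys, 16 * PAGESIZE, PAGESIZE, true /* fContig */);
    if (pv)
    {
        /* uPhys now holds the physical address of the first page. */
        rtR0SolMemFree(pv, 16 * PAGESIZE);
    }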
trunk/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c
r37281 → r40966

 #include <iprt/process.h>
 #include "internal/memobj.h"
+#include "memobj-r0drv-solaris.h"
+
+#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)
+static vnode_t                  s_PageVnode;
 
 /*******************************************************************************
…
  * The Solaris version of the memory object structure.
  */
-typedef struct RTR0MEMOBJSOLARIS
+typedef struct RTR0MEMOBJSOL
 {
     /** The core structure. */
…
      *  allocation. */
     bool                fLargePage;
-} RTR0MEMOBJSOLARIS, *PRTR0MEMOBJSOLARIS;
-
+} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;
+
+
+/**
+ * Returns the physical address for a virtual address.
+ *
+ * @param pv        The virtual address.
+ *
+ * @returns The physical address corresponding to @a pv.
+ */
+static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
+{
+    struct hat *pHat         = NULL;
+    pfn_t       PageFrameNum = 0;
+    uintptr_t   uVirtAddr    = (uintptr_t)pv;
+
+    if (SOL_IS_KRNL_ADDR(pv))
+        pHat = kas.a_hat;
+    else
+    {
+        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
+        AssertRelease(pProcess);
+        pHat = pProcess->p_as->a_hat;
+    }
+
+    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
+    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
+    return (((uint64_t)PageFrameNum << PAGESHIFT) | (uVirtAddr & PAGEOFFSET));
+}
+
+
+/**
+ * Returns the physical address of a page from an array of pages.
+ *
+ * @param ppPages   The array of pages.
+ * @param iPage     Index of the page in the array to get the physical
+ *                  address.
+ *
+ * @returns Physical address of specific page within the list of pages specified
+ *          in @a ppPages.
+ */
+static inline uint64_t rtR0MemObjSolPageToPhys(page_t **ppPages, size_t iPage)
+{
+    pfn_t PageFrameNum = page_pptonum(ppPages[iPage]);
+    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPageToPhys failed. ppPages=%p iPage=%u\n", ppPages, iPage));
+    return (uint64_t)PageFrameNum << PAGESHIFT;
+}
+
+
+/**
+ * Retreives a free page from the kernel freelist.
+ *
+ * @param virtAddr  The virtual address to which this page maybe mapped in
+ *                  the future.
+ * @param cbPage    The size of the page.
+ *
+ * @returns Pointer to the allocated page, NULL on failure.
+ */
+static page_t *rtR0MemObjSolPageFromFreelist(caddr_t virtAddr, size_t cbPage)
+{
+    seg_t KernelSeg;
+    KernelSeg.s_as = &kas;
+    page_t *pPage = page_get_freelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
+                                      cbPage, 0 /* flags */, NULL /* NUMA group */);
+    if (   !pPage
+        && g_frtSolUseKflt)
+    {
+        pPage = page_get_freelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
+                                  cbPage, 0x200 /* PG_KFLT */, NULL /* NUMA group */);
+    }
+    return pPage;
+}
+
+
+/**
+ * Retrieves a free page from the kernel cachelist.
+ *
+ * @param virtAddr  The virtual address to which this page maybe mapped in
+ *                  the future.
+ * @param cbPage    The size of the page.
+ *
+ * @return Pointer to the allocated page, NULL on failure.
+ */
+static page_t *rtR0MemObjSolPageFromCachelist(caddr_t virtAddr, size_t cbPage)
+{
+    seg_t KernelSeg;
+    KernelSeg.s_as = &kas;
+    page_t *pPage = page_get_cachelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
+                                       0 /* flags */, NULL /* NUMA group */);
+    if (   !pPage
+        && g_frtSolUseKflt)
+    {
+        pPage = page_get_cachelist(&s_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
+                                   0x200 /* PG_KFLT */, NULL /* NUMA group */);
+    }
+
+    /*
+     * Remove association with the vnode for pages from the cachelist.
+     */
+    if (!PP_ISAGED(pPage))
+        page_hashout(pPage, NULL /* mutex */);
+
+    return pPage;
+}
+
+
+/**
+ * Allocates physical non-contiguous memory.
+ *
+ * @param uPhysHi   The upper physical address limit (inclusive).
+ * @param puPhys    Where to store the physical address of first page. Optional,
+ *                  can be NULL.
+ * @param cb        The size of the allocation.
+ *
+ * @return Array of allocated pages, NULL on failure.
+ */
+static page_t **rtR0MemObjSolPagesAlloc(uint64_t uPhysHi, uint64_t *puPhys, size_t cb)
+{
+    /** @todo We need to satisfy the upper physical address constraint */
+
+    /*
+     * The page freelist and cachelist both hold pages that are not mapped into any address space.
+     * The cachelist is not really free pages but when memory is exhausted they'll be moved to the
+     * free lists, it's the total of the free+cache list that we see on the 'free' column in vmstat.
+     *
+     * Reserve available memory for pages and create the pages.
+     */
+    pgcnt_t cPages = (cb + PAGESIZE - 1) >> PAGESHIFT;
+    int rc = page_resv(cPages, KM_NOSLEEP);
+    if (rc)
+    {
+        rc = page_create_wait(cPages, 0 /* flags */);
+        if (rc)
+        {
+            size_t   cbPages = cPages * sizeof(page_t *);
+            page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
+            if (RT_LIKELY(ppPages))
+            {
+                /*
+                 * Get pages from kseg, the 'virtAddr' here is only for colouring but unfortunately
+                 * we don't yet have the 'virtAddr' to which this memory may be mapped.
+                 */
+                caddr_t virtAddr = NULL;
+                for (size_t i = 0; i < cPages; i++, virtAddr += PAGESIZE)
+                {
+                    /*
+                     * Get a page from the freelist or cachelist.
+                     */
+                    page_t *pPage = rtR0MemObjSolPageFromFreelist(virtAddr, PAGESIZE);
+                    if (!pPage)
+                        pPage = rtR0MemObjSolPageFromCachelist(virtAddr, PAGESIZE);
+                    if (RT_UNLIKELY(!pPage))
+                    {
+                        /*
+                         * No more pages found, release was grabbed so far.
+                         */
+                        page_create_putback(cPages - i);
+                        while (--i >= 0)
+                            page_free(ppPages[i], 0 /* don't need page, move to tail of pagelist */);
+                        kmem_free(ppPages, cbPages);
+                        page_unresv(cPages);
+                        return NULL;
+                    }
+
+                    PP_CLRFREE(pPage);      /* Page is no longer free */
+                    PP_CLRAGED(pPage);      /* Page is not hashed in */
+                    ppPages[i] = pPage;
+                }
+
+                /*
+                 * We now have the pages locked exclusively, before they are mapped in
+                 * we must downgrade the lock.
+                 */
+                if (puPhys)
+                    *puPhys = (uint64_t)page_pptonum(ppPages[0]) << PAGESHIFT;
+                return ppPages;
+            }
+
+            page_create_putback(cPages);
+        }
+
+        page_unresv(cPages);
+    }
+
+    return NULL;
+}
+
+
+/**
+ * Prepares pages allocated by rtR0MemObjSolPagesAlloc for mapping.
+ *
+ * @param ppPages   Pointer to the page list.
+ * @param cb        Size of the allocation.
+ * @param auPhys    Where to store the physical address of the premapped
+ *                  pages.
+ * @param cPages    The number of pages (entries) in @a auPhys.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolPagesPreMap(page_t **ppPages, size_t cb, uint64_t auPhys[], size_t cPages)
+{
+    AssertPtrReturn(ppPages, VERR_INVALID_PARAMETER);
+    AssertPtrReturn(auPhys, VERR_INVALID_PARAMETER);
+
+    for (size_t iPage = 0; iPage < cPages; iPage++)
+    {
+        /*
+         * Prepare pages for mapping into kernel/user-space. Downgrade the
+         * exclusive page lock to a shared lock if necessary.
+         */
+        if (page_tryupgrade(ppPages[iPage]) == 1)
+            page_downgrade(ppPages[iPage]);
+
+        auPhys[iPage] = rtR0MemObjSolPageToPhys(ppPages, iPage);
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Frees pages allocated by rtR0MemObjSolPagesAlloc.
+ *
+ * @param ppPages   Pointer to the page list.
+ * @param cbPages   Size of the allocation.
+ */
+static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
+{
+    size_t cPages  = (cb + PAGESIZE - 1) >> PAGESHIFT;
+    size_t cbPages = cPages * sizeof(page_t *);
+    for (size_t iPage = 0; iPage < cPages; iPage++)
+    {
+        /*
+         * We need to exclusive lock the pages before freeing them.
+         */
+        int rc = page_tryupgrade(ppPages[iPage]);
+        if (!rc)
+        {
+            page_unlock(ppPages[iPage]);
+            while (!page_lock(ppPages[iPage], SE_EXCL, NULL /* mutex */, P_RECLAIM))
+            {
+                /* nothing */;
+            }
+        }
+        page_free(ppPages[iPage], 0 /* don't need page, move to tail of pagelist */);
+    }
+    kmem_free(ppPages, cbPages);
+    page_unresv(cPages);
+}
+
+
+/**
+ * Allocates a large page to cover the required allocation size.
+ *
+ * @param puPhys    Where to store the physical address of the allocated
+ *                  page. Optional, can be NULL.
+ * @param cb        Size of the allocation.
+ *
+ * @returns Pointer to the allocated large page, NULL on failure.
+ */
+static page_t *rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cb)
+{
+    /*
+     * Reserve available memory and create the sub-pages.
+     */
+    const pgcnt_t cPages = cb >> PAGESHIFT;
+    int rc = page_resv(cPages, KM_NOSLEEP);
+    if (rc)
+    {
+        rc = page_create_wait(cPages, 0 /* flags */);
+        if (rc)
+        {
+            /*
+             * Get a page off the free list. We set virtAddr to 0 since we don't know where
+             * the memory is going to be mapped.
+             */
+            seg_t   KernelSeg;
+            caddr_t virtAddr = NULL;
+            KernelSeg.s_as = &kas;
+            page_t *pRootPage = rtR0MemObjSolPageFromFreelist(virtAddr, cb);
+            if (pRootPage)
+            {
+                AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx cPages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));
+
+                /*
+                 * Mark all the sub-pages as non-free and not-hashed-in.
+                 * It is paramount that we destroy the list (before freeing it).
+                 */
+                page_t *pPageList = pRootPage;
+                for (size_t iPage = 0; iPage < cPages; iPage++)
+                {
+                    page_t *pPage = pPageList;
+                    AssertPtr(pPage);
+                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
+                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
+                    page_sub(&pPageList, pPage);
+
+                    /*
+                     * Ensure page is now be free and the page size-code must match that of the root page.
+                     */
+                    AssertMsg(PP_ISFREE(pPage), ("%p\n", pPage));
+                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("%p - %d expected %d \n", pPage, pPage->p_szc, pRootPage->p_szc));
+
+                    PP_CLRFREE(pPage);      /* Page no longer free */
+                    PP_CLRAGED(pPage);      /* Page no longer hashed-in */
+                }
+
+                uint64_t uPhys = (uint64_t)page_pptonum(pRootPage) << PAGESHIFT;
+                AssertMsg(!(uPhys & (cb - 1)), ("%llx %zx\n", uPhys, cb));
+                if (puPhys)
+                    *puPhys = uPhys;
+
+                return pRootPage;
+            }
+
+            page_create_putback(cPages);
+        }
+
+        page_unresv(cPages);
+    }
+
+    return NULL;
+}
+
+/**
+ * Prepares the large page allocated by rtR0MemObjSolLargePageAlloc to be mapped.
+ *
+ * @param pRootPage     Pointer to the root page.
+ * @param cb            Size of the allocation.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolLargePagePreMap(page_t *pRootPage, size_t cb)
+{
+    const pgcnt_t cPages = cb >> PAGESHIFT;
+
+    Assert(page_get_pagecnt(pRootPage->p_szc) == cPages);
+    AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx npages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));
+
+    /*
+     * We need to downgrade the sub-pages from exclusive to shared locking
+     * because otherweise we cannot <you go figure>.
+     */
+    for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
+    {
+        page_t *pPage = page_nextn(pRootPage, iPage);
+        AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
+                  ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
+        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));
+
+        if (page_tryupgrade(pPage) == 1)
+            page_downgrade(pPage);
+        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Frees the page allocated by rtR0MemObjSolLargePageAlloc.
+ *
+ * @param pRootPage     Pointer to the root page.
+ * @param cb            Allocated size.
+ */
+static void rtR0MemObjSolLargePageFree(page_t *pRootPage, size_t cb)
+{
+    pgcnt_t cPages = cb >> PAGESHIFT;
+
+    Assert(page_get_pagecnt(pRootPage->p_szc) == cPages);
+    AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx cPages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));
+
+    /*
+     * We need to exclusively lock the sub-pages before freeing the large one.
+     */
+    for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
+    {
+        page_t *pPage = page_nextn(pRootPage, iPage);
+        AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
+                  ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
+        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));
+
+        int rc = page_tryupgrade(pPage);
+        if (!rc)
+        {
+            page_unlock(pPage);
+            while (!page_lock(pPage, SE_EXCL, NULL /* mutex */, P_RECLAIM))
+            {
+                /* nothing */;
+            }
+        }
+    }
+
+    /*
+     * Free the large page and unreserve the memory.
+     */
+    page_free_pages(pRootPage);
+    page_unresv(cPages);
+}
+
+
+/**
+ * Unmaps kernel/user-space mapped memory.
+ *
+ * @param pv        Pointer to the mapped memory block.
+ * @param cb        Size of the memory block.
+ */
+static void rtR0MemObjSolUnmap(void *pv, size_t cb)
+{
+    if (SOL_IS_KRNL_ADDR(pv))
+    {
+        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
+        vmem_free(heap_arena, pv, cb);
+    }
+    else
+    {
+        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
+        AssertPtr(pAddrSpace);
+        as_rangelock(pAddrSpace);
+        as_unmap(pAddrSpace, pv, cb);
+        as_rangeunlock(pAddrSpace);
+    }
+}
+
+/**
+ * Lock down memory mappings for a virtual address.
+ *
+ * @param pv        Pointer to the memory to lock down.
+ * @param cb        Size of the memory block.
+ * @param fAccess   Page access rights (S_READ, S_WRITE, S_EXEC)
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
+{
+    /*
+     * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
+     */
+    if (!SOL_IS_KRNL_ADDR(pv))
+    {
+        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
+        AssertPtr(pProc);
+        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
+        if (rc)
+        {
+            LogRel(("rtR0MemObjSolLock failed for pv=%pv cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
+            return VERR_LOCK_FAILED;
+        }
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Unlock memory mappings for a virtual address.
+ *
+ * @param pv            Pointer to the locked memory.
+ * @param cb            Size of the memory block.
+ * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
+ */
+static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
+{
+    if (!SOL_IS_KRNL_ADDR(pv))
+    {
+        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
+        AssertPtr(pProcess);
+        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
+    }
+}
+
+
+/**
+ * Maps a list of physical pages into user address space.
+ *
+ * @param pVirtAddr     Where to store the virtual address of the mapping.
+ * @param fPageAccess   Page access rights (PROT_READ, PROT_WRITE,
+ *                      PROT_EXEC)
+ * @param paPhysAddrs   Array of physical addresses to pages.
+ * @param cb            Size of memory being mapped.
+ *
+ * @returns IPRT status code.
+ */
+static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb)
+{
+    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
+    int rc = VERR_INTERNAL_ERROR;
+    SEGVBOX_CRARGS Args;
+
+    Args.paPhysAddrs = paPhysAddrs;
+    Args.fPageAccess = fPageAccess;
+
+    as_rangelock(pAddrSpace);
+    map_addr(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
+    if (*pVirtAddr != NULL)
+        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
+    else
+        rc = ENOMEM;
+    as_rangeunlock(pAddrSpace);
+
+    return RTErrConvertFromErrno(rc);
+}
 
 
 DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
 {
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)pMem;
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
 
     switch (pMemSolaris->Core.enmType)
     {
         case RTR0MEMOBJTYPE_LOW:
-            vbi_lowmem_free(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
             break;
 
         case RTR0MEMOBJTYPE_PHYS:
-            if (!pMemSolaris->Core.u.Phys.fAllocated)
-            {   /* nothing to do here */;   }
-            else if (pMemSolaris->fLargePage)
-                vbi_large_page_free(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
-            else
-                vbi_phys_free(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+            if (pMemSolaris->Core.u.Phys.fAllocated)
+            {
+                if (pMemSolaris->fLargePage)
+                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
+                else
+                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+            }
             break;
 
         case RTR0MEMOBJTYPE_PHYS_NC:
-            vbi_pages_free(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
+            rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
             break;
 
…
 
         case RTR0MEMOBJTYPE_LOCK:
-            vbi_unlock_va(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess, pMemSolaris->pvHandle);
+            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
             break;
 
         case RTR0MEMOBJTYPE_MAPPING:
-            vbi_unmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
+            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
             break;
 
…
 {
     /* Create the object. */
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
-    if (!pMemSolaris)
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
+    if (RT_UNLIKELY(!pMemSolaris))
         return VERR_NO_MEMORY;
 
-    void *virtAddr = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
-    if (!virtAddr)
+    void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
+    if (RT_UNLIKELY(!pvMem))
     {
         rtR0MemObjDelete(&pMemSolaris->Core);
…
     }
 
-    pMemSolaris->Core.pv  = virtAddr;
+    pMemSolaris->Core.pv  = pvMem;
     pMemSolaris->pvHandle = NULL;
     *ppMem = &pMemSolaris->Core;
…
 
     /* Create the object */
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
     if (!pMemSolaris)
         return VERR_NO_MEMORY;
 
     /* Allocate physically low page-aligned memory. */
-    uint64_t physAddr = _4G - 1;
-    caddr_t virtAddr = vbi_lowmem_alloc(physAddr, cb);
-    if (virtAddr == NULL)
+    uint64_t uPhysHi = _4G - 1;
+    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGESIZE, false /* fContig */);
+    if (RT_UNLIKELY(!pvMem))
     {
         rtR0MemObjDelete(&pMemSolaris->Core);
         return VERR_NO_LOW_MEMORY;
     }
-    pMemSolaris->Core.pv = virtAddr;
+    pMemSolaris->Core.pv  = pvMem;
     pMemSolaris->pvHandle = NULL;
     *ppMem = &pMemSolaris->Core;
…
 {
 #if HC_ARCH_BITS == 64
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
-    if (!pMemSolaris)
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
+    if (RT_UNLIKELY(!pMemSolaris))
         return VERR_NO_MEMORY;
 
-    uint64_t PhysAddr = PhysHighest;
-    void *pvPages = vbi_pages_alloc(&PhysAddr, cb);
+    uint64_t PhysAddr = UINT64_MAX;
+    void *pvPages = rtR0MemObjSolPagesAlloc((uint64_t)PhysHighest, &PhysAddr, cb);
     if (!pvPages)
     {
-        LogRel(("rtR0MemObjNativeAllocPhysNC: vbi_pages_alloc failed.\n"));
+        LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
         rtR0MemObjDelete(&pMemSolaris->Core);
         return VERR_NO_MEMORY;
…
     pMemSolaris->pvHandle = pvPages;
 
+    Assert(PhysAddr != UINT64_MAX);
     Assert(!(PhysAddr & PAGE_OFFSET_MASK));
     *ppMem = &pMemSolaris->Core;
…
     AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);
 
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
-    if (!pMemSolaris)
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    if (RT_UNLIKELY(!pMemSolaris))
         return VERR_NO_MEMORY;
 
…
          * Allocate one large page.
          */
-        void *pvPages = vbi_large_page_alloc(&PhysAddr, cb);
-        if (pvPages)
+        cmn_err(CE_NOTE, "calling rtR0MemObjSolLargePageAlloc\n");
+        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
+        if (RT_LIKELY(pvPages))
         {
             AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
…
          * Allocate physically contiguous memory aligned as specified.
          */
+        cmn_err(CE_NOTE, "rtR0MemObjNativeAllocPhys->rtR0SolMemAlloc\n");
         AssertCompile(NIL_RTHCPHYS == UINT64_MAX);
         PhysAddr = PhysHighest;
-        caddr_t pvMem = vbi_phys_alloc(&PhysAddr, cb, uAlignment, 1 /* contiguous */);
+        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
         if (RT_LIKELY(pvMem))
         {
…
 
     /* Create the object. */
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
     if (!pMemSolaris)
         return VERR_NO_MEMORY;
…
 
     /* Create the locking object */
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
     if (!pMemSolaris)
         return VERR_NO_MEMORY;
…
     if (fAccess & RTMEM_PROT_EXEC)
         fPageAccess = S_EXEC;
-    void *pvPageList = NULL;
-    int rc = vbi_lock_va((caddr_t)R3Ptr, cb, fPageAccess, &pvPageList);
-    if (rc != 0)
-    {
-        LogRel(("rtR0MemObjNativeLockUser: vbi_lock_va failed rc=%d\n", rc));
+    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
+    if (RT_FAILURE(rc))
+    {
+        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
         rtR0MemObjDelete(&pMemSolaris->Core);
-        return VERR_LOCK_FAILED;
+        return rc;
     }
 
     /* Fill in the object attributes and return successfully. */
     pMemSolaris->Core.u.Lock.R0Process = R0Process;
-    pMemSolaris->pvHandle              = pvPageList;
+    pMemSolaris->pvHandle              = NULL;
     pMemSolaris->fAccess               = fPageAccess;
     *ppMem = &pMemSolaris->Core;
…
     NOREF(fAccess);
 
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
     if (!pMemSolaris)
         return VERR_NO_MEMORY;
…
     if (fAccess & RTMEM_PROT_EXEC)
         fPageAccess = S_EXEC;
-    void *pvPageList = NULL;
-    int rc = vbi_lock_va((caddr_t)pv, cb, fPageAccess, &pvPageList);
-    if (rc != 0)
-    {
-        LogRel(("rtR0MemObjNativeLockKernel: vbi_lock_va failed rc=%d\n", rc));
+    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
+    if (RT_FAILURE(rc))
+    {
+        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
         rtR0MemObjDelete(&pMemSolaris->Core);
-        return VERR_LOCK_FAILED;
+        return rc;
     }
 
     /* Fill in the object attributes and return successfully. */
     pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
-    pMemSolaris->pvHandle = pvPageList;
-    pMemSolaris->fAccess  = fPageAccess;
+    pMemSolaris->pvHandle = NULL;
+    pMemSolaris->fAccess  = fPageAccess;
     *ppMem = &pMemSolaris->Core;
     return VINF_SUCCESS;
…
 DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
 {
-    PRTR0MEMOBJSOLARIS pMemSolaris;
+    PRTR0MEMOBJSOL pMemSolaris;
 
     /*
      * Use xalloc.
      */
-    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /*phase*/, 0 /*nocross*/,
-                           NULL /*minaddr*/, NULL /*maxaddr*/, VM_SLEEP);
+    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
+                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
     if (RT_UNLIKELY(!pv))
         return VERR_NO_MEMORY;
 
     /* Create the object. */
-    pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
+    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
     if (!pMemSolaris)
     {
…
      * Get parameters from the source object.
      */
-    PRTR0MEMOBJSOLARIS pMemToMapSolaris = (PRTR0MEMOBJSOLARIS)pMemToMap;
-    void              *pv               = pMemToMapSolaris->Core.pv;
-    size_t             cb               = pMemToMapSolaris->Core.cb;
-    pgcnt_t            cPages           = cb >> PAGE_SHIFT;
+    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
+    void          *pv               = pMemToMapSolaris->Core.pv;
+    size_t         cb               = pMemToMapSolaris->Core.cb;
+    size_t         cPages           = cb >> PAGE_SHIFT;
 
     /*
      * Create the mapping object
      */
-    PRTR0MEMOBJSOLARIS pMemSolaris;
-    pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
+    PRTR0MEMOBJSOL pMemSolaris;
+    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
     if (RT_UNLIKELY(!pMemSolaris))
         return VERR_NO_MEMORY;
…
      */
     if (pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC)
-        rc = vbi_pages_premap(pMemToMapSolaris->pvHandle, cb, paPhysAddrs);
+        rc = rtR0MemObjSolPagesPreMap(pMemToMapSolaris->pvHandle, cb, paPhysAddrs, cPages);
     else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
              && pMemToMapSolaris->fLargePage)
     {
…
         for (pgcnt_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
             paPhysAddrs[iPage] = Phys;
-        rc = vbi_large_page_premap(pMemToMapSolaris->pvHandle, cb);
+        rc = rtR0MemObjSolLargePagePreMap(pMemToMapSolaris->pvHandle, cb);
     }
     else
     {
-        /* Have kernel mapping, just translate virtual to physical. */
+        /*
+         * Have kernel mapping, just translate virtual to physical.
+         */
         AssertPtr(pv);
-        rc = 0;
-        for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
+        rc = VINF_SUCCESS;
+        for (size_t iPage = 0; iPage < cPages; iPage++)
         {
-            paPhysAddrs[iPage] = vbi_va_to_pa(pv);
+            paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pv);
             if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
             {
                 LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
-                rc = -1;
+                rc = VERR_MAP_FAILED;
                 break;
             }
…
         }
     }
-    if (!rc)
-    {
+    if (RT_SUCCESS(rc))
+    {
+        unsigned fPageAccess = PROT_READ;
+        if (fProt & RTMEM_PROT_WRITE)
+            fPageAccess |= PROT_WRITE;
+        if (fProt & RTMEM_PROT_EXEC)
+            fPageAccess |= PROT_EXEC;
+
         /*
          * Perform the actual mapping.
          */
         caddr_t UserAddr = NULL;
-        rc = vbi_user_map(&UserAddr, fProt, paPhysAddrs, cb);
-        if (!rc)
+        rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb);
+        if (RT_SUCCESS(rc))
         {
             pMemSolaris->Core.u.Mapping.R0Process = R0Process;
…
         }
 
-        LogRel(("rtR0MemObjNativeMapUser: vbi_user_map failed.\n"));
-    }
+        LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
+    }
+
     rc = VERR_MAP_FAILED;
     kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
…
 DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
 {
-    PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)pMem;
+    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;
 
     switch (pMemSolaris->Core.enmType)
…
         {
             uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
-            return vbi_va_to_pa(pb);
+            return rtR0MemObjSolVirtToPhys(pb);
         }
-        return vbi_page_to_pa(pMemSolaris->pvHandle, iPage);
+        return rtR0MemObjSolPageToPhys(pMemSolaris->pvHandle, iPage);
 
         case RTR0MEMOBJTYPE_PAGE:
…
         {
             uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
-            return vbi_va_to_pa(pb);
+            return rtR0MemObjSolVirtToPhys(pb);
         }
 
         /*
-         * Although mapping can be handled by vbi_va_to_pa(offset) like the above case,
+         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
          * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
          */
trunk/src/VBox/Runtime/r0drv/solaris/vbi/mp-r0drv-solaris.c
r40216 → r40966

 #include <iprt/mp.h>
 #include <iprt/cpuset.h>
+#include <iprt/thread.h>
 
 #include <iprt/asm.h>
…
 #include "r0drv/mp-r0drv.h"
 
+typedef int FNRTMPSOLWORKER(void *pvUser1, void *pvUser2, void *pvUser3);
+typedef FNRTMPSOLWORKER *PFNRTMPSOLWORKER;
 
…
 RTDECL(RTCPUID) RTMpCpuId(void)
 {
-    return vbi_cpu_id();
+    return CPU->cpu_id;
 }
 
 
 RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
 {
-    return idCpu < RTCPUSET_MAX_CPUS && idCpu < vbi_cpu_maxcount() ? idCpu : -1;
+    return idCpu < RTCPUSET_MAX_CPUS && idCpu <= max_cpuid ? idCpu : -1;
 }
 
 
 RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
 {
-    return (unsigned)iCpu < vbi_cpu_maxcount() ? iCpu : NIL_RTCPUID;
+    return (unsigned)iCpu <= max_cpuid ? iCpu : NIL_RTCPUID;
 }
 
 
 RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
 {
-    return vbi_max_cpu_id();
+    return max_cpuid;
 }
 
…
      * We cannot query CPU status recursively, check cpu member from cached set.
      */
-    if (idCpu >= vbi_cpu_count())
+    if (idCpu >= ncpus)
         return false;
 
-    return RTCpuSetIsMember(&g_rtMpSolarisCpuSet, idCpu);
-
-#if 0
-    return idCpu < vbi_cpu_count() && vbi_cpu_online(idCpu);
-#endif
+    return RTCpuSetIsMember(&g_rtMpSolCpuSet, idCpu);
 }
 
 
 RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
 {
-    return idCpu < vbi_cpu_count();
+    return idCpu < ncpus;
 }
 
…
 RTDECL(RTCPUID) RTMpGetCount(void)
 {
-    return vbi_cpu_count();
+    return ncpus;
 }
 
…
      * We cannot query CPU status recursively, return the cached set.
      */
-    *pSet = g_rtMpSolarisCpuSet;
+    *pSet = g_rtMpSolCpuSet;
     return pSet;
 }
…
 
 
+/**
+ * Wrapper to Solaris IPI infrastructure.
+ *
+ * @param    pCpuSet        Pointer to Solaris CPU set.
+ * @param    pfnSolWorker   Function to execute on target CPU(s).
+ * @param    pArgs          Pointer to RTMPARGS to pass to @a pfnSolWorker.
+ *
+ * @returns Solaris error code.
+ */
+static void rtMpSolCrossCall(PRTSOLCPUSET pCpuSet, PFNRTMPSOLWORKER pfnSolWorker, PRTMPARGS pArgs)
+{
+    AssertPtrReturnVoid(pCpuSet);
+    AssertPtrReturnVoid(pfnSolWorker);
+    AssertPtrReturnVoid(pCpuSet);
+
+    if (g_frtSolOldIPI)
+    {
+        if (g_frtSolOldIPIUlong)
+        {
+            g_rtSolXcCall.u.pfnSol_xc_call_old_ulong((xc_arg_t)pArgs,          /* Arg to IPI function */
+                                                     0,                        /* Arg2, ignored */
+                                                     0,                        /* Arg3, ignored */
+                                                     IPRT_SOL_X_CALL_HIPRI,    /* IPI priority */
+                                                     pCpuSet->auCpus[0],       /* Target CPU(s) */
+                                                     (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+        }
+        else
+        {
+            g_rtSolXcCall.u.pfnSol_xc_call_old((xc_arg_t)pArgs,          /* Arg to IPI function */
+                                               0,                        /* Arg2, ignored */
+                                               0,                        /* Arg3, ignored */
+                                               IPRT_SOL_X_CALL_HIPRI,    /* IPI priority */
+                                               *pCpuSet,                 /* Target CPU set */
+                                               (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+        }
+    }
+    else
+    {
+        g_rtSolXcCall.u.pfnSol_xc_call((xc_arg_t)pArgs,          /* Arg to IPI function */
+                                       0,                        /* Arg2 */
+                                       0,                        /* Arg3 */
+                                       &pCpuSet->auCpus[0],      /* Target CPU set */
+                                       (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
+    }
+}
 
 
 /**
…
  * @param   uIgnored2       Ignored.
  */
-static int rtmpOnAllSolarisWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
+static int rtMpSolOnAllCpuWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
 {
     PRTMPARGS pArgs = (PRTMPARGS)(uArg);
…
     Args.cHits = 0;
 
-    vbi_preempt_disable();
-
-    vbi_execute_on_all(rtmpOnAllSolarisWrapper, &Args);
-
-    vbi_preempt_enable();
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RTThreadPreemptDisable(&PreemptState);
+
+    RTSOLCPUSET CpuSet;
+    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+        CpuSet.auCpus[i] = (ulong_t)-1L;
+
+    rtMpSolCrossCall(&CpuSet, rtMpSolOnAllCpuWrapper, &Args);
+
+    RTThreadPreemptRestore(&PreemptState);
 
     return VINF_SUCCESS;
…
  * @param   uIgnored2       Ignored.
  */
-static int rtmpOnOthersSolarisWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
+static int rtMpSolOnOtherCpusWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
 {
     PRTMPARGS pArgs = (PRTMPARGS)(uArg);
…
     RTMPARGS Args;
     RT_ASSERT_INTS_ON();
-
-    /* The caller is supposed to have disabled preemption, but take no chances. */
-    vbi_preempt_disable();
 
     Args.pfnWorker = pfnWorker;
…
     Args.cHits = 0;
 
-    vbi_execute_on_others(rtmpOnOthersSolarisWrapper, &Args);
-
-    vbi_preempt_enable();
+    /* The caller is supposed to have disabled preemption, but take no chances. */
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RTThreadPreemptDisable(&PreemptState);
+
+    RTSOLCPUSET CpuSet;
+    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+        CpuSet.auCpus[0] = (ulong_t)-1L;
+    BT_CLEAR(CpuSet.auCpus, RTMpCpuId());
+
+    rtMpSolCrossCall(&CpuSet, rtMpSolOnOtherCpusWrapper, &Args);
+
+    RTThreadPreemptRestore(&PreemptState);
 
     return VINF_SUCCESS;
…
  * for the RTMpOnSpecific API.
  *
- *
  * @param   uArgs           Pointer to the RTMPARGS package.
  * @param   uIgnored1       Ignored.
  * @param   uIgnored2       Ignored.
- */
-static int rtmpOnSpecificSolarisWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
+ *
+ * @returns Solaris error code.
+ */
+static int rtMpSolOnSpecificCpuWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
 {
     PRTMPARGS pArgs = (PRTMPARGS)(uArg);
…
     RT_ASSERT_INTS_ON();
 
-    if (idCpu >= vbi_cpu_count())
+    if (idCpu >= ncpus)
         return VERR_CPU_NOT_FOUND;
 
…
     Args.cHits = 0;
 
-    vbi_preempt_disable();
-
-    vbi_execute_on_one(rtmpOnSpecificSolarisWrapper, &Args, idCpu);
-
-    vbi_preempt_enable();
+    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+    RTThreadPreemptDisable(&PreemptState);
+
+    RTSOLCPUSET CpuSet;
+    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
+        CpuSet.auCpus[i] = 0;
+    BT_SET(CpuSet.auCpus, idCpu);
+
+    rtMpSolCrossCall(&CpuSet, rtMpSolOnSpecificCpuWrapper, &Args);
+
+    RTThreadPreemptRestore(&PreemptState);
 
     Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);
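Caller-side view of the rewritten primitives, for orientation (the worker body is hypothetical; the signature is IPRT's standard PFNRTMPWORKER):

    static DECLCALLBACK(void) sketchWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    {
        /* Runs on the target CPU with preemption disabled. */
        NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    }

    /* Cross call one CPU; internally this builds a one-bit RTSOLCPUSET and IPIs it. */
    int rc = RTMpOnSpecific(2 /* idCpu, illustrative */, sketchWorker, NULL, NULL);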
trunk/src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c
r40227 r40966 42 42 * Global Variables * 43 43 *******************************************************************************/ 44 /** CPU watch callback handle. */45 static v bi_cpu_watch_t *g_hVbiCpuWatch = NULL;44 /** Whether CPUs are being watched or not. */ 45 static volatile bool g_fSolCpuWatch = false; 46 46 /** Set of online cpus that is maintained by the MP callback. 47 47 * This avoids locking issues querying the set from the kernel as well as 48 48 * eliminating any uncertainty regarding the online status during the 49 49 * callback. */ 50 RTCPUSET g_rtMpSolarisCpuSet; 50 RTCPUSET g_rtMpSolCpuSet; 51 52 /** 53 * Internal solaris representation for watching CPUs. 54 */ 55 typedef struct RTMPSOLWATCHCPUS 56 { 57 /** Function pointer to Mp worker. */ 58 PFNRTMPWORKER pfnWorker; 59 /** Argument to pass to the Mp worker. */ 60 void *pvArg; 61 } RTMPSOLWATCHCPUS; 62 typedef RTMPSOLWATCHCPUS *PRTMPSOLWATCHCPUS; 51 63 52 64 53 static void rtMpNotificationSolarisOnCurrentCpu(void *pvArgs, void *uIgnored1, void *uIgnored2) 65 /** 66 * PFNRTMPWORKER worker for executing Mp events on the target CPU. 67 * 68 * @param idCpu The current CPU Id. 69 * @param pvArg Opaque pointer to event type (online/offline). 70 * @param pvIgnored1 Ignored. 71 */ 72 static void rtMpNotificationSolOnCurrentCpu(RTCPUID idCpu, void *pvArg, void *pvIgnored1) 54 73 { 55 NOREF( uIgnored1);56 NOREF( uIgnored2);74 NOREF(pvIgnored1); 75 NOREF(idCpu); 57 76 58 PRTMPARGS pArgs = (PRTMPARGS) (pvArgs);77 PRTMPARGS pArgs = (PRTMPARGS)pvArg; 59 78 AssertRelease(pArgs && pArgs->idCpu == RTMpCpuId()); 60 Assert(pArgs->pvUser 2);79 Assert(pArgs->pvUser1); 61 80 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 62 81 63 int online = *(int *)pArgs->pvUser2; 64 if (online) 82 RTMPEVENT enmMpEvent = *(RTMPEVENT *)pArgs->pvUser1; 83 rtMpNotificationDoCallbacks(enmMpEvent, pArgs->idCpu); 84 } 85 86 87 /** 88 * Solaris callback function for Mp event notification. 89 * 90 * @param CpuState The current event/state of the CPU. 91 * @param iCpu Which CPU is this event fore. 92 * @param pvArg Ignored. 93 * 94 * @remarks This function assumes index == RTCPUID. 95 * @returns Solaris error code. 96 */ 97 static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg) 98 { 99 RTMPEVENT enmMpEvent; 100 101 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 102 RTThreadPreemptDisable(&PreemptState); 103 104 /* 105 * Update our CPU set structures first regardless of whether we've been 106 * scheduled on the right CPU or not, this is just atomic accounting. 107 */ 108 if (CpuState == CPU_ON) 65 109 { 66 RTCpuSetAdd(&g_rtMpSolarisCpuSet, pArgs->idCpu); 67 rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, pArgs->idCpu); 110 enmMpEvent = RTMPEVENT_ONLINE; 111 RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu); 112 } 113 else if (CpuState == CPU_OFF) 114 { 115 enmMpEvent = RTMPEVENT_OFFLINE; 116 RTCpuSetDel(&g_rtMpSolCpuSet, iCpu); 117 } 118 else 119 return 0; 120 121 /* 122 * Since we don't absolutely need to do CPU bound code in any of the CPU offline 123 * notification hooks, run it on the current CPU. Scheduling a callback to execute 124 * on the CPU going offline at this point is too late and will not work reliably. 
	125	     */
	126	    bool fRunningOnTargetCpu = iCpu == RTMpCpuId();
	127	    if (   fRunningOnTargetCpu == true
	128	        || enmMpEvent == RTMPEVENT_OFFLINE)
	129	    {
	130	        rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
68	131	    }
69	132	    else
70	133	    {
71		        RTCpuSetDel(&g_rtMpSolarisCpuSet, pArgs->idCpu);
72		        rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, pArgs->idCpu);
73		    }
74		}
75		
76		
77		static void rtMpNotificationSolarisCallback(void *pvUser, int iCpu, int online)
78		{
79		    vbi_preempt_disable();
80		
81		    RTMPARGS Args;
82		    RT_ZERO(Args);
83		    Args.pvUser1 = pvUser;
84		    Args.pvUser2 = &online;
85		    Args.idCpu   = iCpu;
86		
87		    /*
88		     * If we're not on the target CPU, schedule (synchronous) the event notification callback
89		     * to run on the target CPU i.e. the one pertaining to the MP event.
90		     */
91		    bool fRunningOnTargetCpu = iCpu == RTMpCpuId();    /* ASSUMES iCpu == RTCPUID */
92		    if (fRunningOnTargetCpu)
93		        rtMpNotificationSolarisOnCurrentCpu(&Args, NULL /* pvIgnored1 */, NULL /* pvIgnored2 */);
94		    else
95		    {
96		        if (online)
97		            vbi_execute_on_one(rtMpNotificationSolarisOnCurrentCpu, &Args, iCpu);
98		        else
99		        {
100		            /*
101		             * Since we don't absolutely need to do CPU bound code in any of the CPU offline
102		             * notification hooks, run it on the current CPU. Scheduling a callback to execute
103		             * on the CPU going offline at this point is too late and will not work reliably.
104		             */
105		            RTCpuSetDel(&g_rtMpSolarisCpuSet, iCpu);
106		            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, iCpu);
107		        }
	134	        /*
	135	         * We're not on the target CPU, schedule (synchronous) the event notification callback
	136	         * to run on the target CPU i.e. the CPU that was online'd.
	137	         */
	138	        RTMPARGS Args;
	139	        RT_ZERO(Args);
	140	        Args.pvUser1 = &enmMpEvent;
	141	        Args.pvUser2 = NULL;
	142	        Args.idCpu   = iCpu;
	143	        RTMpOnSpecific(iCpu, rtMpNotificationSolOnCurrentCpu, &Args, NULL /* pvIgnored1 */);
108	144	    }
109	145	
110		    vbi_preempt_enable();
	146	    RTThreadPreemptRestore(&PreemptState);
	147	
	148	    NOREF(pvArg);
	149	    return 0;
111	150	}
… …
114	153	DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
115	154	{
116		    if (g_hVbiCpuWatch != NULL)
	155	    if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
117	156	        return VERR_WRONG_ORDER;
118	157	
119	158	    /*
120		     * Register the callback building the online cpu set as we
121		     * do so (current_too = 1).
	159	     * Register the callback building the online cpu set as we do so.
122	160	     */
123		    RTCpuSetEmpty(&g_rtMpSolarisCpuSet);
124		    g_hVbiCpuWatch = vbi_watch_cpus(rtMpNotificationSolarisCallback, NULL, 1 /*current_too*/);
	161	    RTCpuSetEmpty(&g_rtMpSolCpuSet);
	162	
	163	    mutex_enter(&cpu_lock);
	164	    register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
	165	
	166	    for (int i = 0; i < (int)RTMpGetCount(); ++i)
	167	        if (cpu_is_online(cpu[i]))
	168	            rtMpNotificationCpuEvent(CPU_ON, i, NULL /* pvArg */);
	169	
	170	    ASMAtomicWriteBool(&g_fSolCpuWatch, true);
	171	    mutex_exit(&cpu_lock);
125	172	
126	173	    return VINF_SUCCESS;
… …
130	177	DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
131	178	{
132		    if (g_hVbiCpuWatch != NULL)
133		        vbi_ignore_cpus(g_hVbiCpuWatch);
134		    g_hVbiCpuWatch = NULL;
	179	    if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
	180	    {
	181	        mutex_enter(&cpu_lock);
	182	        unregister_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
	183	        ASMAtomicWriteBool(&g_fSolCpuWatch, false);
	184	        mutex_exit(&cpu_lock);
	185	    }
135	186	}
136	187	
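The init/term pair above moves from the vbi CPU-watch API to the kernel's own CPU setup hooks. A hedged sketch of that registration protocol, assuming a Solaris kernel module and a hypothetical exampleCpuEvent callback; register_cpu_setup_func() and unregister_cpu_setup_func() both require cpu_lock to be held:

    #include <sys/types.h>
    #include <sys/cpuvar.h>
    #include <sys/cmn_err.h>

    /* CPU setup callback: fires on online/offline transitions. */
    static int exampleCpuEvent(cpu_setup_t enmState, int iCpu, void *pvArg)
    {
        if (enmState == CPU_ON)
            cmn_err(CE_CONT, "cpu %d came online\n", iCpu);
        else if (enmState == CPU_OFF)
            cmn_err(CE_CONT, "cpu %d went offline\n", iCpu);
        return 0;
    }

    static void exampleWatchCpus(void)
    {
        mutex_enter(&cpu_lock);                     /* required by the API */
        register_cpu_setup_func(exampleCpuEvent, NULL);
        mutex_exit(&cpu_lock);
    }

Replaying CPU_ON for every CPU that is already online, inside the same cpu_lock critical section as the diff does, is what keeps the online set consistent with events that would otherwise be missed between registration and the first real notification.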
trunk/src/VBox/Runtime/r0drv/solaris/vbi/process-r0drv-solaris.c
r28800	r40966
43	43	RTR0DECL(RTR0PROCESS) RTR0ProcHandleSelf(void)
44	44	{
45		    return (RTR0PROCESS)vbi_proc();
	45	    proc_t *pProcess = NULL;
	46	    drv_getparm(UPROCP, &pProcess);
	47	    return (RTR0PROCESS)pProcess;
46	48	}
47	49	
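For reference, the DDI routine the new implementation leans on: drv_getparm(UPROCP, ...) yields the proc_t of the calling process and only succeeds from user context (e.g. an ioctl path), not from interrupt context. A hedged sketch with a hypothetical wrapper name:

    #include <sys/types.h>
    #include <sys/proc.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static proc_t *exampleCurrentProc(void)
    {
        proc_t *pProc = NULL;
        if (drv_getparm(UPROCP, &pProc) != 0)   /* fails outside user context */
            return NULL;
        return pProc;
    }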
trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread-r0drv-solaris.c
r39443	r40966
40	40	#include <iprt/mp.h>
41	41	
42		
	42	#define SOL_THREAD_PREEMPT      (*((char *)curthread + g_offrtSolThreadPreempt))
	43	#define SOL_CPU_RUNRUN          (*((char *)CPU + g_offrtSolCpuPreempt))
	44	#define SOL_CPU_KPRUNRUN        (*((char *)CPU + g_offrtSolCpuForceKernelPreempt))
43	45	
44	46	RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
45	47	{
46		    return (RTNATIVETHREAD)vbi_curthread();
	48	    return (RTNATIVETHREAD)curthread;
47	49	}
… …
55	57	    if (!cMillies)
56	58	    {
57		        vbi_yield();
	59	        RTThreadYield();
58	60	        return VINF_SUCCESS;
59	61	    }
… …
84	86	{
85	87	    RT_ASSERT_PREEMPTIBLE();
86		    return vbi_yield();
	88	
	89	    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
	90	    RTThreadPreemptDisable(&PreemptState);
	91	
	92	    char cThreadPreempt = SOL_THREAD_PREEMPT;
	93	    char cForcePreempt  = SOL_CPU_KPRUNRUN;
	94	    bool fWillYield     = false;
	95	    Assert(cThreadPreempt >= 1);
	96	
	97	    /*
	98	     * If we are the last preemption enabler for this thread and if force
	99	     * preemption is set on the CPU, only then we are guaranteed to be preempted.
	100	     */
	101	    if (cThreadPreempt == 1 && cForcePreempt != 0)
	102	        fWillYield = true;
	103	
	104	    RTThreadPreemptRestore(&PreemptState);
	105	    return fWillYield;
87	106	}
… …
91	110	{
92	111	    Assert(hThread == NIL_RTTHREAD);
93		    if (!vbi_is_preempt_enabled())
	112	    if (RT_UNLIKELY(g_frtSolInitDone == false))
	113	    {
	114	        cmn_err(CE_CONT, "!RTThreadPreemptIsEnabled called before RTR0Init!\n");
	115	        return true;
	116	    }
	117	
	118	    bool fThreadPreempt = false;
	119	    if (SOL_THREAD_PREEMPT == 0)
	120	        fThreadPreempt = true;
	121	
	122	    if (!fThreadPreempt)
94	123	        return false;
95	124	#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
… …
106	135	{
107	136	    Assert(hThread == NIL_RTTHREAD);
108		    return !!vbi_is_preempt_pending();
	137	
	138	    char cPreempt      = SOL_CPU_RUNRUN;
	139	    char cForcePreempt = SOL_CPU_KPRUNRUN;
	140	    return (cPreempt != 0 || cForcePreempt != 0);
109	141	}
… …
128	160	    AssertPtr(pState);
129	161	
130		    vbi_preempt_disable();
	162	    SOL_THREAD_PREEMPT++;
	163	    Assert(SOL_THREAD_PREEMPT >= 1);
131	164	
132	165	    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
… …
139	172	    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
140	173	
141		    vbi_preempt_enable();
	174	    Assert(SOL_THREAD_PREEMPT >= 1);
	175	    if (--SOL_THREAD_PREEMPT == 0 && SOL_CPU_RUNRUN != 0)
	176	        kpreempt(KPREEMPT_SYNC);
142	177	}
143	178	
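The SOL_* macros above poke at kthread_t and cpu_t fields through byte offsets resolved at init time (the g_offrtSol* globals), because the structure layout differs between Solaris builds. When the offsets are known at compile time, the same checks reduce to plain field access; a sketch, assuming kernel headers that match the running system:

    #include <sys/types.h>
    #include <sys/thread.h>
    #include <sys/cpuvar.h>

    static boolean_t examplePreemptionPending(void)
    {
        /* cpu_runrun: ordinary reschedule request; cpu_kprunrun: forced
         * kernel preemption. Either one means the scheduler wants this
         * CPU back. */
        return CPU->cpu_runrun != 0 || CPU->cpu_kprunrun != 0;
    }

    static boolean_t examplePreemptionEnabled(void)
    {
        /* t_preempt is a nesting counter; zero means preemptible. */
        return curthread->t_preempt == 0;
    }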
trunk/src/VBox/Runtime/r0drv/solaris/vbi/thread2-r0drv-solaris.c
r36555	r40966
32	32	#include "internal/iprt.h"
33	33	#include <iprt/thread.h>
	34	#include <iprt/process.h>
34	35	
35	36	#include <iprt/assert.h>
… …
67	68	    }
68	69	
69		    vbi_set_priority(vbi_curthread(), iPriority);
	70	    kthread_t *pCurThread = curthread;
	71	    Assert(pCurThread);
	72	    thread_lock(pCurThread);
	73	    thread_change_pri(pCurThread, iPriority, 0);
	74	    thread_unlock(pCurThread);
70	75	    return VINF_SUCCESS;
71	76	}
… …
96	101	    PRTTHREADINT pThreadInt = (PRTTHREADINT)pvThreadInt;
97	102	
98		    rtThreadMain(pThreadInt, (RTNATIVETHREAD)vbi_curthread(), &pThreadInt->szName[0]);
99		    vbi_thread_exit();
	103	    rtThreadMain(pThreadInt, RTThreadNativeSelf(), &pThreadInt->szName[0]);
	104	    thread_exit();
100	105	}
101	106	
… …
103	108	DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
104	109	{
105		    void *pvKernThread;
106	110	    RT_ASSERT_PREEMPTIBLE();
107		
108		    pvKernThread = vbi_thread_create(rtThreadNativeMain, pThreadInt, sizeof(pThreadInt), minclsyspri);
109		    if (pvKernThread)
	111	    kthread_t *pThread = thread_create(NULL,                            /* Stack, use base */
	112	                                       0,                               /* Stack size */
	113	                                       rtThreadNativeMain,              /* Thread function */
	114	                                       pThreadInt,                      /* Function data */
	115	                                       sizeof(pThreadInt),              /* Data size */
	116	                                       (proc_t *)RTR0ProcHandleSelf(),  /* Process handle */
	117	                                       TS_RUN,                          /* Ready to run */
	118	                                       minclsyspri                      /* Priority */
	119	                                       );
	120	    if (RT_LIKELY(pThread))
110	121	    {
111		        *pNativeThread = (RTNATIVETHREAD)pvKernThread;
	122	        *pNativeThread = (RTNATIVETHREAD)pThread;
112	123	        return VINF_SUCCESS;
113	124	    }
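A condensed sketch of the thread_create() pattern adopted above, with a hypothetical worker function; a kernel thread must finish with thread_exit() rather than returning:

    #include <sys/types.h>
    #include <sys/thread.h>
    #include <sys/proc.h>
    #include <sys/disp.h>

    static void exampleWorker(void *pvArg)
    {
        /* ... do work with pvArg ... */
        thread_exit();                      /* never returns */
    }

    static kthread_t *exampleSpawn(void *pvArg)
    {
        return thread_create(NULL,          /* let the kernel allocate the stack */
                             0,             /* default stack size */
                             exampleWorker, /* entry point */
                             pvArg,         /* argument */
                             0,             /* bytes of arg data to copy (0: pass pointer) */
                             &p0,           /* owning process: the kernel (proc 0) */
                             TS_RUN,        /* start runnable immediately */
                             minclsyspri);  /* lowest system-class priority */
    }

Passing NULL/0 for the stack lets the kernel allocate a default-sized one, which is what the diff does as well; the diff parents the thread to the calling process instead of p0.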
trunk/src/VBox/Runtime/r0drv/solaris/vbi/time-r0drv-solaris.c
r28800	r40966
61	61	RTDECL(PRTTIMESPEC) RTTimeNow(PRTTIMESPEC pTime)
62	62	{
63		    return RTTimeSpecSetNano(pTime, vbi_tod());
	63	    timestruc_t TimeSpec;
	64	
	65	    mutex_enter(&tod_lock);
	66	    TimeSpec = tod_get();
	67	    mutex_exit(&tod_lock);
	68	    return RTTimeSpecSetNano(pTime, (uint64_t)TimeSpec.tv_sec * 1000000000 + TimeSpec.tv_nsec);
64	69	}
65	70	
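The tod_get() read above must be done under tod_lock. For comparison, a sketch of the same wall-clock read via gethrestime(), which handles the locking internally; the scaling from seconds to nanoseconds is identical:

    #include <sys/types.h>
    #include <sys/time.h>

    static uint64_t exampleWallClockNanoTS(void)
    {
        timestruc_t Ts;
        gethrestime(&Ts);               /* wall-clock time, no tod_lock needed */
        return (uint64_t)Ts.tv_sec * 1000000000ULL + Ts.tv_nsec;
    }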
trunk/src/VBox/Runtime/r0drv/solaris/vbi/timer-r0drv-solaris.c
r37275	r40966
46	46	#include "internal/magics.h"
47	47	
	48	#define SOL_TIMER_ANY_CPU       (-1)
48	49	
49	50	/*******************************************************************************
50	51	 *   Structures and Typedefs                                                   *
51	52	 *******************************************************************************/
	53	/**
	54	 * Single-CPU timer handle.
	55	 */
	56	typedef struct RTR0SINGLETIMERSOL
	57	{
	58	    /** Cyclic handler. */
	59	    cyc_handler_t   hHandler;
	60	    /** Cyclic time and interval representation. */
	61	    cyc_time_t      hFireTime;
	62	    /** Timer ticks. */
	63	    uint64_t        u64Tick;
	64	} RTR0SINGLETIMERSOL;
	65	typedef RTR0SINGLETIMERSOL *PRTR0SINGLETIMERSOL;
	66	
	67	/**
	68	 * Omni-CPU timer handle.
	69	 */
	70	typedef struct RTR0OMNITIMERSOL
	71	{
	72	    /** Absolute timestamp of when the timer should fire next. */
	73	    uint64_t        u64When;
	74	    /** Array of timer ticks per CPU. Reinitialized when a CPU is online'd. */
	75	    uint64_t       *au64Ticks;
	76	} RTR0OMNITIMERSOL;
	77	typedef RTR0OMNITIMERSOL *PRTR0OMNITIMERSOL;
	78	
52	79	/**
53	80	 * The internal representation of a Solaris timer handle.
… …
61	88	    /** Flag indicating that the timer is suspended. */
62	89	    uint8_t volatile        fSuspended;
63		    /** Run on all CPUs if set */
	90	    /** Whether the timer must run on all CPUs or not. */
64	91	    uint8_t                 fAllCpu;
65	92	    /** Whether the timer must run on a specific CPU or not. */
66	93	    uint8_t                 fSpecificCpu;
67	94	    /** The CPU it must run on if fSpecificCpu is set. */
68	95	    uint8_t                 iCpu;
69		    /** The nano second interval for repeating timers */
	96	    /** The nano second interval for repeating timers. */
70	97	    uint64_t                interval;
71		    /** simple Solaris timer handle. */
72		    vbi_stimer_t           *stimer;
73		    /** global Solaris timer handle. */
74		    vbi_gtimer_t           *gtimer;
	98	    /** Cyclic timer Id. */
	99	    cyclic_id_t             hCyclicId;
	100	    /** @todo Make this a union unless we intend to support omni<=>single timers
	101	     *        conversions. */
	102	    /** Single-CPU timer handle. */
	103	    PRTR0SINGLETIMERSOL     pSingleTimer;
	104	    /** Omni-CPU timer handle. */
	105	    PRTR0OMNITIMERSOL       pOmniTimer;
75	106	    /** The user callback. */
76	107	    PFNRTTIMER              pfnTimer;
… …
88	119	    { \
89	120	        AssertPtrReturn(pTimer, VERR_INVALID_HANDLE); \
90		        AssertReturn((pTimer)->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE); \
	121	        AssertMsgReturn((pTimer)->u32Magic == RTTIMER_MAGIC, ("pTimer=%p u32Magic=%x expected %x\n", (pTimer), (pTimer)->u32Magic, RTTIMER_MAGIC), \
	122	                        VERR_INVALID_HANDLE); \
91	123	    } while (0)
92	124	
93	125	
94		/*
95		 * Need a wrapper to get the PRTTIMER passed through
96		 */
97		static void rtTimerSolarisCallbackWrapper(PRTTIMER pTimer, uint64_t tick)
98		{
99		    pTimer->pfnTimer(pTimer, pTimer->pvUser, tick);
100		}
101		
102		
	126	/**
	127	 * Callback wrapper for Omni-CPU and single-CPU timers.
	128	 *
	129	 * @param    pvArg              Opaque pointer to the timer.
	130	 *
	131	 * @remarks This will be executed in interrupt context but only at the specified
	132	 *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
	133	 *          cyclic subsystem here, neither should pfnTimer().
	134	 */
	135	static void rtTimerSolCallbackWrapper(void *pvArg)
	136	{
	137	    PRTTIMER pTimer = (PRTTIMER)pvArg;
	138	    AssertPtrReturnVoid(pTimer);
	139	
	140	    if (pTimer->pSingleTimer)
	141	    {
	142	        uint64_t u64Tick = ++pTimer->pSingleTimer->u64Tick;
	143	        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
	144	    }
	145	    else if (pTimer->pOmniTimer)
	146	    {
	147	        uint64_t u64Tick = ++pTimer->pOmniTimer->au64Ticks[CPU->cpu_id];
	148	        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
	149	    }
	150	}
	151	
	152	
	153	/**
	154	 * Omni-CPU cyclic online event. This is called before the omni cycle begins to
	155	 * fire on the specified CPU.
	156	 *
	157	 * @param    pvArg              Opaque pointer to the timer.
	158	 * @param    pCpu               Pointer to the CPU on which it will fire.
	159	 * @param    pCyclicHandler     Pointer to a cyclic handler to add to the CPU
	160	 *                              specified in @a pCpu.
	161	 * @param    pCyclicTime        Pointer to the cyclic time and interval object.
	162	 *
	163	 * @remarks We -CANNOT- call back into the cyclic subsystem here, we can however
	164	 *          block (sleep).
	165	 */
	166	static void rtTimerSolOmniCpuOnline(void *pvArg, cpu_t *pCpu, cyc_handler_t *pCyclicHandler, cyc_time_t *pCyclicTime)
	167	{
	168	    PRTTIMER pTimer = (PRTTIMER)pvArg;
	169	    AssertPtrReturnVoid(pTimer);
	170	    AssertPtrReturnVoid(pCpu);
	171	    AssertPtrReturnVoid(pCyclicHandler);
	172	    AssertPtrReturnVoid(pCyclicTime);
	173	
	174	    pTimer->pOmniTimer->au64Ticks[pCpu->cpu_id] = 0;
	175	    pCyclicHandler->cyh_func  = rtTimerSolCallbackWrapper;
	176	    pCyclicHandler->cyh_arg   = pTimer;
	177	    pCyclicHandler->cyh_level = CY_LOCK_LEVEL;
	178	
	179	    uint64_t u64Now = RTTimeNanoTS();
	180	    if (pTimer->pOmniTimer->u64When < u64Now)
	181	        pCyclicTime->cyt_when = u64Now + pTimer->interval / 2;
	182	    else
	183	        pCyclicTime->cyt_when = pTimer->pOmniTimer->u64When;
	184	
	185	    pCyclicTime->cyt_interval = pTimer->interval;
	186	}
… …
152	236	    pTimer->pfnTimer = pfnTimer;
153	237	    pTimer->pvUser = pvUser;
154		    pTimer->stimer = NULL;
155		    pTimer->gtimer = NULL;
156		
	238	    pTimer->pSingleTimer = NULL;
	239	    pTimer->pOmniTimer = NULL;
	240	    pTimer->hCyclicId = CYCLIC_NONE;
	241	
	242	    cmn_err(CE_NOTE, "Create pTimer->u32Magic=%x RTTIMER_MAGIC=%x\n", pTimer->u32Magic, RTTIMER_MAGIC);
157	243	    *ppTimer = pTimer;
158	244	    return VINF_SUCCESS;
… …
179	265	RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
180	266	{
	267	    cmn_err(CE_NOTE, "Start pTimer->u32Magic=%x RTTIMER_MAGIC=%x\n", pTimer->u32Magic, RTTIMER_MAGIC);
181	268	    RTTIMER_ASSERT_VALID_RET(pTimer);
182	269	    RT_ASSERT_INTS_ON();
… …
185	272	        return VERR_TIMER_ACTIVE;
186	273	
	274	    /* One-shot timers are not supported by the cyclic system. */
	275	    if (pTimer->interval == 0)
	276	        return VERR_NOT_SUPPORTED;
	277	
187	278	    pTimer->fSuspended = false;
188	279	    if (pTimer->fAllCpu)
189	280	    {
190		        pTimer->gtimer = vbi_gtimer_begin(rtTimerSolarisCallbackWrapper, pTimer, u64First, pTimer->interval);
191		        if (pTimer->gtimer == NULL)
192		            return VERR_INVALID_PARAMETER;
	281	        PRTR0OMNITIMERSOL pOmniTimer = RTMemAllocZ(sizeof(RTR0OMNITIMERSOL));
	282	        if (RT_UNLIKELY(!pOmniTimer))
	283	            return VERR_NO_MEMORY;
	284	
	285	        pOmniTimer->au64Ticks = RTMemAllocZ(RTMpGetCount() * sizeof(uint64_t));
	286	        if (RT_UNLIKELY(!pOmniTimer->au64Ticks))
	287	        {
	288	            RTMemFree(pOmniTimer);
	289	            return VERR_NO_MEMORY;
	290	        }
	291	
	292	        /*
	293	         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
	294	         * and from there we setup periodic timers per CPU.
	295	         */
	296	        pTimer->pOmniTimer = pOmniTimer;
	297	        pOmniTimer->u64When  = pTimer->interval + RTTimeNanoTS();
	298	
	299	        cyc_omni_handler_t hOmni;
	300	        hOmni.cyo_online  = rtTimerSolOmniCpuOnline;
	301	        hOmni.cyo_offline = NULL;
	302	        hOmni.cyo_arg     = pTimer;
	303	
	304	        mutex_enter(&cpu_lock);
	305	        pTimer->hCyclicId = cyclic_add_omni(&hOmni);
	306	        mutex_exit(&cpu_lock);
193	307	    }
194	308	    else
195	309	    {
196		        int iCpu = VBI_ANY_CPU;
	310	        int iCpu = SOL_TIMER_ANY_CPU;
197	311	        if (pTimer->fSpecificCpu)
	312	        {
198	313	            iCpu = pTimer->iCpu;
	314	            if (!RTMpIsCpuOnline(iCpu))    /* ASSUMES: index == cpuid */
	315	                return VERR_CPU_OFFLINE;
	316	        }
	317	
	318	        PRTR0SINGLETIMERSOL pSingleTimer = RTMemAllocZ(sizeof(RTR0SINGLETIMERSOL));
	319	        if (RT_UNLIKELY(!pSingleTimer))
	320	            return VERR_NO_MEMORY;
	321	
	322	        pTimer->pSingleTimer = pSingleTimer;
	323	        pSingleTimer->hHandler.cyh_func  = rtTimerSolCallbackWrapper;
	324	        pSingleTimer->hHandler.cyh_arg   = pTimer;
	325	        pSingleTimer->hHandler.cyh_level = CY_LOCK_LEVEL;
	326	
	327	        mutex_enter(&cpu_lock);
	328	        if (iCpu != SOL_TIMER_ANY_CPU && !cpu_is_online(cpu[iCpu]))
199		        pTimer->stimer = vbi_stimer_begin(rtTimerSolarisCallbackWrapper, pTimer, u64First, pTimer->interval, iCpu);
200		        if (pTimer->stimer == NULL)
	329	        {
201		            if (iCpu != VBI_ANY_CPU)
202		                return VERR_CPU_OFFLINE;
203		            return VERR_INVALID_PARAMETER;
	330	            mutex_exit(&cpu_lock);
	331	            RTMemFree(pSingleTimer);
	332	            pTimer->pSingleTimer = NULL;
	333	            return VERR_CPU_OFFLINE;
204	334	        }
	335	
	336	        pSingleTimer->hFireTime.cyt_when = u64First + RTTimeNanoTS();
	337	        if (pTimer->interval == 0)
	338	        {
	339	            /* @todo use gethrtime_max instead of LLONG_MAX? */
	340	            AssertCompileSize(pSingleTimer->hFireTime.cyt_interval, sizeof(long long));
	341	            pSingleTimer->hFireTime.cyt_interval = LLONG_MAX - pSingleTimer->hFireTime.cyt_when;
	342	        }
	343	        else
	344	            pSingleTimer->hFireTime.cyt_interval = pTimer->interval;
	345	
	346	        pTimer->hCyclicId = cyclic_add(&pSingleTimer->hHandler, &pSingleTimer->hFireTime);
	347	        if (iCpu != SOL_TIMER_ANY_CPU)
	348	            cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);
	349	
	350	        mutex_exit(&cpu_lock);
205	351	    }
… …
219	364	
220	365	    pTimer->fSuspended = true;
221		    if (pTimer->stimer)
222		    {
223		        vbi_stimer_end(pTimer->stimer);
224		        pTimer->stimer = NULL;
225		    }
226		    else if (pTimer->gtimer)
227		    {
228		        vbi_gtimer_end(pTimer->gtimer);
229		        pTimer->gtimer = NULL;
	366	    if (pTimer->pSingleTimer)
	367	    {
	368	        mutex_enter(&cpu_lock);
	369	        cyclic_remove(pTimer->hCyclicId);
	370	        mutex_exit(&cpu_lock);
	371	        RTMemFree(pTimer->pSingleTimer);
	372	    }
	373	    else if (pTimer->pOmniTimer)
	374	    {
	375	        mutex_enter(&cpu_lock);
	376	        cyclic_remove(pTimer->hCyclicId);
	377	        mutex_exit(&cpu_lock);
	378	        RTMemFree(pTimer->pOmniTimer->au64Ticks);
	379	        RTMemFree(pTimer->pOmniTimer);
230	380	    }
231	381	
… …
234	384	
235	385	
236		
237	386	RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
238	387	{
… …
247	396	RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
248	397	{
249		    return vbi_timer_granularity();
	398	    return nsec_per_tick;
250	399	}
251	400	
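The timer rewrite boils down to the cyclic subsystem's add/bind/remove protocol: fill in a cyc_handler_t and a cyc_time_t, then call cyclic_add() (or cyclic_add_omni() for the all-CPU case) with cpu_lock held. A minimal sketch with hypothetical example* names; CY_LOCK_LEVEL and the no-reentry rule for the handler are as stated in the diff's own comments:

    #include <sys/types.h>
    #include <sys/time.h>
    #include <sys/cpuvar.h>
    #include <sys/cyclic.h>

    /* Fires in interrupt context at CY_LOCK_LEVEL; must not call back
     * into the cyclic subsystem. */
    static void exampleCyclicFire(void *pvArg)
    {
        (*(uint64_t *)pvArg)++;             /* count ticks */
    }

    static cyclic_id_t exampleStartPeriodic(uint64_t *pcTicks, hrtime_t cNsInterval)
    {
        cyc_handler_t Handler;
        cyc_time_t    When;
        cyclic_id_t   hCyclic;

        Handler.cyh_func  = exampleCyclicFire;
        Handler.cyh_arg   = pcTicks;
        Handler.cyh_level = CY_LOCK_LEVEL;

        When.cyt_when     = gethrtime() + cNsInterval;  /* first expiry */
        When.cyt_interval = cNsInterval;                /* then periodic */

        mutex_enter(&cpu_lock);                         /* required by cyclic_add() */
        hCyclic = cyclic_add(&Handler, &When);
        mutex_exit(&cpu_lock);
        return hCyclic;
    }

cyclic_remove() must likewise be called under cpu_lock, which is exactly what the reworked RTTimerStop() above does before freeing the per-timer state.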