VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/mp-r0drv-solaris.c@ 56290

Last change on this file since 56290 was 56290, checked in by vboxsync, 10 years ago

IPRT: Updated (C) year.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 12.0 KB
Line 
1/* $Id: mp-r0drv-solaris.c 56290 2015-06-09 14:01:31Z vboxsync $ */
2/** @file
3 * IPRT - Multiprocessor, Ring-0 Driver, Solaris.
4 */
5
6/*
7 * Copyright (C) 2008-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "the-solaris-kernel.h"
32#include "internal/iprt.h"
33#include <iprt/mp.h>
34#include <iprt/cpuset.h>
35#include <iprt/thread.h>
36
37#include <iprt/asm.h>
38#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
39# include <iprt/asm-amd64-x86.h>
40#endif
41#include <iprt/err.h>
42#include "r0drv/mp-r0drv.h"
43
/** Solaris cross-call (IPI) worker function.
 * Signature matches what the Solaris xc_call()-style interfaces invoke on
 * each target CPU: three opaque arguments, int return (ignored here). */
typedef int FNRTMPSOLWORKER(void *pvUser1, void *pvUser2, void *pvUser3);
/** Pointer to a FNRTMPSOLWORKER. */
typedef FNRTMPSOLWORKER *PFNRTMPSOLWORKER;
46
47
RTDECL(bool) RTMpIsCpuWorkPending(void)
{
    /* Not implemented on Solaris: we have no way to query pending
       cross-call work here, so always report none. */
    return false;
}
52
53
RTDECL(RTCPUID) RTMpCpuId(void)
{
    /* CPU is the Solaris kernel's current-CPU pointer; cpu_id is its ID. */
    return CPU->cpu_id;
}
58
59
RTDECL(int) RTMpCurSetIndex(void)
{
    /* On Solaris the CPU ID doubles as the set index (identity mapping). */
    return CPU->cpu_id;
}
64
65
RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
{
    /* Set index and CPU ID are identical on Solaris: store the current
       CPU's ID in *pidCpu and return the same value as the set index. */
    return *pidCpu = CPU->cpu_id;
}
70
71
72RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
73{
74 return idCpu < RTCPUSET_MAX_CPUS && idCpu <= max_cpuid ? idCpu : -1;
75}
76
77
78RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
79{
80 return (unsigned)iCpu <= max_cpuid ? iCpu : NIL_RTCPUID;
81}
82
83
RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
{
    /* max_cpuid is the Solaris kernel's highest assigned CPU ID (inclusive). */
    return max_cpuid;
}
88
89
90RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
91{
92 /*
93 * We cannot query CPU status recursively, check cpu member from cached set.
94 */
95 if (idCpu >= ncpus)
96 return false;
97
98 return RTCpuSetIsMember(&g_rtMpSolCpuSet, idCpu);
99}
100
101
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
    /* Any ID below the configured CPU count (ncpus) is possible. */
    return idCpu < ncpus;
}
106
107
108RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
109{
110 RTCPUID idCpu;
111
112 RTCpuSetEmpty(pSet);
113 idCpu = RTMpGetMaxCpuId(); /* it's inclusive */
114 do
115 {
116 if (RTMpIsCpuPossible(idCpu))
117 RTCpuSetAdd(pSet, idCpu);
118 } while (idCpu-- > 0);
119
120 return pSet;
121}
122
123
RTDECL(RTCPUID) RTMpGetCount(void)
{
    /* ncpus is the Solaris kernel's configured CPU count. */
    return ncpus;
}
128
129
/**
 * Gets the set of online CPUs.
 *
 * @returns pSet.
 * @param   pSet    Where to store the online CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    /*
     * We cannot query CPU status recursively, return the cached set.
     */
    *pSet = g_rtMpSolCpuSet;
    return pSet;
}
138
139
140RTDECL(RTCPUID) RTMpGetOnlineCount(void)
141{
142 RTCPUSET Set;
143 RTMpGetOnlineSet(&Set);
144 return RTCpuSetCount(&Set);
145}
146
147
148/**
149 * Wrapper to Solaris IPI infrastructure.
150 *
151 * @returns Solaris error code.
152 * @param pCpuSet Pointer to Solaris CPU set.
153 * @param pfnSolWorker Function to execute on target CPU(s).
154 * @param pArgs Pointer to RTMPARGS to pass to @a pfnSolWorker.
155 */
156static void rtMpSolCrossCall(PRTSOLCPUSET pCpuSet, PFNRTMPSOLWORKER pfnSolWorker, PRTMPARGS pArgs)
157{
158 AssertPtrReturnVoid(pCpuSet);
159 AssertPtrReturnVoid(pfnSolWorker);
160 AssertPtrReturnVoid(pCpuSet);
161
162 if (g_frtSolOldIPI)
163 {
164 if (g_frtSolOldIPIUlong)
165 {
166 g_rtSolXcCall.u.pfnSol_xc_call_old_ulong((xc_arg_t)pArgs, /* Arg to IPI function */
167 0, /* Arg2, ignored */
168 0, /* Arg3, ignored */
169 IPRT_SOL_X_CALL_HIPRI, /* IPI priority */
170 pCpuSet->auCpus[0], /* Target CPU(s) */
171 (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
172 }
173 else
174 {
175 g_rtSolXcCall.u.pfnSol_xc_call_old((xc_arg_t)pArgs, /* Arg to IPI function */
176 0, /* Arg2, ignored */
177 0, /* Arg3, ignored */
178 IPRT_SOL_X_CALL_HIPRI, /* IPI priority */
179 *pCpuSet, /* Target CPU set */
180 (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
181 }
182 }
183 else
184 {
185 g_rtSolXcCall.u.pfnSol_xc_call((xc_arg_t)pArgs, /* Arg to IPI function */
186 0, /* Arg2 */
187 0, /* Arg3 */
188 &pCpuSet->auCpus[0], /* Target CPU set */
189 (xc_func_t)pfnSolWorker); /* Function to execute on target(s) */
190 }
191}
192
193
194/**
195 * Wrapper between the native solaris per-cpu callback and PFNRTWORKER
196 * for the RTMpOnAll API.
197 *
198 * @returns Solaris error code.
199 * @param uArgs Pointer to the RTMPARGS package.
200 * @param pvIgnored1 Ignored.
201 * @param pvIgnored2 Ignored.
202 */
203static int rtMpSolOnAllCpuWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
204{
205 PRTMPARGS pArgs = (PRTMPARGS)(uArg);
206
207 /*
208 * Solaris CPU cross calls execute on offline CPUs too. Check our CPU cache
209 * set and ignore if it's offline.
210 */
211 if (!RTMpIsCpuOnline(RTMpCpuId()))
212 return 0;
213
214 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
215
216 NOREF(pvIgnored1);
217 NOREF(pvIgnored2);
218 return 0;
219}
220
221
222RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
223{
224 RTMPARGS Args;
225 RTSOLCPUSET CpuSet;
226 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
227 RT_ASSERT_INTS_ON();
228
229 Args.pfnWorker = pfnWorker;
230 Args.pvUser1 = pvUser1;
231 Args.pvUser2 = pvUser2;
232 Args.idCpu = NIL_RTCPUID;
233 Args.cHits = 0;
234
235 for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
236 CpuSet.auCpus[i] = (ulong_t)-1L;
237
238 RTThreadPreemptDisable(&PreemptState);
239
240 rtMpSolCrossCall(&CpuSet, rtMpSolOnAllCpuWrapper, &Args);
241
242 RTThreadPreemptRestore(&PreemptState);
243
244 return VINF_SUCCESS;
245}
246
247
248/**
249 * Wrapper between the native solaris per-cpu callback and PFNRTWORKER
250 * for the RTMpOnOthers API.
251 *
252 * @returns Solaris error code.
253 * @param uArgs Pointer to the RTMPARGS package.
254 * @param pvIgnored1 Ignored.
255 * @param pvIgnored2 Ignored.
256 */
257static int rtMpSolOnOtherCpusWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
258{
259 PRTMPARGS pArgs = (PRTMPARGS)(uArg);
260 RTCPUID idCpu = RTMpCpuId();
261
262 Assert(idCpu != pArgs->idCpu);
263 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
264
265 NOREF(pvIgnored1);
266 NOREF(pvIgnored2);
267 return 0;
268}
269
270
271RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
272{
273 RTMPARGS Args;
274 RTSOLCPUSET CpuSet;
275 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
276 RT_ASSERT_INTS_ON();
277
278 Args.pfnWorker = pfnWorker;
279 Args.pvUser1 = pvUser1;
280 Args.pvUser2 = pvUser2;
281 Args.idCpu = RTMpCpuId();
282 Args.cHits = 0;
283
284 /* The caller is supposed to have disabled preemption, but take no chances. */
285 RTThreadPreemptDisable(&PreemptState);
286
287 for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
288 CpuSet.auCpus[0] = (ulong_t)-1L;
289 BT_CLEAR(CpuSet.auCpus, RTMpCpuId());
290
291 rtMpSolCrossCall(&CpuSet, rtMpSolOnOtherCpusWrapper, &Args);
292
293 RTThreadPreemptRestore(&PreemptState);
294
295 return VINF_SUCCESS;
296}
297
298
299
300/**
301 * Wrapper between the native solaris per-cpu callback and PFNRTWORKER
302 * for the RTMpOnPair API.
303 *
304 * @returns Solaris error code.
305 * @param uArgs Pointer to the RTMPARGS package.
306 * @param pvIgnored1 Ignored.
307 * @param pvIgnored2 Ignored.
308 */
309static int rtMpSolOnPairCpuWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
310{
311 PRTMPARGS pArgs = (PRTMPARGS)(uArg);
312 RTCPUID idCpu = RTMpCpuId();
313
314 Assert(idCpu == pArgs->idCpu || idCpu == pArgs->idCpu2);
315 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
316 ASMAtomicIncU32(&pArgs->cHits);
317
318 NOREF(pvIgnored1);
319 NOREF(pvIgnored2);
320 return 0;
321}
322
323
324RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
325{
326 int rc;
327 RTMPARGS Args;
328 RTSOLCPUSET CpuSet;
329 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
330
331 AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
332 AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
333
334 Args.pfnWorker = pfnWorker;
335 Args.pvUser1 = pvUser1;
336 Args.pvUser2 = pvUser2;
337 Args.idCpu = idCpu1;
338 Args.idCpu2 = idCpu2;
339 Args.cHits = 0;
340
341 for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
342 CpuSet.auCpus[i] = 0;
343 BT_SET(CpuSet.auCpus, idCpu1);
344 BT_SET(CpuSet.auCpus, idCpu2);
345
346 /*
347 * Check that both CPUs are online before doing the broadcast call.
348 */
349 RTThreadPreemptDisable(&PreemptState);
350 if ( RTMpIsCpuOnline(idCpu1)
351 && RTMpIsCpuOnline(idCpu2))
352 {
353 rtMpSolCrossCall(&CpuSet, rtMpSolOnPairCpuWrapper, &Args);
354
355 Assert(Args.cHits <= 2);
356 if (Args.cHits == 2)
357 rc = VINF_SUCCESS;
358 else if (Args.cHits == 1)
359 rc = VERR_NOT_ALL_CPUS_SHOWED;
360 else if (Args.cHits == 0)
361 rc = VERR_CPU_OFFLINE;
362 else
363 rc = VERR_CPU_IPE_1;
364 }
365 /*
366 * A CPU must be present to be considered just offline.
367 */
368 else if ( RTMpIsCpuPresent(idCpu1)
369 && RTMpIsCpuPresent(idCpu2))
370 rc = VERR_CPU_OFFLINE;
371 else
372 rc = VERR_CPU_NOT_FOUND;
373
374 RTThreadPreemptRestore(&PreemptState);
375 return rc;
376}
377
378
RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
{
    /* The Solaris cross-call runs the worker on both CPUs concurrently. */
    return true;
}
383
384
385/**
386 * Wrapper between the native solaris per-cpu callback and PFNRTWORKER
387 * for the RTMpOnSpecific API.
388 *
389 * @returns Solaris error code.
390 * @param uArgs Pointer to the RTMPARGS package.
391 * @param pvIgnored1 Ignored.
392 * @param pvIgnored2 Ignored.
393 */
394static int rtMpSolOnSpecificCpuWrapper(void *uArg, void *pvIgnored1, void *pvIgnored2)
395{
396 PRTMPARGS pArgs = (PRTMPARGS)(uArg);
397 RTCPUID idCpu = RTMpCpuId();
398
399 Assert(idCpu == pArgs->idCpu);
400 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
401 ASMAtomicIncU32(&pArgs->cHits);
402
403 NOREF(pvIgnored1);
404 NOREF(pvIgnored2);
405 return 0;
406}
407
408
409RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
410{
411 RTMPARGS Args;
412 RTSOLCPUSET CpuSet;
413 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
414 RT_ASSERT_INTS_ON();
415
416 if (idCpu >= ncpus)
417 return VERR_CPU_NOT_FOUND;
418
419 if (RT_UNLIKELY(!RTMpIsCpuOnline(idCpu)))
420 return RTMpIsCpuPresent(idCpu) ? VERR_CPU_OFFLINE : VERR_CPU_NOT_FOUND;
421
422 Args.pfnWorker = pfnWorker;
423 Args.pvUser1 = pvUser1;
424 Args.pvUser2 = pvUser2;
425 Args.idCpu = idCpu;
426 Args.cHits = 0;
427
428 for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
429 CpuSet.auCpus[i] = 0;
430 BT_SET(CpuSet.auCpus, idCpu);
431
432 RTThreadPreemptDisable(&PreemptState);
433
434 rtMpSolCrossCall(&CpuSet, rtMpSolOnSpecificCpuWrapper, &Args);
435
436 RTThreadPreemptRestore(&PreemptState);
437
438 Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);
439
440 return ASMAtomicUoReadU32(&Args.cHits) == 1
441 ? VINF_SUCCESS
442 : VERR_CPU_NOT_FOUND;
443}
444
445
RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
{
    /* The broadcast cross call used by RTMpOnAll runs workers concurrently. */
    return true;
}
450
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette