VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c@ 85602

Last change on this file since 85602 was 85602, checked in by vboxsync, 4 years ago

IPRT/r0drv/mp-r0drv-linux.c: Use version checks for linux/cpumask.h stuff rather than assuming that everything in it will remain macros forever (num_online_cpus isn't anymore). Fixed possible bug in RTMpGetCount where it wouldn't handle CPU hotplugging right. [corrected nr_cpumask_bits version from 2.6.27 to 2.6.28]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.5 KB
Line 
1/* $Id: mp-r0drv-linux.c 85602 2020-08-04 11:21:28Z vboxsync $ */
2/** @file
3 * IPRT - Multiprocessor, Ring-0 Driver, Linux.
4 */
5
6/*
7 * Copyright (C) 2008-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-linux-kernel.h"
32#include "internal/iprt.h"
33
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/thread.h>
39#include "r0drv/mp-r0drv.h"
40
41
42/*********************************************************************************************************************************
43* Defined Constants And Macros *
44*********************************************************************************************************************************/
#if defined(nr_cpumask_bits) || LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
/** Number of valid bits in a kernel cpumask; same value as nr_cpu_ids.
 * Since 2.6.28 this may be smaller than NR_CPUS (CONFIG_CPUMASK_OFFSTACK). */
# define VBOX_NR_CPUMASK_BITS (nr_cpumask_bits) /* same as nr_cpu_ids */
#else
/** Older kernels: fall back to the compile-time CPU limit. */
# define VBOX_NR_CPUMASK_BITS (NR_CPUS)
#endif
50
51
52RTDECL(RTCPUID) RTMpCpuId(void)
53{
54 return smp_processor_id();
55}
56RT_EXPORT_SYMBOL(RTMpCpuId);
57
58
59RTDECL(int) RTMpCurSetIndex(void)
60{
61 return smp_processor_id();
62}
63RT_EXPORT_SYMBOL(RTMpCurSetIndex);
64
65
66RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
67{
68 return *pidCpu = smp_processor_id();
69}
70RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId);
71
72
73RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
74{
75 return idCpu < RTCPUSET_MAX_CPUS && idCpu < VBOX_NR_CPUMASK_BITS ? (int)idCpu : -1;
76}
77RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex);
78
79
80RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
81{
82 return (unsigned)iCpu < VBOX_NR_CPUMASK_BITS ? (RTCPUID)iCpu : NIL_RTCPUID;
83}
84RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex);
85
86
87RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
88{
89 return VBOX_NR_CPUMASK_BITS - 1;
90}
91RT_EXPORT_SYMBOL(RTMpGetMaxCpuId);
92
93
/**
 * Checks whether the given CPU can possibly exist on this system.
 *
 * @returns true if the CPU is possible, false otherwise.
 * @param   idCpu       The CPU id to check.
 */
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
#if defined(CONFIG_SMP)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 2) || defined(cpu_possible)
    return idCpu < VBOX_NR_CPUMASK_BITS && cpu_possible(idCpu);
# else /* < 2.5.29 */
    /* NOTE(review): the #if checks 2.6.2 but the comment says < 2.5.29 -
       verify which kernel version actually introduced cpu_possible(). */
    return idCpu < (RTCPUID)(smp_num_cpus);
# endif
#else
    /* Uniprocessor kernel: only the (single) current CPU exists. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuPossible);
107
108
109RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
110{
111 RTCPUID idCpu;
112
113 RTCpuSetEmpty(pSet);
114 idCpu = RTMpGetMaxCpuId();
115 do
116 {
117 if (RTMpIsCpuPossible(idCpu))
118 RTCpuSetAdd(pSet, idCpu);
119 } while (idCpu-- > 0);
120 return pSet;
121}
122RT_EXPORT_SYMBOL(RTMpGetSet);
123
124
/**
 * Gets the number of CPUs that can possibly exist on the system
 * (i.e. including offline but hot-pluggable ones).
 *
 * @returns The possible CPU count; 1 on uniprocessor kernels.
 */
RTDECL(RTCPUID) RTMpGetCount(void)
{
#ifdef CONFIG_SMP
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 4) || defined(num_possible_cpus)
    /* Counts possible CPUs, so hotplugging is handled correctly. */
    return num_possible_cpus();
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
    /* 2.4 kernels: no hotplug, fixed count. */
    return smp_num_cpus;
# else
    /* Fallback: count the possible set ourselves. */
    RTCPUSET Set;
    RTMpGetSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetCount);
142
143
/**
 * Checks whether the given CPU is currently online.
 *
 * @returns true if online, false if offline or out of range.
 * @param   idCpu       The CPU id to check.
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
#ifdef CONFIG_SMP
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(cpu_online)
    return idCpu < VBOX_NR_CPUMASK_BITS && cpu_online(idCpu);
# else /* 2.4: */
    /* 2.4 kernels: cpu_online_map is a plain bitmap, test the bit directly. */
    return idCpu < VBOX_NR_CPUMASK_BITS && cpu_online_map & RT_BIT_64(idCpu);
# endif
#else
    /* Uniprocessor kernel: only the current CPU is online. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuOnline);
157
158
159RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
160{
161#ifdef CONFIG_SMP
162 RTCPUID idCpu;
163
164 RTCpuSetEmpty(pSet);
165 idCpu = RTMpGetMaxCpuId();
166 do
167 {
168 if (RTMpIsCpuOnline(idCpu))
169 RTCpuSetAdd(pSet, idCpu);
170 } while (idCpu-- > 0);
171#else
172 RTCpuSetEmpty(pSet);
173 RTCpuSetAdd(pSet, RTMpCpuId());
174#endif
175 return pSet;
176}
177RT_EXPORT_SYMBOL(RTMpGetOnlineSet);
178
179
/**
 * Gets the number of CPUs that are currently online.
 *
 * @returns The online CPU count; 1 on uniprocessor kernels.
 */
RTDECL(RTCPUID) RTMpGetOnlineCount(void)
{
#ifdef CONFIG_SMP
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(num_online_cpus)
    return num_online_cpus();
# else
    /* Old kernels: count the online set ourselves. */
    RTCPUSET Set;
    RTMpGetOnlineSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetOnlineCount);
195
196
197RTDECL(bool) RTMpIsCpuWorkPending(void)
198{
199 /** @todo (not used on non-Windows platforms yet). */
200 return false;
201}
202RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending);
203
204
205/**
206 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
207 *
208 * @param pvInfo Pointer to the RTMPARGS package.
209 */
210static void rtmpLinuxWrapper(void *pvInfo)
211{
212 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
213 ASMAtomicIncU32(&pArgs->cHits);
214 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
215}
216
217#ifdef CONFIG_SMP
218
219# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER, does hit
 * increment after calling the worker.
 *
 * The post-increment ordering lets RTMpOnPair spin on cHits knowing that the
 * remote worker has fully *completed*, not merely started.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxWrapperPostInc(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
    ASMAtomicIncU32(&pArgs->cHits);
}
232# endif
233
234
/**
 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
 *
 * Runs the worker only if this CPU is still a member of the pending-work set,
 * then removes itself from the set.  RTMpOnAll spins until the set is empty,
 * so the worker call must strictly precede the RTCpuSetDel.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxAllWrapper(void *pvInfo)
{
    PRTMPARGS pArgs      = (PRTMPARGS)pvInfo;
    PRTCPUSET pWorkerSet = pArgs->pWorkerSet;
    RTCPUID   idCpu      = RTMpCpuId();
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (RTCpuSetIsMember(pWorkerSet, idCpu))
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        RTCpuSetDel(pWorkerSet, idCpu);
    }
}
253
254#endif /* CONFIG_SMP */
255
/**
 * Executes pfnWorker once on every online CPU, including the current one.
 *
 * Fires the worker asynchronously on all other CPUs via smp_call_function,
 * runs it locally, then spin-waits until every CPU has removed itself from
 * the shared worker set (see rtmpLinuxAllWrapper).  Periodically re-checks
 * the online set so a CPU going offline mid-call cannot hang the wait loop.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker function to execute on each CPU.
 * @param   pvUser1     First user argument passed to the worker.
 * @param   pvUser2     Second user argument passed to the worker.
 */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    RTMPARGS Args;
    RTCPUSET OnlineSet;
    RTCPUID  idCpu;
#ifdef CONFIG_SMP
    uint32_t cLoops;
#endif

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Preemption must stay disabled so the online set and our CPU id stay valid. */
    RTThreadPreemptDisable(&PreemptState);
    RTMpGetOnlineSet(&OnlineSet);
    Args.pWorkerSet = &OnlineSet;
    idCpu = RTMpCpuId();

#ifdef CONFIG_SMP
    if (RTCpuSetCount(&OnlineSet) > 1)
    {
        /* Fire the function on all other CPUs without waiting for completion. */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
        /* 5.3 dropped the return value of smp_call_function. */
        smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
        Assert(!rc); NOREF(rc);
# else
        int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
        Assert(!rc); NOREF(rc);
# endif
    }
#endif

    /* Fire the function on this CPU. */
    Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
    RTCpuSetDel(Args.pWorkerSet, idCpu);

#ifdef CONFIG_SMP
    /* Wait for all of them finish. */
    cLoops = 64000;
    while (!RTCpuSetIsEmpty(Args.pWorkerSet))
    {
        /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
        if (!cLoops--)
        {
            RTCPUSET OnlineSetNow;
            RTMpGetOnlineSet(&OnlineSetNow);
            RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);

            cLoops = 64000;
        }

        ASMNopPause();
    }
#endif

    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnAll);
323
324
/**
 * Executes pfnWorker once on every online CPU except the current one,
 * waiting for all of them to complete before returning.
 *
 * @returns VINF_SUCCESS (also on uniprocessor kernels, where it is a no-op).
 * @param   pfnWorker   The worker function to execute on the other CPUs.
 * @param   pvUser1     First user argument passed to the worker.
 * @param   pvUser2     Second user argument passed to the worker.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
#ifdef CONFIG_SMP
    IPRT_LINUX_SAVE_EFL_AC();
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Preemption disabled so we cannot migrate and call ourselves. */
    RTThreadPreemptDisable(&PreemptState);
# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
    /* 5.3 dropped the return value of smp_call_function. */
    smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    int rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
    Assert(rc == 0); NOREF(rc);
# else /* older kernels */
    int rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
    Assert(rc == 0); NOREF(rc);
# endif /* older kernels */
    RTThreadPreemptRestore(&PreemptState);

    IPRT_LINUX_RESTORE_EFL_AC();
#else
    RT_NOREF(pfnWorker, pvUser1, pvUser2);
#endif
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnOthers);
357
358
359#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) && defined(CONFIG_SMP)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
 * employed by RTMpOnPair on older kernels that lacks smp_call_function_many.
 *
 * Broadcast to all CPUs; only the two targeted CPUs run the worker and bump
 * the hit counter, everyone else returns immediately.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtMpLinuxOnPairWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    RTCPUID   idCpu = RTMpCpuId();

    if (   idCpu == pArgs->idCpu
        || idCpu == pArgs->idCpu2)
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        ASMAtomicIncU32(&pArgs->cHits);
    }
}
378#endif
379
380
381RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
382{
383#ifdef CONFIG_SMP
384 IPRT_LINUX_SAVE_EFL_AC();
385 int rc;
386 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
387# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
388 cpumask_var_t DstCpuMask;
389# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
390 cpumask_t DstCpuMask;
391# endif
392
393 AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
394 AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
395
396 /*
397 * Prepare the CPU mask before we disable preemption.
398 */
399# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
400 if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
401 return VERR_NO_MEMORY;
402 cpumask_set_cpu(idCpu1, DstCpuMask);
403 cpumask_set_cpu(idCpu2, DstCpuMask);
404# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
405 if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
406 return VERR_NO_MEMORY;
407 cpumask_clear(DstCpuMask);
408 cpumask_set_cpu(idCpu1, DstCpuMask);
409 cpumask_set_cpu(idCpu2, DstCpuMask);
410# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
411 cpus_clear(DstCpuMask);
412 cpu_set(idCpu1, DstCpuMask);
413 cpu_set(idCpu2, DstCpuMask);
414# endif
415
416 /*
417 * Check that both CPUs are online before doing the broadcast call.
418 */
419 RTThreadPreemptDisable(&PreemptState);
420 if ( RTMpIsCpuOnline(idCpu1)
421 && RTMpIsCpuOnline(idCpu2))
422 {
423 /*
424 * Use the smp_call_function variant taking a cpu mask where available,
425 * falling back on broadcast with filter. Slight snag if one of the
426 * CPUs is the one we're running on, we must do the call and the post
427 * call wait ourselves.
428 */
429 RTCPUID idCpuSelf = RTMpCpuId();
430 bool const fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
431 RTMPARGS Args;
432 Args.pfnWorker = pfnWorker;
433 Args.pvUser1 = pvUser1;
434 Args.pvUser2 = pvUser2;
435 Args.idCpu = idCpu1;
436 Args.idCpu2 = idCpu2;
437 Args.cHits = 0;
438
439# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
440 smp_call_function_many(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
441 rc = 0;
442# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
443 rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
444# else /* older kernels */
445 rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
446# endif /* older kernels */
447 Assert(rc == 0);
448
449 /* Call ourselves if necessary and wait for the other party to be done. */
450 if (fCallSelf)
451 {
452 uint32_t cLoops = 0;
453 rtmpLinuxWrapper(&Args);
454 while (ASMAtomicReadU32(&Args.cHits) < 2)
455 {
456 if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
457 break;
458 cLoops++;
459 ASMNopPause();
460 }
461 }
462
463 Assert(Args.cHits <= 2);
464 if (Args.cHits == 2)
465 rc = VINF_SUCCESS;
466 else if (Args.cHits == 1)
467 rc = VERR_NOT_ALL_CPUS_SHOWED;
468 else if (Args.cHits == 0)
469 rc = VERR_CPU_OFFLINE;
470 else
471 rc = VERR_CPU_IPE_1;
472 }
473 /*
474 * A CPU must be present to be considered just offline.
475 */
476 else if ( RTMpIsCpuPresent(idCpu1)
477 && RTMpIsCpuPresent(idCpu2))
478 rc = VERR_CPU_OFFLINE;
479 else
480 rc = VERR_CPU_NOT_FOUND;
481
482 RTThreadPreemptRestore(&PreemptState);;
483# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
484 free_cpumask_var(DstCpuMask);
485# endif
486 IPRT_LINUX_RESTORE_EFL_AC();
487 return rc;
488
489#else /* !CONFIG_SMP */
490 RT_NOREF(idCpu1, idCpu2, fFlags, pfnWorker, pvUser1, pvUser2);
491 return VERR_CPU_NOT_FOUND;
492#endif /* !CONFIG_SMP */
493}
494RT_EXPORT_SYMBOL(RTMpOnPair);
495
496
497RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
498{
499 return true;
500}
501RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported);
502
503
504#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CONFIG_SMP)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
 * employed by RTMpOnSpecific on older kernels that lacks smp_call_function_single.
 *
 * Broadcast to all CPUs; only the targeted CPU runs the worker and bumps the
 * hit counter, everyone else returns immediately.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpOnSpecificLinuxWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    RTCPUID   idCpu = RTMpCpuId();

    if (idCpu == pArgs->idCpu)
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        ASMAtomicIncU32(&pArgs->cHits);
    }
}
522#endif
523
524
525RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
526{
527 IPRT_LINUX_SAVE_EFL_AC();
528 int rc;
529 RTMPARGS Args;
530
531 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
532 Args.pfnWorker = pfnWorker;
533 Args.pvUser1 = pvUser1;
534 Args.pvUser2 = pvUser2;
535 Args.idCpu = idCpu;
536 Args.cHits = 0;
537
538 if (!RTMpIsCpuPossible(idCpu))
539 return VERR_CPU_NOT_FOUND;
540
541 RTThreadPreemptDisable(&PreemptState);
542 if (idCpu != RTMpCpuId())
543 {
544#ifdef CONFIG_SMP
545 if (RTMpIsCpuOnline(idCpu))
546 {
547# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
548 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
549# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
550 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
551# else /* older kernels */
552 rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
553# endif /* older kernels */
554 Assert(rc == 0);
555 rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
556 }
557 else
558#endif /* CONFIG_SMP */
559 rc = VERR_CPU_OFFLINE;
560 }
561 else
562 {
563 rtmpLinuxWrapper(&Args);
564 rc = VINF_SUCCESS;
565 }
566 RTThreadPreemptRestore(&PreemptState);;
567
568 NOREF(rc);
569 IPRT_LINUX_RESTORE_EFL_AC();
570 return rc;
571}
572RT_EXPORT_SYMBOL(RTMpOnSpecific);
573
574
575#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) && defined(CONFIG_SMP)
/**
 * Dummy callback used by RTMpPokeCpu.
 *
 * The IPI delivery itself is the poke; nothing needs doing on arrival.
 *
 * @param   pvInfo      Ignored.
 */
static void rtmpLinuxPokeCpuCallback(void *pvInfo)
{
    NOREF(pvInfo);
}
585#endif
586
587
/**
 * Pokes the given CPU by sending it an IPI via a no-op
 * smp_call_function_single (without waiting for it).
 *
 * @returns VINF_SUCCESS, VERR_CPU_OFFLINE, VERR_CPU_NOT_FOUND, or
 *          VERR_NOT_SUPPORTED on pre-2.6.19 kernels (no unicast IPI API).
 * @param   idCpu       The id of the CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    if (RTMpIsCpuPossible(idCpu))
    {
        if (RTMpIsCpuOnline(idCpu))
        {
# ifdef CONFIG_SMP
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
            rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
#  elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
            rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
#  else /* older kernels */
#   error oops
#  endif /* older kernels */
            Assert(rc == 0);
# endif /* CONFIG_SMP */
            /* On !CONFIG_SMP the online check above means idCpu is us; nothing to do. */
            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_CPU_OFFLINE;
    }
    else
        rc = VERR_CPU_NOT_FOUND;
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;

#else /* older kernels */
    /* no unicast here? */
    return VERR_NOT_SUPPORTED;
#endif /* older kernels */
}
RT_EXPORT_SYMBOL(RTMpPokeCpu);
623
624
625RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
626{
627 return true;
628}
629RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe);
630
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette