VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c@ 78273

Last change on this file since 78273 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.7 KB
Line 
1/* $Id: mp-r0drv-linux.c 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * IPRT - Multiprocessor, Ring-0 Driver, Linux.
4 */
5
6/*
7 * Copyright (C) 2008-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-linux-kernel.h"
32#include "internal/iprt.h"
33
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/thread.h>
39#include "r0drv/mp-r0drv.h"
40
41#ifdef nr_cpumask_bits
42# define VBOX_NR_CPUMASK_BITS nr_cpumask_bits
43#else
44# define VBOX_NR_CPUMASK_BITS NR_CPUS
45#endif
46
47
48RTDECL(RTCPUID) RTMpCpuId(void)
49{
50 return smp_processor_id();
51}
52RT_EXPORT_SYMBOL(RTMpCpuId);
53
54
55RTDECL(int) RTMpCurSetIndex(void)
56{
57 return smp_processor_id();
58}
59RT_EXPORT_SYMBOL(RTMpCurSetIndex);
60
61
62RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
63{
64 return *pidCpu = smp_processor_id();
65}
66RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId);
67
68
69RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
70{
71 return idCpu < RTCPUSET_MAX_CPUS && idCpu < VBOX_NR_CPUMASK_BITS ? (int)idCpu : -1;
72}
73RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex);
74
75
76RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
77{
78 return iCpu < VBOX_NR_CPUMASK_BITS ? (RTCPUID)iCpu : NIL_RTCPUID;
79}
80RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex);
81
82
/**
 * Returns the highest CPU identifier usable on this system.
 *
 * @returns Size of the kernel cpumask minus one.
 *
 * NOTE(review): the original carried a '//???' doubt marker here; whether
 * VBOX_NR_CPUMASK_BITS - 1 is truly the maximum *id* on every kernel
 * configuration (nr_cpumask_bits vs NR_CPUS) should be confirmed.
 */
RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
{
    return VBOX_NR_CPUMASK_BITS - 1; /* see NOTE(review) above */
}
RT_EXPORT_SYMBOL(RTMpGetMaxCpuId);
88
89
/**
 * Checks whether @a idCpu denotes a CPU that can possibly exist on this
 * system (it need not be online).
 *
 * @returns true if the CPU is possible, false otherwise.
 * @param   idCpu   The CPU identifier to check.
 */
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
#if defined(CONFIG_SMP)
    /* Ids beyond the kernel cpumask can never be valid. */
    if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
        return false;

# if defined(cpu_possible)
    /* Consult the kernel's possible-CPU map. */
    return cpu_possible(idCpu);
# else /* < 2.5.29 */
    /* Ancient kernels number CPUs 0..smp_num_cpus-1. */
    return idCpu < (RTCPUID)smp_num_cpus;
# endif
#else
    /* Uniprocessor kernel: only the current (sole) CPU exists. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuPossible);
106
107
108RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
109{
110 RTCPUID idCpu;
111
112 RTCpuSetEmpty(pSet);
113 idCpu = RTMpGetMaxCpuId();
114 do
115 {
116 if (RTMpIsCpuPossible(idCpu))
117 RTCpuSetAdd(pSet, idCpu);
118 } while (idCpu-- > 0);
119 return pSet;
120}
121RT_EXPORT_SYMBOL(RTMpGetSet);
122
123
/**
 * Returns the number of CPUs in the system.
 *
 * Prefers the present-CPU count (hotplug kernels), then the possible-CPU
 * count, then the old smp_num_cpus variable, finally falling back to
 * counting the possible set by hand.
 *
 * @returns CPU count (1 on non-SMP kernels).
 */
RTDECL(RTCPUID) RTMpGetCount(void)
{
#ifdef CONFIG_SMP
# if defined(CONFIG_HOTPLUG_CPU) /* introduced & uses cpu_present */
    return num_present_cpus();
# elif defined(num_possible_cpus)
    return num_possible_cpus();
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
    return smp_num_cpus;
# else
    /* Fallback: count the possible set ourselves. */
    RTCPUSET Set;
    RTMpGetSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetCount);
143
144
/**
 * Checks whether the given CPU is currently online.
 *
 * @returns true if online, false if offline or out of range.
 * @param   idCpu   The CPU identifier to check.
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
#ifdef CONFIG_SMP
    /* Ids beyond the kernel cpumask can never be online. */
    if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
        return false;
# ifdef cpu_online
    return cpu_online(idCpu);
# else /* 2.4: */
    /* 2.4 kernels expose the online map as a plain bitmask variable. */
    return cpu_online_map & RT_BIT_64(idCpu);
# endif
#else
    /* Uniprocessor kernel: only the current CPU is online. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuOnline);
160
161
162RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
163{
164#ifdef CONFIG_SMP
165 RTCPUID idCpu;
166
167 RTCpuSetEmpty(pSet);
168 idCpu = RTMpGetMaxCpuId();
169 do
170 {
171 if (RTMpIsCpuOnline(idCpu))
172 RTCpuSetAdd(pSet, idCpu);
173 } while (idCpu-- > 0);
174#else
175 RTCpuSetEmpty(pSet);
176 RTCpuSetAdd(pSet, RTMpCpuId());
177#endif
178 return pSet;
179}
180RT_EXPORT_SYMBOL(RTMpGetOnlineSet);
181
182
/**
 * Returns the number of CPUs that are currently online.
 *
 * @returns Online CPU count (1 on non-SMP kernels).
 */
RTDECL(RTCPUID) RTMpGetOnlineCount(void)
{
#ifdef CONFIG_SMP
# if defined(num_online_cpus)
    return num_online_cpus();
# else
    /* Fallback for kernels without num_online_cpus(): count by hand. */
    RTCPUSET Set;
    RTMpGetOnlineSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetOnlineCount);
198
199
/**
 * Checks whether there is multiprocessor work pending on this CPU.
 *
 * @returns Always false on Linux.
 */
RTDECL(bool) RTMpIsCpuWorkPending(void)
{
    /** @todo (not used on non-Windows platforms yet). */
    return false;
}
RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending);
206
207
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
 *
 * Bumps the hit counter *before* invoking the worker; contrast with
 * rtmpLinuxWrapperPostInc, which increments afterwards for callers that
 * use the counter to detect worker completion.
 *
 * @param pvInfo Pointer to the RTMPARGS package.
 */
static void rtmpLinuxWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    ASMAtomicIncU32(&pArgs->cHits);
    pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
}
219
220#ifdef CONFIG_SMP
221
222# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER, does hit
 * increment after calling the worker.
 *
 * Used by RTMpOnPair, where the counter reaching its final value must mean
 * the worker has actually completed on that CPU.
 *
 * @param pvInfo Pointer to the RTMPARGS package.
 */
static void rtmpLinuxWrapperPostInc(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
    ASMAtomicIncU32(&pArgs->cHits);
}
235# endif
236
237
/**
 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
 *
 * Runs the worker only if this CPU is still a member of the pending set,
 * then removes itself, allowing the RTMpOnAll initiator to poll the set
 * for completion.
 *
 * @param pvInfo Pointer to the RTMPARGS package.
 */
static void rtmpLinuxAllWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    PRTCPUSET pWorkerSet = pArgs->pWorkerSet;
    RTCPUID idCpu = RTMpCpuId();
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (RTCpuSetIsMember(pWorkerSet, idCpu))
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        RTCpuSetDel(pWorkerSet, idCpu);
    }
}
256
257#endif /* CONFIG_SMP */
258
/**
 * Executes a worker on every online CPU, the current one included.
 *
 * Preemption is disabled for the whole operation.  The worker is fired
 * asynchronously on the other CPUs, executed directly on this one, and
 * then we spin until every CPU has removed itself from the pending set
 * (see rtmpLinuxAllWrapper), periodically pruning CPUs that went offline.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker to run on each CPU.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    RTMPARGS Args;
    RTCPUSET OnlineSet;
    RTCPUID idCpu;
#ifdef CONFIG_SMP
    uint32_t cLoops;
#endif

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Snapshot the online set with preemption off so we cannot migrate. */
    RTThreadPreemptDisable(&PreemptState);
    RTMpGetOnlineSet(&OnlineSet);
    Args.pWorkerSet = &OnlineSet;
    idCpu = RTMpCpuId();

#ifdef CONFIG_SMP
    if (RTCpuSetCount(&OnlineSet) > 1)
    {
        /* Fire the function on all other CPUs without waiting for completion. */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
# else
        int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
# endif
        Assert(!rc); NOREF(rc);
    }
#endif

    /* Fire the function on this CPU. */
    Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
    RTCpuSetDel(Args.pWorkerSet, idCpu);

#ifdef CONFIG_SMP
    /* Wait for all of them to finish. */
    cLoops = 64000;
    while (!RTCpuSetIsEmpty(Args.pWorkerSet))
    {
        /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
        if (!cLoops--)
        {
            RTCPUSET OnlineSetNow;
            RTMpGetOnlineSet(&OnlineSetNow);
            RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);

            cLoops = 64000;
        }

        ASMNopPause();
    }
#endif

    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnAll);
323
324
/**
 * Executes a worker on every online CPU except the current one.
 *
 * Uses the kernel's synchronous cross-call (wait = 1), so all workers have
 * completed when this returns.  On non-SMP kernels there are no "other"
 * CPUs and this is a successful no-op.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker to run on each other CPU.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
#ifdef CONFIG_SMP
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Keep preemption off so we stay excluded from "the others". */
    RTThreadPreemptDisable(&PreemptState);
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
# else /* older kernels */
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
# endif /* older kernels */
    RTThreadPreemptRestore(&PreemptState);

    Assert(rc == 0); NOREF(rc);
    IPRT_LINUX_RESTORE_EFL_AC();
#else
    RT_NOREF(pfnWorker, pvUser1, pvUser2);
#endif
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnOthers);
355
356
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) && defined(CONFIG_SMP)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
 * employed by RTMpOnPair on older kernels that lacks smp_call_function_many.
 *
 * The call is broadcast to all CPUs, so every invocation filters on the
 * two target ids and only counts a hit on a match.
 *
 * @param pvInfo Pointer to the RTMPARGS package.
 */
static void rtMpLinuxOnPairWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    RTCPUID idCpu = RTMpCpuId();

    if (   idCpu == pArgs->idCpu
        || idCpu == pArgs->idCpu2)
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        ASMAtomicIncU32(&pArgs->cHits);
    }
}
#endif
377
378
379RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
380{
381#ifdef CONFIG_SMP
382 IPRT_LINUX_SAVE_EFL_AC();
383 int rc;
384 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
385
386 AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
387 AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
388
389 /*
390 * Check that both CPUs are online before doing the broadcast call.
391 */
392 RTThreadPreemptDisable(&PreemptState);
393 if ( RTMpIsCpuOnline(idCpu1)
394 && RTMpIsCpuOnline(idCpu2))
395 {
396 /*
397 * Use the smp_call_function variant taking a cpu mask where available,
398 * falling back on broadcast with filter. Slight snag if one of the
399 * CPUs is the one we're running on, we must do the call and the post
400 * call wait ourselves.
401 */
402# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
403 /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
404 cpumask_var_t DstCpuMask;
405# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
406 cpumask_t DstCpuMask;
407# endif
408 RTCPUID idCpuSelf = RTMpCpuId();
409 bool const fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
410 RTMPARGS Args;
411 Args.pfnWorker = pfnWorker;
412 Args.pvUser1 = pvUser1;
413 Args.pvUser2 = pvUser2;
414 Args.idCpu = idCpu1;
415 Args.idCpu2 = idCpu2;
416 Args.cHits = 0;
417
418# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
419 if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
420 return VERR_NO_MEMORY;
421 cpumask_set_cpu(idCpu1, DstCpuMask);
422 cpumask_set_cpu(idCpu2, DstCpuMask);
423# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
424 if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
425 return VERR_NO_MEMORY;
426 cpumask_clear(DstCpuMask);
427 cpumask_set_cpu(idCpu1, DstCpuMask);
428 cpumask_set_cpu(idCpu2, DstCpuMask);
429# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
430 cpus_clear(DstCpuMask);
431 cpu_set(idCpu1, DstCpuMask);
432 cpu_set(idCpu2, DstCpuMask);
433# endif
434
435# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
436 smp_call_function_many(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
437 rc = 0;
438# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
439 rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
440# else /* older kernels */
441 rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
442# endif /* older kernels */
443 Assert(rc == 0);
444
445 /* Call ourselves if necessary and wait for the other party to be done. */
446 if (fCallSelf)
447 {
448 uint32_t cLoops = 0;
449 rtmpLinuxWrapper(&Args);
450 while (ASMAtomicReadU32(&Args.cHits) < 2)
451 {
452 if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
453 break;
454 cLoops++;
455 ASMNopPause();
456 }
457 }
458
459 Assert(Args.cHits <= 2);
460 if (Args.cHits == 2)
461 rc = VINF_SUCCESS;
462 else if (Args.cHits == 1)
463 rc = VERR_NOT_ALL_CPUS_SHOWED;
464 else if (Args.cHits == 0)
465 rc = VERR_CPU_OFFLINE;
466 else
467 rc = VERR_CPU_IPE_1;
468
469# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
470 free_cpumask_var(DstCpuMask);
471# endif
472 }
473 /*
474 * A CPU must be present to be considered just offline.
475 */
476 else if ( RTMpIsCpuPresent(idCpu1)
477 && RTMpIsCpuPresent(idCpu2))
478 rc = VERR_CPU_OFFLINE;
479 else
480 rc = VERR_CPU_NOT_FOUND;
481 RTThreadPreemptRestore(&PreemptState);;
482 IPRT_LINUX_RESTORE_EFL_AC();
483 return rc;
484
485#else /* !CONFIG_SMP */
486 RT_NOREF(idCpu1, idCpu2, fFlags, pfnWorker, pvUser1, pvUser2);
487 return VERR_CPU_NOT_FOUND;
488#endif /* !CONFIG_SMP */
489}
490RT_EXPORT_SYMBOL(RTMpOnPair);
491
492
/**
 * Reports whether RTMpOnPair can run the worker concurrently on both CPUs.
 *
 * @returns Always true on Linux.
 */
RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
{
    return true;
}
RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported);
498
499
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CONFIG_SMP)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
 * employed by RTMpOnSpecific on older kernels that lacks smp_call_function_single.
 *
 * The call is broadcast to all CPUs, so every invocation checks whether it
 * is running on the requested CPU before doing any work.
 *
 * @param pvInfo Pointer to the RTMPARGS package.
 */
static void rtmpOnSpecificLinuxWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    RTCPUID idCpu = RTMpCpuId();

    if (idCpu == pArgs->idCpu)
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        ASMAtomicIncU32(&pArgs->cHits);
    }
}
#endif
519
520
521RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
522{
523 IPRT_LINUX_SAVE_EFL_AC();
524 int rc;
525 RTMPARGS Args;
526
527 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
528 Args.pfnWorker = pfnWorker;
529 Args.pvUser1 = pvUser1;
530 Args.pvUser2 = pvUser2;
531 Args.idCpu = idCpu;
532 Args.cHits = 0;
533
534 if (!RTMpIsCpuPossible(idCpu))
535 return VERR_CPU_NOT_FOUND;
536
537 RTThreadPreemptDisable(&PreemptState);
538 if (idCpu != RTMpCpuId())
539 {
540#ifdef CONFIG_SMP
541 if (RTMpIsCpuOnline(idCpu))
542 {
543# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
544 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
545# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
546 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
547# else /* older kernels */
548 rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
549# endif /* older kernels */
550 Assert(rc == 0);
551 rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
552 }
553 else
554#endif /* CONFIG_SMP */
555 rc = VERR_CPU_OFFLINE;
556 }
557 else
558 {
559 rtmpLinuxWrapper(&Args);
560 rc = VINF_SUCCESS;
561 }
562 RTThreadPreemptRestore(&PreemptState);;
563
564 NOREF(rc);
565 IPRT_LINUX_RESTORE_EFL_AC();
566 return rc;
567}
568RT_EXPORT_SYMBOL(RTMpOnSpecific);
569
570
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) && defined(CONFIG_SMP)
/**
 * Dummy callback used by RTMpPokeCpu.
 *
 * The cross-call itself (the resulting IPI) is the point; there is nothing
 * to do in the callback.
 *
 * @param pvInfo Ignored.
 */
static void rtmpLinuxPokeCpuCallback(void *pvInfo)
{
    NOREF(pvInfo);
}
#endif
582
583
/**
 * Pokes (sends an IPI to) the given CPU via an asynchronous no-op
 * cross-call, without waiting for it.
 *
 * @returns VINF_SUCCESS, VERR_CPU_OFFLINE, VERR_CPU_NOT_FOUND, or
 *          VERR_NOT_SUPPORTED on kernels older than 2.6.19.
 * @param   idCpu   The CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    if (RTMpIsCpuPossible(idCpu))
    {
        if (RTMpIsCpuOnline(idCpu))
        {
# ifdef CONFIG_SMP
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
            rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
#  elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
            rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
#  else /* older kernels */
#   error oops
#  endif /* older kernels */
            Assert(rc == 0);
# endif /* CONFIG_SMP */
            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_CPU_OFFLINE;
    }
    else
        rc = VERR_CPU_NOT_FOUND;
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;

#else /* older kernels */
    /* no unicast here? */
    return VERR_NOT_SUPPORTED;
#endif /* older kernels */
}
RT_EXPORT_SYMBOL(RTMpPokeCpu);
619
620
/**
 * Reports whether RTMpOnAll is safe to use with workers that may run
 * concurrently on multiple CPUs.
 *
 * @returns Always true on Linux.
 */
RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
{
    return true;
}
RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe);
626
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette