1 | /* $Id: mp-r0drv-nt.cpp 54501 2015-02-25 15:58:43Z vboxsync $ */
|
---|
2 | /** @file
|
---|
3 | * IPRT - Multiprocessor, Ring-0 Driver, NT.
|
---|
4 | */
|
---|
5 |
|
---|
6 | /*
|
---|
7 | * Copyright (C) 2008-2014 Oracle Corporation
|
---|
8 | *
|
---|
9 | * This file is part of VirtualBox Open Source Edition (OSE), as
|
---|
10 | * available from http://www.virtualbox.org. This file is free software;
|
---|
11 | * you can redistribute it and/or modify it under the terms of the GNU
|
---|
12 | * General Public License (GPL) as published by the Free Software
|
---|
13 | * Foundation, in version 2 as it comes in the "COPYING" file of the
|
---|
14 | * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
|
---|
15 | * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
|
---|
16 | *
|
---|
17 | * The contents of this file may alternatively be used under the terms
|
---|
18 | * of the Common Development and Distribution License Version 1.0
|
---|
19 | * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
|
---|
20 | * VirtualBox OSE distribution, in which case the provisions of the
|
---|
21 | * CDDL are applicable instead of those of the GPL.
|
---|
22 | *
|
---|
23 | * You may elect to license modified versions of this file under the
|
---|
24 | * terms and conditions of either the GPL or the CDDL or both.
|
---|
25 | */
|
---|
26 |
|
---|
27 |
|
---|
28 | /*******************************************************************************
|
---|
29 | * Header Files *
|
---|
30 | *******************************************************************************/
|
---|
31 | #include "the-nt-kernel.h"
|
---|
32 |
|
---|
33 | #include <iprt/mp.h>
|
---|
34 | #include <iprt/cpuset.h>
|
---|
35 | #include <iprt/err.h>
|
---|
36 | #include <iprt/asm.h>
|
---|
37 | #include <iprt/log.h>
|
---|
38 | #include <iprt/time.h>
|
---|
39 | #include "r0drv/mp-r0drv.h"
|
---|
40 | #include "internal-r0drv-nt.h"
|
---|
41 |
|
---|
42 |
|
---|
43 | /*******************************************************************************
|
---|
44 | * Structures and Typedefs *
|
---|
45 | *******************************************************************************/
|
---|
/**
 * CPU targeting mode used by rtMpCallUsingDpcs.
 */
typedef enum
{
    /** Target only the CPU given by the idCpu argument. */
    RT_NT_CPUID_SPECIFIC,
    /** Target the two CPUs given by the idCpu and idCpu2 arguments. */
    RT_NT_CPUID_PAIR,
    /** Target every online CPU except the calling one. */
    RT_NT_CPUID_OTHERS,
    /** Target every online CPU, including the calling one. */
    RT_NT_CPUID_ALL
} RT_NT_CPUID;
|
---|
53 |
|
---|
54 |
|
---|
/**
 * Argument and state package used by RTMpOnSpecific.
 *
 * Allocated from non-paged pool and reference counted so the requesting
 * thread and the DPC routine can race to release it without touching a
 * freed block or a stack frame that has gone away.
 */
typedef struct RTMPNTONSPECIFICARGS
{
    /** Set if we're executing. */
    bool volatile       fExecuting;
    /** Set when done executing. */
    bool volatile       fDone;
    /** Number of references to this heap block. */
    uint32_t volatile   cRefs;
    /** Event that the calling thread is waiting on. */
    KEVENT              DoneEvt;
    /** The deferred procedure call object. */
    KDPC                Dpc;
    /** The callback argument package. */
    RTMPARGS            CallbackArgs;
} RTMPNTONSPECIFICARGS;
/** Pointer to an argument/state structure for RTMpOnSpecific on NT. */
typedef RTMPNTONSPECIFICARGS *PRTMPNTONSPECIFICARGS;
|
---|
75 |
|
---|
76 |
|
---|
77 |
|
---|
/* Test a couple of assumptions: the NT processor number must fit in an IPRT
   CPU set and must never collide with NIL_RTCPUID. */
AssertCompile(MAXIMUM_PROCESSORS <= RTCPUSET_MAX_CPUS);
AssertCompile(NIL_RTCPUID >= MAXIMUM_PROCESSORS);

/** @todo
 * We cannot do other than assume a 1:1 relationship between the
 * affinity mask and the process despite the vagueness/warnings in
 * the docs.  If someone knows a better way to get this done, please
 * let bird know.
 */
|
---|
88 |
|
---|
89 |
|
---|
90 | RTDECL(RTCPUID) RTMpCpuId(void)
|
---|
91 | {
|
---|
92 | /* WDK upgrade warning: PCR->Number changed from BYTE to WORD. */
|
---|
93 | return KeGetCurrentProcessorNumber();
|
---|
94 | }
|
---|
95 |
|
---|
96 |
|
---|
97 | RTDECL(int) RTMpCurSetIndex(void)
|
---|
98 | {
|
---|
99 | /* WDK upgrade warning: PCR->Number changed from BYTE to WORD. */
|
---|
100 | return KeGetCurrentProcessorNumber();
|
---|
101 | }
|
---|
102 |
|
---|
103 |
|
---|
104 | RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
|
---|
105 | {
|
---|
106 | return *pidCpu = KeGetCurrentProcessorNumber();
|
---|
107 | }
|
---|
108 |
|
---|
109 |
|
---|
110 | RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
|
---|
111 | {
|
---|
112 | return idCpu < MAXIMUM_PROCESSORS ? (int)idCpu : -1;
|
---|
113 | }
|
---|
114 |
|
---|
115 |
|
---|
116 | RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
|
---|
117 | {
|
---|
118 | return (unsigned)iCpu < MAXIMUM_PROCESSORS ? iCpu : NIL_RTCPUID;
|
---|
119 | }
|
---|
120 |
|
---|
121 |
|
---|
122 | RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
|
---|
123 | {
|
---|
124 | /** @todo use KeQueryMaximumProcessorCount on vista+ */
|
---|
125 | return MAXIMUM_PROCESSORS - 1;
|
---|
126 | }
|
---|
127 |
|
---|
128 |
|
---|
/**
 * Checks whether the given CPU is currently online.
 *
 * @returns true if online, false if offline or out of range.
 * @param   idCpu   The CPU identifier to check.
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
    /* Out-of-range ids are never online. */
    if (idCpu >= MAXIMUM_PROCESSORS)
        return false;

#if 0 /* this isn't safe at all IRQLs (great work guys) */
    KAFFINITY Mask = KeQueryActiveProcessors();
    return !!(Mask & RT_BIT_64(idCpu));
#else
    /* NOTE(review): g_rtMpNtCpuSet is presumably maintained by the IPRT NT
       init/notification code elsewhere; used here because it can be read at
       any IRQL, unlike KeQueryActiveProcessors (see #if 0 above). */
    return RTCpuSetIsMember(&g_rtMpNtCpuSet, idCpu);
#endif
}
|
---|
141 |
|
---|
142 |
|
---|
143 | RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
|
---|
144 | {
|
---|
145 | /* Cannot easily distinguish between online and offline cpus. */
|
---|
146 | /** @todo online/present cpu stuff must be corrected for proper W2K8 support
|
---|
147 | * (KeQueryMaximumProcessorCount). */
|
---|
148 | return RTMpIsCpuOnline(idCpu);
|
---|
149 | }
|
---|
150 |
|
---|
151 |
|
---|
152 |
|
---|
153 | RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
|
---|
154 | {
|
---|
155 | /** @todo online/present cpu stuff must be corrected for proper W2K8 support
|
---|
156 | * (KeQueryMaximumProcessorCount). */
|
---|
157 | return RTMpGetOnlineSet(pSet);
|
---|
158 | }
|
---|
159 |
|
---|
160 |
|
---|
161 | RTDECL(RTCPUID) RTMpGetCount(void)
|
---|
162 | {
|
---|
163 | /** @todo online/present cpu stuff must be corrected for proper W2K8 support
|
---|
164 | * (KeQueryMaximumProcessorCount). */
|
---|
165 | return RTMpGetOnlineCount();
|
---|
166 | }
|
---|
167 |
|
---|
168 |
|
---|
/**
 * Gets the set of online CPUs.
 *
 * @returns pSet.
 * @param   pSet    Where to store the online CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
#if 0 /* this isn't safe at all IRQLs (great work guys) */
    KAFFINITY Mask = KeQueryActiveProcessors();
    return RTCpuSetFromU64(pSet, Mask);
#else
    /* NOTE(review): struct copy of the globally maintained set; safe at any
       IRQL, unlike KeQueryActiveProcessors (see #if 0 above). */
    *pSet = g_rtMpNtCpuSet;
    return pSet;
#endif
}
|
---|
179 |
|
---|
180 |
|
---|
181 | RTDECL(RTCPUID) RTMpGetOnlineCount(void)
|
---|
182 | {
|
---|
183 | RTCPUSET Set;
|
---|
184 | RTMpGetOnlineSet(&Set);
|
---|
185 | return RTCpuSetCount(&Set);
|
---|
186 | }
|
---|
187 |
|
---|
188 |
|
---|
#if 0
/* Experiment with checking the undocumented KPRCB structure
 * 'dt nt!_kprcb 0xaddress' shows the layout
 */
/* NOTE(review): partial, hand-made mirror of the DPC bookkeeping fields at a
   hard-coded KPRCB offset; version specific and x86 only - kept disabled. */
typedef struct
{
    LIST_ENTRY DpcListHead;
    ULONG_PTR DpcLock;
    volatile ULONG DpcQueueDepth;
    ULONG DpcQueueCount;
} KDPC_DATA, *PKDPC_DATA;

RTDECL(bool) RTMpIsCpuWorkPending(void)
{
    uint8_t *pkprcb;
    PKDPC_DATA pDpcData;

    /* fs:0x20 is the x86 KPCR KPRCB pointer; 0x19e0 is a version-specific
       offset to the DPC data - TODO confirm per OS build before enabling. */
    _asm {
        mov eax, fs:0x20
        mov pkprcb, eax
    }
    pDpcData = (PKDPC_DATA)(pkprcb + 0x19e0);
    if (pDpcData->DpcQueueDepth)
        return true;

    /* Second KDPC_DATA entry (threaded DPC queue, presumably). */
    pDpcData++;
    if (pDpcData->DpcQueueDepth)
        return true;
    return false;
}
#else
/**
 * Checks whether the current CPU has pending work.
 *
 * @returns Always false - not implemented on NT (see the disabled KPRCB
 *          experiment above for a possible approach).
 */
RTDECL(bool) RTMpIsCpuWorkPending(void)
{
    /** @todo not implemented */
    return false;
}
#endif
|
---|
226 |
|
---|
227 |
|
---|
228 | /**
|
---|
229 | * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
|
---|
230 | * the RTMpOnAll case.
|
---|
231 | *
|
---|
232 | * @param uUserCtx The user context argument (PRTMPARGS).
|
---|
233 | */
|
---|
234 | static ULONG_PTR __stdcall rtmpNtOnAllBroadcastIpiWrapper(ULONG_PTR uUserCtx)
|
---|
235 | {
|
---|
236 | PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
|
---|
237 | /*ASMAtomicIncU32(&pArgs->cHits); - not needed */
|
---|
238 | pArgs->pfnWorker(KeGetCurrentProcessorNumber(), pArgs->pvUser1, pArgs->pvUser2);
|
---|
239 | return 0;
|
---|
240 | }
|
---|
241 |
|
---|
242 |
|
---|
243 | /**
|
---|
244 | * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
|
---|
245 | * the RTMpOnOthers case.
|
---|
246 | *
|
---|
247 | * @param uUserCtx The user context argument (PRTMPARGS).
|
---|
248 | */
|
---|
249 | static ULONG_PTR __stdcall rtmpNtOnOthersBroadcastIpiWrapper(ULONG_PTR uUserCtx)
|
---|
250 | {
|
---|
251 | PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
|
---|
252 | RTCPUID idCpu = KeGetCurrentProcessorNumber();
|
---|
253 | if (pArgs->idCpu != idCpu)
|
---|
254 | {
|
---|
255 | /*ASMAtomicIncU32(&pArgs->cHits); - not needed */
|
---|
256 | pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
|
---|
257 | }
|
---|
258 | return 0;
|
---|
259 | }
|
---|
260 |
|
---|
261 |
|
---|
262 | /**
|
---|
263 | * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
|
---|
264 | * the RTMpOnPair case.
|
---|
265 | *
|
---|
266 | * @param uUserCtx The user context argument (PRTMPARGS).
|
---|
267 | */
|
---|
268 | static ULONG_PTR __stdcall rtmpNtOnPairBroadcastIpiWrapper(ULONG_PTR uUserCtx)
|
---|
269 | {
|
---|
270 | PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
|
---|
271 | RTCPUID idCpu = KeGetCurrentProcessorNumber();
|
---|
272 | if ( pArgs->idCpu == idCpu
|
---|
273 | || pArgs->idCpu2 == idCpu)
|
---|
274 | {
|
---|
275 | ASMAtomicIncU32(&pArgs->cHits);
|
---|
276 | pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
|
---|
277 | }
|
---|
278 | return 0;
|
---|
279 | }
|
---|
280 |
|
---|
281 |
|
---|
282 | /**
|
---|
283 | * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
|
---|
284 | * the RTMpOnSpecific case.
|
---|
285 | *
|
---|
286 | * @param uUserCtx The user context argument (PRTMPARGS).
|
---|
287 | */
|
---|
288 | static ULONG_PTR __stdcall rtmpNtOnSpecificBroadcastIpiWrapper(ULONG_PTR uUserCtx)
|
---|
289 | {
|
---|
290 | PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
|
---|
291 | RTCPUID idCpu = KeGetCurrentProcessorNumber();
|
---|
292 | if (pArgs->idCpu == idCpu)
|
---|
293 | {
|
---|
294 | ASMAtomicIncU32(&pArgs->cHits);
|
---|
295 | pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
|
---|
296 | }
|
---|
297 | return 0;
|
---|
298 | }
|
---|
299 |
|
---|
300 |
|
---|
301 | /**
|
---|
302 | * Internal worker for the RTMpOn* APIs using KeIpiGenericCall.
|
---|
303 | *
|
---|
304 | * @returns VINF_SUCCESS.
|
---|
305 | * @param pfnWorker The callback.
|
---|
306 | * @param pvUser1 User argument 1.
|
---|
307 | * @param pvUser2 User argument 2.
|
---|
308 | * @param idCpu First CPU to match, ultimately specific to the
|
---|
309 | * pfnNativeWrapper used.
|
---|
310 | * @param idCpu2 Second CPU to match, ultimately specific to the
|
---|
311 | * pfnNativeWrapper used.
|
---|
312 | * @param pcHits Where to return the number of this. Optional.
|
---|
313 | */
|
---|
314 | static int rtMpCallUsingBroadcastIpi(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
|
---|
315 | PKIPI_BROADCAST_WORKER pfnNativeWrapper, RTCPUID idCpu, RTCPUID idCpu2,
|
---|
316 | uint32_t *pcHits)
|
---|
317 | {
|
---|
318 | RTMPARGS Args;
|
---|
319 | Args.pfnWorker = pfnWorker;
|
---|
320 | Args.pvUser1 = pvUser1;
|
---|
321 | Args.pvUser2 = pvUser2;
|
---|
322 | Args.idCpu = idCpu;
|
---|
323 | Args.idCpu2 = idCpu2;
|
---|
324 | Args.cRefs = 0;
|
---|
325 | Args.cHits = 0;
|
---|
326 |
|
---|
327 | AssertPtr(g_pfnrtKeIpiGenericCall);
|
---|
328 | g_pfnrtKeIpiGenericCall(pfnNativeWrapper, (uintptr_t)&Args);
|
---|
329 | if (pcHits)
|
---|
330 | *pcHits = Args.cHits;
|
---|
331 | return VINF_SUCCESS;
|
---|
332 | }
|
---|
333 |
|
---|
334 |
|
---|
335 | /**
|
---|
336 | * Wrapper between the native nt per-cpu callbacks and PFNRTWORKER
|
---|
337 | *
|
---|
338 | * @param Dpc DPC object
|
---|
339 | * @param DeferredContext Context argument specified by KeInitializeDpc
|
---|
340 | * @param SystemArgument1 Argument specified by KeInsertQueueDpc
|
---|
341 | * @param SystemArgument2 Argument specified by KeInsertQueueDpc
|
---|
342 | */
|
---|
343 | static VOID __stdcall rtmpNtDPCWrapper(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
|
---|
344 | {
|
---|
345 | PRTMPARGS pArgs = (PRTMPARGS)DeferredContext;
|
---|
346 |
|
---|
347 | ASMAtomicIncU32(&pArgs->cHits);
|
---|
348 | pArgs->pfnWorker(KeGetCurrentProcessorNumber(), pArgs->pvUser1, pArgs->pvUser2);
|
---|
349 |
|
---|
350 | /* Dereference the argument structure. */
|
---|
351 | int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
|
---|
352 | Assert(cRefs >= 0);
|
---|
353 | if (cRefs == 0)
|
---|
354 | ExFreePool(pArgs);
|
---|
355 | }
|
---|
356 |
|
---|
357 |
|
---|
358 | /**
|
---|
359 | * Internal worker for the RTMpOn* APIs.
|
---|
360 | *
|
---|
361 | * @returns IPRT status code.
|
---|
362 | * @param pfnWorker The callback.
|
---|
363 | * @param pvUser1 User argument 1.
|
---|
364 | * @param pvUser2 User argument 2.
|
---|
365 | * @param enmCpuid What to do / is idCpu valid.
|
---|
366 | * @param idCpu Used if enmCpuid is RT_NT_CPUID_SPECIFIC or
|
---|
367 | * RT_NT_CPUID_PAIR, otherwise ignored.
|
---|
368 | * @param idCpu2 Used if enmCpuid is RT_NT_CPUID_PAIR, otherwise ignored.
|
---|
369 | * @param pcHits Where to return the number of this. Optional.
|
---|
370 | */
|
---|
371 | static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
|
---|
372 | RT_NT_CPUID enmCpuid, RTCPUID idCpu, RTCPUID idCpu2, uint32_t *pcHits)
|
---|
373 | {
|
---|
374 | PRTMPARGS pArgs;
|
---|
375 | KDPC *paExecCpuDpcs;
|
---|
376 |
|
---|
377 | #if 0
|
---|
378 | /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
|
---|
379 | * driver verifier doesn't complain...
|
---|
380 | */
|
---|
381 | AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
|
---|
382 | #endif
|
---|
383 |
|
---|
384 | #ifdef IPRT_TARGET_NT4
|
---|
385 | KAFFINITY Mask;
|
---|
386 | /* g_pfnrtNt* are not present on NT anyway. */
|
---|
387 | return VERR_NOT_SUPPORTED;
|
---|
388 | #else
|
---|
389 | KAFFINITY Mask = KeQueryActiveProcessors();
|
---|
390 | #endif
|
---|
391 |
|
---|
392 | /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
|
---|
393 | if (!g_pfnrtNtKeFlushQueuedDpcs)
|
---|
394 | return VERR_NOT_SUPPORTED;
|
---|
395 |
|
---|
396 | pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, MAXIMUM_PROCESSORS*sizeof(KDPC) + sizeof(RTMPARGS), (ULONG)'RTMp');
|
---|
397 | if (!pArgs)
|
---|
398 | return VERR_NO_MEMORY;
|
---|
399 |
|
---|
400 | pArgs->pfnWorker = pfnWorker;
|
---|
401 | pArgs->pvUser1 = pvUser1;
|
---|
402 | pArgs->pvUser2 = pvUser2;
|
---|
403 | pArgs->idCpu = NIL_RTCPUID;
|
---|
404 | pArgs->idCpu2 = NIL_RTCPUID;
|
---|
405 | pArgs->cHits = 0;
|
---|
406 | pArgs->cRefs = 1;
|
---|
407 |
|
---|
408 | paExecCpuDpcs = (KDPC *)(pArgs + 1);
|
---|
409 |
|
---|
410 | if (enmCpuid == RT_NT_CPUID_SPECIFIC)
|
---|
411 | {
|
---|
412 | KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
|
---|
413 | KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
|
---|
414 | KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
|
---|
415 | pArgs->idCpu = idCpu;
|
---|
416 | }
|
---|
417 | else if (enmCpuid == RT_NT_CPUID_SPECIFIC)
|
---|
418 | {
|
---|
419 | KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
|
---|
420 | KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
|
---|
421 | KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
|
---|
422 | pArgs->idCpu = idCpu;
|
---|
423 |
|
---|
424 | KeInitializeDpc(&paExecCpuDpcs[1], rtmpNtDPCWrapper, pArgs);
|
---|
425 | KeSetImportanceDpc(&paExecCpuDpcs[1], HighImportance);
|
---|
426 | KeSetTargetProcessorDpc(&paExecCpuDpcs[1], (int)idCpu2);
|
---|
427 | pArgs->idCpu2 = idCpu2;
|
---|
428 | }
|
---|
429 | else
|
---|
430 | {
|
---|
431 | for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
|
---|
432 | {
|
---|
433 | KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
|
---|
434 | KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
|
---|
435 | KeSetTargetProcessorDpc(&paExecCpuDpcs[i], i);
|
---|
436 | }
|
---|
437 | }
|
---|
438 |
|
---|
439 | /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
|
---|
440 | * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
|
---|
441 | */
|
---|
442 | KIRQL oldIrql;
|
---|
443 | KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
|
---|
444 |
|
---|
445 | /*
|
---|
446 | * We cannot do other than assume a 1:1 relationship between the
|
---|
447 | * affinity mask and the process despite the warnings in the docs.
|
---|
448 | * If someone knows a better way to get this done, please let bird know.
|
---|
449 | */
|
---|
450 | ASMCompilerBarrier(); /* paranoia */
|
---|
451 | if (enmCpuid == RT_NT_CPUID_SPECIFIC)
|
---|
452 | {
|
---|
453 | ASMAtomicIncS32(&pArgs->cRefs);
|
---|
454 | BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
|
---|
455 | Assert(ret);
|
---|
456 | }
|
---|
457 | else if (enmCpuid == RT_NT_CPUID_PAIR)
|
---|
458 | {
|
---|
459 | ASMAtomicIncS32(&pArgs->cRefs);
|
---|
460 | BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
|
---|
461 | Assert(ret);
|
---|
462 |
|
---|
463 | ASMAtomicIncS32(&pArgs->cRefs);
|
---|
464 | ret = KeInsertQueueDpc(&paExecCpuDpcs[1], 0, 0);
|
---|
465 | Assert(ret);
|
---|
466 | }
|
---|
467 | else
|
---|
468 | {
|
---|
469 | unsigned iSelf = KeGetCurrentProcessorNumber();
|
---|
470 |
|
---|
471 | for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
|
---|
472 | {
|
---|
473 | if ( (i != iSelf)
|
---|
474 | && (Mask & RT_BIT_64(i)))
|
---|
475 | {
|
---|
476 | ASMAtomicIncS32(&pArgs->cRefs);
|
---|
477 | BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[i], 0, 0);
|
---|
478 | Assert(ret);
|
---|
479 | }
|
---|
480 | }
|
---|
481 | if (enmCpuid != RT_NT_CPUID_OTHERS)
|
---|
482 | pfnWorker(iSelf, pvUser1, pvUser2);
|
---|
483 | }
|
---|
484 |
|
---|
485 | KeLowerIrql(oldIrql);
|
---|
486 |
|
---|
487 | /* Flush all DPCs and wait for completion. (can take long!) */
|
---|
488 | /** @todo Consider changing this to an active wait using some atomic inc/dec
|
---|
489 | * stuff (and check for the current cpu above in the specific case). */
|
---|
490 | /** @todo Seems KeFlushQueuedDpcs doesn't wait for the DPCs to be completely
|
---|
491 | * executed. Seen pArgs being freed while some CPU was using it before
|
---|
492 | * cRefs was added. */
|
---|
493 | g_pfnrtNtKeFlushQueuedDpcs();
|
---|
494 |
|
---|
495 | if (pcHits)
|
---|
496 | *pcHits = pArgs->cHits;
|
---|
497 |
|
---|
498 | /* Dereference the argument structure. */
|
---|
499 | int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
|
---|
500 | Assert(cRefs >= 0);
|
---|
501 | if (cRefs == 0)
|
---|
502 | ExFreePool(pArgs);
|
---|
503 |
|
---|
504 | return VINF_SUCCESS;
|
---|
505 | }
|
---|
506 |
|
---|
507 |
|
---|
508 | RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
|
---|
509 | {
|
---|
510 | if (g_pfnrtKeIpiGenericCall)
|
---|
511 | return rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnAllBroadcastIpiWrapper,
|
---|
512 | NIL_RTCPUID, NIL_RTCPUID, NULL);
|
---|
513 | return rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_ALL, NIL_RTCPUID, NIL_RTCPUID, NULL);
|
---|
514 | }
|
---|
515 |
|
---|
516 |
|
---|
517 | RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
|
---|
518 | {
|
---|
519 | if (g_pfnrtKeIpiGenericCall)
|
---|
520 | return rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnOthersBroadcastIpiWrapper,
|
---|
521 | NIL_RTCPUID, NIL_RTCPUID, NULL);
|
---|
522 | return rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_OTHERS, NIL_RTCPUID, NIL_RTCPUID, NULL);
|
---|
523 | }
|
---|
524 |
|
---|
525 |
|
---|
526 | RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
|
---|
527 | {
|
---|
528 | int rc;
|
---|
529 | AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
|
---|
530 | AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
|
---|
531 | if ((fFlags & RTMPON_F_CONCURRENT_EXEC) && !g_pfnrtKeIpiGenericCall)
|
---|
532 | return VERR_NOT_SUPPORTED;
|
---|
533 |
|
---|
534 | /*
|
---|
535 | * Check that both CPUs are online before doing the broadcast call.
|
---|
536 | */
|
---|
537 | if ( RTMpIsCpuOnline(idCpu1)
|
---|
538 | && RTMpIsCpuOnline(idCpu2))
|
---|
539 | {
|
---|
540 | /*
|
---|
541 | * The broadcast IPI isn't quite as bad as it could have been, because
|
---|
542 | * it looks like windows doesn't synchronize CPUs on the way out, they
|
---|
543 | * seems to get back to normal work while the pair is still busy.
|
---|
544 | */
|
---|
545 | uint32_t cHits = 0;
|
---|
546 | if (g_pfnrtKeIpiGenericCall)
|
---|
547 | rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnPairBroadcastIpiWrapper, idCpu1, idCpu2, &cHits);
|
---|
548 | else
|
---|
549 | rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_PAIR, idCpu1, idCpu2, &cHits);
|
---|
550 | if (RT_SUCCESS(rc))
|
---|
551 | {
|
---|
552 | Assert(cHits <= 2);
|
---|
553 | if (cHits == 2)
|
---|
554 | rc = VINF_SUCCESS;
|
---|
555 | else if (cHits == 1)
|
---|
556 | rc = VERR_NOT_ALL_CPUS_SHOWED;
|
---|
557 | else if (cHits == 0)
|
---|
558 | rc = VERR_CPU_OFFLINE;
|
---|
559 | else
|
---|
560 | rc = VERR_CPU_IPE_1;
|
---|
561 | }
|
---|
562 | }
|
---|
563 | /*
|
---|
564 | * A CPU must be present to be considered just offline.
|
---|
565 | */
|
---|
566 | else if ( RTMpIsCpuPresent(idCpu1)
|
---|
567 | && RTMpIsCpuPresent(idCpu2))
|
---|
568 | rc = VERR_CPU_OFFLINE;
|
---|
569 | else
|
---|
570 | rc = VERR_CPU_NOT_FOUND;
|
---|
571 | return rc;
|
---|
572 | }
|
---|
573 |
|
---|
574 |
|
---|
575 | RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
|
---|
576 | {
|
---|
577 | return g_pfnrtKeIpiGenericCall != NULL;
|
---|
578 | }
|
---|
579 |
|
---|
580 |
|
---|
581 | /**
|
---|
582 | * Releases a reference to a RTMPNTONSPECIFICARGS heap allocation, freeing it
|
---|
583 | * when the last reference is released.
|
---|
584 | */
|
---|
585 | DECLINLINE(void) rtMpNtOnSpecificRelease(PRTMPNTONSPECIFICARGS pArgs)
|
---|
586 | {
|
---|
587 | uint32_t cRefs = ASMAtomicDecU32(&pArgs->cRefs);
|
---|
588 | AssertMsg(cRefs <= 1, ("cRefs=%#x\n", cRefs));
|
---|
589 | if (cRefs == 0)
|
---|
590 | ExFreePool(pArgs);
|
---|
591 | }
|
---|
592 |
|
---|
593 |
|
---|
594 | /**
|
---|
595 | * Wrapper between the native nt per-cpu callbacks and PFNRTWORKER
|
---|
596 | *
|
---|
597 | * @param Dpc DPC object
|
---|
598 | * @param DeferredContext Context argument specified by KeInitializeDpc
|
---|
599 | * @param SystemArgument1 Argument specified by KeInsertQueueDpc
|
---|
600 | * @param SystemArgument2 Argument specified by KeInsertQueueDpc
|
---|
601 | */
|
---|
602 | static VOID __stdcall rtMpNtOnSpecificDpcWrapper(IN PKDPC Dpc, IN PVOID DeferredContext,
|
---|
603 | IN PVOID SystemArgument1, IN PVOID SystemArgument2)
|
---|
604 | {
|
---|
605 | PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)DeferredContext;
|
---|
606 | ASMAtomicWriteBool(&pArgs->fExecuting, true);
|
---|
607 |
|
---|
608 | pArgs->CallbackArgs.pfnWorker(KeGetCurrentProcessorNumber(), pArgs->CallbackArgs.pvUser1, pArgs->CallbackArgs.pvUser2);
|
---|
609 |
|
---|
610 | ASMAtomicWriteBool(&pArgs->fDone, true);
|
---|
611 | KeSetEvent(&pArgs->DoneEvt, 1 /*PriorityIncrement*/, FALSE /*Wait*/);
|
---|
612 |
|
---|
613 | rtMpNtOnSpecificRelease(pArgs);
|
---|
614 | }
|
---|
615 |
|
---|
616 |
|
---|
/**
 * Executes pfnWorker on the given CPU and waits for it to complete.
 *
 * Prefers the broadcast IPI path when available and appropriate; otherwise
 * queues a targeted DPC and waits (actively first, then on an event) for it
 * to run on the remote CPU.
 *
 * @returns IPRT status code: VINF_SUCCESS, VERR_CPU_NOT_FOUND,
 *          VERR_CPU_OFFLINE, VERR_CPU_IPE_1, or a status converted from the
 *          NT wait result.
 * @param   idCpu       The target CPU.
 * @param   pfnWorker   The callback.
 * @param   pvUser1     User argument 1.
 * @param   pvUser2     User argument 2.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    /*
     * Don't try mess with an offline CPU.
     */
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
              ? VERR_CPU_NOT_FOUND
              : VERR_CPU_OFFLINE;

    /*
     * Use the broadcast IPI routine if there are no more than two CPUs online,
     * or if the current IRQL is unsuitable for KeWaitForSingleObject.
     */
    int rc;
    uint32_t cHits = 0;
    if (   g_pfnrtKeIpiGenericCall
        && (   RTMpGetOnlineCount() <= 2
            || KeGetCurrentIrql()   > APC_LEVEL)
       )
    {
        rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnSpecificBroadcastIpiWrapper,
                                       idCpu, NIL_RTCPUID, &cHits);
        if (RT_SUCCESS(rc))
        {
            /* Exactly one hit means the target CPU ran the worker. */
            if (cHits == 1)
                return VINF_SUCCESS;
            rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
        }
        return rc;
    }

#if 0
    rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu, NIL_RTCPUID, &cHits);
    if (RT_SUCCESS(rc))
    {
        if (cHits == 1)
            return VINF_SUCCESS;
        rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
    }
    return rc;

#else
    /*
     * Initialize the argument package and the objects within it.
     * The package is referenced counted to avoid unnecessary spinning to
     * synchronize cleanup and prevent stack corruption.
     */
    PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)ExAllocatePoolWithTag(NonPagedPool, sizeof(*pArgs), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;
    pArgs->cRefs                  = 2;    /* One for us, one for the DPC routine. */
    pArgs->fExecuting             = false;
    pArgs->fDone                  = false;
    pArgs->CallbackArgs.pfnWorker = pfnWorker;
    pArgs->CallbackArgs.pvUser1   = pvUser1;
    pArgs->CallbackArgs.pvUser2   = pvUser2;
    pArgs->CallbackArgs.idCpu     = idCpu;
    pArgs->CallbackArgs.cHits     = 0;
    pArgs->CallbackArgs.cRefs     = 2;
    KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /* not signalled */);
    KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);
    KeSetImportanceDpc(&pArgs->Dpc, HighImportance);
    KeSetTargetProcessorDpc(&pArgs->Dpc, (int)idCpu);

    /*
     * Disable preemption while we check the current processor and inserts the DPC.
     */
    KIRQL bOldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &bOldIrql);
    ASMCompilerBarrier(); /* paranoia */

    if (RTMpCpuId() == idCpu)
    {
        /* Just execute the callback on the current CPU. */
        pfnWorker(idCpu, pvUser1, pvUser2);
        KeLowerIrql(bOldIrql);

        /* No DPC was queued, so we own both references and can free directly. */
        ExFreePool(pArgs);
        return VINF_SUCCESS;
    }

    /* Different CPU, so queue it if the CPU is still online. */
    if (RTMpIsCpuOnline(idCpu))
    {
        BOOLEAN fRc = KeInsertQueueDpc(&pArgs->Dpc, 0, 0);
        Assert(fRc);
        KeLowerIrql(bOldIrql);

        uint64_t const nsRealWaitTS = RTTimeNanoTS();

        /*
         * Wait actively for a while in case the CPU/thread responds quickly.
         */
        uint32_t cLoopsLeft = 0x20000;
        while (cLoopsLeft-- > 0)
        {
            if (pArgs->fDone)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
            ASMNopPause();
        }

        /*
         * It didn't respond, so wait on the event object, poking the CPU if it's slow.
         */
        LARGE_INTEGER Timeout;
        Timeout.QuadPart = -10000; /* 1ms */
        NTSTATUS rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
        if (rcNt == STATUS_SUCCESS)
        {
            rtMpNtOnSpecificRelease(pArgs);
            return VINF_SUCCESS;
        }

        /* If it hasn't responded yet, maybe poke it and wait some more. */
        if (rcNt == STATUS_TIMEOUT)
        {
#ifndef IPRT_TARGET_NT4
            /* Only poke via the mechanisms known to deliver a dispatch
               interrupt to a specific CPU. */
            if (   !pArgs->fExecuting
                && (   g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalSendSoftwareInterrupt
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiW7Plus
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiPreW7))
                RTMpPokeCpu(idCpu);
#endif

            Timeout.QuadPart = -1280000; /* 128ms */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt == STATUS_SUCCESS)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
        }

        /*
         * Something weird is happening, try bail out.
         */
        if (KeRemoveQueueDpc(&pArgs->Dpc))
        {
            ExFreePool(pArgs); /* DPC was still queued, so we can return without further ado. */
            LogRel(("RTMpOnSpecific(%#x): Not processed after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        else
        {
            /* DPC is running, wait a good while for it to complete. */
            LogRel(("RTMpOnSpecific(%#x): Still running after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));

            Timeout.QuadPart = -30*1000*1000*10; /* 30 seconds */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt != STATUS_SUCCESS)
                LogRel(("RTMpOnSpecific(%#x): Giving up on running worker after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        rc = RTErrConvertFromNtStatus(rcNt);
    }
    else
    {
        /* CPU is offline.*/
        KeLowerIrql(bOldIrql);
        rc = !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
    }

    rtMpNtOnSpecificRelease(pArgs);
    return rc;
#endif
}
|
---|
785 |
|
---|
786 |
|
---|
787 |
|
---|
788 |
|
---|
789 | static VOID rtMpNtPokeCpuDummy(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
|
---|
790 | {
|
---|
791 | NOREF(Dpc);
|
---|
792 | NOREF(DeferredContext);
|
---|
793 | NOREF(SystemArgument1);
|
---|
794 | NOREF(SystemArgument2);
|
---|
795 | }
|
---|
796 |
|
---|
797 | #ifndef IPRT_TARGET_NT4
|
---|
798 |
|
---|
/** Callback used by rtMpPokeCpuUsingBroadcastIpi.
 *
 * Intentionally does nothing - the IPI delivery itself is the poke.
 *
 * @returns 0 (value required by the NT contract, otherwise unused).
 * @param   Argument    Ignored.
 */
static ULONG_PTR __stdcall rtMpIpiGenericCall(ULONG_PTR Argument)
{
    NOREF(Argument);
    return 0;
}
|
---|
805 |
|
---|
806 |
|
---|
/**
 * RTMpPokeCpu worker that uses broadcast IPIs for doing the work.
 *
 * Note that idCpu is not used: KeIpiGenericCall interrupts ALL CPUs, which
 * includes the intended target.
 *
 * @returns VINF_SUCCESS
 * @param   idCpu   The CPU identifier (unused, see above).
 */
int rtMpPokeCpuUsingBroadcastIpi(RTCPUID idCpu)
{
    g_pfnrtKeIpiGenericCall(rtMpIpiGenericCall, 0);
    return VINF_SUCCESS;
}
|
---|
818 |
|
---|
819 |
|
---|
820 | /**
|
---|
821 | * RTMpPokeCpu worker that uses HalSendSoftwareInterrupt to get the job done.
|
---|
822 | *
|
---|
823 | * This is only really available on AMD64, at least at the time of writing.
|
---|
824 | *
|
---|
825 | * @returns VINF_SUCCESS
|
---|
826 | * @param idCpu The CPU identifier.
|
---|
827 | */
|
---|
828 | int rtMpPokeCpuUsingHalSendSoftwareInterrupt(RTCPUID idCpu)
|
---|
829 | {
|
---|
830 | g_pfnrtNtHalSendSoftwareInterrupt(idCpu, DISPATCH_LEVEL);
|
---|
831 | return VINF_SUCCESS;
|
---|
832 | }
|
---|
833 |
|
---|
834 |
|
---|
/**
 * RTMpPokeCpu worker that uses the Windows 7 and later version of
 * HalRequestIpi (HalRequestIpiW7Plus) to get the job done.
 *
 * Builds a single-CPU KAFFINITY_EX set for the target and asks the HAL to IPI
 * it.
 *
 * @returns VINF_SUCCESS
 * @param   idCpu               The CPU identifier; treated as an NT processor
 *                              number (group + number), see note below.
 */
int rtMpPokeCpuUsingHalReqestIpiW7Plus(RTCPUID idCpu)
{
    /*
     * I think we'll let idCpu be an NT processor number and not a HAL processor
     * index.  KeAddProcessorAffinityEx is for HAL and uses HAL processor
     * indexes as input from what I can tell.
     *
     * NOTE(review): the Group/Number split below assumes at most 64 logical
     * processors per group (MAXIMUM_PROC_PER_GROUP on AMD64) and that RTCPUID
     * values are laid out group-major accordingly -- confirm against how the
     * rest of IPRT maps RTCPUID to NT processor numbers.
     */
    PROCESSOR_NUMBER ProcNumber = { /*Group=*/ idCpu / 64, /*Number=*/ idCpu % 64, /* Reserved=*/ 0};
    KAFFINITY_EX Target;
    /* Presumably initializes Target to an empty affinity set -- TODO confirm against WDK docs. */
    g_pfnrtKeInitializeAffinityEx(&Target);
    /* Convert the (group, number) pair to a HAL/system processor index and add it to the set. */
    g_pfnrtKeAddProcessorAffinityEx(&Target, g_pfnrtKeGetProcessorIndexFromNumber(&ProcNumber));

    /* First argument is 0 here; presumably flags/vector selector -- TODO confirm. */
    g_pfnrtHalRequestIpiW7Plus(0, &Target);
    return VINF_SUCCESS;
}
|
---|
857 |
|
---|
858 |
|
---|
859 | /**
|
---|
860 | * RTMpPokeCpu worker that uses the Vista and earlier version of HalRequestIpip
|
---|
861 | * to get the job done.
|
---|
862 | *
|
---|
863 | * @returns VINF_SUCCESS
|
---|
864 | * @param idCpu The CPU identifier.
|
---|
865 | */
|
---|
866 | int rtMpPokeCpuUsingHalReqestIpiPreW7(RTCPUID idCpu)
|
---|
867 | {
|
---|
868 | __debugbreak(); /** @todo this code needs testing!! */
|
---|
869 | KAFFINITY Target = 1;
|
---|
870 | Target <<= idCpu;
|
---|
871 | g_pfnrtHalRequestIpiPreW7(Target);
|
---|
872 | return VINF_SUCCESS;
|
---|
873 | }
|
---|
874 |
|
---|
875 | #endif /* !IPRT_TARGET_NT4 */
|
---|
876 |
|
---|
877 |
|
---|
878 | int rtMpPokeCpuUsingDpc(RTCPUID idCpu)
|
---|
879 | {
|
---|
880 | /*
|
---|
881 | * APC fallback.
|
---|
882 | */
|
---|
883 | static KDPC s_aPokeDpcs[MAXIMUM_PROCESSORS] = {0};
|
---|
884 | static bool s_fPokeDPCsInitialized = false;
|
---|
885 |
|
---|
886 | if (!s_fPokeDPCsInitialized)
|
---|
887 | {
|
---|
888 | for (unsigned i = 0; i < RT_ELEMENTS(s_aPokeDpcs); i++)
|
---|
889 | {
|
---|
890 | KeInitializeDpc(&s_aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
|
---|
891 | KeSetImportanceDpc(&s_aPokeDpcs[i], HighImportance);
|
---|
892 | KeSetTargetProcessorDpc(&s_aPokeDpcs[i], (int)i);
|
---|
893 | }
|
---|
894 | s_fPokeDPCsInitialized = true;
|
---|
895 | }
|
---|
896 |
|
---|
897 | /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
|
---|
898 | * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
|
---|
899 | */
|
---|
900 | KIRQL oldIrql;
|
---|
901 | KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
|
---|
902 |
|
---|
903 | KeSetImportanceDpc(&s_aPokeDpcs[idCpu], HighImportance);
|
---|
904 | KeSetTargetProcessorDpc(&s_aPokeDpcs[idCpu], (int)idCpu);
|
---|
905 |
|
---|
906 | /* Assuming here that high importance DPCs will be delivered immediately; or at least an IPI will be sent immediately.
|
---|
907 | * @note: not true on at least Vista & Windows 7
|
---|
908 | */
|
---|
909 | BOOLEAN bRet = KeInsertQueueDpc(&s_aPokeDpcs[idCpu], 0, 0);
|
---|
910 |
|
---|
911 | KeLowerIrql(oldIrql);
|
---|
912 | return (bRet == TRUE) ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
|
---|
913 | }
|
---|
914 |
|
---|
915 |
|
---|
916 | RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
|
---|
917 | {
|
---|
918 | if (!RTMpIsCpuOnline(idCpu))
|
---|
919 | return !RTMpIsCpuPossible(idCpu)
|
---|
920 | ? VERR_CPU_NOT_FOUND
|
---|
921 | : VERR_CPU_OFFLINE;
|
---|
922 | /* Calls rtMpSendIpiFallback, rtMpSendIpiWin7AndLater or rtMpSendIpiVista. */
|
---|
923 | return g_pfnrtMpPokeCpuWorker(idCpu);
|
---|
924 | }
|
---|
925 |
|
---|
926 |
|
---|
927 | RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
|
---|
928 | {
|
---|
929 | return false;
|
---|
930 | }
|
---|
931 |
|
---|