source: vbox/trunk/src/VBox/Runtime/r0drv/nt/mp-r0drv-nt.cpp@ 70425

Last change on this file since 70425 was 70212, checked in by vboxsync, 7 years ago

IPRT/r0drv/nt: Dynamically import 4 more functions to make it work on NT 3.50 - just because we can :-)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 74.9 KB
1/* $Id: mp-r0drv-nt.cpp 70212 2017-12-19 02:54:28Z vboxsync $ */
2/** @file
3 * IPRT - Multiprocessor, Ring-0 Driver, NT.
4 */
5
6/*
7 * Copyright (C) 2008-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/mp.h>
34#include <iprt/cpuset.h>
35#include <iprt/err.h>
36#include <iprt/asm.h>
37#include <iprt/log.h>
38#include <iprt/mem.h>
39#include <iprt/time.h>
40#include "r0drv/mp-r0drv.h"
41#include "symdb.h"
42#include "internal-r0drv-nt.h"
43#include "internal/mp.h"
44
45
46/*********************************************************************************************************************************
47* Structures and Typedefs *
48*********************************************************************************************************************************/
49typedef enum
50{
51 RT_NT_CPUID_SPECIFIC,
52 RT_NT_CPUID_PAIR,
53 RT_NT_CPUID_OTHERS,
54 RT_NT_CPUID_ALL
55} RT_NT_CPUID;
56
57
58/**
 59 * Used by RTMpOnSpecific.
60 */
61typedef struct RTMPNTONSPECIFICARGS
62{
63 /** Set if we're executing. */
64 bool volatile fExecuting;
65 /** Set when done executing. */
66 bool volatile fDone;
67 /** Number of references to this heap block. */
68 uint32_t volatile cRefs;
69 /** Event that the calling thread is waiting on. */
70 KEVENT DoneEvt;
71 /** The deferred procedure call object. */
72 KDPC Dpc;
73 /** The callback argument package. */
74 RTMPARGS CallbackArgs;
75} RTMPNTONSPECIFICARGS;
76/** Pointer to an argument/state structure for RTMpOnSpecific on NT. */
77typedef RTMPNTONSPECIFICARGS *PRTMPNTONSPECIFICARGS;
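/* Illustration only (not part of this file): the structure above is consumed by
 * rtMpNtOnSpecificDpcWrapper further down, while the allocating/waiting side lives
 * in RTMpOnSpecific (truncated in this excerpt). A minimal sketch of that lifecycle,
 * assuming the caller simply queues the DPC on the target CPU and waits on DoneEvt
 * (the real code additionally uses fExecuting/fDone): */
#if 0 /* illustrative sketch */
static int rtMpNtOnSpecificSketch(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    /* One reference for the caller, one for the DPC routine. */
    PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)RTMemAllocZ(sizeof(*pArgs));
    if (!pArgs)
        return VERR_NO_MEMORY;
    pArgs->cRefs                  = 2;
    pArgs->CallbackArgs.pfnWorker = pfnWorker;
    pArgs->CallbackArgs.pvUser1   = pvUser1;
    pArgs->CallbackArgs.pvUser2   = pvUser2;
    KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /*State*/);
    KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);

    int rc = rtMpNtSetTargetProcessorDpc(&pArgs->Dpc, idCpu);
    if (RT_SUCCESS(rc) && KeInsertQueueDpc(&pArgs->Dpc, NULL, NULL))
    {
        /* Wait for the DPC wrapper to signal completion, then drop our reference. */
        KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /*Alertable*/, NULL);
        rtMpNtOnSpecificRelease(pArgs);
        return VINF_SUCCESS;
    }
    RTMemFree(pArgs); /* DPC never queued, safe to free directly. */
    return RT_FAILURE(rc) ? rc : VERR_CPU_OFFLINE;
}
#endif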
78
79
80/*********************************************************************************************************************************
81* Defined Constants And Macros *
82*********************************************************************************************************************************/
83/** Inactive bit for g_aidRtMpNtByCpuSetIdx. */
84#define RTMPNT_ID_F_INACTIVE RT_BIT_32(31)
85
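/* Illustration only (not part of this file): when IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
 * is defined, this file relies on RTMPCPUID_FROM_GROUP_AND_NUMBER, rtMpCpuIdGetGroup and
 * rtMpCpuIdGetGroupMember from one of the internal headers included above. A minimal
 * sketch of such a packing, assuming the group index sits above an 8-bit group member
 * (the real layout may differ): */
#if 0 /* illustrative sketch */
#define RTMPCPUID_FROM_GROUP_AND_NUMBER(a_idxGroup, a_idxGroupMember) \
    ( ((uint32_t)(a_idxGroup) << 8) | (uint32_t)(a_idxGroupMember) )

DECLINLINE(uint16_t) rtMpCpuIdGetGroup(RTCPUID idCpu)
{
    return (uint16_t)((idCpu & ~RTMPNT_ID_F_INACTIVE) >> 8);
}

DECLINLINE(uint8_t) rtMpCpuIdGetGroupMember(RTCPUID idCpu)
{
    return (uint8_t)idCpu; /* MAXIMUM_PROC_PER_GROUP is 64, so 8 bits suffice. */
}
#endif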
86
87/*********************************************************************************************************************************
88* Global Variables *
89*********************************************************************************************************************************/
90/** Maximum number of processor groups. */
91uint32_t g_cRtMpNtMaxGroups;
92/** Maximum number of processors. */
93uint32_t g_cRtMpNtMaxCpus;
94/** Number of active processors. */
95uint32_t volatile g_cRtMpNtActiveCpus;
96/** The NT CPU set.
 97 * KeQueryActiveProcessors() cannot be called at all IRQLs and therefore we'll
98 * have to cache it. Fortunately, NT doesn't really support taking CPUs offline,
99 * and taking them online was introduced with W2K8 where it is intended for virtual
100 * machines and not real HW. We update this, g_cRtMpNtActiveCpus and
 101 * g_aidRtMpNtByCpuSetIdx in rtR0NtMpProcessorChangeCallback.
102 */
103RTCPUSET g_rtMpNtCpuSet;
104
105/** Static per group info.
106 * @remarks With RTCPUSET_MAX_CPUS as 256, this takes up 33KB. */
107static struct
108{
109 /** The max CPUs in the group. */
110 uint16_t cMaxCpus;
111 /** The number of active CPUs at the time of initialization. */
112 uint16_t cActiveCpus;
113 /** CPU set indexes for each CPU in the group. */
114 int16_t aidxCpuSetMembers[64];
115} g_aRtMpNtCpuGroups[RTCPUSET_MAX_CPUS];
116/** Maps CPU set indexes to RTCPUID.
 117 * Inactive CPUs have bit 31 set (RTMPNT_ID_F_INACTIVE) so we can identify them
 118 * and shuffle duplicates during CPU hotplugging. We assign temporary IDs to
 119 * the inactive CPUs starting at g_cRtMpNtMaxCpus - 1, ASSUMING that active
 120 * CPUs have IDs from 0 to g_cRtMpNtActiveCpus. */
121RTCPUID g_aidRtMpNtByCpuSetIdx[RTCPUSET_MAX_CPUS];
122/** The handle of the rtR0NtMpProcessorChangeCallback registration. */
123static PVOID g_pvMpCpuChangeCallback = NULL;
124
125
126/*********************************************************************************************************************************
127* Internal Functions *
128*********************************************************************************************************************************/
129static VOID __stdcall rtR0NtMpProcessorChangeCallback(void *pvUser, PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeCtx,
130 PNTSTATUS prcOperationStatus);
131static int rtR0NtInitQueryGroupRelations(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX **ppInfo);
132
133
134
135/**
 136 * Initializes multiprocessor globals (called by rtR0InitNative).
137 *
138 * @returns IPRT status code.
139 * @param pOsVerInfo Version information.
140 */
141DECLHIDDEN(int) rtR0MpNtInit(RTNTSDBOSVER const *pOsVerInfo)
142{
143#define MY_CHECK_BREAK(a_Check, a_DbgPrintArgs) \
144 AssertMsgBreakStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs; rc = VERR_INTERNAL_ERROR_4 )
145#define MY_CHECK_RETURN(a_Check, a_DbgPrintArgs, a_rcRet) \
146 AssertMsgReturnStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs, a_rcRet)
147#define MY_CHECK(a_Check, a_DbgPrintArgs) \
148 AssertMsgStmt(a_Check, a_DbgPrintArgs, DbgPrint a_DbgPrintArgs; rc = VERR_INTERNAL_ERROR_4 )
149
150 /*
151 * API combination checks.
152 */
153 MY_CHECK_RETURN(!g_pfnrtKeSetTargetProcessorDpcEx || g_pfnrtKeGetProcessorNumberFromIndex,
154 ("IPRT: Fatal: Missing KeSetTargetProcessorDpcEx without KeGetProcessorNumberFromIndex!\n"),
155 VERR_SYMBOL_NOT_FOUND);
156
157 /*
 158 * Get the max number of processor groups.
 159 *
 160 * We may need to adjust this number upwards below, because Windows likes to keep
 161 * all options open when it comes to hotplugged CPU group assignments. A
 162 * server advertising up to 64 CPUs in the ACPI table will get a result of
 163 * 64 from KeQueryMaximumGroupCount. That makes sense. However, when Windows
 164 * Server 2012 does a two processor group setup for it, the sum of the
 165 * GroupInfo[*].MaximumProcessorCount members below is 128. This is probably
 166 * because Windows doesn't want to make decisions about the grouping of hotpluggable CPUs.
 167 * So, we need to bump the maximum count to 128 below to deal with this, as we
 168 * want to have valid CPU set indexes for all potential CPUs - otherwise we could
 169 * not use the RTMpGetSet() result, and RTCpuSetCount(RTMpGetSet())
 170 * should equal RTMpGetCount().
171 */
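/* Illustration of the scenario above (hypothetical numbers): a box whose ACPI tables
 * advertise 64 possible CPUs may end up with GroupInfo[0].MaximumProcessorCount = 64
 * and GroupInfo[1].MaximumProcessorCount = 64 once Windows Server 2012 splits it into
 * two groups, so the recalculation further down arrives at
 * g_cRtMpNtMaxCpus = 64 + 64 = 128 even though at most 64 CPUs can ever show up. */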
172 if (g_pfnrtKeQueryMaximumGroupCount)
173 {
174 g_cRtMpNtMaxGroups = g_pfnrtKeQueryMaximumGroupCount();
175 MY_CHECK_RETURN(g_cRtMpNtMaxGroups <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxGroups > 0,
176 ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u\n", g_cRtMpNtMaxGroups, RTCPUSET_MAX_CPUS),
177 VERR_MP_TOO_MANY_CPUS);
178 }
179 else
180 g_cRtMpNtMaxGroups = 1;
181
182 /*
 183 * Get the max number of CPUs.
184 * This also defines the range of NT CPU indexes, RTCPUID and index into RTCPUSET.
185 */
186 if (g_pfnrtKeQueryMaximumProcessorCountEx)
187 {
188 g_cRtMpNtMaxCpus = g_pfnrtKeQueryMaximumProcessorCountEx(ALL_PROCESSOR_GROUPS);
189 MY_CHECK_RETURN(g_cRtMpNtMaxCpus <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxCpus > 0,
190 ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, max %u [KeQueryMaximumProcessorCountEx]\n",
 191 g_cRtMpNtMaxCpus, RTCPUSET_MAX_CPUS),
192 VERR_MP_TOO_MANY_CPUS);
193 }
194 else if (g_pfnrtKeQueryMaximumProcessorCount)
195 {
196 g_cRtMpNtMaxCpus = g_pfnrtKeQueryMaximumProcessorCount();
197 MY_CHECK_RETURN(g_cRtMpNtMaxCpus <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxCpus > 0,
198 ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, max %u [KeQueryMaximumProcessorCount]\n",
 199 g_cRtMpNtMaxCpus, RTCPUSET_MAX_CPUS),
200 VERR_MP_TOO_MANY_CPUS);
201 }
202 else if (g_pfnrtKeQueryActiveProcessors)
203 {
204 KAFFINITY fActiveProcessors = g_pfnrtKeQueryActiveProcessors();
205 MY_CHECK_RETURN(fActiveProcessors != 0,
206 ("IPRT: Fatal: KeQueryActiveProcessors returned 0!\n"),
207 VERR_INTERNAL_ERROR_2);
208 g_cRtMpNtMaxCpus = 0;
209 do
210 {
211 g_cRtMpNtMaxCpus++;
212 fActiveProcessors >>= 1;
213 } while (fActiveProcessors);
214 }
215 else
216 g_cRtMpNtMaxCpus = KeNumberProcessors;
217
218 /*
219 * Just because we're a bit paranoid about getting something wrong wrt to the
220 * kernel interfaces, we try 16 times to get the KeQueryActiveProcessorCountEx
221 * and KeQueryLogicalProcessorRelationship information to match up.
222 */
223 for (unsigned cTries = 0;; cTries++)
224 {
225 /*
226 * Get number of active CPUs.
227 */
228 if (g_pfnrtKeQueryActiveProcessorCountEx)
229 {
230 g_cRtMpNtActiveCpus = g_pfnrtKeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
231 MY_CHECK_RETURN(g_cRtMpNtActiveCpus <= g_cRtMpNtMaxCpus && g_cRtMpNtActiveCpus > 0,
232 ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u [KeQueryActiveProcessorCountEx]\n",
233 g_cRtMpNtMaxGroups, g_cRtMpNtMaxCpus),
234 VERR_MP_TOO_MANY_CPUS);
235 }
236 else if (g_pfnrtKeQueryActiveProcessorCount)
237 {
238 g_cRtMpNtActiveCpus = g_pfnrtKeQueryActiveProcessorCount(NULL);
239 MY_CHECK_RETURN(g_cRtMpNtActiveCpus <= g_cRtMpNtMaxCpus && g_cRtMpNtActiveCpus > 0,
240 ("IPRT: Fatal: g_cRtMpNtMaxGroups=%u, max %u [KeQueryActiveProcessorCount]\n",
241 g_cRtMpNtMaxGroups, g_cRtMpNtMaxCpus),
242 VERR_MP_TOO_MANY_CPUS);
243 }
244 else
245 g_cRtMpNtActiveCpus = g_cRtMpNtMaxCpus;
246
247 /*
248 * Query the details for the groups to figure out which CPUs are online as
249 * well as the NT index limit.
250 */
251 for (unsigned i = 0; i < RT_ELEMENTS(g_aidRtMpNtByCpuSetIdx); i++)
252#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
253 g_aidRtMpNtByCpuSetIdx[i] = NIL_RTCPUID;
254#else
255 g_aidRtMpNtByCpuSetIdx[i] = i < g_cRtMpNtMaxCpus ? i : NIL_RTCPUID;
256#endif
257 for (unsigned idxGroup = 0; idxGroup < RT_ELEMENTS(g_aRtMpNtCpuGroups); idxGroup++)
258 {
259 g_aRtMpNtCpuGroups[idxGroup].cMaxCpus = 0;
260 g_aRtMpNtCpuGroups[idxGroup].cActiveCpus = 0;
261 for (unsigned idxMember = 0; idxMember < RT_ELEMENTS(g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers); idxMember++)
262 g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = -1;
263 }
264
265 if (g_pfnrtKeQueryLogicalProcessorRelationship)
266 {
267 MY_CHECK_RETURN(g_pfnrtKeGetProcessorIndexFromNumber,
268 ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeGetProcessorIndexFromNumber!\n"),
269 VERR_SYMBOL_NOT_FOUND);
270 MY_CHECK_RETURN(g_pfnrtKeGetProcessorNumberFromIndex,
271 ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeGetProcessorIndexFromNumber!\n"),
272 VERR_SYMBOL_NOT_FOUND);
273 MY_CHECK_RETURN(g_pfnrtKeSetTargetProcessorDpcEx,
274 ("IPRT: Fatal: Found KeQueryLogicalProcessorRelationship but not KeSetTargetProcessorDpcEx!\n"),
275 VERR_SYMBOL_NOT_FOUND);
276
277 SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = NULL;
278 int rc = rtR0NtInitQueryGroupRelations(&pInfo);
279 if (RT_FAILURE(rc))
280 return rc;
281
282 MY_CHECK(pInfo->Group.MaximumGroupCount == g_cRtMpNtMaxGroups,
283 ("IPRT: Fatal: MaximumGroupCount=%u != g_cRtMpNtMaxGroups=%u!\n",
284 pInfo->Group.MaximumGroupCount, g_cRtMpNtMaxGroups));
285 MY_CHECK(pInfo->Group.ActiveGroupCount > 0 && pInfo->Group.ActiveGroupCount <= g_cRtMpNtMaxGroups,
286 ("IPRT: Fatal: ActiveGroupCount=%u != g_cRtMpNtMaxGroups=%u!\n",
287 pInfo->Group.ActiveGroupCount, g_cRtMpNtMaxGroups));
288
289 /*
290 * First we need to recalc g_cRtMpNtMaxCpus (see above).
291 */
292 uint32_t cMaxCpus = 0;
293 uint32_t idxGroup;
294 for (idxGroup = 0; RT_SUCCESS(rc) && idxGroup < pInfo->Group.ActiveGroupCount; idxGroup++)
295 {
296 const PROCESSOR_GROUP_INFO *pGrpInfo = &pInfo->Group.GroupInfo[idxGroup];
297 MY_CHECK_BREAK(pGrpInfo->MaximumProcessorCount <= MAXIMUM_PROC_PER_GROUP,
298 ("IPRT: Fatal: MaximumProcessorCount=%u\n", pGrpInfo->MaximumProcessorCount));
299 MY_CHECK_BREAK(pGrpInfo->ActiveProcessorCount <= pGrpInfo->MaximumProcessorCount,
300 ("IPRT: Fatal: ActiveProcessorCount=%u > MaximumProcessorCount=%u\n",
301 pGrpInfo->ActiveProcessorCount, pGrpInfo->MaximumProcessorCount));
302 cMaxCpus += pGrpInfo->MaximumProcessorCount;
303 }
304 if (cMaxCpus > g_cRtMpNtMaxCpus && RT_SUCCESS(rc))
305 {
306 DbgPrint("IPRT: g_cRtMpNtMaxCpus=%u -> %u\n", g_cRtMpNtMaxCpus, cMaxCpus);
307#ifndef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
308 uint32_t i = RT_MIN(cMaxCpus, RT_ELEMENTS(g_aidRtMpNtByCpuSetIdx));
309 while (i-- > g_cRtMpNtMaxCpus)
310 g_aidRtMpNtByCpuSetIdx[i] = i;
311#endif
312 g_cRtMpNtMaxCpus = cMaxCpus;
 313 if (g_cRtMpNtMaxCpus > RTCPUSET_MAX_CPUS)
 314 {
 315 MY_CHECK(g_cRtMpNtMaxCpus <= RTCPUSET_MAX_CPUS && g_cRtMpNtMaxCpus > 0,
 316 ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, max %u\n", g_cRtMpNtMaxCpus, RTCPUSET_MAX_CPUS));
317 rc = VERR_MP_TOO_MANY_CPUS;
318 }
319 }
320
321 /*
322 * Calc online mask, partition IDs and such.
323 *
324 * Also check ASSUMPTIONS:
325 *
326 * 1. Processor indexes going from 0 and up to
327 * KeQueryMaximumProcessorCountEx(ALL_PROCESSOR_GROUPS) - 1.
328 *
329 * 2. Currently valid processor indexes, i.e. accepted by
330 * KeGetProcessorIndexFromNumber & KeGetProcessorNumberFromIndex, goes
331 * from 0 thru KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS) - 1.
332 *
333 * 3. PROCESSOR_GROUP_INFO::MaximumProcessorCount gives the number of
334 * relevant bits in the ActiveProcessorMask (from LSB).
335 *
336 * 4. Active processor count found in KeQueryLogicalProcessorRelationship
337 * output matches what KeQueryActiveProcessorCountEx(ALL) returns.
338 *
 339 * 5. Active + inactive processor counts in the same output do not exceed
340 * KeQueryMaximumProcessorCountEx(ALL).
341 *
342 * Note! Processor indexes are assigned as CPUs come online and are not
 343 * preallocated according to group maximums. Since CPUs are only taken
344 * online and never offlined, this means that internal CPU bitmaps are
345 * never sparse and no time is wasted scanning unused bits.
346 *
347 * Unfortunately, it means that ring-3 cannot easily guess the index
348 * assignments when hotswapping is used, and must use GIP when available.
349 */
350 RTCpuSetEmpty(&g_rtMpNtCpuSet);
351 uint32_t cInactive = 0;
352 uint32_t cActive = 0;
353 uint32_t idxCpuMax = 0;
354 uint32_t idxCpuSetNextInactive = g_cRtMpNtMaxCpus - 1;
355 for (idxGroup = 0; RT_SUCCESS(rc) && idxGroup < pInfo->Group.ActiveGroupCount; idxGroup++)
356 {
357 const PROCESSOR_GROUP_INFO *pGrpInfo = &pInfo->Group.GroupInfo[idxGroup];
358 MY_CHECK_BREAK(pGrpInfo->MaximumProcessorCount <= MAXIMUM_PROC_PER_GROUP,
359 ("IPRT: Fatal: MaximumProcessorCount=%u\n", pGrpInfo->MaximumProcessorCount));
360 MY_CHECK_BREAK(pGrpInfo->ActiveProcessorCount <= pGrpInfo->MaximumProcessorCount,
361 ("IPRT: Fatal: ActiveProcessorCount=%u > MaximumProcessorCount=%u\n",
362 pGrpInfo->ActiveProcessorCount, pGrpInfo->MaximumProcessorCount));
363
364 g_aRtMpNtCpuGroups[idxGroup].cMaxCpus = pGrpInfo->MaximumProcessorCount;
365 g_aRtMpNtCpuGroups[idxGroup].cActiveCpus = pGrpInfo->ActiveProcessorCount;
366
367 for (uint32_t idxMember = 0; idxMember < pGrpInfo->MaximumProcessorCount; idxMember++)
368 {
369 PROCESSOR_NUMBER ProcNum;
370 ProcNum.Group = (USHORT)idxGroup;
371 ProcNum.Number = (UCHAR)idxMember;
372 ProcNum.Reserved = 0;
373 ULONG idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
374 if (idxCpu != INVALID_PROCESSOR_INDEX)
375 {
376 MY_CHECK_BREAK(idxCpu < g_cRtMpNtMaxCpus && idxCpu < RTCPUSET_MAX_CPUS, /* ASSUMPTION #1 */
377 ("IPRT: Fatal: idxCpu=%u >= g_cRtMpNtMaxCpus=%u (RTCPUSET_MAX_CPUS=%u)\n",
378 idxCpu, g_cRtMpNtMaxCpus, RTCPUSET_MAX_CPUS));
379 if (idxCpu > idxCpuMax)
380 idxCpuMax = idxCpu;
381 g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpu;
382#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
383 g_aidRtMpNtByCpuSetIdx[idxCpu] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember);
384#endif
385
386 ProcNum.Group = UINT16_MAX;
387 ProcNum.Number = UINT8_MAX;
388 ProcNum.Reserved = UINT8_MAX;
389 NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(idxCpu, &ProcNum);
390 MY_CHECK_BREAK(NT_SUCCESS(rcNt),
391 ("IPRT: Fatal: KeGetProcessorNumberFromIndex(%u,) -> %#x!\n", idxCpu, rcNt));
392 MY_CHECK_BREAK(ProcNum.Group == idxGroup && ProcNum.Number == idxMember,
393 ("IPRT: Fatal: KeGetProcessorXxxxFromYyyy roundtrip error for %#x! Group: %u vs %u, Number: %u vs %u\n",
394 idxCpu, ProcNum.Group, idxGroup, ProcNum.Number, idxMember));
395
396 if (pGrpInfo->ActiveProcessorMask & RT_BIT_64(idxMember))
397 {
398 RTCpuSetAddByIndex(&g_rtMpNtCpuSet, idxCpu);
399 cActive++;
400 }
401 else
402 cInactive++; /* (This is a little unexpected, but not important as long as things add up below.) */
403 }
404 else
405 {
406 /* Must be not present / inactive when KeGetProcessorIndexFromNumber fails. */
407 MY_CHECK_BREAK(!(pGrpInfo->ActiveProcessorMask & RT_BIT_64(idxMember)),
408 ("IPRT: Fatal: KeGetProcessorIndexFromNumber(%u/%u) failed but CPU is active! cMax=%u cActive=%u fActive=%p\n",
409 idxGroup, idxMember, pGrpInfo->MaximumProcessorCount, pGrpInfo->ActiveProcessorCount,
410 pGrpInfo->ActiveProcessorMask));
411 cInactive++;
412 if (idxCpuSetNextInactive >= g_cRtMpNtActiveCpus)
413 {
414 g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
415#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
416 g_aidRtMpNtByCpuSetIdx[idxCpuSetNextInactive] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember)
417 | RTMPNT_ID_F_INACTIVE;
418#endif
419 idxCpuSetNextInactive--;
420 }
421 }
422 }
423 }
424
425 MY_CHECK(cInactive + cActive <= g_cRtMpNtMaxCpus, /* ASSUMPTION #5 (not '==' because of inactive groups) */
426 ("IPRT: Fatal: cInactive=%u + cActive=%u > g_cRtMpNtMaxCpus=%u\n", cInactive, cActive, g_cRtMpNtMaxCpus));
427
 428 /* Deal with inactive groups using KeQueryMaximumProcessorCountEx or, as
 429 best we can, by stipulating maximum member counts
430 from the previous group. */
431 if ( RT_SUCCESS(rc)
432 && idxGroup < pInfo->Group.MaximumGroupCount)
433 {
434 uint16_t cInactiveLeft = g_cRtMpNtMaxCpus - (cInactive + cActive);
435 while (idxGroup < pInfo->Group.MaximumGroupCount)
436 {
437 uint32_t cMaxMembers = 0;
438 if (g_pfnrtKeQueryMaximumProcessorCountEx)
439 cMaxMembers = g_pfnrtKeQueryMaximumProcessorCountEx(idxGroup);
440 if (cMaxMembers != 0 || cInactiveLeft == 0)
441 AssertStmt(cMaxMembers <= cInactiveLeft, cMaxMembers = cInactiveLeft);
442 else
443 {
444 uint16_t cGroupsLeft = pInfo->Group.MaximumGroupCount - idxGroup;
445 cMaxMembers = pInfo->Group.GroupInfo[idxGroup - 1].MaximumProcessorCount;
446 while (cMaxMembers * cGroupsLeft < cInactiveLeft)
447 cMaxMembers++;
448 if (cMaxMembers > cInactiveLeft)
449 cMaxMembers = cInactiveLeft;
450 }
451
452 g_aRtMpNtCpuGroups[idxGroup].cMaxCpus = (uint16_t)cMaxMembers;
453 g_aRtMpNtCpuGroups[idxGroup].cActiveCpus = 0;
454 for (uint16_t idxMember = 0; idxMember < cMaxMembers; idxMember++)
455 if (idxCpuSetNextInactive >= g_cRtMpNtActiveCpus)
456 {
457 g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
458#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
459 g_aidRtMpNtByCpuSetIdx[idxCpuSetNextInactive] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember)
460 | RTMPNT_ID_F_INACTIVE;
461#endif
462 idxCpuSetNextInactive--;
463 }
464 cInactiveLeft -= cMaxMembers;
465 idxGroup++;
466 }
467 }
468
469 /* We're done with pInfo now, free it so we can start returning when assertions fail. */
470 RTMemFree(pInfo);
471 if (RT_FAILURE(rc)) /* MY_CHECK_BREAK sets rc. */
472 return rc;
473 MY_CHECK_RETURN(cActive >= g_cRtMpNtActiveCpus,
474 ("IPRT: Fatal: cActive=%u < g_cRtMpNtActiveCpus=%u - CPUs removed?\n", cActive, g_cRtMpNtActiveCpus),
475 VERR_INTERNAL_ERROR_3);
476 MY_CHECK_RETURN(idxCpuMax < cActive, /* ASSUMPTION #2 */
477 ("IPRT: Fatal: idCpuMax=%u >= cActive=%u! Unexpected CPU index allocation. CPUs removed?\n",
478 idxCpuMax, cActive),
479 VERR_INTERNAL_ERROR_4);
480
481 /* Retry if CPUs were added. */
482 if ( cActive != g_cRtMpNtActiveCpus
483 && cTries < 16)
484 continue;
485 MY_CHECK_RETURN(cActive == g_cRtMpNtActiveCpus, /* ASSUMPTION #4 */
486 ("IPRT: Fatal: cActive=%u != g_cRtMpNtActiveCpus=%u\n", cActive, g_cRtMpNtActiveCpus),
487 VERR_INTERNAL_ERROR_5);
488 }
489 else
490 {
491 /* Legacy: */
492 MY_CHECK_RETURN(g_cRtMpNtMaxGroups == 1, ("IPRT: Fatal: Missing KeQueryLogicalProcessorRelationship!\n"),
493 VERR_SYMBOL_NOT_FOUND);
494
495 /** @todo Is it possible that the affinity mask returned by
496 * KeQueryActiveProcessors is sparse? */
497 if (g_pfnrtKeQueryActiveProcessors)
498 RTCpuSetFromU64(&g_rtMpNtCpuSet, g_pfnrtKeQueryActiveProcessors());
499 else if (g_cRtMpNtMaxCpus < 64)
500 RTCpuSetFromU64(&g_rtMpNtCpuSet, (UINT64_C(1) << g_cRtMpNtMaxCpus) - 1);
501 else
502 {
503 MY_CHECK_RETURN(g_cRtMpNtMaxCpus == 64, ("IPRT: Fatal: g_cRtMpNtMaxCpus=%u, expect 64 or less\n", g_cRtMpNtMaxCpus),
504 VERR_MP_TOO_MANY_CPUS);
505 RTCpuSetFromU64(&g_rtMpNtCpuSet, UINT64_MAX);
506 }
507
508 g_aRtMpNtCpuGroups[0].cMaxCpus = g_cRtMpNtMaxCpus;
509 g_aRtMpNtCpuGroups[0].cActiveCpus = g_cRtMpNtMaxCpus;
510 for (unsigned i = 0; i < g_cRtMpNtMaxCpus; i++)
511 {
512 g_aRtMpNtCpuGroups[0].aidxCpuSetMembers[i] = i;
513#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
514 g_aidRtMpNtByCpuSetIdx[i] = RTMPCPUID_FROM_GROUP_AND_NUMBER(0, i);
515#endif
516 }
517 }
518
519 /*
520 * Register CPU hot plugging callback (it also counts active CPUs).
521 */
522 Assert(g_pvMpCpuChangeCallback == NULL);
523 if (g_pfnrtKeRegisterProcessorChangeCallback)
524 {
525 MY_CHECK_RETURN(g_pfnrtKeDeregisterProcessorChangeCallback,
526 ("IPRT: Fatal: KeRegisterProcessorChangeCallback without KeDeregisterProcessorChangeCallback!\n"),
527 VERR_SYMBOL_NOT_FOUND);
528
529 RTCPUSET const ActiveSetCopy = g_rtMpNtCpuSet;
530 RTCpuSetEmpty(&g_rtMpNtCpuSet);
531 uint32_t const cActiveCpus = g_cRtMpNtActiveCpus;
532 g_cRtMpNtActiveCpus = 0;
533
534 g_pvMpCpuChangeCallback = g_pfnrtKeRegisterProcessorChangeCallback(rtR0NtMpProcessorChangeCallback, NULL /*pvUser*/,
535 KE_PROCESSOR_CHANGE_ADD_EXISTING);
536 if (g_pvMpCpuChangeCallback)
537 {
538 if (cActiveCpus == g_cRtMpNtActiveCpus)
539 { /* likely */ }
540 else
541 {
542 g_pfnrtKeDeregisterProcessorChangeCallback(g_pvMpCpuChangeCallback);
543 if (cTries < 16)
544 {
545 /* Retry if CPUs were added. */
546 MY_CHECK_RETURN(g_cRtMpNtActiveCpus >= cActiveCpus,
547 ("IPRT: Fatal: g_cRtMpNtActiveCpus=%u < cActiveCpus=%u! CPUs removed?\n",
548 g_cRtMpNtActiveCpus, cActiveCpus),
549 VERR_INTERNAL_ERROR_2);
550 MY_CHECK_RETURN(g_cRtMpNtActiveCpus <= g_cRtMpNtMaxCpus,
551 ("IPRT: Fatal: g_cRtMpNtActiveCpus=%u > g_cRtMpNtMaxCpus=%u!\n",
552 g_cRtMpNtActiveCpus, g_cRtMpNtMaxCpus),
553 VERR_INTERNAL_ERROR_2);
554 continue;
555 }
556 MY_CHECK_RETURN(0, ("IPRT: Fatal: g_cRtMpNtActiveCpus=%u cActiveCpus=%u\n", g_cRtMpNtActiveCpus, cActiveCpus),
557 VERR_INTERNAL_ERROR_3);
558 }
559 }
560 else
561 {
562 AssertFailed();
563 g_rtMpNtCpuSet = ActiveSetCopy;
564 g_cRtMpNtActiveCpus = cActiveCpus;
565 }
566 }
567 break;
568 } /* Retry loop for stable active CPU count. */
569
570#undef MY_CHECK_RETURN
571
572 /*
573 * Special IPI fun for RTMpPokeCpu.
574 *
575 * On Vista and later the DPC method doesn't seem to reliably send IPIs,
576 * so we have to use alternative methods.
577 *
578 * On AMD64 We used to use the HalSendSoftwareInterrupt API (also x86 on
579 * W10+), it looks faster and more convenient to use, however we're either
580 * using it wrong or it doesn't reliably do what we want (see @bugref{8343}).
581 *
582 * The HalRequestIpip API is thus far the only alternative to KeInsertQueueDpc
583 * for doing targetted IPIs. Trouble with this API is that it changed
584 * fundamentally in Window 7 when they added support for lots of processors.
585 *
586 * If we really think we cannot use KeInsertQueueDpc, we use the broadcast IPI
587 * API KeIpiGenericCall.
588 */
589 if ( pOsVerInfo->uMajorVer > 6
590 || (pOsVerInfo->uMajorVer == 6 && pOsVerInfo->uMinorVer > 0))
591 g_pfnrtHalRequestIpiPreW7 = NULL;
592 else
593 g_pfnrtHalRequestIpiW7Plus = NULL;
594
595 if ( g_pfnrtHalRequestIpiW7Plus
596 && g_pfnrtKeInitializeAffinityEx
597 && g_pfnrtKeAddProcessorAffinityEx
598 && g_pfnrtKeGetProcessorIndexFromNumber)
599 {
600 DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingHalReqestIpiW7Plus\n");
601 g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingHalReqestIpiW7Plus;
602 }
603 else if (pOsVerInfo->uMajorVer >= 6 && g_pfnrtKeIpiGenericCall)
604 {
605 DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingBroadcastIpi\n");
606 g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingBroadcastIpi;
607 }
608 else if (g_pfnrtKeSetTargetProcessorDpc)
609 {
610 DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingDpc\n");
611 g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingDpc;
 612 /* Windows XP should always send an IPI -> VERIFY */
613 }
614 else
615 {
616 DbgPrint("IPRT: RTMpPoke => rtMpPokeCpuUsingFailureNotSupported\n");
617 Assert(pOsVerInfo->uMajorVer == 3 && pOsVerInfo->uMinorVer <= 50);
618 g_pfnrtMpPokeCpuWorker = rtMpPokeCpuUsingFailureNotSupported;
619 }
620
621 return VINF_SUCCESS;
622}
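/* Illustration only (not part of this file): RTMpPokeCpu itself is implemented further
 * down in the full file, outside this excerpt. A minimal sketch of how the worker
 * selected above would typically be dispatched (assumed shape, for illustration): */
#if 0 /* illustrative sketch */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
    /* One of the rtMpPokeCpuUsing* workers picked in rtR0MpNtInit above. */
    return g_pfnrtMpPokeCpuWorker(idCpu);
}
#endif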
623
624
625/**
626 * Called by rtR0TermNative.
627 */
628DECLHIDDEN(void) rtR0MpNtTerm(void)
629{
630 /*
631 * Deregister the processor change callback.
632 */
633 PVOID pvMpCpuChangeCallback = g_pvMpCpuChangeCallback;
634 g_pvMpCpuChangeCallback = NULL;
635 if (pvMpCpuChangeCallback)
636 {
637 AssertReturnVoid(g_pfnrtKeDeregisterProcessorChangeCallback);
638 g_pfnrtKeDeregisterProcessorChangeCallback(pvMpCpuChangeCallback);
639 }
640}
641
642
643DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
644{
645 return VINF_SUCCESS;
646}
647
648
649DECLHIDDEN(void) rtR0MpNotificationNativeTerm(void)
650{
651}
652
653
654/**
655 * Implements the NT PROCESSOR_CALLBACK_FUNCTION callback function.
656 *
 657 * This maintains the g_rtMpNtCpuSet and services the MP notification callbacks. When
658 * registered, it's called for each active CPU in the system, avoiding racing
659 * CPU hotplugging (as well as testing the callback).
660 *
661 * @param pvUser User context (not used).
662 * @param pChangeCtx Change context (in).
663 * @param prcOperationStatus Operation status (in/out).
664 *
665 * @remarks ASSUMES no concurrent execution of KeProcessorAddCompleteNotify
666 * notification callbacks. At least during callback registration
667 * callout, we're owning KiDynamicProcessorLock.
668 *
669 * @remarks When registering the handler, we first get KeProcessorAddStartNotify
670 * callbacks for all active CPUs, and after they all succeed we get the
671 * KeProcessorAddCompleteNotify callbacks.
672 */
673static VOID __stdcall rtR0NtMpProcessorChangeCallback(void *pvUser, PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeCtx,
674 PNTSTATUS prcOperationStatus)
675{
676 RT_NOREF(pvUser, prcOperationStatus);
677 switch (pChangeCtx->State)
678 {
679 /*
680 * Check whether we can deal with the CPU, failing the start operation if we
681 * can't. The checks we are doing here are to avoid complicated/impossible
682 * cases in KeProcessorAddCompleteNotify. They are really just verify specs.
683 */
684 case KeProcessorAddStartNotify:
685 {
686 NTSTATUS rcNt = STATUS_SUCCESS;
687 if (pChangeCtx->NtNumber < RTCPUSET_MAX_CPUS)
688 {
689 if (pChangeCtx->NtNumber >= g_cRtMpNtMaxCpus)
690 {
691 DbgPrint("IPRT: KeProcessorAddStartNotify failure: NtNumber=%u is higher than the max CPU count (%u)!\n",
692 pChangeCtx->NtNumber, g_cRtMpNtMaxCpus);
693 rcNt = STATUS_INTERNAL_ERROR;
694 }
695
696 /* The ProcessNumber field was introduced in Windows 7. */
697 PROCESSOR_NUMBER ProcNum;
698 if (g_pfnrtKeGetProcessorIndexFromNumber)
699 {
700 ProcNum = pChangeCtx->ProcNumber;
701 KEPROCESSORINDEX idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
702 if (idxCpu != pChangeCtx->NtNumber)
703 {
704 DbgPrint("IPRT: KeProcessorAddStartNotify failure: g_pfnrtKeGetProcessorIndexFromNumber(%u.%u) -> %u, expected %u!\n",
705 ProcNum.Group, ProcNum.Number, idxCpu, pChangeCtx->NtNumber);
706 rcNt = STATUS_INTERNAL_ERROR;
707 }
708 }
709 else
710 {
711 ProcNum.Group = 0;
712 ProcNum.Number = pChangeCtx->NtNumber;
713 }
714
715 if ( ProcNum.Group < RT_ELEMENTS(g_aRtMpNtCpuGroups)
716 && ProcNum.Number < RT_ELEMENTS(g_aRtMpNtCpuGroups[0].aidxCpuSetMembers))
717 {
718 if (ProcNum.Group >= g_cRtMpNtMaxGroups)
719 {
720 DbgPrint("IPRT: KeProcessorAddStartNotify failure: %u.%u is out of range - max groups: %u!\n",
721 ProcNum.Group, ProcNum.Number, g_cRtMpNtMaxGroups);
722 rcNt = STATUS_INTERNAL_ERROR;
723 }
724
725 if (ProcNum.Number < g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus)
726 {
727 Assert(g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] != -1);
728 if (g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] == -1)
729 {
730 DbgPrint("IPRT: KeProcessorAddStartNotify failure: Internal error! %u.%u was assigned -1 as set index!\n",
731 ProcNum.Group, ProcNum.Number);
732 rcNt = STATUS_INTERNAL_ERROR;
733 }
734
735 Assert(g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] != NIL_RTCPUID);
736 if (g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] == NIL_RTCPUID)
737 {
738 DbgPrint("IPRT: KeProcessorAddStartNotify failure: Internal error! %u (%u.%u) translates to NIL_RTCPUID!\n",
739 pChangeCtx->NtNumber, ProcNum.Group, ProcNum.Number);
740 rcNt = STATUS_INTERNAL_ERROR;
741 }
742 }
743 else
744 {
745 DbgPrint("IPRT: KeProcessorAddStartNotify failure: max processors in group %u is %u, cannot add %u to it!\n",
746 ProcNum.Group, g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus, ProcNum.Group, ProcNum.Number);
747 rcNt = STATUS_INTERNAL_ERROR;
748 }
749 }
750 else
751 {
752 DbgPrint("IPRT: KeProcessorAddStartNotify failure: %u.%u is out of range (max %u.%u)!\n",
753 ProcNum.Group, ProcNum.Number, RT_ELEMENTS(g_aRtMpNtCpuGroups), RT_ELEMENTS(g_aRtMpNtCpuGroups[0].aidxCpuSetMembers));
754 rcNt = STATUS_INTERNAL_ERROR;
755 }
756 }
757 else
758 {
759 DbgPrint("IPRT: KeProcessorAddStartNotify failure: NtNumber=%u is outside RTCPUSET_MAX_CPUS (%u)!\n",
760 pChangeCtx->NtNumber, RTCPUSET_MAX_CPUS);
761 rcNt = STATUS_INTERNAL_ERROR;
762 }
763 if (!NT_SUCCESS(rcNt))
764 *prcOperationStatus = rcNt;
765 break;
766 }
767
768 /*
 769 * Update the globals. Since we've already checked the range limits and other
 770 * limitations, we just AssertBreak here.
771 */
772 case KeProcessorAddCompleteNotify:
773 {
774 /*
775 * Calc the processor number and assert conditions checked in KeProcessorAddStartNotify.
776 */
777 AssertBreak(pChangeCtx->NtNumber < RTCPUSET_MAX_CPUS);
778 AssertBreak(pChangeCtx->NtNumber < g_cRtMpNtMaxCpus);
779 Assert(pChangeCtx->NtNumber == g_cRtMpNtActiveCpus); /* light assumption */
780 PROCESSOR_NUMBER ProcNum;
781 if (g_pfnrtKeGetProcessorIndexFromNumber)
782 {
783 ProcNum = pChangeCtx->ProcNumber;
784 AssertBreak(g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum) == pChangeCtx->NtNumber);
785 AssertBreak(ProcNum.Group < RT_ELEMENTS(g_aRtMpNtCpuGroups));
786 AssertBreak(ProcNum.Group < g_cRtMpNtMaxGroups);
787 }
788 else
789 {
790 ProcNum.Group = 0;
791 ProcNum.Number = pChangeCtx->NtNumber;
792 }
793 AssertBreak(ProcNum.Number < RT_ELEMENTS(g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers));
794 AssertBreak(ProcNum.Number < g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus);
795 AssertBreak(g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] != -1);
796 AssertBreak(g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] != NIL_RTCPUID);
797
798 /*
799 * Add ourselves to the online CPU set and update the active CPU count.
800 */
801 RTCpuSetAddByIndex(&g_rtMpNtCpuSet, pChangeCtx->NtNumber);
802 ASMAtomicIncU32(&g_cRtMpNtActiveCpus);
803
804 /*
805 * Update the group info.
806 *
807 * If the index prediction failed (real hotplugging callbacks only) we
808 * have to switch it around. This is particularly annoying when we
809 * use the index as the ID.
810 */
811#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
812 RTCPUID idCpu = RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
813 RTCPUID idOld = g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber];
814 if ((idOld & ~RTMPNT_ID_F_INACTIVE) != idCpu)
815 {
816 Assert(idOld & RTMPNT_ID_F_INACTIVE);
817 int idxDest = g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number];
818 g_aRtMpNtCpuGroups[rtMpCpuIdGetGroup(idOld)].aidxCpuSetMembers[rtMpCpuIdGetGroupMember(idOld)] = idxDest;
819 g_aidRtMpNtByCpuSetIdx[idxDest] = idOld;
820 }
821 g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] = idCpu;
822#else
823 Assert(g_aidRtMpNtByCpuSetIdx[pChangeCtx->NtNumber] == pChangeCtx->NtNumber);
824 int idxDest = g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number];
825 if ((ULONG)idxDest != pChangeCtx->NtNumber)
826 {
827 bool fFound = false;
828 uint32_t idxOldGroup = g_cRtMpNtMaxGroups;
829 while (idxOldGroup-- > 0 && !fFound)
830 {
831 uint32_t idxMember = g_aRtMpNtCpuGroups[idxOldGroup].cMaxCpus;
832 while (idxMember-- > 0)
833 if (g_aRtMpNtCpuGroups[idxOldGroup].aidxCpuSetMembers[idxMember] == (int)pChangeCtx->NtNumber)
834 {
835 g_aRtMpNtCpuGroups[idxOldGroup].aidxCpuSetMembers[idxMember] = idxDest;
836 fFound = true;
837 break;
838 }
839 }
840 Assert(fFound);
841 }
842#endif
843 g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] = pChangeCtx->NtNumber;
844
845 /*
846 * Do MP notification callbacks.
847 */
848 rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, pChangeCtx->NtNumber);
849 break;
850 }
851
852 case KeProcessorAddFailureNotify:
853 /* ignore */
854 break;
855
856 default:
857 AssertMsgFailed(("State=%u\n", pChangeCtx->State));
858 }
859}
860
861
862/**
863 * Wrapper around KeQueryLogicalProcessorRelationship.
864 *
865 * @returns IPRT status code.
866 * @param ppInfo Where to return the info. Pass to RTMemFree when done.
867 */
868static int rtR0NtInitQueryGroupRelations(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX **ppInfo)
869{
870 ULONG cbInfo = sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)
871 + g_cRtMpNtMaxGroups * sizeof(GROUP_RELATIONSHIP);
872 NTSTATUS rcNt;
873 do
874 {
875 SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)RTMemAlloc(cbInfo);
876 if (pInfo)
877 {
878 rcNt = g_pfnrtKeQueryLogicalProcessorRelationship(NULL /*pProcNumber*/, RelationGroup, pInfo, &cbInfo);
879 if (NT_SUCCESS(rcNt))
880 {
881 *ppInfo = pInfo;
882 return VINF_SUCCESS;
883 }
884
885 RTMemFree(pInfo);
886 pInfo = NULL;
887 }
888 else
889 rcNt = STATUS_NO_MEMORY;
890 } while (rcNt == STATUS_INFO_LENGTH_MISMATCH);
891 DbgPrint("IPRT: Fatal: KeQueryLogicalProcessorRelationship failed: %#x\n", rcNt);
892 AssertMsgFailed(("KeQueryLogicalProcessorRelationship failed: %#x\n", rcNt));
893 return RTErrConvertFromNtStatus(rcNt);
894}
895
896
897
898
899
900RTDECL(RTCPUID) RTMpCpuId(void)
901{
902 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
903
904#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
905 PROCESSOR_NUMBER ProcNum;
906 ProcNum.Group = 0;
907 if (g_pfnrtKeGetCurrentProcessorNumberEx)
908 {
909 ProcNum.Number = 0;
910 g_pfnrtKeGetCurrentProcessorNumberEx(&ProcNum);
911 }
912 else
913 ProcNum.Number = KeGetCurrentProcessorNumber(); /* Number is 8-bit, so we're not subject to BYTE -> WORD upgrade in WDK. */
914 return RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
915
916#else
917
918 if (g_pfnrtKeGetCurrentProcessorNumberEx)
919 {
920 KEPROCESSORINDEX idxCpu = g_pfnrtKeGetCurrentProcessorNumberEx(NULL);
921 Assert(idxCpu < RTCPUSET_MAX_CPUS);
922 return idxCpu;
923 }
924
925 return (uint8_t)KeGetCurrentProcessorNumber(); /* PCR->Number was changed from BYTE to WORD in the WDK, thus the cast. */
926#endif
927}
928
929
930RTDECL(int) RTMpCurSetIndex(void)
931{
932#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
933 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
934
935 if (g_pfnrtKeGetCurrentProcessorNumberEx)
936 {
937 KEPROCESSORINDEX idxCpu = g_pfnrtKeGetCurrentProcessorNumberEx(NULL);
938 Assert(idxCpu < RTCPUSET_MAX_CPUS);
939 return idxCpu;
940 }
941 return (uint8_t)KeGetCurrentProcessorNumber(); /* PCR->Number was changed from BYTE to WORD in the WDK, thus the cast. */
942#else
943 return (int)RTMpCpuId();
944#endif
945}
946
947
948RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
949{
950#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
951 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
952
953 PROCESSOR_NUMBER ProcNum = { 0 , 0, 0 };
954 KEPROCESSORINDEX idxCpu = g_pfnrtKeGetCurrentProcessorNumberEx(&ProcNum);
955 Assert(idxCpu < RTCPUSET_MAX_CPUS);
956 *pidCpu = RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
957 return idxCpu;
958#else
959 return *pidCpu = RTMpCpuId();
960#endif
961}
962
963
964RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
965{
966#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
967 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
968
969 if (idCpu != NIL_RTCPUID)
970 {
971 if (g_pfnrtKeGetProcessorIndexFromNumber)
972 {
973 PROCESSOR_NUMBER ProcNum;
974 ProcNum.Group = rtMpCpuIdGetGroup(idCpu);
975 ProcNum.Number = rtMpCpuIdGetGroupMember(idCpu);
976 ProcNum.Reserved = 0;
977 KEPROCESSORINDEX idxCpu = g_pfnrtKeGetProcessorIndexFromNumber(&ProcNum);
978 if (idxCpu != INVALID_PROCESSOR_INDEX)
979 {
980 Assert(idxCpu < g_cRtMpNtMaxCpus);
981 Assert((ULONG)g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number] == idxCpu);
982 return idxCpu;
983 }
984
 985 /* Since NT assigns indexes as the CPUs come online, we cannot produce a consistent
 986 ID <-> index mapping for not-yet-onlined CPUs. We just have to do our best... */
987 if ( ProcNum.Group < g_cRtMpNtMaxGroups
988 && ProcNum.Number < g_aRtMpNtCpuGroups[ProcNum.Group].cMaxCpus)
989 return g_aRtMpNtCpuGroups[ProcNum.Group].aidxCpuSetMembers[ProcNum.Number];
990 }
991 else if (rtMpCpuIdGetGroup(idCpu) == 0)
992 return rtMpCpuIdGetGroupMember(idCpu);
993 }
994 return -1;
995#else
996 /* 1:1 mapping, just do range checks. */
997 return idCpu < RTCPUSET_MAX_CPUS ? (int)idCpu : -1;
998#endif
999}
1000
1001
1002RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
1003{
1004#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
1005 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
1006
1007 if ((unsigned)iCpu < g_cRtMpNtMaxCpus)
1008 {
1009 if (g_pfnrtKeGetProcessorIndexFromNumber)
1010 {
1011 PROCESSOR_NUMBER ProcNum = { 0, 0, 0 };
1012 NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(iCpu, &ProcNum);
1013 if (NT_SUCCESS(rcNt))
1014 {
1015 Assert(ProcNum.Group <= g_cRtMpNtMaxGroups);
1016 Assert( (g_aidRtMpNtByCpuSetIdx[iCpu] & ~RTMPNT_ID_F_INACTIVE)
1017 == RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number));
1018 return RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number);
1019 }
1020 }
1021 return g_aidRtMpNtByCpuSetIdx[iCpu];
1022 }
1023 return NIL_RTCPUID;
1024#else
1025 /* 1:1 mapping, just do range checks. */
1026 return (unsigned)iCpu < RTCPUSET_MAX_CPUS ? iCpu : NIL_RTCPUID;
1027#endif
1028}
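/* Illustration only (not part of this file): a short caller-side sketch showing how the
 * two mappings above are typically combined to walk all possible CPUs and test their
 * online state (names are hypothetical): */
#if 0 /* illustrative sketch */
static void rtMpNtExampleEnumCpus(void)
{
    RTCPUID const cMax = RTMpGetCount();
    for (int iCpuSet = 0; iCpuSet < (int)cMax; iCpuSet++)
    {
        RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpuSet);
        if (idCpu != NIL_RTCPUID)
            DbgPrint("IPRT: set index %d -> idCpu=%#x online=%d\n",
                     iCpuSet, idCpu, (int)RTMpIsCpuOnline(idCpu));
    }
}
#endif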
1029
1030
1031RTDECL(int) RTMpSetIndexFromCpuGroupMember(uint32_t idxGroup, uint32_t idxMember)
1032{
1033 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
1034
1035 if (idxGroup < g_cRtMpNtMaxGroups)
1036 if (idxMember < g_aRtMpNtCpuGroups[idxGroup].cMaxCpus)
1037 return g_aRtMpNtCpuGroups[idxGroup].aidxCpuSetMembers[idxMember];
1038 return -1;
1039}
1040
1041
1042RTDECL(uint32_t) RTMpGetCpuGroupCounts(uint32_t idxGroup, uint32_t *pcActive)
1043{
1044 if (idxGroup < g_cRtMpNtMaxGroups)
1045 {
1046 if (pcActive)
1047 *pcActive = g_aRtMpNtCpuGroups[idxGroup].cActiveCpus;
1048 return g_aRtMpNtCpuGroups[idxGroup].cMaxCpus;
1049 }
1050 if (pcActive)
1051 *pcActive = 0;
1052 return 0;
1053}
1054
1055
1056RTDECL(uint32_t) RTMpGetMaxCpuGroupCount(void)
1057{
1058 return g_cRtMpNtMaxGroups;
1059}
1060
1061
1062RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
1063{
1064 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
1065
1066#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
1067 return RTMPCPUID_FROM_GROUP_AND_NUMBER(g_cRtMpNtMaxGroups - 1, g_aRtMpNtCpuGroups[g_cRtMpNtMaxGroups - 1].cMaxCpus - 1);
1068#else
 1069 /* According to MSDN the processor indexes go from 0 to the maximum
 1070 number of CPUs in the system. We've checked this in initterm-r0drv-nt.cpp. */
1071 return g_cRtMpNtMaxCpus - 1;
1072#endif
1073}
1074
1075
1076RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
1077{
1078 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
1079 return RTCpuSetIsMember(&g_rtMpNtCpuSet, idCpu);
1080}
1081
1082
1083RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
1084{
1085 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
1086
1087#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
1088 if (idCpu != NIL_RTCPUID)
1089 {
1090 unsigned idxGroup = rtMpCpuIdGetGroup(idCpu);
1091 if (idxGroup < g_cRtMpNtMaxGroups)
1092 return rtMpCpuIdGetGroupMember(idCpu) < g_aRtMpNtCpuGroups[idxGroup].cMaxCpus;
1093 }
1094 return false;
1095
1096#else
1097 /* A possible CPU ID is one with a value lower than g_cRtMpNtMaxCpus (see
1098 comment in RTMpGetMaxCpuId). */
1099 return idCpu < g_cRtMpNtMaxCpus;
1100#endif
1101}
1102
1103
1104
1105RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
1106{
1107 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
1108
 1109 /* The set of possible CPU IDs (/indexes) runs from 0 up to
1110 g_cRtMpNtMaxCpus (see comment in RTMpGetMaxCpuId). */
1111 RTCpuSetEmpty(pSet);
1112 int idxCpu = g_cRtMpNtMaxCpus;
1113 while (idxCpu-- > 0)
1114 RTCpuSetAddByIndex(pSet, idxCpu);
1115 return pSet;
1116}
1117
1118
1119RTDECL(RTCPUID) RTMpGetCount(void)
1120{
1121 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
1122 return g_cRtMpNtMaxCpus;
1123}
1124
1125
1126RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
1127{
1128 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
1129
1130 *pSet = g_rtMpNtCpuSet;
1131 return pSet;
1132}
1133
1134
1135RTDECL(RTCPUID) RTMpGetOnlineCount(void)
1136{
1137 RTCPUSET Set;
1138 RTMpGetOnlineSet(&Set);
1139 return RTCpuSetCount(&Set);
1140}
1141
1142
1143RTDECL(RTCPUID) RTMpGetOnlineCoreCount(void)
1144{
1145 /** @todo fix me */
1146 return RTMpGetOnlineCount();
1147}
1148
1149
1150
1151#if 0
1152/* Experiment with checking the undocumented KPRCB structure
1153 * 'dt nt!_kprcb 0xaddress' shows the layout
1154 */
1155typedef struct
1156{
1157 LIST_ENTRY DpcListHead;
1158 ULONG_PTR DpcLock;
1159 volatile ULONG DpcQueueDepth;
1160 ULONG DpcQueueCount;
1161} KDPC_DATA, *PKDPC_DATA;
1162
1163RTDECL(bool) RTMpIsCpuWorkPending(void)
1164{
1165 uint8_t *pkprcb;
1166 PKDPC_DATA pDpcData;
1167
1168 _asm {
1169 mov eax, fs:0x20
1170 mov pkprcb, eax
1171 }
1172 pDpcData = (PKDPC_DATA)(pkprcb + 0x19e0);
1173 if (pDpcData->DpcQueueDepth)
1174 return true;
1175
1176 pDpcData++;
1177 if (pDpcData->DpcQueueDepth)
1178 return true;
1179 return false;
1180}
1181#else
1182RTDECL(bool) RTMpIsCpuWorkPending(void)
1183{
1184 /** @todo not implemented */
1185 return false;
1186}
1187#endif
1188
1189
1190/**
1191 * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
1192 * the RTMpOnAll case.
1193 *
1194 * @param uUserCtx The user context argument (PRTMPARGS).
1195 */
1196static ULONG_PTR rtmpNtOnAllBroadcastIpiWrapper(ULONG_PTR uUserCtx)
1197{
1198 PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
1199 /*ASMAtomicIncU32(&pArgs->cHits); - not needed */
1200 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
1201 return 0;
1202}
1203
1204
1205/**
1206 * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
1207 * the RTMpOnOthers case.
1208 *
1209 * @param uUserCtx The user context argument (PRTMPARGS).
1210 */
1211static ULONG_PTR rtmpNtOnOthersBroadcastIpiWrapper(ULONG_PTR uUserCtx)
1212{
1213 PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
1214 RTCPUID idCpu = RTMpCpuId();
1215 if (pArgs->idCpu != idCpu)
1216 {
1217 /*ASMAtomicIncU32(&pArgs->cHits); - not needed */
1218 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
1219 }
1220 return 0;
1221}
1222
1223
1224/**
1225 * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
1226 * the RTMpOnPair case.
1227 *
1228 * @param uUserCtx The user context argument (PRTMPARGS).
1229 */
1230static ULONG_PTR rtmpNtOnPairBroadcastIpiWrapper(ULONG_PTR uUserCtx)
1231{
1232 PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
1233 RTCPUID idCpu = RTMpCpuId();
1234 if ( pArgs->idCpu == idCpu
1235 || pArgs->idCpu2 == idCpu)
1236 {
1237 ASMAtomicIncU32(&pArgs->cHits);
1238 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
1239 }
1240 return 0;
1241}
1242
1243
1244/**
1245 * Wrapper between the native KIPI_BROADCAST_WORKER and IPRT's PFNRTMPWORKER for
1246 * the RTMpOnSpecific case.
1247 *
1248 * @param uUserCtx The user context argument (PRTMPARGS).
1249 */
1250static ULONG_PTR rtmpNtOnSpecificBroadcastIpiWrapper(ULONG_PTR uUserCtx)
1251{
1252 PRTMPARGS pArgs = (PRTMPARGS)uUserCtx;
1253 RTCPUID idCpu = RTMpCpuId();
1254 if (pArgs->idCpu == idCpu)
1255 {
1256 ASMAtomicIncU32(&pArgs->cHits);
1257 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
1258 }
1259 return 0;
1260}
1261
1262
1263/**
1264 * Internal worker for the RTMpOn* APIs using KeIpiGenericCall.
1265 *
1266 * @returns VINF_SUCCESS.
1267 * @param pfnWorker The callback.
1268 * @param pvUser1 User argument 1.
1269 * @param pvUser2 User argument 2.
1270 * @param pfnNativeWrapper The wrapper between the NT and IPRT callbacks.
1271 * @param idCpu First CPU to match, ultimately specific to the
1272 * pfnNativeWrapper used.
1273 * @param idCpu2 Second CPU to match, ultimately specific to the
1274 * pfnNativeWrapper used.
 1275 * @param pcHits Where to return the number of CPUs hit. Optional.
1276 */
1277static int rtMpCallUsingBroadcastIpi(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
1278 PKIPI_BROADCAST_WORKER pfnNativeWrapper, RTCPUID idCpu, RTCPUID idCpu2,
1279 uint32_t *pcHits)
1280{
1281 RTMPARGS Args;
1282 Args.pfnWorker = pfnWorker;
1283 Args.pvUser1 = pvUser1;
1284 Args.pvUser2 = pvUser2;
1285 Args.idCpu = idCpu;
1286 Args.idCpu2 = idCpu2;
1287 Args.cRefs = 0;
1288 Args.cHits = 0;
1289
1290 AssertPtr(g_pfnrtKeIpiGenericCall);
1291 g_pfnrtKeIpiGenericCall(pfnNativeWrapper, (uintptr_t)&Args);
1292 if (pcHits)
1293 *pcHits = Args.cHits;
1294 return VINF_SUCCESS;
1295}
1296
1297
1298/**
 1299 * Wrapper between the native NT per-cpu DPC callbacks and PFNRTMPWORKER.
1300 *
1301 * @param Dpc DPC object
1302 * @param DeferredContext Context argument specified by KeInitializeDpc
1303 * @param SystemArgument1 Argument specified by KeInsertQueueDpc
1304 * @param SystemArgument2 Argument specified by KeInsertQueueDpc
1305 */
1306static VOID rtmpNtDPCWrapper(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
1307{
1308 PRTMPARGS pArgs = (PRTMPARGS)DeferredContext;
1309 RT_NOREF3(Dpc, SystemArgument1, SystemArgument2);
1310
1311 ASMAtomicIncU32(&pArgs->cHits);
1312 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
1313
1314 /* Dereference the argument structure. */
1315 int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
1316 Assert(cRefs >= 0);
1317 if (cRefs == 0)
1318 RTMemFree(pArgs);
1319}
1320
1321
1322/**
1323 * Wrapper around KeSetTargetProcessorDpcEx / KeSetTargetProcessorDpc.
1324 *
1325 * This is shared with the timer code.
1326 *
1327 * @returns IPRT status code (errors are asserted).
1328 * @param pDpc The DPC.
1329 * @param idCpu The ID of the new target CPU.
1330 */
1331DECLHIDDEN(int) rtMpNtSetTargetProcessorDpc(KDPC *pDpc, RTCPUID idCpu)
1332{
1333 if (g_pfnrtKeSetTargetProcessorDpcEx)
1334 {
 1335 /* Convert to the stupid processor number (bet KeSetTargetProcessorDpcEx does
1336 the reverse conversion internally). */
1337 PROCESSOR_NUMBER ProcNum;
1338 NTSTATUS rcNt = g_pfnrtKeGetProcessorNumberFromIndex(RTMpCpuIdToSetIndex(idCpu), &ProcNum);
1339 AssertMsgReturn(NT_SUCCESS(rcNt),
1340 ("KeGetProcessorNumberFromIndex(%u) -> %#x\n", idCpu, rcNt),
1341 RTErrConvertFromNtStatus(rcNt));
1342
1343 rcNt = g_pfnrtKeSetTargetProcessorDpcEx(pDpc, &ProcNum);
1344 AssertMsgReturn(NT_SUCCESS(rcNt),
1345 ("KeSetTargetProcessorDpcEx(,%u(%u/%u)) -> %#x\n", idCpu, ProcNum.Group, ProcNum.Number, rcNt),
1346 RTErrConvertFromNtStatus(rcNt));
1347 }
1348 else if (g_pfnrtKeSetTargetProcessorDpc)
1349 g_pfnrtKeSetTargetProcessorDpc(pDpc, RTMpCpuIdToSetIndex(idCpu));
1350 else
1351 return VERR_NOT_SUPPORTED;
1352 return VINF_SUCCESS;
1353}
1354
1355
1356/**
1357 * Internal worker for the RTMpOn* APIs.
1358 *
1359 * @returns IPRT status code.
1360 * @param pfnWorker The callback.
1361 * @param pvUser1 User argument 1.
1362 * @param pvUser2 User argument 2.
1363 * @param enmCpuid What to do / is idCpu valid.
1364 * @param idCpu Used if enmCpuid is RT_NT_CPUID_SPECIFIC or
1365 * RT_NT_CPUID_PAIR, otherwise ignored.
1366 * @param idCpu2 Used if enmCpuid is RT_NT_CPUID_PAIR, otherwise ignored.
 1367 * @param pcHits Where to return the number of CPUs hit. Optional.
1368 */
1369static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
1370 RT_NT_CPUID enmCpuid, RTCPUID idCpu, RTCPUID idCpu2, uint32_t *pcHits)
1371{
1372#if 0
1373 /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
1374 * driver verifier doesn't complain...
1375 */
1376 AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
1377#endif
1378 /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
1379 if (!g_pfnrtNtKeFlushQueuedDpcs)
1380 return VERR_NOT_SUPPORTED;
1381
1382 /*
1383 * Make a copy of the active CPU set and figure out how many KDPCs we really need.
 1384 * We must not try to set up DPCs for CPUs which aren't there, because that may fail.
1385 */
1386 RTCPUSET OnlineSet = g_rtMpNtCpuSet;
1387 uint32_t cDpcsNeeded;
1388 switch (enmCpuid)
1389 {
1390 case RT_NT_CPUID_SPECIFIC:
1391 cDpcsNeeded = 1;
1392 break;
1393 case RT_NT_CPUID_PAIR:
1394 cDpcsNeeded = 2;
1395 break;
1396 default:
1397 do
1398 {
1399 cDpcsNeeded = g_cRtMpNtActiveCpus;
1400 OnlineSet = g_rtMpNtCpuSet;
1401 } while (cDpcsNeeded != g_cRtMpNtActiveCpus);
1402 break;
1403 }
1404
1405 /*
1406 * Allocate an RTMPARGS structure followed by cDpcsNeeded KDPCs
1407 * and initialize them.
1408 */
1409 PRTMPARGS pArgs = (PRTMPARGS)RTMemAllocZ(sizeof(RTMPARGS) + cDpcsNeeded * sizeof(KDPC));
1410 if (!pArgs)
1411 return VERR_NO_MEMORY;
1412
1413 pArgs->pfnWorker = pfnWorker;
1414 pArgs->pvUser1 = pvUser1;
1415 pArgs->pvUser2 = pvUser2;
1416 pArgs->idCpu = NIL_RTCPUID;
1417 pArgs->idCpu2 = NIL_RTCPUID;
1418 pArgs->cHits = 0;
1419 pArgs->cRefs = 1;
1420
1421 int rc;
1422 KDPC *paExecCpuDpcs = (KDPC *)(pArgs + 1);
1423 if (enmCpuid == RT_NT_CPUID_SPECIFIC)
1424 {
1425 KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
1426 if (g_pfnrtKeSetImportanceDpc)
1427 g_pfnrtKeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
1428 rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[0], idCpu);
1429 pArgs->idCpu = idCpu;
1430 }
1431 else if (enmCpuid == RT_NT_CPUID_PAIR)
1432 {
1433 KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
1434 if (g_pfnrtKeSetImportanceDpc)
1435 g_pfnrtKeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
1436 rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[0], idCpu);
1437 pArgs->idCpu = idCpu;
1438
1439 KeInitializeDpc(&paExecCpuDpcs[1], rtmpNtDPCWrapper, pArgs);
1440 if (g_pfnrtKeSetImportanceDpc)
1441 g_pfnrtKeSetImportanceDpc(&paExecCpuDpcs[1], HighImportance);
1442 if (RT_SUCCESS(rc))
1443 rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[1], (int)idCpu2);
1444 pArgs->idCpu2 = idCpu2;
1445 }
1446 else
1447 {
1448 rc = VINF_SUCCESS;
1449 for (uint32_t i = 0; i < cDpcsNeeded && RT_SUCCESS(rc); i++)
1450 if (RTCpuSetIsMemberByIndex(&OnlineSet, i))
1451 {
1452 KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
1453 if (g_pfnrtKeSetImportanceDpc)
1454 g_pfnrtKeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
1455 rc = rtMpNtSetTargetProcessorDpc(&paExecCpuDpcs[i], RTMpCpuIdFromSetIndex(i));
1456 }
1457 }
1458 if (RT_FAILURE(rc))
1459 {
1460 RTMemFree(pArgs);
1461 return rc;
1462 }
1463
1464 /*
1465 * Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
1466 * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
1467 */
1468 KIRQL oldIrql;
1469 KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
1470
1471 /*
1472 * We cannot do other than assume a 1:1 relationship between the
 1473 * affinity mask and the processor numbers despite the warnings in the docs.
1474 * If someone knows a better way to get this done, please let bird know.
1475 */
1476 ASMCompilerBarrier(); /* paranoia */
1477 if (enmCpuid == RT_NT_CPUID_SPECIFIC)
1478 {
1479 ASMAtomicIncS32(&pArgs->cRefs);
1480 BOOLEAN fRc = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
1481 Assert(fRc); NOREF(fRc);
1482 }
1483 else if (enmCpuid == RT_NT_CPUID_PAIR)
1484 {
1485 ASMAtomicIncS32(&pArgs->cRefs);
1486 BOOLEAN fRc = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
1487 Assert(fRc); NOREF(fRc);
1488
1489 ASMAtomicIncS32(&pArgs->cRefs);
1490 fRc = KeInsertQueueDpc(&paExecCpuDpcs[1], 0, 0);
1491 Assert(fRc); NOREF(fRc);
1492 }
1493 else
1494 {
1495 uint32_t iSelf = RTMpCurSetIndex();
1496 for (uint32_t i = 0; i < cDpcsNeeded; i++)
1497 {
1498 if ( (i != iSelf)
1499 && RTCpuSetIsMemberByIndex(&OnlineSet, i))
1500 {
1501 ASMAtomicIncS32(&pArgs->cRefs);
1502 BOOLEAN fRc = KeInsertQueueDpc(&paExecCpuDpcs[i], 0, 0);
1503 Assert(fRc); NOREF(fRc);
1504 }
1505 }
1506 if (enmCpuid != RT_NT_CPUID_OTHERS)
1507 pfnWorker(iSelf, pvUser1, pvUser2);
1508 }
1509
1510 KeLowerIrql(oldIrql);
1511
1512 /*
1513 * Flush all DPCs and wait for completion. (can take long!)
1514 */
1515 /** @todo Consider changing this to an active wait using some atomic inc/dec
1516 * stuff (and check for the current cpu above in the specific case). */
1517 /** @todo Seems KeFlushQueuedDpcs doesn't wait for the DPCs to be completely
1518 * executed. Seen pArgs being freed while some CPU was using it before
1519 * cRefs was added. */
1520 if (g_pfnrtNtKeFlushQueuedDpcs)
1521 g_pfnrtNtKeFlushQueuedDpcs();
1522
1523 if (pcHits)
1524 *pcHits = pArgs->cHits;
1525
1526 /* Dereference the argument structure. */
1527 int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
1528 Assert(cRefs >= 0);
1529 if (cRefs == 0)
1530 RTMemFree(pArgs);
1531
1532 return VINF_SUCCESS;
1533}
1534
1535
1536RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
1537{
1538 if (g_pfnrtKeIpiGenericCall)
1539 return rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnAllBroadcastIpiWrapper,
1540 NIL_RTCPUID, NIL_RTCPUID, NULL);
1541 return rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_ALL, NIL_RTCPUID, NIL_RTCPUID, NULL);
1542}
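/* Illustration only (not part of this file): a caller-side sketch of the PFNRTMPWORKER
 * contract used by RTMpOnAll and friends - the worker is invoked on each CPU at elevated
 * IRQL with the two opaque user arguments (names are hypothetical): */
#if 0 /* illustrative sketch */
static DECLCALLBACK(void) rtMpNtExampleWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    RT_NOREF(idCpu, pvUser2);
    ASMAtomicIncU32((uint32_t volatile *)pvUser1); /* count how many CPUs ran us */
}

static int rtMpNtExampleCountCpus(uint32_t *pcCpusSeen)
{
    uint32_t volatile cSeen = 0;
    int rc = RTMpOnAll(rtMpNtExampleWorker, (void *)&cSeen, NULL);
    if (RT_SUCCESS(rc))
        *pcCpusSeen = cSeen;
    return rc;
}
#endif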
1543
1544
1545RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
1546{
1547 if (g_pfnrtKeIpiGenericCall)
1548 return rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnOthersBroadcastIpiWrapper,
1549 NIL_RTCPUID, NIL_RTCPUID, NULL);
1550 return rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_OTHERS, NIL_RTCPUID, NIL_RTCPUID, NULL);
1551}
1552
1553
1554RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
1555{
1556 int rc;
1557 AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
1558 AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
1559 if ((fFlags & RTMPON_F_CONCURRENT_EXEC) && !g_pfnrtKeIpiGenericCall)
1560 return VERR_NOT_SUPPORTED;
1561
1562 /*
1563 * Check that both CPUs are online before doing the broadcast call.
1564 */
1565 if ( RTMpIsCpuOnline(idCpu1)
1566 && RTMpIsCpuOnline(idCpu2))
1567 {
1568 /*
1569 * The broadcast IPI isn't quite as bad as it could have been, because
1570 * it looks like Windows doesn't synchronize CPUs on the way out; they
1571 * seem to get back to normal work while the pair is still busy.
1572 */
1573 uint32_t cHits = 0;
1574 if (g_pfnrtKeIpiGenericCall)
1575 rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnPairBroadcastIpiWrapper, idCpu1, idCpu2, &cHits);
1576 else
1577 rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_PAIR, idCpu1, idCpu2, &cHits);
1578 if (RT_SUCCESS(rc))
1579 {
1580 Assert(cHits <= 2);
1581 if (cHits == 2)
1582 rc = VINF_SUCCESS;
1583 else if (cHits == 1)
1584 rc = VERR_NOT_ALL_CPUS_SHOWED;
1585 else if (cHits == 0)
1586 rc = VERR_CPU_OFFLINE;
1587 else
1588 rc = VERR_CPU_IPE_1;
1589 }
1590 }
1591 /*
1592 * A CPU must be present for us to consider it merely offline.
1593 */
1594 else if ( RTMpIsCpuPresent(idCpu1)
1595 && RTMpIsCpuPresent(idCpu2))
1596 rc = VERR_CPU_OFFLINE;
1597 else
1598 rc = VERR_CPU_NOT_FOUND;
1599 return rc;
1600}
1601
1602
1603RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
1604{
1605 return g_pfnrtKeIpiGenericCall != NULL;
1606}
1607
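/*
 * Illustrative usage sketch (not part of the original file): a minimal,
 * hypothetical example of how a caller might drive the RTMpOn* API above.
 * The worker and helper names below are made up for illustration and are
 * not IPRT definitions; the block is guarded by #if 0 so it serves as
 * documentation only.
 */
#if 0
/** Example worker: runs on each targeted CPU at elevated IRQL, so it must be
 *  short and non-blocking. */
static DECLCALLBACK(void) rtMpNtExampleWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    uint32_t volatile *pcInvocations = (uint32_t volatile *)pvUser1;
    ASMAtomicIncU32(pcInvocations);
    NOREF(idCpu); NOREF(pvUser2);
}

static int rtMpNtExampleUsage(void)
{
    uint32_t cInvocations = 0;
    /* Run the worker on every online CPU, then only on the CPU at set index 0. */
    int rc = RTMpOnAll(rtMpNtExampleWorker, &cInvocations, NULL);
    if (RT_SUCCESS(rc))
        rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(0), rtMpNtExampleWorker, &cInvocations, NULL);
    return rc;
}
#endif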
1608
1609/**
1610 * Releases a reference to a RTMPNTONSPECIFICARGS heap allocation, freeing it
1611 * when the last reference is released.
1612 */
1613DECLINLINE(void) rtMpNtOnSpecificRelease(PRTMPNTONSPECIFICARGS pArgs)
1614{
1615 uint32_t cRefs = ASMAtomicDecU32(&pArgs->cRefs);
1616 AssertMsg(cRefs <= 1, ("cRefs=%#x\n", cRefs));
1617 if (cRefs == 0)
1618 RTMemFree(pArgs);
1619}
1620
1621
1622/**
1623 * Wrapper between the native nt per-cpu callbacks and PFNRTWORKER
1624 *
1625 * @param Dpc DPC object
1626 * @param DeferredContext Context argument specified by KeInitializeDpc
1627 * @param SystemArgument1 Argument specified by KeInsertQueueDpc
1628 * @param SystemArgument2 Argument specified by KeInsertQueueDpc
1629 */
1630static VOID rtMpNtOnSpecificDpcWrapper(IN PKDPC Dpc, IN PVOID DeferredContext,
1631 IN PVOID SystemArgument1, IN PVOID SystemArgument2)
1632{
1633 PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)DeferredContext;
1634 RT_NOREF3(Dpc, SystemArgument1, SystemArgument2);
1635
1636 ASMAtomicWriteBool(&pArgs->fExecuting, true);
1637
1638 pArgs->CallbackArgs.pfnWorker(RTMpCpuId(), pArgs->CallbackArgs.pvUser1, pArgs->CallbackArgs.pvUser2);
1639
1640 ASMAtomicWriteBool(&pArgs->fDone, true);
1641 KeSetEvent(&pArgs->DoneEvt, 1 /*PriorityIncrement*/, FALSE /*Wait*/);
1642
1643 rtMpNtOnSpecificRelease(pArgs);
1644}
1645
1646
1647RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
1648{
1649 /*
1650 * Don't try mess with an offline CPU.
1651 */
1652 if (!RTMpIsCpuOnline(idCpu))
1653 return !RTMpIsCpuPossible(idCpu)
1654 ? VERR_CPU_NOT_FOUND
1655 : VERR_CPU_OFFLINE;
1656
1657 /*
1658 * Use the broadcast IPI routine if there are no more than two CPUs online,
1659 * or if the current IRQL is unsuitable for KeWaitForSingleObject.
1660 */
1661 int rc;
1662 uint32_t cHits = 0;
1663 if ( g_pfnrtKeIpiGenericCall
1664 && ( RTMpGetOnlineCount() <= 2
1665 || KeGetCurrentIrql() > APC_LEVEL)
1666 )
1667 {
1668 rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnSpecificBroadcastIpiWrapper,
1669 idCpu, NIL_RTCPUID, &cHits);
1670 if (RT_SUCCESS(rc))
1671 {
1672 if (cHits == 1)
1673 return VINF_SUCCESS;
1674 rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
1675 }
1676 return rc;
1677 }
1678
1679#if 0
1680 rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu, NIL_RTCPUID, &cHits);
1681 if (RT_SUCCESS(rc))
1682 {
1683 if (cHits == 1)
1684 return VINF_SUCCESS;
1685 rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
1686 }
1687 return rc;
1688
1689#else
1690 /*
1691 * Initialize the argument package and the objects within it.
1692 * The package is reference counted to avoid unnecessary spinning to
1693 * synchronize cleanup and prevent stack corruption.
1694 */
1695 PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)RTMemAllocZ(sizeof(*pArgs));
1696 if (!pArgs)
1697 return VERR_NO_MEMORY;
1698 pArgs->cRefs = 2;
1699 pArgs->fExecuting = false;
1700 pArgs->fDone = false;
1701 pArgs->CallbackArgs.pfnWorker = pfnWorker;
1702 pArgs->CallbackArgs.pvUser1 = pvUser1;
1703 pArgs->CallbackArgs.pvUser2 = pvUser2;
1704 pArgs->CallbackArgs.idCpu = idCpu;
1705 pArgs->CallbackArgs.cHits = 0;
1706 pArgs->CallbackArgs.cRefs = 2;
1707 KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /* not signalled */);
1708 KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);
1709 if (g_pfnrtKeSetImportanceDpc)
1710 g_pfnrtKeSetImportanceDpc(&pArgs->Dpc, HighImportance);
1711 rc = rtMpNtSetTargetProcessorDpc(&pArgs->Dpc, idCpu);
1712 if (RT_FAILURE(rc))
1713 {
1714 RTMemFree(pArgs);
1715 return rc;
1716 }
1717
1718 /*
1719 * Disable preemption while we check the current processor and insert the DPC.
1720 */
1721 KIRQL bOldIrql;
1722 KeRaiseIrql(DISPATCH_LEVEL, &bOldIrql);
1723 ASMCompilerBarrier(); /* paranoia */
1724
1725 if (RTMpCpuId() == idCpu)
1726 {
1727 /* Just execute the callback on the current CPU. */
1728 pfnWorker(idCpu, pvUser1, pvUser2);
1729 KeLowerIrql(bOldIrql);
1730
1731 RTMemFree(pArgs);
1732 return VINF_SUCCESS;
1733 }
1734
1735 /* Different CPU, so queue it if the CPU is still online. */
1736 if (RTMpIsCpuOnline(idCpu))
1737 {
1738 BOOLEAN fRc = KeInsertQueueDpc(&pArgs->Dpc, 0, 0);
1739 Assert(fRc); NOREF(fRc);
1740 KeLowerIrql(bOldIrql);
1741
1742 uint64_t const nsRealWaitTS = RTTimeNanoTS();
1743
1744 /*
1745 * Wait actively for a while in case the CPU/thread responds quickly.
1746 */
1747 uint32_t cLoopsLeft = 0x20000;
1748 while (cLoopsLeft-- > 0)
1749 {
1750 if (pArgs->fDone)
1751 {
1752 rtMpNtOnSpecificRelease(pArgs);
1753 return VINF_SUCCESS;
1754 }
1755 ASMNopPause();
1756 }
1757
1758 /*
1759 * It didn't respond, so wait on the event object, poking the CPU if it's slow.
1760 */
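/* Note: a negative KeWaitForSingleObject timeout is a relative time, given in 100ns units. */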
1761 LARGE_INTEGER Timeout;
1762 Timeout.QuadPart = -10000; /* 1ms */
1763 NTSTATUS rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
1764 if (rcNt == STATUS_SUCCESS)
1765 {
1766 rtMpNtOnSpecificRelease(pArgs);
1767 return VINF_SUCCESS;
1768 }
1769
1770 /* If it hasn't responded yet, maybe poke it and wait some more. */
1771 if (rcNt == STATUS_TIMEOUT)
1772 {
1773 if ( !pArgs->fExecuting
1774 && ( g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiW7Plus
1775 || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiPreW7))
1776 RTMpPokeCpu(idCpu);
1777
1778 Timeout.QuadPart = -1280000; /* 128ms */
1779 rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
1780 if (rcNt == STATUS_SUCCESS)
1781 {
1782 rtMpNtOnSpecificRelease(pArgs);
1783 return VINF_SUCCESS;
1784 }
1785 }
1786
1787 /*
1788 * Something weird is happening; try to bail out.
1789 */
1790 if (KeRemoveQueueDpc(&pArgs->Dpc))
1791 {
1792 RTMemFree(pArgs); /* DPC was still queued, so we can return without further ado. */
1793 LogRel(("RTMpOnSpecific(%#x): Not processed after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
1794 }
1795 else
1796 {
1797 /* DPC is running, wait a good while for it to complete. */
1798 LogRel(("RTMpOnSpecific(%#x): Still running after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
1799
1800 Timeout.QuadPart = -30*1000*1000*10; /* 30 seconds */
1801 rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
1802 if (rcNt != STATUS_SUCCESS)
1803 LogRel(("RTMpOnSpecific(%#x): Giving up on running worker after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
1804 }
1805 rc = RTErrConvertFromNtStatus(rcNt);
1806 }
1807 else
1808 {
1809 /* CPU is offline. */
1810 KeLowerIrql(bOldIrql);
1811 rc = !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
1812 }
1813
1814 rtMpNtOnSpecificRelease(pArgs);
1815 return rc;
1816#endif
1817}
1818
1819
1820
1821
1822static VOID rtMpNtPokeCpuDummy(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
1823{
1824 NOREF(Dpc);
1825 NOREF(DeferredContext);
1826 NOREF(SystemArgument1);
1827 NOREF(SystemArgument2);
1828}
1829
1830
1831/** Callback used by rtMpPokeCpuUsingBroadcastIpi. */
1832static ULONG_PTR rtMpIpiGenericCall(ULONG_PTR Argument)
1833{
1834 NOREF(Argument);
1835 return 0;
1836}
1837
1838
1839/**
1840 * RTMpPokeCpu worker that uses broadcast IPIs for doing the work.
1841 *
1842 * @returns VINF_SUCCESS
1843 * @param idCpu The CPU identifier.
1844 */
1845int rtMpPokeCpuUsingBroadcastIpi(RTCPUID idCpu)
1846{
1847 NOREF(idCpu);
1848 g_pfnrtKeIpiGenericCall(rtMpIpiGenericCall, 0);
1849 return VINF_SUCCESS;
1850}
1851
1852
1853/**
1854 * RTMpPokeCpu worker that uses the Windows 7 and later version of
1855 * HalRequestIpi to get the job done.
1856 *
1857 * @returns VINF_SUCCESS
1858 * @param idCpu The CPU identifier.
1859 */
1860int rtMpPokeCpuUsingHalReqestIpiW7Plus(RTCPUID idCpu)
1861{
1862 /* idCpu is a HAL processor index, so we can use it directly. */
1863 KAFFINITY_EX Target;
1864 g_pfnrtKeInitializeAffinityEx(&Target);
1865 g_pfnrtKeAddProcessorAffinityEx(&Target, idCpu);
1866
1867 g_pfnrtHalRequestIpiW7Plus(0, &Target);
1868 return VINF_SUCCESS;
1869}
1870
1871
1872/**
1873 * RTMpPokeCpu worker that uses the Vista and earlier version of HalRequestIpi
1874 * to get the job done.
1875 *
1876 * @returns VINF_SUCCESS
1877 * @param idCpu The CPU identifier.
1878 */
1879int rtMpPokeCpuUsingHalReqestIpiPreW7(RTCPUID idCpu)
1880{
1881 __debugbreak(); /** @todo this code needs testing!! */
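/* Build a single-bit KAFFINITY mask for the target CPU; this pre-Windows 7 interface can only address CPUs that fit in one KAFFINITY word. */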
1882 KAFFINITY Target = 1;
1883 Target <<= idCpu;
1884 g_pfnrtHalRequestIpiPreW7(Target);
1885 return VINF_SUCCESS;
1886}
1887
1888int rtMpPokeCpuUsingFailureNotSupported(RTCPUID idCpu)
1889{
1890 NOREF(idCpu);
1891 return VERR_NOT_SUPPORTED;
1892}
1893
1894int rtMpPokeCpuUsingDpc(RTCPUID idCpu)
1895{
1896 Assert(g_cRtMpNtMaxCpus > 0 && g_cRtMpNtMaxGroups > 0); /* init order */
1897
1898 /*
1899 * DPC fallback.
1900 */
1901 static KDPC s_aPokeDpcs[RTCPUSET_MAX_CPUS] = {0};
1902 static bool s_fPokeDPCsInitialized = false;
1903
1904 if (!s_fPokeDPCsInitialized)
1905 {
1906 for (unsigned i = 0; i < g_cRtMpNtMaxCpus; i++)
1907 {
1908 KeInitializeDpc(&s_aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
1909 if (g_pfnrtKeSetImportanceDpc)
1910 g_pfnrtKeSetImportanceDpc(&s_aPokeDpcs[i], HighImportance);
1911 int rc = rtMpNtSetTargetProcessorDpc(&s_aPokeDpcs[i], idCpu);
1912 if (RT_FAILURE(rc))
1913 return rc;
1914 }
1915
1916 s_fPokeDPCsInitialized = true;
1917 }
1918
1919 /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
1920 KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL. */
1921 KIRQL oldIrql;
1922 KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);
1923
1924 if (g_pfnrtKeSetImportanceDpc)
1925 g_pfnrtKeSetImportanceDpc(&s_aPokeDpcs[idCpu], HighImportance);
1926 g_pfnrtKeSetTargetProcessorDpc(&s_aPokeDpcs[idCpu], (int)idCpu);
1927
1928 /* Assuming here that high importance DPCs will be delivered immediately; or at least an IPI will be sent immediately.
1929 Note! Not true on at least Vista & Windows 7 */
1930 BOOLEAN fRet = KeInsertQueueDpc(&s_aPokeDpcs[idCpu], 0, 0);
1931
1932 KeLowerIrql(oldIrql);
1933 return fRet == TRUE ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
1934}
1935
1936
1937RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
1938{
1939 if (!RTMpIsCpuOnline(idCpu))
1940 return !RTMpIsCpuPossible(idCpu)
1941 ? VERR_CPU_NOT_FOUND
1942 : VERR_CPU_OFFLINE;
1943 /* Calls rtMpPokeCpuUsingDpc, rtMpPokeCpuUsingHalReqestIpiW7Plus or rtMpPokeCpuUsingBroadcastIpi. */
1944 return g_pfnrtMpPokeCpuWorker(idCpu);
1945}
1946
1947
1948RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
1949{
1950 return false;
1951}
1952