VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c@ 105905

Last change on this file since 105905 was 105670, checked in by vboxsync, 8 months ago

Devices/EFI/FirmwareNew: Merge edk2-stable-202405 and make it build on aarch64, bugref:4643

  • Property svn:eol-style set to native
File size: 60.6 KB
Line 
1/** @file
2SMM MP service implementation
3
4Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7SPDX-License-Identifier: BSD-2-Clause-Patent
8
9**/
10
11#include "PiSmmCpuDxeSmm.h"
12
//
// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;                       // MTRR values applied while in SMM (see ReplaceOSMtrrs)
UINT64                       gPhyMask;                        // physical address mask — initialized outside this chunk; TODO confirm width source
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;          // shared BSP/AP synchronization state for the current SMI
UINTN                        mSmmMpSyncDataSize;              // byte size of the mSmmMpSyncData buffer — set elsewhere
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;               // semaphore/lock storage used by the SMM CPU sync logic
UINTN                        mSemaphoreSize;                  // size of one semaphore unit — set elsewhere
SPIN_LOCK                    *mPFLock = NULL;                 // NOTE(review): name suggests page-fault handler lock — confirm at init site
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;                 // traditional vs. relaxed AP synchronization mode
BOOLEAN                      mMachineCheckSupported = FALSE;  // TRUE when machine check is supported (gates LMCE checks in SmmWaitForApArrival)
MM_COMPLETION                mSmmStartupThisApToken;          // built-in token used by blocking SmmStartupThisAp calls

//
// Processor specified by mPackageFirstThreadIndex[PackageIndex] will do the package-scope register check.
//
UINT32  *mPackageFirstThreadIndex = NULL;
31
32/**
33 Used for BSP to release all APs.
34 Performs an atomic compare exchange operation to release semaphore
35 for each AP.
36
37**/
38VOID
39ReleaseAllAPs (
40 VOID
41 )
42{
43 UINTN Index;
44
45 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
46 if (IsPresentAp (Index)) {
47 SmmCpuSyncReleaseOneAp (mSmmMpSyncData->SyncContext, Index, gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu);
48 }
49 }
50}
51
52/**
53 Check whether the index of CPU perform the package level register
54 programming during System Management Mode initialization.
55
56 The index of Processor specified by mPackageFirstThreadIndex[PackageIndex]
57 will do the package-scope register programming.
58
59 @param[in] CpuIndex Processor Index.
60
61 @retval TRUE Perform the package level register programming.
62 @retval FALSE Don't perform the package level register programming.
63
64**/
65BOOLEAN
66IsPackageFirstThread (
67 IN UINTN CpuIndex
68 )
69{
70 UINT32 PackageIndex;
71
72 PackageIndex = gSmmCpuPrivate->ProcessorInfo[CpuIndex].Location.Package;
73
74 ASSERT (mPackageFirstThreadIndex != NULL);
75
76 //
77 // Set the value of mPackageFirstThreadIndex[PackageIndex].
78 // The package-scope register are checked by the first processor (CpuIndex) in Package.
79 //
80 // If mPackageFirstThreadIndex[PackageIndex] equals to (UINT32)-1, then update
81 // to current CpuIndex. If it doesn't equal to (UINT32)-1, don't change it.
82 //
83 if (mPackageFirstThreadIndex[PackageIndex] == (UINT32)-1) {
84 mPackageFirstThreadIndex[PackageIndex] = (UINT32)CpuIndex;
85 }
86
87 return (BOOLEAN)(mPackageFirstThreadIndex[PackageIndex] == CpuIndex);
88}
89
90/**
91 Returns the Number of SMM Delayed & Blocked & Disabled Thread Count.
92
93 @param[in,out] DelayedCount The Number of SMM Delayed Thread Count.
94 @param[in,out] BlockedCount The Number of SMM Blocked Thread Count.
95 @param[in,out] DisabledCount The Number of SMM Disabled Thread Count.
96
97**/
98VOID
99GetSmmDelayedBlockedDisabledCount (
100 IN OUT UINT32 *DelayedCount,
101 IN OUT UINT32 *BlockedCount,
102 IN OUT UINT32 *DisabledCount
103 )
104{
105 UINTN Index;
106
107 for (Index = 0; Index < mNumberOfCpus; Index++) {
108 if (IsPackageFirstThread (Index)) {
109 if (DelayedCount != NULL) {
110 *DelayedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed);
111 }
112
113 if (BlockedCount != NULL) {
114 *BlockedCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked);
115 }
116
117 if (DisabledCount != NULL) {
118 *DisabledCount += (UINT32)SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable);
119 }
120 }
121 }
122}
123
/**
  Checks if all CPUs (except Blocked & Disabled) have checked in for this SMI run

  The arrival count is read from the sync context, which APs update
  concurrently, so the value may increase between the reads below; the
  comments inline explain why that is acceptable here.

  @retval TRUE if all CPUs the have checked in.
  @retval FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmExceptBlockedDisabled (
  VOID
  )
{
  UINT32  BlockedCount;
  UINT32  DisabledCount;

  BlockedCount  = 0;
  DisabledCount = 0;

  //
  // Check to make sure the CPU arrival count is valid and not locked.
  //
  ASSERT (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) <= mNumberOfCpus);

  //
  // Check whether all CPUs in SMM.
  //
  if (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) == mNumberOfCpus) {
    return TRUE;
  }

  //
  // Check for the Blocked & Disabled Exceptions Case.
  // (The counts are summed per package by GetSmmDelayedBlockedDisabledCount.)
  //
  GetSmmDelayedBlockedDisabledCount (NULL, &BlockedCount, &DisabledCount);

  //
  // The CPU arrival count might be updated by all APs concurrently. The value
  // can be dynamic changed. If some Aps enter the SMI after the BlockedCount &
  // DisabledCount check, then the CPU arrival count will be increased, thus
  // leading the retrieved CPU arrival count + BlockedCount + DisabledCount > mNumberOfCpus.
  // since the BlockedCount & DisabledCount are local variable, it's ok here only for
  // the checking of all CPUs In Smm.
  //
  if (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) + BlockedCount + DisabledCount >= mNumberOfCpus) {
    return TRUE;
  }

  return FALSE;
}
173
174/**
175 Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL
176
177 @retval TRUE Os enable lmce.
178 @retval FALSE Os not enable lmce.
179
180**/
181BOOLEAN
182IsLmceOsEnabled (
183 VOID
184 )
185{
186 MSR_IA32_MCG_CAP_REGISTER McgCap;
187 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
188 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
189
190 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
191 if (McgCap.Bits.MCG_LMCE_P == 0) {
192 return FALSE;
193 }
194
195 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
196 if (FeatureCtrl.Bits.LmceOn == 0) {
197 return FALSE;
198 }
199
200 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
201 return (BOOLEAN)(McgExtCtrl.Bits.LMCE_EN == 1);
202}
203
204/**
205 Return if Local machine check exception signaled.
206
207 Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
208 delivered to only the logical processor.
209
210 @retval TRUE LMCE was signaled.
211 @retval FALSE LMCE was not signaled.
212
213**/
214BOOLEAN
215IsLmceSignaled (
216 VOID
217 )
218{
219 MSR_IA32_MCG_STATUS_REGISTER McgStatus;
220
221 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
222 return (BOOLEAN)(McgStatus.Bits.LMCE_S == 1);
223}
224
/**
  Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before
  entering SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;
  UINT32   DelayedCount;    // diagnostics only: reported when not all APs arrive
  UINT32   BlockedCount;    // diagnostics only: reported when not all APs arrive

  PERF_FUNCTION_BEGIN ();

  DelayedCount = 0;
  BlockedCount = 0;

  ASSERT (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) <= mNumberOfCpus);

  //
  // LMCE status is only consulted when machine check is supported; a signaled
  // local machine check short-circuits the first wait loop below.
  //
  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);
       )
  {
    mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
    if (mSmmMpSyncData->AllApArrivedWithException) {
      break;
    }

    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (SmmCpuSyncGetArrivedCpuCount (mSmmMpSyncData->SyncContext) < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    // (skip hot-removed slots whose APIC ID is INVALID_APIC_ID)
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer);
         )
    {
      mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
      if (mSmmMpSyncData->AllApArrivedWithException) {
        break;
      }

      CpuPause ();
    }
  }

  if (!mSmmMpSyncData->AllApArrivedWithException) {
    //
    // Check for the Blocked & Delayed Case.
    //
    GetSmmDelayedBlockedDisabledCount (&DelayedCount, &BlockedCount, NULL);
    DEBUG ((DEBUG_INFO, "SmmWaitForApArrival: Delayed AP Count = %d, Blocked AP Count = %d\n", DelayedCount, BlockedCount));
  }

  PERF_FUNCTION_END ();
}
334
335/**
336 Replace OS MTRR's with SMI MTRR's.
337
338 @param CpuIndex Processor Index
339
340**/
341VOID
342ReplaceOSMtrrs (
343 IN UINTN CpuIndex
344 )
345{
346 SmmCpuFeaturesDisableSmrr ();
347
348 //
349 // Replace all MTRRs registers
350 //
351 MtrrSetAllMtrrs (&gSmiMtrrs);
352}
353
354/**
355 Wheck whether task has been finished by all APs.
356
357 @param BlockMode Whether did it in block mode or non-block mode.
358
359 @retval TRUE Task has been finished by all APs.
360 @retval FALSE Task not has been finished by all APs.
361
362**/
363BOOLEAN
364WaitForAllAPsNotBusy (
365 IN BOOLEAN BlockMode
366 )
367{
368 UINTN Index;
369
370 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
371 //
372 // Ignore BSP and APs which not call in SMM.
373 //
374 if (!IsPresentAp (Index)) {
375 continue;
376 }
377
378 if (BlockMode) {
379 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
380 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
381 } else {
382 if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
383 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
384 } else {
385 return FALSE;
386 }
387 }
388 }
389
390 return TRUE;
391}
392
393/**
394 Check whether it is an present AP.
395
396 @param CpuIndex The AP index which calls this function.
397
398 @retval TRUE It's a present AP.
399 @retval TRUE This is not an AP or it is not present.
400
401**/
402BOOLEAN
403IsPresentAp (
404 IN UINTN CpuIndex
405 )
406{
407 return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
408 *(mSmmMpSyncData->CpuData[CpuIndex].Present));
409}
410
/**
  Clean up the status flags used during executing the procedure.

  Decrements the token's running-AP count; the last AP to finish releases
  the token's spin lock so callers polling IsApReady() observe completion.
  Finally the token is detached from this AP's slot.

  @param CpuIndex  The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  //
  // Only the last running AP releases the spin lock.
  //
  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}
432
/**
  Free the tokens in the maintained list.

  No memory is released: the token chunks are reused across SMIs by
  rewinding the free-token cursor back to the list head.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}
447
/**
  SMI handler for BSP.

  Orchestrates the whole SMI session: flags BSP presence, gathers APs
  (eagerly in traditional mode / when MTRRs must be reconfigured, lazily
  in relaxed mode), runs the SMM Foundation entry point, then walks the
  APs through the exit rendezvous and resets the sync state for the next SMI.

  @param CpuIndex  BSP processor Index
  @param SyncMode  SMM MP sync mode

**/
VOID
BSPHandler (
  IN UINTN              CpuIndex,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          CpuCount;
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  CpuCount = 0;
  ApCount  = 0;

  PERF_FUNCTION_BEGIN ();

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  if (mSmmDebugAgentSupport) {
    //
    // Initialize Debug Agent to start source level debug in BSP handler
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
  }

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock door for late coming CPU checkin and retrieve the Arrived number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;

    SmmCpuSyncLockDoor (mSmmMpSyncData->SyncContext, CpuIndex, &CpuCount);

    //
    // CpuCount includes the BSP; APs are one fewer.
    //
    ApCount = CpuCount - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // SmmCpuSyncWaitForAPs() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // SmmCpuSyncWaitForAPs() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending none-block tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if ((SyncMode != SmmCpuSyncModeTradition) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Lock door for late coming CPU checkin and retrieve the Arrived number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;

    SmmCpuSyncLockDoor (mSmmMpSyncData->SyncContext, CpuIndex, &CpuCount);

    ApCount = CpuCount - 1;

    //
    // Make sure all APs have their Present flag set.
    // PresentCount includes the BSP (its flag was set above), hence the
    // "> ApCount" termination condition.
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }

      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for all APs the readiness to program MTRRs
    //
    SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);

    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs () || mSmmDebugAgentSupport) {
    //
    // Wait for all APs to complete their pending tasks including MTRR programming if needed.
    //
    SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);

    //
    // Signal APs to Reset states/semaphore for this processor
    //
    ReleaseAllAPs ();
  }

  if (mSmmDebugAgentSupport) {
    //
    // Stop source level debug in BSP handler, the code below will not be
    // debugged.
    //
    InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
  }

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  SmmCpuSyncWaitForAPs (mSmmMpSyncData->SyncContext, ApCount, CpuIndex);

  //
  // At this point, all APs should have exited from APHandler().
  // Migrate the SMM MP performance logging to standard SMM performance logging.
  // Any SMM MP performance logging after this point will be migrated in next SMI.
  //
  PERF_CODE (
    MigrateMpPerf (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus, CpuIndex);
    );

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to MAX_UINT32, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = MAX_UINT32;
  }

  //
  // Allow APs to check in from this point on
  //
  SmmCpuSyncContextReset (mSmmMpSyncData->SyncContext);
  *mSmmMpSyncData->AllCpusInSync            = FALSE;
  mSmmMpSyncData->AllApArrivedWithException = FALSE;

  PERF_FUNCTION_END ();
}
714
/**
  SMI handler for AP.

  Waits for the BSP to enter SMM (attempting to pull it in via SMI IPI if
  needed), participates in the MTRR backup/program rendezvous when required,
  then loops executing procedures scheduled through SmmStartupThisAp() until
  the BSP signals SMI exit.

  @param CpuIndex  AP processor Index.
  @param ValidSmi  Indicates that current SMI is a valid SMI or not.
                   NOTE(review): not referenced in this implementation.
  @param SyncMode  SMM MP sync mode.

**/
VOID
APHandler (
  IN UINTN              CpuIndex,
  IN BOOLEAN            ValidSmi,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       )
  {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != MAX_UINT32) {
      //
      // BSP Index is known
      // Existing AP is in SMI now but BSP not in, so, try bring BSP in SMM.
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           )
      {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        // Reduce the CPU arrival count!
        //
        SmmCpuSyncCheckOutCpu (mSmmMpSyncData->SyncContext, CpuIndex);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      // Reduce the CPU arrival count!
      //
      SmmCpuSyncCheckOutCpu (mSmmMpSyncData->SyncContext, CpuIndex);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);

    //
    // Wait for BSP's signal to program MTRRs
    //
    SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
  }

  //
  // Procedure-execution loop: runs until the BSP clears InsideSmm.
  //
  while (TRUE) {
    //
    // Wait for something to happen
    //
    SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure)(
                                                                     (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                                                                     );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs () || mSmmDebugAgentSupport) {
    //
    // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
    //
    SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);

    //
    // Wait for the signal from BSP to Reset states/semaphore for this processor
    //
    SmmCpuSyncWaitForBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
  }

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  SmmCpuSyncReleaseBsp (mSmmMpSyncData->SyncContext, CpuIndex, BspIndex);
}
923
924/**
925 Checks whether the input token is the current used token.
926
927 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
928 BroadcastProcedure.
929
930 @retval TRUE The input token is the current used token.
931 @retval FALSE The input token is not the current used token.
932**/
933BOOLEAN
934IsTokenInUse (
935 IN SPIN_LOCK *Token
936 )
937{
938 LIST_ENTRY *Link;
939 PROCEDURE_TOKEN *ProcToken;
940
941 if (Token == NULL) {
942 return FALSE;
943 }
944
945 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
946 //
947 // Only search used tokens.
948 //
949 while (Link != gSmmCpuPrivate->FirstFreeToken) {
950 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
951
952 if (ProcToken->SpinLock == Token) {
953 return TRUE;
954 }
955
956 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
957 }
958
959 return FALSE;
960}
961
962/**
963 Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
964
965 @return First token of the token buffer.
966**/
967LIST_ENTRY *
968AllocateTokenBuffer (
969 VOID
970 )
971{
972 UINTN SpinLockSize;
973 UINT32 TokenCountPerChunk;
974 UINTN Index;
975 SPIN_LOCK *SpinLock;
976 UINT8 *SpinLockBuffer;
977 PROCEDURE_TOKEN *ProcTokens;
978
979 SpinLockSize = GetSpinLockProperties ();
980
981 TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
982 ASSERT (TokenCountPerChunk != 0);
983 if (TokenCountPerChunk == 0) {
984 DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
985 CpuDeadLoop ();
986 }
987
988 DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));
989
990 //
991 // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
992 //
993 SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
994 ASSERT (SpinLockBuffer != NULL);
995
996 ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
997 ASSERT (ProcTokens != NULL);
998
999 for (Index = 0; Index < TokenCountPerChunk; Index++) {
1000 SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
1001 InitializeSpinLock (SpinLock);
1002
1003 ProcTokens[Index].Signature = PROCEDURE_TOKEN_SIGNATURE;
1004 ProcTokens[Index].SpinLock = SpinLock;
1005 ProcTokens[Index].RunningApCount = 0;
1006
1007 InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
1008 }
1009
1010 return &ProcTokens[0].Link;
1011}
1012
1013/**
1014 Get the free token.
1015
1016 If no free token, allocate new tokens then return the free one.
1017
1018 @param RunningApsCount The Running Aps count for this token.
1019
1020 @retval return the first free PROCEDURE_TOKEN.
1021
1022**/
1023PROCEDURE_TOKEN *
1024GetFreeToken (
1025 IN UINT32 RunningApsCount
1026 )
1027{
1028 PROCEDURE_TOKEN *NewToken;
1029
1030 //
1031 // If FirstFreeToken meets the end of token list, enlarge the token list.
1032 // Set FirstFreeToken to the first free token.
1033 //
1034 if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
1035 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
1036 }
1037
1038 NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
1039 gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);
1040
1041 NewToken->RunningApCount = RunningApsCount;
1042 AcquireSpinLock (NewToken->SpinLock);
1043
1044 return NewToken;
1045}
1046
1047/**
1048 Checks status of specified AP.
1049
1050 This function checks whether the specified AP has finished the task assigned
1051 by StartupThisAP(), and whether timeout expires.
1052
1053 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
1054 BroadcastProcedure.
1055
1056 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().
1057 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
1058**/
1059EFI_STATUS
1060IsApReady (
1061 IN SPIN_LOCK *Token
1062 )
1063{
1064 if (AcquireSpinLockOrFail (Token)) {
1065 ReleaseSpinLock (Token);
1066 return EFI_SUCCESS;
1067 }
1068
1069 return EFI_NOT_READY;
1070}
1071
1072/**
1073 Schedule a procedure to run on the specified CPU.
1074
1075 @param[in] Procedure The address of the procedure to run
1076 @param[in] CpuIndex Target CPU Index
1077 @param[in,out] ProcArguments The parameter to pass to the procedure
1078 @param[in] Token This is an optional parameter that allows the caller to execute the
1079 procedure in a blocking or non-blocking fashion. If it is NULL the
1080 call is blocking, and the call will not return until the AP has
1081 completed the procedure. If the token is not NULL, the call will
1082 return immediately. The caller can check whether the procedure has
1083 completed with CheckOnProcedure or WaitForProcedure.
1084 @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
1085 execution of Procedure, either for blocking or non-blocking mode.
1086 Zero means infinity. If the timeout expires before all APs return
1087 from Procedure, then Procedure on the failed APs is terminated. If
1088 the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
1089 If the timeout expires in non-blocking mode, the timeout determined
1090 can be through CheckOnProcedure or WaitForProcedure.
1091 Note that timeout support is optional. Whether an implementation
1092 supports this feature can be determined via the Attributes data
1093 member.
1094 @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
1095 by Procedure when it completes execution on the target AP, or with
1096 EFI_TIMEOUT if the Procedure fails to complete within the optional
1097 timeout. The implementation will update this variable with
1098 EFI_NOT_READY prior to starting Procedure on the target AP.
1099
1100 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1101 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1102 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1103 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1104 @retval EFI_SUCCESS The procedure has been successfully scheduled
1105
1106**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2  Procedure,
  IN      UINTN              CpuIndex,
  IN OUT  VOID               *ProcArguments OPTIONAL,
  IN      MM_COMPLETION      *Token,
  IN      UINTN              TimeoutInMicroseconds,
  IN OUT  EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  //
  // CpuIndex must identify an existing processor other than the one
  // currently executing this code (the SMM BSP).
  //
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }

  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }

  //
  // A slot with an invalid APIC ID has no usable processor behind it.
  //
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // The target AP must have checked into this SMI session.  Only log the
  // failure under the traditional sync mode, where every AP is expected
  // to be present.
  //
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  //
  // A CPU marked for hot-remove cannot be targeted.  Only log when hot-plug
  // support is disabled, where this state is unexpected.
  //
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  //
  // A non-zero timeout is only acceptable when the implementation
  // advertises timeout support in mSmmMp.Attributes.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Take the target CPU's BUSY lock before publishing the work item.  The
  // AP side releases this lock when the procedure has completed, which is
  // what the blocking wait at the bottom of this function relies on.
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    if (Token != &mSmmStartupThisApToken) {
      //
      // When Token points to mSmmStartupThisApToken, this routine is called
      // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
      //
      // In this case, caller wants to startup AP procedure in non-blocking
      // mode and cannot get the completion status from the Token because there
      // is no way to return the Token to caller from SmmStartupThisAp().
      // Caller needs to use its implementation specific way to query the completion status.
      //
      // There is no need to allocate a token for such case so the 3 overheads
      // can be avoided:
      // 1. Call AllocateTokenBuffer() when there is no free token.
      // 2. Get a free token from the token buffer.
      // 3. Call ReleaseToken() in APHandler().
      //
      ProcToken                               = GetFreeToken (1);
      mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
      *Token                                  = (MM_COMPLETION)ProcToken->SpinLock;
    }
  }

  //
  // Seed the caller's status slot with EFI_NOT_READY; the AP side stores the
  // procedure's return status there once it finishes.
  //
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  //
  // Kick the target AP so it picks up the scheduled procedure.
  //
  SmmCpuSyncReleaseOneAp (mSmmMpSyncData->SyncContext, CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu);

  //
  // Blocking mode (no Token): re-acquiring the BUSY lock only succeeds once
  // the AP has released it, i.e. the procedure has completed; then drop it.
  //
  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}
1198
/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.


  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  //
  // A non-zero timeout is only acceptable when timeout support is
  // advertised in mSmmMp.Attributes.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Pre-flight pass: count the present APs and reject the request if any of
  // them is marked for hot-remove or currently busy.  The BUSY lock is only
  // probed (acquire-or-fail, then release) — nothing is held across the loop.
  //
  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }

      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  //
  // Non-NULL Token: hand the caller a completion token sized for the worst
  // case of mMaxNumberOfCpus running APs.
  //
  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token    = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY should be acquired.
  //
  // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.
  // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not
  // block mode.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }

      //
      // Seed the caller's per-CPU status slot with EFI_NOT_READY; the AP
      // stores the procedure's return status there upon completion.
      //
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor(AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        InterlockedDecrement (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}
1334
1335/**
1336 ISO C99 6.5.2.2 "Function calls", paragraph 9:
1337 If the function is defined with a type that is not compatible with
1338 the type (of the expression) pointed to by the expression that
1339 denotes the called function, the behavior is undefined.
1340
1341 So add below wrapper function to convert between EFI_AP_PROCEDURE
1342 and EFI_AP_PROCEDURE2.
1343
1344 Wrapper for Procedures.
1345
1346 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
1347
1348**/
1349EFI_STATUS
1350EFIAPI
1351ProcedureWrapper (
1352 IN VOID *Buffer
1353 )
1354{
1355 PROCEDURE_WRAPPER *Wrapper;
1356
1357 Wrapper = Buffer;
1358 Wrapper->Procedure (Wrapper->ProcedureArgument);
1359
1360 return EFI_SUCCESS;
1361}
1362
1363/**
1364 Schedule a procedure to run on the specified CPU in blocking mode.
1365
1366 @param[in] Procedure The address of the procedure to run
1367 @param[in] CpuIndex Target CPU Index
1368 @param[in, out] ProcArguments The parameter to pass to the procedure
1369
1370 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1371 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1372 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1373 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1374 @retval EFI_SUCCESS The procedure has been successfully scheduled
1375
1376**/
1377EFI_STATUS
1378EFIAPI
1379SmmBlockingStartupThisAp (
1380 IN EFI_AP_PROCEDURE Procedure,
1381 IN UINTN CpuIndex,
1382 IN OUT VOID *ProcArguments OPTIONAL
1383 )
1384{
1385 PROCEDURE_WRAPPER Wrapper;
1386
1387 Wrapper.Procedure = Procedure;
1388 Wrapper.ProcedureArgument = ProcArguments;
1389
1390 //
1391 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1392 //
1393 return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
1394}
1395
1396/**
1397 Schedule a procedure to run on the specified CPU.
1398
1399 @param Procedure The address of the procedure to run
1400 @param CpuIndex Target CPU Index
1401 @param ProcArguments The parameter to pass to the procedure
1402
1403 @retval EFI_INVALID_PARAMETER CpuNumber not valid
1404 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
1405 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
1406 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
1407 @retval EFI_SUCCESS The procedure has been successfully scheduled
1408
1409**/
1410EFI_STATUS
1411EFIAPI
1412SmmStartupThisAp (
1413 IN EFI_AP_PROCEDURE Procedure,
1414 IN UINTN CpuIndex,
1415 IN OUT VOID *ProcArguments OPTIONAL
1416 )
1417{
1418 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
1419 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;
1420
1421 //
1422 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
1423 //
1424 return InternalSmmStartupThisAp (
1425 ProcedureWrapper,
1426 CpuIndex,
1427 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
1428 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,
1429 0,
1430 NULL
1431 );
1432}
1433
1434/**
1435 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
1436 They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.
1437
1438 NOTE: It might not be appreciated in runtime since it might
1439 conflict with OS debugging facilities. Turn them off in RELEASE.
1440
1441 @param CpuIndex CPU Index
1442
1443**/
1444VOID
1445EFIAPI
1446CpuSmmDebugEntry (
1447 IN UINTN CpuIndex
1448 )
1449{
1450 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1451
1452 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1453 ASSERT (CpuIndex < mMaxNumberOfCpus);
1454 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1455 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1456 AsmWriteDr6 (CpuSaveState->x86._DR6);
1457 AsmWriteDr7 (CpuSaveState->x86._DR7);
1458 } else {
1459 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
1460 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
1461 }
1462 }
1463}
1464
1465/**
1466 This function restores DR6 & DR7 to SMM save state.
1467
1468 NOTE: It might not be appreciated in runtime since it might
1469 conflict with OS debugging facilities. Turn them off in RELEASE.
1470
1471 @param CpuIndex CPU Index
1472
1473**/
1474VOID
1475EFIAPI
1476CpuSmmDebugExit (
1477 IN UINTN CpuIndex
1478 )
1479{
1480 SMRAM_SAVE_STATE_MAP *CpuSaveState;
1481
1482 if (FeaturePcdGet (PcdCpuSmmDebug)) {
1483 ASSERT (CpuIndex < mMaxNumberOfCpus);
1484 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
1485 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
1486 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
1487 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
1488 } else {
1489 CpuSaveState->x64._DR7 = AsmReadDr7 ();
1490 CpuSaveState->x64._DR6 = AsmReadDr6 ();
1491 }
1492 }
1493}
1494
/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  Flow: save CR2; run deferred first-SMI initialization if needed; run the
  optional registered startup procedure and CPU entry hooks; check the SMI
  source; then either bail out (invalid SMI, no BSP running), join as an AP,
  or win BSP election and run BSPHandler().  All checked-in CPUs spin on
  AllCpusInSync before leaving, so the whole set exits SMM together.

  @param CpuIndex CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  ASSERT (mSmmInitialized != NULL);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // First SMI on this CPU: perform the deferred SMM initialization and
  // return immediately without joining the normal rendezvous flow.
  //
  if (!mSmmInitialized[CpuIndex]) {
    //
    // Perform InitializeSmm for CpuIndex
    //
    InitializeSmm ();

    //
    // Restore Cr2
    //
    RestoreCr2 (Cr2);

    //
    // Mark the first SMI init for CpuIndex has been done so as to avoid the reentry.
    //
    mSmmInitialized[CpuIndex] = TRUE;

    return;
  }

  //
  // Call the user register Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  PERF_CODE (
    MpPerfBegin (CpuIndex, SMM_MP_PERF_PROCEDURE_ID (SmmRendezvousEntry));
    );
  SmmCpuFeaturesRendezvousEntry (CpuIndex);
  PERF_CODE (
    MpPerfEnd (CpuIndex, SMM_MP_PERF_PROCEDURE_ID (SmmRendezvousEntry));
    );

  //
  // Determine if this is a valid SMI
  //
  PERF_CODE (
    MpPerfBegin (CpuIndex, SMM_MP_PERF_PROCEDURE_ID (PlatformValidSmi));
    );
  ValidSmi = PlatformValidSmi ();
  PERF_CODE (
    MpPerfEnd (CpuIndex, SMM_MP_PERF_PROCEDURE_ID (PlatformValidSmi));
    );

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    // CPU check in here!
    // "SmmCpuSyncCheckInCpu (mSmmMpSyncData->SyncContext, CpuIndex)" return error means failed
    // to check in CPU. BSP has already ended the synchronization.
    //
    if (RETURN_ERROR (SmmCpuSyncCheckInCpu (mSmmMpSyncData->SyncContext, CpuIndex))) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      // Existing AP is too late now to enter SMI since BSP has already ended the synchronization!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }

      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        //
        // During a pending BSP switch, only candidate CPUs may participate
        // in the election.
        //
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method:
            // the first CPU to win the compare-exchange on BspIndex becomes BSP.
            //
            if (mSmmMpSyncData->BspIndex == MAX_UINT32) {
              InterlockedCompareExchange32 (
                (UINT32 *)&mSmmMpSyncData->BspIndex,
                MAX_UINT32,
                (UINT32)CpuIndex
                );
            }
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  //
  // Note: SmmRendezvousExit perf-logging entry is the only one that will be
  // migrated to standard perf-logging database in next SMI by BSPHandler().
  // Hence, the number of SmmRendezvousEntry entries will be larger than
  // the number of SmmRendezvousExit entries. Delta equals to the number
  // of CPU threads.
  //
  PERF_CODE (
    MpPerfBegin (CpuIndex, SMM_MP_PERF_PROCEDURE_ID (SmmRendezvousExit));
    );
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  PERF_CODE (
    MpPerfEnd (CpuIndex, SMM_MP_PERF_PROCEDURE_ID (SmmRendezvousExit));
    );

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}
1722
1723/**
1724 Initialize PackageBsp Info. Processor specified by mPackageFirstThreadIndex[PackageIndex]
1725 will do the package-scope register programming. Set default CpuIndex to (UINT32)-1, which
1726 means not specified yet.
1727
1728**/
1729VOID
1730InitPackageFirstThreadIndexInfo (
1731 VOID
1732 )
1733{
1734 UINT32 Index;
1735 UINT32 PackageId;
1736 UINT32 PackageCount;
1737
1738 PackageId = 0;
1739 PackageCount = 0;
1740
1741 //
1742 // Count the number of package, set to max PackageId + 1
1743 //
1744 for (Index = 0; Index < mNumberOfCpus; Index++) {
1745 if (PackageId < gSmmCpuPrivate->ProcessorInfo[Index].Location.Package) {
1746 PackageId = gSmmCpuPrivate->ProcessorInfo[Index].Location.Package;
1747 }
1748 }
1749
1750 PackageCount = PackageId + 1;
1751
1752 mPackageFirstThreadIndex = (UINT32 *)AllocatePool (sizeof (UINT32) * PackageCount);
1753 ASSERT (mPackageFirstThreadIndex != NULL);
1754 if (mPackageFirstThreadIndex == NULL) {
1755 return;
1756 }
1757
1758 //
1759 // Set default CpuIndex to (UINT32)-1, which means not specified yet.
1760 //
1761 SetMem32 (mPackageFirstThreadIndex, sizeof (UINT32) * PackageCount, (UINT32)-1);
1762}
1763
1764/**
1765 Allocate buffer for SpinLock and Wrapper function buffer.
1766
1767**/
1768VOID
1769InitializeDataForMmMp (
1770 VOID
1771 )
1772{
1773 gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
1774 ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);
1775
1776 InitializeListHead (&gSmmCpuPrivate->TokenList);
1777
1778 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
1779}
1780
/**
  Allocate buffer for all semaphores and spin locks.

  Carves one page-aligned allocation into the global semaphores (InsideSmm,
  AllCpusInSync, PFLock, CodeAccessCheckLock) followed by the per-CPU
  semaphore arrays (Busy, Present).  Every semaphore occupies its own slot
  of GetSpinLockProperties() bytes (presumably the cache-line-safe spin
  lock size — see BaseSynchronizationLib).

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  //
  // Size the block: one slot per pointer-sized member of the GLOBAL
  // structure, plus one slot per member per processor for the CPU structure.
  //
  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((DEBUG_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
  DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages          = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  //
  // NOTE(review): ASSERT compiles out in RELEASE builds; a NULL return would
  // fault in the ZeroMem below — confirm allocation failure here is treated
  // as fatal by design.
  //
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  //
  // Lay out the global semaphores first, one slot apiece.
  //
  SemaphoreAddr                               = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                              += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                 = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  //
  // Per-CPU semaphores follow the global block; each array spans
  // ProcessorCount slots.
  //
  SemaphoreAddr                       = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                      += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  //
  // Publish convenience aliases used elsewhere in the driver.
  //
  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}
1832
/**
  Initialize un-cacheable data.

  Lays out mSmmMpSyncData (header, per-CPU data array and candidate-BSP
  array share one allocation), selects the SMM BSP election policy, creates
  the CPU sync context, and wires each CPU's Busy/Present semaphores to the
  slots carved out by InitializeSmmCpuSemaphores().  No-op when
  mSmmMpSyncData has not been allocated yet.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  RETURN_STATUS  Status;

  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData      = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to MAX_UINT32
      //
      mSmmMpSyncData->BspIndex = MAX_UINT32;
    } else {
      //
      // Use NonSMM BSP as SMM BSP: pick the CPU whose APIC ID matches the
      // processor running this code.
      //
      for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
        if (GetApicId () == gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId) {
          mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
          break;
        }
      }
    }

    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    //
    // Create the CPU sync context sized for all CPUs.  SMI synchronization
    // cannot work without it, so a failure here dead-loops instead of
    // continuing with a NULL context.
    //
    Status = SmmCpuSyncContextInit (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus, &mSmmMpSyncData->SyncContext);
    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_ERROR, "InitializeMpSyncData: SmmCpuSyncContextInit return error %r!\n", Status));
      CpuDeadLoop ();
      return;
    }

    ASSERT (mSmmMpSyncData->SyncContext != NULL);

    //
    // Hook the global flags up to their semaphore slots and start in the
    // "no SMI in progress" state.
    //
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (
      mSmmMpSyncData->InsideSmm != NULL &&
      mSmmMpSyncData->AllCpusInSync != NULL
      );
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    mSmmMpSyncData->AllApArrivedWithException = FALSE;

    //
    // Each CPU's Busy lock and Present flag live in their own
    // mSemaphoreSize-sized slots; initialize them to released/absent.
    //
    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}
1904
/**
  Initialize global data for MP synchronization.

  @param Stacks           Base address of SMI stack buffer for all processors.
  @param StackSize        Stack size for each processor in SMM.
  @param ShadowStackSize  Shadow Stack size for each processor in SMM.

  @return The CR3 value (SMM page table root) created by SmmInitPageTable().

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                          Cr3;
  UINTN                           Index;
  UINT8                           *GdtTssTables;
  UINTN                           GdtTableStepSize;
  CPUID_VERSION_INFO_EDX          RegEdx;
  UINT32                          MaxExtendedFunction;
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX  VirPhyAddressSize;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData: one SMM_DISPATCHER_MP_SYNC_DATA header plus
  // per-CPU SMM_CPU_DATA_BLOCK and CandidateBsp BOOLEAN arrays in a single
  // page allocation.
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
  if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    //
    // CPUID leaf 80000008h not available: fall back to 36 physical address
    // bits.
    //
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
  //
  // Clear the low 12 bits
  //
  gPhyMask &= 0xfffffffffffff000ULL;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU: every processor gets its own SMBASE,
  // stack slice and GDT/TSS copy, while sharing the IDT and the CR3 created
  // above.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
1998
1999/**
2000
2001 Register the SMM Foundation entry point.
2002
2003 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
2004 @param SmmEntryPoint SMM Foundation EntryPoint
2005
2006 @retval EFI_SUCCESS Successfully to register SMM foundation entry point
2007
2008**/
2009EFI_STATUS
2010EFIAPI
2011RegisterSmmEntry (
2012 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
2013 IN EFI_SMM_ENTRY_POINT SmmEntryPoint
2014 )
2015{
2016 //
2017 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
2018 //
2019 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
2020 return EFI_SUCCESS;
2021}
2022
2023/**
2024
2025 Register the SMM Foundation entry point.
2026
2027 @param[in] Procedure A pointer to the code stream to be run on the designated target AP
2028 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
2029 with the related definitions of
2030 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
2031 If caller may pass a value of NULL to deregister any existing
2032 startup procedure.
2033 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
2034 run by the AP. It is an optional common mailbox between APs and
2035 the caller to share information
2036
2037 @retval EFI_SUCCESS The Procedure has been set successfully.
2038 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
2039
2040**/
2041EFI_STATUS
2042RegisterStartupProcedure (
2043 IN EFI_AP_PROCEDURE Procedure,
2044 IN OUT VOID *ProcedureArguments OPTIONAL
2045 )
2046{
2047 if ((Procedure == NULL) && (ProcedureArguments != NULL)) {
2048 return EFI_INVALID_PARAMETER;
2049 }
2050
2051 if (mSmmMpSyncData == NULL) {
2052 return EFI_NOT_READY;
2053 }
2054
2055 mSmmMpSyncData->StartupProcedure = Procedure;
2056 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;
2057
2058 return EFI_SUCCESS;
2059}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette