VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c@ 106901

Last change on this file since 106901 was 105670, checked in by vboxsync, 6 months ago

Devices/EFI/FirmwareNew: Merge edk2-stable-202405 and make it build on aarch64, bugref:4643

  • Property svn:eol-style set to native
File size: 38.6 KB
Line 
1/** @file
2Code for Processor S3 restoration
3
4Copyright (c) 2006 - 2023, Intel Corporation. All rights reserved.<BR>
5SPDX-License-Identifier: BSD-2-Clause-Patent
6
7**/
8
9#include "PiSmmCpuDxeSmm.h"
10#include <PiPei.h>
11#include <Ppi/MpServices2.h>
12
#pragma pack(1)
//
// Data exchanged between the BSP and the APs through the AP startup vector
// (populated by PrepareApStartupVector).  Packed to byte granularity because
// the AP rendezvous assembly is assumed to address these fields at fixed
// byte offsets — TODO confirm against the startup assembly.
//
typedef struct {
  UINTN              Lock;                                  // Lock storage for serializing AP startup — assumed; confirm in assembly
  VOID               *StackStart;                           // AP stack area base (from mAcpiCpuData.StackAddress)
  UINTN              StackSize;                             // Per-AP stack size (from mAcpiCpuData.StackSize)
  VOID               *ApFunction;                           // C routine the APs call (set to InitializeCpuProcedure)
  IA32_DESCRIPTOR    GdtrProfile;                           // GDTR image for APs (copied from mAcpiCpuData.GdtrProfile)
  IA32_DESCRIPTOR    IdtrProfile;                           // IDTR image for APs (copied from mAcpiCpuData.IdtrProfile)
  UINT32             BufferStart;                           // Physical start of the startup-vector buffer
  UINT32             Cr3;                                   // Page-table base for APs (BSP's CR3 at preparation time)
  UINTN              InitializeFloatingPointUnitsAddress;   // Address of InitializeFloatingPointUnits for APs to call
} MP_CPU_EXCHANGE_INFO;
#pragma pack()
26
//
// Layout of the AP rendezvous code as reported by AsmGetAddressMap().
// Offsets are relative to RendezvousFunnelAddress and are used by
// PrepareApStartupVector() to relocate the code and patch its jumps.
//
typedef struct {
  UINT8    *RendezvousFunnelAddress;   // Start of the AP startup (rendezvous) code
  UINTN    PModeEntryOffset;           // Offset of the protected-mode entry point
  UINTN    FlatJumpOffset;             // Offset of the jump patched to target PModeEntryOffset
  UINTN    Size;                       // Total size, in bytes, of the rendezvous code
  UINTN    LModeEntryOffset;           // Offset of the long-mode entry point
  UINTN    LongJumpOffset;             // Offset of the jump patched to target LModeEntryOffset (0 when absent)
} MP_ASSEMBLY_ADDRESS_MAP;
35
//
// Flags used when programming the registers.
//
typedef struct {
  volatile UINTN     MemoryMappedLock;         // Spinlock used to program mmio
  volatile UINT32    *CoreSemaphoreCount;      // Semaphore container used to program
                                               // core level semaphore.
  volatile UINT32    *PackageSemaphoreCount;   // Semaphore container used to program
                                               // package level semaphore.
} PROGRAM_CPU_REGISTER_FLAGS;

//
// Signal that SMM BASE relocation is complete.
//
volatile BOOLEAN  mInitApsAfterSmmBaseReloc;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

//
// 8 KB region just below the 0xA0000 legacy video hole.
// NOTE(review): not referenced elsewhere in this chunk — presumably used by
// the AP startup placement logic; confirm before removing.
//
#define LEGACY_REGION_SIZE  (2 * 0x1000)
#define LEGACY_REGION_BASE  (0xA0000 - LEGACY_REGION_SIZE)

PROGRAM_CPU_REGISTER_FLAGS  mCpuFlags;                             // Locks/semaphores shared by ProgramProcessorRegister()
ACPI_CPU_DATA               mAcpiCpuData;                          // CPU data saved during normal boot for use on S3 resume
volatile UINT32             mNumberToFinish;                       // Processors still inside the current init phase (lock-decremented)
MP_CPU_EXCHANGE_INFO        *mExchangeInfo;                        // BSP<->AP exchange area placed right after the startup code
BOOLEAN                     mRestoreSmmConfigurationInS3 = FALSE;  // Set by SmmRestoreCpu(); consumed by RestoreSmmConfigurationInS3()

//
// S3 boot flag
//
BOOLEAN  mSmmS3Flag = FALSE;

//
// Pointer to structure used during S3 Resume
//
SMM_S3_RESUME_STATE  *mSmmS3ResumeState = NULL;

BOOLEAN  mAcpiS3Enable = TRUE;   // When FALSE the S3-specific paths in this file are no-ops

//
// IA32 machine code each AP runs while parked: decrement the counter whose
// address was pushed on its stack, then cli/hlt forever.  Copied into ACPI
// NVS memory (mApHltLoopCode) so it survives after boot services.
//
UINT8  *mApHltLoopCode          = NULL;
UINT8  mApHltLoopCodeTemplate[] = {
  0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
  0xF0, 0xFF, 0x08,       // lock dec dword ptr [eax]
  0xFA,                   // cli
  0xF4,                   // hlt
  0xEB, 0xFC              // jmp $-2
};
93
94/**
95 Sync up the MTRR values for all processors.
96
97 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
98**/
99VOID
100EFIAPI
101LoadMtrrData (
102 EFI_PHYSICAL_ADDRESS MtrrTable
103 )
104
105/*++
106
107Routine Description:
108
109 Sync up the MTRR values for all processors.
110
111Arguments:
112
113Returns:
114 None
115
116--*/
117{
118 MTRR_SETTINGS *MtrrSettings;
119
120 MtrrSettings = (MTRR_SETTINGS *)(UINTN)MtrrTable;
121 MtrrSetAllMtrrs (MtrrSettings);
122}
123
124/**
125 Increment semaphore by 1.
126
127 @param Sem IN: 32-bit unsigned integer
128
129**/
130VOID
131S3ReleaseSemaphore (
132 IN OUT volatile UINT32 *Sem
133 )
134{
135 InterlockedIncrement (Sem);
136}
137
138/**
139 Decrement the semaphore by 1 if it is not zero.
140
141 Performs an atomic decrement operation for semaphore.
142 The compare exchange operation must be performed using
143 MP safe mechanisms.
144
145 @param Sem IN: 32-bit unsigned integer
146
147**/
148VOID
149S3WaitForSemaphore (
150 IN OUT volatile UINT32 *Sem
151 )
152{
153 UINT32 Value;
154
155 do {
156 Value = *Sem;
157 } while (Value == 0 ||
158 InterlockedCompareExchange32 (
159 Sem,
160 Value,
161 Value - 1
162 ) != Value);
163}
164
165/**
166 Read / write CR value.
167
168 @param[in] CrIndex The CR index which need to read/write.
169 @param[in] Read Read or write. TRUE is read.
170 @param[in,out] CrValue CR value.
171
172 @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.
173**/
174UINTN
175ReadWriteCr (
176 IN UINT32 CrIndex,
177 IN BOOLEAN Read,
178 IN OUT UINTN *CrValue
179 )
180{
181 switch (CrIndex) {
182 case 0:
183 if (Read) {
184 *CrValue = AsmReadCr0 ();
185 } else {
186 AsmWriteCr0 (*CrValue);
187 }
188
189 break;
190 case 2:
191 if (Read) {
192 *CrValue = AsmReadCr2 ();
193 } else {
194 AsmWriteCr2 (*CrValue);
195 }
196
197 break;
198 case 3:
199 if (Read) {
200 *CrValue = AsmReadCr3 ();
201 } else {
202 AsmWriteCr3 (*CrValue);
203 }
204
205 break;
206 case 4:
207 if (Read) {
208 *CrValue = AsmReadCr4 ();
209 } else {
210 AsmWriteCr4 (*CrValue);
211 }
212
213 break;
214 default:
215 return EFI_UNSUPPORTED;
216 }
217
218 return EFI_SUCCESS;
219}
220
/**
  Initialize the CPU registers from a register table.

  Walks every entry of RegisterTable and applies it according to its type:
  control-register write, MSR write, MMIO bit-field write, cache
  enable/disable, or a core/package-scoped semaphore rendezvous that makes
  all threads in the same core/package synchronize before continuing.

  @param[in]  RegisterTable  The register table for this AP.
  @param[in]  ApLocation     AP location info for this ap.
  @param[in]  CpuStatus      CPU status info for this CPU.
  @param[in]  CpuFlags       Flags data structure used when program the register.

  @note This service could be called by BSP/APs.
**/
VOID
ProgramProcessorRegister (
  IN CPU_REGISTER_TABLE          *RegisterTable,
  IN EFI_CPU_PHYSICAL_LOCATION   *ApLocation,
  IN CPU_STATUS_INFORMATION      *CpuStatus,
  IN PROGRAM_CPU_REGISTER_FLAGS  *CpuFlags
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntryHead;
  volatile UINT32           *SemaphorePtr;
  UINT32                    FirstThread;
  UINT32                    CurrentThread;
  UINT32                    CurrentCore;
  UINTN                     ProcessorIndex;
  UINT32                    *ThreadCountPerPackage;
  UINT8                     *ThreadCountPerCore;
  EFI_STATUS                Status;
  UINT64                    CurrentValue;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;

  for (Index = 0; Index < RegisterTable->TableLength; Index++) {
    RegisterTableEntry = &RegisterTableEntryHead[Index];

    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
      //
      // The specified register is Control Register
      //
      case ControlRegister:
        Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
        if (EFI_ERROR (Status)) {
          // Unsupported CR index: skip this entry rather than fail the table.
          break;
        }

        if (RegisterTableEntry->TestThenWrite) {
          //
          // Skip the write when the targeted bit field already holds the
          // requested value.
          //
          CurrentValue = BitFieldRead64 (
                           Value,
                           RegisterTableEntry->ValidBitStart,
                           RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                           );
          if (CurrentValue == RegisterTableEntry->Value) {
            break;
          }
        }

        //
        // Read-modify-write only the [ValidBitStart, ValidBitStart+ValidBitLength-1] field.
        //
        Value = (UINTN)BitFieldWrite64 (
                         Value,
                         RegisterTableEntry->ValidBitStart,
                         RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                         RegisterTableEntry->Value
                         );
        ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
        break;
      //
      // The specified register is Model Specific Register
      //
      case Msr:
        if (RegisterTableEntry->TestThenWrite) {
          //
          // Skip the WRMSR when the current contents already match.
          //
          Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
          if (RegisterTableEntry->ValidBitLength >= 64) {
            if (Value == RegisterTableEntry->Value) {
              break;
            }
          } else {
            CurrentValue = BitFieldRead64 (
                             Value,
                             RegisterTableEntry->ValidBitStart,
                             RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
                             );
            if (CurrentValue == RegisterTableEntry->Value) {
              break;
            }
          }
        }

        //
        // If this function is called to restore register setting after INIT signal,
        // there is no need to restore MSRs in register table.
        //
        if (RegisterTableEntry->ValidBitLength >= 64) {
          //
          // If length is not less than 64 bits, then directly write without reading
          //
          AsmWriteMsr64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->Value
            );
        } else {
          //
          // Set the bit section according to bit start and length
          //
          AsmMsrBitFieldWrite64 (
            RegisterTableEntry->Index,
            RegisterTableEntry->ValidBitStart,
            RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
            RegisterTableEntry->Value
            );
        }

        break;
      //
      // MemoryMapped operations
      //
      case MemoryMapped:
        //
        // MMIO writes are serialized across processors with a shared spinlock;
        // the 64-bit MMIO address is assembled from Index (low 32) and HighIndex.
        //
        AcquireSpinLock (&CpuFlags->MemoryMappedLock);
        MmioBitFieldWrite32 (
          (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          (UINT32)RegisterTableEntry->Value
          );
        ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
        break;
      //
      // Enable or disable cache
      //
      case CacheControl:
        //
        // If value of the entry is 0, then disable cache. Otherwise, enable cache.
        //
        if (RegisterTableEntry->Value == 0) {
          AsmDisableCache ();
        } else {
          AsmEnableCache ();
        }

        break;

      case Semaphore:
        // Semaphore works logic like below:
        //
        //  V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
        //  P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
        //
        //  All threads (T0...Tn) waits in P() line and continues running
        //  together.
        //
        //
        //  T0             T1            ...           Tn
        //
        //  V(0...n)       V(0...n)      ...           V(0...n)
        //  n * P(0)       n * P(1)      ...           n * P(n)
        //
        ASSERT (
          (ApLocation != NULL) &&
          (CpuStatus->ThreadCountPerPackage != 0) &&
          (CpuStatus->ThreadCountPerCore != 0) &&
          (CpuFlags->CoreSemaphoreCount != NULL) &&
          (CpuFlags->PackageSemaphoreCount != NULL)
          );
        switch (RegisterTableEntry->Value) {
          case CoreDepType:
            //
            // Rendezvous with the other threads of this thread's CORE.
            //
            SemaphorePtr       = CpuFlags->CoreSemaphoreCount;
            ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;

            CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
            //
            // Get Offset info for the first thread in the core which current thread belongs to.
            //
            FirstThread   = CurrentCore * CpuStatus->MaxThreadCount;
            CurrentThread = FirstThread + ApLocation->Thread;

            //
            // Different cores may have different valid threads in them. If driver maintains clearly
            // thread index in different cores, the logic will be much complicated.
            // Here driver just simply records the max thread number in all cores and use it as expect
            // thread number for all cores.
            // In below two steps logic, first current thread will Release semaphore for each thread
            // in current core. Maybe some threads are not valid in this core, but driver don't
            // care. Second, driver will let current thread wait semaphore for all valid threads in
            // current core. Because only the valid threads will do release semaphore for this
            // thread, driver here only need to wait the valid thread count.
            //

            //
            // First Notify ALL THREADs in current Core that this thread is ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
              S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
            }

            //
            // Second, check whether all VALID THREADs (not all threads) in current core are ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex++) {
              S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
            }

            break;

          case PackageDepType:
            //
            // Rendezvous with the other threads of this thread's PACKAGE.
            //
            SemaphorePtr          = CpuFlags->PackageSemaphoreCount;
            ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
            //
            // Get Offset info for the first thread in the package which current thread belongs to.
            //
            FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
            //
            // Get the possible threads count for current package.
            //
            CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;

            //
            // Different packages may have different valid threads in them. If driver maintains clearly
            // thread index in different packages, the logic will be much complicated.
            // Here driver just simply records the max thread number in all packages and use it as expect
            // thread number for all packages.
            // In below two steps logic, first current thread will Release semaphore for each thread
            // in current package. Maybe some threads are not valid in this package, but driver don't
            // care. Second, driver will let current thread wait semaphore for all valid threads in
            // current package. Because only the valid threads will do release semaphore for this
            // thread, driver here only need to wait the valid thread count.
            //

            //
            // First Notify ALL THREADS in current package that this thread is ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex++) {
              S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
            }

            //
            // Second, check whether VALID THREADS (not all threads) in current package are ready.
            //
            for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex++) {
              S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
            }

            break;

          default:
            break;
        }

        break;

      default:
        break;
    }
  }
}
481
482/**
483
484 Set Processor register for one AP.
485
486 @param PreSmmRegisterTable Use pre Smm register table or register table.
487
488**/
489VOID
490SetRegister (
491 IN BOOLEAN PreSmmRegisterTable
492 )
493{
494 CPU_FEATURE_INIT_DATA *FeatureInitData;
495 CPU_REGISTER_TABLE *RegisterTable;
496 CPU_REGISTER_TABLE *RegisterTables;
497 UINT32 InitApicId;
498 UINTN ProcIndex;
499 UINTN Index;
500
501 FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;
502
503 if (PreSmmRegisterTable) {
504 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->PreSmmInitRegisterTable;
505 } else {
506 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;
507 }
508
509 if (RegisterTables == NULL) {
510 return;
511 }
512
513 InitApicId = GetInitialApicId ();
514 RegisterTable = NULL;
515 ProcIndex = (UINTN)-1;
516 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
517 if (RegisterTables[Index].InitialApicId == InitApicId) {
518 RegisterTable = &RegisterTables[Index];
519 ProcIndex = Index;
520 break;
521 }
522 }
523
524 ASSERT (RegisterTable != NULL);
525
526 if (FeatureInitData->ApLocation != 0) {
527 ProgramProcessorRegister (
528 RegisterTable,
529 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)FeatureInitData->ApLocation + ProcIndex,
530 &FeatureInitData->CpuStatus,
531 &mCpuFlags
532 );
533 } else {
534 ProgramProcessorRegister (
535 RegisterTable,
536 NULL,
537 &FeatureInitData->CpuStatus,
538 &mCpuFlags
539 );
540 }
541}
542
/**
  The function is invoked before SMBASE relocation in S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
  and restores MTRRs for both BSP and APs.

  @param IsBsp  The CPU this function executes on is BSP or not.

**/
VOID
InitializeCpuBeforeRebase (
  IN BOOLEAN  IsBsp
  )
{
  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // TRUE selects the pre-SMM-init register table (PreSmmInitRegisterTable).
  //
  SetRegister (TRUE);

  ProgramVirtualWireMode ();
  if (!IsBsp) {
    DisableLvtInterrupts ();
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);

  if (IsBsp) {
    //
    // Bsp wait here till all AP finish the initialization before rebase
    //
    while (mNumberToFinish > 0) {
      CpuPause ();
    }
  }
}
580
/**
  The function is invoked after SMBASE relocation in S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
  data saved by normal boot path for both BSP and APs.

  @param IsBsp  The CPU this function executes on is BSP or not.

**/
VOID
InitializeCpuAfterRebase (
  IN BOOLEAN  IsBsp
  )
{
  UINTN  TopOfStack;
  UINT8  Stack[128];   // Small local buffer used as the AP's temporary stack for the hlt loop

  //
  // FALSE selects the post-SMM register table (RegisterTable).
  //
  SetRegister (FALSE);

  //
  // When MP Services 2 PPI drives the flow (MpService2Ppi != 0), the PPI owns
  // AP parking, so nothing more is needed here.
  //
  if (mSmmS3ResumeState->MpService2Ppi == 0) {
    if (IsBsp) {
      //
      // BSP waits until every AP has decremented mNumberToFinish from the
      // hlt-loop code.
      //
      while (mNumberToFinish > 0) {
        CpuPause ();
      }
    } else {
      //
      // Place AP into the safe code, count down the number with lock mechanism in the safe code.
      //
      TopOfStack  = (UINTN)Stack + sizeof (Stack);
      TopOfStack &= ~(UINTN)(CPU_STACK_ALIGNMENT - 1);
      CopyMem ((VOID *)(UINTN)mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
      // NOTE(review): TransferApToSafeState is expected not to return for the
      // AP — confirm against its implementation.
      TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
    }
  }
}
616
/**
  Cpu initialization procedure.

  Executed by the BSP and every AP on the S3 path.  Ordering:
  1. pre-SMBASE-relocation init (MTRRs, pre-SMM register table),
  2. BSP triggers the first SMI init for all processors,
  3. post-SMBASE-relocation init (register table), gated on
     mInitApsAfterSmmBaseReloc so APs wait for the BSP's signal.

  @param[in,out] Buffer  The pointer to private data buffer.

**/
VOID
EFIAPI
InitializeCpuProcedure (
  IN OUT VOID  *Buffer
  )
{
  BOOLEAN  IsBsp;

  // This processor is the BSP iff its APIC ID matches the one recorded in
  // mBspApicId by SmmRestoreCpu() before MP startup.
  IsBsp = (BOOLEAN)(mBspApicId == GetApicId ());

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    //
    // First time microcode load and restore MTRRs
    //
    InitializeCpuBeforeRebase (IsBsp);
  }

  if (IsBsp) {
    //
    // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
    //
    ExecuteFirstSmiInit ();
  }

  //
  // Skip initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    if (IsBsp) {
      //
      // mNumberToFinish should be set before AP executes InitializeCpuAfterRebase()
      //
      mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
      //
      // Signal that SMM base relocation is complete and to continue initialization for all APs.
      //
      mInitApsAfterSmmBaseReloc = TRUE;
    } else {
      //
      // AP Wait for BSP to signal SMM Base relocation done.
      //
      while (!mInitApsAfterSmmBaseReloc) {
        CpuPause ();
      }
    }

    //
    // Restore MSRs for BSP and all APs
    //
    InitializeCpuAfterRebase (IsBsp);
  }
}
678
/**
  Prepares startup vector for APs.

  Copies the AP rendezvous code to the working buffer, patches its
  mode-switch jumps to point at the relocated entry points, and fills in the
  MP_CPU_EXCHANGE_INFO area placed immediately after the code.

  @param WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS     StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP  AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // The +3 / +2 byte offsets are assumed to locate the 32-bit target operand
  // inside the relocated jump instructions — verify against the AP startup
  // assembly behind AsmGetAddressMap.
  //
  CopyMem ((VOID *)(UINTN)StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *)(UINTN)(StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32)(StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *)(UINTN)(StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32)(StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *)(UINTN)(StartupVector + AddressMap.Size);
  ZeroMem ((VOID *)mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *)(UINTN)&mExchangeInfo->GdtrProfile, (VOID *)(UINTN)mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *)(UINTN)&mExchangeInfo->IdtrProfile, (VOID *)(UINTN)mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  mExchangeInfo->StackStart  = (VOID *)(UINTN)mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32)StartupVector;
  mExchangeInfo->Cr3         = (UINT32)(AsmReadCr3 ());
  mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
  // APs end up calling InitializeCpuProcedure() once out of real mode.
  mExchangeInfo->ApFunction = (VOID *)(UINTN)InitializeCpuProcedure;
}
729
730/**
731 Restore SMM Configuration in S3 boot path.
732
733**/
734VOID
735RestoreSmmConfigurationInS3 (
736 VOID
737 )
738{
739 if (!mAcpiS3Enable) {
740 return;
741 }
742
743 //
744 // Restore SMM Configuration in S3 boot path.
745 //
746 if (mRestoreSmmConfigurationInS3) {
747 //
748 // Need make sure gSmst is correct because below function may use them.
749 //
750 gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
751 gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
752 gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
753 gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
754 gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;
755
756 //
757 // Configure SMM Code Access Check feature if available.
758 //
759 ConfigSmmCodeAccessCheck ();
760
761 SmmCpuFeaturesCompleteSmmReadyToLock ();
762
763 mRestoreSmmConfigurationInS3 = FALSE;
764 }
765}
766
/**
  Perform SMM initialization for all processors in the S3 boot path.

  For a native platform, MP initialization in the S3 boot path is also performed in this function.
**/
VOID
EFIAPI
SmmRestoreCpu (
  VOID
  )
{
  SMM_S3_RESUME_STATE         *SmmS3ResumeState;
  IA32_DESCRIPTOR             Ia32Idtr;
  IA32_DESCRIPTOR             X64Idtr;
  IA32_IDT_GATE_DESCRIPTOR    IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
  EFI_STATUS                  Status;
  EDKII_PEI_MP_SERVICES2_PPI  *Mp2ServicePpi;

  DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));

  mSmmS3Flag = TRUE;

  //
  // See if there is enough context to resume PEI Phase
  //
  if (mSmmS3ResumeState == NULL) {
    DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
    CpuDeadLoop ();
  }

  SmmS3ResumeState = mSmmS3ResumeState;
  ASSERT (SmmS3ResumeState != NULL);

  //
  // Setup 64bit IDT in 64bit SMM env when called from 32bit PEI.
  // Note: 64bit PEI and 32bit DXE is not a supported combination.
  //
  if ((SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) && (FeaturePcdGet (PcdDxeIplSwitchToLongMode) == TRUE)) {
    //
    // Save the IA32 IDT Descriptor
    //
    AsmReadIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);

    //
    // Setup X64 IDT table
    //
    // NOTE(review): only the first 32 gates are zeroed and covered by the
    // limit, although IdtEntryTable has EXCEPTION_VECTOR_NUMBER entries —
    // presumably intentional (32 architectural exceptions); confirm.
    //
    ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
    X64Idtr.Base  = (UINTN)IdtEntryTable;
    X64Idtr.Limit = (UINT16)(sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
    AsmWriteIdtr ((IA32_DESCRIPTOR *)&X64Idtr);

    //
    // Setup the default exception handler
    //
    Status = InitializeCpuExceptionHandlers (NULL);
    ASSERT_EFI_ERROR (Status);

    //
    // Initialize Debug Agent to support source level debug
    //
    if (mSmmDebugAgentSupport) {
      InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
    }
  }

  //
  // Record the BSP's APIC ID so InitializeCpuProcedure() can tell BSP from APs.
  //
  mBspApicId = GetApicId ();
  //
  // Skip AP initialization if mAcpiCpuData is not valid
  //
  if (mAcpiCpuData.NumberOfCpus > 0) {
    if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
      ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
    } else {
      ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
    }

    mNumberToFinish = (UINT32)mNumberOfCpus;

    //
    // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
    //
    mInitApsAfterSmmBaseReloc = FALSE;

    if (mSmmS3ResumeState->MpService2Ppi != 0) {
      //
      // Let the PEI MP Services 2 PPI run the init procedure on every CPU.
      //
      Mp2ServicePpi = (EDKII_PEI_MP_SERVICES2_PPI *)(UINTN)mSmmS3ResumeState->MpService2Ppi;
      Mp2ServicePpi->StartupAllCPUs (Mp2ServicePpi, InitializeCpuProcedure, 0, NULL);
    } else {
      //
      // Native MP path: relocate the AP startup code, wake the APs, and run
      // the same procedure on the BSP.
      //
      PrepareApStartupVector (mAcpiCpuData.StartupVector);
      //
      // Send INIT IPI - SIPI to all APs
      //
      SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
      InitializeCpuProcedure (NULL);
    }
  } else {
    InitializeCpuProcedure (NULL);
  }

  //
  // Set a flag to restore SMM configuration in S3 path.
  //
  mRestoreSmmConfigurationInS3 = TRUE;

  DEBUG ((DEBUG_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
  DEBUG ((DEBUG_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));

  //
  // If SMM is in 32-bit mode or PcdDxeIplSwitchToLongMode is FALSE, then use SwitchStack() to resume PEI Phase.
  // Note: 64bit PEI and 32bit DXE is not a supported combination.
  //
  if ((SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) || (FeaturePcdGet (PcdDxeIplSwitchToLongMode) == FALSE)) {
    DEBUG ((DEBUG_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));

    SwitchStack (
      (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
      (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
  //
  if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
    DEBUG ((DEBUG_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
    //
    // Disable interrupt of Debug timer, since new IDT table is for IA32 and will not work in long mode.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);
    //
    // Restore IA32 IDT table
    //
    AsmWriteIdtr ((IA32_DESCRIPTOR *)&Ia32Idtr);
    AsmDisablePaging64 (
      SmmS3ResumeState->ReturnCs,
      (UINT32)SmmS3ResumeState->ReturnEntryPoint,
      (UINT32)SmmS3ResumeState->ReturnContext1,
      (UINT32)SmmS3ResumeState->ReturnContext2,
      (UINT32)SmmS3ResumeState->ReturnStackPointer
      );
  }

  //
  // Can not resume PEI Phase
  //
  DEBUG ((DEBUG_ERROR, "No context to return to PEI Phase\n"));
  CpuDeadLoop ();
}
919
/**
  Initialize SMM S3 resume state structure used during S3 Resume.

  @param[in] Cr3  The base address of the page tables to use in SMM.

**/
VOID
InitSmmS3ResumeState (
  IN UINT32  Cr3
  )
{
  VOID                  *GuidHob;
  EFI_SMRAM_DESCRIPTOR  *SmramDescriptor;
  SMM_S3_RESUME_STATE   *SmmS3ResumeState;
  EFI_PHYSICAL_ADDRESS  Address;
  EFI_STATUS            Status;

  if (!mAcpiS3Enable) {
    return;
  }

  GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
  if (GuidHob == NULL) {
    //
    // Without this HOB there is no SMRAM region to host the resume state;
    // S3 resume cannot work, so halt hard.
    //
    DEBUG ((
      DEBUG_ERROR,
      "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
      __func__,
      &gEfiAcpiVariableGuid
      ));
    CpuDeadLoop ();
  } else {
    SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *)GET_GUID_HOB_DATA (GuidHob);

    DEBUG ((DEBUG_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
    DEBUG ((DEBUG_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));

    //
    // The resume-state structure lives at the start of the SMRAM region
    // described by the HOB.
    //
    SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
    ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));

    mSmmS3ResumeState      = SmmS3ResumeState;
    SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;

    // PEI's S3 resume code will call back into SmmRestoreCpu() via this field.
    SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;

    SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
    SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
    if (SmmS3ResumeState->SmmS3StackBase == 0) {
      // Allocation failure is recorded as a zero-sized stack rather than asserted.
      SmmS3ResumeState->SmmS3StackSize = 0;
    }

    SmmS3ResumeState->SmmS3Cr0 = (UINT32)AsmReadCr0 ();
    SmmS3ResumeState->SmmS3Cr3 = Cr3;
    SmmS3ResumeState->SmmS3Cr4 = (UINT32)AsmReadCr4 ();

    //
    // Record whether this SMM image was built 64-bit or 32-bit
    // (compile-time constant comparisons).
    //
    if (sizeof (UINTN) == sizeof (UINT64)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
    }

    if (sizeof (UINTN) == sizeof (UINT32)) {
      SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
    }

    //
    // Patch SmmS3ResumeState->SmmS3Cr3
    //
    InitSmmS3Cr3 ();
  }

  //
  // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
  // protected mode on S3 path
  //
  Address = BASE_4GB - 1;
  Status  = gBS->AllocatePages (
                   AllocateMaxAddress,
                   EfiACPIMemoryNVS,
                   EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
                   &Address
                   );
  ASSERT_EFI_ERROR (Status);
  mApHltLoopCode = (UINT8 *)(UINTN)Address;
}
1002
1003/**
1004 Copy register table from non-SMRAM into SMRAM.
1005
1006 @param[in] DestinationRegisterTableList Points to destination register table.
1007 @param[in] SourceRegisterTableList Points to source register table.
1008 @param[in] NumberOfCpus Number of CPUs.
1009
1010**/
1011VOID
1012CopyRegisterTable (
1013 IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
1014 IN CPU_REGISTER_TABLE *SourceRegisterTableList,
1015 IN UINT32 NumberOfCpus
1016 )
1017{
1018 UINTN Index;
1019 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
1020
1021 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1022 for (Index = 0; Index < NumberOfCpus; Index++) {
1023 if (DestinationRegisterTableList[Index].TableLength != 0) {
1024 DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);
1025 RegisterTableEntry = AllocateCopyPool (
1026 DestinationRegisterTableList[Index].AllocatedSize,
1027 (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
1028 );
1029 ASSERT (RegisterTableEntry != NULL);
1030 DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
1031 }
1032 }
1033}
1034
1035/**
1036 Check whether the register table is empty or not.
1037
1038 @param[in] RegisterTable Point to the register table.
1039 @param[in] NumberOfCpus Number of CPUs.
1040
1041 @retval TRUE The register table is empty.
1042 @retval FALSE The register table is not empty.
1043**/
1044BOOLEAN
1045IsRegisterTableEmpty (
1046 IN CPU_REGISTER_TABLE *RegisterTable,
1047 IN UINT32 NumberOfCpus
1048 )
1049{
1050 UINTN Index;
1051
1052 if (RegisterTable != NULL) {
1053 for (Index = 0; Index < NumberOfCpus; Index++) {
1054 if (RegisterTable[Index].TableLength != 0) {
1055 return FALSE;
1056 }
1057 }
1058 }
1059
1060 return TRUE;
1061}
1062
1063/**
1064 Copy the data used to initialize processor register into SMRAM.
1065
1066 @param[in,out] CpuFeatureInitDataDst Pointer to the destination CPU_FEATURE_INIT_DATA structure.
1067 @param[in] CpuFeatureInitDataSrc Pointer to the source CPU_FEATURE_INIT_DATA structure.
1068
1069**/
1070VOID
1071CopyCpuFeatureInitDatatoSmram (
1072 IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst,
1073 IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc
1074 )
1075{
1076 CPU_STATUS_INFORMATION *CpuStatus;
1077
1078 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
1079 CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1080 ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);
1081
1082 CopyRegisterTable (
1083 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,
1084 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,
1085 mAcpiCpuData.NumberOfCpus
1086 );
1087 }
1088
1089 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
1090 CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
1091 ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);
1092
1093 CopyRegisterTable (
1094 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,
1095 (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,
1096 mAcpiCpuData.NumberOfCpus
1097 );
1098 }
1099
1100 CpuStatus = &CpuFeatureInitDataDst->CpuStatus;
1101 CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
1102
1103 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {
1104 CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1105 sizeof (UINT32) * CpuStatus->PackageCount,
1106 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage
1107 );
1108 ASSERT (CpuStatus->ThreadCountPerPackage != 0);
1109 }
1110
1111 if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {
1112 CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1113 sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
1114 (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore
1115 );
1116 ASSERT (CpuStatus->ThreadCountPerCore != 0);
1117 }
1118
1119 if (CpuFeatureInitDataSrc->ApLocation != 0) {
1120 CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
1121 mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
1122 (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation
1123 );
1124 ASSERT (CpuFeatureInitDataDst->ApLocation != 0);
1125 }
1126}
1127
1128/**
1129 Get ACPI CPU data.
1130
1131**/
1132VOID
1133GetAcpiCpuData (
1134 VOID
1135 )
1136{
1137 ACPI_CPU_DATA *AcpiCpuData;
1138 IA32_DESCRIPTOR *Gdtr;
1139 IA32_DESCRIPTOR *Idtr;
1140 VOID *GdtForAp;
1141 VOID *IdtForAp;
1142 VOID *MachineCheckHandlerForAp;
1143 CPU_STATUS_INFORMATION *CpuStatus;
1144
1145 if (!mAcpiS3Enable) {
1146 return;
1147 }
1148
1149 //
1150 // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
1151 //
1152 mAcpiCpuData.NumberOfCpus = 0;
1153
1154 //
1155 // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
1156 //
1157 AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
1158 if (AcpiCpuData == 0) {
1159 return;
1160 }
1161
1162 //
1163 // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
1164 //
1165 CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
1166
1167 mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
1168 ASSERT (mAcpiCpuData.MtrrTable != 0);
1169
1170 CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
1171
1172 mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1173 ASSERT (mAcpiCpuData.GdtrProfile != 0);
1174
1175 CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
1176
1177 mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
1178 ASSERT (mAcpiCpuData.IdtrProfile != 0);
1179
1180 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
1181
1182 //
1183 // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
1184 //
1185 Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
1186 Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
1187
1188 GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
1189 ASSERT (GdtForAp != NULL);
1190 IdtForAp = (VOID *)((UINTN)GdtForAp + (Gdtr->Limit + 1));
1191 MachineCheckHandlerForAp = (VOID *)((UINTN)IdtForAp + (Idtr->Limit + 1));
1192
1193 CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
1194 CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
1195 CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
1196
1197 Gdtr->Base = (UINTN)GdtForAp;
1198 Idtr->Base = (UINTN)IdtForAp;
1199 mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
1200
1201 ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));
1202
1203 if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {
1204 //
1205 // If the CPU features will not be initialized by CpuFeaturesPei module during
1206 // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
1207 // which will be consumed in SmmRestoreCpu during next S3 resume.
1208 //
1209 CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);
1210
1211 CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;
1212
1213 mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
1214 sizeof (UINT32) * CpuStatus->PackageCount *
1215 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1216 );
1217 ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
1218
1219 mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
1220 sizeof (UINT32) * CpuStatus->PackageCount *
1221 CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
1222 );
1223 ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
1224
1225 InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);
1226 }
1227}
1228
1229/**
1230 Get ACPI S3 enable flag.
1231
1232**/
1233VOID
1234GetAcpiS3EnableFlag (
1235 VOID
1236 )
1237{
1238 mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
1239}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette