VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c@ 106901

Last change on this file since 106901 was 105670, checked in by vboxsync, 6 months ago

Devices/EFI/FirmwareNew: Merge edk2-stable-202405 and make it build on aarch64, bugref:4643

  • Property svn:eol-style set to native
File size: 54.9 KB
Line 
1/** @file
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6Copyright (C) 2023 - 2024 Advanced Micro Devices, Inc. All rights reserved.<BR>
7
8SPDX-License-Identifier: BSD-2-Clause-Patent
9
10**/
11
12#include "PiSmmCpuDxeSmm.h"
13
//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along with its supporting fields.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  {
    { 0 }
  },                                            // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    // Self-referential: points at the SmmReservedSmramRegion member above.
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
  NULL,                                         // pointer to Ap Wrapper Func array
  { NULL, NULL },                               // List_Entry for Tokens.
};

CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// Per-CPU "first SMI init done" flags (allocated lazily in ExecuteFirstSmiInit)
// and the cached BSP APIC ID.
//
volatile BOOLEAN  *mSmmInitialized = NULL;
UINT32            mBspApicId       = 0;

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

UINTN    mSmmShadowStackSize;
BOOLEAN  mCetSupported = TRUE;

UINTN  mMaxNumberOfCpus = 0;
UINTN  mNumberOfCpus    = 0;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global used to cache SMM Debug Agent Supported or not
//
BOOLEAN  mSmmDebugAgentSupport = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

UINT8  mPhysicalAddressBits;
131
/**
  Initialize IDT to setup exception handlers for SMM.

  Allocates a page-aligned 32-entry IDT, temporarily loads it with
  interrupts disabled so InitializeCpuExceptionHandlers() can populate it,
  then restores the DXE IDT and the previous interrupt state.  The populated
  table remains available through gcSmiIdtr for use at SMI time.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table.  Interrupts must stay off
  // while a partially initialized IDT is live.
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
  SetInterruptState (InterruptState);
}
178
179/**
180 Search module name by input IP address and output it.
181
182 @param CallerIpAddress Caller instruction pointer.
183
184**/
185VOID
186DumpModuleInfoByIp (
187 IN UINTN CallerIpAddress
188 )
189{
190 UINTN Pe32Data;
191 VOID *PdbPointer;
192
193 //
194 // Find Image Base
195 //
196 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
197 if (Pe32Data != 0) {
198 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));
199 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);
200 if (PdbPointer != NULL) {
201 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
202 }
203 }
204}
205
206/**
207 Read information from the CPU save state.
208
209 @param This EFI_SMM_CPU_PROTOCOL instance
210 @param Width The number of bytes to read from the CPU save state.
211 @param Register Specifies the CPU register to read form the save state.
212 @param CpuIndex Specifies the zero-based index of the CPU save state.
213 @param Buffer Upon return, this holds the CPU register value read from the save state.
214
215 @retval EFI_SUCCESS The register was read from Save State
216 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
217 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
218
219**/
220EFI_STATUS
221EFIAPI
222SmmReadSaveState (
223 IN CONST EFI_SMM_CPU_PROTOCOL *This,
224 IN UINTN Width,
225 IN EFI_SMM_SAVE_STATE_REGISTER Register,
226 IN UINTN CpuIndex,
227 OUT VOID *Buffer
228 )
229{
230 EFI_STATUS Status;
231
232 //
233 // Retrieve pointer to the specified CPU's SMM Save State buffer
234 //
235 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
236 return EFI_INVALID_PARAMETER;
237 }
238
239 //
240 // The SpeculationBarrier() call here is to ensure the above check for the
241 // CpuIndex has been completed before the execution of subsequent codes.
242 //
243 SpeculationBarrier ();
244
245 //
246 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
247 //
248 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
249 //
250 // The pseudo-register only supports the 64-bit size specified by Width.
251 //
252 if (Width != sizeof (UINT64)) {
253 return EFI_INVALID_PARAMETER;
254 }
255
256 //
257 // If the processor is in SMM at the time the SMI occurred,
258 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
259 // Otherwise, EFI_NOT_FOUND is returned.
260 //
261 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
262 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
263 return EFI_SUCCESS;
264 } else {
265 return EFI_NOT_FOUND;
266 }
267 }
268
269 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
270 return EFI_INVALID_PARAMETER;
271 }
272
273 Status = MmSaveStateReadRegister (CpuIndex, Register, Width, Buffer);
274
275 return Status;
276}
277
278/**
279 Write data to the CPU save state.
280
281 @param This EFI_SMM_CPU_PROTOCOL instance
282 @param Width The number of bytes to read from the CPU save state.
283 @param Register Specifies the CPU register to write to the save state.
284 @param CpuIndex Specifies the zero-based index of the CPU save state
285 @param Buffer Upon entry, this holds the new CPU register value.
286
287 @retval EFI_SUCCESS The register was written from Save State
288 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
289 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
290
291**/
292EFI_STATUS
293EFIAPI
294SmmWriteSaveState (
295 IN CONST EFI_SMM_CPU_PROTOCOL *This,
296 IN UINTN Width,
297 IN EFI_SMM_SAVE_STATE_REGISTER Register,
298 IN UINTN CpuIndex,
299 IN CONST VOID *Buffer
300 )
301{
302 EFI_STATUS Status;
303
304 //
305 // Retrieve pointer to the specified CPU's SMM Save State buffer
306 //
307 if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
308 return EFI_INVALID_PARAMETER;
309 }
310
311 //
312 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
313 //
314 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
315 return EFI_SUCCESS;
316 }
317
318 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
319 return EFI_INVALID_PARAMETER;
320 }
321
322 Status = MmSaveStateWriteRegister (CpuIndex, Register, Width, Buffer);
323
324 return Status;
325}
326
327/**
328 Initialize SMM environment.
329
330**/
331VOID
332InitializeSmm (
333 VOID
334 )
335{
336 UINT32 ApicId;
337 UINTN Index;
338 BOOLEAN IsBsp;
339
340 ApicId = GetApicId ();
341
342 IsBsp = (BOOLEAN)(mBspApicId == ApicId);
343
344 ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
345
346 for (Index = 0; Index < mNumberOfCpus; Index++) {
347 if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
348 PERF_CODE (
349 MpPerfBegin (Index, SMM_MP_PERF_PROCEDURE_ID (InitializeSmm));
350 );
351 //
352 // Initialize SMM specific features on the currently executing CPU
353 //
354 SmmCpuFeaturesInitializeProcessor (
355 Index,
356 IsBsp,
357 gSmmCpuPrivate->ProcessorInfo,
358 &mCpuHotPlugData
359 );
360
361 if (!mSmmS3Flag) {
362 //
363 // Check XD and BTS features on each processor on normal boot
364 //
365 CheckFeatureSupported ();
366 } else if (IsBsp) {
367 //
368 // BSP rebase is already done above.
369 // Initialize private data during S3 resume
370 //
371 InitializeMpSyncData ();
372 }
373
374 PERF_CODE (
375 MpPerfEnd (Index, SMM_MP_PERF_PROCEDURE_ID (InitializeSmm));
376 );
377
378 return;
379 }
380 }
381
382 ASSERT (FALSE);
383}
384
/**
  Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.

**/
VOID
ExecuteFirstSmiInit (
  VOID
  )
{
  UINTN  Index;

  PERF_FUNCTION_BEGIN ();

  //
  // Lazily allocate one "init done" flag per possible CPU.
  //
  if (mSmmInitialized == NULL) {
    mSmmInitialized = (BOOLEAN *)AllocatePool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  }

  ASSERT (mSmmInitialized != NULL);
  if (mSmmInitialized == NULL) {
    PERF_FUNCTION_END ();
    return;
  }

  //
  // Reset the mSmmInitialized to false.
  //
  ZeroMem ((VOID *)mSmmInitialized, sizeof (BOOLEAN) * mMaxNumberOfCpus);

  //
  // Get the BSP ApicId.
  //
  mBspApicId = GetApicId ();

  //
  // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) for SMM init
  //
  SendSmiIpi (mBspApicId);
  SendSmiIpiAllExcludingSelf ();

  //
  // Wait for all processors to finish its 1st SMI.  This is a deliberate
  // busy-wait with no timeout on the volatile mSmmInitialized flags —
  // presumably each flag is set by the corresponding CPU's first-SMI
  // handler (not visible in this file; confirm against the SMI entry code).
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    while (!(BOOLEAN)mSmmInitialized[Index]) {
    }
  }

  PERF_FUNCTION_END ();
}
434
/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
 **/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  //
  // Copy the ACPI CPU (S3) data into SMRAM before lock-down.
  //
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}
468
469/**
470 Function to compare 2 SMM_BASE_HOB_DATA pointer based on ProcessorIndex.
471
472 @param[in] Buffer1 pointer to SMM_BASE_HOB_DATA poiner to compare
473 @param[in] Buffer2 pointer to second SMM_BASE_HOB_DATA pointer to compare
474
475 @retval 0 Buffer1 equal to Buffer2
476 @retval <0 Buffer1 is less than Buffer2
477 @retval >0 Buffer1 is greater than Buffer2
478**/
479INTN
480EFIAPI
481SmBaseHobCompare (
482 IN CONST VOID *Buffer1,
483 IN CONST VOID *Buffer2
484 )
485{
486 if ((*(SMM_BASE_HOB_DATA **)Buffer1)->ProcessorIndex > (*(SMM_BASE_HOB_DATA **)Buffer2)->ProcessorIndex) {
487 return 1;
488 } else if ((*(SMM_BASE_HOB_DATA **)Buffer1)->ProcessorIndex < (*(SMM_BASE_HOB_DATA **)Buffer2)->ProcessorIndex) {
489 return -1;
490 }
491
492 return 0;
493}
494
/**
  Extract SmBase for all CPU from SmmBase HOB.

  @param[in]  MaxNumberOfCpus       Max NumberOfCpus.

  @param[out] AllocatedSmBaseBuffer Pointer to SmBase Buffer allocated
                                    by this function. Only set if the
                                    function returns EFI_SUCCESS.

  @retval EFI_SUCCESS           SmBase Buffer output successfully.
  @retval EFI_OUT_OF_RESOURCES  Memory allocation failed.
  @retval EFI_NOT_FOUND         gSmmBaseHobGuid was never created.
**/
STATIC
EFI_STATUS
GetSmBase (
  IN  UINTN  MaxNumberOfCpus,
  OUT UINTN  **AllocatedSmBaseBuffer
  )
{
  UINTN              HobCount;
  EFI_HOB_GUID_TYPE  *GuidHob;
  SMM_BASE_HOB_DATA  *SmmBaseHobData;
  UINTN              NumberOfProcessors;
  SMM_BASE_HOB_DATA  **SmBaseHobs;
  UINTN              *SmBaseBuffer;
  UINTN              HobIndex;
  UINTN              SortBuffer;
  UINTN              ProcessorIndex;
  UINT64             PrevProcessorIndex;
  EFI_HOB_GUID_TYPE  *FirstSmmBaseGuidHob;

  SmmBaseHobData     = NULL;
  HobIndex           = 0;
  ProcessorIndex     = 0;
  HobCount           = 0;
  NumberOfProcessors = 0;

  FirstSmmBaseGuidHob = GetFirstGuidHob (&gSmmBaseHobGuid);
  if (FirstSmmBaseGuidHob == NULL) {
    return EFI_NOT_FOUND;
  }

  //
  // First pass: count the SmmBase HOBs and sum the processors they cover,
  // stopping once enough processors have been accounted for.
  //
  GuidHob = FirstSmmBaseGuidHob;
  while (GuidHob != NULL) {
    HobCount++;
    SmmBaseHobData      = GET_GUID_HOB_DATA (GuidHob);
    NumberOfProcessors += SmmBaseHobData->NumberOfProcessors;

    if (NumberOfProcessors >= MaxNumberOfCpus) {
      break;
    }

    GuidHob = GetNextGuidHob (&gSmmBaseHobGuid, GET_NEXT_HOB (GuidHob));
  }

  //
  // The HOBs must cover exactly MaxNumberOfCpus processors; a mismatch is an
  // unrecoverable platform configuration error, so halt rather than continue
  // with a partially relocated SMBASE set.
  //
  ASSERT (NumberOfProcessors == MaxNumberOfCpus);
  if (NumberOfProcessors != MaxNumberOfCpus) {
    CpuDeadLoop ();
  }

  SmBaseHobs = AllocatePool (sizeof (SMM_BASE_HOB_DATA *) * HobCount);
  if (SmBaseHobs == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // Record each SmmBaseHob pointer in the SmBaseHobs.
  // The FirstSmmBaseGuidHob is to speed up this while-loop
  // without needing to look for SmBaseHob from beginning.
  //
  GuidHob = FirstSmmBaseGuidHob;
  while (HobIndex < HobCount) {
    SmBaseHobs[HobIndex++] = GET_GUID_HOB_DATA (GuidHob);
    GuidHob                = GetNextGuidHob (&gSmmBaseHobGuid, GET_NEXT_HOB (GuidHob));
  }

  SmBaseBuffer = (UINTN *)AllocatePool (sizeof (UINTN) * (MaxNumberOfCpus));
  ASSERT (SmBaseBuffer != NULL);
  if (SmBaseBuffer == NULL) {
    FreePool (SmBaseHobs);
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // Sort the HOBs by starting ProcessorIndex, then flatten their SmBase
  // values into one per-CPU array.
  //
  QuickSort (SmBaseHobs, HobCount, sizeof (SMM_BASE_HOB_DATA *), (BASE_SORT_COMPARE)SmBaseHobCompare, &SortBuffer);
  PrevProcessorIndex = 0;
  for (HobIndex = 0; HobIndex < HobCount; HobIndex++) {
    //
    // Make sure no overlap and no gap in the CPU range covered by each HOB
    //
    ASSERT (SmBaseHobs[HobIndex]->ProcessorIndex == PrevProcessorIndex);

    //
    // Cache each SmBase in order.
    //
    for (ProcessorIndex = 0; ProcessorIndex < SmBaseHobs[HobIndex]->NumberOfProcessors; ProcessorIndex++) {
      SmBaseBuffer[PrevProcessorIndex + ProcessorIndex] = (UINTN)SmBaseHobs[HobIndex]->SmBase[ProcessorIndex];
    }

    PrevProcessorIndex += SmBaseHobs[HobIndex]->NumberOfProcessors;
  }

  FreePool (SmBaseHobs);
  *AllocatedSmBaseBuffer = SmBaseBuffer;
  return EFI_SUCCESS;
}
601
602/**
603 Function to compare 2 MP_INFORMATION2_HOB_DATA pointer based on ProcessorIndex.
604
605 @param[in] Buffer1 pointer to MP_INFORMATION2_HOB_DATA poiner to compare
606 @param[in] Buffer2 pointer to second MP_INFORMATION2_HOB_DATA pointer to compare
607
608 @retval 0 Buffer1 equal to Buffer2
609 @retval <0 Buffer1 is less than Buffer2
610 @retval >0 Buffer1 is greater than Buffer2
611**/
612INTN
613EFIAPI
614MpInformation2HobCompare (
615 IN CONST VOID *Buffer1,
616 IN CONST VOID *Buffer2
617 )
618{
619 if ((*(MP_INFORMATION2_HOB_DATA **)Buffer1)->ProcessorIndex > (*(MP_INFORMATION2_HOB_DATA **)Buffer2)->ProcessorIndex) {
620 return 1;
621 } else if ((*(MP_INFORMATION2_HOB_DATA **)Buffer1)->ProcessorIndex < (*(MP_INFORMATION2_HOB_DATA **)Buffer2)->ProcessorIndex) {
622 return -1;
623 }
624
625 return 0;
626}
627
628/**
629 Extract NumberOfCpus, MaxNumberOfCpus and EFI_PROCESSOR_INFORMATION for all CPU from gEfiMpServiceProtocolGuid.
630
631 @param[out] NumberOfCpus Pointer to NumberOfCpus.
632 @param[out] MaxNumberOfCpus Pointer to MaxNumberOfCpus.
633
634 @retval ProcessorInfo Pointer to EFI_PROCESSOR_INFORMATION buffer.
635**/
636EFI_PROCESSOR_INFORMATION *
637GetMpInformationFromMpServices (
638 OUT UINTN *NumberOfCpus,
639 OUT UINTN *MaxNumberOfCpus
640 )
641{
642 EFI_STATUS Status;
643 UINTN Index;
644 UINTN NumberOfEnabledProcessors;
645 UINTN NumberOfProcessors;
646 EFI_MP_SERVICES_PROTOCOL *MpService;
647 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
648
649 if ((NumberOfCpus == NULL) || (MaxNumberOfCpus == NULL)) {
650 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
651 return NULL;
652 }
653
654 ProcessorInfo = NULL;
655 *NumberOfCpus = 0;
656 *MaxNumberOfCpus = 0;
657
658 /// Get the MP Services Protocol
659 Status = gBS->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpService);
660 if (EFI_ERROR (Status)) {
661 ASSERT_EFI_ERROR (Status);
662 return NULL;
663 }
664
665 /// Get the number of processors
666 Status = MpService->GetNumberOfProcessors (MpService, &NumberOfProcessors, &NumberOfEnabledProcessors);
667 if (EFI_ERROR (Status)) {
668 ASSERT_EFI_ERROR (Status);
669 return NULL;
670 }
671
672 ASSERT (NumberOfProcessors <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
673
674 /// Allocate buffer for processor information
675 ProcessorInfo = AllocateZeroPool (sizeof (EFI_PROCESSOR_INFORMATION) * NumberOfProcessors);
676 if (ProcessorInfo == NULL) {
677 ASSERT_EFI_ERROR (EFI_OUT_OF_RESOURCES);
678 return NULL;
679 }
680
681 /// Get processor information
682 for (Index = 0; Index < NumberOfProcessors; Index++) {
683 Status = MpService->GetProcessorInfo (MpService, Index | CPU_V2_EXTENDED_TOPOLOGY, &ProcessorInfo[Index]);
684 if (EFI_ERROR (Status)) {
685 FreePool (ProcessorInfo);
686 DEBUG ((DEBUG_ERROR, "%a: Failed to get processor information for processor %d\n", __func__, Index));
687 ASSERT_EFI_ERROR (Status);
688 return NULL;
689 }
690 }
691
692 *NumberOfCpus = NumberOfEnabledProcessors;
693
694 ASSERT (*NumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
695 //
696 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
697 //
698 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
699 *MaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
700 } else {
701 *MaxNumberOfCpus = *NumberOfCpus;
702 }
703
704 return ProcessorInfo;
705}
706
707/**
708 Extract NumberOfCpus, MaxNumberOfCpus and EFI_PROCESSOR_INFORMATION for all CPU from MpInformation2 HOB.
709
710 @param[out] NumberOfCpus Pointer to NumberOfCpus.
711 @param[out] MaxNumberOfCpus Pointer to MaxNumberOfCpus.
712
713 @retval ProcessorInfo Pointer to EFI_PROCESSOR_INFORMATION buffer.
714**/
715EFI_PROCESSOR_INFORMATION *
716GetMpInformation (
717 OUT UINTN *NumberOfCpus,
718 OUT UINTN *MaxNumberOfCpus
719 )
720{
721 EFI_HOB_GUID_TYPE *GuidHob;
722 EFI_HOB_GUID_TYPE *FirstMpInfo2Hob;
723 MP_INFORMATION2_HOB_DATA *MpInformation2HobData;
724 UINTN HobCount;
725 UINTN HobIndex;
726 MP_INFORMATION2_HOB_DATA **MpInfo2Hobs;
727 UINTN SortBuffer;
728 UINTN ProcessorIndex;
729 UINT64 PrevProcessorIndex;
730 MP_INFORMATION2_ENTRY *MpInformation2Entry;
731 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
732
733 GuidHob = NULL;
734 MpInformation2HobData = NULL;
735 FirstMpInfo2Hob = NULL;
736 MpInfo2Hobs = NULL;
737 HobIndex = 0;
738 HobCount = 0;
739
740 FirstMpInfo2Hob = GetFirstGuidHob (&gMpInformation2HobGuid);
741 if (FirstMpInfo2Hob == NULL) {
742 DEBUG ((DEBUG_INFO, "%a: [INFO] gMpInformation2HobGuid HOB not found.\n", __func__));
743 return GetMpInformationFromMpServices (NumberOfCpus, MaxNumberOfCpus);
744 }
745
746 GuidHob = FirstMpInfo2Hob;
747 while (GuidHob != NULL) {
748 MpInformation2HobData = GET_GUID_HOB_DATA (GuidHob);
749
750 //
751 // This is the last MpInformationHob in the HOB list.
752 //
753 if (MpInformation2HobData->NumberOfProcessors == 0) {
754 ASSERT (HobCount != 0);
755 break;
756 }
757
758 HobCount++;
759 *NumberOfCpus += MpInformation2HobData->NumberOfProcessors;
760 GuidHob = GetNextGuidHob (&gMpInformation2HobGuid, GET_NEXT_HOB (GuidHob));
761 }
762
763 ASSERT (*NumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
764
765 //
766 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
767 //
768 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
769 *MaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
770 } else {
771 *MaxNumberOfCpus = *NumberOfCpus;
772 }
773
774 MpInfo2Hobs = AllocatePool (sizeof (MP_INFORMATION2_HOB_DATA *) * HobCount);
775 ASSERT (MpInfo2Hobs != NULL);
776 if (MpInfo2Hobs == NULL) {
777 return NULL;
778 }
779
780 //
781 // Record each MpInformation2Hob pointer in the MpInfo2Hobs.
782 // The FirstMpInfo2Hob is to speed up this while-loop without
783 // needing to look for MpInfo2Hob from beginning.
784 //
785 GuidHob = FirstMpInfo2Hob;
786 while (HobIndex < HobCount) {
787 MpInfo2Hobs[HobIndex++] = GET_GUID_HOB_DATA (GuidHob);
788 GuidHob = GetNextGuidHob (&gMpInformation2HobGuid, GET_NEXT_HOB (GuidHob));
789 }
790
791 ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * (*MaxNumberOfCpus));
792 ASSERT (ProcessorInfo != NULL);
793 if (ProcessorInfo == NULL) {
794 FreePool (MpInfo2Hobs);
795 return NULL;
796 }
797
798 QuickSort (MpInfo2Hobs, HobCount, sizeof (MP_INFORMATION2_HOB_DATA *), (BASE_SORT_COMPARE)MpInformation2HobCompare, &SortBuffer);
799 PrevProcessorIndex = 0;
800 for (HobIndex = 0; HobIndex < HobCount; HobIndex++) {
801 //
802 // Make sure no overlap and no gap in the CPU range covered by each HOB
803 //
804 ASSERT (MpInfo2Hobs[HobIndex]->ProcessorIndex == PrevProcessorIndex);
805
806 //
807 // Cache each EFI_PROCESSOR_INFORMATION in order.
808 //
809 for (ProcessorIndex = 0; ProcessorIndex < MpInfo2Hobs[HobIndex]->NumberOfProcessors; ProcessorIndex++) {
810 MpInformation2Entry = GET_MP_INFORMATION_ENTRY (MpInfo2Hobs[HobIndex], ProcessorIndex);
811 CopyMem (
812 &ProcessorInfo[PrevProcessorIndex + ProcessorIndex],
813 &MpInformation2Entry->ProcessorInfo,
814 sizeof (EFI_PROCESSOR_INFORMATION)
815 );
816 }
817
818 PrevProcessorIndex += MpInfo2Hobs[HobIndex]->NumberOfProcessors;
819 }
820
821 FreePool (MpInfo2Hobs);
822 return ProcessorInfo;
823}
824
825/**
826 The module Entry Point of the CPU SMM driver.
827
828 @param ImageHandle The firmware allocated handle for the EFI image.
829 @param SystemTable A pointer to the EFI System Table.
830
831 @retval EFI_SUCCESS The entry point is executed successfully.
832 @retval Other Some error occurs when executing this entry point.
833
834**/
835EFI_STATUS
836EFIAPI
837PiCpuSmmEntry (
838 IN EFI_HANDLE ImageHandle,
839 IN EFI_SYSTEM_TABLE *SystemTable
840 )
841{
842 EFI_STATUS Status;
843 UINTN Index;
844 UINTN TileCodeSize;
845 UINTN TileDataSize;
846 UINTN TileSize;
847 UINT8 *Stacks;
848 VOID *Registration;
849 UINT32 RegEax;
850 UINT32 RegEbx;
851 UINT32 RegEcx;
852 UINT32 RegEdx;
853 UINTN FamilyId;
854 UINTN ModelId;
855 UINT32 Cr3;
856
857 PERF_FUNCTION_BEGIN ();
858
859 //
860 // Initialize address fixup
861 //
862 PiSmmCpuSmiEntryFixupAddress ();
863
864 //
865 // Initialize Debug Agent to support source level debug in SMM code
866 //
867 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, &mSmmDebugAgentSupport, NULL);
868
869 //
870 // Report the start of CPU SMM initialization.
871 //
872 REPORT_STATUS_CODE (
873 EFI_PROGRESS_CODE,
874 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
875 );
876
877 //
878 // Find out SMRR Base and SMRR Size
879 //
880 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
881
882 //
883 // Retrive NumberOfProcessors, MaxNumberOfCpus and EFI_PROCESSOR_INFORMATION for all CPU from MpInformation2 HOB.
884 //
885 gSmmCpuPrivate->ProcessorInfo = GetMpInformation (&mNumberOfCpus, &mMaxNumberOfCpus);
886 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
887
888 //
889 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
890 // A constant BSP index makes no sense because it may be hot removed.
891 //
892 DEBUG_CODE_BEGIN ();
893 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
894 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
895 }
896
897 DEBUG_CODE_END ();
898
899 //
900 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
901 //
902 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
903 DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
904
905 //
906 // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
907 // Make sure AddressEncMask is contained to smallest supported address field.
908 //
909 mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
910 DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
911
912 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
913
914 PERF_CODE (
915 InitializeMpPerf (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
916 );
917
918 //
919 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
920 // allocated buffer. The minimum size of this buffer for a uniprocessor system
921 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
922 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
923 // then the SMI entry point and the CPU save state areas can be tiles to minimize
924 // the total amount SMRAM required for all the CPUs. The tile size can be computed
925 // by adding the // CPU save state size, any extra CPU specific context, and
926 // the size of code that must be placed at the SMI entry point to transfer
927 // control to a C function in the native SMM execution mode. This size is
928 // rounded up to the nearest power of 2 to give the tile size for a each CPU.
929 // The total amount of memory required is the maximum number of CPUs that
930 // platform supports times the tile size. The picture below shows the tiling,
931 // where m is the number of tiles that fit in 32KB.
932 //
933 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
934 // | CPU m+1 Save State |
935 // +-----------------------------+
936 // | CPU m+1 Extra Data |
937 // +-----------------------------+
938 // | Padding |
939 // +-----------------------------+
940 // | CPU 2m SMI Entry |
941 // +#############################+ <-- Base of allocated buffer + 64 KB
942 // | CPU m-1 Save State |
943 // +-----------------------------+
944 // | CPU m-1 Extra Data |
945 // +-----------------------------+
946 // | Padding |
947 // +-----------------------------+
948 // | CPU 2m-1 SMI Entry |
949 // +=============================+ <-- 2^n offset from Base of allocated buffer
950 // | . . . . . . . . . . . . |
951 // +=============================+ <-- 2^n offset from Base of allocated buffer
952 // | CPU 2 Save State |
953 // +-----------------------------+
954 // | CPU 2 Extra Data |
955 // +-----------------------------+
956 // | Padding |
957 // +-----------------------------+
958 // | CPU m+1 SMI Entry |
959 // +=============================+ <-- Base of allocated buffer + 32 KB
960 // | CPU 1 Save State |
961 // +-----------------------------+
962 // | CPU 1 Extra Data |
963 // +-----------------------------+
964 // | Padding |
965 // +-----------------------------+
966 // | CPU m SMI Entry |
967 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
968 // | CPU 0 Save State |
969 // +-----------------------------+
970 // | CPU 0 Extra Data |
971 // +-----------------------------+
972 // | Padding |
973 // +-----------------------------+
974 // | CPU m-1 SMI Entry |
975 // +=============================+ <-- 2^n offset from Base of allocated buffer
976 // | . . . . . . . . . . . . |
977 // +=============================+ <-- 2^n offset from Base of allocated buffer
978 // | Padding |
979 // +-----------------------------+
980 // | CPU 1 SMI Entry |
981 // +=============================+ <-- 2^n offset from Base of allocated buffer
982 // | Padding |
983 // +-----------------------------+
984 // | CPU 0 SMI Entry |
985 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
986 //
987
988 //
989 // Retrieve CPU Family
990 //
991 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
992 FamilyId = (RegEax >> 8) & 0xf;
993 ModelId = (RegEax >> 4) & 0xf;
994 if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {
995 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
996 }
997
998 RegEdx = 0;
999 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
1000 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
1001 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
1002 }
1003
1004 //
1005 // Determine the mode of the CPU at the time an SMI occurs
1006 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
1007 // Volume 3C, Section 34.4.1.1
1008 //
1009 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
1010 if ((RegEdx & BIT29) != 0) {
1011 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
1012 }
1013
1014 if (FamilyId == 0x06) {
1015 if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {
1016 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
1017 }
1018 }
1019
1020 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
1021 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
1022 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
1023 if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
1024 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
1025 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
1026 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
1027 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
1028 if ((RegEcx & CPUID_CET_SS) == 0) {
1029 mCetSupported = FALSE;
1030 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
1031 }
1032
1033 if (mCetSupported) {
1034 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
1035 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
1036 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
1037 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
1038 AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
1039 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
1040 }
1041 } else {
1042 mCetSupported = FALSE;
1043 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
1044 }
1045 } else {
1046 mCetSupported = FALSE;
1047 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
1048 }
1049
1050 //
1051 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
1052 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
1053 // This size is rounded up to nearest power of 2.
1054 //
1055 TileCodeSize = GetSmiHandlerSize ();
1056 TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);
1057 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
1058 TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);
1059 TileSize = TileDataSize + TileCodeSize - 1;
1060 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
1061 DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
1062
1063 //
1064 // If the TileSize is larger than space available for the SMI Handler of
1065 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
1066 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
1067 // the SMI Handler size must be reduced or the size of the extra CPU specific
1068 // context must be reduced.
1069 //
1070 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
1071
1072 //
1073 // Check whether the Required TileSize is enough.
1074 //
1075 if (TileSize > SIZE_8KB) {
1076 DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));
1077 FreePool (gSmmCpuPrivate->ProcessorInfo);
1078 CpuDeadLoop ();
1079 return RETURN_BUFFER_TOO_SMALL;
1080 }
1081
1082 //
1083 // Retrieve the allocated SmmBase from gSmmBaseHobGuid. If found,
1084 // means the SmBase relocation has been done.
1085 //
1086 mCpuHotPlugData.SmBase = NULL;
1087 Status = GetSmBase (mMaxNumberOfCpus, &mCpuHotPlugData.SmBase);
1088 ASSERT (!EFI_ERROR (Status));
1089 if (EFI_ERROR (Status)) {
1090 CpuDeadLoop ();
1091 }
1092
1093 //
1094 // ASSERT SmBase has been relocated.
1095 //
1096 ASSERT (mCpuHotPlugData.SmBase != NULL);
1097
1098 //
1099 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
1100 //
1101 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
1102 ASSERT (gSmmCpuPrivate->Operation != NULL);
1103
1104 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
1105 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
1106
1107 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
1108 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
1109
1110 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
1111 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
1112
1113 //
1114 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
1115 //
1116 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
1117 ASSERT (mCpuHotPlugData.ApicId != NULL);
1118 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
1119
1120 //
1121 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
1122 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
1123 // size for each CPU in the platform
1124 //
1125 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1126 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);
1127 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
1128 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
1129
1130 if (Index < mNumberOfCpus) {
1131 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
1132
1133 DEBUG ((
1134 DEBUG_INFO,
1135 "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
1136 Index,
1137 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
1138 mCpuHotPlugData.SmBase[Index],
1139 gSmmCpuPrivate->CpuSaveState[Index],
1140 gSmmCpuPrivate->CpuSaveStateSize[Index]
1141 ));
1142 } else {
1143 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
1144 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
1145 }
1146 }
1147
1148 //
1149 // Allocate SMI stacks for all processors.
1150 //
1151 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
1152 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1153 //
1154 // SMM Stack Guard Enabled
1155 // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.
1156 //
1157 // +--------------------------------------------------+-----+--------------------------------------------------+
1158 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
1159 // +--------------------------------------------------+-----+--------------------------------------------------+
1160 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|
1161 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|
1162 // | | | |
1163 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|
1164 //
1165 mSmmStackSize += EFI_PAGES_TO_SIZE (2);
1166 }
1167
1168 mSmmShadowStackSize = 0;
1169 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1170 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
1171
1172 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1173 //
1174 // SMM Stack Guard Enabled
1175 // Append Shadow Stack after normal stack
1176 // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.
1177 //
1178 // |= Stacks
1179 // +--------------------------------------------------+---------------------------------------------------------------+
1180 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
1181 // +--------------------------------------------------+---------------------------------------------------------------+
1182 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|
1183 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
1184 // | |
1185 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
1186 //
1187 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
1188 } else {
1189 //
1190 // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)
1191 // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.
1192 // 1 more pages is allocated for each processor, it is known good stack.
1193 //
1194 //
1195 // |= Stacks
1196 // +-------------------------------------+--------------------------------------------------+
1197 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |
1198 // +-------------------------------------+--------------------------------------------------+
1199 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|
1200 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|
1201 // | |
1202 // |<-------------------------------- Processor N ----------------------------------------->|
1203 //
1204 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
1205 mSmmStackSize += EFI_PAGES_TO_SIZE (1);
1206 }
1207 }
1208
1209 Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
1210 ASSERT (Stacks != NULL);
1211 mSmmStackArrayBase = (UINTN)Stacks;
1212 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;
1213
1214 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
1215 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
1216 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
1217 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1218 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
1219 }
1220
1221 //
1222 // Initialize IDT
1223 //
1224 InitializeSmmIdt ();
1225
1226 //
1227 // SMM Time initialization
1228 //
1229 InitializeSmmTimer ();
1230
1231 //
1232 // Initialize MP globals
1233 //
1234 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);
1235
1236 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1237 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
1238 SetShadowStack (
1239 Cr3,
1240 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
1241 mSmmShadowStackSize
1242 );
1243 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1244 ConvertMemoryPageAttributes (
1245 Cr3,
1246 mPagingMode,
1247 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
1248 EFI_PAGES_TO_SIZE (1),
1249 EFI_MEMORY_RP,
1250 TRUE,
1251 NULL
1252 );
1253 }
1254 }
1255 }
1256
1257 //
1258 // For relocated SMBASE, some MSRs & CSRs are still required to be configured in SMM Mode for SMM Initialization.
1259 // Those MSRs & CSRs must be configured before normal SMI sources happen.
1260 // So, here is to issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
1261 //
1262 ExecuteFirstSmiInit ();
1263
1264 //
1265 // Call hook for BSP to perform extra actions in normal mode after all
1266 // SMM base addresses have been relocated on all CPUs
1267 //
1268 SmmCpuFeaturesSmmRelocationComplete ();
1269
1270 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
1271
1272 //
1273 // Fill in SMM Reserved Regions
1274 //
1275 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
1276 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
1277
1278 //
1279 // Install the SMM Configuration Protocol onto a new handle on the handle database.
1280 // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
1281 // to an SMRAM address will be present in the handle database
1282 //
1283 Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
1284 &gSmmCpuPrivate->SmmCpuHandle,
1285 &gEfiSmmConfigurationProtocolGuid,
1286 &gSmmCpuPrivate->SmmConfiguration,
1287 NULL
1288 );
1289 ASSERT_EFI_ERROR (Status);
1290
1291 //
1292 // Install the SMM CPU Protocol into SMM protocol database
1293 //
1294 Status = gSmst->SmmInstallProtocolInterface (
1295 &mSmmCpuHandle,
1296 &gEfiSmmCpuProtocolGuid,
1297 EFI_NATIVE_INTERFACE,
1298 &mSmmCpu
1299 );
1300 ASSERT_EFI_ERROR (Status);
1301
1302 //
1303 // Install the SMM Memory Attribute Protocol into SMM protocol database
1304 //
1305 Status = gSmst->SmmInstallProtocolInterface (
1306 &mSmmCpuHandle,
1307 &gEdkiiSmmMemoryAttributeProtocolGuid,
1308 EFI_NATIVE_INTERFACE,
1309 &mSmmMemoryAttribute
1310 );
1311 ASSERT_EFI_ERROR (Status);
1312
1313 //
1314 // Initialize global buffer for MM MP.
1315 //
1316 InitializeDataForMmMp ();
1317
1318 //
1319 // Initialize Package First Thread Index Info.
1320 //
1321 InitPackageFirstThreadIndexInfo ();
1322
1323 //
1324 // Install the SMM Mp Protocol into SMM protocol database
1325 //
1326 Status = gSmst->SmmInstallProtocolInterface (
1327 &mSmmCpuHandle,
1328 &gEfiMmMpProtocolGuid,
1329 EFI_NATIVE_INTERFACE,
1330 &mSmmMp
1331 );
1332 ASSERT_EFI_ERROR (Status);
1333
1334 //
1335 // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
1336 //
1337 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
1338 Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
1339 ASSERT_EFI_ERROR (Status);
1340 }
1341
1342 //
1343 // Initialize SMM CPU Services Support
1344 //
1345 Status = InitializeSmmCpuServices (mSmmCpuHandle);
1346 ASSERT_EFI_ERROR (Status);
1347
1348 //
1349 // register SMM Ready To Lock Protocol notification
1350 //
1351 Status = gSmst->SmmRegisterProtocolNotify (
1352 &gEfiSmmReadyToLockProtocolGuid,
1353 SmmReadyToLockEventNotify,
1354 &Registration
1355 );
1356 ASSERT_EFI_ERROR (Status);
1357
1358 //
1359 // Initialize SMM Profile feature
1360 //
1361 InitSmmProfile (Cr3);
1362
1363 GetAcpiS3EnableFlag ();
1364 InitSmmS3ResumeState (Cr3);
1365
1366 DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1367
1368 PERF_FUNCTION_END ();
1369 return EFI_SUCCESS;
1370}
1371
1372/**
1373 Function to compare 2 EFI_SMRAM_DESCRIPTOR based on CpuStart.
1374
1375 @param[in] Buffer1 pointer to Device Path poiner to compare
1376 @param[in] Buffer2 pointer to second DevicePath pointer to compare
1377
1378 @retval 0 Buffer1 equal to Buffer2
1379 @retval <0 Buffer1 is less than Buffer2
1380 @retval >0 Buffer1 is greater than Buffer2
1381**/
1382INTN
1383EFIAPI
1384CpuSmramRangeCompare (
1385 IN CONST VOID *Buffer1,
1386 IN CONST VOID *Buffer2
1387 )
1388{
1389 if (((EFI_SMRAM_DESCRIPTOR *)Buffer1)->CpuStart > ((EFI_SMRAM_DESCRIPTOR *)Buffer2)->CpuStart) {
1390 return 1;
1391 } else if (((EFI_SMRAM_DESCRIPTOR *)Buffer1)->CpuStart < ((EFI_SMRAM_DESCRIPTOR *)Buffer2)->CpuStart) {
1392 return -1;
1393 }
1394
1395 return 0;
1396}
1397
/**
  Find out SMRAM information including SMRR base and SMRR size.

  Queries the SMM Access2 protocol for the platform's SMRAM ranges, caches
  them (sorted by CpuStart) in mSmmCpuSmramRanges / mSmmCpuSmramRangeCount,
  picks the largest usable range between 1MB and SMRR_MAX_ADDRESS, and then
  greedily merges physically adjacent ranges so the returned SMRR region is
  as large and contiguous as possible.

  @param[out] SmrrBase  Base address of the selected contiguous SMRAM region.
  @param[out] SmrrSize  Size in bytes of the selected contiguous SMRAM region.

**/
VOID
FindSmramInfo (
  OUT UINT32  *SmrrBase,
  OUT UINT32  *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;
  EFI_SMRAM_DESCRIPTOR      SmramDescriptor;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information. The first call with Size == 0 is expected to
  // fail with EFI_BUFFER_TOO_SMALL and return the required buffer size.
  //
  Size   = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Sort the mSmmCpuSmramRanges by CpuStart. SmramDescriptor is only
  // scratch swap space for QuickSort.
  //
  QuickSort (mSmmCpuSmramRanges, mSmmCpuSmramRangeCount, sizeof (EFI_SMRAM_DESCRIPTOR), (BASE_SORT_COMPARE)CpuSmramRangeCompare, &SmramDescriptor);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize           = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

  //
  // Repeatedly grow [SmrrBase, SmrrBase + SmrrSize) by absorbing any range
  // that ends exactly at SmrrBase (extend downward) or starts exactly at
  // SmrrBase + SmrrSize (extend upward), until no adjacent range remains.
  //
  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
          (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
      {
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      }
    }
  } while (Found);

  DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}
1492
1493/**
1494Configure SMM Code Access Check feature on an AP.
1495SMM Feature Control MSR will be locked after configuration.
1496
1497@param[in,out] Buffer Pointer to private data buffer.
1498**/
1499VOID
1500EFIAPI
1501ConfigSmmCodeAccessCheckOnCurrentProcessor (
1502 IN OUT VOID *Buffer
1503 )
1504{
1505 UINTN CpuIndex;
1506 UINT64 SmmFeatureControlMsr;
1507 UINT64 NewSmmFeatureControlMsr;
1508
1509 //
1510 // Retrieve the CPU Index from the context passed in
1511 //
1512 CpuIndex = *(UINTN *)Buffer;
1513
1514 //
1515 // Get the current SMM Feature Control MSR value
1516 //
1517 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1518
1519 //
1520 // Compute the new SMM Feature Control MSR value
1521 //
1522 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1523 if (mSmmCodeAccessCheckEnable) {
1524 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1525 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1526 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1527 }
1528 }
1529
1530 //
1531 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1532 //
1533 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1534 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1535 }
1536
1537 //
1538 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1539 //
1540 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1541}
1542
/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.

  The BSP configures itself first, then dispatches each present AP one at a
  time. mConfigSmmCodeAccessCheckLock acts as a completion handshake: this
  function acquires the lock before starting a CPU, and the configured CPU
  releases it inside ConfigSmmCodeAccessCheckOnCurrentProcessor().
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  PERF_FUNCTION_BEGIN ();

  //
  // Check to see if the Feature Control MSR is supported on this CPU;
  // if not, disable the feature globally and bail out.
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    PERF_FUNCTION_END ();
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature.
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl.
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    PERF_FUNCTION_END ();
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist (hot-plug slot not populated), skip it.
        //
        continue;
      }

      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock so the next AP
      // (or a later caller) can use it.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }

  PERF_FUNCTION_END ();
}
1633
1634/**
1635 Allocate pages for code.
1636
1637 @param[in] Pages Number of pages to be allocated.
1638
1639 @return Allocated memory.
1640**/
1641VOID *
1642AllocateCodePages (
1643 IN UINTN Pages
1644 )
1645{
1646 EFI_STATUS Status;
1647 EFI_PHYSICAL_ADDRESS Memory;
1648
1649 if (Pages == 0) {
1650 return NULL;
1651 }
1652
1653 Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1654 if (EFI_ERROR (Status)) {
1655 return NULL;
1656 }
1657
1658 return (VOID *)(UINTN)Memory;
1659}
1660
/**
  Perform the remaining lock-down tasks once the SMM Ready To Lock event
  has been signaled (mSmmReadyToLock set by SmmReadyToLockEventNotify).

  Runs at most once per signal: the flag is cleared at the end. The steps
  are order-sensitive: all APs must rendezvous before memory attributes are
  changed, and page-table hardening happens before the platform completes
  ReadyToLock.
**/
VOID
PerformRemainingTasks (
  VOID
  )
{
  if (mSmmReadyToLock) {
    PERF_FUNCTION_BEGIN ();

    //
    // Check if all Aps enter SMM. In Relaxed-AP Sync Mode, BSP will not wait for
    // all Aps arrive. However,PerformRemainingTasks() needs to wait all Aps arrive before calling
    // SetMemMapAttributes() and ConfigSmmCodeAccessCheck() when mSmmReadyToLock
    // is true. In SetMemMapAttributes(), SmmSetMemoryAttributesEx() will call
    // FlushTlbForAll() that need to start up the aps. So it need to let all
    // aps arrive. Same as SetMemMapAttributes(), ConfigSmmCodeAccessCheck()
    // also will start up the aps.
    //
    if (EFI_ERROR (SmmCpuRendezvous (NULL, TRUE))) {
      DEBUG ((DEBUG_ERROR, "PerformRemainingTasks: fail to wait for all AP check in SMM!\n"));
    }

    //
    // Start SMM Profile feature
    //
    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      SmmProfileStart ();
    }

    //
    // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
    //
    InitPaging ();

    //
    // Mark critical region to be read-only in page table
    //
    SetMemMapAttributes ();

    if (IsRestrictedMemoryAccess ()) {
      //
      // For outside SMRAM, we only map SMM communication buffer or MMIO.
      //
      SetUefiMemMapAttributes ();

      //
      // Set page table itself to be read-only
      //
      SetPageTableAttributes ();
    }

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    //
    // Measure performance of SmmCpuFeaturesCompleteSmmReadyToLock() from caller side
    // as the implementation is provided by platform.
    //
    PERF_START (NULL, "SmmCompleteReadyToLock", NULL, 0);
    SmmCpuFeaturesCompleteSmmReadyToLock ();
    PERF_END (NULL, "SmmCompleteReadyToLock", NULL, 0);

    //
    // Clean SMM ready to lock flag so this sequence does not run again
    // on subsequent SMIs.
    //
    mSmmReadyToLock = FALSE;

    PERF_FUNCTION_END ();
  }
}
1736
/**
  Perform the pre tasks that must run at the start of SMI handling.

  Currently this only restores the SMM configuration when resuming from
  the ACPI S3 sleep state.
**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette