VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuCommon.c@ 108794

Last change on this file since 108794 was 108794, checked in by vboxsync, 2 weeks ago

Devices/EFI/FirmwareNew: Merge edk2-stable202502 from the vendor branch and make it build for the important platforms, bugref:4643

  • Property svn:eol-style set to native
File size: 49.0 KB
Line 
1/** @file
2Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
3
4Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6Copyright (C) 2023 - 2024 Advanced Micro Devices, Inc. All rights reserved.<BR>
7
8SPDX-License-Identifier: BSD-2-Clause-Patent
9
10**/
11
12#include "PiSmmCpuCommon.h"
13
//
// SMM CPU Private Data structure that contains the SMM Configuration Protocol
// along with its supporting fields. Exposed module-wide through the
// gSmmCpuPrivate pointer; the per-CPU array pointers are allocated and
// filled in during driver entry.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  {
    { 0 }
  },                                            // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
  NULL,                                         // pointer to Ap Wrapper Func array
  { NULL, NULL },                               // List_Entry for Tokens.
};
43
//
// CPU hot plug data: SMBASE and APIC ID arrays plus the SMRR range.
// SmrrBase/SmrrSize are filled by FindSmramInfo and the arrays are
// allocated during driver entry; the structure is handed to
// SmmCpuFeaturesInitializeProcessor for each CPU.
//
CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1, // Revision
  0,                            // Array Length of SmBase and APIC ID
  NULL,                         // Pointer to APIC ID array
  NULL,                         // Pointer to SMBASE array
  0,                            // Reserved
  0,                            // SmrrBase
  0                             // SmrrSize
};
53
//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

//
// Table of external handlers, one slot per exception vector.
//
EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// Per-CPU flags indicating each processor has completed its first SMI
// initialization; zeroed and then polled by ExecuteFirstSmiInit (volatile
// because it is written from SMI context on other processors).
//
volatile BOOLEAN  *mSmmInitialized = NULL;

//
// APIC ID of the BSP, recorded by ExecuteFirstSmiInit before issuing SMIs.
//
UINT32  mBspApicId = 0;

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

//
// Shadow stack size and CET (shadow stack) capability; mCetSupported is
// cleared at entry if the CPU does not report CET_SS.
//
UINTN    mSmmShadowStackSize;
BOOLEAN  mCetSupported = TRUE;

//
// Number of processors present at entry, and the maximum the driver
// allocates resources for (may be larger when CPU hot plug is supported).
//
UINTN  mMaxNumberOfCpus = 0;
UINTN  mNumberOfCpus    = 0;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global used to cache whether the SMM Debug Agent is supported or not
//
BOOLEAN  mSmmDebugAgentSupport = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

//
// Number of physical address bits supported by the processor.
//
UINT8  mPhysicalAddressBits;
126
/**
  Initialize IDT to setup exception handlers for SMM.

  Allocates a page-aligned, 32-entry IDT for gcSmiIdtr, temporarily loads it
  with interrupts disabled so InitializeCpuExceptionHandlers() can populate
  it, then restores the saved DXE IDT and the original interrupt state.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
  SetInterruptState (InterruptState);
}
173
174/**
175 Search module name by input IP address and output it.
176
177 @param CallerIpAddress Caller instruction pointer.
178
179**/
180VOID
181DumpModuleInfoByIp (
182 IN UINTN CallerIpAddress
183 )
184{
185 UINTN Pe32Data;
186 VOID *PdbPointer;
187
188 //
189 // Find Image Base
190 //
191 Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
192 if (Pe32Data != 0) {
193 DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));
194 PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);
195 if (PdbPointer != NULL) {
196 DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
197 }
198 }
199}
200
201/**
202 Read information from the CPU save state.
203
204 @param This EFI_SMM_CPU_PROTOCOL instance
205 @param Width The number of bytes to read from the CPU save state.
206 @param Register Specifies the CPU register to read form the save state.
207 @param CpuIndex Specifies the zero-based index of the CPU save state.
208 @param Buffer Upon return, this holds the CPU register value read from the save state.
209
210 @retval EFI_SUCCESS The register was read from Save State
211 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
212 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
213
214**/
215EFI_STATUS
216EFIAPI
217SmmReadSaveState (
218 IN CONST EFI_SMM_CPU_PROTOCOL *This,
219 IN UINTN Width,
220 IN EFI_SMM_SAVE_STATE_REGISTER Register,
221 IN UINTN CpuIndex,
222 OUT VOID *Buffer
223 )
224{
225 EFI_STATUS Status;
226
227 //
228 // Retrieve pointer to the specified CPU's SMM Save State buffer
229 //
230 if ((CpuIndex >= gMmst->NumberOfCpus) || (Buffer == NULL)) {
231 return EFI_INVALID_PARAMETER;
232 }
233
234 //
235 // The SpeculationBarrier() call here is to ensure the above check for the
236 // CpuIndex has been completed before the execution of subsequent codes.
237 //
238 SpeculationBarrier ();
239
240 //
241 // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
242 //
243 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
244 //
245 // The pseudo-register only supports the 64-bit size specified by Width.
246 //
247 if (Width != sizeof (UINT64)) {
248 return EFI_INVALID_PARAMETER;
249 }
250
251 //
252 // If the processor is in SMM at the time the SMI occurred,
253 // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
254 // Otherwise, EFI_NOT_FOUND is returned.
255 //
256 if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
257 *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
258 return EFI_SUCCESS;
259 } else {
260 return EFI_NOT_FOUND;
261 }
262 }
263
264 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
265 return EFI_INVALID_PARAMETER;
266 }
267
268 Status = MmSaveStateReadRegister (CpuIndex, Register, Width, Buffer);
269
270 return Status;
271}
272
273/**
274 Write data to the CPU save state.
275
276 @param This EFI_SMM_CPU_PROTOCOL instance
277 @param Width The number of bytes to read from the CPU save state.
278 @param Register Specifies the CPU register to write to the save state.
279 @param CpuIndex Specifies the zero-based index of the CPU save state
280 @param Buffer Upon entry, this holds the new CPU register value.
281
282 @retval EFI_SUCCESS The register was written from Save State
283 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
284 @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
285
286**/
287EFI_STATUS
288EFIAPI
289SmmWriteSaveState (
290 IN CONST EFI_SMM_CPU_PROTOCOL *This,
291 IN UINTN Width,
292 IN EFI_SMM_SAVE_STATE_REGISTER Register,
293 IN UINTN CpuIndex,
294 IN CONST VOID *Buffer
295 )
296{
297 EFI_STATUS Status;
298
299 //
300 // Retrieve pointer to the specified CPU's SMM Save State buffer
301 //
302 if ((CpuIndex >= gMmst->NumberOfCpus) || (Buffer == NULL)) {
303 return EFI_INVALID_PARAMETER;
304 }
305
306 //
307 // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
308 //
309 if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
310 return EFI_SUCCESS;
311 }
312
313 if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
314 return EFI_INVALID_PARAMETER;
315 }
316
317 Status = MmSaveStateWriteRegister (CpuIndex, Register, Width, Buffer);
318
319 return Status;
320}
321
/**
  Initialize SMM environment.

  Runs on the currently executing processor: looks up its index in the
  ProcessorInfo table by APIC ID, initializes CPU-specific SMM features,
  and performs normal-boot vs. S3-resume specific setup.

**/
VOID
InitializeSmm (
  VOID
  )
{
  UINT32   ApicId;
  UINTN    Index;
  BOOLEAN  IsBsp;

  ApicId = GetApicId ();

  //
  // The BSP recorded its APIC ID in mBspApicId before triggering the init SMIs.
  //
  IsBsp = (BOOLEAN)(mBspApicId == ApicId);

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  //
  // Find this processor's index by matching its APIC ID.
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      PERF_CODE (
        MpPerfBegin (Index, SMM_MP_PERF_PROCEDURE_ID (InitializeSmm));
        );
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        IsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported (Index);

        if (mIsStandaloneMm) {
          AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

          //
          // Standalone MM does not allow call out to DXE at anytime.
          // Code Access check can be enabled in the first SMI.
          // While SMM needs to defer the enabling to EndOfDxe.
          //
          // Enable SMM Code Access Check feature.
          //
          ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
        }
      } else if (IsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      PERF_CODE (
        MpPerfEnd (Index, SMM_MP_PERF_PROCEDURE_ID (InitializeSmm));
        );

      return;
    }
  }

  //
  // The executing CPU's APIC ID must appear in the ProcessorInfo table.
  //
  ASSERT (FALSE);
}
392
/**
  Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.

  Clears the per-CPU mSmmInitialized flags, sends an SMI to the BSP and then
  to all other processors, and busy-waits until every CPU has flagged
  completion of its first SMI.

**/
VOID
ExecuteFirstSmiInit (
  VOID
  )
{
  UINTN  Index;

  PERF_FUNCTION_BEGIN ();

  //
  // Lazily allocate the per-CPU completion flag array on first use.
  //
  if (mSmmInitialized == NULL) {
    mSmmInitialized = (BOOLEAN *)AllocatePool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  }

  ASSERT (mSmmInitialized != NULL);
  if (mSmmInitialized == NULL) {
    PERF_FUNCTION_END ();
    return;
  }

  //
  // Reset the mSmmInitialized to false.
  //
  ZeroMem ((VOID *)mSmmInitialized, sizeof (BOOLEAN) * mMaxNumberOfCpus);

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Get the BSP ApicId.
  //
  mBspApicId = GetApicId ();

  //
  // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) for SMM init
  //
  SendSmiIpi (mBspApicId);
  SendSmiIpiAllExcludingSelf ();

  //
  // Wait for all processors to finish its 1st SMI; each CPU sets its
  // mSmmInitialized entry from SMI context, hence the volatile busy-wait.
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    while (!(BOOLEAN)mSmmInitialized[Index]) {
    }
  }

  PERF_FUNCTION_END ();
}
447
448/**
449 Function to compare 2 SMM_BASE_HOB_DATA pointer based on ProcessorIndex.
450
451 @param[in] Buffer1 pointer to SMM_BASE_HOB_DATA poiner to compare
452 @param[in] Buffer2 pointer to second SMM_BASE_HOB_DATA pointer to compare
453
454 @retval 0 Buffer1 equal to Buffer2
455 @retval <0 Buffer1 is less than Buffer2
456 @retval >0 Buffer1 is greater than Buffer2
457**/
458INTN
459EFIAPI
460SmBaseHobCompare (
461 IN CONST VOID *Buffer1,
462 IN CONST VOID *Buffer2
463 )
464{
465 if ((*(SMM_BASE_HOB_DATA **)Buffer1)->ProcessorIndex > (*(SMM_BASE_HOB_DATA **)Buffer2)->ProcessorIndex) {
466 return 1;
467 } else if ((*(SMM_BASE_HOB_DATA **)Buffer1)->ProcessorIndex < (*(SMM_BASE_HOB_DATA **)Buffer2)->ProcessorIndex) {
468 return -1;
469 }
470
471 return 0;
472}
473
/**
  Extract SmBase for all CPU from SmmBase HOB.

  Walks the gSmmBaseHobGuid HOB list (which may be split across multiple HOB
  instances), sorts the instances by starting ProcessorIndex, and produces a
  single array of SMBASE values indexed by processor.

  @param[in]  MaxNumberOfCpus       Max NumberOfCpus.

  @param[out] AllocatedSmBaseBuffer Pointer to SmBase Buffer allocated
                                    by this function. Only set if the
                                    function returns EFI_SUCCESS.
                                    Caller owns and frees the buffer.

  @retval EFI_SUCCESS           SmBase Buffer output successfully.
  @retval EFI_OUT_OF_RESOURCES  Memory allocation failed.
  @retval EFI_NOT_FOUND         gSmmBaseHobGuid was never created.
**/
STATIC
EFI_STATUS
GetSmBase (
  IN  UINTN  MaxNumberOfCpus,
  OUT UINTN  **AllocatedSmBaseBuffer
  )
{
  UINTN              HobCount;
  EFI_HOB_GUID_TYPE  *GuidHob;
  SMM_BASE_HOB_DATA  *SmmBaseHobData;
  UINTN              NumberOfProcessors;
  SMM_BASE_HOB_DATA  **SmBaseHobs;
  UINTN              *SmBaseBuffer;
  UINTN              HobIndex;
  UINTN              SortBuffer;
  UINTN              ProcessorIndex;
  UINT64             PrevProcessorIndex;
  EFI_HOB_GUID_TYPE  *FirstSmmBaseGuidHob;

  SmmBaseHobData     = NULL;
  HobIndex           = 0;
  ProcessorIndex     = 0;
  HobCount           = 0;
  NumberOfProcessors = 0;

  FirstSmmBaseGuidHob = GetFirstGuidHob (&gSmmBaseHobGuid);
  if (FirstSmmBaseGuidHob == NULL) {
    return EFI_NOT_FOUND;
  }

  //
  // First pass: count the HOB instances and sum the processors they cover.
  //
  GuidHob = FirstSmmBaseGuidHob;
  while (GuidHob != NULL) {
    HobCount++;
    SmmBaseHobData      = GET_GUID_HOB_DATA (GuidHob);
    NumberOfProcessors += SmmBaseHobData->NumberOfProcessors;

    if (NumberOfProcessors >= MaxNumberOfCpus) {
      break;
    }

    GuidHob = GetNextGuidHob (&gSmmBaseHobGuid, GET_NEXT_HOB (GuidHob));
  }

  //
  // The HOBs must cover exactly MaxNumberOfCpus processors; halt otherwise.
  //
  ASSERT (NumberOfProcessors == MaxNumberOfCpus);
  if (NumberOfProcessors != MaxNumberOfCpus) {
    CpuDeadLoop ();
  }

  SmBaseHobs = AllocatePool (sizeof (SMM_BASE_HOB_DATA *) * HobCount);
  if (SmBaseHobs == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // Record each SmmBaseHob pointer in the SmBaseHobs.
  // The FirstSmmBaseGuidHob is to speed up this while-loop
  // without needing to look for SmBaseHob from beginning.
  //
  GuidHob = FirstSmmBaseGuidHob;
  while (HobIndex < HobCount) {
    SmBaseHobs[HobIndex++] = GET_GUID_HOB_DATA (GuidHob);
    GuidHob                = GetNextGuidHob (&gSmmBaseHobGuid, GET_NEXT_HOB (GuidHob));
  }

  SmBaseBuffer = (UINTN *)AllocatePool (sizeof (UINTN) * (MaxNumberOfCpus));
  ASSERT (SmBaseBuffer != NULL);
  if (SmBaseBuffer == NULL) {
    FreePool (SmBaseHobs);
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // Sort the HOB instances by starting ProcessorIndex so SMBASE values can
  // be copied out in processor order. SortBuffer is QuickSort's
  // element-sized scratch space.
  //
  QuickSort (SmBaseHobs, HobCount, sizeof (SMM_BASE_HOB_DATA *), (BASE_SORT_COMPARE)SmBaseHobCompare, &SortBuffer);
  PrevProcessorIndex = 0;
  for (HobIndex = 0; HobIndex < HobCount; HobIndex++) {
    //
    // Make sure no overlap and no gap in the CPU range covered by each HOB
    //
    ASSERT (SmBaseHobs[HobIndex]->ProcessorIndex == PrevProcessorIndex);

    //
    // Cache each SmBase in order.
    //
    for (ProcessorIndex = 0; ProcessorIndex < SmBaseHobs[HobIndex]->NumberOfProcessors; ProcessorIndex++) {
      SmBaseBuffer[PrevProcessorIndex + ProcessorIndex] = (UINTN)SmBaseHobs[HobIndex]->SmBase[ProcessorIndex];
    }

    PrevProcessorIndex += SmBaseHobs[HobIndex]->NumberOfProcessors;
  }

  FreePool (SmBaseHobs);
  *AllocatedSmBaseBuffer = SmBaseBuffer;
  return EFI_SUCCESS;
}
580
581/**
582 Function to compare 2 MP_INFORMATION2_HOB_DATA pointer based on ProcessorIndex.
583
584 @param[in] Buffer1 pointer to MP_INFORMATION2_HOB_DATA poiner to compare
585 @param[in] Buffer2 pointer to second MP_INFORMATION2_HOB_DATA pointer to compare
586
587 @retval 0 Buffer1 equal to Buffer2
588 @retval <0 Buffer1 is less than Buffer2
589 @retval >0 Buffer1 is greater than Buffer2
590**/
591INTN
592EFIAPI
593MpInformation2HobCompare (
594 IN CONST VOID *Buffer1,
595 IN CONST VOID *Buffer2
596 )
597{
598 if ((*(MP_INFORMATION2_HOB_DATA **)Buffer1)->ProcessorIndex > (*(MP_INFORMATION2_HOB_DATA **)Buffer2)->ProcessorIndex) {
599 return 1;
600 } else if ((*(MP_INFORMATION2_HOB_DATA **)Buffer1)->ProcessorIndex < (*(MP_INFORMATION2_HOB_DATA **)Buffer2)->ProcessorIndex) {
601 return -1;
602 }
603
604 return 0;
605}
606
607/**
608 Extract NumberOfCpus, MaxNumberOfCpus and EFI_PROCESSOR_INFORMATION for all CPU from MpInformation2 HOB.
609
610 @param[out] NumberOfCpus Pointer to NumberOfCpus.
611 @param[out] MaxNumberOfCpus Pointer to MaxNumberOfCpus.
612
613 @retval ProcessorInfo Pointer to EFI_PROCESSOR_INFORMATION buffer.
614**/
615EFI_PROCESSOR_INFORMATION *
616GetMpInformation (
617 OUT UINTN *NumberOfCpus,
618 OUT UINTN *MaxNumberOfCpus
619 )
620{
621 EFI_HOB_GUID_TYPE *GuidHob;
622 EFI_HOB_GUID_TYPE *FirstMpInfo2Hob;
623 MP_INFORMATION2_HOB_DATA *MpInformation2HobData;
624 UINTN HobCount;
625 UINTN HobIndex;
626 MP_INFORMATION2_HOB_DATA **MpInfo2Hobs;
627 UINTN SortBuffer;
628 UINTN ProcessorIndex;
629 UINT64 PrevProcessorIndex;
630 MP_INFORMATION2_ENTRY *MpInformation2Entry;
631 EFI_PROCESSOR_INFORMATION *ProcessorInfo;
632
633 GuidHob = NULL;
634 MpInformation2HobData = NULL;
635 FirstMpInfo2Hob = NULL;
636 MpInfo2Hobs = NULL;
637 HobIndex = 0;
638 HobCount = 0;
639
640 FirstMpInfo2Hob = GetFirstGuidHob (&gMpInformation2HobGuid);
641
642 if (mIsStandaloneMm) {
643 ASSERT (FirstMpInfo2Hob != NULL);
644 } else {
645 if (FirstMpInfo2Hob == NULL) {
646 DEBUG ((DEBUG_INFO, "%a: [INFO] gMpInformation2HobGuid HOB not found.\n", __func__));
647 return GetMpInformationFromMpServices (NumberOfCpus, MaxNumberOfCpus);
648 }
649 }
650
651 GuidHob = FirstMpInfo2Hob;
652 while (GuidHob != NULL) {
653 MpInformation2HobData = GET_GUID_HOB_DATA (GuidHob);
654
655 //
656 // This is the last MpInformationHob in the HOB list.
657 //
658 if (MpInformation2HobData->NumberOfProcessors == 0) {
659 ASSERT (HobCount != 0);
660 break;
661 }
662
663 HobCount++;
664 *NumberOfCpus += MpInformation2HobData->NumberOfProcessors;
665 GuidHob = GetNextGuidHob (&gMpInformation2HobGuid, GET_NEXT_HOB (GuidHob));
666 }
667
668 *MaxNumberOfCpus = *NumberOfCpus;
669
670 if (!mIsStandaloneMm) {
671 ASSERT (*NumberOfCpus <= GetSupportedMaxLogicalProcessorNumber ());
672
673 //
674 // If support CPU hot plug, we need to allocate resources for possibly hot-added processors
675 //
676 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
677 *MaxNumberOfCpus = GetSupportedMaxLogicalProcessorNumber ();
678 }
679 }
680
681 MpInfo2Hobs = AllocatePool (sizeof (MP_INFORMATION2_HOB_DATA *) * HobCount);
682 ASSERT (MpInfo2Hobs != NULL);
683 if (MpInfo2Hobs == NULL) {
684 return NULL;
685 }
686
687 //
688 // Record each MpInformation2Hob pointer in the MpInfo2Hobs.
689 // The FirstMpInfo2Hob is to speed up this while-loop without
690 // needing to look for MpInfo2Hob from beginning.
691 //
692 GuidHob = FirstMpInfo2Hob;
693 while (HobIndex < HobCount) {
694 MpInfo2Hobs[HobIndex++] = GET_GUID_HOB_DATA (GuidHob);
695 GuidHob = GetNextGuidHob (&gMpInformation2HobGuid, GET_NEXT_HOB (GuidHob));
696 }
697
698 ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * (*MaxNumberOfCpus));
699 ASSERT (ProcessorInfo != NULL);
700 if (ProcessorInfo == NULL) {
701 FreePool (MpInfo2Hobs);
702 return NULL;
703 }
704
705 QuickSort (MpInfo2Hobs, HobCount, sizeof (MP_INFORMATION2_HOB_DATA *), (BASE_SORT_COMPARE)MpInformation2HobCompare, &SortBuffer);
706 PrevProcessorIndex = 0;
707 for (HobIndex = 0; HobIndex < HobCount; HobIndex++) {
708 //
709 // Make sure no overlap and no gap in the CPU range covered by each HOB
710 //
711 ASSERT (MpInfo2Hobs[HobIndex]->ProcessorIndex == PrevProcessorIndex);
712
713 //
714 // Cache each EFI_PROCESSOR_INFORMATION in order.
715 //
716 for (ProcessorIndex = 0; ProcessorIndex < MpInfo2Hobs[HobIndex]->NumberOfProcessors; ProcessorIndex++) {
717 MpInformation2Entry = GET_MP_INFORMATION_ENTRY (MpInfo2Hobs[HobIndex], ProcessorIndex);
718 CopyMem (
719 &ProcessorInfo[PrevProcessorIndex + ProcessorIndex],
720 &MpInformation2Entry->ProcessorInfo,
721 sizeof (EFI_PROCESSOR_INFORMATION)
722 );
723 }
724
725 PrevProcessorIndex += MpInfo2Hobs[HobIndex]->NumberOfProcessors;
726 }
727
728 FreePool (MpInfo2Hobs);
729 return ProcessorInfo;
730}
731
732/**
733 The module Entry Point of the CPU SMM driver.
734
735 @retval EFI_SUCCESS The common entry point is executed successfully.
736 @retval Other Some error occurs when executing this entry point.
737
738**/
739EFI_STATUS
740PiSmmCpuEntryCommon (
741 VOID
742 )
743{
744 EFI_STATUS Status;
745 UINTN Index;
746 UINTN TileCodeSize;
747 UINTN TileDataSize;
748 UINTN TileSize;
749 UINT8 *Stacks;
750 UINT32 RegEax;
751 UINT32 RegEbx;
752 UINT32 RegEcx;
753 UINT32 RegEdx;
754 CPUID_EXTENDED_CPU_SIG_EDX ExtendedRegEdx;
755 UINTN FamilyId;
756 UINTN ModelId;
757 UINT32 Cr3;
758
759 PERF_FUNCTION_BEGIN ();
760
761 //
762 // Initialize address fixup
763 //
764 PiSmmCpuSmiEntryFixupAddress ();
765
766 //
767 // Initialize Debug Agent to support source level debug in SMM code
768 //
769 InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, &mSmmDebugAgentSupport, NULL);
770
771 //
772 // Report the start of CPU SMM initialization.
773 //
774 REPORT_STATUS_CODE (
775 EFI_PROGRESS_CODE,
776 EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
777 );
778
779 //
780 // Find out SMRR Base and SMRR Size
781 //
782 FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
783
784 //
785 // Retrieve NumberOfProcessors, MaxNumberOfCpus and EFI_PROCESSOR_INFORMATION for all CPU from MpInformation2 HOB.
786 //
787 gSmmCpuPrivate->ProcessorInfo = GetMpInformation (&mNumberOfCpus, &mMaxNumberOfCpus);
788 ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
789
790 //
791 // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.
792 // A constant BSP index makes no sense because it may be hot removed.
793 //
794 DEBUG_CODE_BEGIN ();
795 if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
796 ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
797 }
798
799 DEBUG_CODE_END ();
800
801 //
802 // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
803 //
804 mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
805 DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
806
807 gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
808
809 PERF_CODE (
810 InitializeMpPerf (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
811 );
812
813 //
814 // The CPU save state and code for the SMI entry point are tiled within an SMRAM
815 // allocated buffer. The minimum size of this buffer for a uniprocessor system
816 // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area
817 // just below SMBASE + 64KB. If more than one CPU is present in the platform,
818 // then the SMI entry point and the CPU save state areas can be tiles to minimize
819 // the total amount SMRAM required for all the CPUs. The tile size can be computed
820 // by adding the // CPU save state size, any extra CPU specific context, and
821 // the size of code that must be placed at the SMI entry point to transfer
822 // control to a C function in the native SMM execution mode. This size is
823 // rounded up to the nearest power of 2 to give the tile size for a each CPU.
824 // The total amount of memory required is the maximum number of CPUs that
825 // platform supports times the tile size. The picture below shows the tiling,
826 // where m is the number of tiles that fit in 32KB.
827 //
828 // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
829 // | CPU m+1 Save State |
830 // +-----------------------------+
831 // | CPU m+1 Extra Data |
832 // +-----------------------------+
833 // | Padding |
834 // +-----------------------------+
835 // | CPU 2m SMI Entry |
836 // +#############################+ <-- Base of allocated buffer + 64 KB
837 // | CPU m-1 Save State |
838 // +-----------------------------+
839 // | CPU m-1 Extra Data |
840 // +-----------------------------+
841 // | Padding |
842 // +-----------------------------+
843 // | CPU 2m-1 SMI Entry |
844 // +=============================+ <-- 2^n offset from Base of allocated buffer
845 // | . . . . . . . . . . . . |
846 // +=============================+ <-- 2^n offset from Base of allocated buffer
847 // | CPU 2 Save State |
848 // +-----------------------------+
849 // | CPU 2 Extra Data |
850 // +-----------------------------+
851 // | Padding |
852 // +-----------------------------+
853 // | CPU m+1 SMI Entry |
854 // +=============================+ <-- Base of allocated buffer + 32 KB
855 // | CPU 1 Save State |
856 // +-----------------------------+
857 // | CPU 1 Extra Data |
858 // +-----------------------------+
859 // | Padding |
860 // +-----------------------------+
861 // | CPU m SMI Entry |
862 // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
863 // | CPU 0 Save State |
864 // +-----------------------------+
865 // | CPU 0 Extra Data |
866 // +-----------------------------+
867 // | Padding |
868 // +-----------------------------+
869 // | CPU m-1 SMI Entry |
870 // +=============================+ <-- 2^n offset from Base of allocated buffer
871 // | . . . . . . . . . . . . |
872 // +=============================+ <-- 2^n offset from Base of allocated buffer
873 // | Padding |
874 // +-----------------------------+
875 // | CPU 1 SMI Entry |
876 // +=============================+ <-- 2^n offset from Base of allocated buffer
877 // | Padding |
878 // +-----------------------------+
879 // | CPU 0 SMI Entry |
880 // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
881 //
882
883 //
884 // Retrieve CPU Family
885 //
886 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
887 FamilyId = (RegEax >> 8) & 0xf;
888 ModelId = (RegEax >> 4) & 0xf;
889 if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {
890 ModelId = ModelId | ((RegEax >> 12) & 0xf0);
891 }
892
893 RegEdx = 0;
894 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
895 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
896 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
897 }
898
899 //
900 // Determine the mode of the CPU at the time an SMI occurs
901 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
902 // Volume 3C, Section 34.4.1.1
903 //
904 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
905 if ((RegEdx & BIT29) != 0) {
906 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
907 }
908
909 if (FamilyId == 0x06) {
910 if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {
911 mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
912 }
913 }
914
915 DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
916 if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
917 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
918 if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
919 AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
920 DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
921 DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
922 DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
923 if ((RegEcx & CPUID_CET_SS) == 0) {
924 mCetSupported = FALSE;
925 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
926 }
927
928 if (mCetSupported) {
929 AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
930 DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
931 AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
932 DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
933 AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
934 DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
935 }
936 } else {
937 mCetSupported = FALSE;
938 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
939 }
940 } else {
941 mCetSupported = FALSE;
942 PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
943 }
944
945 //
946 // Check XD supported or not.
947 //
948 RegEax = 0;
949 ExtendedRegEdx.Uint32 = 0;
950 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
951 if (RegEax <= CPUID_EXTENDED_FUNCTION) {
952 //
953 // Extended CPUID functions are not supported on this processor.
954 //
955 mXdSupported = FALSE;
956 PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
957 }
958
959 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &ExtendedRegEdx.Uint32);
960 if (ExtendedRegEdx.Bits.NX == 0) {
961 //
962 // Execute Disable Bit feature is not supported on this processor.
963 //
964 mXdSupported = FALSE;
965 PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
966 }
967
968 if (StandardSignatureIsAuthenticAMD ()) {
969 //
970 // AMD processors do not support MSR_IA32_MISC_ENABLE
971 //
972 PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);
973 }
974
975 //
976 // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
977 // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
978 // This size is rounded up to nearest power of 2.
979 //
980 TileCodeSize = GetSmiHandlerSize ();
981 TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);
982 TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
983 TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);
984 TileSize = TileDataSize + TileCodeSize - 1;
985 TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
986 DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
987
988 //
989 // If the TileSize is larger than space available for the SMI Handler of
990 // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
991 // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
992 // the SMI Handler size must be reduced or the size of the extra CPU specific
993 // context must be reduced.
994 //
995 ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
996
997 //
998 // Check whether the Required TileSize is enough.
999 //
1000 if (TileSize > SIZE_8KB) {
1001 DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));
1002 FreePool (gSmmCpuPrivate->ProcessorInfo);
1003 CpuDeadLoop ();
1004 return RETURN_BUFFER_TOO_SMALL;
1005 }
1006
1007 //
1008 // Retrieve the allocated SmmBase from gSmmBaseHobGuid. If found,
1009 // means the SmBase relocation has been done.
1010 //
1011 mCpuHotPlugData.SmBase = NULL;
1012 Status = GetSmBase (mMaxNumberOfCpus, &mCpuHotPlugData.SmBase);
1013 ASSERT (!EFI_ERROR (Status));
1014 if (EFI_ERROR (Status)) {
1015 CpuDeadLoop ();
1016 }
1017
1018 //
1019 // ASSERT SmBase has been relocated.
1020 //
1021 ASSERT (mCpuHotPlugData.SmBase != NULL);
1022
1023 //
1024 // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
1025 //
1026 gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
1027 ASSERT (gSmmCpuPrivate->Operation != NULL);
1028
1029 gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
1030 ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
1031
1032 gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
1033 ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
1034
1035 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
1036 mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
1037
1038 //
1039 // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
1040 //
1041 mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
1042 ASSERT (mCpuHotPlugData.ApicId != NULL);
1043 mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
1044
1045 //
1046 // Retrieve APIC ID of each enabled processor from the MP Services protocol.
1047 // Also compute the SMBASE address, CPU Save State address, and CPU Save state
1048 // size for each CPU in the platform
1049 //
1050 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
1051 gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);
1052 gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
1053 gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
1054
1055 if (Index < mNumberOfCpus) {
1056 mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
1057
1058 DEBUG ((
1059 DEBUG_INFO,
1060 "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
1061 Index,
1062 (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
1063 mCpuHotPlugData.SmBase[Index],
1064 gSmmCpuPrivate->CpuSaveState[Index],
1065 gSmmCpuPrivate->CpuSaveStateSize[Index]
1066 ));
1067 } else {
1068 gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
1069 mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
1070 }
1071 }
1072
1073 //
1074 // Allocate SMI stacks for all processors.
1075 //
1076 mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
1077 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1078 //
1079 // SMM Stack Guard Enabled
1080 // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.
1081 //
1082 // +--------------------------------------------------+-----+--------------------------------------------------+
1083 // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
1084 // +--------------------------------------------------+-----+--------------------------------------------------+
1085 // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|
1086 // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|
1087 // | | | |
1088 // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|
1089 //
1090 mSmmStackSize += EFI_PAGES_TO_SIZE (2);
1091 }
1092
1093 mSmmShadowStackSize = 0;
1094 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1095 mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
1096
1097 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1098 //
1099 // SMM Stack Guard Enabled
1100 // Append Shadow Stack after normal stack
1101 // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.
1102 //
1103 // |= Stacks
1104 // +--------------------------------------------------+---------------------------------------------------------------+
1105 // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
1106 // +--------------------------------------------------+---------------------------------------------------------------+
1107 // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|
1108 // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
1109 // | |
1110 // |<-------------------------------------------- Processor N ------------------------------------------------------->|
1111 //
1112 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
1113 } else {
1114 //
1115 // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)
1116 // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.
1117 // 1 more pages is allocated for each processor, it is known good stack.
1118 //
1119 //
1120 // |= Stacks
1121 // +-------------------------------------+--------------------------------------------------+
1122 // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |
1123 // +-------------------------------------+--------------------------------------------------+
1124 // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|
1125 // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|
1126 // | |
1127 // |<-------------------------------- Processor N ----------------------------------------->|
1128 //
1129 mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
1130 mSmmStackSize += EFI_PAGES_TO_SIZE (1);
1131 }
1132 }
1133
1134 Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
1135 ASSERT (Stacks != NULL);
1136 mSmmStackArrayBase = (UINTN)Stacks;
1137 mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;
1138
1139 DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
1140 DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
1141 DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
1142 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1143 DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
1144 }
1145
1146 //
1147 // Initialize IDT
1148 //
1149 InitializeSmmIdt ();
1150
1151 //
1152 // SMM Time initialization
1153 //
1154 InitializeSmmTimer ();
1155
1156 //
1157 // Initialize mSmmProfileEnabled
1158 //
1159 mSmmProfileEnabled = IsSmmProfileEnabled ();
1160
1161 //
1162 // Initialize MP globals
1163 //
1164 Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);
1165
1166 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
1167 for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
1168 SetShadowStack (
1169 Cr3,
1170 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
1171 mSmmShadowStackSize
1172 );
1173 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
1174 ConvertMemoryPageAttributes (
1175 Cr3,
1176 mPagingMode,
1177 (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
1178 EFI_PAGES_TO_SIZE (1),
1179 EFI_MEMORY_RP,
1180 TRUE,
1181 NULL
1182 );
1183 }
1184 }
1185 }
1186
1187 //
1188 // For relocated SMBASE, some MSRs & CSRs are still required to be configured in SMM Mode for SMM Initialization.
1189 // Those MSRs & CSRs must be configured before normal SMI sources happen.
1190 // So, here is to issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
1191 //
1192 ExecuteFirstSmiInit ();
1193
1194 //
1195 // Call hook for BSP to perform extra actions in normal mode after all
1196 // SMM base addresses have been relocated on all CPUs
1197 //
1198 SmmCpuFeaturesSmmRelocationComplete ();
1199
1200 DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
1201
1202 //
1203 // Fill in SMM Reserved Regions
1204 //
1205 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
1206 gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
1207
1208 //
1209 // Install the SMM CPU Protocol into SMM protocol database
1210 //
1211 Status = gMmst->MmInstallProtocolInterface (
1212 &mSmmCpuHandle,
1213 &gEfiSmmCpuProtocolGuid,
1214 EFI_NATIVE_INTERFACE,
1215 &mSmmCpu
1216 );
1217 ASSERT_EFI_ERROR (Status);
1218
1219 //
1220 // Install the SMM Memory Attribute Protocol into SMM protocol database
1221 //
1222 Status = gMmst->MmInstallProtocolInterface (
1223 &mSmmCpuHandle,
1224 &gEdkiiSmmMemoryAttributeProtocolGuid,
1225 EFI_NATIVE_INTERFACE,
1226 &mSmmMemoryAttribute
1227 );
1228 ASSERT_EFI_ERROR (Status);
1229
1230 //
1231 // Initialize global buffer for MM MP.
1232 //
1233 InitializeDataForMmMp ();
1234
1235 //
1236 // Initialize Package First Thread Index Info.
1237 //
1238 InitPackageFirstThreadIndexInfo ();
1239
1240 //
1241 // Install the SMM Mp Protocol into SMM protocol database
1242 //
1243 Status = gMmst->MmInstallProtocolInterface (
1244 &mSmmCpuHandle,
1245 &gEfiMmMpProtocolGuid,
1246 EFI_NATIVE_INTERFACE,
1247 &mSmmMp
1248 );
1249 ASSERT_EFI_ERROR (Status);
1250
1251 //
1252 // Initialize SMM CPU Services Support
1253 //
1254 Status = InitializeSmmCpuServices (mSmmCpuHandle);
1255 ASSERT_EFI_ERROR (Status);
1256
1257 //
1258 // Initialize SMM Profile feature
1259 //
1260 InitSmmProfile (Cr3);
1261
1262 GetAcpiS3EnableFlag ();
1263 InitSmmS3ResumeState ();
1264
1265 DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
1266
1267 PERF_FUNCTION_END ();
1268 return EFI_SUCCESS;
1269}
1270
1271/**
1272 Function to compare 2 EFI_SMRAM_DESCRIPTOR based on CpuStart.
1273
1274 @param[in] Buffer1 pointer to Device Path poiner to compare
1275 @param[in] Buffer2 pointer to second DevicePath pointer to compare
1276
1277 @retval 0 Buffer1 equal to Buffer2
1278 @retval <0 Buffer1 is less than Buffer2
1279 @retval >0 Buffer1 is greater than Buffer2
1280**/
1281INTN
1282EFIAPI
1283CpuSmramRangeCompare (
1284 IN CONST VOID *Buffer1,
1285 IN CONST VOID *Buffer2
1286 )
1287{
1288 if (((EFI_SMRAM_DESCRIPTOR *)Buffer1)->CpuStart > ((EFI_SMRAM_DESCRIPTOR *)Buffer2)->CpuStart) {
1289 return 1;
1290 } else if (((EFI_SMRAM_DESCRIPTOR *)Buffer1)->CpuStart < ((EFI_SMRAM_DESCRIPTOR *)Buffer2)->CpuStart) {
1291 return -1;
1292 }
1293
1294 return 0;
1295}
1296
1297/**
1298 Find out SMRAM information including SMRR base and SMRR size.
1299
1300 @param SmrrBase SMRR base
1301 @param SmrrSize SMRR size
1302
1303**/
1304VOID
1305FindSmramInfo (
1306 OUT UINT32 *SmrrBase,
1307 OUT UINT32 *SmrrSize
1308 )
1309{
1310 VOID *GuidHob;
1311 EFI_SMRAM_HOB_DESCRIPTOR_BLOCK *DescriptorBlock;
1312 EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
1313 UINTN Index;
1314 UINT64 MaxSize;
1315 BOOLEAN Found;
1316 EFI_SMRAM_DESCRIPTOR SmramDescriptor;
1317
1318 ASSERT (SmrrBase != NULL && SmrrSize != NULL);
1319
1320 //
1321 // Get SMRAM information
1322 //
1323 GuidHob = GetFirstGuidHob (&gEfiSmmSmramMemoryGuid);
1324 ASSERT (GuidHob != NULL);
1325 DescriptorBlock = (EFI_SMRAM_HOB_DESCRIPTOR_BLOCK *)GET_GUID_HOB_DATA (GuidHob);
1326 mSmmCpuSmramRangeCount = DescriptorBlock->NumberOfSmmReservedRegions;
1327 mSmmCpuSmramRanges = DescriptorBlock->Descriptor;
1328
1329 //
1330 // Sort the mSmmCpuSmramRanges
1331 //
1332 QuickSort (mSmmCpuSmramRanges, mSmmCpuSmramRangeCount, sizeof (EFI_SMRAM_DESCRIPTOR), (BASE_SORT_COMPARE)CpuSmramRangeCompare, &SmramDescriptor);
1333
1334 //
1335 // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
1336 //
1337 CurrentSmramRange = NULL;
1338 for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
1339 //
1340 // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
1341 //
1342 if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
1343 continue;
1344 }
1345
1346 if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
1347 if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
1348 if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
1349 MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
1350 CurrentSmramRange = &mSmmCpuSmramRanges[Index];
1351 }
1352 }
1353 }
1354 }
1355
1356 ASSERT (CurrentSmramRange != NULL);
1357
1358 *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
1359 *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
1360
1361 do {
1362 Found = FALSE;
1363 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
1364 if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
1365 (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
1366 {
1367 *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
1368 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1369 Found = TRUE;
1370 } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
1371 *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
1372 Found = TRUE;
1373 }
1374 }
1375 } while (Found);
1376
1377 DEBUG ((DEBUG_INFO, "%a: SMRR Base = 0x%x, SMRR Size = 0x%x\n", __func__, *SmrrBase, *SmrrSize));
1378}
1379
1380/**
1381Configure SMM Code Access Check feature on an AP.
1382SMM Feature Control MSR will be locked after configuration.
1383
1384@param[in,out] Buffer Pointer to private data buffer.
1385**/
1386VOID
1387EFIAPI
1388ConfigSmmCodeAccessCheckOnCurrentProcessor (
1389 IN OUT VOID *Buffer
1390 )
1391{
1392 UINTN CpuIndex;
1393 UINT64 SmmFeatureControlMsr;
1394 UINT64 NewSmmFeatureControlMsr;
1395
1396 //
1397 // Retrieve the CPU Index from the context passed in
1398 //
1399 CpuIndex = *(UINTN *)Buffer;
1400
1401 //
1402 // Get the current SMM Feature Control MSR value
1403 //
1404 SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
1405
1406 //
1407 // Compute the new SMM Feature Control MSR value
1408 //
1409 NewSmmFeatureControlMsr = SmmFeatureControlMsr;
1410 if (mSmmCodeAccessCheckEnable) {
1411 NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
1412 if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
1413 NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
1414 }
1415 }
1416
1417 //
1418 // Only set the SMM Feature Control MSR value if the new value is different than the current value
1419 //
1420 if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
1421 SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
1422 }
1423
1424 //
1425 // Release the spin lock user to serialize the updates to the SMM Feature Control MSR
1426 //
1427 ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
1428}
1429
/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  PERF_FUNCTION_BEGIN ();

  //
  // Retrieve the index of the currently executing CPU (the BSP). It is
  // configured first, before the APs are dispatched below.
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gMmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }

      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      // Note that &Index is also the AP's context buffer, so Index must not
      // change until the AP is done with it.
      //
      Status = gMmst->MmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }

  PERF_FUNCTION_END ();
}
1500
1501/**
1502 Allocate pages for code.
1503
1504 @param[in] Pages Number of pages to be allocated.
1505
1506 @return Allocated memory.
1507**/
1508VOID *
1509AllocateCodePages (
1510 IN UINTN Pages
1511 )
1512{
1513 EFI_STATUS Status;
1514 EFI_PHYSICAL_ADDRESS Memory;
1515
1516 if (Pages == 0) {
1517 return NULL;
1518 }
1519
1520 Status = gMmst->MmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
1521 if (EFI_ERROR (Status)) {
1522 return NULL;
1523 }
1524
1525 return (VOID *)(UINTN)Memory;
1526}
1527
/**
  Perform the pre tasks.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  //
  // Restore the SMM configuration for an S3 resume path; presumably a no-op
  // outside S3 resume — behavior is defined by RestoreSmmConfigurationInS3().
  //
  RestoreSmmConfigurationInS3 ();
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette