VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c@101291

Last change on this file since 101291 was 101291, checked in by vboxsync, 14 months ago

EFI/FirmwareNew: Make edk2-stable202308 build on all supported platforms (using gcc at least, msvc not tested yet), bugref:4643

  • Property svn:eol-style set to native
File size: 52.1 KB
/** @file
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.

Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along with its supporting fields.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  {
    { 0 }
  },                                            // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
  NULL,                                         // pointer to Ap Wrapper Func array
  { NULL, NULL },                               // List_Entry for Tokens.
};

CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
volatile BOOLEAN  *mRebased;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

BOOLEAN           mSmmRelocated    = FALSE;
volatile BOOLEAN  *mSmmInitialized = NULL;
UINT32            mBspApicId       = 0;

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

UINTN    mSmmShadowStackSize;
BOOLEAN  mCetSupported = TRUE;

UINTN  mMaxNumberOfCpus = 1;
UINTN  mNumberOfCpus    = 1;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

UINT8  mPhysicalAddressBits;

//
// Control register contents saved for SMM S3 resume state initialization.
//
UINT32  mSmmCr0;
UINT32  mSmmCr4;

/**
  Initialize IDT to setup exception handlers for SMM.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
  SetInterruptState (InterruptState);
}
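
//
// Worked example of the Limit computation above (descriptor sizes are
// arch-dependent, so treat the numbers as illustrative): on X64 builds,
// sizeof (IA32_IDT_GATE_DESCRIPTOR) is 16 bytes, so Limit = 16 * 32 - 1 =
// 0x1FF and the table occupies 512 bytes; on IA32 the descriptor is 8 bytes
// and the table needs 256 bytes. Either way, EFI_SIZE_TO_PAGES rounds the
// allocation up to one 4 KB page, which is what makes the page-granular
// read-only protection mentioned above possible.
//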

/**
  Search module name by input IP address and output it.

  @param CallerIpAddress    Caller instruction pointer.

**/
VOID
DumpModuleInfoByIp (
  IN UINTN  CallerIpAddress
  )
{
  UINTN  Pe32Data;
  VOID   *PdbPointer;

  //
  // Find Image Base
  //
  Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
  if (Pe32Data != 0) {
    DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));
    PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);
    if (PdbPointer != NULL) {
      DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
    }
  }
}

/**
  Read information from the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to read from the CPU save state.
  @param  Register  Specifies the CPU register to read from the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state.
  @param  Buffer    Upon return, this holds the CPU register value read from the save state.

  @retval EFI_SUCCESS            The register was read from Save State.
  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor.
  @retval EFI_INVALID_PARAMETER  This or Buffer is NULL.

**/
EFI_STATUS
EFIAPI
SmmReadSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  OUT VOID                        *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // The SpeculationBarrier() call here is to ensure the above check for the
  // CpuIndex has been completed before the execution of subsequent codes.
  //
  SpeculationBarrier ();

  //
  // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    //
    // The pseudo-register only supports the 64-bit size specified by Width.
    //
    if (Width != sizeof (UINT64)) {
      return EFI_INVALID_PARAMETER;
    }

    //
    // If the processor is in SMM at the time the SMI occurred,
    // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
    // Otherwise, EFI_NOT_FOUND is returned.
    //
    if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
      *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
      return EFI_SUCCESS;
    } else {
      return EFI_NOT_FOUND;
    }
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    return EFI_INVALID_PARAMETER;
  }

  Status = MmSaveStateReadRegister (CpuIndex, Register, Width, Buffer);

  return Status;
}
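
//
// Illustrative sketch (hypothetical caller, compiled out; not part of this
// driver): reading the PROCESSOR_ID pseudo-register through the
// EFI_SMM_CPU_PROTOCOL installed by this driver. Width must be
// sizeof (UINT64) for this pseudo-register, and EFI_NOT_FOUND indicates the
// target CPU was not in SMM when the SMI occurred.
//
#if 0
STATIC
EFI_STATUS
ExampleReadProcessorId (
  IN  EFI_SMM_CPU_PROTOCOL  *SmmCpu,
  IN  UINTN                 CpuIndex,
  OUT UINT64                *ProcessorId
  )
{
  return SmmCpu->ReadSaveState (
                   SmmCpu,
                   sizeof (UINT64),
                   EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID,
                   CpuIndex,
                   ProcessorId
                   );
}
#endif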

/**
  Write data to the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to write to the CPU save state.
  @param  Register  Specifies the CPU register to write to the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state.
  @param  Buffer    Upon entry, this holds the new CPU register value.

  @retval EFI_SUCCESS            The register was written to Save State.
  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor.
  @retval EFI_INVALID_PARAMETER  ProcessorIndex or Width is not correct.

**/
EFI_STATUS
EFIAPI
SmmWriteSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  IN CONST VOID                   *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    return EFI_SUCCESS;
  }

  if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
    return EFI_INVALID_PARAMETER;
  }

  Status = MmSaveStateWriteRegister (CpuIndex, Register, Width, Buffer);

  return Status;
}

/**
  C function for the SMI handler, used to change all processors' SMBASE registers.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32   ApicId;
  UINTN    Index;
  BOOLEAN  IsBsp;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  IsBsp = (BOOLEAN)(mBspApicId == ApicId);

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      PERF_CODE (
        MpPerfBegin (Index, SMM_MP_PERF_PROCEDURE_ID (SmmInitHandler));
        );
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        IsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      } else if (IsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      if (!mSmmRelocated) {
        //
        // Hook return after RSM to set SMM re-based flag
        //
        SemaphoreHook (Index, &mRebased[Index]);
      }

      PERF_CODE (
        MpPerfEnd (Index, SMM_MP_PERF_PROCEDURE_ID (SmmInitHandler));
        );

      return;
    }
  }

  ASSERT (FALSE);
}

/**
  Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.

**/
VOID
ExecuteFirstSmiInit (
  VOID
  )
{
  UINTN  Index;

  PERF_FUNCTION_BEGIN ();

  if (mSmmInitialized == NULL) {
    mSmmInitialized = (BOOLEAN *)AllocatePool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  }

  ASSERT (mSmmInitialized != NULL);
  if (mSmmInitialized == NULL) {
    PERF_FUNCTION_END ();
    return;
  }

  //
  // Reset the mSmmInitialized to false.
  //
  ZeroMem ((VOID *)mSmmInitialized, sizeof (BOOLEAN) * mMaxNumberOfCpus);

  //
  // Get the BSP ApicId.
  //
  mBspApicId = GetApicId ();

  //
  // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) for SMM init
  //
  SendSmiIpi (mBspApicId);
  SendSmiIpiAllExcludingSelf ();

  //
  // Wait for all processors to finish their 1st SMI
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    while (!(BOOLEAN)mSmmInitialized[Index]) {
    }
  }

  PERF_FUNCTION_END ();
}
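
//
// Note on the handshake above (a sketch of the flow, not additional code):
// each CPU's first SMI runs the SMM init path, which sets that CPU's
// mSmmInitialized[Index] flag once its first SMI completes. Because
// mSmmInitialized points to volatile BOOLEANs, the busy-wait loop re-reads
// memory on every iteration instead of caching a stale value.
//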

/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINTN                 Index;
  UINTN                 BspIndex;

  PERF_FUNCTION_BEGIN ();

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  mSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  mSmmCr4 = (UINT32)AsmReadCr4 ();
  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr       = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  mBspApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (mBspApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]) {
      }
    } else {
      //
      // BSP will be relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  SendSmiIpi (mBspApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]) {
  }

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
  PERF_FUNCTION_END ();
}
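
//
// Worked example of the fixed addresses used above: SMM_DEFAULT_SMBASE is
// 0x30000 and SMM_HANDLER_OFFSET is 0x8000, so the relocation template is
// copied to 0x38000 (the address named in the backup/restore comments), and
// with SMRAM_SAVE_STATE_MAP_OFFSET = 0xFC00 the default save state map sits
// at 0x3FC00. After relocation, each CPU ends up with
// SMBASE = Buffer + Index * TileSize - SMM_HANDLER_OFFSET, so its SMI entry
// (SMBASE + 0x8000) lands exactly on its tile in the allocated buffer.
//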

/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
**/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}

/**
  The module Entry Point of the CPU SMM driver.

  @param  ImageHandle    The firmware allocated handle for the EFI image.
  @param  SystemTable    A pointer to the EFI System Table.

  @retval EFI_SUCCESS    The entry point is executed successfully.
  @retval Other          Some error occurs when executing this entry point.

**/
EFI_STATUS
EFIAPI
PiCpuSmmEntry (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS                Status;
  EFI_MP_SERVICES_PROTOCOL  *MpServices;
  UINTN                     NumberOfEnabledProcessors;
  UINTN                     Index;
  VOID                      *Buffer;
  UINTN                     BufferPages;
  UINTN                     TileCodeSize;
  UINTN                     TileDataSize;
  UINTN                     TileSize;
  UINT8                     *Stacks;
  VOID                      *Registration;
  UINT32                    RegEax;
  UINT32                    RegEbx;
  UINT32                    RegEcx;
  UINT32                    RegEdx;
  UINTN                     FamilyId;
  UINTN                     ModelId;
  UINT32                    Cr3;
  EFI_HOB_GUID_TYPE         *GuidHob;
  SMM_BASE_HOB_DATA         *SmmBaseHobData;

  GuidHob        = NULL;
  SmmBaseHobData = NULL;

  PERF_FUNCTION_BEGIN ();

  //
  // Initialize address fixup
  //
  PiSmmCpuSmmInitFixupAddress ();
  PiSmmCpuSmiEntryFixupAddress ();

  //
  // Initialize Debug Agent to support source level debug in SMM code
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);

  //
  // Report the start of CPU SMM initialization.
  //
  REPORT_STATUS_CODE (
    EFI_PROGRESS_CODE,
    EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
    );

  //
  // Find out SMRR Base and SMRR Size
  //
  FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);

  //
  // Get MP Services Protocol
  //
  Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
  ASSERT_EFI_ERROR (Status);

  //
  // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
  //
  Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
  ASSERT_EFI_ERROR (Status);
  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE.
  // A constant BSP index makes no sense because the BSP may be hot removed.
  //
  DEBUG_CODE_BEGIN ();
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
  }

  DEBUG_CODE_END ();

  //
  // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
  //
  mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
  DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));

  //
  // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
  // Make sure AddressEncMask is contained to smallest supported address field.
  //
  mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
  DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));

  //
  // If CPU hot plug is supported, we need to allocate resources for possibly hot-added processors
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
  } else {
    mMaxNumberOfCpus = mNumberOfCpus;
  }

  gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;

  PERF_CODE (
    InitializeMpPerf (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    );

  //
  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
  // allocated buffer. The minimum size of this buffer for a uniprocessor system
  // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area
  // is just below SMBASE + 64KB. If more than one CPU is present in the platform,
  // then the SMI entry point and the CPU save state areas can be tiled to minimize
  // the total amount of SMRAM required for all the CPUs. The tile size can be computed
  // by adding the CPU save state size, any extra CPU specific context, and
  // the size of code that must be placed at the SMI entry point to transfer
  // control to a C function in the native SMM execution mode. This size is
  // rounded up to the nearest power of 2 to give the tile size for each CPU.
  // The total amount of memory required is the maximum number of CPUs that the
  // platform supports times the tile size. The picture below shows the tiling,
  // where m is the number of tiles that fit in 32KB.
  //
  // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
  // |   CPU m+1 Save State        |
  // +-----------------------------+
  // |   CPU m+1 Extra Data        |
  // +-----------------------------+
  // |          Padding            |
  // +-----------------------------+
  // |       CPU 2m SMI Entry      |
  // +#############################+ <-- Base of allocated buffer + 64 KB
  // |   CPU m-1 Save State        |
  // +-----------------------------+
  // |   CPU m-1 Extra Data        |
  // +-----------------------------+
  // |          Padding            |
  // +-----------------------------+
  // |      CPU 2m-1 SMI Entry     |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   . . . . . . . . . . . .   |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   CPU 2 Save State          |
  // +-----------------------------+
  // |   CPU 2 Extra Data          |
  // +-----------------------------+
  // |          Padding            |
  // +-----------------------------+
  // |      CPU m+1 SMI Entry      |
  // +=============================+ <-- Base of allocated buffer + 32 KB
  // |   CPU 1 Save State          |
  // +-----------------------------+
  // |   CPU 1 Extra Data          |
  // +-----------------------------+
  // |          Padding            |
  // +-----------------------------+
  // |       CPU m SMI Entry       |
  // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
  // |   CPU 0 Save State          |
  // +-----------------------------+
  // |   CPU 0 Extra Data          |
  // +-----------------------------+
  // |          Padding            |
  // +-----------------------------+
  // |      CPU m-1 SMI Entry      |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |   . . . . . . . . . . . .   |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |          Padding            |
  // +-----------------------------+
  // |       CPU 1 SMI Entry       |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // |          Padding            |
  // +-----------------------------+
  // |       CPU 0 SMI Entry       |
  // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
  //

  //
  // Retrieve CPU Family
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
  FamilyId = (RegEax >> 8) & 0xf;
  ModelId  = (RegEax >> 4) & 0xf;
  if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {
    ModelId = ModelId | ((RegEax >> 12) & 0xf0);
  }
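
  //
  // Worked example of the decode above (illustrative values): if CPUID
  // leaf 1 returns EAX = 0x000306A9, then FamilyId = (EAX >> 8) & 0xF = 0x6
  // and the low model bits are (EAX >> 4) & 0xF = 0xA. Because FamilyId is
  // 0x06, the extended model bits are folded in:
  // ModelId = 0xA | ((EAX >> 12) & 0xF0) = 0xA | 0x30 = 0x3A.
  //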

  RegEdx = 0;
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
  }

  //
  // Determine the mode of the CPU at the time an SMI occurs
  //   Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  //   Volume 3C, Section 34.4.1.1
  //
  mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
  if ((RegEdx & BIT29) != 0) {
    mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
  }

  if (FamilyId == 0x06) {
    if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {
      mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
    }
  }

  DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
  if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
    AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
    if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
      AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
      DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
      DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
      DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
      if ((RegEcx & CPUID_CET_SS) == 0) {
        mCetSupported = FALSE;
        PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
      }

      if (mCetSupported) {
        AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
        DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
        AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
        DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
        AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
        DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
      }
    } else {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
  } else {
    mCetSupported = FALSE;
    PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
  }

  //
  // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
  // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
  // This size is rounded up to nearest power of 2.
  //
  TileCodeSize = GetSmiHandlerSize ();
  TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);
  TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
  TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);
  TileSize     = TileDataSize + TileCodeSize - 1;
  TileSize     = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
  DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
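
  //
  // Worked example of the rounding above (sizes are build-dependent, so the
  // numbers are illustrative): with a 4 KB-aligned TileCodeSize of 0x1000 and
  // a TileDataSize of 0x1000, TileSize is first 0x1000 + 0x1000 - 1 = 0x1FFF.
  // GetPowerOfTwo32 (0x1FFF) returns the highest power of two not above its
  // argument, 0x1000, so TileSize = 2 * 0x1000 = 0x2000 (8 KB) -- exactly the
  // SIZE_8KB budget checked against the SMM base HOB below.
  //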

  //
  // If the TileSize is larger than space available for the SMI Handler of
  // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
  // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
  // the SMI Handler size must be reduced or the size of the extra CPU specific
  // context must be reduced.
  //
  ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

  //
  // Retrieve the allocated SmmBase from gSmmBaseHobGuid. If found, it
  // means the SmBase relocation has already been done.
  //
  GuidHob = GetFirstGuidHob (&gSmmBaseHobGuid);
  if (GuidHob != NULL) {
    //
    // Check whether the required TileSize fits.
    //
    if (TileSize > SIZE_8KB) {
      DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));
      CpuDeadLoop ();
      return RETURN_BUFFER_TOO_SMALL;
    }

    SmmBaseHobData = GET_GUID_HOB_DATA (GuidHob);

    //
    // Assume a single instance of the HOB is produced; expect HOB.NumberOfProcessors to equal mMaxNumberOfCpus.
    //
    ASSERT (SmmBaseHobData->NumberOfProcessors == (UINT32)mMaxNumberOfCpus && SmmBaseHobData->ProcessorIndex == 0);
    mSmmRelocated = TRUE;
  } else {
    //
    // When the HOB doesn't exist, allocate the new SMBASE itself.
    //
    DEBUG ((DEBUG_INFO, "PiCpuSmmEntry: gSmmBaseHobGuid not found!\n"));
    //
    // Very old processors (i486 and Pentium) need 32k rather than 4k alignment; exclude them.
    //
    ASSERT (FamilyId >= 6);
    //
    // Allocate buffer for all of the tiles.
    //
    BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
    Buffer      = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
    if (Buffer == NULL) {
      DEBUG ((DEBUG_ERROR, "Failed to allocate %Lu pages.\n", (UINT64)BufferPages));
      CpuDeadLoop ();
      return EFI_OUT_OF_RESOURCES;
    }

    ASSERT (Buffer != NULL);
    DEBUG ((DEBUG_INFO, "New Allocated SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));
  }
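
  //
  // Worked example of the allocation above (illustrative numbers): with
  // mMaxNumberOfCpus = 4 and TileSize = 0x2000, the buffer needs
  // SIZE_32KB + 3 * 0x2000 = 0x8000 + 0x6000 = 0xE000 bytes, which
  // EFI_SIZE_TO_PAGES turns into 14 pages. The first tile is 32 KB because
  // CPU 0's SMI entry is at SMBASE + 32KB and its save state ends just below
  // SMBASE + 64KB, as the tiling diagram above shows.
  //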

  //
  // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
  //
  gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);

  gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->Operation != NULL);

  gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);

  gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);

  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState     = gSmmCpuPrivate->CpuSaveState;

  //
  // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
  //
  mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.ApicId != NULL);
  mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.SmBase != NULL);
  mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;

  //
  // Retrieve APIC ID of each enabled processor from the MP Services protocol.
  // Also compute the SMBASE address, CPU Save State address, and CPU Save State
  // size for each CPU in the platform.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    mCpuHotPlugData.SmBase[Index] = mSmmRelocated ? (UINTN)SmmBaseHobData->SmBase[Index] : (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;

    gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);
    gSmmCpuPrivate->CpuSaveState[Index]     = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
    gSmmCpuPrivate->Operation[Index]        = SmmCpuNone;

    if (Index < mNumberOfCpus) {
      Status = MpServices->GetProcessorInfo (MpServices, Index | CPU_V2_EXTENDED_TOPOLOGY, &gSmmCpuPrivate->ProcessorInfo[Index]);
      ASSERT_EFI_ERROR (Status);
      mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;

      DEBUG ((
        DEBUG_INFO,
        "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
        Index,
        (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
        mCpuHotPlugData.SmBase[Index],
        gSmmCpuPrivate->CpuSaveState[Index],
        gSmmCpuPrivate->CpuSaveStateSize[Index]
        ));
    } else {
      gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
      mCpuHotPlugData.ApicId[Index]                    = INVALID_APIC_ID;
    }
  }

  //
  // Allocate SMI stacks for all processors.
  //
  mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // SMM Stack Guard Enabled
    //   2 more pages are allocated for each processor: one is the guard page and the other is the known good stack.
    //
    // +--------------------------------------------------+-----+--------------------------------------------------+
    // | Known Good Stack | Guard Page |     SMM Stack    | ... | Known Good Stack | Guard Page |     SMM Stack    |
    // +--------------------------------------------------+-----+--------------------------------------------------+
    // |        4K        |    4K      |PcdCpuSmmStackSize|     |        4K        |    4K      |PcdCpuSmmStackSize|
    // |<---------------- mSmmStackSize ----------------->|     |<---------------- mSmmStackSize ----------------->|
    // |                                                  |     |                                                  |
    // |<------------------ Processor 0 ----------------->|     |<------------------ Processor n ----------------->|
    //
    mSmmStackSize += EFI_PAGES_TO_SIZE (2);
  }

  mSmmShadowStackSize = 0;
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));

    if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      //
      // SMM Stack Guard Enabled
      //   Append the shadow stack after the normal stack.
      //   2 more pages are allocated for each processor: one is the guard page and the other is the known good shadow stack.
      //
      // |= Stacks
      // +--------------------------------------------------+---------------------------------------------------------------+
      // | Known Good Stack | Guard Page |     SMM Stack    | Known Good Shadow Stack | Guard Page |    SMM Shadow Stack    |
      // +--------------------------------------------------+---------------------------------------------------------------+
      // |        4K        |    4K      |PcdCpuSmmStackSize|            4K           |    4K      |PcdCpuSmmShadowStackSize|
      // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
      // |                                                  |                                                               |
      // |<-------------------------------------------- Processor N ------------------------------------------------------>|
      //
      mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
    } else {
      //
      // SMM Stack Guard Disabled (a known good stack is still required for a potential stack switch).
      //   Append the shadow stack after the normal stack, with 1 more page as the known good shadow stack.
      //   1 more page is allocated for each processor; it is the known good stack.
      //
      // |= Stacks
      // +-------------------------------------+--------------------------------------------------+
      // | Known Good Stack |     SMM Stack    | Known Good Shadow Stack |    SMM Shadow Stack    |
      // +-------------------------------------+--------------------------------------------------+
      // |        4K        |PcdCpuSmmStackSize|            4K           |PcdCpuSmmShadowStackSize|
      // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|
      // |                                     |                                                  |
      // |<-------------------------------- Processor N ----------------------------------------->|
      //
      mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
      mSmmStackSize       += EFI_PAGES_TO_SIZE (1);
    }
  }
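
  //
  // Worked example of the sizing above (PCD values are platform choices, so
  // the numbers are illustrative): with PcdCpuSmmStackSize = 16 KB and stack
  // guard enabled, mSmmStackSize = 16 KB + 2 * 4 KB = 24 KB. If CET shadow
  // stacks are also enabled with PcdCpuSmmShadowStackSize = 8 KB,
  // mSmmShadowStackSize = 8 KB + 2 * 4 KB = 16 KB, for a per-processor
  // footprint of 40 KB in the Stacks buffer allocated below.
  //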

  Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
  ASSERT (Stacks != NULL);
  mSmmStackArrayBase = (UINTN)Stacks;
  mSmmStackArrayEnd  = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;

  DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
  DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
  }

  //
  // Set SMI stack for SMM base relocation
  //
  PatchInstructionX86 (
    gPatchSmmInitStack,
    (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),
    sizeof (UINTN)
    );

  //
  // Initialize IDT
  //
  InitializeSmmIdt ();

  //
  // Check whether the SMM base relocation is done or not.
  // If not, perform the SMBASE relocation here.
  //
  if (!mSmmRelocated) {
    //
    // Relocate SMM Base addresses to the ones allocated from SMRAM
    //
    mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
    ASSERT (mRebased != NULL);
    SmmRelocateBases ();

    //
    // Call hook for BSP to perform extra actions in normal mode after all
    // SMM base addresses have been relocated on all CPUs
    //
    SmmCpuFeaturesSmmRelocationComplete ();
  }

  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

  //
  // SMM Time initialization
  //
  InitializeSmmTimer ();

  //
  // Initialize MP globals
  //
  Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);

  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      SetShadowStack (
        Cr3,
        (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
        mSmmShadowStackSize
        );
      if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
        ConvertMemoryPageAttributes (
          Cr3,
          mPagingMode,
          (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
          EFI_PAGES_TO_SIZE (1),
          EFI_MEMORY_RP,
          TRUE,
          NULL
          );
      }
    }
  }

  //
  // For relocated SMBASE, some MSRs & CSRs are still required to be configured in SMM Mode for SMM Initialization.
  // Those MSRs & CSRs must be configured before normal SMI sources happen.
  // So, issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) here to execute first SMI init.
  //
  if (mSmmRelocated) {
    ExecuteFirstSmiInit ();

    //
    // Call hook for BSP to perform extra actions in normal mode after all
    // SMM base addresses have been relocated on all CPUs
    //
    SmmCpuFeaturesSmmRelocationComplete ();
  }

  //
  // Fill in SMM Reserved Regions
  //
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize  = 0;

  //
  // Install the SMM Configuration Protocol onto a new handle on the handle database.
  // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
  // to an SMRAM address will be present in the handle database
  //
  Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
                                        &gSmmCpuPrivate->SmmCpuHandle,
                                        &gEfiSmmConfigurationProtocolGuid,
                                        &gSmmCpuPrivate->SmmConfiguration,
                                        NULL
                                        );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM CPU Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEfiSmmCpuProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmCpu
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM Memory Attribute Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEdkiiSmmMemoryAttributeProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmMemoryAttribute
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize global buffer for MM MP.
  //
  InitializeDataForMmMp ();

  //
  // Initialize Package First Thread Index Info.
  //
  InitPackageFirstThreadIndexInfo ();

  //
  // Install the SMM Mp Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEfiMmMpProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmMp
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Initialize SMM CPU Services Support
  //
  Status = InitializeSmmCpuServices (mSmmCpuHandle);
  ASSERT_EFI_ERROR (Status);

  //
  // Register SMM Ready To Lock Protocol notification
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    SmmReadyToLockEventNotify,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize SMM Profile feature
  //
  InitSmmProfile (Cr3);

  GetAcpiS3EnableFlag ();
  InitSmmS3ResumeState (Cr3);

  DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));

  PERF_FUNCTION_END ();
  return EFI_SUCCESS;
}

/**
  Function to compare 2 EFI_SMRAM_DESCRIPTOR based on CpuStart.

  @param[in] Buffer1  Pointer to the first EFI_SMRAM_DESCRIPTOR to compare.
  @param[in] Buffer2  Pointer to the second EFI_SMRAM_DESCRIPTOR to compare.

  @retval 0   Buffer1 equal to Buffer2
  @retval <0  Buffer1 is less than Buffer2
  @retval >0  Buffer1 is greater than Buffer2
**/
INTN
EFIAPI
CpuSmramRangeCompare (
  IN CONST VOID  *Buffer1,
  IN CONST VOID  *Buffer2
  )
{
  if (((EFI_SMRAM_DESCRIPTOR *)Buffer1)->CpuStart > ((EFI_SMRAM_DESCRIPTOR *)Buffer2)->CpuStart) {
    return 1;
  } else if (((EFI_SMRAM_DESCRIPTOR *)Buffer1)->CpuStart < ((EFI_SMRAM_DESCRIPTOR *)Buffer2)->CpuStart) {
    return -1;
  }

  return 0;
}

/**

  Find out SMRAM information including SMRR base and SMRR size.

  @param          SmrrBase          SMRR base
  @param          SmrrSize          SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32  *SmrrBase,
  OUT UINT32  *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;
  EFI_SMRAM_DESCRIPTOR      SmramDescriptor;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information
  //
  Size   = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Sort the mSmmCpuSmramRanges
  //
  QuickSort (mSmmCpuSmramRanges, mSmmCpuSmramRangeCount, sizeof (EFI_SMRAM_DESCRIPTOR), (BASE_SORT_COMPARE)CpuSmramRangeCompare, &SmramDescriptor);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize           = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
          (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
      {
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      }
    }
  } while (Found);

  DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}
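
//
// Worked example of the coalescing loop above (illustrative addresses):
// suppose the largest qualifying range is [0x7F200000, +0x200000). A
// descriptor [0x7F100000, +0x100000) ends exactly at 0x7F200000, so the base
// is extended downward to 0x7F100000 and the size grows to 0x300000. On the
// next pass, a descriptor starting at 0x7F100000 + 0x300000 = 0x7F400000
// with size 0x100000 is appended, giving a final SMRR region of
// [0x7F100000, +0x400000). The loop repeats until a pass merges nothing.
//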

/**
  Configure SMM Code Access Check feature on an AP.
  SMM Feature Control MSR will be locked after configuration.

  @param[in,out] Buffer  Pointer to private data buffer.
**/
VOID
EFIAPI
ConfigSmmCodeAccessCheckOnCurrentProcessor (
  IN OUT VOID  *Buffer
  )
{
  UINTN   CpuIndex;
  UINT64  SmmFeatureControlMsr;
  UINT64  NewSmmFeatureControlMsr;

  //
  // Retrieve the CPU Index from the context passed in
  //
  CpuIndex = *(UINTN *)Buffer;

  //
  // Get the current SMM Feature Control MSR value
  //
  SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);

  //
  // Compute the new SMM Feature Control MSR value
  //
  NewSmmFeatureControlMsr = SmmFeatureControlMsr;
  if (mSmmCodeAccessCheckEnable) {
    NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
    if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
      NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
    }
  }

  //
  // Only set the SMM Feature Control MSR value if the new value is different than the current value
  //
  if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
    SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
  }

  //
  // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
  //
  ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
}
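
//
// Worked example of the MSR update above (bit positions follow the Intel SDM
// definition of the SMM Feature Control MSR; a platform library may map
// SmmRegFeatureControl elsewhere): with SMM_CODE_CHK_EN_BIT = BIT2 and
// SMM_FEATURE_CONTROL_LOCK_BIT = BIT0, starting from an MSR value of 0 with
// the lock PCD set, the new value is BIT2 | BIT0 = 0x5. Once the lock bit is
// set, further writes to the MSR are rejected until reset, which is why the
// lock is applied in the same write that enables the check.
//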

/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  PERF_FUNCTION_BEGIN ();

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    PERF_FUNCTION_END ();
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    PERF_FUNCTION_END ();
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }

      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }

  PERF_FUNCTION_END ();
}

/**
  Allocate pages for code.

  @param[in]  Pages  Number of pages to be allocated.

  @return Allocated memory.
**/
VOID *
AllocateCodePages (
  IN UINTN  Pages
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;

  if (Pages == 0) {
    return NULL;
  }

  Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
  if (EFI_ERROR (Status)) {
    return NULL;
  }

  return (VOID *)(UINTN)Memory;
}

/**
  Allocate aligned pages for code.

  @param[in]  Pages      Number of pages to be allocated.
  @param[in]  Alignment  The requested alignment of the allocation.
                         Must be a power of two.
                         If Alignment is zero, then byte alignment is used.

  @return Allocated memory.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN  Pages,
  IN UINTN  Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;
  UINTN                 AlignedMemory;
  UINTN                 AlignmentMask;
  UINTN                 UnalignedPages;
  UINTN                 RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }

  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    //
    AlignmentMask = Alignment - 1;
    RealPages     = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

    AlignedMemory  = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }

    Memory         = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

    AlignedMemory = (UINTN)Memory;
  }

  return (VOID *)AlignedMemory;
}
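
//
// Worked example of the align-and-trim logic above (illustrative addresses):
// with Pages = 2 and Alignment = SIZE_32KB, RealPages = 2 + 8 = 10. If the
// allocator returns Memory = 0x7F003000, then
// AlignedMemory = (0x7F003000 + 0x7FFF) & ~0x7FFF = 0x7F008000, so the 5
// leading pages (0x7F003000..0x7F007FFF) are freed, the aligned block covers
// 0x7F008000..0x7F009FFF, and the remaining 10 - 2 - 5 = 3 trailing pages
// are freed as well.
//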

/**
  Perform the remaining tasks.

**/
VOID
PerformRemainingTasks (
  VOID
  )
{
  if (mSmmReadyToLock) {
    PERF_FUNCTION_BEGIN ();

    //
    // Check if all APs have entered SMM. In Relaxed-AP Sync Mode, the BSP does
    // not wait for all APs to arrive. However, PerformRemainingTasks() needs all
    // APs to arrive before calling SetMemMapAttributes() and
    // ConfigSmmCodeAccessCheck() when mSmmReadyToLock is true: in
    // SetMemMapAttributes(), SmmSetMemoryAttributesEx() calls FlushTlbForAll(),
    // which needs to start up the APs, and ConfigSmmCodeAccessCheck() starts up
    // the APs as well.
    //
    if (EFI_ERROR (SmmCpuRendezvous (NULL, TRUE))) {
      DEBUG ((DEBUG_ERROR, "PerformRemainingTasks: fail to wait for all AP check in SMM!\n"));
    }

    //
    // Start SMM Profile feature
    //
    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      SmmProfileStart ();
    }

    //
    // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
    //
    InitPaging ();

    //
    // Mark critical region to be read-only in page table
    //
    SetMemMapAttributes ();

    if (IsRestrictedMemoryAccess ()) {
      //
      // For outside SMRAM, we only map SMM communication buffer or MMIO.
      //
      SetUefiMemMapAttributes ();

      //
      // Set page table itself to be read-only
      //
      SetPageTableAttributes ();
    }

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    //
    // Measure performance of SmmCpuFeaturesCompleteSmmReadyToLock() from caller side
    // as the implementation is provided by platform.
    //
    PERF_START (NULL, "SmmCompleteReadyToLock", NULL, 0);
    SmmCpuFeaturesCompleteSmmReadyToLock ();
    PERF_END (NULL, "SmmCompleteReadyToLock", NULL, 0);

    //
    // Clean SMM ready to lock flag
    //
    mSmmReadyToLock = FALSE;

    PERF_FUNCTION_END ();
  }
}

/**
  Perform the pre tasks.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}