source: vbox/trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c@77662

Last change: r77662, checked in by vboxsync, 6 years ago: "EFI: First step in UDK2018 merge. Does not build yet."

/** @file
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.

Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along with its supporting fields.
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  { {0} },                                      // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
};

CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,       // Revision
  0,                                  // Array Length of SmBase and APIC ID
  NULL,                               // Pointer to APIC ID array
  NULL,                               // Pointer to SMBASE array
  0,                                  // Reserved
  0,                                  // SmrrBase
  0                                   // SmrrSize
};

//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
volatile BOOLEAN  *mRebased;
volatile BOOLEAN  mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

UINTN  mMaxNumberOfCpus = 1;
UINTN  mNumberOfCpus = 1;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

UINT8                 mPhysicalAddressBits;

/**
  Initialize IDT to setup exception handlers for SMM.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
  SetInterruptState (InterruptState);
}

/**
  Search module name by input IP address and output it.

  @param CallerIpAddress    Caller instruction pointer.

**/
VOID
DumpModuleInfoByIp (
  IN  UINTN  CallerIpAddress
  )
{
  UINTN  Pe32Data;
  VOID   *PdbPointer;

  //
  // Find Image Base
  //
  Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
  if (Pe32Data != 0) {
    DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
    PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
    if (PdbPointer != NULL) {
      DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
    }
  }
}

/**
  Read information from the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to read from the CPU save state.
  @param  Register  Specifies the CPU register to read from the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state.
  @param  Buffer    Upon return, this holds the CPU register value read from the save state.

  @retval EFI_SUCCESS           The register was read from Save State
  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER This or Buffer is NULL.

**/
EFI_STATUS
EFIAPI
SmmReadSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  OUT VOID                        *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }
  //
  // The AsmLfence() call here is to ensure the above check for the CpuIndex
  // has been completed before the execution of subsequent code.
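  // (In other words, it acts as a speculation barrier so that a speculatively
  // out-of-bounds CpuIndex cannot be consumed by the accesses below.)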
  //
  AsmLfence ();

  //
  // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    //
    // The pseudo-register only supports the 64-bit size specified by Width.
    //
    if (Width != sizeof (UINT64)) {
      return EFI_INVALID_PARAMETER;
    }
    //
    // If the processor is in SMM at the time the SMI occurred,
    // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
    // Otherwise, EFI_NOT_FOUND is returned.
    //
    if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
      *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
      return EFI_SUCCESS;
    } else {
      return EFI_NOT_FOUND;
    }
  }

  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }
  return Status;
}

/**
  Write data to the CPU save state.

  @param  This      EFI_SMM_CPU_PROTOCOL instance
  @param  Width     The number of bytes to write to the CPU save state.
  @param  Register  Specifies the CPU register to write to the save state.
  @param  CpuIndex  Specifies the zero-based index of the CPU save state
  @param  Buffer    Upon entry, this holds the new CPU register value.

  @retval EFI_SUCCESS           The register was written to the save state
  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor
  @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct

**/
EFI_STATUS
EFIAPI
SmmWriteSaveState (
  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
  IN UINTN                        Width,
  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
  IN UINTN                        CpuIndex,
  IN CONST VOID                   *Buffer
  )
{
  EFI_STATUS  Status;

  //
  // Retrieve pointer to the specified CPU's SMM Save State buffer
  //
  if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
  //
  if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
    return EFI_SUCCESS;
  }

  if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {
    return EFI_INVALID_PARAMETER;
  }

  Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  if (Status == EFI_UNSUPPORTED) {
    Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
  }
  return Status;
}

/**
  C function for the SMI handler used to change each processor's SMBASE register.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32  ApicId;
  UINTN   Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }
  ASSERT (FALSE);
}

/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  gSmmCr3 = (UINT32)AsmReadCr3 ();
  gSmmCr4 = (UINT32)AsmReadCr4 ();

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

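  //
  // The architectural default SMBASE is 0x30000, so the default SMI entry
  // point is SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET (0x38000) and the default
  // save state map sits just below SMM_DEFAULT_SMBASE + 64 KB.
  //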
  U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]);
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]);

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}

/**
  SMM Ready To Lock event notification handler.

  The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
  perform additional lock actions that must be performed from SMM on the next SMI.

  @param[in] Protocol   Points to the protocol's unique identifier.
  @param[in] Interface  Points to the interface instance.
  @param[in] Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   Notification handler runs successfully.
**/
EFI_STATUS
EFIAPI
SmmReadyToLockEventNotify (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  GetAcpiCpuData ();

  //
  // Cache a copy of UEFI memory map before we start profiling feature.
  //
  GetUefiMemoryMap ();

  //
  // Set SMM ready to lock flag and return
  //
  mSmmReadyToLock = TRUE;
  return EFI_SUCCESS;
}

/**
  The module Entry Point of the CPU SMM driver.

  @param  ImageHandle    The firmware allocated handle for the EFI image.
  @param  SystemTable    A pointer to the EFI System Table.

  @retval EFI_SUCCESS    The entry point is executed successfully.
  @retval Other          Some error occurs when executing this entry point.

**/
EFI_STATUS
EFIAPI
PiCpuSmmEntry (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  EFI_STATUS                Status;
  EFI_MP_SERVICES_PROTOCOL  *MpServices;
  UINTN                     NumberOfEnabledProcessors;
  UINTN                     Index;
  VOID                      *Buffer;
  UINTN                     BufferPages;
  UINTN                     TileCodeSize;
  UINTN                     TileDataSize;
  UINTN                     TileSize;
  UINT8                     *Stacks;
  VOID                      *Registration;
  UINT32                    RegEax;
  UINT32                    RegEdx;
  UINTN                     FamilyId;
  UINTN                     ModelId;
  UINT32                    Cr3;

  //
  // Initialize address fixup
  //
  PiSmmCpuSmmInitFixupAddress ();
  PiSmmCpuSmiEntryFixupAddress ();

  //
  // Initialize Debug Agent to support source level debug in SMM code
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);

  //
  // Report the start of CPU SMM initialization.
  //
  REPORT_STATUS_CODE (
    EFI_PROGRESS_CODE,
    EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
    );

  //
  // Fix segment address of the long-mode-switch jump
  //
  if (sizeof (UINTN) == sizeof (UINT64)) {
    gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT;
  }

  //
  // Find out SMRR Base and SMRR Size
  //
  FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);

  //
  // Get MP Services Protocol
  //
  Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
  ASSERT_EFI_ERROR (Status);

  //
  // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
  //
  Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
  ASSERT_EFI_ERROR (Status);
  ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));

  //
  // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE
  // because a fixed BSP index makes no sense when the BSP may be hot removed.
  //
  DEBUG_CODE (
    if (FeaturePcdGet (PcdCpuHotPlugSupport)) {

      ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
    }
  );

  //
  // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
  //
  mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
  DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));

  //
  // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
  // Make sure AddressEncMask is confined to the smallest supported address field.
  //
  mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
  DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));

  //
  // If CPU hot plug is supported, allocate resources for processors that may be hot added.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
  } else {
    mMaxNumberOfCpus = mNumberOfCpus;
  }
  gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;

  //
  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
  // allocated buffer. The minimum size of this buffer for a uniprocessor system
  // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area
  // is just below SMBASE + 64KB. If more than one CPU is present in the platform,
  // then the SMI entry point and the CPU save state areas can be tiled to minimize
  // the total amount of SMRAM required for all the CPUs. The tile size can be computed
  // by adding the CPU save state size, any extra CPU specific context, and
  // the size of code that must be placed at the SMI entry point to transfer
  // control to a C function in the native SMM execution mode. This size is
  // rounded up to the nearest power of 2 to give the tile size for each CPU.
  // The total amount of memory required is the maximum number of CPUs that the
  // platform supports times the tile size. The picture below shows the tiling,
  // where m is the number of tiles that fit in 32KB.
  //
  // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
  // | CPU m+1 Save State          |
  // +-----------------------------+
  // | CPU m+1 Extra Data          |
  // +-----------------------------+
  // | Padding                     |
  // +-----------------------------+
  // | CPU 2m SMI Entry            |
  // +#############################+ <-- Base of allocated buffer + 64 KB
  // | CPU m-1 Save State          |
  // +-----------------------------+
  // | CPU m-1 Extra Data          |
  // +-----------------------------+
  // | Padding                     |
  // +-----------------------------+
  // | CPU 2m-1 SMI Entry          |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // | . . . . . . . . . . . .     |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // | CPU 2 Save State            |
  // +-----------------------------+
  // | CPU 2 Extra Data            |
  // +-----------------------------+
  // | Padding                     |
  // +-----------------------------+
  // | CPU m+1 SMI Entry           |
  // +=============================+ <-- Base of allocated buffer + 32 KB
  // | CPU 1 Save State            |
  // +-----------------------------+
  // | CPU 1 Extra Data            |
  // +-----------------------------+
  // | Padding                     |
  // +-----------------------------+
  // | CPU m SMI Entry             |
  // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
  // | CPU 0 Save State            |
  // +-----------------------------+
  // | CPU 0 Extra Data            |
  // +-----------------------------+
  // | Padding                     |
  // +-----------------------------+
  // | CPU m-1 SMI Entry           |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // | . . . . . . . . . . . .     |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // | Padding                     |
  // +-----------------------------+
  // | CPU 1 SMI Entry             |
  // +=============================+ <-- 2^n offset from Base of allocated buffer
  // | Padding                     |
  // +-----------------------------+
  // | CPU 0 SMI Entry             |
  // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
  //

  //
  // Retrieve CPU Family
  //
  AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
  FamilyId = (RegEax >> 8) & 0xf;
  ModelId = (RegEax >> 4) & 0xf;
  if (FamilyId == 0x06 || FamilyId == 0x0f) {
    ModelId = ModelId | ((RegEax >> 12) & 0xf0);
  }

  RegEdx = 0;
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
  }
  //
  // Determine the mode of the CPU at the time an SMI occurs
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.4.1.1
  //
  mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
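  //
  // CPUID.80000001h:EDX[29] set means the processor supports Intel 64 (long
  // mode), so the 64-bit save state map layout applies.
  //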
  if ((RegEdx & BIT29) != 0) {
    mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
  }
  if (FamilyId == 0x06) {
    if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
      mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
    }
  }

  //
  // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, the extra CPU
  // specific context that starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.
  // This size is rounded up to the nearest power of 2.
  //
  TileCodeSize = GetSmiHandlerSize ();
  TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
  TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
  TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
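  //
  // GetPowerOfTwo32() returns the value of the highest set bit, so taking the
  // sum minus 1 and then doubling rounds TileSize up to the smallest power of
  // two that can hold both the code and data portions of a tile.
  //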
  TileSize = TileDataSize + TileCodeSize - 1;
  TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
  DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));

  //
  // If the TileSize is larger than space available for the SMI Handler of
  // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
  // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
  // the SMI Handler size must be reduced or the size of the extra CPU specific
  // context must be reduced.
  //
  ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

  //
  // Allocate buffer for all of the tiles.
  //
  // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
  // Volume 3C, Section 34.11 SMBASE Relocation
  //   For Pentium and Intel486 processors, the SMBASE values must be
  //   aligned on a 32-KByte boundary or the processor will enter shutdown
  //   state during the execution of a RSM instruction.
  //
  // Intel486 processors: FamilyId is 4
  // Pentium processors : FamilyId is 5
  //
  BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
  if ((FamilyId == 4) || (FamilyId == 5)) {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
  } else {
    Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
  }
  ASSERT (Buffer != NULL);
  DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));

  //
  // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
  //
  gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);

  gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->Operation != NULL);

  gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);

  gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
  ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);

  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
  mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;

  //
  // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
  //
  mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.ApicId != NULL);
  mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mCpuHotPlugData.SmBase != NULL);
  mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;

  //
  // Retrieve APIC ID of each enabled processor from the MP Services protocol.
  // Also compute the SMBASE address, CPU Save State address, and CPU Save state
  // size for each CPU in the platform
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
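    //
    // SMBASE is chosen so that SMBASE + SMM_HANDLER_OFFSET (the SMI entry
    // point) lands at the start of this CPU's tile inside the allocated
    // buffer, which is why SMM_HANDLER_OFFSET is subtracted here.
    //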
    mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
    gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
    gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
    gSmmCpuPrivate->Operation[Index] = SmmCpuNone;

    if (Index < mNumberOfCpus) {
      Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
      ASSERT_EFI_ERROR (Status);
      mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;

      DEBUG ((EFI_D_INFO, "CPU[%03x]  APIC ID=%04x  SMBASE=%08x  SaveState=%08x  Size=%08x\n",
        Index,
        (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
        mCpuHotPlugData.SmBase[Index],
        gSmmCpuPrivate->CpuSaveState[Index],
        gSmmCpuPrivate->CpuSaveStateSize[Index]
        ));
    } else {
      gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
      mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
    }
  }

  //
  // Allocate SMI stacks for all processors.
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Two more pages are allocated for each processor:
    // one is the guard page and the other is the known good stack.
    //
    // +-------------------------------------------+-----+-------------------------------------------+
    // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
    // +-------------------------------------------+-----+-------------------------------------------+
    // |                                           |     |                                           |
    // |<-------------- Processor 0 -------------->|     |<-------------- Processor n -------------->|
    //
    mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2);
    Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2));
    ASSERT (Stacks != NULL);
    mSmmStackArrayBase = (UINTN)Stacks;
    mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1;
  } else {
    mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize);
    Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize));
    ASSERT (Stacks != NULL);
  }

  //
  // Set SMI stack for SMM base relocation
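  // (The stack grows downward, so this points at the topmost UINTN-sized slot
  // of the first processor's stack area.)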
  //
  gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN));

  //
  // Initialize IDT
  //
  InitializeSmmIdt ();

  //
  // Relocate SMM Base addresses to the ones allocated from SMRAM
  //
  mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
  ASSERT (mRebased != NULL);
  SmmRelocateBases ();

  //
  // Call hook for BSP to perform extra actions in normal mode after all
  // SMM base addresses have been relocated on all CPUs
  //
  SmmCpuFeaturesSmmRelocationComplete ();

  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

  //
  // SMM Time initialization
  //
  InitializeSmmTimer ();

  //
  // Initialize MP globals
  //
  Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize);

  //
  // Fill in SMM Reserved Regions
  //
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
  gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize  = 0;

  //
  // Install the SMM Configuration Protocol onto a new handle on the handle database.
  // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
  // to an SMRAM address will be present in the handle database
  //
  Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
             &gSmmCpuPrivate->SmmCpuHandle,
             &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
             NULL
             );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM CPU Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEfiSmmCpuProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmCpu
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Install the SMM Memory Attribute Protocol into SMM protocol database
  //
  Status = gSmst->SmmInstallProtocolInterface (
                    &mSmmCpuHandle,
                    &gEdkiiSmmMemoryAttributeProtocolGuid,
                    EFI_NATIVE_INTERFACE,
                    &mSmmMemoryAttribute
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
  //
  if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
    Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Initialize SMM CPU Services Support
  //
  Status = InitializeSmmCpuServices (mSmmCpuHandle);
  ASSERT_EFI_ERROR (Status);

  //
  // register SMM Ready To Lock Protocol notification
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    SmmReadyToLockEventNotify,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  //
  // Initialize SMM Profile feature
  //
  InitSmmProfile (Cr3);

  GetAcpiS3EnableFlag ();
  InitSmmS3ResumeState (Cr3);

  DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));

  return EFI_SUCCESS;
}

/**

  Find out SMRAM information including SMRR base and SMRR size.

  @param  SmrrBase   SMRR base
  @param  SmrrSize   SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32  *SmrrBase,
  OUT UINT32  *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information
  //
  Size = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

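  //
  // Extend the selected range by merging any SMRAM descriptor that is
  // immediately adjacent below or above it, until no neighbor is left.
  //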
  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
          *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found = TRUE;
      }
    }
  } while (Found);

  DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}

/**
  Configure SMM Code Access Check feature on an AP.
  SMM Feature Control MSR will be locked after configuration.

  @param[in,out] Buffer  Pointer to private data buffer.
**/
VOID
EFIAPI
ConfigSmmCodeAccessCheckOnCurrentProcessor (
  IN OUT VOID  *Buffer
  )
{
  UINTN   CpuIndex;
  UINT64  SmmFeatureControlMsr;
  UINT64  NewSmmFeatureControlMsr;

  //
  // Retrieve the CPU Index from the context passed in
  //
  CpuIndex = *(UINTN *)Buffer;

  //
  // Get the current SMM Feature Control MSR value
  //
  SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);

  //
  // Compute the new SMM Feature Control MSR value
  //
  NewSmmFeatureControlMsr = SmmFeatureControlMsr;
  if (mSmmCodeAccessCheckEnable) {
    NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
    if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
      NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
    }
  }

  //
  // Only set the SMM Feature Control MSR value if the new value is different than the current value
  //
  if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
    SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
  }

  //
  // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
  //
  ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
}

/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist
        //
        continue;
      }
      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}

/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
  allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
  is returned. If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param  Pages  The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
  if (Buffer != NULL) {
    return Buffer;
  }
  return AllocatePages (Pages);
}

/**
  Allocate pages for code.

  @param[in] Pages  Number of pages to be allocated.

  @return Allocated memory.
**/
VOID *
AllocateCodePages (
  IN UINTN  Pages
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;

  if (Pages == 0) {
    return NULL;
  }

  Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
  if (EFI_ERROR (Status)) {
    return NULL;
  }
  return (VOID *) (UINTN) Memory;
}

/**
  Allocate aligned pages for code.

  @param[in] Pages      Number of pages to be allocated.
  @param[in] Alignment  The requested alignment of the allocation.
                        Must be a power of two.
                        If Alignment is zero, then byte alignment is used.

  @return Allocated memory.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN  Pages,
  IN UINTN  Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;
  UINTN                 AlignedMemory;
  UINTN                 AlignmentMask;
  UINTN                 UnalignedPages;
  UINTN                 RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }
  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    //
    AlignmentMask = Alignment - 1;
    RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
    Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }
    AlignedMemory = (UINTN) Memory;
  }
  return (VOID *) AlignedMemory;
}

/**
  Perform the remaining tasks.

**/
VOID
PerformRemainingTasks (
  VOID
  )
{
  if (mSmmReadyToLock) {
    //
    // Start SMM Profile feature
    //
    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      SmmProfileStart ();
    }
    //
    // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as not-present and execute-disable.
    //
    InitPaging ();

    //
    // Mark critical regions read-only in the page table
    //
    SetMemMapAttributes ();

    //
    // For outside SMRAM, we only map SMM communication buffer or MMIO.
    //
    SetUefiMemMapAttributes ();

    //
    // Set page table itself to be read-only
    //
    SetPageTableAttributes ();

    //
    // Configure SMM Code Access Check feature if available.
    //
    ConfigSmmCodeAccessCheck ();

    SmmCpuFeaturesCompleteSmmReadyToLock ();

    //
    // Clear the SMM ready-to-lock flag
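    // so that this one-time lock-down sequence does not run again on later SMIs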
    //
    mSmmReadyToLock = FALSE;
  }
}

/**
  Perform the pre tasks.

**/
VOID
PerformPreTasks (
  VOID
  )
{
  RestoreSmmConfigurationInS3 ();
}