VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c@ 108794

Last change on this file since 108794 was 108794, checked in by vboxsync, 2 weeks ago

Devices/EFI/FirmwareNew: Merge edk2-stable202502 from the vendor branch and make it build for the important platforms, bugref:4643

  • Property svn:eol-style set to native
File size: 39.1 KB
Line 
1/** @file
2Enable SMM profile.
3
4Copyright (c) 2012 - 2024, Intel Corporation. All rights reserved.<BR>
5Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>
6
7SPDX-License-Identifier: BSD-2-Clause-Patent
8
9**/
10
11#include "PiSmmCpuCommon.h"
12#include "SmmProfileInternal.h"
13
//
// CR3 value for the SMM profile page tables (saved by InitSmmProfile).
//
UINT32  mSmmProfileCr3;

//
// Base of the SMM profile data buffer: SMM_PROFILE_HEADER followed by data entries.
//
SMM_PROFILE_HEADER  *mSmmProfileBase;

//
// Base of the per-CPU Debug Store (DS) save areas used for branch trace store.
//
MSR_DS_AREA_STRUCT  *mMsrDsAreaBase;

//
// The size, in bytes, of the buffer to store SMM profile data.
//
UINTN  mSmmProfileSize;

//
// The size, in bytes, of the buffer to enable branch trace store.
//
UINTN  mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;

//
// The flag indicates if execute-disable is supported by processor.
//
BOOLEAN  mXdSupported = TRUE;

//
// The flag indicates if execute-disable is enabled on processor.
//
BOOLEAN  mXdEnabled = FALSE;

//
// The flag indicates if BTS is supported by processor.
//
BOOLEAN  mBtsSupported = TRUE;

//
// The flag indicates if SMM profile is enabled.
//
BOOLEAN  mSmmProfileEnabled = FALSE;

//
// The flag indicates if SMM profile starts to record data.
//
BOOLEAN  mSmmProfileStart = FALSE;

//
// The flag indicates if #DB will be setup in #PF handler.
//
BOOLEAN  mSetupDebugTrap = FALSE;

//
// Record the page fault exception count for one instruction execution, per CPU.
//
UINTN  *mPFEntryCount;

//
// Per-CPU arrays of saved page-table entry values and their addresses,
// restored by the #DB handler after the faulting instruction completes.
//
UINT64  (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
UINT64  *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];

//
// Per-CPU pointers into the DS area: DS management struct, BTS record ring
// (mBTSRecordNumber records per CPU) and PEBS record buffer.
//
MSR_DS_AREA_STRUCT   **mMsrDsArea;
BRANCH_TRACE_RECORD  **mMsrBTSRecord;
UINTN                mBTSRecordNumber;
PEBS_RECORD          **mMsrPEBSRecord;

//
// These memory ranges are always present, they do not generate the access type of page fault exception,
// but they possibly generate instruction fetch type of page fault exception.
//
MEMORY_PROTECTION_RANGE  *mProtectionMemRange     = NULL;
UINTN                    mProtectionMemRangeCount = 0;
//
// Some predefined memory ranges.
// Each entry is: { { Base, Top }, Present, Nx }.
//
MEMORY_PROTECTION_RANGE  mProtectionMemRangeTemplate[] = {
  //
  // SMRAM range (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  //
  {
    { 0x00000000, 0x00000000 }, TRUE,  FALSE
  },

  //
  // SMM profile data range (to be fixed in runtime).
  // It is always present and instruction fetches are not allowed.
  //
  {
    { 0x00000000, 0x00000000 }, TRUE,  TRUE
  },

  //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  // {{0x00000000, 0x00000000},TRUE,FALSE},
  //

  //
  // Future extended range could be added here.
  //

  //
  // PCI MMIO ranges (to be added in runtime).
  // They are always present and instruction fetches are not allowed.
  //
};
113
//
// These memory ranges are mapped by 4KB-page instead of 2MB-page.
//
MEMORY_RANGE  *mSplitMemRange     = NULL;
UINTN         mSplitMemRangeCount = 0;

//
// SMI command port.
//
UINT32  mSmiCommandPort;
124
125/**
126 Disable branch trace store.
127
128**/
129VOID
130DisableBTS (
131 VOID
132 )
133{
134 MSR_IA32_DEBUGCTL_REGISTER DebugCtl;
135
136 DebugCtl.Uint64 = AsmReadMsr64 (MSR_IA32_DEBUGCTL);
137 DebugCtl.Bits.BTS = 0;
138 DebugCtl.Bits.TR = 0;
139
140 AsmWriteMsr64 (MSR_IA32_DEBUGCTL, DebugCtl.Uint64);
141}
142
143/**
144 Enable branch trace store.
145
146**/
147VOID
148EnableBTS (
149 VOID
150 )
151{
152 MSR_IA32_DEBUGCTL_REGISTER DebugCtl;
153
154 DebugCtl.Uint64 = AsmReadMsr64 (MSR_IA32_DEBUGCTL);
155 DebugCtl.Bits.BTS = 1;
156 DebugCtl.Bits.TR = 1;
157
158 AsmWriteMsr64 (MSR_IA32_DEBUGCTL, DebugCtl.Uint64);
159}
160
161/**
162 Get CPU Index from APIC ID.
163
164**/
165UINTN
166GetCpuIndex (
167 VOID
168 )
169{
170 UINTN Index;
171 UINT32 ApicId;
172
173 ApicId = GetApicId ();
174
175 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
176 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
177 return Index;
178 }
179 }
180
181 ASSERT (FALSE);
182 return 0;
183}
184
/**
  Get the source of IP after execute-disable exception is triggered.

  Walks the per-CPU BTS record ring backwards from the current write index,
  looking for the second record whose LastBranchTo equals DestinationIP
  (the first match is the branch into the DEBUG exception handler itself).

  @param CpuIndex       The index of CPU.
  @param DestinationIP  The destination address.

  @return The LastBranchFrom of the matching record, or 0 if no second match
          is found within mBTSRecordNumber records.

**/
UINT64
GetSourceFromDestinationOnBts (
  UINTN   CpuIndex,
  UINT64  DestinationIP
  )
{
  BRANCH_TRACE_RECORD  *CurrentBTSRecord;
  UINTN                Index;
  BOOLEAN              FirstMatch;

  FirstMatch = FALSE;

  //
  // Start at the most recently written record in this CPU's DS area.
  //
  CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
  for (Index = 0; Index < mBTSRecordNumber; Index++) {
    if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
      //
      // Underflow: walked past the buffer start, wrap around to the last record.
      //
      CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
      CurrentBTSRecord--;
    }

    if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
      //
      // Good! find 1st one, then find 2nd one.
      //
      if (!FirstMatch) {
        //
        // The first one is DEBUG exception
        //
        FirstMatch = TRUE;
      } else {
        //
        // Good find proper one.
        //
        return CurrentBTSRecord->LastBranchFrom;
      }
    }

    CurrentBTSRecord--;
  }

  return 0;
}
236
237/**
238 SMM profile specific INT 1 (single-step) exception handler.
239
240 @param InterruptType Defines the type of interrupt or exception that
241 occurred on the processor.This parameter is processor architecture specific.
242 @param SystemContext A pointer to the processor context when
243 the interrupt occurred on the processor.
244**/
245VOID
246EFIAPI
247DebugExceptionHandler (
248 IN EFI_EXCEPTION_TYPE InterruptType,
249 IN EFI_SYSTEM_CONTEXT SystemContext
250 )
251{
252 UINTN CpuIndex;
253 UINTN PFEntry;
254
255 if (!mSmmProfileStart &&
256 !HEAP_GUARD_NONSTOP_MODE &&
257 !NULL_DETECTION_NONSTOP_MODE)
258 {
259 return;
260 }
261
262 CpuIndex = GetCpuIndex ();
263
264 //
265 // Clear last PF entries
266 //
267 for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
268 *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
269 }
270
271 //
272 // Reset page fault exception count for next page fault.
273 //
274 mPFEntryCount[CpuIndex] = 0;
275
276 //
277 // Flush TLB
278 //
279 CpuFlushTlb ();
280
281 //
282 // Clear TF in EFLAGS
283 //
284 ClearTrapFlag (SystemContext);
285}
286
287/**
288 Check if the input address is in SMM ranges.
289
290 @param[in] Address The input address.
291
292 @retval TRUE The input address is in SMM.
293 @retval FALSE The input address is not in SMM.
294**/
295BOOLEAN
296IsInSmmRanges (
297 IN EFI_PHYSICAL_ADDRESS Address
298 )
299{
300 UINTN Index;
301
302 if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
303 return TRUE;
304 }
305
306 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
307 if ((Address >= mSmmCpuSmramRanges[Index].CpuStart) &&
308 (Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize))
309 {
310 return TRUE;
311 }
312 }
313
314 return FALSE;
315}
316
317/**
318 Check if the SMM profile page fault address above 4GB is in protected range or not.
319
320 @param[in] Address The address of Memory.
321 @param[out] Nx The flag indicates if the memory is execute-disable.
322
323 @retval TRUE The input address is in protected range.
324 @retval FALSE The input address is not in protected range.
325
326**/
327BOOLEAN
328IsSmmProfilePFAddressAbove4GValid (
329 IN EFI_PHYSICAL_ADDRESS Address,
330 OUT BOOLEAN *Nx
331 )
332{
333 UINTN Index;
334
335 //
336 // Check configuration
337 //
338 for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
339 if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
340 *Nx = mProtectionMemRange[Index].Nx;
341 return mProtectionMemRange[Index].Present;
342 }
343 }
344
345 *Nx = TRUE;
346 return FALSE;
347}
348
349/**
350 Check if the memory address will be mapped by 4KB-page.
351
352 @param Address The address of Memory.
353
354**/
355BOOLEAN
356IsAddressSplit (
357 IN EFI_PHYSICAL_ADDRESS Address
358 )
359{
360 UINTN Index;
361
362 if (mSmmProfileEnabled) {
363 //
364 // Check configuration
365 //
366 for (Index = 0; Index < mSplitMemRangeCount; Index++) {
367 if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
368 return TRUE;
369 }
370 }
371 } else {
372 if (Address < mCpuHotPlugData.SmrrBase) {
373 if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
374 return TRUE;
375 }
376 } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
377 if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
378 return TRUE;
379 }
380 }
381 }
382
383 //
384 // Return default
385 //
386 return FALSE;
387}
388
389/**
390 Function to compare 2 MEMORY_PROTECTION_RANGE based on range base.
391
392 @param[in] Buffer1 pointer to Device Path poiner to compare
393 @param[in] Buffer2 pointer to second DevicePath pointer to compare
394
395 @retval 0 Buffer1 equal to Buffer2
396 @retval <0 Buffer1 is less than Buffer2
397 @retval >0 Buffer1 is greater than Buffer2
398**/
399INTN
400EFIAPI
401ProtectionRangeCompare (
402 IN CONST VOID *Buffer1,
403 IN CONST VOID *Buffer2
404 )
405{
406 if (((MEMORY_PROTECTION_RANGE *)Buffer1)->Range.Base > ((MEMORY_PROTECTION_RANGE *)Buffer2)->Range.Base) {
407 return 1;
408 } else if (((MEMORY_PROTECTION_RANGE *)Buffer1)->Range.Base < ((MEMORY_PROTECTION_RANGE *)Buffer2)->Range.Base) {
409 return -1;
410 }
411
412 return 0;
413}
414
/**
  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.

  Builds mProtectionMemRange from the template, the SMRAM descriptor list and
  the extended (e.g. MMIO) regions, then derives mSplitMemRange: the 2MB-aligned
  windows that must be mapped with 4KB pages because a protection boundary is
  not 2MB aligned. Finally sorts mProtectionMemRange by base address.
**/
VOID
InitProtectedMemRange (
  VOID
  )
{
  UINTN                    Index;
  MM_CPU_MEMORY_REGION     *MemoryRegion;
  UINTN                    MemoryRegionCount;
  UINTN                    NumberOfAddedDescriptors;
  UINTN                    NumberOfProtectRange;
  UINTN                    NumberOfSpliteRange;
  UINTN                    TotalSize;
  EFI_PHYSICAL_ADDRESS     ProtectBaseAddress;
  EFI_PHYSICAL_ADDRESS     ProtectEndAddress;
  EFI_PHYSICAL_ADDRESS     Top2MBAlignedAddress;
  EFI_PHYSICAL_ADDRESS     Base2MBAlignedAddress;
  UINT64                   High4KBPageSize;
  UINT64                   Low4KBPageSize;
  MEMORY_PROTECTION_RANGE  MemProtectionRange;

  MemoryRegion             = NULL;
  MemoryRegionCount        = 0;
  NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
  NumberOfSpliteRange      = 0;

  //
  // Create extended protection MemoryRegion and add them into protected memory ranges.
  // Retrieve the accessible regions when SMM profile is enabled.
  // In SMM: only MMIO is accessible.
  // In MM: all regions described by resource HOBs are accessible.
  //
  CreateExtendedProtectionRange (&MemoryRegion, &MemoryRegionCount);
  ASSERT (MemoryRegion != NULL);

  NumberOfAddedDescriptors += MemoryRegionCount;

  ASSERT (NumberOfAddedDescriptors != 0);

  //
  // Allocate a range array big enough for the template plus all added descriptors.
  //
  TotalSize           = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
  mProtectionMemRange = (MEMORY_PROTECTION_RANGE *)AllocateZeroPool (TotalSize);
  ASSERT (mProtectionMemRange != NULL);
  mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Copy existing ranges.
  //
  CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));

  //
  // Create split ranges which come from protected ranges (worst case: one per range).
  //
  TotalSize      = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
  mSplitMemRange = (MEMORY_RANGE *)AllocateZeroPool (TotalSize);
  ASSERT (mSplitMemRange != NULL);

  //
  // Create SMM ranges which are set to present and execution-enable.
  //
  NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
  for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
    if ((mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base) &&
        (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top))
    {
      //
      // If the address have been already covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize
      //
      break;
    }

    mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
    mProtectionMemRange[NumberOfProtectRange].Range.Top  = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
    mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
    mProtectionMemRange[NumberOfProtectRange].Nx         = FALSE;
    NumberOfProtectRange++;
  }

  //
  // Create protection ranges which are set to present and execution-disable.
  //
  for (Index = 0; Index < MemoryRegionCount; Index++) {
    mProtectionMemRange[NumberOfProtectRange].Range.Base = MemoryRegion[Index].Base;
    mProtectionMemRange[NumberOfProtectRange].Range.Top  = MemoryRegion[Index].Base + MemoryRegion[Index].Length;
    mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
    mProtectionMemRange[NumberOfProtectRange].Nx         = TRUE;
    NumberOfProtectRange++;
  }

  //
  // Free the MemoryRegion
  //
  if (MemoryRegion != NULL) {
    FreePool (MemoryRegion);
  }

  //
  // Check and update actual protected memory ranges count
  //
  ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
  mProtectionMemRangeCount = NumberOfProtectRange;

  //
  // According to protected ranges, create the ranges which will be mapped by 4KB page.
  //
  NumberOfSpliteRange  = 0;
  NumberOfProtectRange = mProtectionMemRangeCount;
  for (Index = 0; Index < NumberOfProtectRange; Index++) {
    //
    // If base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
    //
    ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
    ProtectEndAddress  = mProtectionMemRange[Index].Range.Top;
    if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
      //
      // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
      // A mix of 4KB and 2MB page could save SMRAM space.
      //
      Top2MBAlignedAddress  = ProtectEndAddress & ~(SIZE_2MB - 1);
      Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
      if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
          ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB))
      {
        //
        // There is an range which could be mapped by 2MB-page.
        //
        High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
        Low4KBPageSize  = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
        if (High4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }

        if (Low4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
      } else {
        //
        // The range could only be mapped by 4KB-page.
        //
        mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
        mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
        NumberOfSpliteRange++;
      }
    }
  }

  mSplitMemRangeCount = NumberOfSpliteRange;

  //
  // Sort the mProtectionMemRange
  //
  QuickSort (mProtectionMemRange, mProtectionMemRangeCount, sizeof (MEMORY_PROTECTION_RANGE), (BASE_SORT_COMPARE)ProtectionRangeCompare, &MemProtectionRange);

  DEBUG ((DEBUG_INFO, "SMM Profile Memory Ranges:\n"));
  for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
    DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
    DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
  }

  for (Index = 0; Index < mSplitMemRangeCount; Index++) {
    DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
    DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top));
  }
}
591
/**
  This function updates memory attribute according to mProtectionMemRangeCount.

  Each range in mProtectionMemRange is marked EFI_MEMORY_XP when Nx, or
  EFI_MEMORY_RP when not present; every physical address not covered by any
  range (up to 1 << mPhysicalAddressBits) is marked non-present.
**/
VOID
SmmProfileUpdateMemoryAttributes (
  VOID
  )
{
  RETURN_STATUS  Status;
  UINTN          Index;
  UINTN          PageTable;
  UINT64         Base;
  UINT64         Length;
  UINT64         Limit;
  UINT64         PreviousAddress;
  UINT64         MemoryAttrMask;
  BOOLEAN        WriteProtect;
  BOOLEAN        CetEnabled;

  DEBUG ((DEBUG_INFO, "SmmProfileUpdateMemoryAttributes Start...\n"));

  //
  // Allow read-only pages to be written while the page tables are edited;
  // the matching macro below restores the previous state.
  //
  WRITE_UNPROTECT_RO_PAGES (WriteProtect, CetEnabled);

  PageTable = AsmReadCr3 ();
  Limit     = LShiftU64 (1, mPhysicalAddressBits);

  //
  // [0, 4k] may be non-present.
  //
  PreviousAddress = ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) ? BASE_4KB : 0;

  //
  // mProtectionMemRange is sorted by base address, so PreviousAddress tracks
  // the end of the last processed range and the gaps in between are marked RP.
  //
  for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
    MemoryAttrMask = 0;
    if (mProtectionMemRange[Index].Nx == TRUE) {
      MemoryAttrMask = EFI_MEMORY_XP;
    }

    if (mProtectionMemRange[Index].Present == FALSE) {
      MemoryAttrMask = EFI_MEMORY_RP;
    }

    Base   = mProtectionMemRange[Index].Range.Base;
    Length = mProtectionMemRange[Index].Range.Top - Base;
    if (MemoryAttrMask != 0) {
      Status = ConvertMemoryPageAttributes (PageTable, mPagingMode, Base, Length, MemoryAttrMask, TRUE, NULL);
      ASSERT_RETURN_ERROR (Status);
    }

    if (Base > PreviousAddress) {
      //
      // Mark the ranges not in mProtectionMemRange as non-present.
      //
      Status = ConvertMemoryPageAttributes (PageTable, mPagingMode, PreviousAddress, Base - PreviousAddress, EFI_MEMORY_RP, TRUE, NULL);
      ASSERT_RETURN_ERROR (Status);
    }

    PreviousAddress = Base + Length;
  }

  //
  // Set the last remaining range
  //
  if (PreviousAddress < Limit) {
    Status = ConvertMemoryPageAttributes (PageTable, mPagingMode, PreviousAddress, Limit - PreviousAddress, EFI_MEMORY_RP, TRUE, NULL);
    ASSERT_RETURN_ERROR (Status);
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  WRITE_PROTECT_RO_PAGES (WriteProtect, CetEnabled);

  DEBUG ((DEBUG_INFO, "SmmProfileUpdateMemoryAttributes Done.\n"));
}
674
675/**
676 Updates page table to make some memory ranges (like system memory) absent
677 and make some memory ranges (like MMIO) present and execute disable. It also
678 update 2MB-page to 4KB-page for some memory ranges.
679
680**/
681VOID
682SmmProfileStart (
683 VOID
684 )
685{
686 //
687 // The flag indicates SMM profile starts to work.
688 //
689 mSmmProfileStart = TRUE;
690
691 //
692 // Tell #PF handler to prepare a #DB subsequently.
693 //
694 mSetupDebugTrap = TRUE;
695}
696
697/**
698 Initialize SMM profile in SmmReadyToLock protocol callback function.
699
700 @param Protocol Points to the protocol's unique identifier.
701 @param Interface Points to the interface instance.
702 @param Handle The handle on which the interface was installed.
703
704 @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
705**/
706EFI_STATUS
707EFIAPI
708InitSmmProfileCallBack (
709 IN CONST EFI_GUID *Protocol,
710 IN VOID *Interface,
711 IN EFI_HANDLE Handle
712 )
713{
714 EFI_STATUS Status;
715 EFI_SMM_VARIABLE_PROTOCOL *SmmProfileVariable;
716
717 //
718 // Locate SmmVariableProtocol.
719 //
720 Status = gMmst->MmLocateProtocol (&gEfiSmmVariableProtocolGuid, NULL, (VOID **)&SmmProfileVariable);
721 ASSERT_EFI_ERROR (Status);
722
723 //
724 // Save to variable so that SMM profile data can be found.
725 //
726 SmmProfileVariable->SmmSetVariable (
727 SMM_PROFILE_NAME,
728 &gEfiCallerIdGuid,
729 EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
730 sizeof (mSmmProfileBase),
731 &mSmmProfileBase
732 );
733
734 return EFI_SUCCESS;
735}
736
/**
  Initialize SMM profile data structures.

  Allocates the per-CPU page-fault bookkeeping arrays, locates and initializes
  the SMM profile buffer header, carves the tail of the buffer into per-CPU
  Debug Store (DS) areas when BTS is supported, fixes up the protection range
  template entries (TSeg and profile data), and registers the SmmReadyToLock
  callback (traditional SMM only).
**/
VOID
InitSmmProfileInternal (
  VOID
  )
{
  EFI_STATUS  Status;
  VOID        *Registration;
  UINTN       Index;
  UINTN       MsrDsAreaSizePerCpu;
  UINT64      SmmProfileSize;

  Status        = EFI_SUCCESS;
  mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mPFEntryCount != NULL);
  mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                                                        sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus
                                                        );
  ASSERT (mLastPFEntryValue != NULL);
  mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                                                           sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus
                                                           );
  ASSERT (mLastPFEntryPointer != NULL);

  //
  // Get Smm Profile Base
  //
  mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)GetSmmProfileData (&SmmProfileSize);
  DEBUG ((DEBUG_ERROR, "SmmProfileBase = 0x%016x.\n", (UINTN)mSmmProfileBase));
  DEBUG ((DEBUG_ERROR, "SmmProfileSize = 0x%016x.\n", (UINTN)SmmProfileSize));

  //
  // When BTS is supported, the tail mMsrDsAreaSize bytes of the buffer are
  // reserved for the DS save areas.
  //
  if (mBtsSupported) {
    ASSERT (SmmProfileSize > mMsrDsAreaSize);
    mSmmProfileSize = (UINTN)SmmProfileSize - mMsrDsAreaSize;
  } else {
    mSmmProfileSize = (UINTN)SmmProfileSize;
  }

  //
  // The profile data size must be 4KB aligned.
  //
  ASSERT ((mSmmProfileSize & 0xFFF) == 0);

  //
  // Initialize SMM profile data header.
  //
  mSmmProfileBase->HeaderSize     = sizeof (SMM_PROFILE_HEADER);
  mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof (SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->MaxDataSize    = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->CurDataEntries = 0;
  mSmmProfileBase->CurDataSize    = 0;
  mSmmProfileBase->TsegStart      = mCpuHotPlugData.SmrrBase;
  mSmmProfileBase->TsegSize       = mCpuHotPlugData.SmrrSize;
  mSmmProfileBase->NumSmis        = 0;
  mSmmProfileBase->NumCpus        = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;

  if (mBtsSupported) {
    //
    // Split the DS area evenly across CPUs; each slice holds the
    // MSR_DS_AREA_STRUCT, the BTS record ring, then the PEBS records.
    //
    mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
    ASSERT (mMsrDsArea != NULL);
    mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrBTSRecord != NULL);
    mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrPEBSRecord != NULL);

    mMsrDsAreaBase      = (MSR_DS_AREA_STRUCT *)((UINTN)mSmmProfileBase + mSmmProfileSize);
    MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
    mBTSRecordNumber    = (MsrDsAreaSizePerCpu - sizeof (PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof (MSR_DS_AREA_STRUCT)) / sizeof (BRANCH_TRACE_RECORD);
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      mMsrDsArea[Index]     = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
      mMsrBTSRecord[Index]  = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof (MSR_DS_AREA_STRUCT));
      mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof (PEBS_RECORD) * PEBS_RECORD_NUMBER);

      mMsrDsArea[Index]->BTSBufferBase          = (UINTN)mMsrBTSRecord[Index];
      mMsrDsArea[Index]->BTSIndex               = mMsrDsArea[Index]->BTSBufferBase;
      mMsrDsArea[Index]->BTSAbsoluteMaximum     = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof (BRANCH_TRACE_RECORD) + 1;
      mMsrDsArea[Index]->BTSInterruptThreshold  = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;

      mMsrDsArea[Index]->PEBSBufferBase         = (UINTN)mMsrPEBSRecord[Index];
      mMsrDsArea[Index]->PEBSIndex              = mMsrDsArea[Index]->PEBSBufferBase;
      mMsrDsArea[Index]->PEBSAbsoluteMaximum    = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof (PEBS_RECORD) + 1;
      mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
    }
  }

  mProtectionMemRange      = mProtectionMemRangeTemplate;
  mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Update TSeg entry.
  //
  mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
  mProtectionMemRange[0].Range.Top  = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;

  //
  // Update SMM profile entry.
  //
  mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
  mProtectionMemRange[1].Range.Top  = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + SmmProfileSize;

  //
  // Allocate memory reserved for creating 4KB pages.
  //
  InitPagesForPFHandler ();

  //
  // Start SMM profile when SmmReadyToLock protocol is installed.
  //
  if (!mIsStandaloneMm) {
    Status = gMmst->MmRegisterProtocolNotify (
                      &gEfiSmmReadyToLockProtocolGuid,
                      InitSmmProfileCallBack,
                      &Registration
                      );
    ASSERT_EFI_ERROR (Status);
  }

  return;
}
855
/**
  Check if feature is supported by a processor.

  Downgrades the global capability flags (mCetSupported, mBtsSupported,
  mSmmCodeAccessCheckEnable) when the current processor does not support
  the corresponding feature.

  @param CpuIndex  The index of the CPU.
**/
VOID
CheckFeatureSupported (
  IN UINTN  CpuIndex
  )
{
  UINT32                         RegEax;
  UINT32                         RegEcx;
  UINT32                         RegEdx;
  MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;

  //
  // CET shadow stack: probe CPUID.(EAX=7,ECX=0):ECX and patch the assembly
  // flag when unsupported (or when leaf 7 is not even available).
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
    if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
      AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
      if ((RegEcx & CPUID_CET_SS) == 0) {
        mCetSupported = FALSE;
        PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
      }
    } else {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
  }

  if (mBtsSupported) {
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
      //
      // Per IA32 manuals:
      // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
      // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
      //    availability of the BTS facilities, including the ability to set the BTS and
      //    BTINT bits in the MSR_DEBUGCTLA MSR.
      // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
      //
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      if (MiscEnableMsr.Bits.BTS == 1) {
        //
        // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
        //
        mBtsSupported = FALSE;
      }
    }
  }

  if (mSmmCodeAccessCheckEnable) {
    //
    // Check to see if the CPU supports the SMM Code Access Check feature
    // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
    //
    if (!SmmCpuFeaturesIsSmmRegisterSupported (CpuIndex, SmmRegFeatureControl) ||
        ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0))
    {
      mSmmCodeAccessCheckEnable = FALSE;
    }
  }
}
918
919/**
920 Enable single step.
921
922**/
923VOID
924ActivateSingleStepDB (
925 VOID
926 )
927{
928 UINTN Dr6;
929
930 Dr6 = AsmReadDr6 ();
931 if ((Dr6 & DR6_SINGLE_STEP) != 0) {
932 return;
933 }
934
935 Dr6 |= DR6_SINGLE_STEP;
936 AsmWriteDr6 (Dr6);
937}
938
939/**
940 Enable last branch.
941
942**/
943VOID
944ActivateLBR (
945 VOID
946 )
947{
948 MSR_IA32_DEBUGCTL_REGISTER DebugCtl;
949
950 DebugCtl.Uint64 = AsmReadMsr64 (MSR_IA32_DEBUGCTL);
951 if (DebugCtl.Bits.LBR) {
952 return;
953 }
954
955 DebugCtl.Bits.LBR = 1;
956 AsmWriteMsr64 (MSR_IA32_DEBUGCTL, DebugCtl.Uint64);
957}
958
959/**
960 Enable branch trace store.
961
962 @param CpuIndex The index of the processor.
963
964**/
965VOID
966ActivateBTS (
967 IN UINTN CpuIndex
968 )
969{
970 MSR_IA32_DEBUGCTL_REGISTER DebugCtl;
971
972 DebugCtl.Uint64 = AsmReadMsr64 (MSR_IA32_DEBUGCTL);
973 if ((DebugCtl.Bits.BTS)) {
974 return;
975 }
976
977 AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
978
979 //
980 // Enable BTS
981 //
982 DebugCtl.Bits.BTS = 1;
983 DebugCtl.Bits.TR = 1;
984
985 DebugCtl.Bits.BTINT = 0;
986 AsmWriteMsr64 (MSR_IA32_DEBUGCTL, DebugCtl.Uint64);
987}
988
989/**
990 Increase SMI number in each SMI entry.
991
992**/
993VOID
994SmmProfileRecordSmiNum (
995 VOID
996 )
997{
998 if (mSmmProfileStart) {
999 mSmmProfileBase->NumSmis++;
1000 }
1001}
1002
1003/**
1004 Initialize processor environment for SMM profile.
1005
1006 @param CpuIndex The index of the processor.
1007
1008**/
1009VOID
1010ActivateSmmProfile (
1011 IN UINTN CpuIndex
1012 )
1013{
1014 //
1015 // Enable Single Step DB#
1016 //
1017 ActivateSingleStepDB ();
1018
1019 if (mBtsSupported) {
1020 //
1021 // We can not get useful information from LER, so we have to use BTS.
1022 //
1023 ActivateLBR ();
1024
1025 //
1026 // Enable BTS
1027 //
1028 ActivateBTS (CpuIndex);
1029 }
1030}
1031
1032/**
1033 Initialize SMM profile in SMM CPU entry point.
1034
1035 @param[in] Cr3 The base address of the page tables to use in SMM.
1036
1037**/
1038VOID
1039InitSmmProfile (
1040 UINT32 Cr3
1041 )
1042{
1043 //
1044 // Save Cr3
1045 //
1046 mSmmProfileCr3 = Cr3;
1047
1048 //
1049 // Skip SMM profile initialization if feature is disabled
1050 //
1051 if (!mSmmProfileEnabled &&
1052 !HEAP_GUARD_NONSTOP_MODE &&
1053 !NULL_DETECTION_NONSTOP_MODE)
1054 {
1055 return;
1056 }
1057
1058 //
1059 // Initialize SmmProfile here
1060 //
1061 InitSmmProfileInternal ();
1062
1063 //
1064 // Initialize profile IDT.
1065 //
1066 InitIdtr ();
1067}
1068
/**
  Update page table to map the memory correctly in order to make the instruction
  which caused page fault execute successfully. It also saves the original page
  table entry so that it can be restored by the single-step (#DB) exception
  handler, which re-arms the not-present/XD trap after the instruction retires.

  @param  PageTable      PageTable Address (root of the paging hierarchy, CR3).
  @param  PFAddress      The memory address which caused page fault exception.
  @param  CpuIndex       The index of the processor.
  @param  ErrorCode      The Error code of exception.

**/
VOID
RestorePageTableBelow4G (
  UINT64  *PageTable,
  UINT64  PFAddress,
  UINTN   CpuIndex,
  UINTN   ErrorCode
  )
{
  UINTN     PTIndex;
  UINTN     PFIndex;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);

  //
  // PML5 - walked only when 5-level paging (CR4.LA57) is active.
  //
  if (Enable5LevelPaging) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PML4 - present only on 64-bit builds (UINTN is 8 bytes); this size check
  // is resolved at compile time, so IA32 builds skip this level entirely.
  //
  if (sizeof (UINT64) == sizeof (UINTN)) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PDPTE
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);

  if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
    //
    // For 32-bit case, because a full map page table for 0-4G is created by default,
    // and since the PDPTE must be one non-leaf entry, the PDPTE must always be present.
    // So, ASSERT it must be the 64-bit case running here.
    //
    ASSERT (sizeof (UINT64) == sizeof (UINTN));

    //
    // If the entry is not present, allocate one page from page pool for it
    //
    PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  }

  ASSERT (PageTable[PTIndex] != 0);
  PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

  //
  // PD
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
  if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
    //
    // A 2M page size will be used directly when the 2M entry is marked as non-present.
    //

    //
    // Record the old entry (still in its non-present state) so that the #DB
    // handler can restore it after the faulting instruction is single-stepped.
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex                                = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: a present 2M leaf page covering PFAddress. If the fault
    // was an instruction fetch (IA32_PF_EC_ID set), clear the NX bit so the
    // instruction can execute.
    //
    PageTable[PTIndex]  = (PFAddress & ~((1ull << 21) - 1));
    PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  } else {
    //
    // If the 2M entry is marked as present, a 4K page size will be utilized.
    // In this scenario, the 2M entry must be a non-leaf entry.
    //
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

    //
    // 4K PTE
    //
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);

    //
    // Record the old entry with its non-present status for later restore.
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex                                = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: a present 4K leaf page; clear NX for instruction fetches.
    //
    PageTable[PTIndex]  = (PFAddress & ~((1ull << 12) - 1));
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  }
}
1203
1204/**
1205 Handler for Page Fault triggered by Guard page.
1206
1207 @param ErrorCode The Error code of exception.
1208
1209**/
1210VOID
1211GuardPagePFHandler (
1212 UINTN ErrorCode
1213 )
1214{
1215 UINT64 *PageTable;
1216 UINT64 PFAddress;
1217 UINT64 RestoreAddress;
1218 UINTN RestorePageNumber;
1219 UINTN CpuIndex;
1220
1221 PageTable = (UINT64 *)AsmReadCr3 ();
1222 PFAddress = AsmReadCr2 ();
1223 CpuIndex = GetCpuIndex ();
1224
1225 //
1226 // Memory operation cross pages, like "rep mov" instruction, will cause
1227 // infinite loop between this and Debug Trap handler. We have to make sure
1228 // that current page and the page followed are both in PRESENT state.
1229 //
1230 RestorePageNumber = 2;
1231 RestoreAddress = PFAddress;
1232 while (RestorePageNumber > 0) {
1233 RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1234 RestoreAddress += EFI_PAGE_SIZE;
1235 RestorePageNumber--;
1236 }
1237
1238 //
1239 // Flush TLB
1240 //
1241 CpuFlushTlb ();
1242}
1243
/**
  The Page fault handler to save SMM profile data.

  Records one SMM_PROFILE_ENTRY (fault address, faulting instruction, CPU,
  error code, and triggering SMI command if any) into the shared profile
  buffer, after first re-mapping the faulting pages so the instruction can
  be re-executed under single-step.

  @param Rip       The RIP when exception happens.
  @param ErrorCode The Error code of exception.

**/
VOID
SmmProfilePFHandler (
  UINTN  Rip,
  UINTN  ErrorCode
  )
{
  UINT64                      *PageTable;
  UINT64                      PFAddress;
  UINT64                      RestoreAddress;
  UINTN                       RestorePageNumber;
  UINTN                       CpuIndex;
  UINTN                       Index;
  UINT64                      InstructionAddress;
  UINTN                       MaxEntryNumber;
  UINTN                       CurrentEntryNumber;
  BOOLEAN                     IsValidPFAddress;
  SMM_PROFILE_ENTRY           *SmmProfileEntry;
  UINT64                      SmiCommand;
  EFI_STATUS                  Status;
  UINT8                       SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO  IoInfo;

  //
  // Pause branch tracing while inside the handler so the BTS buffer still
  // holds the branch records that led to this fault.
  //
  if (mBtsSupported) {
    DisableBTS ();
  }

  IsValidPFAddress = FALSE;
  PageTable        = (UINT64 *)AsmReadCr3 ();
  PFAddress        = AsmReadCr2 ();
  CpuIndex         = GetCpuIndex ();

  //
  // Memory operation cross pages, like "rep mov" instruction, will cause
  // infinite loop between this and Debug Trap handler. We have to make sure
  // that current page and the page followed are both in PRESENT state.
  //
  RestorePageNumber = 2;
  RestoreAddress    = PFAddress;
  while (RestorePageNumber > 0) {
    if (RestoreAddress <= 0xFFFFFFFF) {
      RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
    } else {
      RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
    }

    RestoreAddress += EFI_PAGE_SIZE;
    RestorePageNumber--;
  }

  //
  // Only log the fault when it is not an access the profile considers valid
  // (RestorePageTableAbove4G may mark it valid).
  //
  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if (((ErrorCode & IA32_PF_EC_ID) != 0) && (mBtsSupported)) {
      //
      // If it is instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused page fault is not a jump instruction,
        // set instruction address same as the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Indicate it is not software SMI
    //
    SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
    //
    // Scan every CPU's save state: if some CPU entered SMM via an I/O write
    // to the SMI command port, capture the command byte for the log entry.
    //
    for (Index = 0; Index < gMmst->NumberOfCpus; Index++) {
      Status = SmmReadSaveState (&mSmmCpu, sizeof (IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }

      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand   = (UINT64)SoftSmiValue;
        break;
      }
    }

    //
    // Profile entries start immediately after the SMM_PROFILE_HEADER.
    //
    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check if there is already a same entry in profile data.
    //
    for (Index = 0; Index < (UINTN)mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd == SmiCommand))
      {
        //
        // Same record exist, need not save again.
        //
        break;
      }
    }

    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN)mSmmProfileBase->CurDataEntries;
      MaxEntryNumber     = (UINTN)mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        //
        // Ring-buffer mode: wrap the write index; CurDataEntries keeps the
        // running total of records ever written.
        //
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }

      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum      = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode   = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId      = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum      = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address     = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd      = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

  if (mBtsSupported) {
    EnableBTS ();
  }
}
1390
1391/**
1392 Replace INT1 exception handler to restore page table to absent/execute-disable state
1393 in order to trigger page fault again to save SMM profile data..
1394
1395**/
1396VOID
1397InitIdtr (
1398 VOID
1399 )
1400{
1401 EFI_STATUS Status;
1402
1403 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
1404 ASSERT_EFI_ERROR (Status);
1405}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette