VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c @ 108794

Last change on this file since 108794 was 108794, checked in by vboxsync, 2 weeks ago

Devices/EFI/FirmwareNew: Merge edk2-stable202502 from the vendor branch and make it build for the important platforms, bugref:4643

  • Property svn:eol-style set to native
File size: 25.5 KB
/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuCommon.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Check whether 1-GByte pages are supported by the processor.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }

  return FALSE;
}
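
//
// Note added for this writeup, not part of the original file: BIT26 in
// CPUID leaf 0x80000001 EDX is the architectural "Page1GB" feature flag,
// defined identically by Intel and AMD, which is why the single check
// above covers processors from both vendors.
//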

/**
  Check whether 5-level paging needs to be enabled.

  The routine returns TRUE when the CPU supports 5-level paging
  (CPUID[7,0].ECX.BIT[16] is set) and the max physical address width is larger
  than 48 bits. Because 4-level paging can address physical addresses up to
  2^48 - 1, there is no need to enable 5-level paging when the max physical
  address width is <= 48 bits.

  @retval TRUE   5-level paging enabling is needed.
  @retval FALSE  5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &ExtFeatureEcx.Uint32,
    NULL
    );
  DEBUG ((
    DEBUG_INFO,
    "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits,
    ExtFeatureEcx.Bits.FiveLevelPage
    ));

  if ((VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) &&
      (ExtFeatureEcx.Bits.FiveLevelPage == 1))
  {
    return TRUE;
  } else {
    return FALSE;
  }
}
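
//
// Note added for this writeup, not part of the original file: the
// "4 * 9 + 12" above is the 4-level linear-address width written out per
// field: each of the four page-table levels contributes 9 index bits
// (512 entries per 4-KByte table) and the byte offset within a 4-KByte
// page contributes 12 bits, so 4 * 9 + 12 = 48. Only when the physical
// address width exceeds 48 bits can a fifth level (5 * 9 + 12 = 57
// linear-address bits) map the additional physical memory.
//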

/**
  Set the number of sub-entries in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // The sub-entries number is saved in BIT52 to BIT60 (reserved field) of Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return the number of sub-entries in an entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // The sub-entries number is saved in BIT52 to BIT60 (reserved field) of Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
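
//
// Illustrative sketch, not part of the original file: the two helpers above
// round-trip a 9-bit counter through bits 52..60, which the CPU ignores in
// non-leaf paging entries, so the bookkeeping never affects translation:
//
//   UINT64  Entry;
//
//   Entry = 0;
//   SetSubEntriesNum (&Entry, 0x1FF);      // mark all 512 sub-entries in use
//   ASSERT (GetSubEntriesNum (&Entry) == 0x1FF);
//   SetSubEntriesNum (&Entry, GetSubEntriesNum (&Entry) - 1); // one released
//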

/**
  Calculate the maximum supported physical address width.

  @param[in] Is5LevelPagingNeeded  If 5-level paging enabling is needed.

  @return The maximum supported physical address width, in bits.
**/
UINT8
CalculateMaximumSupportAddress (
  BOOLEAN  Is5LevelPagingNeeded
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get the number of physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // 4-level paging supports translating 48-bit linear addresses to 52-bit physical addresses.
  // Since linear addresses are sign-extended, the linear-address space of 4-level paging is:
  // [0, 2^47 - 1] and [0xffff8000_00000000, 0xffffffff_ffffffff].
  // So only the [0, 2^47 - 1] linear-address range maps to the identical physical-address
  // range when 5-level paging is disabled.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!Is5LevelPagingNeeded && (PhysicalAddressBits > 47)) {
    PhysicalAddressBits = 47;
  }

  return PhysicalAddressBits;
}
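
//
// Worked example added for this writeup, not part of the original file:
// on a part reporting 52 physical address bits with 5-level paging disabled,
// physical addresses in [2^47, 2^52 - 1] have no identity-mapped linear
// address inside the canonical lower half [0, 2^47 - 1], so the function
// caps the result at 47 bits rather than build mappings that 4-level
// paging could never express.
//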

/**
  Create the page table for SMM use.

  @return The address of the PML4/PML5 root (to set CR3).
          Zero if any error occurs.

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  UINTN                     PageTable;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *PdptEntry;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;

  Pml4Entry = NULL;
  Pml5Entry = NULL;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  m1GPageTableSupport  = Is1GPageSupport ();
  m5LevelPagingNeeded  = Is5LevelPagingNeeded ();
  mPhysicalAddressBits = CalculateMaximumSupportAddress (m5LevelPagingNeeded);
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  if (m5LevelPagingNeeded) {
    mPagingMode = m1GPageTableSupport ? Paging5Level1GB : Paging5Level;
  } else {
    mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
  }

  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));

  //
  // Generate the initial SMM page table.
  //
  PageTable = GenSmmPageTable (mPagingMode, mPhysicalAddressBits);

  if (mSmmProfileEnabled) {
    if (m5LevelPagingNeeded) {
      Pml5Entry = (UINT64 *)PageTable;
      //
      // Set the Pml5Entry sub-entries number for SMM PF handler usage.
      //
      SetSubEntriesNum (Pml5Entry, 1);
      Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
    } else {
      Pml4Entry = (UINT64 *)PageTable;
    }

    //
    // Set the IA32_PG_PMNT bit to mask the first 4 PdptEntry entries.
    //
    PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
    for (Index = 0; Index < 4; Index++) {
      PdptEntry[Index] |= IA32_PG_PMNT;
    }

    //
    // Set the Pml4Entry sub-entries number for SMM PF handler usage.
    //
    SetSubEntriesNum (Pml4Entry, 3);

    //
    // Add pages to the page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    if (FreePage == NULL) {
      FreePages (Pml4Entry, 1);
      if (Pml5Entry != NULL) {
        FreePages (Pml5Entry, 1);
      }

      ASSERT (FreePage != NULL);
      return 0;
    }

    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (mSmmProfileEnabled ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE)
  {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do single stepping
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry                    = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry                   += EXCEPT_IA32_PAGE_FAULT;
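    //
    // Note added for this writeup: the 64-bit handler address is scattered
    // across the X64 IDT gate descriptor as OffsetLow (bits 15:0),
    // OffsetHigh (bits 31:16), and OffsetUpper (bits 63:32); the writes
    // below reassemble it field by field.
    //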
    IdtEntry->Bits.OffsetLow   = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0  = 0;
    IdtEntry->Bits.GateType    = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh  = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1  = 0;
  } else {
    //
    // Register the SMM Page Fault handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for the SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
  }

  //
  // Additional SMM IDT initialization for the SMM CET shadow stack
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
    InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);
  }

  //
  // Return the address of the PML4/PML5 root (to set CR3)
  //
  return (UINT32)PageTable;
}
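
//
// Note added for this writeup, not part of the original file: returning the
// root table address as a UINT32 presumes the SMM page-table pages are
// allocated below 4 GB; the returned value is what is ultimately loaded
// into CR3 for SMM.
//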

/**
  Set the access record in an entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) of Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return the access record in an entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) of Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in an entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset
    // the access record to the initial value 7; adding ACC_MAX_BIT to the return
    // value makes it larger than any record that can be read from an entry
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrement it by 1 and
      // update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }

  return Acc;
}
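
//
// Illustrative walk-through of the aging policy above, not part of the
// original file, for a single entry:
//
//   accessed bit set      -> record reset to 7, caller sees 7 + ACC_MAX_BIT
//                            (0xF), so it outranks any never-accessed peer
//                            (whose 3-bit record can be at most 7)
//   not accessed, Acc = 3 -> record decays to 2, caller sees 3
//   not accessed, Acc = 0 -> record stays 0, caller sees 0; a prime
//                            candidate for ReclaimPages () below
//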

/**
  Reclaim free pages for the PageFault handler.

  Search the whole entry tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool. Then check whether its upper entries also need to be inserted
  into the page pool.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  PFAddress          = AsmReadCr2 ();
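
  //
  // Note added for this writeup: each paging level indexes the linear
  // address with one 9-bit field: bits 56:48 select the PML5 entry (when
  // 5-level paging is active), 47:39 the PML4 entry, 38:30 the PDPT entry,
  // and 29:21 the PD entry. The reads below extract those fields so that
  // the faulting address's own mapping is never chosen for reclaim.
  //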
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
  Pml5               = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-level paging
    // so that the page table parsing logic only handles the 5-level page structure.
    //
    Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
    Pml5      = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }

      Pdpt        = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, do not check the PML4 entry any further
            //
            PML4EIgnore = TRUE;
          }

          continue;
        }

        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not a 1-GByte page entry, so it points to a page directory;
          // the PML4 entry will not be checked any further
          //
          PML4EIgnore = TRUE;
          Pdt         = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, do not check the PDPT entry any further
                //
                PDPTEIgnore = TRUE;
              }

              continue;
            }

            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not a 2-MByte page entry, so it points to a page table;
              // look for the entry that has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||
                  (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))
              {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the page address to be released
                  //
                  MinAcc             = Acc;
                  MinPml5            = Pml5Index;
                  MinPml4            = Pml4Index;
                  MinPdpt            = PdptIndex;
                  MinPdt             = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }

          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
            // it should only have entries that point to 2-MByte pages
            //
            if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||
                (Pml5Index != PFAddressPml5Index))
            {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the page address to be released
                //
                MinAcc             = Acc;
                MinPml5            = Pml5Index;
                MinPml4            = Pml4Index;
                MinPdpt            = PdptIndex;
                MinPdt             = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }

      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to page directories,
        // it should only have entries that point to 1-GByte pages
        //
        if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the page address to be released
            //
            MinAcc             = Acc;
            MinPml5            = Pml5Index;
            MinPml4            = Pml4Index;
            MinPdpt            = (UINTN)-1;
            MinPdt             = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }

  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper entries also need to be inserted
  // into the page pool
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte page table was released, check the PDPT entry
      //
      Pml4          = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt          = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if ((SubEntriesNum == 0) &&
          ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))
      {
        //
        // Release the empty page directory if it no longer has any 4-KByte page
        // table entries, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte page table or a page directory was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {
        //
        // Release the empty PDPT if it no longer has any 1-GByte page entries,
        // and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt       = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    //
    // The page pointed to by the PML4 entry was released above; exit
    //
    break;
  }
}

/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim the used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}
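
//
// Illustrative sketch, not part of the original file: a free 4-KByte page is
// threaded onto mPagePool by using its own first bytes as the LIST_ENTRY
// node, so a caller can turn the returned address directly into a non-leaf
// entry. For example (UpperEntry and PAGE_ATTRIBUTE_BITS stand in for the
// caller's entry pointer and the module's present/read-write mask):
//
//   UINT64  NewTable;
//
//   NewTable    = AllocPage ();    // zeroed 4-KByte page
//   *UpperEntry = NewTable | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
//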

/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is
                        processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  ShadowStackGuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the SMRAM range, it might be in an SMM stack/shadow-stack
  // guard page, or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))
  {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex                    = GetCpuIndex ();
    GuardPageAddress            = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
               (mSmmShadowStackSize > 0) &&
               (PFAddress >= ShadowStackGuardPageAddress) &&
               (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
          );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
          );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }

    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))
  {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&
        (PFAddress < EFI_PAGE_SIZE))
    {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
    }
  }

  if (mSmmProfileEnabled) {
    if (mIsStandaloneMm) {
      //
      // Only logging ranges shall run here in the MM environment.
      //
      ASSERT (IsNonMmramLoggingAddress (PFAddress));
    }

    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG_CODE (
      DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
      );
    CpuDeadLoop ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

/**
  This function reads the CR2 register.

  @param[out] Cr2  Pointer to variable to hold the CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  //
  // A page fault (#PF) that triggers an update to the page
  // table only occurs if SMM Profile is enabled. Therefore, it is
  // only necessary to save the CR2 register if SMM Profile is
  // configured to be enabled.
  //
  if (mSmmProfileEnabled) {
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register.

  @param[in] Cr2  Value to write into the CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  //
  // A page fault (#PF) that triggers an update to the page
  // table only occurs if SMM Profile is enabled. Therefore, it is
  // only necessary to restore the CR2 register if SMM Profile is
  // configured to be enabled.
  //
  if (mSmmProfileEnabled) {
    AsmWriteCr2 (Cr2);
  }
}