source: vbox/trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c@108793

Last change on this file since 108793 was 105670, checked in by vboxsync, 8 months ago

Devices/EFI/FirmwareNew: Merge edk2-stable-202405 and make it build on aarch64, bugref:4643

  • Property svn:eol-style set to native
File size: 29.7 KB

/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES  8
#define ACC_MAX_BIT       BIT3

LIST_ENTRY                mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN                   m1GPageTableSupport = FALSE;
BOOLEAN                   mCpuSmmRestrictedMemoryAccess;
X86_ASSEMBLY_PATCH_LABEL  gPatch5LevelPagingNeeded;

/**
  Check whether the processor supports 1-GByte pages.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }

  return FALSE;
}

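//
// A minimal alternative sketch of the same check using the named CPUID
// definitions from MdePkg's <Register/Intel/Cpuid.h> instead of raw leaf
// numbers and BIT26. This assumes CPUID_EXTENDED_CPU_SIG and
// CPUID_EXTENDED_CPU_SIG_EDX (with its Page1GB bit field) are available,
// which is the case in current edk2 trees; it is illustrative only.
//
#if 0
BOOLEAN
Is1GPageSupportByName (
  VOID
  )
{
  UINT32                      MaxExtendedFunction;
  CPUID_EXTENDED_CPU_SIG_EDX  ExtSigEdx;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
  if (MaxExtendedFunction < CPUID_EXTENDED_CPU_SIG) {
    return FALSE;
  }

  AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &ExtSigEdx.Uint32);
  return (BOOLEAN)(ExtSigEdx.Bits.Page1GB == 1);
}
#endif
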
/**
  Check whether 5-level paging needs to be enabled.

  The routine returns TRUE when the CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and
  the maximum physical-address width is greater than 48 bits. Because 4-level paging
  can address physical addresses up to 2^48 - 1, there is no need to enable 5-level
  paging when the maximum physical-address width is 48 bits or less.

  @retval TRUE   5-level paging enabling is needed.
  @retval FALSE  5-level paging enabling is not needed.
**/
BOOLEAN
Is5LevelPagingNeeded (
  VOID
  )
{
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX               VirPhyAddressSize;
  CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX  ExtFeatureEcx;
  UINT32                                       MaxExtendedFunctionId;

  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
  if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  AsmCpuidEx (
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
    CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
    NULL,
    NULL,
    &ExtFeatureEcx.Uint32,
    NULL
    );
  DEBUG ((
    DEBUG_INFO,
    "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
    VirPhyAddressSize.Bits.PhysicalAddressBits,
    ExtFeatureEcx.Bits.FiveLevelPage
    ));

  if ((VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) &&
      (ExtFeatureEcx.Bits.FiveLevelPage == 1))
  {
    return TRUE;
  } else {
    return FALSE;
  }
}

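//
// Why the threshold above is 4 * 9 + 12 = 48: each paging level translates
// 9 linear-address bits and the page offset contributes 12 bits, so
// 4-level paging covers 4 * 9 + 12 = 48 bits of linear address while
// 5-level paging covers 5 * 9 + 12 = 57 bits. A physical address width of
// 48 bits or less is therefore already reachable with the 4-level tree.
//
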
/**
  Set the sub-entries number in an entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number, 0-based:
                               0 means there is 1 sub-entry under this entry;
                               0x1ff means there are 512 sub-entries under this entry.

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  //
  // The sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return the sub-entries number in an entry.

  @param[in] Entry  Pointer to entry

  @return Sub-entries number, 0-based:
          0 means there is 1 sub-entry under this entry;
          0x1ff means there are 512 sub-entries under this entry.
**/
UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  //
  // The sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}

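//
// A minimal self-test sketch for the two helpers above, using a
// hypothetical entry value (not taken from a live page table): bits 52..60
// carry the 0-based count while the architectural low bits stay untouched.
//
#if 0
VOID
SubEntriesNumExample (
  VOID
  )
{
  UINT64  Entry;

  Entry = IA32_PG_P | IA32_PG_RW;
  SetSubEntriesNum (&Entry, 0x1FF);    // record 512 sub-entries (0-based 511)
  ASSERT (GetSubEntriesNum (&Entry) == 0x1FF);
  ASSERT ((Entry & (IA32_PG_P | IA32_PG_RW)) == (IA32_PG_P | IA32_PG_RW));
}
#endif
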
/**
  Calculate the maximum supported physical address.

  @param[in] Is5LevelPagingNeeded  Whether 5-level paging enabling is needed.

  @return The maximum supported physical address width, in bits.
**/
UINT8
CalculateMaximumSupportAddress (
  BOOLEAN  Is5LevelPagingNeeded
  )
{
  UINT32  RegEax;
  UINT8   PhysicalAddressBits;
  VOID    *Hob;

  //
  // Get the number of physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8)RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // 4-level paging supports translating 48-bit linear addresses to 52-bit physical addresses.
  // Since linear addresses are sign-extended, the linear-address space of 4-level paging is:
  // [0, 2^47-1] and [0xffff8000_00000000, 0xffffffff_ffffffff].
  // So only the [0, 2^47-1] linear-address range maps to the identical physical-address range
  // when 5-level paging is disabled.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (!Is5LevelPagingNeeded && (PhysicalAddressBits > 47)) {
    PhysicalAddressBits = 47;
  }

  return PhysicalAddressBits;
}

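//
// Worked example of the sign-extension constraint above: a canonical
// 4-level address must have bits 63:47 identical, so 0x00007FFF_FFFFFFFF
// is the highest linear address in the lower half. Capping the width at
// 47 bits keeps the identity-mapped range inside that canonical window.
//
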
/**
  Create the page table for SMM use.

  @return The address of the PML4/PML5 root page table (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  UINTN                     PageTable;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *PdptEntry;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;
  UINT8                     PhysicalAddressBits;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress (m5LevelPagingNeeded);
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  if (m5LevelPagingNeeded) {
    mPagingMode = m1GPageTableSupport ? Paging5Level1GB : Paging5Level;
  } else {
    mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
  }

  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));

  //
  // Generate the initial SMM page table.
  // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
  //
  PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ? mPhysicalAddressBits : 32;
  PageTable           = GenSmmPageTable (mPagingMode, PhysicalAddressBits);

  if (m5LevelPagingNeeded) {
    Pml5Entry = (UINT64 *)PageTable;
    //
    // Set the Pml5Entry sub-entries number for SMM PF handler usage.
    //
    SetSubEntriesNum (Pml5Entry, 1);
    Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
  } else {
    Pml4Entry = (UINT64 *)PageTable;
  }

  //
  // Set the IA32_PG_PMNT bit to mask the first 4 PDPT entries.
  //
  PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
  for (Index = 0; Index < 4; Index++) {
    PdptEntry[Index] |= IA32_PG_PMNT;
  }

  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // Set the Pml4Entry sub-entries number for SMM PF handler usage.
    //
    SetSubEntriesNum (Pml4Entry, 3);

    //
    // Add pages to the page pool
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE)
  {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do single stepping
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry                    = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry                   += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow    = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0   = 0;
    IdtEntry->Bits.GateType     = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh   = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper  = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1   = 0;
  } else {
    //
    // Register the SMM Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
  }

  //
  // Additional SMM IDT initialization for the SMM CET shadow stack
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
    InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);
  }

  //
  // Return the address of the PML4/PML5 root (to set CR3)
  //
  return (UINT32)PageTable;
}

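//
// Pool-seeding arithmetic from above, spelled out: AllocatePageTableMemory()
// returns PAGE_TABLE_PAGES (8) contiguous 4-KByte pages, and each loop
// iteration advances FreePage by EFI_PAGE_SIZE / sizeof (LIST_ENTRY)
// elements (4096 / 16 = 256 on X64), i.e. exactly one page, so the head of
// every free page doubles as the LIST_ENTRY that links it into mPagePool.
//
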
/**
  Set the access record in an entry.

  @param[in, out] Entry  Pointer to entry
  @param[in]      Acc    Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return the access record in an entry.

  @param[in] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64  *Entry
  )
{
  //
  // The access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in an entry.

  @param[in, out] Entry  Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64  *Entry
  )
{
  UINT64  Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset the
    // access record to the initial value 7; adding ACC_MAX_BIT makes the returned
    // value larger than that of any entry which has not been accessed.
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not already the smallest value 0, decrement it by 1
      // and update the access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }

  return Acc;
}

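//
// A worked trace of the aging scheme above, on a hypothetical entry: a
// freshly accessed entry reports 7 + ACC_MAX_BIT = 15 and restarts at 7,
// while an untouched entry decays 7 -> 6 -> ... -> 0 across successive
// sweeps, which is what lets ReclaimPages() below victimize the
// least-recently-used page.
//
#if 0
VOID
AccNumAgingExample (
  VOID
  )
{
  UINT64  Entry;

  Entry = IA32_PG_P | IA32_PG_A;               // entry with the Accessed bit set
  ASSERT (GetAndUpdateAccNum (&Entry) == 0x7 + ACC_MAX_BIT);
  ASSERT (GetAndUpdateAccNum (&Entry) == 7);   // A bit now clear; record decays
  ASSERT (GetAccNum (&Entry) == 6);
}
#endif
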
/**
  Reclaim free pages for the PageFault handler.

  Search the whole page-table tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool. Then check whether its upper-level entries need to be inserted
  into the page pool as well.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  PFAddress          = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
  Pml5               = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-level paging so that the page-table
    // parsing logic only has to handle the 5-level page structure.
    //
    Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
    Pml5      = &Pml5Entry;
  }

  //
  // First, find the leaf entry that has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }

      Pdpt        = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, do not consider the PML4 entry for release
            //
            PML4EIgnore = TRUE;
          }

          continue;
        }

        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It is not a 1-GByte page entry, so it points to a page directory;
          // the PML4 entry itself is therefore not a release candidate
          //
          PML4EIgnore = TRUE;
          Pdt         = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, do not consider the PDPT entry for release
                //
                PDPTEIgnore = TRUE;
              }

              continue;
            }

            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It is not a 2-MByte page entry, so it is a PD entry pointing to a
              // page table; look for the entry with the smallest access record value
              //
              PDPTEIgnore = TRUE;
              if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||
                  (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))
              {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the page address to be released
                  //
                  MinAcc             = Acc;
                  MinPml5            = Pml5Index;
                  MinPml4            = Pml4Index;
                  MinPdpt            = PdptIndex;
                  MinPdt             = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }

          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
            // it only has entries that map 2-MByte pages, so the PDPT entry itself
            // is a release candidate
            //
            if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||
                (Pml5Index != PFAddressPml5Index))
            {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the page address to be released
                //
                MinAcc             = Acc;
                MinPml5            = Pml5Index;
                MinPml4            = Pml4Index;
                MinPdpt            = PdptIndex;
                MinPdt             = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }

      if (!PML4EIgnore) {
        //
        // If this PML4 entry has no PDPT entries pointing to page directories,
        // it only has entries that map 1-GByte pages, so the PML4 entry itself
        // is a release candidate
        //
        if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the page address to be released
            //
            MinAcc             = Acc;
            MinPml5            = Pml5Index;
            MinPml4            = Pml4Index;
            MinPdpt            = (UINTN)-1;
            MinPdt             = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }

  //
  // Make sure one PML4/PDPT/PD entry was selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear the entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool as well
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // A 4-KByte page table was released; check the PDPT entry
      //
      Pml4          = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt          = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if ((SubEntriesNum == 0) &&
          ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))
      {
        //
        // Release the empty page directory if it has no more 4-KByte page table
        // entries, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    if (MinPdpt != (UINTN)-1) {
      //
      // A 2-MByte page entry or a page directory was released; check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {
        //
        // Release the empty page directory pointer table if it has no more
        // entries, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt       = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    //
    // A PML4 entry was already released above; nothing further to update
    //
    break;
  }
}

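//
// Note on the PFAddress*Index comparisons above: the entry path that maps
// the just-faulted address is excluded from victim selection, so this
// sweep cannot free the very page table that SmiDefaultPFHandler() is in
// the middle of populating for the faulting address.
//
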
/**
  Allocate a free page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64  RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If the page pool is empty, reclaim used pages and insert one into the page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from the page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}

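//
// The pool keeps its LIST_ENTRY links inside the free pages themselves, so
// RetVal above is both the list-node address and the page base address; the
// ZeroMem() erases the stale link words along with the rest of the page
// before the page is wired into the table.
//
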
/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set the default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not supply page table attributes, use the default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }

  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }

  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
    case SmmPageSize4K:
      //
      // BIT12 to BIT20 is the Page Table index
      //
      EndBit = 12;
      break;
    case SmmPageSize2M:
      //
      // BIT21 to BIT29 is the Page Directory index
      //
      EndBit         = 21;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    case SmmPageSize1G:
      if (!m1GPageTableSupport) {
        DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
        ASSERT (FALSE);
      }

      //
      // BIT30 to BIT38 is the Page Directory Pointer Table index
      //
      EndBit         = 30;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    default:
      ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set the NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from the page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }

      //
      // BIT9 to BIT11 of the entry is used to save the access record;
      // the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check whether the entry already exists; this can happen when page entries
      // of different sizes are created under the same upper-level entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }

    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }

    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

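//
// A minimal sketch of the 9-bit index extraction performed by the walk
// above, using a hypothetical faulting address chosen so each index is
// obvious (illustrative only, not taken from a real fault):
//
#if 0
VOID
PageTableIndexExample (
  VOID
  )
{
  UINT64  Example;

  Example = ((UINT64)2 << 39) | ((UINT64)3 << 30) | ((UINT64)4 << 21) | 0x5678;
  ASSERT (BitFieldRead64 (Example, 39, 39 + 8) == 2);  // PML4 index
  ASSERT (BitFieldRead64 (Example, 30, 30 + 8) == 3);  // PDPT index
  ASSERT (BitFieldRead64 (Example, 21, 21 + 8) == 4);  // final PTIndex for a 2-MByte mapping (EndBit = 21)
}
#endif
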
/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is
                        processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  ShadowStackGuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs within the SMRAM range, it might be a hit on an SMM
  // stack/shadow-stack guard page, or an SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))
  {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex                    = GetCpuIndex ();
    GuardPageAddress            = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
               (mSmmShadowStackSize > 0) &&
               (PFAddress >= ShadowStackGuardPageAddress) &&
               (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
    } else {
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
          );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
          );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }

    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in the non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))
  {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If a NULL pointer was just accessed
    //
    if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&
        (PFAddress < EFI_PAGE_SIZE))
    {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}

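//
// Dispatch summary for the handler above: faults inside SMRAM are treated
// as fatal diagnostics (stack or shadow-stack guard-page hit, or a page
// protection violation, with heap-guard non-stop mode as the only return
// path), while faults outside SMRAM either trip the restricted-access,
// NULL-pointer, or forbidden-address checks or fall through to the
// on-demand mapper SmiDefaultPFHandler() (SmmProfilePFHandler() when the
// SMM Profile feature is enabled).
//
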
/**
  This function reads the CR2 register when on-demand paging is enabled.

  @param[out] Cr2  Pointer to variable to hold the CR2 register value.
**/
VOID
SaveCr2 (
  OUT UINTN  *Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    *Cr2 = AsmReadCr2 ();
  }
}

/**
  This function restores the CR2 register when on-demand paging is enabled.

  @param[in] Cr2  Value to write into the CR2 register.
**/
VOID
RestoreCr2 (
  IN UINTN  Cr2
  )
{
  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // On-demand paging is enabled when access to non-SMRAM is not restricted.
    //
    AsmWriteCr2 (Cr2);
  }
}

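//
// A minimal usage sketch (hypothetical caller, illustrative only): the SMI
// handling path is expected to bracket code that may take an on-demand #PF
// so that the CR2 value belonging to the interrupted, non-SMM context is
// not clobbered.
//
#if 0
VOID
Cr2BracketExample (
  VOID
  )
{
  UINTN  Cr2;

  Cr2 = 0;
  SaveCr2 (&Cr2);
  //
  // ... SMI processing that may fault and map pages on demand ...
  //
  RestoreCr2 (Cr2);
}
#endif
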
/**
  Return whether access to non-SMRAM is restricted.

  @retval TRUE   Access to non-SMRAM is restricted.
  @retval FALSE  Access to non-SMRAM is not restricted.
**/
BOOLEAN
IsRestrictedMemoryAccess (
  VOID
  )
{
  return mCpuSmmRestrictedMemoryAccess;
}