VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c@ 101291

Last change on this file since 101291 was 101291, checked in by vboxsync, 17 months ago

EFI/FirmwareNew: Make edk2-stable202308 build on all supported platforms (using gcc at least, msvc not tested yet), bugref:4643

  • Property svn:eol-style set to native
File size: 29.0 KB
Line 
1/** @file
2Page Fault (#PF) handler for X64 processors
3
4Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7SPDX-License-Identifier: BSD-2-Clause-Patent
8
9**/
10
11#include "PiSmmCpuDxeSmm.h"
12
13#define PAGE_TABLE_PAGES 8
14#define ACC_MAX_BIT BIT3
15
16LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
17BOOLEAN m1GPageTableSupport = FALSE;
18BOOLEAN mCpuSmmRestrictedMemoryAccess;
19X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
20
21/**
22 Check if 1-GByte pages is supported by processor or not.
23
24 @retval TRUE 1-GByte pages is supported.
25 @retval FALSE 1-GByte pages is not supported.
26
27**/
28BOOLEAN
29Is1GPageSupport (
30 VOID
31 )
32{
33 UINT32 RegEax;
34 UINT32 RegEdx;
35
36 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
37 if (RegEax >= 0x80000001) {
38 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
39 if ((RegEdx & BIT26) != 0) {
40 return TRUE;
41 }
42 }
43
44 return FALSE;
45}
46
47/**
48 The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and
49 the max physical address bits is bigger than 48. Because 4-level paging can support
50 to address physical address up to 2^48 - 1, there is no need to enable 5-level paging
51 with max physical address bits <= 48.
52
53 @retval TRUE 5-level paging enabling is needed.
54 @retval FALSE 5-level paging enabling is not needed.
55**/
56BOOLEAN
57Is5LevelPagingNeeded (
58 VOID
59 )
60{
61 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
62 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
63 UINT32 MaxExtendedFunctionId;
64
65 AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
66 if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
67 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
68 } else {
69 VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
70 }
71
72 AsmCpuidEx (
73 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
74 CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
75 NULL,
76 NULL,
77 &ExtFeatureEcx.Uint32,
78 NULL
79 );
80 DEBUG ((
81 DEBUG_INFO,
82 "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
83 VirPhyAddressSize.Bits.PhysicalAddressBits,
84 ExtFeatureEcx.Bits.FiveLevelPage
85 ));
86
87 if ((VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) &&
88 (ExtFeatureEcx.Bits.FiveLevelPage == 1))
89 {
90 return TRUE;
91 } else {
92 return FALSE;
93 }
94}
95
96/**
97 Set sub-entries number in entry.
98
99 @param[in, out] Entry Pointer to entry
100 @param[in] SubEntryNum Sub-entries number based on 0:
101 0 means there is 1 sub-entry under this entry
102 0x1ff means there is 512 sub-entries under this entry
103
104**/
105VOID
106SetSubEntriesNum (
107 IN OUT UINT64 *Entry,
108 IN UINT64 SubEntryNum
109 )
110{
111 //
112 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
113 //
114 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
115}
116
117/**
118 Return sub-entries number in entry.
119
120 @param[in] Entry Pointer to entry
121
122 @return Sub-entries number based on 0:
123 0 means there is 1 sub-entry under this entry
124 0x1ff means there is 512 sub-entries under this entry
125**/
126UINT64
127GetSubEntriesNum (
128 IN UINT64 *Entry
129 )
130{
131 //
132 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
133 //
134 return BitFieldRead64 (*Entry, 52, 60);
135}
136
137/**
138 Calculate the maximum support address.
139
140 @return the maximum support address.
141**/
142UINT8
143CalculateMaximumSupportAddress (
144 VOID
145 )
146{
147 UINT32 RegEax;
148 UINT8 PhysicalAddressBits;
149 VOID *Hob;
150
151 //
152 // Get physical address bits supported.
153 //
154 Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
155 if (Hob != NULL) {
156 PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;
157 } else {
158 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
159 if (RegEax >= 0x80000008) {
160 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
161 PhysicalAddressBits = (UINT8)RegEax;
162 } else {
163 PhysicalAddressBits = 36;
164 }
165 }
166
167 return PhysicalAddressBits;
168}
169
170/**
171 Create PageTable for SMM use.
172
173 @return The address of PML4 (to set CR3).
174
175**/
/**
  Create PageTable for SMM use.

  Builds the initial SMM page table (4- or 5-level paging, 1-GByte pages
  when supported), masks the first 4 PDPT entries against reclaim, seeds
  the free-page pool when on-demand paging is in use, and installs the
  SMM page-fault handler plus IST settings for stack guard / CET.

  @return The address of PML4/PML5 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  UINTN                     PageTable;
  LIST_ENTRY                *FreePage;
  UINTN                     Index;
  UINTN                     PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR  *IdtEntry;
  EFI_STATUS                Status;
  UINT64                    *PdptEntry;
  UINT64                    *Pml4Entry;
  UINT64                    *Pml5Entry;
  UINT8                     PhysicalAddressBits;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (mPFLock);

  //
  // Capture CPU/platform capabilities into module globals before any
  // page-table construction depends on them.
  //
  mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
  m1GPageTableSupport           = Is1GPageSupport ();
  m5LevelPagingNeeded           = Is5LevelPagingNeeded ();
  mPhysicalAddressBits          = CalculateMaximumSupportAddress ();
  //
  // Patch the SMI entry assembly stub so it knows whether to enable 5-level paging.
  //
  PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
  if (m5LevelPagingNeeded) {
    mPagingMode = m1GPageTableSupport ? Paging5Level1GB : Paging5Level;
  } else {
    mPagingMode = m1GPageTableSupport ? Paging4Level1GB : Paging4Level;
  }

  DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
  DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
  DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
  DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));

  //
  // Generate initial SMM page table.
  // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE;
  // the rest is mapped on demand by the page-fault handler.
  //
  PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ? mPhysicalAddressBits : 32;
  PageTable           = GenSmmPageTable (mPagingMode, PhysicalAddressBits);

  if (m5LevelPagingNeeded) {
    Pml5Entry = (UINT64 *)PageTable;
    //
    // Set Pml5Entry sub-entries number for smm PF handler usage.
    //
    SetSubEntriesNum (Pml5Entry, 1);
    Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
  } else {
    Pml4Entry = (UINT64 *)PageTable;
  }

  //
  // Set IA32_PG_PMNT bit to mask first 4 PdptEntry, so the PF handler
  // never reclaims the page tables covering the low 4G.
  //
  PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
  for (Index = 0; Index < 4; Index++) {
    PdptEntry[Index] |= IA32_PG_PMNT;
  }

  if (!mCpuSmmRestrictedMemoryAccess) {
    //
    // Set Pml4Entry sub-entries number for smm PF handler usage.
    //
    SetSubEntriesNum (Pml4Entry, 3);

    //
    // Add pages to page pool; each 4K page becomes one free-list node.
    //
    FreePage = (LIST_ENTRY *)AllocatePageTableMemory (PAGE_TABLE_PAGES);
    ASSERT (FreePage != NULL);
    for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
      InsertTailList (&mPagePool, FreePage);
      FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
      HEAP_GUARD_NONSTOP_MODE ||
      NULL_DETECTION_NONSTOP_MODE)
  {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry                    = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
    IdtEntry                   += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow    = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0   = 0;
    IdtEntry->Bits.GateType     = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh   = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper  = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1   = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
    ASSERT_EFI_ERROR (Status);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Stack Guard\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
  }

  //
  // Additional SMM IDT initialization for SMM CET shadow stack
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    DEBUG ((DEBUG_INFO, "Initialize IDT IST field for SMM Shadow Stack\n"));
    InitializeIdtIst (EXCEPT_IA32_PAGE_FAULT, 1);
    InitializeIdtIst (EXCEPT_IA32_MACHINE_CHECK, 1);
  }

  //
  // Return the address of PML4/PML5 (to set CR3)
  //
  return (UINT32)PageTable;
}
303
304/**
305 Set access record in entry.
306
307 @param[in, out] Entry Pointer to entry
308 @param[in] Acc Access record value
309
310**/
311VOID
312SetAccNum (
313 IN OUT UINT64 *Entry,
314 IN UINT64 Acc
315 )
316{
317 //
318 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
319 //
320 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
321}
322
323/**
324 Return access record in entry.
325
326 @param[in] Entry Pointer to entry
327
328 @return Access record value.
329
330**/
331UINT64
332GetAccNum (
333 IN UINT64 *Entry
334 )
335{
336 //
337 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
338 //
339 return BitFieldRead64 (*Entry, 9, 11);
340}
341
342/**
343 Return and update the access record in entry.
344
345 @param[in, out] Entry Pointer to entry
346
347 @return Access record value.
348
349**/
350UINT64
351GetAndUpdateAccNum (
352 IN OUT UINT64 *Entry
353 )
354{
355 UINT64 Acc;
356
357 Acc = GetAccNum (Entry);
358 if ((*Entry & IA32_PG_A) != 0) {
359 //
360 // If this entry has been accessed, clear access flag in Entry and update access record
361 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
362 //
363 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
364 SetAccNum (Entry, 0x7);
365 return (0x7 + ACC_MAX_BIT);
366 } else {
367 if (Acc != 0) {
368 //
369 // If the access record is not the smallest value 0, minus 1 and update the access record field
370 //
371 SetAccNum (Entry, Acc - 1);
372 }
373 }
374
375 return Acc;
376}
377
378/**
379 Reclaim free pages for PageFault handler.
380
381 Search the whole entries tree to find the leaf entry that has the smallest
382 access record value. Insert the page pointed by this leaf entry into the
383 page pool. And check its upper entries if need to be inserted into the page
384 pool or not.
385
386**/
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

  Entries covering the faulting address (CR2) and entries marked with
  IA32_PG_PMNT are never reclaimed.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pdt;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdtIndex;
  UINTN     MinPml5;
  UINTN     MinPml4;
  UINTN     MinPdpt;
  UINTN     MinPdt;
  UINT64    MinAcc;
  UINT64    Acc;
  UINT64    SubEntriesNum;
  BOOLEAN   PML4EIgnore;
  BOOLEAN   PDPTEIgnore;
  UINT64    *ReleasePageAddress;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;
  UINT64    PFAddress;
  UINT64    PFAddressPml5Index;
  UINT64    PFAddressPml4Index;
  UINT64    PFAddressPdptIndex;
  UINT64    PFAddressPdtIndex;

  Pml4               = NULL;
  Pdpt               = NULL;
  Pdt                = NULL;
  MinAcc             = (UINT64)-1;
  MinPml4            = (UINTN)-1;
  MinPml5            = (UINTN)-1;
  MinPdpt            = (UINTN)-1;
  MinPdt             = (UINTN)-1;
  Acc                = 0;
  ReleasePageAddress = 0;
  //
  // Decompose the faulting address into per-level table indexes so the
  // tables needed to map it are excluded from reclaim.
  //
  PFAddress          = AsmReadCr2 ();
  PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
  PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
  PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
  PFAddressPdtIndex  = BitFieldRead64 (PFAddress, 21, 21 + 8);

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
  Pml5               = (UINT64 *)(UINTN)(AsmReadCr3 () & gPhyMask);

  if (!Enable5LevelPaging) {
    //
    // Create one fake PML5 entry for 4-Level Paging
    // so that the page table parsing logic only handles 5-Level page structure.
    //
    Pml5Entry = (UINTN)Pml5 | IA32_PG_P;
    Pml5      = &Pml5Entry;
  }

  //
  // First, find the leaf entry has the smallest access record value
  //
  for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
    if (((Pml5[Pml5Index] & IA32_PG_P) == 0) || ((Pml5[Pml5Index] & IA32_PG_PMNT) != 0)) {
      //
      // If the PML5 entry is not present or is masked, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & gPhyMask);
    for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
      if (((Pml4[Pml4Index] & IA32_PG_P) == 0) || ((Pml4[Pml4Index] & IA32_PG_PMNT) != 0)) {
        //
        // If the PML4 entry is not present or is masked, skip it
        //
        continue;
      }

      Pdpt        = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
      PML4EIgnore = FALSE;
      for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
        if (((Pdpt[PdptIndex] & IA32_PG_P) == 0) || ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0)) {
          //
          // If the PDPT entry is not present or is masked, skip it
          //
          if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PDPT entry is masked, we will ignore checking the PML4 entry
            //
            PML4EIgnore = TRUE;
          }

          continue;
        }

        if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
          //
          // It's not 1-GByte pages entry, it should be a PDPT entry,
          // we will not check PML4 entry more
          //
          PML4EIgnore = TRUE;
          Pdt         = (UINT64 *)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
          PDPTEIgnore = FALSE;
          for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof (*Pdt); PdtIndex++) {
            if (((Pdt[PdtIndex] & IA32_PG_P) == 0) || ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0)) {
              //
              // If the PD entry is not present or is masked, skip it
              //
              if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
                //
                // If the PD entry is masked, we will not check the PDPT entry any more
                //
                PDPTEIgnore = TRUE;
              }

              continue;
            }

            if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
              //
              // It's not 2 MByte page table entry, it should be PD entry
              // we will find the entry has the smallest access record value
              //
              PDPTEIgnore = TRUE;
              //
              // Never select the entry that maps the faulting address itself.
              //
              if ((PdtIndex != PFAddressPdtIndex) || (PdptIndex != PFAddressPdptIndex) ||
                  (Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index))
              {
                Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
                if (Acc < MinAcc) {
                  //
                  // If the PD entry has the smallest access record value,
                  // save the Page address to be released
                  //
                  MinAcc             = Acc;
                  MinPml5            = Pml5Index;
                  MinPml4            = Pml4Index;
                  MinPdpt            = PdptIndex;
                  MinPdt             = PdtIndex;
                  ReleasePageAddress = Pdt + PdtIndex;
                }
              }
            }
          }

          if (!PDPTEIgnore) {
            //
            // If this PDPT entry has no PDT entries pointer to 4 KByte pages,
            // it should only has the entries point to 2 MByte Pages
            //
            if ((PdptIndex != PFAddressPdptIndex) || (Pml4Index != PFAddressPml4Index) ||
                (Pml5Index != PFAddressPml5Index))
            {
              Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
              if (Acc < MinAcc) {
                //
                // If the PDPT entry has the smallest access record value,
                // save the Page address to be released
                //
                MinAcc             = Acc;
                MinPml5            = Pml5Index;
                MinPml4            = Pml4Index;
                MinPdpt            = PdptIndex;
                MinPdt             = (UINTN)-1;
                ReleasePageAddress = Pdpt + PdptIndex;
              }
            }
          }
        }
      }

      if (!PML4EIgnore) {
        //
        // If PML4 entry has no the PDPT entry pointer to 2 MByte pages,
        // it should only has the entries point to 1 GByte Pages
        //
        if ((Pml4Index != PFAddressPml4Index) || (Pml5Index != PFAddressPml5Index)) {
          Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
          if (Acc < MinAcc) {
            //
            // If the PML4 entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc             = Acc;
            MinPml5            = Pml5Index;
            MinPml4            = Pml4Index;
            MinPdpt            = (UINTN)-1;
            MinPdt             = (UINTN)-1;
            ReleasePageAddress = Pml4 + Pml4Index;
          }
        }
      }
    }
  }

  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If 4 KByte Page Table is released, check the PDPT entry
      //
      Pml4          = (UINT64 *)(UINTN)(Pml5[MinPml5] & gPhyMask);
      Pdpt          = (UINT64 *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
      SubEntriesNum = GetSubEntriesNum (Pdpt + MinPdpt);
      if ((SubEntriesNum == 0) &&
          ((MinPdpt != PFAddressPdptIndex) || (MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index)))
      {
        //
        // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if ((SubEntriesNum == 0) && ((MinPml4 != PFAddressPml4Index) || (MinPml5 != PFAddressPml5Index))) {
        //
        // Release the empty PML4 table if there was no more 1G KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY *)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt       = (UINTN)-1;
        continue;
      }

      //
      // Update the sub-entries field in PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
      break;
    }

    //
    // PLM4 table has been released before, exit it
    //
    break;
  }
}
659
660/**
661 Allocate free Page for PageFault handler use.
662
663 @return Page address.
664
665**/
666UINT64
667AllocPage (
668 VOID
669 )
670{
671 UINT64 RetVal;
672
673 if (IsListEmpty (&mPagePool)) {
674 //
675 // If page pool is empty, reclaim the used pages and insert one into page pool
676 //
677 ReclaimPages ();
678 }
679
680 //
681 // Get one free page and remove it from page pool
682 //
683 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
684 RemoveEntryList (mPagePool.ForwardLink);
685 //
686 // Clean this page and return
687 //
688 ZeroMem ((VOID *)(UINTN)RetVal, EFI_PAGE_SIZE);
689 return RetVal;
690}
691
692/**
693 Page Fault handler for SMM use.
694
695**/
/**
  Page Fault handler for SMM use.

  Maps the faulting address (CR2) on demand: walks the page table from the
  top level down to the level selected by the page size, allocating missing
  intermediate tables from the page pool, then fills the leaf entry.
  The page size and count may be overridden by the platform hook
  GetPlatformPageTableAttribute().

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64              *PageTable;
  UINT64              *PageTableTop;
  UINT64              PFAddress;
  UINTN               StartBit;
  UINTN               EndBit;
  UINT64              PTIndex;
  UINTN               Index;
  SMM_PAGE_SIZE_TYPE  PageSize;
  UINTN               NumOfPages;
  UINTN               PageAttribute;
  EFI_STATUS          Status;
  UINT64              *UpperEntry;
  BOOLEAN             Enable5LevelPaging;
  IA32_CR4            Cr4;

  //
  // Set default SMM page attribute
  //
  PageSize      = SmmPageSize2M;
  NumOfPages    = 1;
  PageAttribute = 0;

  EndBit       = 0;
  PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
  PFAddress    = AsmReadCr2 ();

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize      = SmmPageSize2M;
    NumOfPages    = 1;
    PageAttribute = 0;
  }

  //
  // Clamp platform-supplied values to sane bounds.
  //
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }

  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit is the lowest address bit resolved by the leaf table level.
  //
  switch (PageSize) {
    case SmmPageSize4K:
      //
      // BIT12 to BIT20 is Page Table index
      //
      EndBit = 12;
      break;
    case SmmPageSize2M:
      //
      // BIT21 to BIT29 is Page Directory index
      //
      EndBit         = 21;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    case SmmPageSize1G:
      if (!m1GPageTableSupport) {
        DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
        ASSERT (FALSE);
      }

      //
      // BIT30 to BIT38 is Page Directory Pointer Table index
      //
      EndBit         = 30;
      PageAttribute |= (UINTN)IA32_PG_PS;
      break;
    default:
      ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = PageTableTop;
    UpperEntry = NULL;
    //
    // Walk down the table hierarchy, 9 index bits per level, stopping
    // one level above the leaf (the loop body handles intermediate levels).
    //
    for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }

      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }

    //
    // Fill the new entry
    //
    PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
    }

    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
837
838/**
839 ThePage Fault handler wrapper for SMM use.
840
841 @param InterruptType Defines the type of interrupt or exception that
842 occurred on the processor.This parameter is processor architecture specific.
843 @param SystemContext A pointer to the processor context when
844 the interrupt occurred on the processor.
845**/
/**
  ThePage Fault handler wrapper for SMM use.

  Classifies the fault (unsupported address, SMRAM stack/shadow-stack guard
  page, SMM page protection violation, non-SMRAM access after lock, NULL
  pointer, forbidden communication buffer) and either dead-loops, forwards
  to the guard-page/profile handlers, or maps the page on demand via
  SmiDefaultPFHandler(). Serialized by mPFLock.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  PFAddress;
  UINTN  GuardPageAddress;
  UINTN  ShadowStackGuardPageAddress;
  UINTN  CpuIndex;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // NOTE(review): the bound is 1 << (mPhysicalAddressBits - 1) — presumably
  // to exclude the upper half used for canonical-address sign extension;
  // confirm against the static page table built in SmmInitPageTable().
  //
  if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
    DumpCpuContext (InterruptType, SystemContext);
    DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in SMRAM range, it might be in a SMM stack/shadow stack guard page,
  // or SMM page protection violation.
  //
  if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)))
  {
    DumpCpuContext (InterruptType, SystemContext);
    CpuIndex                    = GetCpuIndex ();
    GuardPageAddress            = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
    if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
        (PFAddress >= GuardPageAddress) &&
        (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
    } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
               (mSmmShadowStackSize > 0) &&
               (PFAddress >= ShadowStackGuardPageAddress) &&
               (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE)))
    {
      DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
    } else {
      //
      // IA32_PF_EC_ID set means the fault was an instruction fetch.
      //
      if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
        DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
          );
      } else {
        DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
        DEBUG_CODE (
          DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
          );
      }

      if (HEAP_GUARD_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }
    }

    CpuDeadLoop ();
    goto Exit;
  }

  //
  // If a page fault occurs in non-SMRAM range.
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))
  {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
        );
      CpuDeadLoop ();
      goto Exit;
    }

    //
    // If NULL pointer was just accessed
    //
    if (((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) &&
        (PFAddress < EFI_PAGE_SIZE))
    {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );

      if (NULL_DETECTION_NONSTOP_MODE) {
        GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
        goto Exit;
      }

      CpuDeadLoop ();
      goto Exit;
    }

    if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
      DumpCpuContext (InterruptType, SystemContext);
      DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
        );
      CpuDeadLoop ();
      goto Exit;
    }
  }

  //
  // Legitimate on-demand fault: record it (profiling) or map the page.
  //
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

Exit:
  ReleaseSpinLock (mPFLock);
}
976
977/**
978 This function reads CR2 register when on-demand paging is enabled.
979
980 @param[out] *Cr2 Pointer to variable to hold CR2 register value.
981**/
982VOID
983SaveCr2 (
984 OUT UINTN *Cr2
985 )
986{
987 if (!mCpuSmmRestrictedMemoryAccess) {
988 //
989 // On-demand paging is enabled when access to non-SMRAM is not restricted.
990 //
991 *Cr2 = AsmReadCr2 ();
992 }
993}
994
995/**
996 This function restores CR2 register when on-demand paging is enabled.
997
998 @param[in] Cr2 Value to write into CR2 register.
999**/
1000VOID
1001RestoreCr2 (
1002 IN UINTN Cr2
1003 )
1004{
1005 if (!mCpuSmmRestrictedMemoryAccess) {
1006 //
1007 // On-demand paging is enabled when access to non-SMRAM is not restricted.
1008 //
1009 AsmWriteCr2 (Cr2);
1010 }
1011}
1012
1013/**
1014 Return whether access to non-SMRAM is restricted.
1015
1016 @retval TRUE Access to non-SMRAM is restricted.
1017 @retval FALSE Access to non-SMRAM is not restricted.
1018**/
1019BOOLEAN
1020IsRestrictedMemoryAccess (
1021 VOID
1022 )
1023{
1024 return mCpuSmmRestrictedMemoryAccess;
1025}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette