VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c@105905

Last change on this file since 105905 was 101291, checked in by vboxsync, 19 months ago

EFI/FirmwareNew: Make edk2-stable202308 build on all supported platforms (using gcc at least, msvc not tested yet), bugref:4643

/** @file
X64 processor specific functions to enable SMM profile.

Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"
#include "SmmProfileInternal.h"

//
// Current page index.
//
UINTN  mPFPageIndex;

//
// Pool for dynamically creating page tables in the page fault handler.
//
UINT64  mPFPageBuffer;

//
// Store the uplink information for each page being used.
//
UINT64  *mPFPageUplink[MAX_PF_PAGE_COUNT];

/**
  Create the SMM page table for the S3 path.

**/
VOID
InitSmmS3Cr3 (
  VOID
  )
{
  //
  // Generate a 4-level page table for the first 4GB of memory space and
  // return the address of the PML4 table (to be loaded into CR3).
  //
  // SmmS3Cr3 is only used by the S3Resume PEIM to switch the CPU from
  // 32-bit to 64-bit mode.
  //
  mSmmS3ResumeState->SmmS3Cr3 = (UINT32)GenSmmPageTable (Paging4Level, 32);

  return;
}
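
//
// Illustrative sketch (not part of this driver): an identity map of the
// first 4GB built with 2MB leaf pages -- the shape of table that
// GenSmmPageTable (Paging4Level, 32) is expected to return -- needs only six
// 4KB pages: 1 PML4, 1 PDPT, and 4 PDs of 512 2MB entries each. A minimal
// standalone model, assuming a hypothetical AllocatePageTablePage () that
// returns a zeroed, page-aligned 4KB page:
//
//   UINT64  *Pml4 = AllocatePageTablePage ();
//   UINT64  *Pdpt = AllocatePageTablePage ();
//   Pml4[0] = (UINT64)(UINTN)Pdpt | PAGE_ATTRIBUTE_BITS;
//   for (UINTN Pdpte = 0; Pdpte < 4; Pdpte++) {
//     UINT64  *Pd = AllocatePageTablePage ();
//     Pdpt[Pdpte] = (UINT64)(UINTN)Pd | PAGE_ATTRIBUTE_BITS;
//     for (UINTN Pde = 0; Pde < 512; Pde++) {
//       // 2MB leaf entry: physical address == linear address.
//       Pd[Pde] = (((UINT64)Pdpte << 30) + ((UINT64)Pde << 21)) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
//     }
//   }
//   // CR3 <- (UINT32)(UINTN)Pml4
//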

/**
  Allocate pages for creating 4KB pages out of 2MB pages when a page fault
  happens.

**/
VOID
InitPagesForPFHandler (
  VOID
  )
{
  VOID  *Address;

  //
  // Pre-allocate memory for the page fault handler.
  //
  Address = AllocatePages (MAX_PF_PAGE_COUNT);
  ASSERT (Address != NULL);

  mPFPageBuffer = (UINT64)(UINTN)Address;
  mPFPageIndex  = 0;
  ZeroMem ((VOID *)(UINTN)mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
  ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));

  return;
}

/**
  Allocate one page for creating a 4KB page out of a 2MB page.

  @param Uplink The address of the Page-Directory entry.

**/
VOID
AcquirePage (
  UINT64  *Uplink
  )
{
  UINT64  Address;

  //
  // Get the buffer.
  //
  Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
  ZeroMem ((VOID *)(UINTN)Address, EFI_PAGE_SIZE);

  //
  // Cut the previous uplink if it exists and wasn't overwritten.
  //
  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK) == Address)) {
    *mPFPageUplink[mPFPageIndex] = 0;
  }

  //
  // Link and record the current uplink.
  //
  *Uplink                     = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
  mPFPageUplink[mPFPageIndex] = Uplink;

  mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
}
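
//
// Illustrative note (not part of this driver): the page pool is used as a
// ring buffer. With a hypothetical MAX_PF_PAGE_COUNT of 2, three successive
// splits against PD entries A, B and C behave as follows:
//
//   AcquirePage (A);  // page 0 linked under A, slot 0 records A
//   AcquirePage (B);  // page 1 linked under B, slot 1 records B
//   AcquirePage (C);  // page 0 recycled: *A is zeroed first (the stale 4KB
//                     // table vanishes), then page 0 is relinked under C
//
// The "wasn't overwritten" check guards the case where A has meanwhile been
// repointed elsewhere; the handler must not zero an entry it no longer owns.
//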

/**
  Update the page table to map the memory correctly, in order to make the
  instruction which caused the page fault execute successfully. It also saves
  the original page table entry to be restored in the single-step exception
  handler.

  @param PageTable        PageTable Address.
  @param PFAddress        The memory address which caused the page fault exception.
  @param CpuIndex         The index of the processor.
  @param ErrorCode        The Error code of the exception.
  @param IsValidPFAddress The flag indicating whether SMM profile data needs to be added.

**/
VOID
RestorePageTableAbove4G (
  UINT64   *PageTable,
  UINT64   PFAddress,
  UINTN    CpuIndex,
  UINTN    ErrorCode,
  BOOLEAN  *IsValidPFAddress
  )
{
  UINTN     PTIndex;
  UINT64    Address;
  BOOLEAN   Nx;
  BOOLEAN   Existed;
  UINTN     Index;
  UINTN     PFIndex;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);

  //
  // This path handles page fault addresses above 4GB.
  //

  //
  // Check if the page fault address already exists in the page table.
  // If it exists but a page fault is still generated, there are two possible
  // reasons: 1. the present flag is set to 0; 2. an instruction fetch in a
  // protected memory range.
  //
  Existed   = FALSE;
  PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
  PTIndex   = 0;
  if (Enable5LevelPaging) {
    PTIndex = BitFieldRead64 (PFAddress, 48, 56);
  }

  if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
    // PML5E
    if (Enable5LevelPaging) {
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    }

    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      // PML4E
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
      if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
        // PDPTE
        PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        PTIndex   = BitFieldRead64 (PFAddress, 21, 29);
        // PD
        if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
          //
          // 2MB page
          //
          Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if ((Address & ~((1ull << 21) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1))) {
            Existed = TRUE;
          }
        } else {
          //
          // 4KB page
          //
          PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (PageTable != NULL) {
            //
            // When there is a valid entry mapping a 4KB page, there is no need
            // to create a new entry mapping 2MB.
            //
            PTIndex = BitFieldRead64 (PFAddress, 12, 20);
            Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
            if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
              Existed = TRUE;
            }
          }
        }
      }
    }
  }

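  //
  // Illustrative note (not part of this driver): each BitFieldRead64 call
  // above extracts one level's 9-bit table index from the faulting linear
  // address:
  //
  //   PML5E index : bits 56:48    PML4E index : bits 47:39
  //   PDPTE index : bits 38:30    PDE index   : bits 29:21
  //   PTE index   : bits 20:12    page offset : bits 11:0
  //
  // For example, PFAddress = 0x140203000 (just above 4GB) walks to PML4E 0,
  // PDPTE 5, PDE 1, PTE 3, since each index is (PFAddress >> Shift) & 0x1FF,
  // which is exactly what BitFieldRead64 (PFAddress, Shift, Shift + 8)
  // returns.
  //
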
  //
  // If the page entry does not exist in the page table at all, create a new entry.
  //
  if (!Existed) {
    if (IsAddressValid (PFAddress, &Nx)) {
      //
      // If a page fault address above 4GB lies in a protected range but still
      // causes a page fault exception, create a page entry for it, mark the
      // page table entry as present/rw and execution-disable, and do not save
      // this access into the SMM profile data.
      //
      *IsValidPFAddress = TRUE;
    }

    //
    // Create one entry in the page table for the page fault address.
    //
    SmiDefaultPFHandler ();
    //
    // Find the page table entry created just now.
    //
    PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
    PFAddress = AsmReadCr2 ();
    // PML5E
    if (Enable5LevelPaging) {
      PTIndex   = BitFieldRead64 (PFAddress, 48, 56);
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    }

    // PML4E
    PTIndex   = BitFieldRead64 (PFAddress, 39, 47);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PDPTE
    PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    // PD
    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
    Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
    //
    // Check if the 2MB-page entry needs to be changed to a 4KB-page entry.
    //
    if (IsAddressSplit (Address)) {
      AcquirePage (&PageTable[PTIndex]);

      // PTE
      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (Index = 0; Index < 512; Index++) {
        PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        if (!IsAddressValid (Address, &Nx)) {
          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
        }

        if (Nx && mXdSupported) {
          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
        }

        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
          PTIndex = Index;
        }

        Address += SIZE_4KB;
      } // end for PT
    } else {
      //
      // Update the 2MB page entry.
      //
      if (!IsAddressValid (Address, &Nx)) {
        //
        // Patch to remove the present flag and rw flag.
        //
        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
      }

      //
      // Set the XD bit to 1.
      //
      if (Nx && mXdSupported) {
        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
      }
    }
  }

  //
  // Record the old entries with non-present status.
  // Old entries include the memory the instruction is at and the memory the
  // instruction accesses.
  //
  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
    PFIndex                                = mPFEntryCount[CpuIndex];
    mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
    mPFEntryCount[CpuIndex]++;
  }

  //
  // Add the present flag or clear the XD flag so that the faulting
  // instruction can execute successfully.
  //
  PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
    //
    // If the page fault was caused by an instruction fetch, clear the XD bit
    // in the entry.
    //
    PageTable[PTIndex] &= ~IA32_PG_NX;
  }

  return;
}
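
//
// Illustrative note (not part of this driver): the IsAddressSplit branch
// above converts one 2MB mapping into 512 4KB mappings covering the same
// physical range, so that individual 4KB pages can be marked non-present or
// XD for profiling. A minimal standalone sketch, assuming Pde points at the
// old 2MB PDE and NewPt at a zeroed, page-aligned 4KB page:
//
//   UINT64  Base = *Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
//   for (UINTN Index = 0; Index < 512; Index++) {
//     NewPt[Index] = (Base + ((UINT64)Index << 12)) | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
//   }
//   // Repoint the PDE at the new page table; IA32_PG_PS is deliberately clear.
//   *Pde = (UINT64)(UINTN)NewPt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
//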

/**
  Clear TF in FLAGS.

  @param SystemContext A pointer to the processor context when
                       the interrupt occurred on the processor.

**/
VOID
ClearTrapFlag (
  IN OUT EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
}
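
//
// Illustrative note (not part of this driver): RFLAGS.TF is bit 8 (BIT8).
// The SMM profile page fault handler sets TF so that the faulting
// instruction executes exactly once and then raises a debug (#DB)
// single-step exception. That handler restores the entries recorded in
// mLastPFEntryValue/mLastPFEntryPointer and calls ClearTrapFlag to stop
// single-stepping. A minimal sketch of the restore step, assuming the same
// per-CPU arrays used above:
//
//   while (mPFEntryCount[CpuIndex] != 0) {
//     mPFEntryCount[CpuIndex]--;
//     PFIndex = mPFEntryCount[CpuIndex];
//     *mLastPFEntryPointer[CpuIndex][PFIndex] = mLastPFEntryValue[CpuIndex][PFIndex];
//   }
//   ClearTrapFlag (SystemContext);
//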