Changeset 89983 in vbox for trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg
- Timestamp: Jul 1, 2021 8:17:41 AM
- svn:sync-xref-src-repo-rev: 145450
- Location: trunk/src/VBox/Devices/EFI/FirmwareNew
- Files: 43 added, 9 deleted, 81 edited
trunk/src/VBox/Devices/EFI/FirmwareNew
- Property svn:mergeinfo changed
  from: /vendor/edk2/current 103735-103757,103769-103776,129194-139864
  to:   /vendor/edk2/current 103735-103757,103769-103776,129194-145445
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuDxe/CpuDxe.c
(r85718 → r89983)
- Drop the file-local CACHE_ATTRIBUTE_MASK (EFI_MEMORY_UC | WC | WT | WB | UCE | WP) and MEMORY_ATTRIBUTE_MASK (EFI_MEMORY_RP | XP | RO) defines.
- Split the requested attributes with the standard masks instead: CacheAttributes = Attributes & EFI_CACHE_ATTRIBUTE_MASK and MemoryAttributes = Attributes & EFI_MEMORY_ATTRIBUTE_MASK; the consistency check (Attributes != (CacheAttributes | MemoryAttributes)) is unchanged (see the sketch just below this entry).
- In both gDS->SetMemorySpaceAttributes() calls that merge GCD capabilities into the existing attributes, mask with ~EFI_CACHE_ATTRIBUTE_MASK instead of ~EFI_MEMORY_CACHETYPE_MASK.
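The mask split above is easy to misread in diff form. A minimal, self-contained sketch of the pattern, assuming the EFI_CACHE_ATTRIBUTE_MASK / EFI_MEMORY_ATTRIBUTE_MASK definitions are available from the MdePkg headers (the changeset references them, it does not define them); SplitMemoryAttributes() is a hypothetical helper, not a function in CpuDxe:

    #include <Uefi.h>

    //
    // Sketch only: split a caller-supplied attribute mask the way the reworked
    // CpuDxe code does, and reject any bit outside the two standard masks.
    //
    EFI_STATUS
    SplitMemoryAttributes (
      IN  UINT64  Attributes,
      OUT UINT64  *CacheAttributes,
      OUT UINT64  *MemoryAttributes
      )
    {
      *CacheAttributes  = Attributes & EFI_CACHE_ATTRIBUTE_MASK;   // UC/WC/WT/WB/UCE/WP
      *MemoryAttributes = Attributes & EFI_MEMORY_ATTRIBUTE_MASK;  // RP/XP/RO

      if (Attributes != (*CacheAttributes | *MemoryAttributes)) {
        //
        // Some requested bit is neither a cache type nor a memory protection
        // attribute, so the request cannot be honored.
        //
        return EFI_INVALID_PARAMETER;
      }
      return EFI_SUCCESS;
    }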
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuDxe/CpuDxe.h
(r85718 → r89983)
- Remove the file-local EFI_MEMORY_CACHETYPE_MASK (EFI_MEMORY_UC | WC | WT | WB | UCE) and EFI_MEMORY_PAGETYPE_MASK (EFI_MEMORY_RP | XP | RO) defines; HEAP_GUARD_NONSTOP_MODE is unchanged.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuDxe/CpuGdt.c
(r85718 → r89983)
- Update the Intel copyright to 2006 - 2021.
- Rename the module-global GdtTemplate to mGdtTemplate and the locals gdt/gdtPtr to Gdt/Gdtr.
- Replace the unused SPARE4_SEL template entry with SYS_CODE16_SEL: limit 0x0FFFF, access byte 0x09A (present, ring 0, code, execute/read), flags 0x08F (page-granular, 16-bit).
- InitGlobalDescriptorTable(): in the upstream (non-VBox) build, allocate the GDT below 4GB with gBS->AllocatePages (AllocateMaxAddress, EfiRuntimeServicesData, ...), because APs reuse this GDT when they are woken up from real mode and therefore need it addressable there (see the sketch just below this entry); the VBox build keeps AllocateReservedPool() plus 8-byte alignment to work around an Apple bootloader issue.
- Load the GDTR from the new Gdt/Gdtr names; the data selectors are still set to CPU_DATA_SEL.
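The below-4GB constraint is the interesting part of this change. A sketch of the allocation pattern the changeset uses: seed the address with the highest acceptable value and let AllocateMaxAddress search downward. AllocateRuntimeDataBelow4G() is a hypothetical wrapper name, not code from the driver:

    #include <Uefi.h>
    #include <Library/UefiBootServicesTableLib.h>
    #include <Library/DebugLib.h>

    STATIC
    VOID *
    AllocateRuntimeDataBelow4G (
      IN UINTN  Size
      )
    {
      EFI_STATUS            Status;
      EFI_PHYSICAL_ADDRESS  Memory;

      //
      // AllocateMaxAddress treats Memory as an upper bound on input and
      // returns the actual allocation address on output.
      //
      Memory = SIZE_4GB - 1;
      Status = gBS->AllocatePages (
                      AllocateMaxAddress,
                      EfiRuntimeServicesData,
                      EFI_SIZE_TO_PAGES (Size),
                      &Memory
                      );
      if (EFI_ERROR (Status)) {
        return NULL;
      }
      ASSERT (Memory < SIZE_4GB);
      return (VOID *) (UINTN) Memory;
    }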
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuDxe/CpuGdt.h
(r85718 → r89983)
- Rename the GDT_ENTRIES member Spare4 to SysCode16 and the SPARE4_SEL selector define to SYS_CODE16_SEL, matching the new template entry in CpuGdt.c.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuDxe/CpuPageTable.c
(r85718 → r89983)
- Validate the requested attributes against ~EFI_MEMORY_ATTRIBUTE_MASK instead of the hand-written ~(EFI_MEMORY_RP | EFI_MEMORY_RO | EFI_MEMORY_XP).
- When syncing paging attributes back into the GCD memory space map, compare and rebuild the attributes with EFI_MEMORY_ATTRIBUTE_MASK instead of the removed EFI_MEMORY_PAGETYPE_MASK.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuDxe/X64/CpuAsm.nasm
(r80721 → r89983)
- SetCodeSelector: instead of reserving stack space, building a far pointer there, and doing "jmp dword far [rsp]", push the new CS selector (rcx) and the address of setCodeSelectorLongJump, then reload CS with "o64 retf"; the explicit rsp adjustment after the jump is no longer needed.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuIo2Smm/CpuIo2Smm.c
(r80721 → r89983)
- Add "Copyright (c) Microsoft Corporation." to the file header.
- Replace #include "CpuIo2Smm.h" with <PiSmm.h> and the new shared "CpuIo2Mm.h".
- Remove the entire SMM CPU I/O Protocol implementation from this file: the mHandle and mSmmCpuIo2 globals, the mStride width-to-increment lookup table (1/2/4/8 bytes for SMM_IO_UINT8..UINT64), CpuIoCheckParameter() with its NULL-buffer, width-range, overflow-safe address-range and alignment checks (see the sketch just below this entry), and the CpuMemoryServiceRead/Write and CpuIoServiceRead/Write loops that dispatch to MmioRead/Write8/16/32/64 and IoRead/Write8/16/32. All of it moves to the shared CpuIo2Mm.c.
- The entry point, now described as the module entry point for the Traditional MM CpuIoProtocol driver, simply returns CommonCpuIo2Initialize() instead of copying mSmmCpuIo2 into gSmst->SmmIo and installing gEfiSmmCpuIo2ProtocolGuid itself.
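The removed CpuIoCheckParameter() contains the only non-obvious logic in this file: a range check written so that Address + Size * Count is never computed directly and therefore cannot overflow. A standalone sketch of that check, using BaseLib's RShiftU64/LShiftU64 as the original does; TransferFitsBelowLimit() is a hypothetical name, and Limit is assumed to be of the form 2^n - 1 (MAX_ADDRESS or MAX_IO_PORT_ADDRESS), as the original comment states:

    #include <Uefi.h>
    #include <Library/BaseLib.h>

    BOOLEAN
    TransferFitsBelowLimit (
      IN UINT64  Limit,    // highest valid address (2^n - 1)
      IN UINT64  Address,  // first address of the transfer
      IN UINTN   Width,    // log2 of access size: 0=byte, 1=word, 2=dword, 3=qword
      IN UINTN   Count     // number of accesses
      )
    {
      UINT64  MaxCount;

      if (Count == 0) {
        return (BOOLEAN) (Address <= Limit);
      }
      //
      // MaxCount is the largest zero-based element index that still fits
      // below Limit; comparing against it avoids the Address + Size * Count
      // sum, which could wrap for large Address or Count values.
      //
      MaxCount = RShiftU64 (Limit, Width);
      if (MaxCount < (UINT64) (Count - 1)) {
        return FALSE;
      }
      return (BOOLEAN) (Address <= LShiftU64 (MaxCount - Count + 1, Width));
    }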
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuIo2Smm/CpuIo2Smm.inf
(r80721 → r89983)
- [Sources]: CpuIo2Smm.h is replaced by the shared CpuIo2Mm.c and CpuIo2Mm.h (CpuIo2Smm.c remains).
- [LibraryClasses]: SmmServicesTableLib is replaced by MmServicesTableLib.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuMpPei/CpuMpPei.c
(r80721 → r89983)
- Update the Intel copyright to 2015 - 2021.
- Allocate the known-good exception stack area with AllocatePages (EFI_SIZE_TO_PAGES (NewStackSize * NumberOfProcessors)) instead of PeiServicesAllocatePool(), and bail out when StackTop is NULL rather than checking an EFI_STATUS.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuMpPei/CpuMpPei.h
(r80721 → r89983)
- Update the Intel copyright to 2015 - 2021 and add #include <Library/MemoryAllocationLib.h>.
- Declare a new MigrateGdt() function that migrates the Global Descriptor Table to permanent memory (EFI_SUCCESS on success, EFI_OUT_OF_RESOURCES if no memory is available).
- Fix the "Enabl/setup stack guard" comment typo ("Enable/setup").
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuMpPei/CpuMpPei.inf
(r80721 → r89983)
- Update the Intel copyright to 2015 - 2021.
- Add MemoryAllocationLib to [LibraryClasses] and a [Guids] section consuming gEdkiiMigratedFvInfoGuid (## SOMETIMES_CONSUMES ## HOB).
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuMpPei/CpuPaging.c
(r81196 → r89983)
- Add #include <Guid/MigratedFvInfo.h>; fix the "PAE paing" and "Enabl/setup" comment typos.
- In the memory-discovered callback, enable paging when either PcdCpuStackGuard is set (and IA32 PAE is supported) or a gEdkiiMigratedFvInfoGuid HOB is present, instead of only for the stack-guard case.
- Walk every gEdkiiMigratedFvInfoGuid HOB and mark the original FV range not present via ConvertMemoryPageAttributes (MigratedFvInfo->FvOrgBase, MigratedFvInfo->FvLength, 0), so code that touches the SPI range after NEM is disabled takes a #PF instead of a potentially exploitable silent read; finish with CpuFlushTlb(). The loop pattern is sketched just below this entry.
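The HOB walk added here follows the usual GetFirstGuidHob()/GetNextGuidHob() pattern from HobLib. A sketch of that loop, with the per-HOB work factored into a hypothetical callback; the driver itself calls ConvertMemoryPageAttributes() directly on each entry:

    #include <PiPei.h>
    #include <Library/HobLib.h>
    #include <Guid/MigratedFvInfo.h>

    VOID
    ForEachMigratedFv (
      IN VOID (*Action)(EDKII_MIGRATED_FV_INFO *Info)   // hypothetical callback
      )
    {
      EFI_PEI_HOB_POINTERS  Hob;

      Hob.Raw = GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid);
      while (Hob.Raw != NULL) {
        Action ((EDKII_MIGRATED_FV_INFO *) GET_GUID_HOB_DATA (Hob));
        //
        // Step past the current HOB before searching for the next instance
        // of the same GUID, exactly as the driver's loop does.
        //
        Hob.Raw = GET_NEXT_HOB (Hob);
        Hob.Raw = GetNextGuidHob (&gEdkiiMigratedFvInfoGuid, Hob.Raw);
      }
    }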
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/CpuS3DataDxe/CpuS3Data.c
(r80721 → r89983)
- When no previous ACPI_CPU_DATA exists, no longer allocate and pre-populate empty RegisterTable/PreSmmInitRegisterTable arrays (one pair per CPU, keyed by initial APIC ID); the now-unused TableSize, RegisterTable, Index and ProcessorInfoBuffer locals are removed and the two fields are simply left at zero, which AcpiCpuData.h now documents as valid.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Include/AcpiCpuData.h
(r80721 → r89983)
- Update the Intel copyright to 2013 - 2020.
- CPU_STATUS_INFORMATION: replace ValidCoreCountPerPackage with ThreadCountPerPackage (an array of UINT32 thread counts, one per package) and add ThreadCountPerCore (an array of UINT8 thread counts with PackageCount * MaxCoreCount elements); either may be 0 when the platform does not need the MSR-dependency semaphores at S3 resume.
- Document that PreSmmInitRegisterTable and RegisterTable may be set to 0 when no register table is required for any CPU.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Include/Library/MtrrLib.h
(r80721 → r89983)
- Update the Intel copyright to 2008 - 2020.
- Remove the MtrrGetVariableMtrr(), MtrrSetVariableMtrr() and MtrrSetFixedMtrr() library APIs; MtrrGetFixedMtrr() is kept.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Include/Library/UefiCpuLib.h
(r80721 → r89983)
- Add an AMD copyright line (2020).
- Declare a new StandardSignatureIsAuthenticAMD() function that reports whether the standard CPUID signature is "AuthenticAMD".
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/BaseUefiCpuLib/BaseUefiCpuLib.inf
(r80721 → r89983)
- Add an AMD copyright line.
- Add BaseUefiCpuLib.c to [Sources] and BaseLib to [LibraryClasses].
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/BaseXApicLib/BaseXApicLib.c
(r80721 → r89983)
- Extend the AMD copyright to 2017 - 2020.
- Add #include <Library/UefiCpuLib.h> and drop the file-local StandardSignatureIsAuthenticAMD() implementation; the check is now provided once by UefiCpuLib (the removed body is shown just below this entry).
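For reference, the removed implementation (now provided centrally by BaseUefiCpuLib) is just a CPUID leaf 0 vendor-string comparison. A self-contained sketch, assuming the usual Register/Intel/Cpuid.h and Register/Amd/Cpuid.h definitions for the constants; IsAuthenticAmdSignature() is a hypothetical name chosen to avoid clashing with the real library function:

    #include <Uefi.h>
    #include <Library/BaseLib.h>
    #include <Register/Intel/Cpuid.h>
    #include <Register/Amd/Cpuid.h>

    BOOLEAN
    IsAuthenticAmdSignature (
      VOID
      )
    {
      UINT32  RegEbx;
      UINT32  RegEcx;
      UINT32  RegEdx;

      //
      // CPUID leaf 0 returns the vendor string in EBX:EDX:ECX; the AMD
      // constants spell out "AuthenticAMD".
      //
      AsmCpuid (CPUID_SIGNATURE, NULL, &RegEbx, &RegEcx, &RegEdx);
      return (BOOLEAN) (RegEbx == CPUID_SIGNATURE_AUTHENTIC_AMD_EBX &&
                        RegEcx == CPUID_SIGNATURE_AUTHENTIC_AMD_ECX &&
                        RegEdx == CPUID_SIGNATURE_AUTHENTIC_AMD_EDX);
    }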
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/BaseXApicLib/BaseXApicLib.inf
(r80721 → r89983)
- Add an AMD copyright line and add UefiCpuLib to [LibraryClasses].
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/BaseXApicX2ApicLib/BaseXApicX2ApicLib.c
(r80721 → r89983)
- Same change as BaseXApicLib.c: extend the AMD copyright to 2017 - 2020, include <Library/UefiCpuLib.h>, and drop the duplicated file-local StandardSignatureIsAuthenticAMD() implementation.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/BaseXApicX2ApicLib/BaseXApicX2ApicLib.inf
(r80721 → r89983)
- Add an AMD copyright line and add UefiCpuLib to [LibraryClasses].
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuCommonFeaturesLib/MachineCheck.c
(r80721 → r89983)
- In the LMCE capability probe, switch the debug level macro from EFI_D_INFO to DEBUG_INFO and fix the "LMCE eanble" typo in the message.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/CpuExceptionCommon.c
(r80721 → r89983)
- mErrorCodeFlag (one bit per vector; 1 means the CPU pushes an error code) changes from 0x00227d00 to 0x20227d00, setting bit 29 for the SEV-ES #VC exception (see the sketch just below this entry).
- Extend the exception-name table past "#CP - Control Protection" with seven "Reserved" entries and "#VC - VMM Communication" for vector 29.
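mErrorCodeFlag is consumed by the assembly entry stubs, but its encoding is simple: bit N is set when vector N pushes an error code. A small C sketch that makes the new bit visible; VectorPushesErrorCode() and mErrorCodeFlagSketch are illustrative names, not code from the library, and VC_EXCEPTION = 29 matches the %define added to the .nasm files in this changeset:

    #include <Uefi.h>

    #define VC_EXCEPTION  29

    STATIC CONST UINT32  mErrorCodeFlagSketch = 0x20227d00;

    BOOLEAN
    VectorPushesErrorCode (
      IN UINT8  Vector
      )
    {
      if (Vector >= 32) {
        return FALSE;   // only the 32 architectural vectors are covered
      }
      return (BOOLEAN) (((mErrorCodeFlagSketch >> Vector) & 1) != 0);
    }

    //
    // VectorPushesErrorCode (VC_EXCEPTION) is TRUE with the new constant,
    // which is the point of flipping bit 29 when #VC support was added.
    //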
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/CpuExceptionCommon.h
(r80721 → r89983)
- ArchUpdateIdtEntry() prototype: the IdtEntry parameter changes from IN to OUT and the parameter list is re-aligned.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/DxeCpuExceptionHandlerLib.inf
(r85718 → r89983)
- Add a [FeaturePcd] section consuming gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard and add VmgExitLib to [LibraryClasses].
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/Ia32/ArchExceptionHandler.c
(r80721 → r89983)
- ArchUpdateIdtEntry(): mark the IdtEntry parameter OUT instead of IN, matching the updated prototype.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/PeiCpuExceptionHandlerLib.inf
(r85718 → r89983)
- Add VmgExitLib to [LibraryClasses] and a [FeaturePcd] section consuming gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/PeiDxeSmmCpuException.c
(r85718 → r89983)
- Include <Library/VmgExitLib.h> ahead of the common header (DebugLib is moved up accordingly).
- In the common exception handler, intercept VC_EXCEPTION before the registered-handler lookup: #VC must be handled as soon as exception handling is enabled, so it cannot go through RegisterCpuInterruptHandler(). VmgExitHandleVc() is called with the exception type and system context; on EFI_SUCCESS the handler returns, otherwise ExceptionType carries the (possibly new) exception value and normal processing continues.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/SecPeiCpuException.c
(r80721 → r89983)
- Add the same VC_EXCEPTION / VmgExitHandleVc() interception at the top of the SEC/PEI exception handler (RegisterCpuInterruptHandler() is not supported there anyway), including <Library/VmgExitLib.h>.
- Fix the "CPU exeption library" comment typo.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/SecPeiCpuExceptionHandlerLib.inf
(r80721 → r89983)
- Add VmgExitLib to [LibraryClasses] and a [FeaturePcd] section consuming gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/SmmCpuExceptionHandlerLib.inf
(r85718 → r89983)
- Extend LIBRARY_CLASS from DXE_SMM_DRIVER only to DXE_SMM_DRIVER MM_STANDALONE MM_CORE_STANDALONE.
- Add VmgExitLib to [LibraryClasses] and a [FeaturePcd] section consuming gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/X64/ArchExceptionHandler.c
(r80721 → r89983)
- ArchUpdateIdtEntry(): mark the IdtEntry parameter OUT instead of IN, matching the updated prototype.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/X64/ExceptionHandlerAsm.nasm
(r85718 → r89983)
- Define VC_EXCEPTION as 29.
- When the incoming vector is #VC, skip reading DR0-DR7 and push six zeros instead (new VcDebugRegs path rejoining the normal path at DrFinish); touching the debug registers under SEV-ES would itself raise #VC and recurse.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/X64/Xcode5ExceptionHandlerAsm.nasm
(r85718 → r89983)
- %include "Nasm.inc", define VC_EXCEPTION as 29, and declare FeaturePcdGet (PcdCpuSmmStackGuard) as an extern.
- Add the same #VC debug-register skip (VcDebugRegs/DrFinish) as ExceptionHandlerAsm.nasm.
- Before the final far return, when PcdCpuSmmStackGuard is set and CET is enabled (CR4 bit 23), clear the shadow-stack token busy bit: INCSSP past the cs:lip:prevssp/token entries, SAVEPREVSSP to create a restore token, READSSP, CLRSSBSY on the busy token, RSTORSSP back to the restore token, then INCSSP to pop the newly created save token. The long comment documents the sample shadow-stack layout (0xFD0-0xFF8); the CET instructions need NASM 2.15.01 or later, hence the Nasm.inc macros (INCSSP_RAX, SAVEPREVSSP, READSSP_RAX, CLRSSBSY_RAX, RSTORSSP_RAX).
- The existing "DB 0x48 / retf" far-return composition itself is unchanged.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuExceptionHandlerLib/Xcode5SecPeiCpuExceptionHandlerLib.inf
(r85718 → r89983)
- Add VmgExitLib to [LibraryClasses] and a [FeaturePcd] section consuming gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/CpuTimerLib/BaseCpuTimerLib.inf
(r80721 → r89983)
- Update the Intel copyright to 2021.
- Drop the module-type restriction on LIBRARY_CLASS: "TimerLib|SEC PEI_CORE PEIM" becomes plain "TimerLib".
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MpInitLib/DxeMpInitLib.inf
(r85718 → r89983)
- Update the Intel copyright to 2016 - 2021.
- Replace the arch-specific Ia32/MpEqu.inc and X64/MpEqu.inc with a single common MpEqu.inc.
- Add VmgExitLib and MicrocodeLib to [LibraryClasses].
- Add PCDs: gUefiCpuPkgTokenSpaceGuid.PcdSevEsIsEnabled (CONSUMES), gUefiCpuPkgTokenSpaceGuid.PcdSevEsWorkAreaBase (SOMETIMES_CONSUMES) and gEfiMdeModulePkgTokenSpaceGuid.PcdGhcbBase (CONSUMES).
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
(r85718 → r89983)
- Include <Library/VmgExitLib.h>, <Register/Amd/Fam17Msr.h> and <Register/Amd/Ghcb.h>.
- Allocate the AP wakeup buffer as EfiReservedMemoryType instead of EfiBootServicesData when PcdSevEsIsEnabled is set, so it survives into the OS.
- Add GetSevEsAPMemory(): allocates one reserved page below 4GB for the SEV-ES AP jump table, logs the address, and registers it with the hypervisor via a GHCB VMGEXIT (MSR_SEV_ES_GHCB, SVM_EXIT_AP_JUMP_TABLE) bracketed by VmgInit()/VmgDone().
- Add GetProtectedMode16CS(), which scans the current GDT for a 16-bit protected-mode code segment (Type > 8, L == 0, DB == 0); fix GetProtectedModeCS() to test GdtEntry->Bits.DB == 1 for the 32-bit code segment instead of re-testing Bits.L.
- RelocateApLoop(): when SEV-ES is enabled, use CpuMpData->SevEsAPResetStackStart as the stack base instead of mReservedTopOfApStack, and pass the additional Pm16CodeSegment, SevEsAPBuffer and WakeupBuffer arguments to AsmRelocateApLoopFunc().
- In the AP-loop change callback, record CpuMpData->Pm16CodeSegment; when SEV-ES is enabled and a wakeup buffer below 1MB exists, copy the SwitchToRealPM16Mode code from the assembly address map into that buffer so an OS can later wake the APs through the jump table.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MpInitLib/Ia32/MpFuncs.nasm
(r80721 → r89983)
- Update the Intel copyright to 2015 - 2021.
- Replace the hand-maintained numeric offsets (BufferStartLocation, DataSegmentLocation, GdtrLocation, IdtrLocation, EnableExecuteDisableLocation, Cr3Location, InitFlagLocation, NumApsExecutingLocation, StackSizeLocation, StackStartAddressLocation, CpuInfoLocation, ApProcedureLocation, ...) with MP_CPU_EXCHANGE_INFO_FIELD()/MP_CPU_EXCHANGE_INFO_OFFSET references from the shared MpEqu.inc, and index the per-CPU info and exchange-role data through the CPU_INFO_IN_HOB and CPU_EXCHANGE_ROLE_INFO structure fields instead of raw byte offsets.
- Replace the TestLock/Releaselock spin lock used to assign ApIndex with a single "lock xadd" on the ApIndex field.
- Add a SwitchToRealProc stub (never reached; the 16-bit switch path is not used in 32-bit mode).
- AsmRelocateApLoop gains Pm16CodeSegment, SevEsAPJumpTable and WakeupBuffer parameters, which are SEV-ES specific and ignored on IA32.
- AsmGetAddressMap now fills the MP_ASSEMBLY_ADDRESS_MAP structure by field name and populates the new SwitchToReal* size/offset members (the PM16-mode offset and size are 0 on IA32).
- AsmExchangeRole accesses the GDTR, IDTR, stack pointer and switch state through CPU_EXCHANGE_ROLE_INFO fields.
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MpInitLib/Microcode.c
r85718 r89983 2 2 Implementation of loading microcode on processors. 3 3 4 Copyright (c) 2015 - 202 0, Intel Corporation. All rights reserved.<BR>4 Copyright (c) 2015 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 SPDX-License-Identifier: BSD-2-Clause-Patent 6 6 … … 10 10 11 11 /** 12 Get microcode update signature of currently loaded microcode update.13 14 @return Microcode signature.15 **/16 UINT3217 GetCurrentMicrocodeSignature (18 VOID19 )20 {21 MSR_IA32_BIOS_SIGN_ID_REGISTER BiosSignIdMsr;22 23 AsmWriteMsr64 (MSR_IA32_BIOS_SIGN_ID, 0);24 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, NULL);25 BiosSignIdMsr.Uint64 = AsmReadMsr64 (MSR_IA32_BIOS_SIGN_ID);26 return BiosSignIdMsr.Bits.MicrocodeUpdateSignature;27 }28 29 /**30 12 Detect whether specified processor can find matching microcode patch and load it. 31 32 Microcode Payload as the following format:33 +----------------------------------------+------------------+34 | CPU_MICROCODE_HEADER | |35 +----------------------------------------+ CheckSum Part1 |36 | Microcode Binary | |37 +----------------------------------------+------------------+38 | CPU_MICROCODE_EXTENDED_TABLE_HEADER | |39 +----------------------------------------+ CheckSum Part2 |40 | CPU_MICROCODE_EXTENDED_TABLE | |41 | ... | |42 +----------------------------------------+------------------+43 44 There may by multiple CPU_MICROCODE_EXTENDED_TABLE in this format.45 The count of CPU_MICROCODE_EXTENDED_TABLE is indicated by ExtendedSignatureCount46 of CPU_MICROCODE_EXTENDED_TABLE_HEADER structure.47 48 When we are trying to verify the CheckSum32 with extended table.49 We should use the fields of exnteded table to replace the corresponding50 fields in CPU_MICROCODE_HEADER structure, and recalculate the51 CheckSum32 with CPU_MICROCODE_HEADER + Microcode Binary. We named52 it as CheckSum Part3.53 54 The CheckSum Part2 is used to verify the CPU_MICROCODE_EXTENDED_TABLE_HEADER55 and CPU_MICROCODE_EXTENDED_TABLE parts. We should make sure CheckSum Part256 is correct before we are going to verify each CPU_MICROCODE_EXTENDED_TABLE.57 58 Only ProcessorSignature, ProcessorFlag and CheckSum are different between59 CheckSum Part1 and CheckSum Part3. To avoid multiple computing CheckSum Part3.60 Save an in-complete CheckSum32 from CheckSum Part1 for common parts.61 When we are going to calculate CheckSum32, just should use the corresponding part62 of the ProcessorSignature, ProcessorFlag and CheckSum with in-complete CheckSum32.63 64 Notes: CheckSum32 is not a strong verification.65 It does not guarantee that the data has not been modified.66 CPU has its own mechanism to verify Microcode Binary part.67 13 68 14 @param[in] CpuMpData The pointer to CPU MP Data structure. 
… … 77 23 ) 78 24 { 79 UINT32 ExtendedTableLength; 80 UINT32 ExtendedTableCount; 81 CPU_MICROCODE_EXTENDED_TABLE *ExtendedTable; 82 CPU_MICROCODE_EXTENDED_TABLE_HEADER *ExtendedTableHeader; 83 CPU_MICROCODE_HEADER *MicrocodeEntryPoint; 25 CPU_MICROCODE_HEADER *Microcode; 84 26 UINTN MicrocodeEnd; 85 UINTN Index; 86 UINT8 PlatformId; 87 CPUID_VERSION_INFO_EAX Eax; 88 CPU_AP_DATA *CpuData; 89 UINT32 CurrentRevision; 27 CPU_AP_DATA *BspData; 90 28 UINT32 LatestRevision; 91 UINTN TotalSize; 92 UINT32 CheckSum32; 93 UINT32 InCompleteCheckSum32; 94 BOOLEAN CorrectMicrocode; 95 VOID *MicrocodeData; 96 MSR_IA32_PLATFORM_ID_REGISTER PlatformIdMsr; 29 CPU_MICROCODE_HEADER *LatestMicrocode; 97 30 UINT32 ThreadId; 98 BOOLEAN IsBspCallIn;31 EDKII_PEI_MICROCODE_CPU_ID MicrocodeCpuId; 99 32 100 33 if (CpuMpData->MicrocodePatchRegionSize == 0) { … … 104 37 return; 105 38 } 106 107 CurrentRevision = GetCurrentMicrocodeSignature ();108 IsBspCallIn = (ProcessorNumber == (UINTN)CpuMpData->BspNumber) ? TRUE : FALSE;109 39 110 40 GetProcessorLocationByApicId (GetInitialApicId (), NULL, NULL, &ThreadId); … … 116 46 } 117 47 118 ExtendedTableLength = 0; 119 // 120 // Here data of CPUID leafs have not been collected into context buffer, so 121 // GetProcessorCpuid() cannot be used here to retrieve CPUID data. 122 // 123 AsmCpuid (CPUID_VERSION_INFO, &Eax.Uint32, NULL, NULL, NULL); 124 125 // 126 // The index of platform information resides in bits 50:52 of MSR IA32_PLATFORM_ID 127 // 128 PlatformIdMsr.Uint64 = AsmReadMsr64 (MSR_IA32_PLATFORM_ID); 129 PlatformId = (UINT8) PlatformIdMsr.Bits.PlatformId; 130 131 132 // 133 // Check whether AP has same processor with BSP. 134 // If yes, direct use microcode info saved by BSP. 135 // 136 if (!IsBspCallIn) { 137 // 138 // Get the CPU data for BSP 139 // 140 CpuData = &(CpuMpData->CpuData[CpuMpData->BspNumber]); 141 if ((CpuData->ProcessorSignature == Eax.Uint32) && 142 (CpuData->PlatformId == PlatformId) && 143 (CpuData->MicrocodeEntryAddr != 0)) { 144 MicrocodeEntryPoint = (CPU_MICROCODE_HEADER *)(UINTN) CpuData->MicrocodeEntryAddr; 145 MicrocodeData = (VOID *) (MicrocodeEntryPoint + 1); 146 LatestRevision = MicrocodeEntryPoint->UpdateRevision; 147 goto Done; 48 GetProcessorMicrocodeCpuId (&MicrocodeCpuId); 49 50 if (ProcessorNumber != (UINTN) CpuMpData->BspNumber) { 51 // 52 // Direct use microcode of BSP if AP is the same as BSP. 53 // Assume BSP calls this routine() before AP. 54 // 55 BspData = &(CpuMpData->CpuData[CpuMpData->BspNumber]); 56 if ((BspData->ProcessorSignature == MicrocodeCpuId.ProcessorSignature) && 57 (BspData->PlatformId == MicrocodeCpuId.PlatformId) && 58 (BspData->MicrocodeEntryAddr != 0)) { 59 LatestMicrocode = (CPU_MICROCODE_HEADER *)(UINTN) BspData->MicrocodeEntryAddr; 60 LatestRevision = LatestMicrocode->UpdateRevision; 61 goto LoadMicrocode; 148 62 } 149 63 } 150 64 151 LatestRevision = 0; 152 MicrocodeData = NULL; 153 MicrocodeEnd = (UINTN) (CpuMpData->MicrocodePatchAddress + CpuMpData->MicrocodePatchRegionSize); 154 MicrocodeEntryPoint = (CPU_MICROCODE_HEADER *) (UINTN) CpuMpData->MicrocodePatchAddress; 65 // 66 // BSP or AP which is different from BSP runs here 67 // Use 0 as the starting revision to search for microcode because MicrocodePatchInfo HOB needs 68 // the latest microcode location even it's loaded to the processor. 
69 // 70 LatestRevision = 0; 71 LatestMicrocode = NULL; 72 Microcode = (CPU_MICROCODE_HEADER *) (UINTN) CpuMpData->MicrocodePatchAddress; 73 MicrocodeEnd = (UINTN) Microcode + (UINTN) CpuMpData->MicrocodePatchRegionSize; 155 74 156 75 do { 157 // 158 // Check if the microcode is for the Cpu and the version is newer 159 // and the update can be processed on the platform 160 // 161 CorrectMicrocode = FALSE; 162 163 if (MicrocodeEntryPoint->DataSize == 0) { 164 TotalSize = sizeof (CPU_MICROCODE_HEADER) + 2000; 165 } else { 166 TotalSize = sizeof (CPU_MICROCODE_HEADER) + MicrocodeEntryPoint->DataSize; 167 } 168 169 /// 170 /// 0x0 MicrocodeBegin MicrocodeEntry MicrocodeEnd 0xffffffff 171 /// |--------------|---------------|---------------|---------------| 172 /// valid TotalSize 173 /// TotalSize is only valid between 0 and (MicrocodeEnd - MicrocodeEntry). 174 /// And it should be aligned with 4 bytes. 175 /// If the TotalSize is invalid, skip 1KB to check next entry. 176 /// 177 if ( (UINTN)MicrocodeEntryPoint > (MAX_ADDRESS - TotalSize) || 178 ((UINTN)MicrocodeEntryPoint + TotalSize) > MicrocodeEnd || 179 (TotalSize & 0x3) != 0 180 ) { 181 MicrocodeEntryPoint = (CPU_MICROCODE_HEADER *) (((UINTN) MicrocodeEntryPoint) + SIZE_1KB); 182 continue; 183 } 184 185 // 186 // Save an in-complete CheckSum32 from CheckSum Part1 for common parts. 187 // 188 InCompleteCheckSum32 = CalculateSum32 ( 189 (UINT32 *) MicrocodeEntryPoint, 190 TotalSize 191 ); 192 InCompleteCheckSum32 -= MicrocodeEntryPoint->ProcessorSignature.Uint32; 193 InCompleteCheckSum32 -= MicrocodeEntryPoint->ProcessorFlags; 194 InCompleteCheckSum32 -= MicrocodeEntryPoint->Checksum; 195 196 if (MicrocodeEntryPoint->HeaderVersion == 0x1) { 197 // 198 // It is the microcode header. It is not the padding data between microcode patches 199 // because the padding data should not include 0x00000001 and it should be the repeated 200 // byte format (like 0xXYXYXYXY....). 201 // 202 if (MicrocodeEntryPoint->ProcessorSignature.Uint32 == Eax.Uint32 && 203 MicrocodeEntryPoint->UpdateRevision > LatestRevision && 204 (MicrocodeEntryPoint->ProcessorFlags & (1 << PlatformId)) 205 ) { 206 // 207 // Calculate CheckSum Part1. 208 // 209 CheckSum32 = InCompleteCheckSum32; 210 CheckSum32 += MicrocodeEntryPoint->ProcessorSignature.Uint32; 211 CheckSum32 += MicrocodeEntryPoint->ProcessorFlags; 212 CheckSum32 += MicrocodeEntryPoint->Checksum; 213 if (CheckSum32 == 0) { 214 CorrectMicrocode = TRUE; 215 } 216 } else if ((MicrocodeEntryPoint->DataSize != 0) && 217 (MicrocodeEntryPoint->UpdateRevision > LatestRevision)) { 218 ExtendedTableLength = MicrocodeEntryPoint->TotalSize - (MicrocodeEntryPoint->DataSize + 219 sizeof (CPU_MICROCODE_HEADER)); 220 if (ExtendedTableLength != 0) { 221 // 222 // Extended Table exist, check if the CPU in support list 223 // 224 ExtendedTableHeader = (CPU_MICROCODE_EXTENDED_TABLE_HEADER *) ((UINT8 *) (MicrocodeEntryPoint) 225 + MicrocodeEntryPoint->DataSize + sizeof (CPU_MICROCODE_HEADER)); 226 // 227 // Calculate Extended Checksum 228 // 229 if ((ExtendedTableLength % 4) == 0) { 230 // 231 // Calculate CheckSum Part2. 232 // 233 CheckSum32 = CalculateSum32 ((UINT32 *) ExtendedTableHeader, ExtendedTableLength); 234 if (CheckSum32 == 0) { 235 // 236 // Checksum correct 237 // 238 ExtendedTableCount = ExtendedTableHeader->ExtendedSignatureCount; 239 ExtendedTable = (CPU_MICROCODE_EXTENDED_TABLE *) (ExtendedTableHeader + 1); 240 for (Index = 0; Index < ExtendedTableCount; Index ++) { 241 // 242 // Calculate CheckSum Part3. 
243 // 244 CheckSum32 = InCompleteCheckSum32; 245 CheckSum32 += ExtendedTable->ProcessorSignature.Uint32; 246 CheckSum32 += ExtendedTable->ProcessorFlag; 247 CheckSum32 += ExtendedTable->Checksum; 248 if (CheckSum32 == 0) { 249 // 250 // Verify Header 251 // 252 if ((ExtendedTable->ProcessorSignature.Uint32 == Eax.Uint32) && 253 (ExtendedTable->ProcessorFlag & (1 << PlatformId)) ) { 254 // 255 // Find one 256 // 257 CorrectMicrocode = TRUE; 258 break; 259 } 260 } 261 ExtendedTable ++; 262 } 263 } 264 } 265 } 266 } 267 } else { 76 if (!IsValidMicrocode (Microcode, MicrocodeEnd - (UINTN) Microcode, LatestRevision, &MicrocodeCpuId, 1, TRUE)) { 268 77 // 269 78 // It is the padding data between the microcode patches for microcode patches alignment. … … 273 82 // find the next possible microcode patch header. 274 83 // 275 Microcode EntryPoint = (CPU_MICROCODE_HEADER *) (((UINTN) MicrocodeEntryPoint)+ SIZE_1KB);84 Microcode = (CPU_MICROCODE_HEADER *) ((UINTN) Microcode + SIZE_1KB); 276 85 continue; 277 86 } 278 // 279 // Get the next patch. 280 // 281 if (MicrocodeEntryPoint->DataSize == 0) { 282 TotalSize = 2048; 283 } else { 284 TotalSize = MicrocodeEntryPoint->TotalSize; 285 } 286 287 if (CorrectMicrocode) { 288 LatestRevision = MicrocodeEntryPoint->UpdateRevision; 289 MicrocodeData = (VOID *) ((UINTN) MicrocodeEntryPoint + sizeof (CPU_MICROCODE_HEADER)); 290 } 291 292 MicrocodeEntryPoint = (CPU_MICROCODE_HEADER *) (((UINTN) MicrocodeEntryPoint) + TotalSize); 293 } while (((UINTN) MicrocodeEntryPoint < MicrocodeEnd)); 294 295 Done: 87 LatestMicrocode = Microcode; 88 LatestRevision = LatestMicrocode->UpdateRevision; 89 90 Microcode = (CPU_MICROCODE_HEADER *) (((UINTN) Microcode) + GetMicrocodeLength (Microcode)); 91 } while ((UINTN) Microcode < MicrocodeEnd); 92 93 LoadMicrocode: 296 94 if (LatestRevision != 0) { 297 95 // 298 // Save the detected microcode patch entry address (including the 299 // microcode patch header) for each processor.96 // Save the detected microcode patch entry address (including the microcode 97 // patch header) for each processor even it's the same as the loaded one. 300 98 // It will be used when building the microcode patch cache HOB. 301 99 // 302 CpuMpData->CpuData[ProcessorNumber].MicrocodeEntryAddr = 303 (UINTN) MicrocodeData - sizeof (CPU_MICROCODE_HEADER); 304 } 305 306 if (LatestRevision > CurrentRevision) { 100 CpuMpData->CpuData[ProcessorNumber].MicrocodeEntryAddr = (UINTN) LatestMicrocode; 101 } 102 103 if (LatestRevision > GetProcessorMicrocodeSignature ()) { 307 104 // 308 105 // BIOS only authenticate updates that contain a numerically larger revision … … 311 108 // revision equal to zero. 312 109 // 313 ASSERT (MicrocodeData != NULL); 314 AsmWriteMsr64 ( 315 MSR_IA32_BIOS_UPDT_TRIG, 316 (UINT64) (UINTN) MicrocodeData 317 ); 318 // 319 // Get and check new microcode signature 320 // 321 CurrentRevision = GetCurrentMicrocodeSignature (); 322 if (CurrentRevision != LatestRevision) { 323 AcquireSpinLock(&CpuMpData->MpLock); 324 DEBUG ((EFI_D_ERROR, "Updated microcode signature [0x%08x] does not match \ 325 loaded microcode signature [0x%08x]\n", CurrentRevision, LatestRevision)); 326 ReleaseSpinLock(&CpuMpData->MpLock); 327 } 328 } 329 } 330 331 /** 332 Determine if a microcode patch matchs the specific processor signature and flag. 333 334 @param[in] CpuMpData The pointer to CPU MP Data structure. 335 @param[in] ProcessorSignature The processor signature field value 336 supported by a microcode patch. 
337 @param[in] ProcessorFlags The prcessor flags field value supported by 338 a microcode patch. 339 340 @retval TRUE The specified microcode patch will be loaded. 341 @retval FALSE The specified microcode patch will not be loaded. 342 **/ 343 BOOLEAN 344 IsProcessorMatchedMicrocodePatch ( 345 IN CPU_MP_DATA *CpuMpData, 346 IN UINT32 ProcessorSignature, 347 IN UINT32 ProcessorFlags 348 ) 349 { 350 UINTN Index; 351 CPU_AP_DATA *CpuData; 352 353 for (Index = 0; Index < CpuMpData->CpuCount; Index++) { 354 CpuData = &CpuMpData->CpuData[Index]; 355 if ((ProcessorSignature == CpuData->ProcessorSignature) && 356 (ProcessorFlags & (1 << CpuData->PlatformId)) != 0) { 357 return TRUE; 358 } 359 } 360 361 return FALSE; 362 } 363 364 /** 365 Check the 'ProcessorSignature' and 'ProcessorFlags' of the microcode 366 patch header with the CPUID and PlatformID of the processors within 367 system to decide if it will be copied into memory. 368 369 @param[in] CpuMpData The pointer to CPU MP Data structure. 370 @param[in] MicrocodeEntryPoint The pointer to the microcode patch header. 371 372 @retval TRUE The specified microcode patch need to be loaded. 373 @retval FALSE The specified microcode patch dosen't need to be loaded. 374 **/ 375 BOOLEAN 376 IsMicrocodePatchNeedLoad ( 377 IN CPU_MP_DATA *CpuMpData, 378 CPU_MICROCODE_HEADER *MicrocodeEntryPoint 379 ) 380 { 381 BOOLEAN NeedLoad; 382 UINTN DataSize; 383 UINTN TotalSize; 384 CPU_MICROCODE_EXTENDED_TABLE_HEADER *ExtendedTableHeader; 385 UINT32 ExtendedTableCount; 386 CPU_MICROCODE_EXTENDED_TABLE *ExtendedTable; 387 UINTN Index; 388 389 // 390 // Check the 'ProcessorSignature' and 'ProcessorFlags' in microcode patch header. 391 // 392 NeedLoad = IsProcessorMatchedMicrocodePatch ( 393 CpuMpData, 394 MicrocodeEntryPoint->ProcessorSignature.Uint32, 395 MicrocodeEntryPoint->ProcessorFlags 396 ); 397 398 // 399 // If the Extended Signature Table exists, check if the processor is in the 400 // support list 401 // 402 DataSize = MicrocodeEntryPoint->DataSize; 403 TotalSize = (DataSize == 0) ? 2048 : MicrocodeEntryPoint->TotalSize; 404 if ((!NeedLoad) && (DataSize != 0) && 405 (TotalSize - DataSize > sizeof (CPU_MICROCODE_HEADER) + 406 sizeof (CPU_MICROCODE_EXTENDED_TABLE_HEADER))) { 407 ExtendedTableHeader = (CPU_MICROCODE_EXTENDED_TABLE_HEADER *) ((UINT8 *) (MicrocodeEntryPoint) 408 + DataSize + sizeof (CPU_MICROCODE_HEADER)); 409 ExtendedTableCount = ExtendedTableHeader->ExtendedSignatureCount; 410 ExtendedTable = (CPU_MICROCODE_EXTENDED_TABLE *) (ExtendedTableHeader + 1); 411 412 for (Index = 0; Index < ExtendedTableCount; Index ++) { 413 // 414 // Check the 'ProcessorSignature' and 'ProcessorFlag' of the Extended 415 // Signature Table entry with the CPUID and PlatformID of the processors 416 // within system to decide if it will be copied into memory 417 // 418 NeedLoad = IsProcessorMatchedMicrocodePatch ( 419 CpuMpData, 420 ExtendedTable->ProcessorSignature.Uint32, 421 ExtendedTable->ProcessorFlag 422 ); 423 if (NeedLoad) { 424 break; 425 } 426 ExtendedTable ++; 427 } 428 } 429 430 return NeedLoad; 431 } 432 110 LoadMicrocode (LatestMicrocode); 111 } 112 // 113 // It's possible that the microcode fails to load. Just capture the CPU microcode revision after loading. 
114 // 115 CpuMpData->CpuData[ProcessorNumber].MicrocodeRevision = GetProcessorMicrocodeSignature (); 116 } 433 117 434 118 /** … … 501 185 ) 502 186 { 187 UINTN Index; 503 188 CPU_MICROCODE_HEADER *MicrocodeEntryPoint; 504 189 UINTN MicrocodeEnd; 505 UINTN DataSize;506 190 UINTN TotalSize; 507 191 MICROCODE_PATCH_INFO *PatchInfoBuffer; … … 509 193 UINTN PatchCount; 510 194 UINTN TotalLoadSize; 195 EDKII_PEI_MICROCODE_CPU_ID *MicrocodeCpuIds; 196 BOOLEAN Valid; 511 197 512 198 // … … 536 222 } 537 223 224 MicrocodeCpuIds = AllocatePages ( 225 EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * sizeof (EDKII_PEI_MICROCODE_CPU_ID)) 226 ); 227 if (MicrocodeCpuIds == NULL) { 228 FreePool (PatchInfoBuffer); 229 return; 230 } 231 232 for (Index = 0; Index < CpuMpData->CpuCount; Index++) { 233 MicrocodeCpuIds[Index].PlatformId = CpuMpData->CpuData[Index].PlatformId; 234 MicrocodeCpuIds[Index].ProcessorSignature = CpuMpData->CpuData[Index].ProcessorSignature; 235 } 236 538 237 // 539 238 // Process the header of each microcode patch within the region. 540 239 // The purpose is to decide which microcode patch(es) will be loaded into memory. 240 // Microcode checksum is not verified because it's slow when performing on flash. 541 241 // 542 242 do { 543 if (MicrocodeEntryPoint->HeaderVersion != 0x1) { 243 Valid = IsValidMicrocode ( 244 MicrocodeEntryPoint, 245 MicrocodeEnd - (UINTN) MicrocodeEntryPoint, 246 0, 247 MicrocodeCpuIds, 248 CpuMpData->CpuCount, 249 FALSE 250 ); 251 if (!Valid) { 544 252 // 545 253 // Padding data between the microcode patches, skip 1KB to check next entry. … … 549 257 } 550 258 551 DataSize = MicrocodeEntryPoint->DataSize; 552 TotalSize = (DataSize == 0) ? 2048 : MicrocodeEntryPoint->TotalSize; 553 if ( (UINTN)MicrocodeEntryPoint > (MAX_ADDRESS - TotalSize) || 554 ((UINTN)MicrocodeEntryPoint + TotalSize) > MicrocodeEnd || 555 (DataSize & 0x3) != 0 || 556 (TotalSize & (SIZE_1KB - 1)) != 0 || 557 TotalSize < DataSize 558 ) { 559 // 560 // Not a valid microcode header, skip 1KB to check next entry. 561 // 562 MicrocodeEntryPoint = (CPU_MICROCODE_HEADER *) (((UINTN) MicrocodeEntryPoint) + SIZE_1KB); 563 continue; 259 PatchCount++; 260 if (PatchCount > MaxPatchNumber) { 261 // 262 // Current 'PatchInfoBuffer' cannot hold the information, double the size 263 // and allocate a new buffer. 264 // 265 if (MaxPatchNumber > MAX_UINTN / 2 / sizeof (MICROCODE_PATCH_INFO)) { 266 // 267 // Overflow check for MaxPatchNumber 268 // 269 goto OnExit; 270 } 271 272 PatchInfoBuffer = ReallocatePool ( 273 MaxPatchNumber * sizeof (MICROCODE_PATCH_INFO), 274 2 * MaxPatchNumber * sizeof (MICROCODE_PATCH_INFO), 275 PatchInfoBuffer 276 ); 277 if (PatchInfoBuffer == NULL) { 278 goto OnExit; 279 } 280 MaxPatchNumber = MaxPatchNumber * 2; 564 281 } 565 282 566 if (IsMicrocodePatchNeedLoad (CpuMpData, MicrocodeEntryPoint)) { 567 PatchCount++; 568 if (PatchCount > MaxPatchNumber) { 569 // 570 // Current 'PatchInfoBuffer' cannot hold the information, double the size 571 // and allocate a new buffer. 
572 // 573 if (MaxPatchNumber > MAX_UINTN / 2 / sizeof (MICROCODE_PATCH_INFO)) { 574 // 575 // Overflow check for MaxPatchNumber 576 // 577 goto OnExit; 578 } 579 580 PatchInfoBuffer = ReallocatePool ( 581 MaxPatchNumber * sizeof (MICROCODE_PATCH_INFO), 582 2 * MaxPatchNumber * sizeof (MICROCODE_PATCH_INFO), 583 PatchInfoBuffer 584 ); 585 if (PatchInfoBuffer == NULL) { 586 goto OnExit; 587 } 588 MaxPatchNumber = MaxPatchNumber * 2; 589 } 590 591 // 592 // Store the information of this microcode patch 593 // 594 PatchInfoBuffer[PatchCount - 1].Address = (UINTN) MicrocodeEntryPoint; 595 PatchInfoBuffer[PatchCount - 1].Size = TotalSize; 596 TotalLoadSize += TotalSize; 597 } 283 TotalSize = GetMicrocodeLength (MicrocodeEntryPoint); 284 285 // 286 // Store the information of this microcode patch 287 // 288 PatchInfoBuffer[PatchCount - 1].Address = (UINTN) MicrocodeEntryPoint; 289 PatchInfoBuffer[PatchCount - 1].Size = TotalSize; 290 TotalLoadSize += TotalSize; 598 291 599 292 // 600 293 // Process the next microcode patch 601 294 // 602 MicrocodeEntryPoint = (CPU_MICROCODE_HEADER *) (( (UINTN) MicrocodeEntryPoint)+ TotalSize);603 } while (( (UINTN) MicrocodeEntryPoint < MicrocodeEnd));295 MicrocodeEntryPoint = (CPU_MICROCODE_HEADER *) ((UINTN) MicrocodeEntryPoint + TotalSize); 296 } while ((UINTN) MicrocodeEntryPoint < MicrocodeEnd); 604 297 605 298 if (PatchCount != 0) { … … 617 310 FreePool (PatchInfoBuffer); 618 311 } 619 return;312 FreePages (MicrocodeCpuIds, EFI_SIZE_TO_PAGES (CpuMpData->CpuCount * sizeof (EDKII_PEI_MICROCODE_CPU_ID))); 620 313 } 621 314 -
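The rewritten MicrocodeDetect() above delegates header parsing, checksum verification and loading to the new MicrocodeLib helpers instead of open-coding them. A minimal sketch of the same scan pattern from a caller's point of view, assuming the IsValidMicrocode()/GetMicrocodeLength() prototypes implied by the calls above and the usual edk2 header paths; names suffixed "Sketch" are illustrative.

#include <Uefi.h>
#include <Register/Intel/Microcode.h>
#include <Library/MicrocodeLib.h>

CPU_MICROCODE_HEADER *
FindNewestPatchSketch (
  IN UINTN                       RegionBase,
  IN UINTN                       RegionSize,
  IN EDKII_PEI_MICROCODE_CPU_ID  *CpuId
  )
{
  CPU_MICROCODE_HEADER  *Patch;
  CPU_MICROCODE_HEADER  *Best;
  UINTN                 End;
  UINT32                BestRevision;

  Patch        = (CPU_MICROCODE_HEADER *) RegionBase;
  End          = RegionBase + RegionSize;
  Best         = NULL;
  BestRevision = 0;

  while ((UINTN) Patch < End) {
    //
    // Padding or stale entries are skipped in 1 KB steps; valid headers advance
    // by their real length -- the same policy MicrocodeDetect() uses above.
    //
    if (!IsValidMicrocode (Patch, End - (UINTN) Patch, BestRevision, CpuId, 1, TRUE)) {
      Patch = (CPU_MICROCODE_HEADER *) ((UINTN) Patch + SIZE_1KB);
      continue;
    }
    Best         = Patch;
    BestRevision = Patch->UpdateRevision;
    Patch        = (CPU_MICROCODE_HEADER *) ((UINTN) Patch + GetMicrocodeLength (Patch));
  }
  return Best;
}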
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MpInitLib/MpLib.c
r85951 r89983 2 2 CPU MP Initialize Library common functions. 3 3 4 Copyright (c) 2016 - 202 0, Intel Corporation. All rights reserved.<BR>4 Copyright (c) 2016 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 Copyright (c) 2020, AMD Inc. All rights reserved.<BR> 6 6 … … 10 10 11 11 #include "MpLib.h" 12 #include <Library/VmgExitLib.h> 13 #include <Register/Amd/Fam17Msr.h> 14 #include <Register/Amd/Ghcb.h> 12 15 #ifdef VBOX 13 16 # include <Library/IoLib.h> … … 17 20 EFI_GUID mCpuInitMpLibHobGuid = CPU_INIT_MP_LIB_HOB_GUID; 18 21 19 20 /**21 Determine if the standard CPU signature is "AuthenticAMD".22 23 @retval TRUE The CPU signature matches.24 @retval FALSE The CPU signature does not match.25 26 **/27 STATIC28 BOOLEAN29 StandardSignatureIsAuthenticAMD (30 VOID31 )32 {33 UINT32 RegEbx;34 UINT32 RegEcx;35 UINT32 RegEdx;36 37 AsmCpuid (CPUID_SIGNATURE, NULL, &RegEbx, &RegEcx, &RegEdx);38 return (RegEbx == CPUID_SIGNATURE_AUTHENTIC_AMD_EBX &&39 RegEcx == CPUID_SIGNATURE_AUTHENTIC_AMD_ECX &&40 RegEdx == CPUID_SIGNATURE_AUTHENTIC_AMD_EDX);41 }42 22 43 23 /** … … 316 296 // If processor does not support MONITOR/MWAIT feature, 317 297 // force AP in Hlt-loop mode 298 // 299 ApLoopMode = ApInHltLoop; 300 } 301 302 if (PcdGetBool (PcdSevEsIsEnabled)) { 303 // 304 // For SEV-ES, force AP in Hlt-loop mode in order to use the GHCB 305 // protocol for starting APs 318 306 // 319 307 ApLoopMode = ApInHltLoop; … … 523 511 WakeUpAP (CpuMpData, TRUE, 0, NULL, NULL, TRUE); 524 512 CpuMpData->InitFlag = ApInitDone; 513 // 514 // When InitFlag == ApInitConfig, WakeUpAP () guarantees all APs are checked in. 515 // FinishedCount is the number of check-in APs. 516 // 517 CpuMpData->CpuCount = CpuMpData->FinishedCount + 1; 525 518 ASSERT (CpuMpData->CpuCount <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); 526 //527 // Wait for all APs finished the initialization528 //529 while (CpuMpData->FinishedCount < (CpuMpData->CpuCount - 1)) {530 CpuPause ();531 }532 533 519 534 520 // … … 640 626 InitializeSpinLock(&CpuMpData->CpuData[ProcessorNumber].ApLock); 641 627 SetApState (&CpuMpData->CpuData[ProcessorNumber], CpuStateIdle); 628 } 629 630 /** 631 Get Protected mode code segment with 16-bit default addressing 632 from current GDT table. 633 634 @return Protected mode 16-bit code segment value. 635 **/ 636 STATIC 637 UINT16 638 GetProtectedMode16CS ( 639 VOID 640 ) 641 { 642 IA32_DESCRIPTOR GdtrDesc; 643 IA32_SEGMENT_DESCRIPTOR *GdtEntry; 644 UINTN GdtEntryCount; 645 UINT16 Index; 646 647 Index = (UINT16) -1; 648 AsmReadGdtr (&GdtrDesc); 649 GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR); 650 GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base; 651 for (Index = 0; Index < GdtEntryCount; Index++) { 652 if (GdtEntry->Bits.L == 0 && 653 GdtEntry->Bits.DB == 0 && 654 GdtEntry->Bits.Type > 8) { 655 break; 656 } 657 GdtEntry++; 658 } 659 ASSERT (Index != GdtEntryCount); 660 return Index * 8; 661 } 662 663 /** 664 Get Protected mode code segment with 32-bit default addressing 665 from current GDT table. 666 667 @return Protected mode 32-bit code segment value. 
668 **/ 669 STATIC 670 UINT16 671 GetProtectedMode32CS ( 672 VOID 673 ) 674 { 675 IA32_DESCRIPTOR GdtrDesc; 676 IA32_SEGMENT_DESCRIPTOR *GdtEntry; 677 UINTN GdtEntryCount; 678 UINT16 Index; 679 680 Index = (UINT16) -1; 681 AsmReadGdtr (&GdtrDesc); 682 GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR); 683 GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base; 684 for (Index = 0; Index < GdtEntryCount; Index++) { 685 if (GdtEntry->Bits.L == 0 && 686 GdtEntry->Bits.DB == 1 && 687 GdtEntry->Bits.Type > 8) { 688 break; 689 } 690 GdtEntry++; 691 } 692 ASSERT (Index != GdtEntryCount); 693 return Index * 8; 694 } 695 696 /** 697 Reset an AP when in SEV-ES mode. 698 699 If successful, this function never returns. 700 701 @param[in] Ghcb Pointer to the GHCB 702 @param[in] CpuMpData Pointer to CPU MP Data 703 704 **/ 705 STATIC 706 VOID 707 MpInitLibSevEsAPReset ( 708 IN GHCB *Ghcb, 709 IN CPU_MP_DATA *CpuMpData 710 ) 711 { 712 EFI_STATUS Status; 713 UINTN ProcessorNumber; 714 UINT16 Code16, Code32; 715 AP_RESET *APResetFn; 716 UINTN BufferStart; 717 UINTN StackStart; 718 719 Status = GetProcessorNumber (CpuMpData, &ProcessorNumber); 720 ASSERT_EFI_ERROR (Status); 721 722 Code16 = GetProtectedMode16CS (); 723 Code32 = GetProtectedMode32CS (); 724 725 if (CpuMpData->WakeupBufferHigh != 0) { 726 APResetFn = (AP_RESET *) (CpuMpData->WakeupBufferHigh + CpuMpData->AddressMap.SwitchToRealNoNxOffset); 727 } else { 728 APResetFn = (AP_RESET *) (CpuMpData->MpCpuExchangeInfo->BufferStart + CpuMpData->AddressMap.SwitchToRealOffset); 729 } 730 731 BufferStart = CpuMpData->MpCpuExchangeInfo->BufferStart; 732 StackStart = CpuMpData->SevEsAPResetStackStart - 733 (AP_RESET_STACK_SIZE * ProcessorNumber); 734 735 // 736 // This call never returns. 737 // 738 APResetFn (BufferStart, Code16, Code32, StackStart); 642 739 } 643 740 … … 684 781 while (TRUE) { 685 782 if (CpuMpData->InitFlag == ApInitConfig) { 686 //687 // Add CPU number688 //689 InterlockedIncrement ((UINT32 *) &CpuMpData->CpuCount);690 783 ProcessorNumber = ApIndex; 691 784 // … … 702 795 InitializeApData (CpuMpData, ProcessorNumber, BistData, ApTopOfStack); 703 796 ApStartupSignalBuffer = CpuMpData->CpuData[ProcessorNumber].StartupApSignal; 704 705 InterlockedDecrement ((UINT32 *) &CpuMpData->MpCpuExchangeInfo->NumApsExecuting);706 797 } else { 707 798 // … … 792 883 } 793 884 885 if (CpuMpData->ApLoopMode == ApInHltLoop) { 886 // 887 // Save AP volatile registers 888 // 889 SaveVolatileRegisters (&CpuMpData->CpuData[ProcessorNumber].VolatileRegisters); 890 } 891 794 892 // 795 893 // AP finished executing C code … … 797 895 InterlockedIncrement ((UINT32 *) &CpuMpData->FinishedCount); 798 896 897 if (CpuMpData->InitFlag == ApInitConfig) { 898 // 899 // Delay decrementing the APs executing count when SEV-ES is enabled 900 // to allow the APs to issue an AP_RESET_HOLD before the BSP possibly 901 // performs another INIT-SIPI-SIPI sequence. 
902 // 903 if (!CpuMpData->SevEsIsEnabled) { 904 InterlockedDecrement ((UINT32 *) &CpuMpData->MpCpuExchangeInfo->NumApsExecuting); 905 } 906 } 907 799 908 // 800 909 // Place AP is specified loop mode 801 910 // 802 911 if (CpuMpData->ApLoopMode == ApInHltLoop) { 803 //804 // Save AP volatile registers805 //806 SaveVolatileRegisters (&CpuMpData->CpuData[ProcessorNumber].VolatileRegisters);807 912 // 808 913 // Place AP in HLT-loop … … 810 915 while (TRUE) { 811 916 DisableInterrupts (); 812 CpuSleep (); 917 if (CpuMpData->SevEsIsEnabled) { 918 MSR_SEV_ES_GHCB_REGISTER Msr; 919 GHCB *Ghcb; 920 UINT64 Status; 921 BOOLEAN DoDecrement; 922 BOOLEAN InterruptState; 923 924 DoDecrement = (BOOLEAN) (CpuMpData->InitFlag == ApInitConfig); 925 926 while (TRUE) { 927 Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB); 928 Ghcb = Msr.Ghcb; 929 930 VmgInit (Ghcb, &InterruptState); 931 932 if (DoDecrement) { 933 DoDecrement = FALSE; 934 935 // 936 // Perform the delayed decrement just before issuing the first 937 // VMGEXIT with AP_RESET_HOLD. 938 // 939 InterlockedDecrement ((UINT32 *) &CpuMpData->MpCpuExchangeInfo->NumApsExecuting); 940 } 941 942 Status = VmgExit (Ghcb, SVM_EXIT_AP_RESET_HOLD, 0, 0); 943 if ((Status == 0) && (Ghcb->SaveArea.SwExitInfo2 != 0)) { 944 VmgDone (Ghcb, InterruptState); 945 break; 946 } 947 948 VmgDone (Ghcb, InterruptState); 949 } 950 951 // 952 // Awakened in a new phase? Use the new CpuMpData 953 // 954 if (CpuMpData->NewCpuMpData != NULL) { 955 CpuMpData = CpuMpData->NewCpuMpData; 956 } 957 958 MpInitLibSevEsAPReset (Ghcb, CpuMpData); 959 } else { 960 CpuSleep (); 961 } 813 962 CpuPause (); 814 963 } … … 889 1038 890 1039 ExchangeInfo = CpuMpData->MpCpuExchangeInfo; 891 ExchangeInfo->Lock = 0;892 1040 ExchangeInfo->StackStart = CpuMpData->Buffer; 893 1041 ExchangeInfo->StackSize = CpuMpData->CpuApStackSize; … … 922 1070 ExchangeInfo->Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1); 923 1071 DEBUG ((DEBUG_INFO, "%a: 5-Level Paging = %d\n", gEfiCallerBaseName, ExchangeInfo->Enable5LevelPaging)); 1072 1073 ExchangeInfo->SevEsIsEnabled = CpuMpData->SevEsIsEnabled; 1074 ExchangeInfo->GhcbBase = (UINTN) CpuMpData->GhcbBase; 924 1075 925 1076 // … … 949 1100 // 950 1101 if (CpuMpData->WakeupBufferHigh != 0) { 951 Size = CpuMpData->AddressMap.RendezvousFunnelSize - 952 CpuMpData->AddressMap.ModeTransitionOffset; 1102 Size = CpuMpData->AddressMap.RendezvousFunnelSize + 1103 CpuMpData->AddressMap.SwitchToRealSize - 1104 CpuMpData->AddressMap.ModeTransitionOffset; 953 1105 CopyMem ( 954 1106 (VOID *)CpuMpData->WakeupBufferHigh, … … 1003 1155 (VOID *) CpuMpData->WakeupBuffer, 1004 1156 (VOID *) CpuMpData->AddressMap.RendezvousFunnelAddress, 1005 CpuMpData->AddressMap.RendezvousFunnelSize 1157 CpuMpData->AddressMap.RendezvousFunnelSize + 1158 CpuMpData->AddressMap.SwitchToRealSize 1006 1159 ); 1007 1160 } … … 1025 1178 1026 1179 /** 1180 Calculate the size of the reset vector. 1181 1182 @param[in] AddressMap The pointer to Address Map structure. 1183 1184 @return Total amount of memory required for the AP reset area 1185 **/ 1186 STATIC 1187 UINTN 1188 GetApResetVectorSize ( 1189 IN MP_ASSEMBLY_ADDRESS_MAP *AddressMap 1190 ) 1191 { 1192 UINTN Size; 1193 1194 Size = AddressMap->RendezvousFunnelSize + 1195 AddressMap->SwitchToRealSize + 1196 sizeof (MP_CPU_EXCHANGE_INFO); 1197 1198 // 1199 // The AP reset stack is only used by SEV-ES guests. Do not add to the 1200 // allocation if SEV-ES is not enabled. 
1201 // 1202 if (PcdGetBool (PcdSevEsIsEnabled)) { 1203 // 1204 // Stack location is based on APIC ID, so use the total number of 1205 // processors for calculating the total stack area. 1206 // 1207 Size += AP_RESET_STACK_SIZE * PcdGet32 (PcdCpuMaxLogicalProcessorNumber); 1208 1209 Size = ALIGN_VALUE (Size, CPU_STACK_ALIGNMENT); 1210 } 1211 1212 return Size; 1213 } 1214 1215 /** 1027 1216 Allocate reset vector buffer. 1028 1217 … … 1037 1226 1038 1227 if (CpuMpData->WakeupBuffer == (UINTN) -1) { 1039 ApResetVectorSize = CpuMpData->AddressMap.RendezvousFunnelSize + 1040 sizeof (MP_CPU_EXCHANGE_INFO); 1228 ApResetVectorSize = GetApResetVectorSize (&CpuMpData->AddressMap); 1041 1229 1042 1230 CpuMpData->WakeupBuffer = GetWakeupBuffer (ApResetVectorSize); 1043 1231 CpuMpData->MpCpuExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) 1044 (CpuMpData->WakeupBuffer + CpuMpData->AddressMap.RendezvousFunnelSize); 1232 (CpuMpData->WakeupBuffer + 1233 CpuMpData->AddressMap.RendezvousFunnelSize + 1234 CpuMpData->AddressMap.SwitchToRealSize); 1045 1235 CpuMpData->WakeupBufferHigh = GetModeTransitionBuffer ( 1046 CpuMpData->AddressMap.RendezvousFunnelSize - 1236 CpuMpData->AddressMap.RendezvousFunnelSize + 1237 CpuMpData->AddressMap.SwitchToRealSize - 1047 1238 CpuMpData->AddressMap.ModeTransitionOffset 1048 1239 ); 1240 // 1241 // The reset stack starts at the end of the buffer. 1242 // 1243 CpuMpData->SevEsAPResetStackStart = CpuMpData->WakeupBuffer + ApResetVectorSize; 1049 1244 } 1050 1245 BackupAndPrepareWakeupBuffer (CpuMpData); … … 1061 1256 ) 1062 1257 { 1063 RestoreWakeupBuffer (CpuMpData); 1258 // 1259 // If SEV-ES is enabled, the reset area is needed for AP parking and 1260 // and AP startup in the OS, so the reset area is reserved. Do not 1261 // perform the restore as this will overwrite memory which has data 1262 // needed by SEV-ES. 1263 // 1264 if (!CpuMpData->SevEsIsEnabled) { 1265 RestoreWakeupBuffer (CpuMpData); 1266 } 1267 } 1268 1269 /** 1270 Allocate the SEV-ES AP jump table buffer. 1271 1272 @param[in, out] CpuMpData The pointer to CPU MP Data structure. 1273 **/ 1274 VOID 1275 AllocateSevEsAPMemory ( 1276 IN OUT CPU_MP_DATA *CpuMpData 1277 ) 1278 { 1279 if (CpuMpData->SevEsAPBuffer == (UINTN) -1) { 1280 CpuMpData->SevEsAPBuffer = 1281 CpuMpData->SevEsIsEnabled ? GetSevEsAPMemory () : 0; 1282 } 1283 } 1284 1285 /** 1286 Program the SEV-ES AP jump table buffer. 1287 1288 @param[in] SipiVector The SIPI vector used for the AP Reset 1289 **/ 1290 VOID 1291 SetSevEsJumpTable ( 1292 IN UINTN SipiVector 1293 ) 1294 { 1295 SEV_ES_AP_JMP_FAR *JmpFar; 1296 UINT32 Offset, InsnByte; 1297 UINT8 LoNib, HiNib; 1298 1299 JmpFar = (SEV_ES_AP_JMP_FAR *) (UINTN) FixedPcdGet32 (PcdSevEsWorkAreaBase); 1300 ASSERT (JmpFar != NULL); 1301 1302 // 1303 // Obtain the address of the Segment/Rip location in the workarea. 1304 // This will be set to a value derived from the SIPI vector and will 1305 // be the memory address used for the far jump below. 1306 // 1307 Offset = FixedPcdGet32 (PcdSevEsWorkAreaBase); 1308 Offset += sizeof (JmpFar->InsnBuffer); 1309 LoNib = (UINT8) Offset; 1310 HiNib = (UINT8) (Offset >> 8); 1311 1312 // 1313 // Program the workarea (which is the initial AP boot address) with 1314 // far jump to the SIPI vector (where XX and YY represent the 1315 // address of where the SIPI vector is stored. 
1316 // 1317 // JMP FAR [CS:XXYY] => 2E FF 2E YY XX 1318 // 1319 InsnByte = 0; 1320 JmpFar->InsnBuffer[InsnByte++] = 0x2E; // CS override prefix 1321 JmpFar->InsnBuffer[InsnByte++] = 0xFF; // JMP (FAR) 1322 JmpFar->InsnBuffer[InsnByte++] = 0x2E; // ModRM (JMP memory location) 1323 JmpFar->InsnBuffer[InsnByte++] = LoNib; // YY offset ... 1324 JmpFar->InsnBuffer[InsnByte++] = HiNib; // XX offset ... 1325 1326 // 1327 // Program the Segment/Rip based on the SIPI vector (always at least 1328 // 16-byte aligned, so Rip is set to 0). 1329 // 1330 JmpFar->Rip = 0; 1331 JmpFar->Segment = (UINT16) (SipiVector >> 4); 1064 1332 } 1065 1333 … … 1098 1366 ResetVectorRequired = TRUE; 1099 1367 AllocateResetVector (CpuMpData); 1368 AllocateSevEsAPMemory (CpuMpData); 1100 1369 FillExchangeInfoData (CpuMpData); 1101 1370 SaveLocalApicTimerSetting (CpuMpData); … … 1134 1403 } 1135 1404 if (ResetVectorRequired) { 1405 // 1406 // For SEV-ES, the initial AP boot address will be defined by 1407 // PcdSevEsWorkAreaBase. The Segment/Rip must be the jump address 1408 // from the original INIT-SIPI-SIPI. 1409 // 1410 if (CpuMpData->SevEsIsEnabled) { 1411 SetSevEsJumpTable (ExchangeInfo->BufferStart); 1412 } 1413 1136 1414 // 1137 1415 // Wakeup all APs … … 1225 1503 if (ResetVectorRequired) { 1226 1504 CpuInfoInHob = (CPU_INFO_IN_HOB *) (UINTN) CpuMpData->CpuInfoInHob; 1505 1506 // 1507 // For SEV-ES, the initial AP boot address will be defined by 1508 // PcdSevEsWorkAreaBase. The Segment/Rip must be the jump address 1509 // from the original INIT-SIPI-SIPI. 1510 // 1511 if (CpuMpData->SevEsIsEnabled) { 1512 SetSevEsJumpTable (ExchangeInfo->BufferStart); 1513 } 1514 1227 1515 SendInitSipiSipi ( 1228 1516 CpuInfoInHob[ProcessorNumber].ApicId, … … 1701 1989 1702 1990 AsmGetAddressMap (&AddressMap); 1703 ApResetVectorSize = AddressMap.RendezvousFunnelSize + sizeof (MP_CPU_EXCHANGE_INFO);1991 ApResetVectorSize = GetApResetVectorSize (&AddressMap); 1704 1992 ApStackSize = PcdGet32(PcdCpuApStackSize); 1705 1993 ApLoopMode = GetApLoopMode (&MonitorFilterSize); … … 1759 2047 CpuMpData->CpuInfoInHob = (UINT64) (UINTN) (CpuMpData->CpuData + MaxLogicalProcessorNumber); 1760 2048 InitializeSpinLock(&CpuMpData->MpLock); 2049 CpuMpData->SevEsIsEnabled = PcdGetBool (PcdSevEsIsEnabled); 2050 CpuMpData->SevEsAPBuffer = (UINTN) -1; 2051 CpuMpData->GhcbBase = PcdGet64 (PcdGhcbBase); 1761 2052 1762 2053 // … … 1817 2108 // from HOB 1818 2109 // 2110 OldCpuMpData->NewCpuMpData = CpuMpData; 1819 2111 CpuMpData->CpuCount = OldCpuMpData->CpuCount; 1820 2112 CpuMpData->BspNumber = OldCpuMpData->BspNumber; … … 1875 2167 } 1876 2168 2169 // 2170 // Dump the microcode revision for each core. 2171 // 2172 DEBUG_CODE ( 2173 UINT32 ThreadId; 2174 UINT32 ExpectedMicrocodeRevision; 2175 CpuInfoInHob = (CPU_INFO_IN_HOB *) (UINTN) CpuMpData->CpuInfoInHob; 2176 for (Index = 0; Index < CpuMpData->CpuCount; Index++) { 2177 GetProcessorLocationByApicId (CpuInfoInHob[Index].InitialApicId, NULL, NULL, &ThreadId); 2178 if (ThreadId == 0) { 2179 // 2180 // MicrocodeDetect() loads microcode in first thread of each core, so, 2181 // CpuMpData->CpuData[Index].MicrocodeEntryAddr is initialized only for first thread of each core. 
2182 // 2183 ExpectedMicrocodeRevision = 0; 2184 if (CpuMpData->CpuData[Index].MicrocodeEntryAddr != 0) { 2185 ExpectedMicrocodeRevision = ((CPU_MICROCODE_HEADER *)(UINTN)CpuMpData->CpuData[Index].MicrocodeEntryAddr)->UpdateRevision; 2186 } 2187 DEBUG (( 2188 DEBUG_INFO, "CPU[%04d]: Microcode revision = %08x, expected = %08x\n", 2189 Index, CpuMpData->CpuData[Index].MicrocodeRevision, ExpectedMicrocodeRevision 2190 )); 2191 } 2192 } 2193 ); 1877 2194 // 1878 2195 // Initialize global data for MP support -
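SetSevEsJumpTable() above programs the SEV-ES work area with a far jump whose target is derived purely from the SIPI vector. A minimal sketch of that Segment:Offset encoding with a made-up example address; the structure mirrors the SEV_ES_AP_JMP_FAR definition added to MpLib.h, and the instruction bytes it sits behind are "2E FF 2E yy xx" (JMP FAR [CS:xxyy]) as shown in the hunk.

#include <Base.h>

#pragma pack(1)
typedef struct {
  UINT8   InsnBuffer[8];  // far-jump opcode bytes, remaining bytes unused
  UINT16  Rip;            // always 0: the SIPI vector is at least 4 KB aligned
  UINT16  Segment;        // SipiVector >> 4
} SEV_ES_AP_JMP_FAR_SKETCH;
#pragma pack()

VOID
EncodeApJumpSketch (
  IN  UINTN                     SipiVector,  // e.g. a wakeup buffer at 0x88000 (below 1 MB)
  OUT SEV_ES_AP_JMP_FAR_SKETCH  *JmpFar
  )
{
  //
  // 0x88000 >> 4 = segment 0x8800, offset 0 -- the AP resumes at the start of
  // the wakeup buffer exactly as an INIT-SIPI-SIPI sequence would place it.
  //
  JmpFar->Rip     = 0;
  JmpFar->Segment = (UINT16) (SipiVector >> 4);
}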
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MpInitLib/MpLib.h
r85718 r89983 2 2 Common header file for MP Initialize Library. 3 3 4 Copyright (c) 2016 - 202 0, Intel Corporation. All rights reserved.<BR>4 Copyright (c) 2016 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 Copyright (c) 2020, AMD Inc. All rights reserved.<BR> 6 6 … … 33 33 #include <Library/HobLib.h> 34 34 #include <Library/PcdLib.h> 35 #include <Library/MicrocodeLib.h> 35 36 36 37 #include <Guid/MicrocodePatchHob.h> … … 145 146 UINT8 PlatformId; 146 147 UINT64 MicrocodeEntryAddr; 148 UINT32 MicrocodeRevision; 147 149 } CPU_AP_DATA; 148 150 … … 174 176 UINTN RelocateApLoopFuncSize; 175 177 UINTN ModeTransitionOffset; 178 UINTN SwitchToRealSize; 179 UINTN SwitchToRealOffset; 180 UINTN SwitchToRealNoNxOffset; 181 UINTN SwitchToRealPM16ModeOffset; 182 UINTN SwitchToRealPM16ModeSize; 176 183 } MP_ASSEMBLY_ADDRESS_MAP; 177 184 … … 186 193 // 187 194 typedef struct { 188 UINTN Lock;189 195 UINTN StackStart; 190 196 UINTN StackSize; … … 212 218 // 213 219 BOOLEAN Enable5LevelPaging; 220 BOOLEAN SevEsIsEnabled; 221 UINTN GhcbBase; 214 222 } MP_CPU_EXCHANGE_INFO; 215 223 … … 258 266 UINT8 ApTargetCState; 259 267 UINT16 PmCodeSegment; 268 UINT16 Pm16CodeSegment; 260 269 CPU_AP_DATA *CpuData; 261 270 volatile MP_CPU_EXCHANGE_INFO *MpCpuExchangeInfo; … … 277 286 // 278 287 BOOLEAN WakeUpByInitSipiSipi; 288 289 BOOLEAN SevEsIsEnabled; 290 UINTN SevEsAPBuffer; 291 UINTN SevEsAPResetStackStart; 292 CPU_MP_DATA *NewCpuMpData; 293 294 UINT64 GhcbBase; 279 295 }; 296 297 #define AP_SAFE_STACK_SIZE 128 298 #define AP_RESET_STACK_SIZE AP_SAFE_STACK_SIZE 299 300 #pragma pack(1) 301 302 typedef struct { 303 UINT8 InsnBuffer[8]; 304 UINT16 Rip; 305 UINT16 Segment; 306 } SEV_ES_AP_JMP_FAR; 307 308 #pragma pack() 309 310 /** 311 Assembly code to move an AP from long mode to real mode. 312 313 Move an AP from long mode to real mode in preparation to invoking 314 the reset vector. This is used for SEV-ES guests where a hypervisor 315 is not allowed to set the CS and RIP to point to the reset vector. 316 317 @param[in] BufferStart The reset vector target. 318 @param[in] Code16 16-bit protected mode code segment value. 319 @param[in] Code32 32-bit protected mode code segment value. 320 @param[in] StackStart The start of a stack to be used for transitioning 321 from long mode to real mode. 322 **/ 323 typedef 324 VOID 325 (EFIAPI AP_RESET) ( 326 IN UINTN BufferStart, 327 IN UINT16 Code16, 328 IN UINT16 Code32, 329 IN UINTN StackStart 330 ); 280 331 281 332 extern EFI_GUID mCpuInitMpLibHobGuid; … … 302 353 IN UINTN PmCodeSegment, 303 354 IN UINTN TopOfApStack, 304 IN UINTN NumberToFinish 355 IN UINTN NumberToFinish, 356 IN UINTN Pm16CodeSegment, 357 IN UINTN SevEsAPJumpTable, 358 IN UINTN WakeupBuffer 305 359 ); 306 360 … … 382 436 GetModeTransitionBuffer ( 383 437 IN UINTN BufferSize 438 ); 439 440 /** 441 Return the address of the SEV-ES AP jump table. 442 443 This buffer is required in order for an SEV-ES guest to transition from 444 UEFI into an OS. 445 446 @return Return SEV-ES AP jump table buffer 447 **/ 448 UINTN 449 GetSevEsAPMemory ( 450 VOID 384 451 ); 385 452 -
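The AP_RESET typedef added to this header is how MpLib.c tail-calls the relocated SwitchToRealProc stub when parking an SEV-ES AP. A minimal usage sketch under the same assumptions made by MpInitLibSevEsAPReset() above (128-byte per-AP reset-stack slices carved downward from SevEsAPResetStackStart); names suffixed "Sketch" are illustrative, not part of the changeset.

#include <Base.h>

#define AP_RESET_STACK_SIZE_SKETCH  128   // mirrors AP_SAFE_STACK_SIZE / AP_RESET_STACK_SIZE

typedef
VOID
(EFIAPI AP_RESET_SKETCH) (
  IN UINTN   BufferStart,
  IN UINT16  Code16,
  IN UINT16  Code32,
  IN UINTN   StackStart
  );

VOID
ParkApSketch (
  IN AP_RESET_SKETCH  *APResetFn,       // relocated SwitchToRealProc entry point
  IN UINTN            BufferStart,      // reset vector target (wakeup buffer)
  IN UINT16           Code16,           // 16-bit protected mode code selector
  IN UINT16           Code32,           // 32-bit protected mode code selector
  IN UINTN            ResetStackTop,    // CpuMpData->SevEsAPResetStackStart
  IN UINTN            ProcessorNumber
  )
{
  //
  // Each AP gets its own slice of the shared reset-stack area, indexed by
  // processor number, before dropping to real mode at BufferStart.
  //
  APResetFn (
    BufferStart,
    Code16,
    Code32,
    ResetStackTop - (AP_RESET_STACK_SIZE_SKETCH * ProcessorNumber)
    );
  //
  // Never reached: the assembly stub does not return.
  //
}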
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MpInitLib/PeiMpInitLib.inf
r85718 r89983 2 2 # MP Initialize Library instance for PEI driver. 3 3 # 4 # Copyright (c) 2016 - 202 0, Intel Corporation. All rights reserved.<BR>4 # Copyright (c) 2016 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 # SPDX-License-Identifier: BSD-2-Clause-Patent 6 6 # … … 23 23 24 24 [Sources.IA32] 25 Ia32/MpEqu.inc26 25 Ia32/MpFuncs.nasm 27 26 28 27 [Sources.X64] 29 X64/MpEqu.inc30 28 X64/MpFuncs.nasm 31 29 32 30 [Sources.common] 31 MpEqu.inc 33 32 PeiMpLib.c 34 33 MpLib.c … … 52 51 PeiServicesLib 53 52 PcdLib 53 VmgExitLib 54 MicrocodeLib 54 55 55 56 [Pcd] … … 62 63 gUefiCpuPkgTokenSpaceGuid.PcdCpuApLoopMode ## CONSUMES 63 64 gUefiCpuPkgTokenSpaceGuid.PcdCpuApTargetCstate ## SOMETIMES_CONSUMES 65 gUefiCpuPkgTokenSpaceGuid.PcdSevEsIsEnabled ## CONSUMES 66 gUefiCpuPkgTokenSpaceGuid.PcdSevEsWorkAreaBase ## SOMETIMES_CONSUMES 67 gEfiMdeModulePkgTokenSpaceGuid.PcdGhcbBase ## CONSUMES 64 68 65 69 [Ppis] -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MpInitLib/PeiMpLib.c
r85718 r89983 273 273 GetModeTransitionBuffer ( 274 274 IN UINTN BufferSize 275 ) 276 { 277 // 278 // PEI phase doesn't need to do such transition. So simply return 0. 279 // 280 return 0; 281 } 282 283 /** 284 Return the address of the SEV-ES AP jump table. 285 286 This buffer is required in order for an SEV-ES guest to transition from 287 UEFI into an OS. 288 289 @return Return SEV-ES AP jump table buffer 290 **/ 291 UINTN 292 GetSevEsAPMemory ( 293 VOID 275 294 ) 276 295 { -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
r80721 r89983 1 1 ;------------------------------------------------------------------------------ ; 2 ; Copyright (c) 2015 - 20 19, Intel Corporation. All rights reserved.<BR>2 ; Copyright (c) 2015 - 2021, Intel Corporation. All rights reserved.<BR> 3 3 ; SPDX-License-Identifier: BSD-2-Clause-Patent 4 4 ; … … 44 44 mov gs, ax 45 45 46 mov si, BufferStartLocation46 mov si, MP_CPU_EXCHANGE_INFO_FIELD (BufferStart) 47 47 mov ebx, [si] 48 48 49 mov si, DataSegmentLocation49 mov si, MP_CPU_EXCHANGE_INFO_FIELD (DataSegment) 50 50 mov edx, [si] 51 51 … … 53 53 ; Get start address of 32-bit code in low memory (<1MB) 54 54 ; 55 mov edi, M odeTransitionMemoryLocation56 57 mov si, GdtrLocation55 mov edi, MP_CPU_EXCHANGE_INFO_FIELD (ModeTransitionMemory) 56 57 mov si, MP_CPU_EXCHANGE_INFO_FIELD (GdtrProfile) 58 58 o32 lgdt [cs:si] 59 59 60 mov si, IdtrLocation60 mov si, MP_CPU_EXCHANGE_INFO_FIELD (IdtrProfile) 61 61 o32 lidt [cs:si] 62 62 … … 86 86 ; Enable execute disable bit 87 87 ; 88 mov esi, EnableExecuteDisableLocation88 mov esi, MP_CPU_EXCHANGE_INFO_FIELD (EnableExecuteDisable) 89 89 cmp byte [ebx + esi], 0 90 90 jz SkipEnableExecuteDisableBit … … 102 102 bts eax, 5 103 103 104 mov esi, Enable5LevelPagingLocation104 mov esi, MP_CPU_EXCHANGE_INFO_FIELD (Enable5LevelPaging) 105 105 cmp byte [ebx + esi], 0 106 106 jz SkipEnable5LevelPaging … … 118 118 ; Load page table 119 119 ; 120 mov esi, Cr3Location; Save CR3 in ecx120 mov esi, MP_CPU_EXCHANGE_INFO_FIELD (Cr3) ; Save CR3 in ecx 121 121 mov ecx, [ebx + esi] 122 122 mov cr3, ecx ; Load CR3 … … 140 140 ; Far jump to 64-bit code 141 141 ; 142 mov edi, M odeHighMemoryLocation142 mov edi, MP_CPU_EXCHANGE_INFO_FIELD (ModeHighMemory) 143 143 add edi, ebx 144 144 jmp far [edi] … … 147 147 LongModeStart: 148 148 mov esi, ebx 149 lea edi, [esi + InitFlagLocation]149 lea edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (InitFlag)] 150 150 cmp qword [edi], 1 ; ApInitConfig 151 151 jnz GetApicId … … 154 154 ; This is decremented in C code when AP is finished executing 155 155 mov edi, esi 156 add edi, NumApsExecutingLocation156 add edi, MP_CPU_EXCHANGE_INFO_FIELD (NumApsExecuting) 157 157 lock inc dword [edi] 158 158 159 159 ; AP init 160 160 mov edi, esi 161 add edi, LockLocation 162 mov rax, NotVacantFlag 163 164 TestLock: 165 xchg qword [edi], rax 166 cmp rax, NotVacantFlag 167 jz TestLock 168 169 lea ecx, [esi + ApIndexLocation] 170 inc dword [ecx] 171 mov ebx, [ecx] 172 173 Releaselock: 174 mov rax, VacantFlag 175 xchg qword [edi], rax 161 add edi, MP_CPU_EXCHANGE_INFO_FIELD (ApIndex) 162 mov ebx, 1 163 lock xadd dword [edi], ebx ; EBX = ApIndex++ 164 inc ebx ; EBX is CpuNumber 165 176 166 ; program stack 177 167 mov edi, esi 178 add edi, StackSizeLocation168 add edi, MP_CPU_EXCHANGE_INFO_FIELD (StackSize) 179 169 mov eax, dword [edi] 180 170 mov ecx, ebx … … 182 172 mul ecx ; EAX = StackSize * (CpuNumber + 1) 183 173 mov edi, esi 184 add edi, StackStartAddressLocation174 add edi, MP_CPU_EXCHANGE_INFO_FIELD (StackStart) 185 175 add rax, qword [edi] 186 176 mov rsp, rax 177 178 lea edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (SevEsIsEnabled)] 179 cmp byte [edi], 1 ; SevEsIsEnabled 180 jne CProcedureInvoke 181 182 ; 183 ; program GHCB 184 ; Each page after the GHCB is a per-CPU page, so the calculation programs 185 ; a GHCB to be every 8KB. 
186 ; 187 mov eax, SIZE_4KB 188 shl eax, 1 ; EAX = SIZE_4K * 2 189 mov ecx, ebx 190 mul ecx ; EAX = SIZE_4K * 2 * CpuNumber 191 mov edi, esi 192 add edi, MP_CPU_EXCHANGE_INFO_FIELD (GhcbBase) 193 add rax, qword [edi] 194 mov rdx, rax 195 shr rdx, 32 196 mov rcx, 0xc0010130 197 wrmsr 187 198 jmp CProcedureInvoke 188 199 189 200 GetApicId: 201 lea edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (SevEsIsEnabled)] 202 cmp byte [edi], 1 ; SevEsIsEnabled 203 jne DoCpuid 204 205 ; 206 ; Since we don't have a stack yet, we can't take a #VC 207 ; exception. Use the GHCB protocol to perform the CPUID 208 ; calls. 209 ; 210 mov rcx, 0xc0010130 211 rdmsr 212 shl rdx, 32 213 or rax, rdx 214 mov rdi, rax ; RDI now holds the original GHCB GPA 215 216 mov rdx, 0 ; CPUID function 0 217 mov rax, 0 ; RAX register requested 218 or rax, 4 219 wrmsr 220 rep vmmcall 221 rdmsr 222 cmp edx, 0bh 223 jb NoX2ApicSevEs ; CPUID level below CPUID_EXTENDED_TOPOLOGY 224 225 mov rdx, 0bh ; CPUID function 0x0b 226 mov rax, 040000000h ; RBX register requested 227 or rax, 4 228 wrmsr 229 rep vmmcall 230 rdmsr 231 test edx, 0ffffh 232 jz NoX2ApicSevEs ; CPUID.0BH:EBX[15:0] is zero 233 234 mov rdx, 0bh ; CPUID function 0x0b 235 mov rax, 0c0000000h ; RDX register requested 236 or rax, 4 237 wrmsr 238 rep vmmcall 239 rdmsr 240 241 ; Processor is x2APIC capable; 32-bit x2APIC ID is now in EDX 242 jmp RestoreGhcb 243 244 NoX2ApicSevEs: 245 ; Processor is not x2APIC capable, so get 8-bit APIC ID 246 mov rdx, 1 ; CPUID function 1 247 mov rax, 040000000h ; RBX register requested 248 or rax, 4 249 wrmsr 250 rep vmmcall 251 rdmsr 252 shr edx, 24 253 254 RestoreGhcb: 255 mov rbx, rdx ; Save x2APIC/APIC ID 256 257 mov rdx, rdi ; RDI holds the saved GHCB GPA 258 shr rdx, 32 259 mov eax, edi 260 wrmsr 261 262 mov rdx, rbx 263 264 ; x2APIC ID or APIC ID is in EDX 265 jmp GetProcessorNumber 266 267 DoCpuid: 190 268 mov eax, 0 191 269 cpuid … … 215 293 ; 216 294 xor ebx, ebx 217 lea eax, [esi + CpuInfoLocation]218 mov edi, [eax]295 lea eax, [esi + MP_CPU_EXCHANGE_INFO_FIELD (CpuInfo)] 296 mov rdi, [eax] 219 297 220 298 GetNextProcNumber: 221 cmp dword [ edi], edx ; APIC ID match?299 cmp dword [rdi + CPU_INFO_IN_HOB.InitialApicId], edx ; APIC ID match? 222 300 jz ProgramStack 223 add edi, 20301 add rdi, CPU_INFO_IN_HOB_size 224 302 inc ebx 225 303 jmp GetNextProcNumber 226 304 227 305 ProgramStack: 228 mov rsp, qword [ edi + 12]306 mov rsp, qword [rdi + CPU_INFO_IN_HOB.ApTopOfStack] 229 307 230 308 CProcedureInvoke: … … 234 312 mov rbp, rsp 235 313 236 mov rax, qword [esi + InitializeFloatingPointUnitsAddress]314 mov rax, qword [esi + MP_CPU_EXCHANGE_INFO_FIELD (InitializeFloatingPointUnits)] 237 315 sub rsp, 20h 238 316 call rax ; Call assembly function to initialize FPU per UEFI spec … … 241 319 mov edx, ebx ; edx is ApIndex 242 320 mov ecx, esi 243 add ecx, LockLocation; rcx is address of exchange info data buffer321 add ecx, MP_CPU_EXCHANGE_INFO_OFFSET ; rcx is address of exchange info data buffer 244 322 245 323 mov edi, esi 246 add edi, ApProcedureLocation324 add edi, MP_CPU_EXCHANGE_INFO_FIELD (CFunction) 247 325 mov rax, qword [edi] 248 326 … … 255 333 256 334 ;------------------------------------------------------------------------------------- 257 ; AsmRelocateApLoop (MwaitSupport, ApTargetCState, PmCodeSegment, TopOfApStack, CountTofinish); 335 ;SwitchToRealProc procedure follows. 336 ;ALSO THIS PROCEDURE IS EXECUTED BY APs TRANSITIONING TO 16 BIT MODE. HENCE THIS PROC 337 ;IS IN MACHINE CODE. 
338 ; SwitchToRealProc (UINTN BufferStart, UINT16 Code16, UINT16 Code32, UINTN StackStart) 339 ; rcx - Buffer Start 340 ; rdx - Code16 Selector Offset 341 ; r8 - Code32 Selector Offset 342 ; r9 - Stack Start 343 ;------------------------------------------------------------------------------------- 344 global ASM_PFX(SwitchToRealProc) 345 ASM_PFX(SwitchToRealProc): 346 SwitchToRealProcStart: 347 BITS 64 348 cli 349 350 ; 351 ; Get RDX reset value before changing stacks since the 352 ; new stack won't be able to accomodate a #VC exception. 353 ; 354 push rax 355 push rbx 356 push rcx 357 push rdx 358 359 mov rax, 1 360 cpuid 361 mov rsi, rax ; Save off the reset value for RDX 362 363 pop rdx 364 pop rcx 365 pop rbx 366 pop rax 367 368 ; 369 ; Establish stack below 1MB 370 ; 371 mov rsp, r9 372 373 ; 374 ; Push ultimate Reset Vector onto the stack 375 ; 376 mov rax, rcx 377 shr rax, 4 378 push word 0x0002 ; RFLAGS 379 push ax ; CS 380 push word 0x0000 ; RIP 381 push word 0x0000 ; For alignment, will be discarded 382 383 ; 384 ; Get address of "16-bit operand size" label 385 ; 386 lea rbx, [PM16Mode] 387 388 ; 389 ; Push addresses used to change to compatibility mode 390 ; 391 lea rax, [CompatMode] 392 push r8 393 push rax 394 395 ; 396 ; Clear R8 - R15, for reset, before going into 32-bit mode 397 ; 398 xor r8, r8 399 xor r9, r9 400 xor r10, r10 401 xor r11, r11 402 xor r12, r12 403 xor r13, r13 404 xor r14, r14 405 xor r15, r15 406 407 ; 408 ; Far return into 32-bit mode 409 ; 410 o64 retf 411 412 BITS 32 413 CompatMode: 414 ; 415 ; Set up stack to prepare for exiting protected mode 416 ; 417 push edx ; Code16 CS 418 push ebx ; PM16Mode label address 419 420 ; 421 ; Disable paging 422 ; 423 mov eax, cr0 ; Read CR0 424 btr eax, 31 ; Set PG=0 425 mov cr0, eax ; Write CR0 426 427 ; 428 ; Disable long mode 429 ; 430 mov ecx, 0c0000080h ; EFER MSR number 431 rdmsr ; Read EFER 432 btr eax, 8 ; Set LME=0 433 wrmsr ; Write EFER 434 435 ; 436 ; Disable PAE 437 ; 438 mov eax, cr4 ; Read CR4 439 btr eax, 5 ; Set PAE=0 440 mov cr4, eax ; Write CR4 441 442 mov edx, esi ; Restore RDX reset value 443 444 ; 445 ; Switch to 16-bit operand size 446 ; 447 retf 448 449 BITS 16 450 ; 451 ; At entry to this label 452 ; - RDX will have its reset value 453 ; - On the top of the stack 454 ; - Alignment data (two bytes) to be discarded 455 ; - IP for Real Mode (two bytes) 456 ; - CS for Real Mode (two bytes) 457 ; 458 ; This label is also used with AsmRelocateApLoop. During MP finalization, 459 ; the code from PM16Mode to SwitchToRealProcEnd is copied to the start of 460 ; the WakeupBuffer, allowing a parked AP to be booted by an OS. 
461 ; 462 PM16Mode: 463 mov eax, cr0 ; Read CR0 464 btr eax, 0 ; Set PE=0 465 mov cr0, eax ; Write CR0 466 467 pop ax ; Discard alignment data 468 469 ; 470 ; Clear registers (except RDX and RSP) before going into 16-bit mode 471 ; 472 xor eax, eax 473 xor ebx, ebx 474 xor ecx, ecx 475 xor esi, esi 476 xor edi, edi 477 xor ebp, ebp 478 479 iret 480 481 SwitchToRealProcEnd: 482 483 ;------------------------------------------------------------------------------------- 484 ; AsmRelocateApLoop (MwaitSupport, ApTargetCState, PmCodeSegment, TopOfApStack, CountTofinish, Pm16CodeSegment, SevEsAPJumpTable, WakeupBuffer); 258 485 ;------------------------------------------------------------------------------------- 259 486 global ASM_PFX(AsmRelocateApLoop) 260 487 ASM_PFX(AsmRelocateApLoop): 261 488 AsmRelocateApLoopStart: 489 BITS 64 490 cmp qword [rsp + 56], 0 ; SevEsAPJumpTable 491 je NoSevEs 492 493 ; 494 ; Perform some SEV-ES related setup before leaving 64-bit mode 495 ; 496 push rcx 497 push rdx 498 499 ; 500 ; Get the RDX reset value using CPUID 501 ; 502 mov rax, 1 503 cpuid 504 mov rsi, rax ; Save off the reset value for RDX 505 506 ; 507 ; Prepare the GHCB for the AP_HLT_LOOP VMGEXIT call 508 ; - Must be done while in 64-bit long mode so that writes to 509 ; the GHCB memory will be unencrypted. 510 ; - No NAE events can be generated once this is set otherwise 511 ; the AP_RESET_HOLD SW_EXITCODE will be overwritten. 512 ; 513 mov rcx, 0xc0010130 514 rdmsr ; Retrieve current GHCB address 515 shl rdx, 32 516 or rdx, rax 517 518 mov rdi, rdx 519 xor rax, rax 520 mov rcx, 0x800 521 shr rcx, 3 522 rep stosq ; Clear the GHCB 523 524 mov rax, 0x80000004 ; VMGEXIT AP_RESET_HOLD 525 mov [rdx + 0x390], rax 526 mov rax, 114 ; Set SwExitCode valid bit 527 bts [rdx + 0x3f0], rax 528 inc rax ; Set SwExitInfo1 valid bit 529 bts [rdx + 0x3f0], rax 530 inc rax ; Set SwExitInfo2 valid bit 531 bts [rdx + 0x3f0], rax 532 533 pop rdx 534 pop rcx 535 536 NoSevEs: 262 537 cli ; Disable interrupt before switching to 32-bit mode 263 538 mov rax, [rsp + 40] ; CountTofinish 264 539 lock dec dword [rax] ; (*CountTofinish)-- 265 mov rsp, r9 266 push rcx 267 push rdx 268 269 lea rsi, [PmEntry] ; rsi <- The start address of transition code 540 541 mov r10, [rsp + 48] ; Pm16CodeSegment 542 mov rax, [rsp + 56] ; SevEsAPJumpTable 543 mov rbx, [rsp + 64] ; WakeupBuffer 544 mov rsp, r9 ; TopOfApStack 545 546 push rax ; Save SevEsAPJumpTable 547 push rbx ; Save WakeupBuffer 548 push r10 ; Save Pm16CodeSegment 549 push rcx ; Save MwaitSupport 550 push rdx ; Save ApTargetCState 551 552 lea rax, [PmEntry] ; rax <- The start address of transition code 270 553 271 554 push r8 272 push rsi 273 DB 0x48 274 retf 555 push rax 556 557 ; 558 ; Clear R8 - R15, for reset, before going into 32-bit mode 559 ; 560 xor r8, r8 561 xor r9, r9 562 xor r10, r10 563 xor r11, r11 564 xor r12, r12 565 xor r13, r13 566 xor r14, r14 567 xor r15, r15 568 569 ; 570 ; Far return into 32-bit mode 571 ; 572 o64 retf 573 275 574 BITS 32 276 575 PmEntry: … … 279 578 mov cr0, eax ; Disable paging and caches 280 579 281 mov ebx, edx ; Save EntryPoint to rbx, for rdmsr will overwrite rdx282 580 mov ecx, 0xc0000080 283 581 rdmsr … … 292 590 pop ecx, 293 591 add esp, 4 592 593 MwaitCheck: 294 594 cmp cl, 1 ; Check mwait-monitor support 295 595 jnz HltLoop … … 305 605 mwait 306 606 jmp MwaitLoop 607 307 608 HltLoop: 609 pop edx ; PM16CodeSegment 610 add esp, 4 611 pop ebx ; WakeupBuffer 612 add esp, 4 613 pop eax ; SevEsAPJumpTable 614 add esp, 4 615 cmp eax, 0 ; 
Check for SEV-ES 616 je DoHlt 617 618 cli 619 ; 620 ; SEV-ES is enabled, use VMGEXIT (GHCB information already 621 ; set by caller) 622 ; 623 BITS 64 624 rep vmmcall 625 BITS 32 626 627 ; 628 ; Back from VMGEXIT AP_HLT_LOOP 629 ; Push the FLAGS/CS/IP values to use 630 ; 631 push word 0x0002 ; EFLAGS 632 xor ecx, ecx 633 mov cx, [eax + 2] ; CS 634 push cx 635 mov cx, [eax] ; IP 636 push cx 637 push word 0x0000 ; For alignment, will be discarded 638 639 push edx 640 push ebx 641 642 mov edx, esi ; Restore RDX reset value 643 644 retf 645 646 DoHlt: 308 647 cli 309 648 hlt 310 jmp HltLoop 649 jmp DoHlt 650 311 651 BITS 64 312 652 AsmRelocateApLoopEnd: … … 318 658 ASM_PFX(AsmGetAddressMap): 319 659 lea rax, [ASM_PFX(RendezvousFunnelProc)] 320 mov qword [rcx ], rax321 mov qword [rcx + 8h], LongModeStart - RendezvousFunnelProcStart322 mov qword [rcx + 10h], RendezvousFunnelProcEnd - RendezvousFunnelProcStart660 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelAddress], rax 661 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.ModeEntryOffset], LongModeStart - RendezvousFunnelProcStart 662 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RendezvousFunnelSize], RendezvousFunnelProcEnd - RendezvousFunnelProcStart 323 663 lea rax, [ASM_PFX(AsmRelocateApLoop)] 324 mov qword [rcx + 18h], rax 325 mov qword [rcx + 20h], AsmRelocateApLoopEnd - AsmRelocateApLoopStart 326 mov qword [rcx + 28h], Flat32Start - RendezvousFunnelProcStart 664 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddress], rax 665 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSize], AsmRelocateApLoopEnd - AsmRelocateApLoopStart 666 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.ModeTransitionOffset], Flat32Start - RendezvousFunnelProcStart 667 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealSize], SwitchToRealProcEnd - SwitchToRealProcStart 668 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealOffset], SwitchToRealProcStart - RendezvousFunnelProcStart 669 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealNoNxOffset], SwitchToRealProcStart - Flat32Start 670 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealPM16ModeOffset], PM16Mode - RendezvousFunnelProcStart 671 mov qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealPM16ModeSize], SwitchToRealProcEnd - PM16Mode 327 672 ret 328 673 … … 367 712 ;Store EFLAGS, GDTR and IDTR regiter to stack 368 713 pushfq 369 sgdt [rsi + 16]370 sidt [rsi + 26]714 sgdt [rsi + CPU_EXCHANGE_ROLE_INFO.Gdtr] 715 sidt [rsi + CPU_EXCHANGE_ROLE_INFO.Idtr] 371 716 372 717 ; Store the its StackPointer 373 mov [rsi + 8], rsp718 mov [rsi + CPU_EXCHANGE_ROLE_INFO.StackPointer], rsp 374 719 375 720 ; update its switch state to STORED 376 mov byte [rsi ], CPU_SWITCH_STATE_STORED721 mov byte [rsi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_STORED 377 722 378 723 WaitForOtherStored: 379 724 ; wait until the other CPU finish storing its state 380 cmp byte [rdi ], CPU_SWITCH_STATE_STORED725 cmp byte [rdi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_STORED 381 726 jz OtherStored 382 727 pause … … 386 731 ; Since another CPU already stored its state, load them 387 732 ; load GDTR value 388 lgdt [rdi + 16]733 lgdt [rdi + CPU_EXCHANGE_ROLE_INFO.Gdtr] 389 734 390 735 ; load IDTR value 391 lidt [rdi + 26]736 lidt [rdi + CPU_EXCHANGE_ROLE_INFO.Idtr] 392 737 393 738 ; load its future StackPointer 394 mov rsp, [rdi + 8]739 mov rsp, [rdi + CPU_EXCHANGE_ROLE_INFO.StackPointer] 395 740 396 741 ; update the other CPU's switch state to LOADED 397 mov byte [rdi ], CPU_SWITCH_STATE_LOADED742 mov byte [rdi + 
CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_LOADED 398 743 399 744 WaitForOtherLoaded: 400 745 ; wait until the other CPU finish loading new state, 401 746 ; otherwise the data in stack may corrupt 402 cmp byte [rsi ], CPU_SWITCH_STATE_LOADED747 cmp byte [rsi + CPU_EXCHANGE_ROLE_INFO.State], CPU_SWITCH_STATE_LOADED 403 748 jz OtherLoaded 404 749 pause -
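The AsmExchangeRole portion of the diff above replaces hard-coded byte offsets such as [rsi + 16] and [rdi + 8] with named CPU_EXCHANGE_ROLE_INFO fields; the handshake itself is unchanged: each CPU saves GDTR, IDTR and its stack pointer, marks itself STORED, spins until the peer reports STORED, loads the peer's context, marks the peer LOADED, and finally spins until it is marked LOADED in turn. Below is a minimal C sketch of that state machine, assuming a simplified structure layout and hypothetical SaveMyContext/LoadOthersContext helpers standing in for the sgdt/sidt/lgdt/lidt sequences; the real fields live in the MP library's internal headers.

    #include <Base.h>
    #include <Library/BaseLib.h>

    #define CPU_SWITCH_STATE_IDLE    0   // values are illustrative only
    #define CPU_SWITCH_STATE_STORED  1
    #define CPU_SWITCH_STATE_LOADED  2

    typedef struct {
      volatile UINT8  State;          // CPU_EXCHANGE_ROLE_INFO.State in the assembly
      UINTN           StackPointer;   // saved RSP
      // Gdtr/Idtr images omitted from this sketch
    } EXCHANGE_INFO_SKETCH;

    // Hypothetical helpers standing in for the sgdt/sidt/"mov rsp" blocks.
    VOID SaveMyContext     (IN EXCHANGE_INFO_SKETCH *Info);
    VOID LoadOthersContext (IN EXCHANGE_INFO_SKETCH *Info);

    VOID
    ExchangeRoleSketch (
      IN EXCHANGE_INFO_SKETCH  *MyInfo,
      IN EXCHANGE_INFO_SKETCH  *OthersInfo
      )
    {
      SaveMyContext (MyInfo);                        // store GDTR/IDTR/stack
      MyInfo->State = CPU_SWITCH_STATE_STORED;

      while (OthersInfo->State != CPU_SWITCH_STATE_STORED) {
        CpuPause ();                                 // WaitForOtherStored spin loop
      }

      LoadOthersContext (OthersInfo);                // adopt the peer's GDTR/IDTR/stack
      OthersInfo->State = CPU_SWITCH_STATE_LOADED;

      while (MyInfo->State != CPU_SWITCH_STATE_LOADED) {
        CpuPause ();                                 // peer may still run on our old stack
      }
    }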
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/MtrrLib/MtrrLib.c
r80721 r89983 6 6 except for MtrrSetAllMtrrs() which is used to sync BSP's MTRR setting to APs. 7 7 8 Copyright (c) 2008 - 20 19, Intel Corporation. All rights reserved.<BR>8 Copyright (c) 2008 - 2020, Intel Corporation. All rights reserved.<BR> 9 9 SPDX-License-Identifier: BSD-2-Clause-Patent 10 10 … … 458 458 459 459 /** 460 This function will get the raw value in variable MTRRs461 462 @param[out] VariableSettings A buffer to hold variable MTRRs content.463 464 @return The VariableSettings input pointer465 466 **/467 MTRR_VARIABLE_SETTINGS*468 EFIAPI469 MtrrGetVariableMtrr (470 OUT MTRR_VARIABLE_SETTINGS *VariableSettings471 )472 {473 if (!IsMtrrSupported ()) {474 return VariableSettings;475 }476 477 return MtrrGetVariableMtrrWorker (478 NULL,479 GetVariableMtrrCountWorker (),480 VariableSettings481 );482 }483 484 /**485 460 Programs fixed MTRRs registers. 486 461 … … 2584 2559 } 2585 2560 2586 2587 /**2588 This function sets variable MTRRs2589 2590 @param[in] VariableSettings A buffer to hold variable MTRRs content.2591 2592 @return The pointer of VariableSettings2593 2594 **/2595 MTRR_VARIABLE_SETTINGS*2596 EFIAPI2597 MtrrSetVariableMtrr (2598 IN MTRR_VARIABLE_SETTINGS *VariableSettings2599 )2600 {2601 MTRR_CONTEXT MtrrContext;2602 2603 if (!IsMtrrSupported ()) {2604 return VariableSettings;2605 }2606 2607 MtrrLibPreMtrrChange (&MtrrContext);2608 MtrrSetVariableMtrrWorker (VariableSettings);2609 MtrrLibPostMtrrChange (&MtrrContext);2610 MtrrDebugPrintAllMtrrs ();2611 2612 return VariableSettings;2613 }2614 2615 2561 /** 2616 2562 Worker function setting fixed MTRRs … … 2632 2578 ); 2633 2579 } 2634 }2635 2636 2637 /**2638 This function sets fixed MTRRs2639 2640 @param[in] FixedSettings A buffer to hold fixed MTRRs content.2641 2642 @retval The pointer of FixedSettings2643 2644 **/2645 MTRR_FIXED_SETTINGS*2646 EFIAPI2647 MtrrSetFixedMtrr (2648 IN MTRR_FIXED_SETTINGS *FixedSettings2649 )2650 {2651 MTRR_CONTEXT MtrrContext;2652 2653 if (!IsMtrrSupported ()) {2654 return FixedSettings;2655 }2656 2657 MtrrLibPreMtrrChange (&MtrrContext);2658 MtrrSetFixedMtrrWorker (FixedSettings);2659 MtrrLibPostMtrrChange (&MtrrContext);2660 MtrrDebugPrintAllMtrrs ();2661 2662 return FixedSettings;2663 2580 } 2664 2581 -
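With MtrrGetVariableMtrr(), MtrrSetVariableMtrr() and MtrrSetFixedMtrr() removed, the file-level comment that survives this diff steers callers to the whole-set accessors, keeping MtrrSetAllMtrrs() for syncing the BSP's MTRR configuration to the APs. A hedged sketch of that remaining flow, assuming the MtrrGetAllMtrrs()/MtrrSetAllMtrrs() prototypes from MtrrLib.h and using hypothetical wrapper names:

    #include <Base.h>
    #include <Library/MtrrLib.h>

    //
    // One MTRR_SETTINGS snapshot covers the fixed MTRRs, the variable MTRRs and
    // MSR_IA32_MTRR_DEF_TYPE, so no per-register accessors are required.
    //
    STATIC MTRR_SETTINGS  mBspMtrrSettings;

    VOID
    CaptureBspMtrrs (        // hypothetical name; runs on the BSP
      VOID
      )
    {
      MtrrGetAllMtrrs (&mBspMtrrSettings);
    }

    VOID
    EFIAPI
    SyncMtrrsToAp (          // hypothetical name; run on each AP, e.g. via StartupAllAPs()
      IN OUT VOID  *Buffer
      )
    {
      MtrrSetAllMtrrs (&mBspMtrrSettings);
    }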
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/RegisterCpuFeaturesLib/CpuFeaturesInitialize.c
r85718 r89983 104 104 UINT32 Thread; 105 105 EFI_CPU_PHYSICAL_LOCATION *Location; 106 BOOLEAN *CoresVisited;107 UINTN Index;108 106 UINT32 PackageIndex; 109 107 UINT32 CoreIndex; 110 UINT32 First; 108 UINTN Pages; 109 UINT32 FirstPackage; 110 UINT32 *FirstCore; 111 UINT32 *FirstThread; 111 112 ACPI_CPU_DATA *AcpiCpuData; 112 113 CPU_STATUS_INFORMATION *CpuStatus; 113 UINT32 *ValidCoreCountPerPackage; 114 UINT32 *ThreadCountPerPackage; 115 UINT8 *ThreadCountPerCore; 114 116 UINTN NumberOfCpus; 115 117 UINTN NumberOfEnabledProcessors; … … 128 130 GetNumberOfProcessor (&NumberOfCpus, &NumberOfEnabledProcessors); 129 131 130 CpuFeaturesData->InitOrder = Allocate ZeroPool (sizeof (CPU_FEATURES_INIT_ORDER) * NumberOfCpus);132 CpuFeaturesData->InitOrder = AllocatePages (EFI_SIZE_TO_PAGES (sizeof (CPU_FEATURES_INIT_ORDER) * NumberOfCpus)); 131 133 ASSERT (CpuFeaturesData->InitOrder != NULL); 134 ZeroMem (CpuFeaturesData->InitOrder, sizeof (CPU_FEATURES_INIT_ORDER) * NumberOfCpus); 132 135 133 136 // … … 203 206 // Collect valid core count in each package because not all cores are valid. 204 207 // 205 ValidCoreCountPerPackage= AllocateZeroPool (sizeof (UINT32) * CpuStatus->PackageCount); 206 ASSERT (ValidCoreCountPerPackage != 0); 207 CpuStatus->ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)ValidCoreCountPerPackage; 208 CoresVisited = AllocatePool (sizeof (BOOLEAN) * CpuStatus->MaxCoreCount); 209 ASSERT (CoresVisited != NULL); 210 211 for (Index = 0; Index < CpuStatus->PackageCount; Index ++ ) { 212 ZeroMem (CoresVisited, sizeof (BOOLEAN) * CpuStatus->MaxCoreCount); 213 // 214 // Collect valid cores in Current package. 215 // 216 for (ProcessorNumber = 0; ProcessorNumber < NumberOfCpus; ProcessorNumber++) { 217 Location = &CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.ProcessorInfo.Location; 218 if (Location->Package == Index && !CoresVisited[Location->Core] ) { 219 // 220 // The ValidCores position for Location->Core is valid. 221 // The possible values in ValidCores[Index] are 0 or 1. 222 // FALSE means no valid threads in this Core. 223 // TRUE means have valid threads in this core, no matter the thead count is 1 or more. 
224 // 225 CoresVisited[Location->Core] = TRUE; 226 ValidCoreCountPerPackage[Index]++; 208 ThreadCountPerPackage = AllocateZeroPool (sizeof (UINT32) * CpuStatus->PackageCount); 209 ASSERT (ThreadCountPerPackage != NULL); 210 CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)ThreadCountPerPackage; 211 212 ThreadCountPerCore = AllocateZeroPool (sizeof (UINT8) * CpuStatus->PackageCount * CpuStatus->MaxCoreCount); 213 ASSERT (ThreadCountPerCore != NULL); 214 CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)ThreadCountPerCore; 215 216 for (ProcessorNumber = 0; ProcessorNumber < NumberOfCpus; ProcessorNumber++) { 217 Location = &CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.ProcessorInfo.Location; 218 ThreadCountPerPackage[Location->Package]++; 219 ThreadCountPerCore[Location->Package * CpuStatus->MaxCoreCount + Location->Core]++; 220 } 221 222 for (PackageIndex = 0; PackageIndex < CpuStatus->PackageCount; PackageIndex++) { 223 if (ThreadCountPerPackage[PackageIndex] != 0) { 224 DEBUG ((DEBUG_INFO, "P%02d: Thread Count = %d\n", PackageIndex, ThreadCountPerPackage[PackageIndex])); 225 for (CoreIndex = 0; CoreIndex < CpuStatus->MaxCoreCount; CoreIndex++) { 226 if (ThreadCountPerCore[PackageIndex * CpuStatus->MaxCoreCount + CoreIndex] != 0) { 227 DEBUG (( 228 DEBUG_INFO, " P%02d C%04d, Thread Count = %d\n", PackageIndex, CoreIndex, 229 ThreadCountPerCore[PackageIndex * CpuStatus->MaxCoreCount + CoreIndex] 230 )); 231 } 227 232 } 228 233 } 229 }230 FreePool (CoresVisited);231 232 for (Index = 0; Index <= Package; Index++) {233 DEBUG ((DEBUG_INFO, "Package: %d, Valid Core : %d\n", Index, ValidCoreCountPerPackage[Index]));234 234 } 235 235 … … 241 241 // 242 242 // Initialize CpuFeaturesData->InitOrder[].CpuInfo.First 243 // 244 245 // 246 // Set First.Package for each thread belonging to the first package. 247 // 248 First = MAX_UINT32; 243 // Use AllocatePages () instead of AllocatePool () because pool cannot be freed in PEI phase but page can. 244 // 245 Pages = EFI_SIZE_TO_PAGES (CpuStatus->PackageCount * sizeof (UINT32) + CpuStatus->PackageCount * CpuStatus->MaxCoreCount * sizeof (UINT32)); 246 FirstCore = AllocatePages (Pages); 247 ASSERT (FirstCore != NULL); 248 FirstThread = FirstCore + CpuStatus->PackageCount; 249 250 // 251 // Set FirstPackage, FirstCore[], FirstThread[] to maximum package ID, core ID, thread ID. 252 // 253 FirstPackage = MAX_UINT32; 254 SetMem32 (FirstCore, CpuStatus->PackageCount * sizeof (UINT32), MAX_UINT32); 255 SetMem32 (FirstThread, CpuStatus->PackageCount * CpuStatus->MaxCoreCount * sizeof (UINT32), MAX_UINT32); 256 249 257 for (ProcessorNumber = 0; ProcessorNumber < NumberOfCpus; ProcessorNumber++) { 250 258 Location = &CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.ProcessorInfo.Location; 251 First = MIN (Location->Package, First); 252 } 259 260 // 261 // Save the minimum package ID in the platform. 262 // 263 FirstPackage = MIN (Location->Package, FirstPackage); 264 265 // 266 // Save the minimum core ID per package. 267 // 268 FirstCore[Location->Package] = MIN (Location->Core, FirstCore[Location->Package]); 269 270 // 271 // Save the minimum thread ID per core. 272 // 273 FirstThread[Location->Package * CpuStatus->MaxCoreCount + Location->Core] = MIN ( 274 Location->Thread, 275 FirstThread[Location->Package * CpuStatus->MaxCoreCount + Location->Core] 276 ); 277 } 278 279 // 280 // Update the First field. 
281 // 253 282 for (ProcessorNumber = 0; ProcessorNumber < NumberOfCpus; ProcessorNumber++) { 254 283 Location = &CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.ProcessorInfo.Location; 255 if (Location->Package == First) { 284 285 if (Location->Package == FirstPackage) { 256 286 CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.First.Package = 1; 257 287 } 258 } 259 260 // 261 // Set First.Die/Tile/Module for each thread assuming: 262 // single Die under each package, single Tile under each Die, single Module under each Tile 263 // 264 for (ProcessorNumber = 0; ProcessorNumber < NumberOfCpus; ProcessorNumber++) { 288 289 // 290 // Set First.Die/Tile/Module for each thread assuming: 291 // single Die under each package, single Tile under each Die, single Module under each Tile 292 // 265 293 CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.First.Die = 1; 266 294 CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.First.Tile = 1; 267 295 CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.First.Module = 1; 268 } 269 270 for (PackageIndex = 0; PackageIndex < CpuStatus->PackageCount; PackageIndex++) { 271 // 272 // Set First.Core for each thread in the first core of each package. 273 // 274 First = MAX_UINT32; 275 for (ProcessorNumber = 0; ProcessorNumber < NumberOfCpus; ProcessorNumber++) { 276 Location = &CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.ProcessorInfo.Location; 277 if (Location->Package == PackageIndex) { 278 First = MIN (Location->Core, First); 279 } 280 } 281 282 for (ProcessorNumber = 0; ProcessorNumber < NumberOfCpus; ProcessorNumber++) { 283 Location = &CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.ProcessorInfo.Location; 284 if (Location->Package == PackageIndex && Location->Core == First) { 285 CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.First.Core = 1; 286 } 287 } 288 } 289 290 for (PackageIndex = 0; PackageIndex < CpuStatus->PackageCount; PackageIndex++) { 291 for (CoreIndex = 0; CoreIndex < CpuStatus->MaxCoreCount; CoreIndex++) { 292 // 293 // Set First.Thread for the first thread of each core. 
294 // 295 First = MAX_UINT32; 296 for (ProcessorNumber = 0; ProcessorNumber < NumberOfCpus; ProcessorNumber++) { 297 Location = &CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.ProcessorInfo.Location; 298 if (Location->Package == PackageIndex && Location->Core == CoreIndex) { 299 First = MIN (Location->Thread, First); 300 } 301 } 302 303 for (ProcessorNumber = 0; ProcessorNumber < NumberOfCpus; ProcessorNumber++) { 304 Location = &CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.ProcessorInfo.Location; 305 if (Location->Package == PackageIndex && Location->Core == CoreIndex && Location->Thread == First) { 306 CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.First.Thread = 1; 307 } 308 } 309 } 310 } 296 297 if (Location->Core == FirstCore[Location->Package]) { 298 CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.First.Core = 1; 299 } 300 if (Location->Thread == FirstThread[Location->Package * CpuStatus->MaxCoreCount + Location->Core]) { 301 CpuFeaturesData->InitOrder[ProcessorNumber].CpuInfo.First.Thread = 1; 302 } 303 } 304 305 FreePages (FirstCore, Pages); 311 306 } 312 307 … … 895 890 volatile UINT32 *SemaphorePtr; 896 891 UINT32 FirstThread; 897 UINT32 PackageThreadsCount;898 892 UINT32 CurrentThread; 893 UINT32 CurrentCore; 899 894 UINTN ProcessorIndex; 900 UINT N ValidThreadCount;901 UINT 32 *ValidCoreCountPerPackage;895 UINT32 *ThreadCountPerPackage; 896 UINT8 *ThreadCountPerCore; 902 897 EFI_STATUS Status; 903 898 UINT64 CurrentValue; … … 1030 1025 case CoreDepType: 1031 1026 SemaphorePtr = CpuFlags->CoreSemaphoreCount; 1027 ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore; 1028 1029 CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core; 1032 1030 // 1033 1031 // Get Offset info for the first thread in the core which current thread belongs to. 1034 1032 // 1035 FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core)* CpuStatus->MaxThreadCount;1033 FirstThread = CurrentCore * CpuStatus->MaxThreadCount; 1036 1034 CurrentThread = FirstThread + ApLocation->Thread; 1037 // 1038 // First Notify all threads in current Core that this thread has ready. 1035 1036 // 1037 // Different cores may have different valid threads in them. If driver maintail clearly 1038 // thread index in different cores, the logic will be much complicated. 1039 // Here driver just simply records the max thread number in all cores and use it as expect 1040 // thread number for all cores. 1041 // In below two steps logic, first current thread will Release semaphore for each thread 1042 // in current core. Maybe some threads are not valid in this core, but driver don't 1043 // care. Second, driver will let current thread wait semaphore for all valid threads in 1044 // current core. Because only the valid threads will do release semaphore for this 1045 // thread, driver here only need to wait the valid thread count. 1046 // 1047 1048 // 1049 // First Notify ALL THREADs in current Core that this thread is ready. 
1039 1050 // 1040 1051 for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) { 1041 LibReleaseSemaphore ( (UINT32 *)&SemaphorePtr[FirstThread + ProcessorIndex]);1052 LibReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]); 1042 1053 } 1043 1054 // 1044 // Second, check whether all valid threads in current core have ready.1045 // 1046 for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {1055 // Second, check whether all VALID THREADs (not all threads) in current core are ready. 1056 // 1057 for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex ++) { 1047 1058 LibWaitForSemaphore (&SemaphorePtr[CurrentThread]); 1048 1059 } … … 1051 1062 case PackageDepType: 1052 1063 SemaphorePtr = CpuFlags->PackageSemaphoreCount; 1053 ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;1064 ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage; 1054 1065 // 1055 1066 // Get Offset info for the first thread in the package which current thread belongs to. … … 1059 1070 // Get the possible threads count for current package. 1060 1071 // 1061 PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;1062 1072 CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread; 1063 // 1064 // Get the valid thread count for current package. 1065 // 1066 ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package]; 1067 1068 // 1069 // Different packages may have different valid cores in them. If driver maintail clearly 1070 // cores number in different packages, the logic will be much complicated. 1071 // Here driver just simply records the max core number in all packages and use it as expect 1072 // core number for all packages. 1073 1074 // 1075 // Different packages may have different valid threads in them. If driver maintail clearly 1076 // thread index in different packages, the logic will be much complicated. 1077 // Here driver just simply records the max thread number in all packages and use it as expect 1078 // thread number for all packages. 1073 1079 // In below two steps logic, first current thread will Release semaphore for each thread 1074 1080 // in current package. Maybe some threads are not valid in this package, but driver don't … … 1079 1085 1080 1086 // 1081 // First Notify ALL THREADS in current package that this thread has ready.1082 // 1083 for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount; ProcessorIndex ++) {1084 LibReleaseSemaphore ( (UINT32 *)&SemaphorePtr[FirstThread + ProcessorIndex]);1087 // First Notify ALL THREADS in current package that this thread is ready. 1088 // 1089 for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex ++) { 1090 LibReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]); 1085 1091 } 1086 1092 // 1087 // Second, check whether VALID THREADS (not all threads) in current package have ready.1088 // 1089 for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {1093 // Second, check whether VALID THREADS (not all threads) in current package are ready. 1094 // 1095 for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex ++) { 1090 1096 LibWaitForSemaphore (&SemaphorePtr[CurrentThread]); 1091 1097 } -
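The CpuFeaturesInitialize.c changes above replace the ValidCoreCountPerPackage bookkeeping with two flat tallies, ThreadCountPerPackage and ThreadCountPerCore (indexed as Package * MaxCoreCount + Core), and the enlarged comment spells out the two-step barrier they feed: post to every possible thread slot, then wait only for the threads that actually exist. A self-contained C sketch of the core-scope barrier follows; the function and parameter names are illustrative, and the two semaphore prototypes stand in for the driver's own helpers.

    #include <Base.h>

    // Hypothetical stand-ins for the driver's semaphore helpers.
    VOID LibReleaseSemaphore (IN OUT volatile UINT32 *Sem);
    VOID LibWaitForSemaphore (IN OUT volatile UINT32 *Sem);

    VOID
    CoreScopeBarrierSketch (
      IN volatile UINT32  *SemaphorePtr,        // one counter per possible logical thread
      IN CONST UINT8      *ThreadCountPerCore,  // live-thread tally, Package*MaxCoreCount+Core
      IN UINT32           Package,
      IN UINT32           Core,
      IN UINT32           Thread,
      IN UINT32           MaxCoreCount,
      IN UINT32           MaxThreadCount
      )
    {
      UINT32  CoreSlot;
      UINT32  FirstThread;
      UINT32  Self;
      UINT32  Index;

      CoreSlot    = Package * MaxCoreCount + Core;
      FirstThread = CoreSlot * MaxThreadCount;
      Self        = FirstThread + Thread;

      //
      // 1) Post once to every possible thread slot of this core; posts to slots
      //    without a live thread are harmless because nobody ever waits on them.
      //
      for (Index = 0; Index < MaxThreadCount; Index++) {
        LibReleaseSemaphore (&SemaphorePtr[FirstThread + Index]);
      }

      //
      // 2) Wait only as many times as this core has live threads; only those
      //    threads ever post to our own slot.
      //
      for (Index = 0; Index < ThreadCountPerCore[CoreSlot]; Index++) {
        LibWaitForSemaphore (&SemaphorePtr[Self]);
      }
    }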
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/RegisterCpuFeaturesLib/PeiRegisterCpuFeaturesLib.c
r80721 r89983 2 2 CPU Register Table Library functions. 3 3 4 Copyright (c) 2016 - 20 19, Intel Corporation. All rights reserved.<BR>4 Copyright (c) 2016 - 2020, Intel Corporation. All rights reserved.<BR> 5 5 SPDX-License-Identifier: BSD-2-Clause-Patent 6 6 … … 12 12 #include <Library/PeiServicesLib.h> 13 13 #include <Library/PeiServicesTablePointerLib.h> 14 #include <Ppi/MpServices.h>15 14 #include <Ppi/MpServices2.h> 16 15 … … 76 75 77 76 // 78 // Get MP Services Protocol77 // Get MP Services2 Ppi 79 78 // 80 79 Status = PeiServicesLocatePpi ( 81 &gE fiPeiMpServicesPpiGuid,80 &gEdkiiPeiMpServices2PpiGuid, 82 81 0, 83 82 NULL, … … 101 100 { 102 101 EFI_STATUS Status; 103 E FI_PEI_MP_SERVICES_PPI *CpuMpPpi;102 EDKII_PEI_MP_SERVICES2_PPI *CpuMp2Ppi; 104 103 UINTN ProcessorIndex; 105 104 106 CpuMp Ppi = CpuFeaturesData->MpService.Ppi;105 CpuMp2Ppi = CpuFeaturesData->MpService.Ppi; 107 106 108 107 // … … 111 110 // 2. Check WhoAmI implementation, this parameter will not be used. 112 111 // 113 Status = CpuMp Ppi->WhoAmI(NULL, CpuMpPpi, &ProcessorIndex);112 Status = CpuMp2Ppi->WhoAmI (CpuMp2Ppi, &ProcessorIndex); 114 113 ASSERT_EFI_ERROR (Status); 115 114 return ProcessorIndex; … … 132 131 ) 133 132 { 134 E FI_PEI_MP_SERVICES_PPI *CpuMpPpi;133 EDKII_PEI_MP_SERVICES2_PPI *CpuMp2Ppi; 135 134 EFI_STATUS Status; 136 135 CPU_FEATURES_DATA *CpuFeaturesData; 137 136 138 137 CpuFeaturesData = GetCpuFeaturesData (); 139 CpuMpPpi = CpuFeaturesData->MpService.Ppi; 140 141 Status = CpuMpPpi->GetProcessorInfo ( 142 GetPeiServicesTablePointer(), 143 CpuMpPpi, 138 CpuMp2Ppi = CpuFeaturesData->MpService.Ppi; 139 140 Status = CpuMp2Ppi->GetProcessorInfo ( 141 CpuMp2Ppi, 144 142 ProcessorNumber, 145 143 ProcessorInfoBuffer … … 163 161 { 164 162 EFI_STATUS Status; 165 E FI_PEI_MP_SERVICES_PPI *CpuMpPpi;163 EDKII_PEI_MP_SERVICES2_PPI *CpuMp2Ppi; 166 164 CPU_FEATURES_DATA *CpuFeaturesData; 167 165 168 166 CpuFeaturesData = GetCpuFeaturesData (); 169 CpuMp Ppi = CpuFeaturesData->MpService.Ppi;167 CpuMp2Ppi = CpuFeaturesData->MpService.Ppi; 170 168 171 169 // 172 170 // Wakeup all APs for data collection. 173 171 // 174 Status = CpuMpPpi->StartupAllAPs ( 175 GetPeiServicesTablePointer (), 176 CpuMpPpi, 172 Status = CpuMp2Ppi->StartupAllAPs ( 173 CpuMp2Ppi, 177 174 Procedure, 178 175 FALSE, … … 204 201 // Get MP Services2 Ppi 205 202 // 206 Status = PeiServicesLocatePpi ( 207 &gEdkiiPeiMpServices2PpiGuid, 208 0, 209 NULL, 210 (VOID **)&CpuMp2Ppi 211 ); 212 ASSERT_EFI_ERROR (Status); 213 214 // 215 // Wakeup all APs for data collection. 216 // 203 CpuMp2Ppi = CpuFeaturesData->MpService.Ppi; 217 204 Status = CpuMp2Ppi->StartupAllCPUs ( 218 205 CpuMp2Ppi, … … 235 222 { 236 223 EFI_STATUS Status; 237 E FI_PEI_MP_SERVICES_PPI *CpuMpPpi;224 EDKII_PEI_MP_SERVICES2_PPI *CpuMp2Ppi; 238 225 CPU_FEATURES_DATA *CpuFeaturesData; 239 226 240 227 CpuFeaturesData = GetCpuFeaturesData (); 241 CpuMp Ppi = CpuFeaturesData->MpService.Ppi;228 CpuMp2Ppi = CpuFeaturesData->MpService.Ppi; 242 229 243 230 // 244 231 // Wakeup all APs for data collection. 
245 232 // 246 Status = CpuMpPpi->SwitchBSP ( 247 GetPeiServicesTablePointer (), 248 CpuMpPpi, 233 Status = CpuMp2Ppi->SwitchBSP ( 234 CpuMp2Ppi, 249 235 ProcessorNumber, 250 236 TRUE … … 270 256 { 271 257 EFI_STATUS Status; 272 E FI_PEI_MP_SERVICES_PPI *CpuMpPpi;258 EDKII_PEI_MP_SERVICES2_PPI *CpuMp2Ppi; 273 259 CPU_FEATURES_DATA *CpuFeaturesData; 274 260 275 261 CpuFeaturesData = GetCpuFeaturesData (); 276 CpuMp Ppi = CpuFeaturesData->MpService.Ppi;262 CpuMp2Ppi = CpuFeaturesData->MpService.Ppi; 277 263 278 264 // 279 265 // Get the number of CPUs 280 266 // 281 Status = CpuMpPpi->GetNumberOfProcessors ( 282 GetPeiServicesTablePointer (), 283 CpuMpPpi, 267 Status = CpuMp2Ppi->GetNumberOfProcessors ( 268 CpuMp2Ppi, 284 269 NumberOfCpus, 285 270 NumberOfEnabledProcessors -
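The PEI library instance now consumes gEdkiiPeiMpServices2PpiGuid instead of gEfiPeiMpServicesPpiGuid; the MP Services2 PPI drops the leading EFI_PEI_SERVICES** argument and adds StartupAllCPUs(), which runs the procedure on the BSP as well as on the APs. A minimal locate-and-call sketch following the call shapes visible in the diff above; RunOnAllCpusExample is a hypothetical wrapper, error handling is reduced to asserts, and EFI_AP_PROCEDURE is the worker type this library already uses:

    #include <PiPei.h>
    #include <Library/DebugLib.h>
    #include <Library/PeiServicesLib.h>
    #include <Ppi/MpServices2.h>

    VOID
    RunOnAllCpusExample (
      IN EFI_AP_PROCEDURE  Procedure
      )
    {
      EFI_STATUS                  Status;
      EDKII_PEI_MP_SERVICES2_PPI  *CpuMp2Ppi;
      UINTN                       ProcessorIndex;

      Status = PeiServicesLocatePpi (
                 &gEdkiiPeiMpServices2PpiGuid,
                 0,
                 NULL,
                 (VOID **)&CpuMp2Ppi
                 );
      ASSERT_EFI_ERROR (Status);

      //
      // No EFI_PEI_SERVICES** parameter any more: "This" comes first.
      //
      Status = CpuMp2Ppi->WhoAmI (CpuMp2Ppi, &ProcessorIndex);
      ASSERT_EFI_ERROR (Status);

      //
      // New with MP Services2: the BSP and every enabled AP execute Procedure.
      //
      Status = CpuMp2Ppi->StartupAllCPUs (CpuMp2Ppi, Procedure, 0, NULL);
      ASSERT_EFI_ERROR (Status);
    }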
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/RegisterCpuFeaturesLib/PeiRegisterCpuFeaturesLib.inf
r80721 r89983 2 2 # Register CPU Features Library PEI instance. 3 3 # 4 # Copyright (c) 2017 - 2019, Intel Corporation. All rights reserved.<BR>4 # Copyright (c) 2017 - 2020, Intel Corporation. All rights reserved.<BR> 5 5 # SPDX-License-Identifier: BSD-2-Clause-Patent 6 6 # … 46 46 47 47 [Ppis] 48 gEfiPeiMpServicesPpiGuid ## CONSUMES 49 48 gEdkiiPeiMpServices2PpiGuid ## CONSUMES 50 49 … 56 55 57 56 [Depex] 58 gEfiPeiMpServicesPpiGuid AND gEdkiiCpuFeaturesSetDoneGuid 57 gEdkiiPeiMpServices2PpiGuid AND gEdkiiCpuFeaturesSetDoneGuid -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/RegisterCpuFeaturesLib/RegisterCpuFeatures.h
r85718 r89983 11 11 #include <PiPei.h> 12 12 #include <PiDxe.h> 13 #include <Ppi/MpServices.h>13 #include <Ppi/MpServices2.h> 14 14 #include <Protocol/MpService.h> 15 15 … 65 65 66 66 typedef union { 67 EFI_MP_SERVICES_PROTOCOL *Protocol;68 EFI_PEI_MP_SERVICES_PPI *Ppi;67 EFI_MP_SERVICES_PROTOCOL *Protocol; 68 EDKII_PEI_MP_SERVICES2_PPI *Ppi; 69 69 } MP_SERVICES; 70 70 -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/RegisterCpuFeaturesLib/RegisterCpuFeaturesLib.c
r85718 r89983 2 2 CPU Register Table Library functions. 3 3 4 Copyright (c) 2017 - 202 0, Intel Corporation. All rights reserved.<BR>4 Copyright (c) 2017 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 SPDX-License-Identifier: BSD-2-Clause-Patent 6 6 … … 938 938 939 939 AcpiCpuData = (ACPI_CPU_DATA *) (UINTN) PcdGet64 (PcdCpuS3DataAddress); 940 if (AcpiCpuData != NULL) { 941 return AcpiCpuData; 942 } 943 944 AcpiCpuData = AllocatePages (EFI_SIZE_TO_PAGES (sizeof (ACPI_CPU_DATA))); 945 ASSERT (AcpiCpuData != NULL); 946 947 // 948 // Set PcdCpuS3DataAddress to the base address of the ACPI_CPU_DATA structure 949 // 950 Status = PcdSet64S (PcdCpuS3DataAddress, (UINT64)(UINTN)AcpiCpuData); 951 ASSERT_EFI_ERROR (Status); 952 953 GetNumberOfProcessor (&NumberOfCpus, &NumberOfEnabledProcessors); 954 AcpiCpuData->NumberOfCpus = (UINT32)NumberOfCpus; 955 956 // 957 // Allocate buffer for empty RegisterTable and PreSmmInitRegisterTable for all CPUs 958 // 959 TableSize = 2 * NumberOfCpus * sizeof (CPU_REGISTER_TABLE); 960 RegisterTable = AllocatePages (EFI_SIZE_TO_PAGES (TableSize)); 961 ASSERT (RegisterTable != NULL); 962 963 for (Index = 0; Index < NumberOfCpus; Index++) { 964 Status = GetProcessorInformation (Index, &ProcessorInfoBuffer); 940 if (AcpiCpuData == NULL) { 941 AcpiCpuData = AllocatePages (EFI_SIZE_TO_PAGES (sizeof (ACPI_CPU_DATA))); 942 ASSERT (AcpiCpuData != NULL); 943 ZeroMem (AcpiCpuData, sizeof (ACPI_CPU_DATA)); 944 945 // 946 // Set PcdCpuS3DataAddress to the base address of the ACPI_CPU_DATA structure 947 // 948 Status = PcdSet64S (PcdCpuS3DataAddress, (UINT64)(UINTN)AcpiCpuData); 965 949 ASSERT_EFI_ERROR (Status); 966 950 967 RegisterTable[Index].InitialApicId = (UINT32)ProcessorInfoBuffer.ProcessorId; 968 RegisterTable[Index].TableLength = 0; 969 RegisterTable[Index].AllocatedSize = 0; 970 RegisterTable[Index].RegisterTableEntry = 0; 971 972 RegisterTable[NumberOfCpus + Index].InitialApicId = (UINT32)ProcessorInfoBuffer.ProcessorId; 973 RegisterTable[NumberOfCpus + Index].TableLength = 0; 974 RegisterTable[NumberOfCpus + Index].AllocatedSize = 0; 975 RegisterTable[NumberOfCpus + Index].RegisterTableEntry = 0; 976 } 977 AcpiCpuData->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTable; 978 AcpiCpuData->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)(RegisterTable + NumberOfCpus); 951 GetNumberOfProcessor (&NumberOfCpus, &NumberOfEnabledProcessors); 952 AcpiCpuData->NumberOfCpus = (UINT32)NumberOfCpus; 953 } 954 955 if (AcpiCpuData->RegisterTable == 0 || 956 AcpiCpuData->PreSmmInitRegisterTable == 0) { 957 // 958 // Allocate buffer for empty RegisterTable and PreSmmInitRegisterTable for all CPUs 959 // 960 NumberOfCpus = AcpiCpuData->NumberOfCpus; 961 TableSize = 2 * NumberOfCpus * sizeof (CPU_REGISTER_TABLE); 962 RegisterTable = AllocatePages (EFI_SIZE_TO_PAGES (TableSize)); 963 ASSERT (RegisterTable != NULL); 964 965 for (Index = 0; Index < NumberOfCpus; Index++) { 966 Status = GetProcessorInformation (Index, &ProcessorInfoBuffer); 967 ASSERT_EFI_ERROR (Status); 968 969 RegisterTable[Index].InitialApicId = (UINT32)ProcessorInfoBuffer.ProcessorId; 970 RegisterTable[Index].TableLength = 0; 971 RegisterTable[Index].AllocatedSize = 0; 972 RegisterTable[Index].RegisterTableEntry = 0; 973 974 RegisterTable[NumberOfCpus + Index].InitialApicId = (UINT32)ProcessorInfoBuffer.ProcessorId; 975 RegisterTable[NumberOfCpus + Index].TableLength = 0; 976 RegisterTable[NumberOfCpus + Index].AllocatedSize = 0; 977 RegisterTable[NumberOfCpus + Index].RegisterTableEntry = 0; 
978 } 979 if (AcpiCpuData->RegisterTable == 0) { 980 AcpiCpuData->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTable; 981 } 982 if (AcpiCpuData->PreSmmInitRegisterTable == 0) { 983 AcpiCpuData->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)(RegisterTable + NumberOfCpus); 984 } 985 } 979 986 980 987 return AcpiCpuData; -
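GetAcpiCpuData() in RegisterCpuFeaturesLib.c is now tolerant of a pre-existing, partially initialized ACPI_CPU_DATA: the structure behind PcdCpuS3DataAddress is allocated only while the PCD is still zero, and the register-table arrays are created only for whichever of RegisterTable/PreSmmInitRegisterTable is still unset. Reduced to its control flow, the idempotent-init pattern looks like the fragment below; AllocateEmptyRegisterTables is a hypothetical stand-in for the per-CPU loop shown in the diff.

    ACPI_CPU_DATA  *AcpiCpuData;
    EFI_STATUS     Status;

    AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
    if (AcpiCpuData == NULL) {
      //
      // First producer: allocate and zero the structure, then publish it through
      // the PCD so every later caller resolves to the same instance.
      //
      AcpiCpuData = AllocatePages (EFI_SIZE_TO_PAGES (sizeof (ACPI_CPU_DATA)));
      ASSERT (AcpiCpuData != NULL);
      ZeroMem (AcpiCpuData, sizeof (ACPI_CPU_DATA));
      Status = PcdSet64S (PcdCpuS3DataAddress, (UINT64)(UINTN)AcpiCpuData);
      ASSERT_EFI_ERROR (Status);
    }

    if (AcpiCpuData->RegisterTable == 0 || AcpiCpuData->PreSmmInitRegisterTable == 0) {
      //
      // Fill in only the table pointers that nobody has provided yet.
      //
      AllocateEmptyRegisterTables (AcpiCpuData);   // hypothetical wrapper over the loop above
    }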
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/SmmCpuFeaturesLib/Ia32/SmmStmSupport.c
r80721 r89983 7 7 **/ 8 8 9 #include <PiSmm.h>9 #include <PiMm.h> 10 10 #include <Library/DebugLib.h> 11 11 -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLib.c
r85718 r89983 1 1 /** @file 2 The CPU specific programming for PiSmmCpuDxeSmm module.2 Implementation specific to the SmmCpuFeatureLib library instance. 3 3 4 Copyright (c) 2010 - 2019, Intel Corporation. All rights reserved.<BR>4 Copyright (c) Microsoft Corporation.<BR> 5 5 SPDX-License-Identifier: BSD-2-Clause-Patent 6 6 7 7 **/ 8 8 9 #include <PiSmm.h> 10 #include <Library/SmmCpuFeaturesLib.h> 11 #include <Library/BaseLib.h> 12 #include <Library/MtrrLib.h> 13 #include <Library/PcdLib.h> 14 #include <Library/MemoryAllocationLib.h> 15 #include <Library/DebugLib.h> 16 #include <Register/Intel/Cpuid.h> 17 #include <Register/Intel/SmramSaveStateMap.h> 18 19 // 20 // Machine Specific Registers (MSRs) 21 // 22 #define SMM_FEATURES_LIB_IA32_MTRR_CAP 0x0FE 23 #define SMM_FEATURES_LIB_IA32_FEATURE_CONTROL 0x03A 24 #define SMM_FEATURES_LIB_IA32_SMRR_PHYSBASE 0x1F2 25 #define SMM_FEATURES_LIB_IA32_SMRR_PHYSMASK 0x1F3 26 #define SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSBASE 0x0A0 27 #define SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSMASK 0x0A1 28 #define EFI_MSR_SMRR_MASK 0xFFFFF000 29 #define EFI_MSR_SMRR_PHYS_MASK_VALID BIT11 30 #define SMM_FEATURES_LIB_SMM_FEATURE_CONTROL 0x4E0 31 32 // 33 // MSRs required for configuration of SMM Code Access Check 34 // 35 #define SMM_FEATURES_LIB_IA32_MCA_CAP 0x17D 36 #define SMM_CODE_ACCESS_CHK_BIT BIT58 9 #include <PiMm.h> 10 #include "CpuFeaturesLib.h" 37 11 38 12 /** 39 Internal worker function that is called to complete CPU initialization at the 40 end of SmmCpuFeaturesInitializeProcessor(). 41 42 **/ 43 VOID 44 FinishSmmCpuFeaturesInitializeProcessor ( 45 VOID 46 ); 47 48 // 49 // Set default value to assume SMRR is not supported 50 // 51 BOOLEAN mSmrrSupported = FALSE; 52 53 // 54 // Set default value to assume MSR_SMM_FEATURE_CONTROL is not supported 55 // 56 BOOLEAN mSmmFeatureControlSupported = FALSE; 57 58 // 59 // Set default value to assume IA-32 Architectural MSRs are used 60 // 61 UINT32 mSmrrPhysBaseMsr = SMM_FEATURES_LIB_IA32_SMRR_PHYSBASE; 62 UINT32 mSmrrPhysMaskMsr = SMM_FEATURES_LIB_IA32_SMRR_PHYSMASK; 63 64 // 65 // Set default value to assume MTRRs need to be configured on each SMI 66 // 67 BOOLEAN mNeedConfigureMtrrs = TRUE; 68 69 // 70 // Array for state of SMRR enable on all CPUs 71 // 72 BOOLEAN *mSmrrEnabled; 73 74 /** 75 The constructor function 13 The constructor function for the Traditional MM library instance without STM. 76 14 77 15 @param[in] ImageHandle The firmware allocated handle for the EFI image. … … 88 26 ) 89 27 { 90 UINT32 RegEax; 91 UINT32 RegEdx; 92 UINTN FamilyId; 93 UINTN ModelId; 94 95 // 96 // Retrieve CPU Family and Model 97 // 98 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx); 99 FamilyId = (RegEax >> 8) & 0xf; 100 ModelId = (RegEax >> 4) & 0xf; 101 if (FamilyId == 0x06 || FamilyId == 0x0f) { 102 ModelId = ModelId | ((RegEax >> 12) & 0xf0); 103 } 104 105 // 106 // Check CPUID(CPUID_VERSION_INFO).EDX[12] for MTRR capability 107 // 108 if ((RegEdx & BIT12) != 0) { 109 // 110 // Check MTRR_CAP MSR bit 11 for SMRR support 111 // 112 if ((AsmReadMsr64 (SMM_FEATURES_LIB_IA32_MTRR_CAP) & BIT11) != 0) { 113 mSmrrSupported = TRUE; 114 } 115 } 116 117 // 118 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual 119 // Volume 3C, Section 35.3 MSRs in the Intel(R) Atom(TM) Processor Family 120 // 121 // If CPU Family/Model is 06_1CH, 06_26H, 06_27H, 06_35H or 06_36H, then 122 // SMRR Physical Base and SMM Physical Mask MSRs are not available. 
123 // 124 if (FamilyId == 0x06) { 125 if (ModelId == 0x1C || ModelId == 0x26 || ModelId == 0x27 || ModelId == 0x35 || ModelId == 0x36) { 126 mSmrrSupported = FALSE; 127 } 128 } 129 130 // 131 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual 132 // Volume 3C, Section 35.2 MSRs in the Intel(R) Core(TM) 2 Processor Family 133 // 134 // If CPU Family/Model is 06_0F or 06_17, then use Intel(R) Core(TM) 2 135 // Processor Family MSRs 136 // 137 if (FamilyId == 0x06) { 138 if (ModelId == 0x17 || ModelId == 0x0f) { 139 mSmrrPhysBaseMsr = SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSBASE; 140 mSmrrPhysMaskMsr = SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSMASK; 141 } 142 } 143 144 // 145 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual 146 // Volume 3C, Section 34.4.2 SMRAM Caching 147 // An IA-32 processor does not automatically write back and invalidate its 148 // caches before entering SMM or before exiting SMM. Because of this behavior, 149 // care must be taken in the placement of the SMRAM in system memory and in 150 // the caching of the SMRAM to prevent cache incoherence when switching back 151 // and forth between SMM and protected mode operation. 152 // 153 // An IA-32 processor is a processor that does not support the Intel 64 154 // Architecture. Support for the Intel 64 Architecture can be detected from 155 // CPUID(CPUID_EXTENDED_CPU_SIG).EDX[29] 156 // 157 // If an IA-32 processor is detected, then set mNeedConfigureMtrrs to TRUE, 158 // so caches are flushed on SMI entry and SMI exit, the interrupted code 159 // MTRRs are saved/restored, and MTRRs for SMM are loaded. 160 // 161 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL); 162 if (RegEax >= CPUID_EXTENDED_CPU_SIG) { 163 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx); 164 if ((RegEdx & BIT29) != 0) { 165 mNeedConfigureMtrrs = FALSE; 166 } 167 } 168 169 // 170 // Allocate array for state of SMRR enable on all CPUs 171 // 172 mSmrrEnabled = (BOOLEAN *)AllocatePool (sizeof (BOOLEAN) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); 173 ASSERT (mSmrrEnabled != NULL); 28 CpuFeaturesLibInitialization (); 174 29 175 30 return EFI_SUCCESS; 176 31 } 177 178 /**179 Called during the very first SMI into System Management Mode to initialize180 CPU features, including SMBASE, for the currently executing CPU. Since this181 is the first SMI, the SMRAM Save State Map is at the default address of182 SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET. The currently executing183 CPU is specified by CpuIndex and CpuIndex can be used to access information184 about the currently executing CPU in the ProcessorInfo array and the185 HotPlugCpuData data structure.186 187 @param[in] CpuIndex The index of the CPU to initialize. The value188 must be between 0 and the NumberOfCpus field in189 the System Management System Table (SMST).190 @param[in] IsMonarch TRUE if the CpuIndex is the index of the CPU that191 was elected as monarch during System Management192 Mode initialization.193 FALSE if the CpuIndex is not the index of the CPU194 that was elected as monarch during System195 Management Mode initialization.196 @param[in] ProcessorInfo Pointer to an array of EFI_PROCESSOR_INFORMATION197 structures. 
ProcessorInfo[CpuIndex] contains the198 information for the currently executing CPU.199 @param[in] CpuHotPlugData Pointer to the CPU_HOT_PLUG_DATA structure that200 contains the ApidId and SmBase arrays.201 **/202 VOID203 EFIAPI204 SmmCpuFeaturesInitializeProcessor (205 IN UINTN CpuIndex,206 IN BOOLEAN IsMonarch,207 IN EFI_PROCESSOR_INFORMATION *ProcessorInfo,208 IN CPU_HOT_PLUG_DATA *CpuHotPlugData209 )210 {211 SMRAM_SAVE_STATE_MAP *CpuState;212 UINT64 FeatureControl;213 UINT32 RegEax;214 UINT32 RegEdx;215 UINTN FamilyId;216 UINTN ModelId;217 218 //219 // Configure SMBASE.220 //221 CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);222 CpuState->x86.SMBASE = (UINT32)CpuHotPlugData->SmBase[CpuIndex];223 224 //225 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual226 // Volume 3C, Section 35.2 MSRs in the Intel(R) Core(TM) 2 Processor Family227 //228 // If Intel(R) Core(TM) Core(TM) 2 Processor Family MSRs are being used, then229 // make sure SMRR Enable(BIT3) of MSR_FEATURE_CONTROL MSR(0x3A) is set before230 // accessing SMRR base/mask MSRs. If Lock(BIT0) of MSR_FEATURE_CONTROL MSR(0x3A)231 // is set, then the MSR is locked and can not be modified.232 //233 if (mSmrrSupported && mSmrrPhysBaseMsr == SMM_FEATURES_LIB_IA32_CORE_SMRR_PHYSBASE) {234 FeatureControl = AsmReadMsr64 (SMM_FEATURES_LIB_IA32_FEATURE_CONTROL);235 if ((FeatureControl & BIT3) == 0) {236 if ((FeatureControl & BIT0) == 0) {237 AsmWriteMsr64 (SMM_FEATURES_LIB_IA32_FEATURE_CONTROL, FeatureControl | BIT3);238 } else {239 mSmrrSupported = FALSE;240 }241 }242 }243 244 //245 // If SMRR is supported, then program SMRR base/mask MSRs.246 // The EFI_MSR_SMRR_PHYS_MASK_VALID bit is not set until the first normal SMI.247 // The code that initializes SMM environment is running in normal mode248 // from SMRAM region. 
If SMRR is enabled here, then the SMRAM region249 // is protected and the normal mode code execution will fail.250 //251 if (mSmrrSupported) {252 //253 // SMRR size cannot be less than 4-KBytes254 // SMRR size must be of length 2^n255 // SMRR base alignment cannot be less than SMRR length256 //257 if ((CpuHotPlugData->SmrrSize < SIZE_4KB) ||258 (CpuHotPlugData->SmrrSize != GetPowerOfTwo32 (CpuHotPlugData->SmrrSize)) ||259 ((CpuHotPlugData->SmrrBase & ~(CpuHotPlugData->SmrrSize - 1)) != CpuHotPlugData->SmrrBase)) {260 //261 // Print message and halt if CPU is Monarch262 //263 if (IsMonarch) {264 DEBUG ((DEBUG_ERROR, "SMM Base/Size does not meet alignment/size requirement!\n"));265 CpuDeadLoop ();266 }267 } else {268 AsmWriteMsr64 (mSmrrPhysBaseMsr, CpuHotPlugData->SmrrBase | MTRR_CACHE_WRITE_BACK);269 AsmWriteMsr64 (mSmrrPhysMaskMsr, (~(CpuHotPlugData->SmrrSize - 1) & EFI_MSR_SMRR_MASK));270 mSmrrEnabled[CpuIndex] = FALSE;271 }272 }273 274 //275 // Retrieve CPU Family and Model276 //277 AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx);278 FamilyId = (RegEax >> 8) & 0xf;279 ModelId = (RegEax >> 4) & 0xf;280 if (FamilyId == 0x06 || FamilyId == 0x0f) {281 ModelId = ModelId | ((RegEax >> 12) & 0xf0);282 }283 284 //285 // Intel(R) 64 and IA-32 Architectures Software Developer's Manual286 // Volume 3C, Section 35.10.1 MSRs in 4th Generation Intel(R) Core(TM)287 // Processor Family.288 //289 // If CPU Family/Model is 06_3C, 06_45, or 06_46 then use 4th Generation290 // Intel(R) Core(TM) Processor Family MSRs.291 //292 if (FamilyId == 0x06) {293 if (ModelId == 0x3C || ModelId == 0x45 || ModelId == 0x46 ||294 ModelId == 0x3D || ModelId == 0x47 || ModelId == 0x4E || ModelId == 0x4F ||295 ModelId == 0x3F || ModelId == 0x56 || ModelId == 0x57 || ModelId == 0x5C) {296 //297 // Check to see if the CPU supports the SMM Code Access Check feature298 // Do not access this MSR unless the CPU supports the SmmRegFeatureControl299 //300 if ((AsmReadMsr64 (SMM_FEATURES_LIB_IA32_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) != 0) {301 mSmmFeatureControlSupported = TRUE;302 }303 }304 }305 306 //307 // Call internal worker function that completes the CPU initialization308 //309 FinishSmmCpuFeaturesInitializeProcessor ();310 }311 312 /**313 This function updates the SMRAM save state on the currently executing CPU314 to resume execution at a specific address after an RSM instruction. This315 function must evaluate the SMRAM save state to determine the execution mode316 the RSM instruction resumes and update the resume execution address with317 either NewInstructionPointer32 or NewInstructionPoint. The auto HALT restart318 flag in the SMRAM save state must always be cleared. This function returns319 the value of the instruction pointer from the SMRAM save state that was320 replaced. If this function returns 0, then the SMRAM save state was not321 modified.322 323 This function is called during the very first SMI on each CPU after324 SmmCpuFeaturesInitializeProcessor() to set a flag in normal execution mode325 to signal that the SMBASE of each CPU has been updated before the default326 SMBASE address is used for the first SMI to the next CPU.327 328 @param[in] CpuIndex The index of the CPU to hook. 
The value329 must be between 0 and the NumberOfCpus330 field in the System Management System Table331 (SMST).332 @param[in] CpuState Pointer to SMRAM Save State Map for the333 currently executing CPU.334 @param[in] NewInstructionPointer32 Instruction pointer to use if resuming to335 32-bit execution mode from 64-bit SMM.336 @param[in] NewInstructionPointer Instruction pointer to use if resuming to337 same execution mode as SMM.338 339 @retval 0 This function did modify the SMRAM save state.340 @retval > 0 The original instruction pointer value from the SMRAM save state341 before it was replaced.342 **/343 UINT64344 EFIAPI345 SmmCpuFeaturesHookReturnFromSmm (346 IN UINTN CpuIndex,347 IN SMRAM_SAVE_STATE_MAP *CpuState,348 IN UINT64 NewInstructionPointer32,349 IN UINT64 NewInstructionPointer350 )351 {352 return 0;353 }354 355 /**356 Hook point in normal execution mode that allows the one CPU that was elected357 as monarch during System Management Mode initialization to perform additional358 initialization actions immediately after all of the CPUs have processed their359 first SMI and called SmmCpuFeaturesInitializeProcessor() relocating SMBASE360 into a buffer in SMRAM and called SmmCpuFeaturesHookReturnFromSmm().361 **/362 VOID363 EFIAPI364 SmmCpuFeaturesSmmRelocationComplete (365 VOID366 )367 {368 }369 370 /**371 Determines if MTRR registers must be configured to set SMRAM cache-ability372 when executing in System Management Mode.373 374 @retval TRUE MTRR registers must be configured to set SMRAM cache-ability.375 @retval FALSE MTRR registers do not need to be configured to set SMRAM376 cache-ability.377 **/378 BOOLEAN379 EFIAPI380 SmmCpuFeaturesNeedConfigureMtrrs (381 VOID382 )383 {384 return mNeedConfigureMtrrs;385 }386 387 /**388 Disable SMRR register if SMRR is supported and SmmCpuFeaturesNeedConfigureMtrrs()389 returns TRUE.390 **/391 VOID392 EFIAPI393 SmmCpuFeaturesDisableSmrr (394 VOID395 )396 {397 if (mSmrrSupported && mNeedConfigureMtrrs) {398 AsmWriteMsr64 (mSmrrPhysMaskMsr, AsmReadMsr64(mSmrrPhysMaskMsr) & ~EFI_MSR_SMRR_PHYS_MASK_VALID);399 }400 }401 402 /**403 Enable SMRR register if SMRR is supported and SmmCpuFeaturesNeedConfigureMtrrs()404 returns TRUE.405 **/406 VOID407 EFIAPI408 SmmCpuFeaturesReenableSmrr (409 VOID410 )411 {412 if (mSmrrSupported && mNeedConfigureMtrrs) {413 AsmWriteMsr64 (mSmrrPhysMaskMsr, AsmReadMsr64(mSmrrPhysMaskMsr) | EFI_MSR_SMRR_PHYS_MASK_VALID);414 }415 }416 417 /**418 Processor specific hook point each time a CPU enters System Management Mode.419 420 @param[in] CpuIndex The index of the CPU that has entered SMM. The value421 must be between 0 and the NumberOfCpus field in the422 System Management System Table (SMST).423 **/424 VOID425 EFIAPI426 SmmCpuFeaturesRendezvousEntry (427 IN UINTN CpuIndex428 )429 {430 //431 // If SMRR is supported and this is the first normal SMI, then enable SMRR432 //433 if (mSmrrSupported && !mSmrrEnabled[CpuIndex]) {434 AsmWriteMsr64 (mSmrrPhysMaskMsr, AsmReadMsr64 (mSmrrPhysMaskMsr) | EFI_MSR_SMRR_PHYS_MASK_VALID);435 mSmrrEnabled[CpuIndex] = TRUE;436 }437 }438 439 /**440 Processor specific hook point each time a CPU exits System Management Mode.441 442 @param[in] CpuIndex The index of the CPU that is exiting SMM. 
The value must443 be between 0 and the NumberOfCpus field in the System444 Management System Table (SMST).445 **/446 VOID447 EFIAPI448 SmmCpuFeaturesRendezvousExit (449 IN UINTN CpuIndex450 )451 {452 }453 454 /**455 Check to see if an SMM register is supported by a specified CPU.456 457 @param[in] CpuIndex The index of the CPU to check for SMM register support.458 The value must be between 0 and the NumberOfCpus field459 in the System Management System Table (SMST).460 @param[in] RegName Identifies the SMM register to check for support.461 462 @retval TRUE The SMM register specified by RegName is supported by the CPU463 specified by CpuIndex.464 @retval FALSE The SMM register specified by RegName is not supported by the465 CPU specified by CpuIndex.466 **/467 BOOLEAN468 EFIAPI469 SmmCpuFeaturesIsSmmRegisterSupported (470 IN UINTN CpuIndex,471 IN SMM_REG_NAME RegName472 )473 {474 if (mSmmFeatureControlSupported && RegName == SmmRegFeatureControl) {475 return TRUE;476 }477 return FALSE;478 }479 480 /**481 Returns the current value of the SMM register for the specified CPU.482 If the SMM register is not supported, then 0 is returned.483 484 @param[in] CpuIndex The index of the CPU to read the SMM register. The485 value must be between 0 and the NumberOfCpus field in486 the System Management System Table (SMST).487 @param[in] RegName Identifies the SMM register to read.488 489 @return The value of the SMM register specified by RegName from the CPU490 specified by CpuIndex.491 **/492 UINT64493 EFIAPI494 SmmCpuFeaturesGetSmmRegister (495 IN UINTN CpuIndex,496 IN SMM_REG_NAME RegName497 )498 {499 if (mSmmFeatureControlSupported && RegName == SmmRegFeatureControl) {500 return AsmReadMsr64 (SMM_FEATURES_LIB_SMM_FEATURE_CONTROL);501 }502 return 0;503 }504 505 /**506 Sets the value of an SMM register on a specified CPU.507 If the SMM register is not supported, then no action is performed.508 509 @param[in] CpuIndex The index of the CPU to write the SMM register. The510 value must be between 0 and the NumberOfCpus field in511 the System Management System Table (SMST).512 @param[in] RegName Identifies the SMM register to write.513 registers are read-only.514 @param[in] Value The value to write to the SMM register.515 **/516 VOID517 EFIAPI518 SmmCpuFeaturesSetSmmRegister (519 IN UINTN CpuIndex,520 IN SMM_REG_NAME RegName,521 IN UINT64 Value522 )523 {524 if (mSmmFeatureControlSupported && RegName == SmmRegFeatureControl) {525 AsmWriteMsr64 (SMM_FEATURES_LIB_SMM_FEATURE_CONTROL, Value);526 }527 }528 529 /**530 Read an SMM Save State register on the target processor. If this function531 returns EFI_UNSUPPORTED, then the caller is responsible for reading the532 SMM Save Sate register.533 534 @param[in] CpuIndex The index of the CPU to read the SMM Save State. 
The535 value must be between 0 and the NumberOfCpus field in536 the System Management System Table (SMST).537 @param[in] Register The SMM Save State register to read.538 @param[in] Width The number of bytes to read from the CPU save state.539 @param[out] Buffer Upon return, this holds the CPU register value read540 from the save state.541 542 @retval EFI_SUCCESS The register was read from Save State.543 @retval EFI_INVALID_PARAMETER Buffer is NULL.544 @retval EFI_UNSUPPORTED This function does not support reading Register.545 546 **/547 EFI_STATUS548 EFIAPI549 SmmCpuFeaturesReadSaveStateRegister (550 IN UINTN CpuIndex,551 IN EFI_SMM_SAVE_STATE_REGISTER Register,552 IN UINTN Width,553 OUT VOID *Buffer554 )555 {556 return EFI_UNSUPPORTED;557 }558 559 /**560 Writes an SMM Save State register on the target processor. If this function561 returns EFI_UNSUPPORTED, then the caller is responsible for writing the562 SMM Save Sate register.563 564 @param[in] CpuIndex The index of the CPU to write the SMM Save State. The565 value must be between 0 and the NumberOfCpus field in566 the System Management System Table (SMST).567 @param[in] Register The SMM Save State register to write.568 @param[in] Width The number of bytes to write to the CPU save state.569 @param[in] Buffer Upon entry, this holds the new CPU register value.570 571 @retval EFI_SUCCESS The register was written to Save State.572 @retval EFI_INVALID_PARAMETER Buffer is NULL.573 @retval EFI_UNSUPPORTED This function does not support writing Register.574 **/575 EFI_STATUS576 EFIAPI577 SmmCpuFeaturesWriteSaveStateRegister (578 IN UINTN CpuIndex,579 IN EFI_SMM_SAVE_STATE_REGISTER Register,580 IN UINTN Width,581 IN CONST VOID *Buffer582 )583 {584 return EFI_UNSUPPORTED;585 }586 587 /**588 This function is hook point called after the gEfiSmmReadyToLockProtocolGuid589 notification is completely processed.590 **/591 VOID592 EFIAPI593 SmmCpuFeaturesCompleteSmmReadyToLock (594 VOID595 )596 {597 }598 599 /**600 This API provides a method for a CPU to allocate a specific region for storing page tables.601 602 This API can be called more once to allocate memory for page tables.603 604 Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the605 allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL606 is returned. If there is not enough memory remaining to satisfy the request, then NULL is607 returned.608 609 This function can also return NULL if there is no preference on where the page tables are allocated in SMRAM.610 611 @param Pages The number of 4 KB pages to allocate.612 613 @return A pointer to the allocated buffer for page tables.614 @retval NULL Fail to allocate a specific region for storing page tables,615 Or there is no preference on where the page tables are allocated in SMRAM.616 617 **/618 VOID *619 EFIAPI620 SmmCpuFeaturesAllocatePageTableMemory (621 IN UINTN Pages622 )623 {624 return NULL;625 }626 -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLib.inf
r80721 r89983 18 18 19 19 [Sources] 20 CpuFeaturesLib.h 20 21 SmmCpuFeaturesLib.c 22 SmmCpuFeaturesLibCommon.c 21 23 SmmCpuFeaturesLibNoStm.c 24 TraditionalMmCpuFeaturesLib.c 22 25 23 26 [Packages] -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibNoStm.c
r80721 r89983 8 8 **/ 9 9 10 #include <PiSmm.h>10 #include <PiMm.h> 11 11 #include <Library/SmmCpuFeaturesLib.h> 12 #include "CpuFeaturesLib.h" 12 13 13 14 /** -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf
r80721 r89983 19 19 20 20 [Sources] 21 SmmCpuFeaturesLib.c 21 CpuFeaturesLib.h 22 SmmCpuFeaturesLibCommon.c 22 23 SmmStm.c 23 24 SmmStm.h 25 TraditionalMmCpuFeaturesLib.c 24 26 25 27 [Sources.Ia32] -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmStm.c
r85718 r89983 7 7 **/ 8 8 9 #include <Pi Smm.h>9 #include <PiMm.h> 10 10 #include <Library/BaseLib.h> 11 11 #include <Library/BaseMemoryLib.h> … … 22 22 #include <Protocol/MpService.h> 23 23 24 #include "CpuFeaturesLib.h" 24 25 #include "SmmStm.h" 25 26 … … 29 30 #define RDWR_ACCS 3 30 31 #define FULL_ACCS 7 31 32 /**33 The constructor function34 35 @param[in] ImageHandle The firmware allocated handle for the EFI image.36 @param[in] SystemTable A pointer to the EFI System Table.37 38 @retval EFI_SUCCESS The constructor always returns EFI_SUCCESS.39 40 **/41 EFI_STATUS42 EFIAPI43 SmmCpuFeaturesLibConstructor (44 IN EFI_HANDLE ImageHandle,45 IN EFI_SYSTEM_TABLE *SystemTable46 );47 32 48 33 EFI_HANDLE mStmSmmCpuHandle = NULL; … … 112 97 113 98 /** 114 The constructor function 99 The constructor function for the Traditional MM library instance with STM. 115 100 116 101 @param[in] ImageHandle The firmware allocated handle for the EFI image. … … 138 123 139 124 // 140 // Call the common constructor function 141 // 142 Status = SmmCpuFeaturesLibConstructor (ImageHandle, SystemTable); 143 ASSERT_EFI_ERROR (Status); 125 // Perform library initialization common across all instances 126 // 127 CpuFeaturesLibInitialization (); 144 128 145 129 // -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/Library/SmmCpuFeaturesLib/X64/SmmStmSupport.c
r80721 r89983 7 7 **/ 8 8 9 #include <PiSmm.h>9 #include <PiMm.h> 10 10 #include <Library/DebugLib.h> 11 11 -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCommunication/PiSmmCommunicationPei.c
r85718 r89983 2 2 PiSmmCommunication PEI Driver. 3 3 4 Copyright (c) 2010 - 20 15, Intel Corporation. All rights reserved.<BR>4 Copyright (c) 2010 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 SPDX-License-Identifier: BSD-2-Clause-Patent 6 6 … … 48 48 | EFI_SMM_COMMUNICATION_CONTEXT | 49 49 | SwSmiNumber | <- SMRAM 50 | BufferPtrAddress |---------------- 51 +----------------------------------+ | 52 | 53 +----------------------------------+ | 54 | EFI_SMM_COMMUNICATION_ACPI_TABLE | | 55 | SwSmiNumber | <- AcpiTable | 56 | BufferPtrAddress |--- | 57 +----------------------------------+ | | 58 | | 59 +----------------------------------+<--------------- 50 | BufferPtrAddress |--- 51 +----------------------------------+ | 52 | 53 +----------------------------------+<-- 60 54 | Communication Buffer Pointer | <- AcpiNvs 61 55 +----------------------------------+--- -
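The reworked comment in PiSmmCommunicationPei.c drops the EFI_SMM_COMMUNICATION_ACPI_TABLE box from the layout picture: the SMRAM-resident EFI_SMM_COMMUNICATION_CONTEXT alone now records the SW SMI number and the address of the communication-buffer pointer kept in ACPI NVS. Purely as an illustration of the two remaining boxes (field names follow the diagram, not the driver's private header):

    #include <PiPei.h>

    //
    // Illustrative only: mirrors the two boxes in the updated diagram above.
    //
    typedef struct {
      UINTN                 SwSmiNumber;       // SW SMI used to reach the SMM handler
      EFI_PHYSICAL_ADDRESS  BufferPtrAddress;  // address of the "Communication Buffer
                                               // Pointer" cell placed in ACPI NVS
    } SMM_COMMUNICATION_CONTEXT_SKETCH;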
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
r85718 r89983 2 2 Code for Processor S3 restoration 3 3 4 Copyright (c) 2006 - 20 19, Intel Corporation. All rights reserved.<BR>4 Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 SPDX-License-Identifier: BSD-2-Clause-Patent 6 6 … … 236 236 volatile UINT32 *SemaphorePtr; 237 237 UINT32 FirstThread; 238 UINT32 PackageThreadsCount;239 238 UINT32 CurrentThread; 239 UINT32 CurrentCore; 240 240 UINTN ProcessorIndex; 241 UINT N ValidThreadCount;242 UINT 32 *ValidCoreCountPerPackage;241 UINT32 *ThreadCountPerPackage; 242 UINT8 *ThreadCountPerCore; 243 243 EFI_STATUS Status; 244 244 UINT64 CurrentValue; … … 373 373 ASSERT ( 374 374 (ApLocation != NULL) && 375 (CpuStatus->ValidCoreCountPerPackage != 0) && 375 (CpuStatus->ThreadCountPerPackage != 0) && 376 (CpuStatus->ThreadCountPerCore != 0) && 376 377 (CpuFlags->CoreSemaphoreCount != NULL) && 377 378 (CpuFlags->PackageSemaphoreCount != NULL) … … 380 381 case CoreDepType: 381 382 SemaphorePtr = CpuFlags->CoreSemaphoreCount; 383 ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore; 384 385 CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core; 382 386 // 383 387 // Get Offset info for the first thread in the core which current thread belongs to. 384 388 // 385 FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core)* CpuStatus->MaxThreadCount;389 FirstThread = CurrentCore * CpuStatus->MaxThreadCount; 386 390 CurrentThread = FirstThread + ApLocation->Thread; 387 // 388 // First Notify all threads in current Core that this thread has ready. 391 392 // 393 // Different cores may have different valid threads in them. If driver maintail clearly 394 // thread index in different cores, the logic will be much complicated. 395 // Here driver just simply records the max thread number in all cores and use it as expect 396 // thread number for all cores. 397 // In below two steps logic, first current thread will Release semaphore for each thread 398 // in current core. Maybe some threads are not valid in this core, but driver don't 399 // care. Second, driver will let current thread wait semaphore for all valid threads in 400 // current core. Because only the valid threads will do release semaphore for this 401 // thread, driver here only need to wait the valid thread count. 402 // 403 404 // 405 // First Notify ALL THREADs in current Core that this thread is ready. 389 406 // 390 407 for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) { … … 392 409 } 393 410 // 394 // Second, check whether all valid threads in current core have ready.395 // 396 for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {411 // Second, check whether all VALID THREADs (not all threads) in current core are ready. 412 // 413 for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex ++) { 397 414 S3WaitForSemaphore (&SemaphorePtr[CurrentThread]); 398 415 } … … 401 418 case PackageDepType: 402 419 SemaphorePtr = CpuFlags->PackageSemaphoreCount; 403 ValidCoreCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoreCountPerPackage;420 ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage; 404 421 // 405 422 // Get Offset info for the first thread in the package which current thread belongs to. … … 409 426 // Get the possible threads count for current package. 
410 427 // 411 PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;412 428 CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread; 413 // 414 // Get the valid thread count for current package. 415 // 416 ValidThreadCount = CpuStatus->MaxThreadCount * ValidCoreCountPerPackage[ApLocation->Package]; 417 418 // 419 // Different packages may have different valid cores in them. If driver maintail clearly 420 // cores number in different packages, the logic will be much complicated. 421 // Here driver just simply records the max core number in all packages and use it as expect 422 // core number for all packages. 429 430 // 431 // Different packages may have different valid threads in them. If driver maintail clearly 432 // thread index in different packages, the logic will be much complicated. 433 // Here driver just simply records the max thread number in all packages and use it as expect 434 // thread number for all packages. 423 435 // In below two steps logic, first current thread will Release semaphore for each thread 424 436 // in current package. Maybe some threads are not valid in this package, but driver don't … … 429 441 430 442 // 431 // First Notify all threads in current package that this thread has ready.432 // 433 for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount; ProcessorIndex ++) {443 // First Notify ALL THREADS in current package that this thread is ready. 444 // 445 for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex ++) { 434 446 S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]); 435 447 } 436 448 // 437 // Second, check whether all valid threads in current package have ready.438 // 439 for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {449 // Second, check whether VALID THREADS (not all threads) in current package are ready. 450 // 451 for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex ++) { 440 452 S3WaitForSemaphore (&SemaphorePtr[CurrentThread]); 441 453 } … … 475 487 } else { 476 488 RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable; 489 } 490 if (RegisterTables == NULL) { 491 return; 477 492 } 478 493 … … 937 952 938 953 /** 939 Copy register table from ACPI NVS memoryinto SMRAM.954 Copy register table from non-SMRAM into SMRAM. 940 955 941 956 @param[in] DestinationRegisterTableList Points to destination register table. … … 956 971 CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE)); 957 972 for (Index = 0; Index < NumberOfCpus; Index++) { 958 if (DestinationRegisterTableList[Index].AllocatedSize != 0) { 973 if (DestinationRegisterTableList[Index].TableLength != 0) { 974 DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY); 959 975 RegisterTableEntry = AllocateCopyPool ( 960 976 DestinationRegisterTableList[Index].AllocatedSize, … … 968 984 969 985 /** 986 Check whether the register table is empty or not. 987 988 @param[in] RegisterTable Point to the register table. 989 @param[in] NumberOfCpus Number of CPUs. 990 991 @retval TRUE The register table is empty. 992 @retval FALSE The register table is not empty. 
993 **/ 994 BOOLEAN 995 IsRegisterTableEmpty ( 996 IN CPU_REGISTER_TABLE *RegisterTable, 997 IN UINT32 NumberOfCpus 998 ) 999 { 1000 UINTN Index; 1001 1002 if (RegisterTable != NULL) { 1003 for (Index = 0; Index < NumberOfCpus; Index++) { 1004 if (RegisterTable[Index].TableLength != 0) { 1005 return FALSE; 1006 } 1007 } 1008 } 1009 1010 return TRUE; 1011 } 1012 1013 /** 970 1014 Get ACPI CPU data. 971 1015 … … 1021 1065 CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR)); 1022 1066 1023 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE)); 1024 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0); 1025 1026 CopyRegisterTable ( 1027 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable, 1028 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable, 1029 mAcpiCpuData.NumberOfCpus 1030 ); 1031 1032 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE)); 1033 ASSERT (mAcpiCpuData.RegisterTable != 0); 1034 1035 CopyRegisterTable ( 1036 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable, 1037 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable, 1038 mAcpiCpuData.NumberOfCpus 1039 ); 1067 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) { 1068 mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE)); 1069 ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0); 1070 1071 CopyRegisterTable ( 1072 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable, 1073 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable, 1074 mAcpiCpuData.NumberOfCpus 1075 ); 1076 } else { 1077 mAcpiCpuData.PreSmmInitRegisterTable = 0; 1078 } 1079 1080 if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable, mAcpiCpuData.NumberOfCpus)) { 1081 mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE)); 1082 ASSERT (mAcpiCpuData.RegisterTable != 0); 1083 1084 CopyRegisterTable ( 1085 (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable, 1086 (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable, 1087 mAcpiCpuData.NumberOfCpus 1088 ); 1089 } else { 1090 mAcpiCpuData.RegisterTable = 0; 1091 } 1040 1092 1041 1093 // … … 1060 1112 CpuStatus = &mAcpiCpuData.CpuStatus; 1061 1113 CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION)); 1062 if (AcpiCpuData->CpuStatus. ValidCoreCountPerPackage != 0) {1063 CpuStatus-> ValidCoreCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (1114 if (AcpiCpuData->CpuStatus.ThreadCountPerPackage != 0) { 1115 CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool ( 1064 1116 sizeof (UINT32) * CpuStatus->PackageCount, 1065 (UINT32 *)(UINTN)AcpiCpuData->CpuStatus. 
ValidCoreCountPerPackage1117 (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ThreadCountPerPackage 1066 1118 ); 1067 ASSERT (CpuStatus->ValidCoreCountPerPackage != 0); 1119 ASSERT (CpuStatus->ThreadCountPerPackage != 0); 1120 } 1121 if (AcpiCpuData->CpuStatus.ThreadCountPerCore != 0) { 1122 CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool ( 1123 sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount), 1124 (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ThreadCountPerCore 1125 ); 1126 ASSERT (CpuStatus->ThreadCountPerCore != 0); 1068 1127 } 1069 1128 if (AcpiCpuData->ApLocation != 0) { -
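The core- and package-scoped synchronization in CpuS3.c above reduces to two pieces: flat index arithmetic over (Package, Core, Thread), and a "release to every slot, wait only on the valid count" semaphore handshake. The following is a minimal standalone C sketch of that arithmetic and of the release loop, not the edk2 code; the names (ap_location, sem_release) and the example topology values are illustrative, and the wait side is only described in a comment because a real blocking wait needs the other processors.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in; the real code uses S3ReleaseSemaphore/S3WaitForSemaphore
   on volatile UINT32 counters shared by all logical processors. */
static void sem_release (volatile uint32_t *sem) { ++*sem; }

typedef struct { uint32_t package, core, thread; } ap_location;

int main (void)
{
  const uint32_t max_core_count   = 4;              /* stands in for CpuStatus->MaxCoreCount   */
  const uint32_t max_thread_count = 2;              /* stands in for CpuStatus->MaxThreadCount */
  ap_location    loc              = { 1, 2, 1 };    /* one example AP                          */

  /* Core-scoped indices, as in the CoreDepType branch above. */
  uint32_t current_core   = loc.package * max_core_count + loc.core;
  uint32_t first_thread   = current_core * max_thread_count;
  uint32_t current_thread = first_thread + loc.thread;
  printf ("core %u: first thread slot %u, this thread slot %u\n",
          (unsigned)current_core, (unsigned)first_thread, (unsigned)current_thread);

  /* Package-scoped indices, as in the PackageDepType branch above. */
  uint32_t pkg_first_thread = loc.package * max_core_count * max_thread_count;
  uint32_t pkg_current      = pkg_first_thread + max_thread_count * loc.core + loc.thread;
  printf ("package %u: first thread slot %u, this thread slot %u\n",
          (unsigned)loc.package, (unsigned)pkg_first_thread, (unsigned)pkg_current);

  /* Step 1: notify every slot of the group, valid or not.
     Step 2 (not shown): the real code then waits on its own slot once per VALID
     member, i.e. ThreadCountPerCore[current_core] or ThreadCountPerPackage[package],
     so missing threads never have to post a release. */
  volatile uint32_t sem[64] = { 0 };
  for (uint32_t i = 0; i < max_thread_count; i++) {
    sem_release (&sem[first_thread + i]);
  }
  printf ("releases seen by slot %u: %u\n",
          (unsigned)current_thread, (unsigned)sem[current_thread]);
  return 0;
}

The point of recording per-core/per-package valid counts instead of tracking exact thread indices is exactly what the new comments say: everyone signals the whole group, but only as many signals are consumed as there are threads that can actually answer.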
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
r85718 r89983 28 28 VOID 29 29 ); 30 31 /** 32 Get page table base address and the depth of the page table. 33 34 @param[out] Base Page table base address. 35 @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging. 36 **/ 37 VOID 38 GetPageTable ( 39 OUT UINTN *Base, 40 OUT BOOLEAN *FiveLevels OPTIONAL 41 ) 42 { 43 *Base = ((mInternalCr3 == 0) ? 44 (AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64) : 45 mInternalCr3); 46 if (FiveLevels != NULL) { 47 *FiveLevels = FALSE; 48 } 49 } 30 50 31 51 /** … … 227 247 UINT64 *L2PageTable; 228 248 UINT64 *L3PageTable; 249 UINTN PageTableBase; 229 250 BOOLEAN IsSplitted; 230 251 BOOLEAN PageTableSplitted; … … 269 290 PageTableSplitted = FALSE; 270 291 271 L3PageTable = (UINT64 *)GetPageTableBase (); 272 273 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); 292 GetPageTable (&PageTableBase, NULL); 293 L3PageTable = (UINT64 *)PageTableBase; 294 295 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); 274 296 PageTableSplitted = (PageTableSplitted || IsSplitted); 275 297 -
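The IA32 GetPageTable() introduced above folds the old GetPageTableBase() behaviour (prefer the explicitly installed mInternalCr3, otherwise read CR3) into one routine that can also report the paging depth. Below is a hedged standalone sketch of that dispatch only; read_cr3_stub() and g_internal_cr3 are stand-ins, since user-mode C cannot read the control register, and on IA32 the answer to "five levels?" is always no.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ULL

static uint64_t g_internal_cr3;                                          /* mirrors mInternalCr3   */
static uint64_t read_cr3_stub (void) { return 0x0000000012345FFFULL; }   /* stand-in for AsmReadCr3 */

/* Return the active page-table base; on IA32 the walk is never 5-level. */
static void get_page_table (uintptr_t *base, bool *five_levels)
{
  *base = (uintptr_t)((g_internal_cr3 == 0)
            ? (read_cr3_stub () & PAGING_4K_ADDRESS_MASK_64)
            : g_internal_cr3);
  if (five_levels != NULL) {
    *five_levels = false;
  }
}

int main (void)
{
  uintptr_t base;
  bool      five;

  get_page_table (&base, &five);        /* no override installed        */
  printf ("base=%#llx five-level=%d\n", (unsigned long long)base, five);

  g_internal_cr3 = 0x80000000ULL;       /* an override, as SetPageTableBase() would install */
  get_page_table (&base, NULL);         /* caller that does not care about the depth        */
  printf ("base=%#llx\n", (unsigned long long)base);
  return 0;
}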
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
r80721 r89983 1 1 ;------------------------------------------------------------------------------ ; 2 2 ; Copyright (c) 2016 - 2019, Intel Corporation. All rights reserved.<BR> 3 ; Copyright (c) 2020, AMD Incorporated. All rights reserved.<BR> 3 4 ; SPDX-License-Identifier: BSD-2-Clause-Patent 4 5 ; … … 60 61 extern ASM_PFX(mXdSupported) 61 62 global ASM_PFX(gPatchXdSupported) 63 global ASM_PFX(gPatchMsrIa32MiscEnableSupported) 62 64 extern ASM_PFX(gSmiHandlerIdtr) 63 65 … … 154 156 cmp al, 0 155 157 jz @SkipXd 158 159 ; If MSR_IA32_MISC_ENABLE is supported, clear XD Disable bit 160 mov al, strict byte 1 ; source operand may be patched 161 ASM_PFX(gPatchMsrIa32MiscEnableSupported): 162 cmp al, 1 163 jz MsrIa32MiscEnableSupported 164 165 ; MSR_IA32_MISC_ENABLE not supported 166 xor edx, edx 167 push edx ; don't try to restore the XD Disable bit just before RSM 168 jmp EnableNxe 169 156 170 ; 157 171 ; Check XD disable bit 158 172 ; 173 MsrIa32MiscEnableSupported: 159 174 mov ecx, MSR_IA32_MISC_ENABLE 160 175 rdmsr 161 176 push edx ; save MSR_IA32_MISC_ENABLE[63-32] 162 177 test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34] 163 jz .5178 jz EnableNxe 164 179 and dx, 0xFFFB ; clear XD Disable bit if it is set 165 180 wrmsr 166 .5:181 EnableNxe: 167 182 mov ecx, MSR_EFER 168 183 rdmsr -
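The new gPatchMsrIa32MiscEnableSupported byte lets the SMI entry skip MSR_IA32_MISC_ENABLE entirely on processors that do not implement it, while still recording what must be restored just before RSM (zero means "restore nothing"). The following standalone C sketch models only that decision; rdmsr_stub/wrmsr_stub replace the privileged instructions and the fake MSR value is made up, but the bit and the save/clear logic mirror the assembly above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_MISC_ENABLE_XD_DISABLE  (1ULL << 34)   /* bit 34, i.e. bit 2 of EDX */

static uint64_t g_misc_enable = MSR_IA32_MISC_ENABLE_XD_DISABLE;   /* fake MSR state  */
static uint64_t rdmsr_stub (void)           { return g_misc_enable; }
static void     wrmsr_stub (uint64_t value) { g_misc_enable = value; }

/* Returns the high 32 bits of MSR_IA32_MISC_ENABLE to restore before RSM,
   or 0 when the MSR is not implemented and must not be touched. */
static uint32_t clear_xd_disable (bool misc_enable_supported)
{
  if (!misc_enable_supported) {
    return 0;                                            /* patched AMD path */
  }
  uint64_t value      = rdmsr_stub ();
  uint32_t saved_high = (uint32_t)(value >> 32);
  if (value & MSR_IA32_MISC_ENABLE_XD_DISABLE) {
    wrmsr_stub (value & ~MSR_IA32_MISC_ENABLE_XD_DISABLE);   /* let EFER.NXE take effect */
  }
  return saved_high;
}

int main (void)
{
  printf ("saved high dword            : %#x\n", clear_xd_disable (true));
  printf ("MSR after clearing          : %#llx\n", (unsigned long long)g_misc_enable);
  printf ("saved high dword (AMD path) : %#x\n", clear_xd_disable (false));
  return 0;
}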
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
r85718 r89983 2 2 SMM MP service implementation 3 3 4 Copyright (c) 2009 - 202 0, Intel Corporation. All rights reserved.<BR>4 Copyright (c) 2009 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR> 6 6 … … 23 23 SMM_CPU_SYNC_MODE mCpuSmmSyncMode; 24 24 BOOLEAN mMachineCheckSupported = FALSE; 25 MM_COMPLETION mSmmStartupThisApToken; 26 27 extern UINTN mSmmShadowStackSize; 25 28 26 29 /** … … 41 44 UINT32 Value; 42 45 43 do{46 for (;;) { 44 47 Value = *Sem; 45 } while (Value == 0 || 46 InterlockedCompareExchange32 ( 47 (UINT32*)Sem, 48 Value, 49 Value - 1 50 ) != Value); 48 if (Value != 0 && 49 InterlockedCompareExchange32 ( 50 (UINT32*)Sem, 51 Value, 52 Value - 1 53 ) == Value) { 54 break; 55 } 56 CpuPause (); 57 } 51 58 return Value - 1; 52 59 } … … 917 924 // then find the lower 2MB aligned address. 918 925 // 919 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);926 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1); 920 927 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1; 921 928 } … … 968 975 // 969 976 Pte[Index] = PageAddress | mAddressEncMask; 970 GuardPage += mSmmStackSize;977 GuardPage += (mSmmStackSize + mSmmShadowStackSize); 971 978 if (GuardPage > mSmmStackArrayEnd) { 972 979 GuardPage = 0; … … 1235 1242 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments; 1236 1243 if (Token != NULL) { 1237 ProcToken= GetFreeToken (1); 1238 mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken; 1239 *Token = (MM_COMPLETION)ProcToken->SpinLock; 1244 if (Token != &mSmmStartupThisApToken) { 1245 // 1246 // When Token points to mSmmStartupThisApToken, this routine is called 1247 // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE). 1248 // 1249 // In this case, caller wants to startup AP procedure in non-blocking 1250 // mode and cannot get the completion status from the Token because there 1251 // is no way to return the Token to caller from SmmStartupThisAp(). 1252 // Caller needs to use its implementation specific way to query the completion status. 1253 // 1254 // There is no need to allocate a token for such case so the 3 overheads 1255 // can be avoided: 1256 // 1. Call AllocateTokenBuffer() when there is no free token. 1257 // 2. Get a free token from the token buffer. 1258 // 3. Call ReleaseToken() in APHandler(). 1259 // 1260 ProcToken = GetFreeToken (1); 1261 mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken; 1262 *Token = (MM_COMPLETION)ProcToken->SpinLock; 1263 } 1240 1264 } 1241 1265 mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus; … … 1469 1493 ) 1470 1494 { 1471 MM_COMPLETION Token;1472 1473 1495 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure; 1474 1496 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments; … … 1481 1503 CpuIndex, 1482 1504 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex], 1483 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : & Token,1505 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? 
NULL : &mSmmStartupThisApToken, 1484 1506 0, 1485 1507 NULL … … 1866 1888 ) 1867 1889 { 1868 UINT32 Cr3; 1869 UINTN Index; 1870 UINT8 *GdtTssTables; 1871 UINTN GdtTableStepSize; 1872 CPUID_VERSION_INFO_EDX RegEdx; 1890 UINT32 Cr3; 1891 UINTN Index; 1892 UINT8 *GdtTssTables; 1893 UINTN GdtTableStepSize; 1894 CPUID_VERSION_INFO_EDX RegEdx; 1895 UINT32 MaxExtendedFunction; 1896 CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize; 1873 1897 1874 1898 // … … 1897 1921 // NOTE: Physical memory above virtual address limit is not supported !!! 1898 1922 // 1899 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL); 1900 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1; 1901 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE; 1923 AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL); 1924 if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) { 1925 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL); 1926 } else { 1927 VirPhyAddressSize.Bits.PhysicalAddressBits = 36; 1928 } 1929 gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1; 1930 // 1931 // Clear the low 12 bits 1932 // 1933 gPhyMask &= 0xfffffffffffff000ULL; 1902 1934 1903 1935 // -
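The gPhyMask setup at the end of this hunk replaces an unconditional CPUID 0x80000008 call with a check of the maximum extended leaf and a 36-bit fallback, and then clears the low 12 bits instead of a hard-coded 48-bit cap. A standalone sketch of the same computation using the GCC/Clang cpuid intrinsic follows (x86-only, so it will not build elsewhere); the constant names copy the leaf numbers used above.

#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

#define CPUID_EXTENDED_FUNCTION     0x80000000U
#define CPUID_VIR_PHY_ADDRESS_SIZE  0x80000008U

int main (void)
{
  unsigned int eax, ebx, ecx, edx;
  unsigned int phys_bits = 36;                        /* fallback when the leaf is absent */

  if (__get_cpuid (CPUID_EXTENDED_FUNCTION, &eax, &ebx, &ecx, &edx) &&
      eax >= CPUID_VIR_PHY_ADDRESS_SIZE &&
      __get_cpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &eax, &ebx, &ecx, &edx)) {
    phys_bits = eax & 0xFF;                           /* EAX[7:0] = physical address bits */
  }

  uint64_t phy_mask = (1ULL << phys_bits) - 1;        /* cover the whole physical space   */
  phy_mask &= 0xFFFFFFFFFFFFF000ULL;                  /* clear the low 12 page-offset bits */

  printf ("physical address bits: %u\n", phys_bits);
  printf ("page-aligned mask    : %#llx\n", (unsigned long long)phy_mask);
  return 0;
}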
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
r85718 r89983 264 264 extern EFI_SMM_CPU_PROTOCOL mSmmCpu; 265 265 extern EFI_MM_MP_PROTOCOL mSmmMp; 266 extern UINTN mInternalCr3; 266 267 267 268 /// … … 337 338 @retval EFI_SUCCESS The register was read from Save State. 338 339 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor. 339 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.340 @retval EFI_INVALID_PARAMETER Buffer is NULL, or Width does not meet requirement per Register type. 340 341 341 342 **/ … … 943 944 944 945 /** 945 Return page table base. 946 947 @return page table base. 948 **/ 949 UINTN 950 GetPageTableBase ( 951 VOID 946 Get page table base address and the depth of the page table. 947 948 @param[out] Base Page table base address. 949 @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging. 950 **/ 951 VOID 952 GetPageTable ( 953 OUT UINTN *Base, 954 OUT BOOLEAN *FiveLevels OPTIONAL 952 955 ); 953 956 -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
r85718 r89983 33 33 }; 34 34 35 UINTN mInternal Gr3;35 UINTN mInternalCr3; 36 36 37 37 /** … … 47 47 ) 48 48 { 49 mInternalGr3 = Cr3; 50 } 51 52 /** 53 Return page table base. 54 55 @return page table base. 56 **/ 57 UINTN 58 GetPageTableBase ( 59 VOID 60 ) 61 { 62 if (mInternalGr3 != 0) { 63 return mInternalGr3; 64 } 65 return (AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64); 49 mInternalCr3 = Cr3; 66 50 } 67 51 … … 132 116 UINT64 *L4PageTable; 133 117 UINT64 *L5PageTable; 134 IA32_CR4 Cr4;118 UINTN PageTableBase; 135 119 BOOLEAN Enable5LevelPaging; 120 121 GetPageTable (&PageTableBase, &Enable5LevelPaging); 136 122 137 123 Index5 = ((UINTN)RShiftU64 (Address, 48)) & PAGING_PAE_INDEX_MASK; … … 141 127 Index1 = ((UINTN)Address >> 12) & PAGING_PAE_INDEX_MASK; 142 128 143 Cr4.UintN = AsmReadCr4 ();144 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);145 146 129 if (sizeof(UINTN) == sizeof(UINT64)) { 147 130 if (Enable5LevelPaging) { 148 L5PageTable = (UINT64 *) GetPageTableBase ();131 L5PageTable = (UINT64 *)PageTableBase; 149 132 if (L5PageTable[Index5] == 0) { 150 133 *PageAttribute = PageNone; … … 154 137 L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64); 155 138 } else { 156 L4PageTable = (UINT64 *) GetPageTableBase ();139 L4PageTable = (UINT64 *)PageTableBase; 157 140 } 158 141 if (L4PageTable[Index4] == 0) { … … 163 146 L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64); 164 147 } else { 165 L3PageTable = (UINT64 *) GetPageTableBase ();148 L3PageTable = (UINT64 *)PageTableBase; 166 149 } 167 150 if (L3PageTable[Index3] == 0) { … … 253 236 if (IsSet) { 254 237 NewPageEntry &= ~(UINT64)IA32_PG_RW; 255 if (mInternal Gr3 != 0) {238 if (mInternalCr3 != 0) { 256 239 // Environment setup 257 240 // ReadOnly page need set Dirty bit for shadow stack … … 436 419 437 420 ASSERT (Attributes != 0); 438 ASSERT ((Attributes & ~ (EFI_MEMORY_RP | EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0);421 ASSERT ((Attributes & ~EFI_MEMORY_ATTRIBUTE_MASK) == 0); 439 422 440 423 ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0); -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
r85718 r89983 3 3 4 4 Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR> 5 Copyright (c) 2017 , AMD Incorporated. All rights reserved.<BR>5 Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR> 6 6 7 7 SPDX-License-Identifier: BSD-2-Clause-Patent … … 1016 1016 PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1); 1017 1017 } 1018 1019 if (StandardSignatureIsAuthenticAMD ()) { 1020 // 1021 // AMD processors do not support MSR_IA32_MISC_ENABLE 1022 // 1023 PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1); 1024 } 1018 1025 } 1019 1026 -
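StandardSignatureIsAuthenticAMD(), used above to disable the MSR_IA32_MISC_ENABLE accesses in the SMI entry on AMD parts, comes down to checking the CPUID leaf 0 vendor signature. A standalone sketch of that check (x86 GCC/Clang, <cpuid.h>); only the helper name is illustrative, the register-to-string order is the architectural one.

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* CPUID leaf 0 returns the vendor string split across EBX, EDX, ECX. */
static bool is_authentic_amd (void)
{
  unsigned int eax, ebx, ecx, edx;
  char         vendor[13];

  if (!__get_cpuid (0, &eax, &ebx, &ecx, &edx)) {
    return false;
  }
  memcpy (vendor + 0, &ebx, 4);
  memcpy (vendor + 4, &edx, 4);
  memcpy (vendor + 8, &ecx, 4);
  vendor[12] = '\0';
  return strcmp (vendor, "AuthenticAMD") == 0;
}

int main (void)
{
  /* On AMD the patched SMI entry skips the MSR; Intel parts keep the access. */
  printf ("AuthenticAMD: %s\n", is_authentic_amd () ? "yes" : "no");
  return 0;
}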
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h
r80721 r89983 3 3 4 4 Copyright (c) 2012 - 2018, Intel Corporation. All rights reserved.<BR> 5 Copyright (c) 2020, AMD Incorporated. All rights reserved.<BR> 5 6 SPDX-License-Identifier: BSD-2-Clause-Patent 6 7 … … 14 15 #include <Library/DxeServicesTableLib.h> 15 16 #include <Library/CpuLib.h> 17 #include <Library/UefiCpuLib.h> 16 18 #include <IndustryStandard/Acpi.h> 17 19 … … 100 102 extern BOOLEAN mXdSupported; 101 103 X86_ASSEMBLY_PATCH_LABEL gPatchXdSupported; 104 X86_ASSEMBLY_PATCH_LABEL gPatchMsrIa32MiscEnableSupported; 102 105 extern UINTN *mPFEntryCount; 103 106 extern UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT]; -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
r85718 r89983 344 344 @retval EFI_SUCCESS The register was read from Save State. 345 345 @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor. 346 @retval EFI_INVALID_PARAMETER This or Buffer is NULL.346 @retval EFI_INVALID_PARAMETER Buffer is NULL, or Width does not meet requirement per Register type. 347 347 348 348 **/ … … 417 417 if (mSmmCpuIoWidth[IoMisc.Bits.Length].Width == 0 || mSmmCpuIoType[IoMisc.Bits.Type] == 0) { 418 418 return EFI_NOT_FOUND; 419 } 420 421 // 422 // Make sure the incoming buffer is large enough to hold IoInfo before accessing 423 // 424 if (Width < sizeof (EFI_SMM_SAVE_STATE_IO_INFO)) { 425 return EFI_INVALID_PARAMETER; 419 426 } 420 427 -
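The added Width check keeps the save-state read path above from copying a full EFI_SMM_SAVE_STATE_IO_INFO into a caller buffer that is smaller than the structure. A minimal sketch of the same guard pattern; io_info and the status enum are stand-ins, since the real types come from the PI specification headers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for EFI_SMM_SAVE_STATE_IO_INFO; layout here is illustrative only. */
typedef struct {
  uint64_t io_data;
  uint16_t io_port;
  uint8_t  io_width;
  uint8_t  io_type;
} io_info;

typedef enum { STATUS_SUCCESS, STATUS_INVALID_PARAMETER } status;

/* Copy the trapped I/O information out only if the caller's buffer can hold it. */
static status read_io_save_state (void *buffer, size_t width)
{
  if (buffer == NULL || width < sizeof (io_info)) {
    return STATUS_INVALID_PARAMETER;      /* mirrors the new EFI_INVALID_PARAMETER path */
  }
  io_info info = { .io_data = 0xAB, .io_port = 0x60, .io_width = 1, .io_type = 1 };
  memcpy (buffer, &info, sizeof (info));
  return STATUS_SUCCESS;
}

int main (void)
{
  io_info out;
  printf ("full-size buffer: %d\n", read_io_save_state (&out, sizeof (out)));
  printf ("4-byte buffer   : %d\n", read_io_save_state (&out, 4));
  return 0;
}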
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
r85718 r89983 13 13 #define PAGE_TABLE_PAGES 8 14 14 #define ACC_MAX_BIT BIT3 15 16 extern UINTN mSmmShadowStackSize; 15 17 16 18 LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool); … … 102 104 } else { 103 105 return FALSE; 106 } 107 } 108 109 /** 110 Get page table base address and the depth of the page table. 111 112 @param[out] Base Page table base address. 113 @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging. 114 **/ 115 VOID 116 GetPageTable ( 117 OUT UINTN *Base, 118 OUT BOOLEAN *FiveLevels OPTIONAL 119 ) 120 { 121 IA32_CR4 Cr4; 122 123 if (mInternalCr3 == 0) { 124 *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64; 125 if (FiveLevels != NULL) { 126 Cr4.UintN = AsmReadCr4 (); 127 *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1); 128 } 129 return; 130 } 131 132 *Base = mInternalCr3; 133 if (FiveLevels != NULL) { 134 *FiveLevels = m5LevelPagingNeeded; 104 135 } 105 136 } … … 181 212 Set static page table. 182 213 183 @param[in] PageTable Address of page table. 214 @param[in] PageTable Address of page table. 215 @param[in] PhysicalAddressBits The maximum physical address bits supported. 184 216 **/ 185 217 VOID 186 218 SetStaticPageTable ( 187 IN UINTN PageTable 219 IN UINTN PageTable, 220 IN UINT8 PhysicalAddressBits 188 221 ) 189 222 { … … 207 240 // when 5-Level Paging is disabled. 208 241 // 209 ASSERT ( mPhysicalAddressBits <= 52);210 if (!m5LevelPagingNeeded && mPhysicalAddressBits > 48) {211 mPhysicalAddressBits = 48;242 ASSERT (PhysicalAddressBits <= 52); 243 if (!m5LevelPagingNeeded && PhysicalAddressBits > 48) { 244 PhysicalAddressBits = 48; 212 245 } 213 246 214 247 NumberOfPml5EntriesNeeded = 1; 215 if ( mPhysicalAddressBits > 48) {216 NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 48);217 mPhysicalAddressBits = 48;248 if (PhysicalAddressBits > 48) { 249 NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 48); 250 PhysicalAddressBits = 48; 218 251 } 219 252 220 253 NumberOfPml4EntriesNeeded = 1; 221 if ( mPhysicalAddressBits > 39) {222 NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 39);223 mPhysicalAddressBits = 39;254 if (PhysicalAddressBits > 39) { 255 NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 39); 256 PhysicalAddressBits = 39; 224 257 } 225 258 226 259 NumberOfPdpEntriesNeeded = 1; 227 ASSERT ( mPhysicalAddressBits > 30);228 NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, mPhysicalAddressBits - 30);260 ASSERT (PhysicalAddressBits > 30); 261 NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 30); 229 262 230 263 // … … 408 441 // that covers all memory space. 409 442 // 410 SetStaticPageTable ((UINTN)PTEntry );443 SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits); 411 444 } else { 412 445 // … … 986 1019 UINTN PFAddress; 987 1020 UINTN GuardPageAddress; 1021 UINTN ShadowStackGuardPageAddress; 988 1022 UINTN CpuIndex; 989 1023 … … 1002 1036 1003 1037 // 1004 // If a page fault occurs in SMRAM range, it might be in a SMM stack guard page,1038 // If a page fault occurs in SMRAM range, it might be in a SMM stack/shadow stack guard page, 1005 1039 // or SMM page protection violation. 
1006 1040 // … … 1009 1043 DumpCpuContext (InterruptType, SystemContext); 1010 1044 CpuIndex = GetCpuIndex (); 1011 GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize); 1045 GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize)); 1046 ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize)); 1012 1047 if ((FeaturePcdGet (PcdCpuSmmStackGuard)) && 1013 1048 (PFAddress >= GuardPageAddress) && 1014 1049 (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) { 1015 1050 DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n")); 1051 } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) && 1052 (mSmmShadowStackSize > 0) && 1053 (PFAddress >= ShadowStackGuardPageAddress) && 1054 (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE))) { 1055 DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n")); 1016 1056 } else { 1017 1057 if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) { … … 1112 1152 UINT64 *L4PageTable; 1113 1153 UINT64 *L5PageTable; 1154 UINTN PageTableBase; 1114 1155 BOOLEAN IsSplitted; 1115 1156 BOOLEAN PageTableSplitted; 1116 1157 BOOLEAN CetEnabled; 1117 IA32_CR4 Cr4;1118 1158 BOOLEAN Enable5LevelPaging; 1119 1120 Cr4.UintN = AsmReadCr4 ();1121 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);1122 1159 1123 1160 // … … 1164 1201 PageTableSplitted = FALSE; 1165 1202 L5PageTable = NULL; 1203 1204 GetPageTable (&PageTableBase, &Enable5LevelPaging); 1205 1166 1206 if (Enable5LevelPaging) { 1167 L5PageTable = (UINT64 *) GetPageTableBase ();1168 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS) (UINTN)L5PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);1207 L5PageTable = (UINT64 *)PageTableBase; 1208 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); 1169 1209 PageTableSplitted = (PageTableSplitted || IsSplitted); 1170 1210 } … … 1177 1217 } 1178 1218 } else { 1179 L4PageTable = (UINT64 *) GetPageTableBase ();1219 L4PageTable = (UINT64 *)PageTableBase; 1180 1220 } 1181 1221 SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted); -
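With a shadow stack carved out next to each CPU's regular SMM stack, the per-CPU stride in the page-fault check above becomes (mSmmStackSize + mSmmShadowStackSize), and when mSmmShadowStackSize > 0 each CPU region carries two guard pages, one for the regular stack and one for the shadow stack. A standalone sketch of that address arithmetic; the base and size values are made-up examples, not the PCD defaults.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000u

int main (void)
{
  uintptr_t stack_array_base  = 0x7F000000u;   /* stands in for mSmmStackArrayBase  */
  uintptr_t stack_size        = 0x4000u;       /* stands in for mSmmStackSize       */
  uintptr_t shadow_stack_size = 0x2000u;       /* stands in for mSmmShadowStackSize */

  for (unsigned cpu = 0; cpu < 2; cpu++) {
    uintptr_t stride       = stack_size + shadow_stack_size;
    uintptr_t stack_guard  = stack_array_base + PAGE_SIZE + cpu * stride;
    uintptr_t shadow_guard = stack_array_base + stack_size + PAGE_SIZE + cpu * stride;
    printf ("CPU%u: stack guard %#lx..%#lx, shadow stack guard %#lx..%#lx\n",
            cpu,
            (unsigned long)stack_guard,  (unsigned long)(stack_guard + PAGE_SIZE),
            (unsigned long)shadow_guard, (unsigned long)(shadow_guard + PAGE_SIZE));
  }
  return 0;
}

A fault inside the first range is reported as a stack overflow, a fault inside the second as a shadow-stack overflow, exactly as in the handler above.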
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
r85718 r89983 1 1 ;------------------------------------------------------------------------------ ; 2 2 ; Copyright (c) 2016 - 2019, Intel Corporation. All rights reserved.<BR> 3 ; Copyright (c) 2020, AMD Incorporated. All rights reserved.<BR> 3 4 ; SPDX-License-Identifier: BSD-2-Clause-Patent 4 5 ; … … 68 69 extern ASM_PFX(mXdSupported) 69 70 global ASM_PFX(gPatchXdSupported) 71 global ASM_PFX(gPatchMsrIa32MiscEnableSupported) 70 72 global ASM_PFX(gPatchSmiStack) 71 73 global ASM_PFX(gPatchSmiCr3) … … 153 155 cmp al, 0 154 156 jz @SkipXd 157 158 ; If MSR_IA32_MISC_ENABLE is supported, clear XD Disable bit 159 mov al, strict byte 1 ; source operand may be patched 160 ASM_PFX(gPatchMsrIa32MiscEnableSupported): 161 cmp al, 1 162 jz MsrIa32MiscEnableSupported 163 164 ; MSR_IA32_MISC_ENABLE not supported 165 sub esp, 4 166 xor rdx, rdx 167 push rdx ; don't try to restore the XD Disable bit just before RSM 168 jmp EnableNxe 169 155 170 ; 156 171 ; Check XD disable bit 157 172 ; 173 MsrIa32MiscEnableSupported: 158 174 mov ecx, MSR_IA32_MISC_ENABLE 159 175 rdmsr … … 161 177 push rdx ; save MSR_IA32_MISC_ENABLE[63-32] 162 178 test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34] 163 jz .0179 jz EnableNxe 164 180 and dx, 0xFFFB ; clear XD Disable bit if it is set 165 181 wrmsr 166 .0:182 EnableNxe: 167 183 mov ecx, MSR_EFER 168 184 rdmsr -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
r80721 r89983 94 94 // Setup top of known good stack as IST1 for each processor. 95 95 // 96 *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);96 *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * (mSmmStackSize + mSmmShadowStackSize)); 97 97 } 98 98 } … … 122 122 for (Index = 0; Index < GdtEntryCount; Index++) { 123 123 if (GdtEntry->Bits.L == 0) { 124 if (GdtEntry->Bits.Type > 8 && GdtEntry->Bits. L == 0) {124 if (GdtEntry->Bits.Type > 8 && GdtEntry->Bits.DB == 1) { 125 125 break; 126 126 } … … 174 174 UINTN SmmShadowStackSize; 175 175 UINT64 *InterruptSspTable; 176 UINT32 InterruptSsp; 176 177 177 178 if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) { … … 192 193 DEBUG ((DEBUG_INFO, "mSmmInterruptSspTables - 0x%x\n", mSmmInterruptSspTables)); 193 194 } 194 mCetInterruptSsp = (UINT32)((UINTN)ShadowStack + EFI_PAGES_TO_SIZE(1) - sizeof(UINT64)); 195 196 // 197 // The highest address on the stack (0xFF8) is a save-previous-ssp token pointing to a location that is 40 bytes away - 0xFD0. 198 // The supervisor shadow stack token is just above it at address 0xFF0. This is where the interrupt SSP table points. 199 // So when an interrupt of exception occurs, we can use SAVESSP/RESTORESSP/CLEARSSBUSY for the supervisor shadow stack, 200 // due to the reason the RETF in SMM exception handler cannot clear the BUSY flag with same CPL. 201 // (only IRET or RETF with different CPL can clear BUSY flag) 202 // Please refer to UefiCpuPkg/Library/CpuExceptionHandlerLib/X64 for the full stack frame at runtime. 203 // 204 InterruptSsp = (UINT32)((UINTN)ShadowStack + EFI_PAGES_TO_SIZE(1) - sizeof(UINT64)); 205 *(UINT32 *)(UINTN)InterruptSsp = (InterruptSsp - sizeof(UINT64) * 4) | 0x2; 206 mCetInterruptSsp = InterruptSsp - sizeof(UINT64); 207 195 208 mCetInterruptSspTable = (UINT32)(UINTN)(mSmmInterruptSspTables + sizeof(UINT64) * 8 * CpuIndex); 196 209 InterruptSspTable = (UINT64 *)(UINTN)mCetInterruptSspTable; -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/ResetVector/Vtf0/Ia16/Real16ToFlat32.asm
r80721 r89983 130 130 %endif 131 131 132 ; linear code segment descriptor 133 LINEAR_CODE16_SEL equ $-GDT_BASE 134 DW 0xffff ; limit 15:0 135 DW 0 ; base 15:0 136 DB 0 ; base 23:16 137 DB PRESENT_FLAG(1)|DPL(0)|SYSTEM_FLAG(1)|DESC_TYPE(CODE32_TYPE) 138 DB GRANULARITY_FLAG(1)|DEFAULT_SIZE32(0)|CODE64_FLAG(0)|UPPER_LIMIT(0xf) 139 DB 0 ; base 31:24 140 132 141 GDT_END: 133 142 -
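The added LINEAR_CODE16_SEL entry is an ordinary 8-byte GDT descriptor: base 0, 20-bit limit 0xFFFFF with page granularity, present ring-0 code, 16-bit default operand size. A standalone sketch that packs such a descriptor from its fields is below so the DB/DW lines can be cross-checked; the access byte 0x9A assumes CODE32_TYPE expands to the usual execute/read code type (0xA), which is an assumption, not something the listing above states.

#include <stdint.h>
#include <stdio.h>

/* Pack a classic 8-byte segment descriptor.
   access: P|DPL|S|Type byte (0x9A = present, ring 0, code, execute/read).
   flags : G|D/B|L|AVL nibble (0x8 = page granular, 16-bit default size). */
static uint64_t pack_gdt_entry (uint32_t base, uint32_t limit,
                                uint8_t access, uint8_t flags)
{
  uint64_t d = 0;
  d |= (uint64_t)(limit & 0xFFFFu);                  /* limit 15:0     */
  d |= (uint64_t)(base  & 0xFFFFFFu)      << 16;     /* base  23:0     */
  d |= (uint64_t)access                   << 40;     /* access byte    */
  d |= (uint64_t)((limit >> 16) & 0xFu)   << 48;     /* limit 19:16    */
  d |= (uint64_t)(flags & 0xFu)           << 52;     /* G, D/B, L, AVL */
  d |= (uint64_t)((base >> 24) & 0xFFu)   << 56;     /* base  31:24    */
  return d;
}

int main (void)
{
  /* Base 0, limit 0xFFFFF pages (4 GiB), 16-bit code segment. */
  uint64_t desc = pack_gdt_entry (0x00000000, 0xFFFFF, 0x9A, 0x8);
  printf ("LINEAR_CODE16_SEL descriptor: %#018llx\n", (unsigned long long)desc);
  return 0;
}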
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/SecCore/Ia32/ResetVec.nasmb
r80721 r89983 11 11 ; 12 12 ; Reset Vector Data structure 13 ; This structure is located at 0xFFFFF FC013 ; This structure is located at 0xFFFFF000 14 14 ; 15 15 ;------------------------------------------------------------------------------ … … 24 24 25 25 ORG 0h 26 27 ; 28 ; 0xFFFFF000 29 ; 30 ; We enter here with CS:IP = 0xFF00:0x0000. Do a far-jump to change CS to 0xF000 31 ; and IP to ApStartup. 32 ; 33 ApVector: 34 mov di, "AP" 35 jmp 0xF000:0xF000+ApStartup 36 37 TIMES 0xFC0-($-$$) nop 38 39 ; 40 ; This should be at 0xFFFFFFC0 41 ; 42 26 43 ; 27 44 ; Reserved … … 29 46 ReservedData: DD 0eeeeeeeeh, 0eeeeeeeeh 30 47 31 TIMES 0x 10-($-$$) DB 048 TIMES 0xFD0-($-$$) nop 32 49 ; 33 ; This is located at 0xFFFFFFD0 h50 ; This is located at 0xFFFFFFD0 34 51 ; 35 52 mov di, "PA" 36 53 jmp ApStartup 37 54 38 TIMES 0x 20-($-$$) DB 055 TIMES 0xFE0-($-$$) nop 39 56 ; 40 57 ; Pointer to the entry point of the PEI core … … 54 71 iret 55 72 56 TIMES 0x 30-($-$$) DB 073 TIMES 0xFF0-($-$$) nop 57 74 ; 58 75 ; For IA32, the reset vector must be at 0xFFFFFFF0, i.e., 4G-16 byte … … 75 92 76 93 77 TIMES 0x 38-($-$$) DB 094 TIMES 0xFF8-($-$$) nop 78 95 ; 79 96 ; Ap reset vector segment address is at 0xFFFFFFF8 … … 84 101 ApSegAddress: dd 12345678h 85 102 86 TIMES 0x 3c-($-$$) DB 0103 TIMES 0xFFC-($-$$) nop 87 104 ; 88 105 ; BFV Base is at 0xFFFFFFFC -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/SecCore/SecCore.inf
r80721 r89983 69 69 gPeiSecPerformancePpiGuid 70 70 gEfiPeiCoreFvLocationPpiGuid 71 ## CONSUMES 72 gRepublishSecPpiPpiGuid 71 73 72 74 [Guids] … … 76 78 [Pcd] 77 79 gUefiCpuPkgTokenSpaceGuid.PcdPeiTemporaryRamStackSize ## CONSUMES 80 gEfiMdeModulePkgTokenSpaceGuid.PcdMigrateTemporaryRamFirmwareVolumes ## CONSUMES 78 81 79 82 [UserExtensions.TianoCore."ExtraFiles"] -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/SecCore/SecMain.c
r85718 r89983 35 35 } 36 36 }; 37 38 /** 39 Migrates the Global Descriptor Table (GDT) to permanent memory. 40 41 @retval EFI_SUCCESS The GDT was migrated successfully. 42 @retval EFI_OUT_OF_RESOURCES The GDT could not be migrated due to lack of available memory. 43 44 **/ 45 EFI_STATUS 46 MigrateGdt ( 47 VOID 48 ) 49 { 50 EFI_STATUS Status; 51 UINTN GdtBufferSize; 52 IA32_DESCRIPTOR Gdtr; 53 VOID *GdtBuffer; 54 55 AsmReadGdtr ((IA32_DESCRIPTOR *) &Gdtr); 56 GdtBufferSize = sizeof (IA32_SEGMENT_DESCRIPTOR) -1 + Gdtr.Limit + 1; 57 58 Status = PeiServicesAllocatePool ( 59 GdtBufferSize, 60 &GdtBuffer 61 ); 62 ASSERT (GdtBuffer != NULL); 63 if (EFI_ERROR (Status)) { 64 return EFI_OUT_OF_RESOURCES; 65 } 66 67 GdtBuffer = ALIGN_POINTER (GdtBuffer, sizeof (IA32_SEGMENT_DESCRIPTOR)); 68 CopyMem (GdtBuffer, (VOID *) Gdtr.Base, Gdtr.Limit + 1); 69 Gdtr.Base = (UINTN) GdtBuffer; 70 AsmWriteGdtr (&Gdtr); 71 72 return EFI_SUCCESS; 73 } 37 74 38 75 // … … 371 408 ) 372 409 { 373 BOOLEAN State; 410 EFI_STATUS Status; 411 EFI_STATUS Status2; 412 UINTN Index; 413 BOOLEAN State; 414 EFI_PEI_PPI_DESCRIPTOR *PeiPpiDescriptor; 415 REPUBLISH_SEC_PPI_PPI *RepublishSecPpiPpi; 374 416 375 417 // … … 379 421 380 422 // 423 // Re-install SEC PPIs using a PEIM produced service if published 424 // 425 for (Index = 0, Status = EFI_SUCCESS; Status == EFI_SUCCESS; Index++) { 426 Status = PeiServicesLocatePpi ( 427 &gRepublishSecPpiPpiGuid, 428 Index, 429 &PeiPpiDescriptor, 430 (VOID **) &RepublishSecPpiPpi 431 ); 432 if (!EFI_ERROR (Status)) { 433 DEBUG ((DEBUG_INFO, "Calling RepublishSecPpi instance %d.\n", Index)); 434 Status2 = RepublishSecPpiPpi->RepublishSecPpis (); 435 ASSERT_EFI_ERROR (Status2); 436 } 437 } 438 439 // 381 440 // Migrate DebugAgentContext. 382 441 // … … 386 445 // Disable interrupts and save current interrupt state 387 446 // 388 State = SaveAndDisableInterrupts(); 447 State = SaveAndDisableInterrupts (); 448 449 // 450 // Migrate GDT before NEM near down 451 // 452 if (PcdGetBool (PcdMigrateTemporaryRamFirmwareVolumes)) { 453 Status = MigrateGdt (); 454 ASSERT_EFI_ERROR (Status); 455 } 389 456 390 457 // -
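MigrateGdt() above over-allocates by sizeof (IA32_SEGMENT_DESCRIPTOR) - 1 bytes so that ALIGN_POINTER can round the pool allocation up to descriptor alignment and still leave Gdtr.Limit + 1 bytes for the copy. A standalone sketch of just that size-and-alignment arithmetic follows; malloc stands in for PeiServicesAllocatePool and the 8-entry GDT is an example value.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DESC_SIZE 8u   /* sizeof (IA32_SEGMENT_DESCRIPTOR) */

/* Round a pointer up to the next multiple of 'align' (a power of two),
   which is what ALIGN_POINTER does. */
static void *align_pointer (void *p, uintptr_t align)
{
  return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main (void)
{
  uint16_t gdt_limit = 8 * DESC_SIZE - 1;              /* example Gdtr.Limit */

  /* Worst case the pool pointer sits 1 byte past an 8-byte boundary, so
     DESC_SIZE - 1 extra bytes guarantee the aligned copy still fits. */
  size_t   buffer_size = DESC_SIZE - 1 + gdt_limit + 1;
  uint8_t *buffer      = malloc (buffer_size);
  if (buffer == NULL) {
    return 1;
  }

  uint8_t *gdt_copy = align_pointer (buffer, DESC_SIZE);
  memset (gdt_copy, 0, gdt_limit + 1);                 /* stand-in for copying the live GDT */
  printf ("pool %p, aligned GDT copy %p, %u bytes\n",
          (void *)buffer, (void *)gdt_copy, gdt_limit + 1u);

  free (buffer);
  return 0;
}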
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/SecCore/SecMain.h
r80721 r89983 16 16 #include <Ppi/SecPerformance.h> 17 17 #include <Ppi/PeiCoreFvLocation.h> 18 #include <Ppi/RepublishSecPpi.h> 18 19 19 20 #include <Guid/FirmwarePerformance.h> -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/UefiCpuPkg.ci.yaml
r85718 r89983 3 3 # 4 4 # Copyright (c) Microsoft Corporation 5 # Copyright (c) 2020, Intel Corporation. All rights reserved.<BR> 5 6 # SPDX-License-Identifier: BSD-2-Clause-Patent 6 7 ## 7 8 { 9 "LicenseCheck": { 10 "IgnoreFiles": [] 11 }, 12 "EccCheck": { 13 ## Exception sample looks like below: 14 ## "ExceptionList": [ 15 ## "<ErrorID>", "<KeyWord>" 16 ## ] 17 "ExceptionList": [ 18 ], 19 ## Both file path and directory path are accepted. 20 "IgnoreFiles": [ 21 ] 22 }, 8 23 "CompilerPlugin": { 9 24 "DscPath": "UefiCpuPkg.dsc" 25 }, 26 ## options defined ci/Plugin/HostUnitTestCompilerPlugin 27 "HostUnitTestCompilerPlugin": { 28 "DscPath": "Test/UefiCpuPkgHostTest.dsc" 10 29 }, 11 30 "CharEncodingCheck": { … … 19 38 ], 20 39 # For host based unit tests 21 "AcceptableDependencies-HOST_APPLICATION":[], 40 "AcceptableDependencies-HOST_APPLICATION":[ 41 "UnitTestFrameworkPkg/UnitTestFrameworkPkg.dec" 42 ], 22 43 # For UEFI shell based apps 23 44 "AcceptableDependencies-UEFI_APPLICATION":[], … … 30 51 "UefiCpuPkg/ResetVector/Vtf0/Vtf0.inf" 31 52 ] 53 }, 54 "HostUnitTestDscCompleteCheck": { 55 "IgnoreInf": [""], 56 "DscPath": "Test/UefiCpuPkgHostTest.dsc" 32 57 }, 33 58 "GuidCheck": { -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/UefiCpuPkg.dec
r85718 r89983 2 2 # This Package provides UEFI compatible CPU modules and libraries. 3 3 # 4 # Copyright (c) 2007 - 202 0, Intel Corporation. All rights reserved.<BR>4 # Copyright (c) 2007 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 # 6 6 # SPDX-License-Identifier: BSD-2-Clause-Patent … … 54 54 MpInitLib|Include/Library/MpInitLib.h 55 55 56 ## @libraryclass Provides function to support VMGEXIT processing. 57 VmgExitLib|Include/Library/VmgExitLib.h 58 59 ## @libraryclass Provides function to get CPU cache information. 60 CpuCacheInfoLib|Include/Library/CpuCacheInfoLib.h 61 62 ## @libraryclass Provides function for loading microcode. 63 MicrocodeLib|Include/Library/MicrocodeLib.h 64 56 65 [Guids] 57 66 gUefiCpuPkgTokenSpaceGuid = { 0xac05bf33, 0x995a, 0x4ed4, { 0xaa, 0xb8, 0xef, 0x7a, 0xe8, 0xf, 0x5c, 0xb0 }} … … 84 93 ## Include/Ppi/ShadowMicrocode.h 85 94 gEdkiiPeiShadowMicrocodePpiGuid = { 0x430f6965, 0x9a69, 0x41c5, { 0x93, 0xed, 0x8b, 0xf0, 0x64, 0x35, 0xc1, 0xc6 }} 95 96 ## Include/Ppi/RepublishSecPpi.h 97 gRepublishSecPpiPpiGuid = { 0x27a71b1e, 0x73ee, 0x43d6, { 0xac, 0xe3, 0x52, 0x1a, 0x2d, 0xc5, 0xd0, 0x92 }} 86 98 87 99 [PcdsFeatureFlag] … … 158 170 # @Prompt Specify the count of pre allocated SMM MP tokens per chunk. 159 171 gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmMpTokenCountPerChunk|64|UINT32|0x30002002 172 173 ## Area of memory where the SEV-ES work area block lives. 174 # @Prompt Configure the SEV-ES work area base 175 gUefiCpuPkgTokenSpaceGuid.PcdSevEsWorkAreaBase|0x0|UINT32|0x30002005 176 177 ## Size of teh area of memory where the SEV-ES work area block lives. 178 # @Prompt Configure the SEV-ES work area base 179 gUefiCpuPkgTokenSpaceGuid.PcdSevEsWorkAreaSize|0x0|UINT32|0x30002006 160 180 161 181 [PcdsFixedAtBuild, PcdsPatchableInModule] … … 371 391 gUefiCpuPkgTokenSpaceGuid.PcdCpuProcTraceOutputScheme|0x0|UINT8|0x60000015 372 392 393 ## This dynamic PCD indicates whether SEV-ES is enabled 394 # TRUE - SEV-ES is enabled 395 # FALSE - SEV-ES is not enabled 396 # @Prompt SEV-ES Status 397 gUefiCpuPkgTokenSpaceGuid.PcdSevEsIsEnabled|FALSE|BOOLEAN|0x60000016 398 373 399 [UserExtensions.TianoCore."ExtraFiles"] 374 400 UefiCpuPkgExtra.uni -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/UefiCpuPkg.dsc
r85718 r89983 2 2 # UefiCpuPkg Package 3 3 # 4 # Copyright (c) 2007 - 20 19, Intel Corporation. All rights reserved.<BR>4 # Copyright (c) 2007 - 2021, Intel Corporation. All rights reserved.<BR> 5 5 # 6 6 # SPDX-License-Identifier: BSD-2-Clause-Patent … … 22 22 # 23 23 24 !include MdePkg/MdeLibs.dsc.inc 25 24 26 [LibraryClasses] 25 27 BaseLib|MdePkg/Library/BaseLib/BaseLib.inf … … 39 41 UefiBootServicesTableLib|MdePkg/Library/UefiBootServicesTableLib/UefiBootServicesTableLib.inf 40 42 UefiDriverEntryPoint|MdePkg/Library/UefiDriverEntryPoint/UefiDriverEntryPoint.inf 43 StandaloneMmDriverEntryPoint|MdePkg/Library/StandaloneMmDriverEntryPoint/StandaloneMmDriverEntryPoint.inf 41 44 DxeServicesTableLib|MdePkg/Library/DxeServicesTableLib/DxeServicesTableLib.inf 42 45 PeimEntryPoint|MdePkg/Library/PeimEntryPoint/PeimEntryPoint.inf … … 57 60 PeCoffExtraActionLib|MdePkg/Library/BasePeCoffExtraActionLibNull/BasePeCoffExtraActionLibNull.inf 58 61 TpmMeasurementLib|MdeModulePkg/Library/TpmMeasurementLibNull/TpmMeasurementLibNull.inf 62 VmgExitLib|UefiCpuPkg/Library/VmgExitLibNull/VmgExitLibNull.inf 63 MicrocodeLib|UefiCpuPkg/Library/MicrocodeLib/MicrocodeLib.inf 59 64 60 65 [LibraryClasses.common.SEC] … … 75 80 MpInitLib|UefiCpuPkg/Library/MpInitLib/PeiMpInitLib.inf 76 81 RegisterCpuFeaturesLib|UefiCpuPkg/Library/RegisterCpuFeaturesLib/PeiRegisterCpuFeaturesLib.inf 82 CpuCacheInfoLib|UefiCpuPkg/Library/CpuCacheInfoLib/PeiCpuCacheInfoLib.inf 77 83 78 84 [LibraryClasses.IA32.PEIM, LibraryClasses.X64.PEIM] … … 86 92 MpInitLib|UefiCpuPkg/Library/MpInitLib/DxeMpInitLib.inf 87 93 RegisterCpuFeaturesLib|UefiCpuPkg/Library/RegisterCpuFeaturesLib/DxeRegisterCpuFeaturesLib.inf 94 CpuCacheInfoLib|UefiCpuPkg/Library/CpuCacheInfoLib/DxeCpuCacheInfoLib.inf 88 95 89 96 [LibraryClasses.common.DXE_SMM_DRIVER] 90 97 SmmServicesTableLib|MdePkg/Library/SmmServicesTableLib/SmmServicesTableLib.inf 98 MmServicesTableLib|MdePkg/Library/MmServicesTableLib/MmServicesTableLib.inf 91 99 MemoryAllocationLib|MdePkg/Library/SmmMemoryAllocationLib/SmmMemoryAllocationLib.inf 92 100 HobLib|MdePkg/Library/DxeHobLib/DxeHobLib.inf 93 101 CpuExceptionHandlerLib|UefiCpuPkg/Library/CpuExceptionHandlerLib/SmmCpuExceptionHandlerLib.inf 102 103 [LibraryClasses.common.MM_STANDALONE] 104 MmServicesTableLib|MdePkg/Library/StandaloneMmServicesTableLib/StandaloneMmServicesTableLib.inf 94 105 95 106 [LibraryClasses.common.UEFI_APPLICATION] … … 107 118 UefiCpuPkg/Application/Cpuid/Cpuid.inf 108 119 UefiCpuPkg/Library/CpuTimerLib/BaseCpuTimerLib.inf 109 UefiCpuPkg/Library/Cpu TimerLib/DxeCpuTimerLib.inf110 UefiCpuPkg/Library/Cpu TimerLib/PeiCpuTimerLib.inf120 UefiCpuPkg/Library/CpuCacheInfoLib/PeiCpuCacheInfoLib.inf 121 UefiCpuPkg/Library/CpuCacheInfoLib/DxeCpuCacheInfoLib.inf 111 122 112 123 [Components.IA32, Components.X64] … … 121 132 } 122 133 UefiCpuPkg/CpuIo2Smm/CpuIo2Smm.inf 134 UefiCpuPkg/CpuIo2Smm/CpuIo2StandaloneMm.inf 123 135 UefiCpuPkg/CpuMpPei/CpuMpPei.inf 124 136 UefiCpuPkg/CpuS3DataDxe/CpuS3DataDxe.inf … … 137 149 UefiCpuPkg/Library/MpInitLib/DxeMpInitLib.inf 138 150 UefiCpuPkg/Library/MpInitLibUp/MpInitLibUp.inf 151 UefiCpuPkg/Library/MicrocodeLib/MicrocodeLib.inf 139 152 UefiCpuPkg/Library/MtrrLib/MtrrLib.inf 140 153 UefiCpuPkg/Library/PlatformSecLibNull/PlatformSecLibNull.inf … … 144 157 UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLib.inf 145 158 UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf 159 UefiCpuPkg/Library/SmmCpuFeaturesLib/StandaloneMmCpuFeaturesLib.inf 160 
UefiCpuPkg/Library/VmgExitLibNull/VmgExitLibNull.inf 146 161 UefiCpuPkg/PiSmmCommunication/PiSmmCommunicationPei.inf 147 162 UefiCpuPkg/PiSmmCommunication/PiSmmCommunicationSmm.inf 148 163 UefiCpuPkg/SecCore/SecCore.inf 164 UefiCpuPkg/SecMigrationPei/SecMigrationPei.inf 149 165 UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf 150 166 UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf { -
trunk/src/VBox/Devices/EFI/FirmwareNew/UefiCpuPkg/UefiCpuPkg.uni
r85718 r89983 279 279 #string STR_gUefiCpuPkgTokenSpaceGuid_PcdCpuApStatusCheckIntervalInMicroSeconds_PROMPT #language en-US "Periodic interval value in microseconds for AP status check in DXE.\n" 280 280 #string STR_gUefiCpuPkgTokenSpaceGuid_PcdCpuApStatusCheckIntervalInMicroSeconds_HELP #language en-US "Periodic interval value in microseconds for the status check of APs for StartupAllAPs() and StartupThisAP() executed in non-blocking mode in DXE phase.\n" 281 282 #string STR_gUefiCpuPkgTokenSpaceGuid_PcdSevEsIsEnabled_PROMPT #language en-US "Specifies whether SEV-ES is enabled" 283 #string STR_gUefiCpuPkgTokenSpaceGuid_PcdSevEsIsEnabled_HELP #language en-US "Set to TRUE when running as an SEV-ES guest, FALSE otherwise." 284 285 #string STR_gUefiCpuPkgTokenSpaceGuid_PcdSevEsWorkAreaBase_PROMPT #language en-US "Specify the address of the SEV-ES work area" 286 287 #string STR_gUefiCpuPkgTokenSpaceGuid_PcdSevEsWorkAreaBase_HELP #language en-US "Specifies the address of the work area used by an SEV-ES guest." 288 289 #string STR_gUefiCpuPkgTokenSpaceGuid_PcdSevEsWorkAreaSize_PROMPT #language en-US "Specify the size of the SEV-ES work area" 290 291 #string STR_gUefiCpuPkgTokenSpaceGuid_PcdSevEsWorkAreaSize_HELP #language en-US "Specifies the size of the work area used by an SEV-ES guest."