/** @file
SMM CPU misc functions for x64 arch specific.

Copyright (c) 2015 - 2019, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

EFI_PHYSICAL_ADDRESS  mGdtBuffer;
UINTN                 mGdtBufferSize;

extern BOOLEAN  mCetSupported;
extern UINTN    mSmmShadowStackSize;

X86_ASSEMBLY_PATCH_LABEL  mPatchCetPl0Ssp;
X86_ASSEMBLY_PATCH_LABEL  mPatchCetInterruptSsp;
X86_ASSEMBLY_PATCH_LABEL  mPatchCetInterruptSspTable;
UINT32                    mCetPl0Ssp;
UINT32                    mCetInterruptSsp;
UINT32                    mCetInterruptSspTable;

UINTN  mSmmInterruptSspTables;

/**
  Initialize IDT for SMM Stack Guard.

**/
VOID
EFIAPI
InitializeIDTSmmStackGuard (
  VOID
  )
{
  IA32_IDT_GATE_DESCRIPTOR  *IdtGate;

  //
  // If SMM Stack Guard feature is enabled, set the IST field of
  // the interrupt gate for Page Fault Exception to be 1
  //
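  //
  // Note: in the X64 IA32_IDT_GATE_DESCRIPTOR layout the IST index is carried in
  // the low bits of the Reserved_0 byte, so writing 1 below selects IST1; InitGdt ()
  // sets IST1 in each processor's TSS to the top of its known good stack.
  //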
  IdtGate  = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
  IdtGate += EXCEPT_IA32_PAGE_FAULT;
  IdtGate->Bits.Reserved_0 = 1;
}

/**
  Initialize GDT for all processors.

  @param[in]  Cr3          CR3 value.
  @param[out] GdtStepSize  The step size for the GDT table.

  @return GdtBase for processor 0.
          GdtBase for processor X is: GdtBase + (GdtStepSize * X)
**/
VOID *
InitGdt (
  IN  UINTN  Cr3,
  OUT UINTN  *GdtStepSize
  )
{
  UINTN                    Index;
  IA32_SEGMENT_DESCRIPTOR  *GdtDescriptor;
  UINTN                    TssBase;
  UINTN                    GdtTssTableSize;
  UINT8                    *GdtTssTables;
  UINTN                    GdtTableStepSize;

  //
  // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
  // on each SMI entry.
  //
  GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
  mGdtBufferSize  = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GdtTssTables    = (UINT8 *)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
  ASSERT (GdtTssTables != NULL);
  mGdtBuffer       = (UINTN)GdtTssTables;
  GdtTableStepSize = GdtTssTableSize;

  for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
    CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID *)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);

    //
    // Fixup TSS descriptors
    //
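    //
    // In the copied image the TSS immediately follows the GDT, and the 16-byte
    // X64 TSS descriptor occupies the last two 8-byte GDT slots, hence the "- 2"
    // below. Only the low 32 bits of the base are patched here, which assumes the
    // buffer lies below 4GB (the upper base field keeps its template value).
    //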
    TssBase       = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
    GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
    GdtDescriptor->Bits.BaseLow  = (UINT16)(UINTN)TssBase;
    GdtDescriptor->Bits.BaseMid  = (UINT8)((UINTN)TssBase >> 16);
    GdtDescriptor->Bits.BaseHigh = (UINT8)((UINTN)TssBase >> 24);

    if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      //
      // Setup top of known good stack as IST1 for each processor.
      //
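      //
      // With stack guard enabled each CPU's stack region is laid out as
      // [known good stack page][guard page][SMM stack], so the value written here
      // is the top of the known good stack page; the page fault handler therefore
      // always runs on a mapped stack, even when the fault is a stack overflow.
      //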
      *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);
    }
  }

  *GdtStepSize = GdtTableStepSize;
  return GdtTssTables;
}

/**
  Get Protected mode code segment from current GDT table.

  @return Protected mode code segment value.
**/
UINT16
GetProtectedModeCS (
  VOID
  )
{
  IA32_DESCRIPTOR          GdtrDesc;
  IA32_SEGMENT_DESCRIPTOR  *GdtEntry;
  UINTN                    GdtEntryCount;
  UINT16                   Index;

  AsmReadGdtr (&GdtrDesc);
  GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
  GdtEntry      = (IA32_SEGMENT_DESCRIPTOR *)GdtrDesc.Base;
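  //
  // Search for a code segment descriptor (Type > 8, i.e. the executable bit is
  // set) whose L bit is clear, i.e. a protected-mode rather than long-mode code
  // segment. Descriptors are 8 bytes each, so the matching selector is Index * 8.
  //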
  for (Index = 0; Index < GdtEntryCount; Index++) {
    if ((GdtEntry->Bits.L == 0) && (GdtEntry->Bits.Type > 8)) {
      break;
    }

    GdtEntry++;
  }

  ASSERT (Index != GdtEntryCount);
  return Index * 8;
}

/**
  Transfer AP to safe hlt-loop after it has finished restoring CPU features on the S3 path.

  @param[in] ApHltLoopCode          The address of the safe hlt-loop function.
  @param[in] TopOfStack             A pointer to the new stack to use for the ApHltLoopCode.
  @param[in] NumberToFinishAddress  Address of the semaphore counting APs that have finished.

**/
VOID
TransferApToSafeState (
  IN UINTN  ApHltLoopCode,
  IN UINTN  TopOfStack,
  IN UINTN  NumberToFinishAddress
  )
{
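  //
  // Drop the AP from long mode into 32-bit protected mode and jump to the
  // hlt-loop code: the CS found above is loaded, NumberToFinishAddress is passed
  // as the first parameter (Context1), and TopOfStack becomes the new stack pointer.
  //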
  AsmDisablePaging64 (
    GetProtectedModeCS (),
    (UINT32)ApHltLoopCode,
    (UINT32)NumberToFinishAddress,
    0,
    (UINT32)TopOfStack
    );
  //
  // It should never reach here
  //
  ASSERT (FALSE);
}

/**
  Initialize the shadow stack related data structure.

  @param CpuIndex     The index of CPU.
  @param ShadowStack  The bottom of the shadow stack for this CPU.
**/
VOID
InitShadowStack (
  IN UINTN  CpuIndex,
  IN VOID   *ShadowStack
  )
{
  UINTN   SmmShadowStackSize;
  UINT64  *InterruptSspTable;

  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    SmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
    if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      SmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
    }
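    //
    // Shadow stacks grow down like the regular stack, so the PL0 SSP is the
    // highest 8-byte slot of this CPU's shadow stack region; the value is
    // patched directly into the SMI entry code via its patch label.
    //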
    mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof (UINT64));
    PatchInstructionX86 (mPatchCetPl0Ssp, mCetPl0Ssp, 4);
    DEBUG ((DEBUG_INFO, "mCetPl0Ssp - 0x%x\n", mCetPl0Ssp));
    DEBUG ((DEBUG_INFO, "ShadowStack - 0x%x\n", ShadowStack));
    DEBUG ((DEBUG_INFO, "  SmmShadowStackSize - 0x%x\n", SmmShadowStackSize));

    if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      if (mSmmInterruptSspTables == 0) {
        mSmmInterruptSspTables = (UINTN)AllocateZeroPool (sizeof (UINT64) * 8 * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
        ASSERT (mSmmInterruptSspTables != 0);
        DEBUG ((DEBUG_INFO, "mSmmInterruptSspTables - 0x%x\n", mSmmInterruptSspTables));
      }
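      //
      // The interrupt SSP table is indexed by IST number, so entry [1] supplies
      // the shadow stack pointer used when an exception is delivered through IST1
      // (the Page Fault gate configured in InitializeIDTSmmStackGuard). The
      // exception shadow stack itself is the first page of this CPU's region.
      //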
      mCetInterruptSsp      = (UINT32)((UINTN)ShadowStack + EFI_PAGES_TO_SIZE (1) - sizeof (UINT64));
      mCetInterruptSspTable = (UINT32)(UINTN)(mSmmInterruptSspTables + sizeof (UINT64) * 8 * CpuIndex);
      InterruptSspTable     = (UINT64 *)(UINTN)mCetInterruptSspTable;
      InterruptSspTable[1]  = mCetInterruptSsp;
      PatchInstructionX86 (mPatchCetInterruptSsp, mCetInterruptSsp, 4);
      PatchInstructionX86 (mPatchCetInterruptSspTable, mCetInterruptSspTable, 4);
      DEBUG ((DEBUG_INFO, "mCetInterruptSsp - 0x%x\n", mCetInterruptSsp));
      DEBUG ((DEBUG_INFO, "mCetInterruptSspTable - 0x%x\n", mCetInterruptSspTable));
    }
  }
}