/** @file
SMM MP service implementation

Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                               gSmiMtrrs;
UINT64                                      gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
SMM_CPU_SYNC_MODE                           mCpuSmmSyncMode;
BOOLEAN                                     mMachineCheckSupported = FALSE;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

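  //
  // Spin until the semaphore becomes non-zero, then atomically decrement it.
  //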
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

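  //
  // Atomically increment the semaphore, unless it has been locked down to
  // MAX_UINT32, in which case leave it unchanged.
  //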
  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}

/**
  Wait for all APs to perform an atomic compare exchange operation to release
  semaphore.

  @param   NumberOfAPs      Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE     The OS has enabled LMCE.
  @retval FALSE    The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception (LMCE) has been signaled.

  Indicates (when set) that a local machine check exception was generated.
  This indicates that the current machine-check event was delivered to only
  this logical processor.

  @retval TRUE    LMCE was signaled.
  @retval FALSE   LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except for SMI-disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;
  BOOLEAN                           LmceEn;
  BOOLEAN                           LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
         ) {
      CpuPause ();
    }
  }

  return;
}


/**
  Replace OS MTRR's with SMI MTRR's.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRRs registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param       BlockMode   Whether to check in blocking or non-blocking mode.

  @retval      TRUE        Task has been finished by all APs.
  @retval      FALSE       Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN                        BlockMode
  )
{
  UINTN                             Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore BSP and APs which have not called into SMM.
    //
    if (!IsPresentAp(Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Check whether it is a present AP.

  @param   CpuIndex      The AP index which calls this function.

  @retval  TRUE           It's a present AP.
  @retval  FALSE          This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN        CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used while executing the procedure.

  @param   CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN                  CpuIndex
  )
{
  PROCEDURE_TOKEN                         *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}

/**
  Free the tokens in the maintained list.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs that it's time to back up MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
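    //
    // Note: PresentCount below also counts the BSP itself, so waiting until it
    // exceeds ApCount guarantees that every counted AP has set its Present flag.
    //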
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount ++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;
  EFI_STATUS                        ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for the BSP a 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to back up MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Back up OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

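  //
  // Service procedures scheduled by the BSP until it signals SMM exit by
  // clearing InsideSmm.
  //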
  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
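  //
  // Layout: one page of PDPTEs (only 4 entries used) followed by four pages of
  // PDEs mapping 0-4GB with 2MB pages, plus one extra 4KB page table for each
  // 2MB region that contains SMM stack guard pages (PagesNeeded above).
  //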
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress+= EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

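  //
  // BIT1 of PcdNullPointerDetectionPropertyMask enables NULL pointer detection
  // in SMM: unmap page 0 so that NULL pointer dereferences fault.
  //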
  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but leave the rest present
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}

/**
  Checks whether the input token is the currently used token.

  @param[in]  Token      This parameter describes the token that was passed into DispatchProcedure or
                         BroadcastProcedure.

  @retval TRUE           The input token is the currently used token.
  @retval FALSE          The input token is not the currently used token.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK           *Token
  )
{
  LIST_ENTRY        *Link;
  PROCEDURE_TOKEN   *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  //
  // Only search used tokens.
  //
  while (Link != gSmmCpuPrivate->FirstFreeToken) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate buffer for the SPIN_LOCKs and PROCEDURE_TOKENs.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN               SpinLockSize;
  UINT32              TokenCountPerChunk;
  UINTN               Index;
  SPIN_LOCK           *SpinLock;
  UINT8               *SpinLockBuffer;
  PROCEDURE_TOKEN     *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Allocate the SPIN_LOCKs and PROCEDURE_TOKENs separately because of the
  // alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock       = SpinLock;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}

/**
  Get a free token.

  If there is no free token, allocate a new token chunk and then return a free one.

  @param RunningApsCount    The Running Aps count for this token.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32       RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  //
  // If FirstFreeToken meets the end of token list, enlarge the token list.
  // Set FirstFreeToken to the first free token.
  //
  if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
    gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
  }
  NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
  gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);

  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}

/**
  Checks status of specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether the timeout has expired.

  @param[in]  Token      This parameter describes the token that was passed into DispatchProcedure or
                         BroadcastProcedure.

  @retval EFI_SUCCESS    Specified AP has finished task assigned by StartupThisAPs().
  @retval EFI_NOT_READY  Specified AP has not finished task and timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in,out]   ProcArguments            The parameter to pass to the procedure
  @param[in]       Token                    This is an optional parameter that allows the caller to execute the
                                            procedure in a blocking or non-blocking fashion. If it is NULL the
                                            call is blocking, and the call will not return until the AP has
                                            completed the procedure. If the token is not NULL, the call will
                                            return immediately. The caller can check whether the procedure has
                                            completed with CheckOnProcedure or WaitForProcedure.
  @param[in]       TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                            execution of Procedure, either for blocking or non-blocking mode.
                                            Zero means infinity. If the timeout expires before all APs return
                                            from Procedure, then Procedure on the failed APs is terminated. If
                                            the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                            If the timeout expires in non-blocking mode, the timeout can be
                                            determined through CheckOnProcedure or WaitForProcedure.
                                            Note that timeout support is optional. Whether an implementation
                                            supports this feature can be determined via the Attributes data
                                            member.
  @param[in,out]   CpuStatus                This optional pointer may be used to get the status code returned
                                            by Procedure when it completes execution on the target AP, or with
                                            EFI_TIMEOUT if the Procedure fails to complete within the optional
                                            timeout. The implementation will update this variable with
                                            EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2              Procedure,
  IN      UINTN                          CpuIndex,
  IN OUT  VOID                           *ProcArguments OPTIONAL,
  IN      MM_COMPLETION                  *Token,
  IN      UINTN                          TimeoutInMicroseconds,
  IN OUT  EFI_STATUS                     *CpuStatus
  )
{
  PROCEDURE_TOKEN    *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    ProcToken = GetFreeToken (1);
    mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

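  //
  // In blocking mode (no Token), wait until the target AP releases its Busy
  // lock, which it does only after the scheduled procedure has completed.
  //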
  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}

/**
  Worker function to execute a caller-provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to start up all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2             Procedure,
  IN       UINTN                         TimeoutInMicroseconds,
  IN OUT   VOID                          *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION                 *Token,
  IN OUT   EFI_STATUS                    *CPUStatus
  )
{
  UINTN               Index;
  UINTN               CpuCount;
  PROCEDURE_TOKEN     *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount ++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the code above already checked mSmmMpSyncData->CpuData[***].Busy for each
  // present AP, AcquireSpinLock is always used here instead of the non-blocking
  // AcquireSpinLockOrFail.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So the wrapper function below is used to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in]  Buffer              Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

1449 | /**
|
---|
1450 | Schedule a procedure to run on the specified CPU.
|
---|
1451 |
|
---|
1452 | @param Procedure The address of the procedure to run
|
---|
1453 | @param CpuIndex Target CPU Index
|
---|
1454 | @param ProcArguments The parameter to pass to the procedure
|
---|
1455 |
|
---|
1456 | @retval EFI_INVALID_PARAMETER CpuNumber not valid
|
---|
1457 | @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
|
---|
1458 | @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
|
---|
1459 | @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
|
---|
1460 | @retval EFI_SUCCESS The procedure has been successfully scheduled
|
---|
1461 |
|
---|
1462 | **/
|
---|
1463 | EFI_STATUS
|
---|
1464 | EFIAPI
|
---|
1465 | SmmStartupThisAp (
|
---|
1466 | IN EFI_AP_PROCEDURE Procedure,
|
---|
1467 | IN UINTN CpuIndex,
|
---|
1468 | IN OUT VOID *ProcArguments OPTIONAL
|
---|
1469 | )
|
---|
1470 | {
|
---|
1471 | MM_COMPLETION Token;
|
---|
1472 |
|
---|
1473 | gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
|
---|
1474 | gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;
|
---|
1475 |
|
---|
1476 | //
|
---|
1477 | // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
|
---|
1478 | //
|
---|
1479 | return InternalSmmStartupThisAp (
|
---|
1480 | ProcedureWrapper,
|
---|
1481 | CpuIndex,
|
---|
1482 | &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
|
---|
1483 | FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
|
---|
1484 | 0,
|
---|
1485 | NULL
|
---|
1486 | );
|
---|
1487 | }
|
---|

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  This is useful when hardware breakpoints set outside of SMM should remain
  effective inside SMM as well.

  NOTE: It might not be appropriate at OS runtime since it might
        conflict with OS debugging facilities. Turn it off in RELEASE builds.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at OS runtime since it might
        conflict with OS debugging facilities. Turn it off in RELEASE builds.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}
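
//
// Note: CpuSmmDebugEntry() and CpuSmmDebugExit() are intended to bracket the
// C portion of SMI handling for a given processor, so that DR6/DR7 from the
// interrupted context are live while SMM C code runs and any changes made in
// SMM are written back to the save state on exit.
//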

/**
  C function for SMI entry; each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for memory above 4G.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //
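      // (ReleaseSemaphore() returns 0 only when the BSP has already locked the
      //  Counter down to MAX_UINT32 via LockdownSemaphore(), so the increment
      //  is refused and this late-arriving processor must not join the current
      //  SMI run.)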

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determined the BSP successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook failed to determine the BSP; use the default election method
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }
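
      //
      // Default election above is lock-free: BspIndex is pre-set to (UINT32)-1,
      // and the first processor whose compare-exchange still observes -1
      // installs its own CpuIndex; every other processor's exchange fails and
      // leaves the winner's index in place.
      //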

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate buffer for SpinLock and Wrapper function buffer.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
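  //
  // Each semaphore or lock gets its own SemaphoreSize-sized slot so no two of
  // them share a cache line. Worked example (values are illustrative only):
  // with SemaphoreSize == 64 and 4 processors, the five pointer-sized global
  // entries take 5 * 64 = 320 bytes, the three per-CPU entries take
  // 3 * 4 * 64 = 768 bytes, so TotalSize == 1088 and one 4KB page is
  // allocated below.
  //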
  DEBUG ((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                   = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow Stack size for each processor in SMM.

  @return The CR3 value (page table base address) to be used in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                  Cr3;
  UINTN                   Index;
  UINT8                   *GdtTssTables;
  UINTN                   GdtTableStepSize;
  CPUID_VERSION_INFO_EDX  RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32 *)&Index, NULL, NULL, NULL);
  gPhyMask  = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
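  //
  // Worked example (illustrative): if CPUID.80000008h:EAX[7:0] reports a
  // 39-bit physical address width, the first line yields 0x7FFFFFFFFF and
  // the second clears bits 0-11 and everything at or above bit 48, leaving
  // gPhyMask == 0x7FFFFFF000 -- a mask for the page-frame portion of a
  // physical address.
  //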

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval EFI_SUCCESS               The SMM Foundation entry point was registered successfully

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**

  Register a startup procedure that every processor runs upon SMI entry,
  before MP synchronization begins.

  @param[in]      Procedure            A pointer to the code stream to be run on the designated target AP
                                       of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
                                       with the related definitions of
                                       EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                       The caller may pass a value of NULL to deregister any existing
                                       startup procedure.
  @param[in,out]  ProcedureArguments   Allows the caller to pass a list of parameters to the code that is
                                       run by the AP. It is an optional common mailbox between APs and
                                       the caller to share information.

  @retval EFI_SUCCESS             The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER   The Procedure is NULL but ProcedureArguments is not NULL.
  @retval EFI_NOT_READY           The MP synchronization data has not been initialized yet.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN OUT VOID              *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }
  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}
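
//
// Illustrative sketch (hypothetical names): a caller with access to this
// service could register a per-SMI startup hook, and later deregister it.
//
//   VOID
//   EFIAPI
//   ExampleStartupHook (
//     IN OUT VOID  *Buffer
//     )
//   {
//     //
//     // Runs on every processor at SmiRendezvous() entry, before BSP election.
//     //
//   }
//
//   Status = RegisterStartupProcedure (ExampleStartupHook, NULL);
//   ...
//   Status = RegisterStartupProcedure (NULL, NULL);   // Deregister.
//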