VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/CpuMpPei/CpuPaging.c@ 81196

Last change on this file since 81196 was 81196, checked in by vboxsync, 5 years ago

EFI/Firmware: Applied the SplitPage fix to the copy in CpuMpPei too as well as surrounding page entry updates that I could easily spot. bugref:4643

  • Property svn:eol-style set to native
File size: 18.4 KB
Line 
1/** @file
2 Basic paging support for the CPU to enable Stack Guard.
3
4Copyright (c) 2018 - 2019, Intel Corporation. All rights reserved.<BR>
5
6SPDX-License-Identifier: BSD-2-Clause-Patent
7
8**/
9
10#include <Register/Intel/Cpuid.h>
11#include <Register/Intel/Msr.h>
12#include <Library/MemoryAllocationLib.h>
13#include <Library/CpuLib.h>
14#include <Library/BaseLib.h>
15#ifdef VBOX
16# define IN_RING0
17# include <iprt/asm.h>
18#endif
19
20#include "CpuMpPei.h"
21
//
// IA32/X64 page table entry bits (see Intel SDM Vol. 3A, paging structures).
//
#define IA32_PG_P BIT0      // Present
#define IA32_PG_RW BIT1     // Read/write
#define IA32_PG_U BIT2      // User/supervisor
#define IA32_PG_A BIT5      // Accessed
#define IA32_PG_D BIT6      // Dirty
#define IA32_PG_PS BIT7     // Page size (entry maps a large page / is a leaf above 4K)
#define IA32_PG_NX BIT63    // Execute disable

// Attributes given to newly created paging-structure entries.
#define PAGE_ATTRIBUTE_BITS (IA32_PG_RW | IA32_PG_P)
// Bits copied from a parent entry into child entries when splitting a page.
// NOTE: name is a historical typo for "PROPAGATE"; kept for compatibility.
#define PAGE_PROGATE_BITS (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\
                           PAGE_ATTRIBUTE_BITS)

// Each paging level indexes with 9 bits (512 entries per table).
#define PAGING_PAE_INDEX_MASK 0x1FF
// Masks extracting the physical address field from an entry at each level
// (low bits hold attributes, bits above 51 are reserved/NX).
#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull

//
// Levels of the paging hierarchy, from the 4K leaf level up to the 512G
// (PML4) level. PageNone marks "no mapping".
//
typedef enum {
  PageNone = 0,
  PageMin = 1,
  Page4K = PageMin,
  Page2M = 2,
  Page1G = 3,
  Page512G = 4,
  PageMax = Page512G
} PAGE_ATTRIBUTE;

//
// Per-level constants describing the page size and how a linear address is
// decomposed at that level.
//
typedef struct {
  PAGE_ATTRIBUTE Attribute;   // The level this row describes
  UINT64 Length;              // Size in bytes of a page mapped at this level
  UINT64 AddressMask;         // Mask extracting the physical address from an entry
  UINTN AddressBitOffset;     // Low bit of the table-index field in a linear address
  UINTN AddressBitLength;     // Width of the index field (9 bits at every level)
} PAGE_ATTRIBUTE_TABLE;

//
// Indexed directly by PAGE_ATTRIBUTE; row 0 (PageNone) is a placeholder.
//
PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
  {PageNone, 0, 0, 0, 0},
  {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64, 12, 9},
  {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64, 21, 9},
  {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64, 30, 9},
  {Page512G, SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},
};

//
// Run paging / stack-guard setup once permanent memory has been discovered.
//
EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList[] = {
  {
    (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
    &gEfiPeiMemoryDiscoveredPpiGuid,
    MemoryDiscoveredPpiNotifyCallback
  }
};
73
#ifdef VBOX
/**
  Safe page table entry write function; guarantees the 64-bit store is a
  single atomic access so the compiler cannot split it into two 32-bit
  writes (fatal if the entry maps the code or data currently in use).

  @param[in] PageEntry        The page table entry to modify.
  @param[in] CurrentPageEntry The old page table value (for cmpxchg8b).
  @param[in] NewPageEntry     What to write.
**/
static VOID SafePageTableEntryWrite64 (UINT64 volatile *PageEntry, UINT64 CurrentPageEntry, UINT64 NewPageEntry)
{
# ifdef VBOX
  /* IPRT supplies an atomic 64-bit store; CurrentPageEntry is unused here.
     NOTE(review): the #else branch below is dead code in this build (outer
     block is already guarded by #ifdef VBOX); kept as reference. */
  ASMAtomicWriteU64(PageEntry, NewPageEntry); RT_NOREF(CurrentPageEntry);
# else
  /* Compare-exchange loop: retry until the entry is swapped from the value
     we last observed to the new value. */
  for (;;) {
    UINT64 CurValue = InterlockedCompareExchange64(PageEntry, CurrentPageEntry, NewPageEntry);
    if (CurValue == CurrentPageEntry)
      return;
    CurrentPageEntry = CurValue;
  }
# endif
}
#endif
97
98/**
99 The function will check if IA32 PAE is supported.
100
101 @retval TRUE IA32 PAE is supported.
102 @retval FALSE IA32 PAE is not supported.
103
104**/
105BOOLEAN
106IsIa32PaeSupported (
107 VOID
108 )
109{
110 UINT32 RegEax;
111 CPUID_VERSION_INFO_EDX RegEdx;
112
113 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
114 if (RegEax >= CPUID_VERSION_INFO) {
115 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
116 if (RegEdx.Bits.PAE != 0) {
117 return TRUE;
118 }
119 }
120
121 return FALSE;
122}
123
124/**
125 This API provides a way to allocate memory for page table.
126
127 @param Pages The number of 4 KB pages to allocate.
128
129 @return A pointer to the allocated buffer or NULL if allocation fails.
130
131**/
132VOID *
133AllocatePageTableMemory (
134 IN UINTN Pages
135 )
136{
137 VOID *Address;
138
139 Address = AllocatePages(Pages);
140 if (Address != NULL) {
141 ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));
142 }
143
144 return Address;
145}
146
147/**
148 Get the address width supported by current processor.
149
150 @retval 32 If processor is in 32-bit mode.
151 @retval 36-48 If processor is in 64-bit mode.
152
153**/
154UINTN
155GetPhysicalAddressWidth (
156 VOID
157 )
158{
159 UINT32 RegEax;
160
161 if (sizeof(UINTN) == 4) {
162 return 32;
163 }
164
165 AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
166 if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {
167 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
168 RegEax &= 0xFF;
169 if (RegEax > 48) {
170 return 48;
171 }
172
173 return (UINTN)RegEax;
174 }
175
176 return 36;
177}
178
179/**
180 Get the type of top level page table.
181
182 @retval Page512G PML4 paging.
183 @retval Page1G PAE paing.
184
185**/
186PAGE_ATTRIBUTE
187GetPageTableTopLevelType (
188 VOID
189 )
190{
191 MSR_IA32_EFER_REGISTER MsrEfer;
192
193 MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);
194
195 return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;
196}
197
/**
  Return page table entry matching the address.

  Walks the paging hierarchy starting at CR3, one level per iteration, until
  it reaches a leaf entry (PS bit set, or the 4K level) or an unmapped slot.

  @param[in]  Address        The address to be checked.
  @param[out] PageAttribute  The page attribute (level) of the returned entry,
                             or PageNone if the address is not mapped.

  @return The page entry, or NULL if the address is not mapped.
**/
VOID *
GetPageTableEntry (
  IN  PHYSICAL_ADDRESS  Address,
  OUT PAGE_ATTRIBUTE    *PageAttribute
  )
{
  INTN    Level;
  UINTN   Index;
  UINT64  *PageTable;
  UINT64  AddressEncMask;

  // Memory-encryption bits (e.g. AMD SEV) must be stripped before using an
  // entry's address field as a pointer.
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
  for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
    // Extract this level's 9-bit table index from the address.
    Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
    Index &= PAGING_PAE_INDEX_MASK;

    //
    // No mapping?
    //
    if (PageTable[Index] == 0) {
      *PageAttribute = PageNone;
      return NULL;
    }

    //
    // Page memory? (PS set = large-page leaf; Level == PageMin = 4K leaf.)
    //
    if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {
      *PageAttribute = (PAGE_ATTRIBUTE)Level;
      return &PageTable[Index];
    }

    //
    // Page directory or table: descend to the next level.
    //
    PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
                                  ~AddressEncMask &
                                  PAGING_4K_ADDRESS_MASK_64);
  }

  // Not reachable in practice (loop always exits via a leaf or a hole), but
  // report "no mapping" defensively.
  *PageAttribute = PageNone;
  return NULL;
}
250
/**
  This function splits one page entry to smaller page entries.

  The entry is split exactly one level down (e.g. 1G -> 512 x 2M); when
  Recursively is TRUE each new entry is split further until SplitAttribute
  is reached. Under VBOX the parent entry is read once and rewritten
  atomically, since it may map the code/data currently executing.

  @param[in] PageEntry       The page entry to be splitted.
  @param[in] PageAttribute   The page attribute (level) of the page entry.
  @param[in] SplitAttribute  The target level to split down to.
  @param[in] Recursively     Do the split recursively or not.

  @retval RETURN_SUCCESS            The page entry is splitted.
  @retval RETURN_INVALID_PARAMETER  If target page attribute is invalid.
  @retval RETURN_OUT_OF_RESOURCES   No resource to split page entry.
**/
RETURN_STATUS
SplitPage (
#ifdef VBOX
  IN UINT64 volatile  *PageEntry,
#else
  IN UINT64           *PageEntry,
#endif
  IN PAGE_ATTRIBUTE   PageAttribute,
  IN PAGE_ATTRIBUTE   SplitAttribute,
  IN BOOLEAN          Recursively
  )
{
#ifdef VBOX
  UINT64          CurrentPageEntry;
#endif
  UINT64          BaseAddress;
  UINT64          *NewPageEntry;
  UINTN           Index;
  UINT64          AddressEncMask;
  PAGE_ATTRIBUTE  SplitTo;

  // Can only split to a strictly smaller, valid level.
  if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {
    ASSERT (SplitAttribute != PageNone);
    ASSERT (SplitAttribute < PageAttribute);
    return RETURN_INVALID_PARAMETER;
  }

  // One 4K page holds the 512 child entries.
  NewPageEntry = AllocatePageTableMemory (1);
  if (NewPageEntry == NULL) {
    ASSERT (NewPageEntry != NULL);
    return RETURN_OUT_OF_RESOURCES;
  }

  //
  // One level down each step to achieve more compact page table.
  //
  SplitTo = PageAttribute - 1;
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[SplitTo].AddressMask;
#ifdef VBOX
  // Snapshot the entry once; used for both the base address and the
  // propagated attribute bits, and later as the cmpxchg comparand.
  CurrentPageEntry = *PageEntry;
  BaseAddress = CurrentPageEntry &
#else
  BaseAddress = *PageEntry &
#endif
                ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                mPageAttributeTable[PageAttribute].AddressMask;
  for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
    // Child entry: next chunk of the parent's range, same attribute bits.
    NewPageEntry[Index] = BaseAddress | AddressEncMask |
#ifdef VBOX
                          (CurrentPageEntry & PAGE_PROGATE_BITS);
#else
                          ((*PageEntry) & PAGE_PROGATE_BITS);
#endif

    if (SplitTo != PageMin) {
      // Levels above 4K need PS set to be leaf (large page) mappings.
      NewPageEntry[Index] |= IA32_PG_PS;
    }

    if (Recursively && SplitTo > SplitAttribute) {
      SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
    }

    BaseAddress += mPageAttributeTable[SplitTo].Length;
  }

#ifdef VBOX
  // Install the new child table into the parent entry atomically.
  SafePageTableEntryWrite64 (PageEntry, CurrentPageEntry,
                             (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS);
#else
  (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;
#endif

  return RETURN_SUCCESS;
}
338
/**
  This function modifies the page attributes for the memory region specified
  by BaseAddress and Length from their current attributes to the attributes
  specified by Attributes.

  Caller should make sure BaseAddress and Length is at page boundary.

  Only the 'present' bit of Attributes is honored; larger pages covering the
  range are first split down to 4K so only the requested pages are affected.

  @param[in] BaseAddress  Start address of a memory region.
  @param[in] Length       Size in bytes of the memory region.
  @param[in] Attributes   Bit mask of attributes to modify.

  @retval RETURN_SUCCESS            The attributes were modified for the memory
                                    region.
  @retval RETURN_INVALID_PARAMETER  Length is zero; or,
                                    Attributes specified an illegal combination
                                    of attributes that cannot be set together; or
                                    Address is not 4KB aligned.
  @retval RETURN_OUT_OF_RESOURCES   There are not enough system resources to modify
                                    the attributes.
  @retval RETURN_UNSUPPORTED        Cannot modify the attributes of given memory.

**/
RETURN_STATUS
EFIAPI
ConvertMemoryPageAttributes (
  IN  PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64            Length,
  IN  UINT64            Attributes
  )
{
#ifdef VBOX
  UINT64 volatile       *PageEntry;
  UINT64                CurrentPageEntry;
#else
  UINT64                *PageEntry;
#endif
  PAGE_ATTRIBUTE        PageAttribute;
  RETURN_STATUS         Status;
  EFI_PHYSICAL_ADDRESS  MaximumAddress;

  // Reject zero length and non-4K-aligned address/length.
  if (Length == 0 ||
      (BaseAddress & (SIZE_4KB - 1)) != 0 ||
      (Length & (SIZE_4KB - 1)) != 0) {

    ASSERT (Length > 0);
    ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
    ASSERT ((Length & (SIZE_4KB - 1)) == 0);

    return RETURN_INVALID_PARAMETER;
  }

  // PEI runs below 4GB; refuse ranges above it (including overflow).
  MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
  if (BaseAddress > MaximumAddress ||
      Length > MaximumAddress ||
      (BaseAddress > MaximumAddress - (Length - 1))) {
    return RETURN_UNSUPPORTED;
  }

  //
  // Below logic is to check 2M/4K page to make sure we do not waste memory.
  //
  while (Length != 0) {
    PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
    if (PageEntry == NULL) {
      return RETURN_UNSUPPORTED;
    }

    if (PageAttribute != Page4K) {
      // Split only one level per pass, then re-walk the tables.
      Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);
      if (RETURN_ERROR (Status)) {
        return Status;
      }
      //
      // Do it again until the page is 4K.
      //
      continue;
    }

    //
    // Just take care of 'present' bit for Stack Guard.
    //
#ifdef VBOX
    // Only rewrite (atomically) when the present bit actually changes; the
    // entry may map live code/data.
    CurrentPageEntry = *PageEntry;
    if ((CurrentPageEntry & IA32_PG_P) != (Attributes & IA32_PG_P))
      SafePageTableEntryWrite64 (PageEntry, CurrentPageEntry,
                                 (CurrentPageEntry & ~(UINT64)IA32_PG_P) | (Attributes & IA32_PG_P));
#else
    if ((Attributes & IA32_PG_P) != 0) {
      *PageEntry |= (UINT64)IA32_PG_P;
    } else {
      *PageEntry &= ~((UINT64)IA32_PG_P);
    }
#endif

    //
    // Convert success, move to next
    //
    BaseAddress += SIZE_4KB;
    Length -= SIZE_4KB;
  }

  return RETURN_SUCCESS;
}
442
443/**
444 Get maximum size of page memory supported by current processor.
445
446 @param[in] TopLevelType The type of top level page entry.
447
448 @retval Page1G If processor supports 1G page and PML4.
449 @retval Page2M For all other situations.
450
451**/
452PAGE_ATTRIBUTE
453GetMaxMemoryPage (
454 IN PAGE_ATTRIBUTE TopLevelType
455 )
456{
457 UINT32 RegEax;
458 UINT32 RegEdx;
459
460 if (TopLevelType == Page512G) {
461 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
462 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
463 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
464 if ((RegEdx & BIT26) != 0) {
465 return Page1G;
466 }
467 }
468 }
469
470 return Page2M;
471}
472
/**
  Create PML4 or PAE page table identity-mapping the entire physical
  address space with the largest supported page size.

  @return The address of the top-level page table, or 0 on allocation failure.

**/
UINTN
CreatePageTable (
  VOID
  )
{
  RETURN_STATUS         Status;
  UINTN                 PhysicalAddressBits;
  UINTN                 NumberOfEntries;
  PAGE_ATTRIBUTE        TopLevelPageAttr;
  UINTN                 PageTable;
  PAGE_ATTRIBUTE        MaxMemoryPage;
  UINTN                 Index;
  UINT64                AddressEncMask;
  UINT64                *PageEntry;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;

  TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
  PhysicalAddressBits = GetPhysicalAddressWidth ();
  // Number of top-level entries needed to cover the full physical space.
  NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
                                 mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);

  PageTable = (UINTN) AllocatePageTableMemory (1);
  if (PageTable == 0) {
    return 0;
  }

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
  MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
  PageEntry = (UINT64 *)PageTable;

  PhysicalAddress = 0;
  for (Index = 0; Index < NumberOfEntries; ++Index) {
    *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;

    //
    // Split the top page table down to the maximum page size supported
    //
    if (MaxMemoryPage < TopLevelPageAttr) {
      Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
      ASSERT_EFI_ERROR (Status);
    }

    if (TopLevelPageAttr == Page1G) {
      //
      // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
      //
      *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
    }

    PageEntry += 1;
    PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
  }


  return PageTable;
}
536
537/**
538 Setup page tables and make them work.
539
540**/
541VOID
542EnablePaging (
543 VOID
544 )
545{
546 UINTN PageTable;
547
548 PageTable = CreatePageTable ();
549 ASSERT (PageTable != 0);
550 if (PageTable != 0) {
551 AsmWriteCr3(PageTable);
552 AsmWriteCr4 (AsmReadCr4 () | BIT5); // CR4.PAE
553 AsmWriteCr0 (AsmReadCr0 () | BIT31); // CR0.PG
554 }
555}
556
557/**
558 Get the base address of current AP's stack.
559
560 This function is called in AP's context and assumes that whole calling stacks
561 (till this function) consumed by AP's wakeup procedure will not exceed 4KB.
562
563 PcdCpuApStackSize must be configured with value taking the Guard page into
564 account.
565
566 @param[in,out] Buffer The pointer to private data buffer.
567
568**/
569VOID
570EFIAPI
571GetStackBase (
572 IN OUT VOID *Buffer
573 )
574{
575 EFI_PHYSICAL_ADDRESS StackBase;
576
577 StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;
578 StackBase += BASE_4KB;
579 StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);
580 StackBase -= PcdGet32(PcdCpuApStackSize);
581
582 *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;
583}
584
/**
  Setup stack Guard page at the stack base of each processor. BSP and APs have
  different way to get stack base address: the BSP's stack is published via
  the stack-allocation HOB, while each AP reports its own stack base.

**/
VOID
SetupStackGuardPage (
  VOID
  )
{
  EFI_PEI_HOB_POINTERS    Hob;
  EFI_PHYSICAL_ADDRESS    StackBase;
  UINTN                   NumberOfProcessors;
  UINTN                   Bsp;
  UINTN                   Index;

  //
  // One extra page at the bottom of the stack is needed for Guard page.
  //
  if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {
    DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
    ASSERT (FALSE);
  }

  MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);
  MpInitLibWhoAmI (&Bsp);
  for (Index = 0; Index < NumberOfProcessors; ++Index) {
    StackBase = 0;

    if (Index == Bsp) {
      // BSP: find the stack-allocation HOB carrying the stack base.
      Hob.Raw = GetHobList ();
      while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
        if (CompareGuid (&gEfiHobMemoryAllocStackGuid,
                         &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {
          StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;
          break;
        }
        Hob.Raw = GET_NEXT_HOB (Hob);
      }
    } else {
      //
      // Ask the AP to report its stack base address.
      //
      MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);
    }
    ASSERT (StackBase != 0);
    //
    // Set Guard page at stack base address (clear the present bit).
    //
    ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);
    DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",
            (UINT64)StackBase, (UINT64)Index));
  }

  //
  // Publish the changes of page table.
  //
  CpuFlushTlb ();
}
644
/**
  Enable/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.

  Doing this in the memory-discovered callback is to make sure the Stack Guard
  feature to cover as most PEI code as possible.

  @param[in] PeiServices       General purpose services available to every PEIM.
  @param[in] NotifyDescriptor  The notification structure this PEIM registered on install.
  @param[in] Ppi               The memory discovered PPI. Not used.

  @retval EFI_SUCCESS  The function completed successfully.
  @retval others       There's error in MP initialization.
**/
EFI_STATUS
EFIAPI
MemoryDiscoveredPpiNotifyCallback (
  IN EFI_PEI_SERVICES           **PeiServices,
  IN EFI_PEI_NOTIFY_DESCRIPTOR  *NotifyDescriptor,
  IN VOID                       *Ppi
  )
{
  EFI_STATUS  Status;
  BOOLEAN     InitStackGuard;

  //
  // Paging must be setup first. Otherwise the exception TSS setup during MP
  // initialization later will not contain paging information and then fail
  // the task switch (for the sake of stack switch).
  //
  InitStackGuard = FALSE;
  if (IsIa32PaeSupported () && PcdGetBool (PcdCpuStackGuard)) {
    EnablePaging ();
    InitStackGuard = TRUE;
  }

  Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);
  ASSERT_EFI_ERROR (Status);

  // Guard pages are only applied once MP init succeeded and paging is on.
  if (InitStackGuard) {
    SetupStackGuardPage ();
  }

  return Status;
}
689
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette