VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/bootsectors/bs3kit/bs3-cmn-PagingProtect.c@ 77922

Last change on this file since 77922 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.8 KB
Line 
1/* $Id: bs3-cmn-PagingProtect.c 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * BS3Kit - Bs3PagingProtect
4 */
5
6/*
7 * Copyright (C) 2007-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "bs3kit-template-header.h"
32#include "bs3-cmn-paging.h"
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/param.h>
35
36
37/*********************************************************************************************************************************
38* Defined Constants And Macros *
39*********************************************************************************************************************************/
40#if 0
41# define BS3PAGING_DPRINTF1(a) Bs3TestPrintf a
42#else
43# define BS3PAGING_DPRINTF1(a) do { } while (0)
44#endif
45#if 0
46# define BS3PAGING_DPRINTF2(a) Bs3TestPrintf a
47#else
48# define BS3PAGING_DPRINTF2(a) do { } while (0)
49#endif
50
51
52static void *bs3PagingBuildPaeTable(uint64_t uTmpl, uint64_t cbIncrement, BS3MEMKIND enmKind, int *prc)
53{
54 uint64_t BS3_FAR *pau64 = (uint64_t BS3_FAR *)Bs3MemAlloc(enmKind, _4K);
55 if (pau64)
56 {
57 unsigned i;
58 for (i = 0; i < _4K / sizeof(uint64_t); i++, uTmpl += cbIncrement)
59 pau64[i] = uTmpl;
60 }
61 else
62 *prc = VERR_NO_MEMORY;
63 return pau64;
64}
65
66
#undef bs3PagingGetLegacyPte
/**
 * Gets the page table entry (PTE) for an address, given a legacy 32-bit CR3.
 *
 * If the address is covered by a 4MB page (PDE.PS set), the large page is
 * split into a freshly built page table first so an actual 4K PTE can be
 * returned.
 *
 * @returns Pointer to the PTE on success, NULL on failure.
 * @param   cr3         The legacy CR3 (page directory address).
 * @param   uFlat       The 32-bit flat address we want the PTE for.
 * @param   fUseInvlPg  Whether INVLPG can be used after replacing a large page.
 * @param   prc         Updated only on failure (out-of-range or no memory).
 */
BS3_CMN_DEF(X86PTE BS3_FAR *, bs3PagingGetLegacyPte,(RTCCUINTXREG cr3, uint32_t uFlat, bool fUseInvlPg, int *prc))
{
    X86PTE BS3_FAR *pPTE = NULL;
#if TMPL_BITS == 16
    /* In 16-bit code we can only address memory below 1MB (RM/V86) or within
       the tiled area; tables above that limit are unreachable from here. */
    uint32_t const uMaxAddr = BS3_MODE_IS_RM_OR_V86(g_bBs3CurrentMode) ? _1M - 1 : BS3_SEL_TILED_AREA_SIZE - 1;
#else
    uint32_t const uMaxAddr = UINT32_MAX;
#endif
    BS3PAGING_DPRINTF2(("bs3PagingGetLegacyPte: cr3=%RX32 uFlat=%RX32 uMaxAddr=%RX32\n", (uint32_t)cr3, uFlat, uMaxAddr));

    /* Preset the failure status; overwritten implicitly by success paths only
       via the helper (which sets VERR_NO_MEMORY) - callers must check pPTE. */
    *prc = VERR_OUT_OF_RANGE;
    if (cr3 <= uMaxAddr)
    {
        unsigned const iPde = (uFlat >> X86_PD_SHIFT) & X86_PD_MASK;
        PX86PD const   pPD  = (PX86PD)Bs3XptrFlatToCurrent(cr3 & X86_CR3_PAGE_MASK);

        BS3_ASSERT(pPD->a[iPde].b.u1Present);
        if (pPD->a[iPde].b.u1Present)
        {
            unsigned const iPte = (uFlat >> X86_PT_SHIFT) & X86_PT_MASK;

            /* NOTE(review): this assert and the following present check repeat
               the ones directly above - looks like redundant leftovers. */
            BS3_ASSERT(pPD->a[iPde].b.u1Present);
            /* NOTE(review): passes the PDE union by value to %#RX32 - only
               compiled when BS3PAGING_DPRINTF2 is enabled; should be .u. */
            BS3PAGING_DPRINTF2(("bs3PagingGetLegacyPte: pPD=%p iPde=%#x: %#RX32\n", pPD, iPde, pPD->a[iPde]));
            if (pPD->a[iPde].b.u1Present)
            {
                if (!pPD->a[iPde].b.u1Size)
                {
                    /* Regular 4K mapping: return the PTE inside the existing
                       page table, provided we can address that table. */
                    if (pPD->a[iPde].u <= uMaxAddr)
                        pPTE = &((X86PT BS3_FAR *)Bs3XptrFlatToCurrent(pPD->a[iPde].u & ~(uint32_t)PAGE_OFFSET_MASK))->a[iPte];
                    else
                        BS3PAGING_DPRINTF1(("bs3PagingGetLegacyPte: out of range! iPde=%#x: %#x\n", iPde, pPD->a[iPde].u));
                }
                else
                {
                    /* 4MB page: build a replacement page table that maps the
                       same 4MB with 1024 small pages carrying the same flags. */
                    X86PT BS3_FAR *pPT;
                    /* Drop PS, G and the PSE-36 high address bits from the
                       template; mark the small pages dirty up front. */
                    uint32_t uPte = (pPD->a[iPde].u & ~(uint32_t)(X86_PDE4M_PS | X86_PDE4M_G | X86_PDE4M_PG_HIGH_MASK)) \
                                  | X86_PTE_D;
                    /* Propagate G and PAT; the PTE PAT bit sits at a different
                       position (bit 7) than the 4MB PDE one (bit 12). */
                    if (pPD->a[iPde].b.u1Global)
                        uPte |= X86_PTE_G;
                    if (pPD->a[iPde].b.u1PAT)
                        uPte |= X86_PTE_PAT;

                    /* Build the table as two 4K-apart PTEs per 64-bit unit,
                       stepping 8K per unit. */
                    pPT = (X86PT BS3_FAR *)bs3PagingBuildPaeTable(RT_MAKE_U64(uPte, uPte | PAGE_SIZE),
                                                                  RT_MAKE_U64(PAGE_SIZE*2, PAGE_SIZE*2),
                                                                  uMaxAddr > _1M ? BS3MEMKIND_TILED : BS3MEMKIND_REAL, prc);

                    BS3PAGING_DPRINTF2(("bs3PagingGetLegacyPte: Built pPT=%p uPte=%RX32\n", pPT, uPte));
                    if (pPT)
                    {
                        /* Swap the 4MB PDE for the new page table, keeping the
                           original access flags but clearing PS/G/D. */
                        ASMAtomicUoWriteU32(&pPD->a[iPde].u,
                                            Bs3SelPtrToFlat(pPT)
                                            | (  pPD->a[iPde].u
                                               & ~(uint32_t)(X86_PTE_PG_MASK | X86_PDE4M_PS | X86_PDE4M_G | X86_PDE4M_D)));
                        BS3PAGING_DPRINTF2(("bs3PagingGetLegacyPte: iPde=%#x: %#RX32\n", iPde, pPD->a[iPde].u));
                        /* NOTE(review): only the requested page is invalidated
                           here although the whole 4MB mapping changed -
                           presumably sufficient because the caller reloads CR3
                           when not using INVLPG; confirm. */
                        if (fUseInvlPg)
                            ASMInvalidatePage(uFlat);
                        pPTE = &pPT->a[iPte];
                    }
                }
            }
        }
    }
    else
        BS3PAGING_DPRINTF1(("bs3PagingGetLegacyPte: out of range! cr3=%#x\n", cr3));
    return pPTE;
}
134
135
136/**
137 * Get the PTE for an address, given a PAE or long mode CR3.
138 *
139 * @returns Pointer to the PTE on success, NULL on failure.
140 * @param cr3 The CR3.
141 * @param bMode Indicates whether it's PAE or long mode.
142 * @param uFlat The address for which we want the PTE.
143 * @param fUseInvlPg Whether we can use invalidate page when
144 * replacing large pages.
145 * @param prc Updated only on failure.
146 */
147#undef bs3PagingGetPaePte
148BS3_CMN_DEF(X86PTEPAE BS3_FAR *, bs3PagingGetPaePte,(RTCCUINTXREG cr3, uint8_t bMode, uint64_t uFlat, bool fUseInvlPg, int *prc))
149{
150 X86PTEPAE BS3_FAR *pPTE = NULL;
151#if TMPL_BITS == 16
152 uint32_t const uMaxAddr = BS3_MODE_IS_RM_OR_V86(g_bBs3CurrentMode) ? _1M - 1 : BS3_SEL_TILED_AREA_SIZE - 1;
153#else
154 uintptr_t const uMaxAddr = ~(uintptr_t)0;
155#endif
156
157 *prc = VERR_OUT_OF_RANGE;
158 if ((cr3 & X86_CR3_AMD64_PAGE_MASK) <= uMaxAddr)
159 {
160 X86PDPAE BS3_FAR *pPD;
161 if (BS3_MODE_IS_64BIT_SYS(bMode))
162 {
163 unsigned const iPml4e = (uFlat >> X86_PML4_SHIFT) & X86_PML4_MASK;
164 X86PML4 BS3_FAR *pPml4 = (X86PML4 BS3_FAR *)Bs3XptrFlatToCurrent(cr3 & X86_CR3_AMD64_PAGE_MASK);
165 BS3_ASSERT(pPml4->a[iPml4e].n.u1Present);
166 if ((pPml4->a[iPml4e].u & X86_PML4E_PG_MASK) <= uMaxAddr)
167 {
168 unsigned const iPdpte = (uFlat >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
169 X86PDPT BS3_FAR *pPdpt = (X86PDPT BS3_FAR *)Bs3XptrFlatToCurrent(pPml4->a[iPml4e].u & X86_PML4E_PG_MASK);
170 BS3_ASSERT(pPdpt->a[iPdpte].n.u1Present);
171 if (!pPdpt->a[iPdpte].b.u1Size)
172 {
173 if ((pPdpt->a[iPdpte].u & X86_PDPE_PG_MASK) <= uMaxAddr)
174 pPD = (X86PDPAE BS3_FAR *)Bs3XptrFlatToCurrent(pPdpt->a[iPdpte].u & ~(uint64_t)PAGE_OFFSET_MASK);
175 else
176 BS3PAGING_DPRINTF1(("bs3PagingGetPaePte: out of range! iPdpte=%#x: %RX64 max=%RX32\n",
177 iPdpte, pPdpt->a[iPdpte].u, (uint32_t)uMaxAddr));
178 }
179 else
180 {
181 /* Split 1GB page. */
182 pPD = (X86PDPAE BS3_FAR *)bs3PagingBuildPaeTable(pPdpt->a[iPdpte].u, _2M,
183 uMaxAddr > _1M ? BS3MEMKIND_TILED : BS3MEMKIND_REAL, prc);
184 if (pPD)
185 {
186 ASMAtomicUoWriteU64(&pPdpt->a[iPdpte].u,
187 Bs3SelPtrToFlat(pPD)
188 | ( pPdpt->a[iPdpte].u
189 & ~(uint64_t)(X86_PDPE_PG_MASK | X86_PDE4M_PS | X86_PDE4M_G | X86_PDE4M_D)));
190 if (fUseInvlPg)
191 ASMInvalidatePage(uFlat);
192 }
193 }
194 }
195 }
196 //else if (uFlat <= UINT32_MAX) - fixme!
197 else if (!(uFlat >> 32))
198 {
199 unsigned const iPdpte = ((uint32_t)uFlat >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
200 X86PDPT BS3_FAR *pPdpt = (X86PDPT BS3_FAR *)Bs3XptrFlatToCurrent(cr3 & X86_CR3_PAE_PAGE_MASK);
201 BS3_ASSERT(pPdpt->a[iPdpte].n.u1Present);
202 if ((pPdpt->a[iPdpte].u & X86_PDPE_PG_MASK) <= uMaxAddr)
203 pPD = (X86PDPAE BS3_FAR *)Bs3XptrFlatToCurrent(pPdpt->a[iPdpte].u & X86_PDPE_PG_MASK);
204 else
205 BS3PAGING_DPRINTF1(("bs3PagingGetPaePte: out of range! iPdpte=%#x: %RX64 max=%RX32\n",
206 iPdpte, pPdpt->a[iPdpte].u, (uint32_t)uMaxAddr));
207 }
208 else
209 {
210 pPD = NULL;
211 BS3PAGING_DPRINTF1(("bs3PagingGetPaePte: out of range! uFlat=%#RX64 max=%RX32\n", uFlat, (uint32_t)uMaxAddr));
212 }
213 if (pPD)
214 {
215 unsigned const iPte = (uFlat >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
216 unsigned const iPde = (uFlat >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
217 if (!pPD->a[iPde].b.u1Size)
218 {
219 if ((pPD->a[iPde].u & X86_PDE_PAE_PG_MASK) <= uMaxAddr)
220 pPTE = &((X86PTPAE BS3_FAR *)Bs3XptrFlatToCurrent(pPD->a[iPde].u & ~(uint64_t)PAGE_OFFSET_MASK))->a[iPte];
221 else
222 BS3PAGING_DPRINTF1(("bs3PagingGetPaePte: out of range! iPde=%#x: %RX64 max=%RX32\n",
223 iPde, pPD->a[iPde].u, (uint32_t)uMaxAddr));
224 }
225 else
226 {
227 /* Split 2MB page. */
228 X86PTPAE BS3_FAR *pPT;
229 uint64_t uTmpl = pPD->a[iPde].u & ~(uint64_t)(X86_PDE4M_G | X86_PDE4M_PS | X86_PDE4M_PAT);
230 if (!pPD->a[iPde].b.u1Global)
231 uTmpl |= X86_PTE_G;
232 if (!pPD->a[iPde].b.u1PAT)
233 uTmpl |= X86_PTE_PAT;
234
235 pPT = (X86PTPAE BS3_FAR *)bs3PagingBuildPaeTable(uTmpl, PAGE_SIZE,
236 uMaxAddr > _1M ? BS3MEMKIND_TILED : BS3MEMKIND_REAL, prc);
237 if (pPT)
238 {
239 ASMAtomicUoWriteU64(&pPD->a[iPde].u,
240 Bs3SelPtrToFlat(pPT)
241 | ( pPD->a[iPde].u
242 & ~(uint64_t)(X86_PTE_PAE_PG_MASK | X86_PDE4M_PS | X86_PDE4M_G | X86_PDE4M_D)));
243 if (fUseInvlPg)
244 ASMInvalidatePage(uFlat);
245 pPTE = &pPT->a[iPte];
246 }
247 }
248 }
249 }
250 else
251 BS3PAGING_DPRINTF1(("bs3PagingGetPaePte: out of range! cr3=%#RX32 uMaxAddr=%#RX32\n", (uint32_t)cr3, (uint32_t)uMaxAddr));
252 return pPTE;
253}
254
255
#undef Bs3PagingProtect
/**
 * Modifies the page protection of a flat address range by setting and
 * clearing PTE flag bits, walking whichever paging mode is currently active.
 *
 * Large pages covering the range are split into 4K page tables on demand via
 * bs3PagingGetLegacyPte / bs3PagingGetPaePte.
 *
 * @returns VINF_SUCCESS, or an error from the PTE lookup (e.g.
 *          VERR_OUT_OF_RANGE, VERR_NO_MEMORY).
 * @param   uFlat   Start of the range (rounded down to a page boundary).
 * @param   cb      Size of the range (rounded up to whole pages).
 * @param   fSet    PTE bits to set for every page (address bits are masked out).
 * @param   fClear  PTE bits to clear for every page (address bits are masked out).
 */
BS3_CMN_DEF(int, Bs3PagingProtect,(uint64_t uFlat, uint64_t cb, uint64_t fSet, uint64_t fClear))
{
#if ARCH_BITS == 16
    /* Page tables are not directly accessible from v8086 mode; see the else
       branch at the bottom for how that is handled. */
    if (!BS3_MODE_IS_V86(g_bBs3CurrentMode))
#endif
    {
        RTCCUINTXREG const cr3        = ASMGetCR3();
        /* CR4 only exists when CPUID does (486+); treat it as 0 otherwise. */
        RTCCUINTXREG const cr4        = g_uBs3CpuDetected & BS3CPU_F_CPUID ? ASMGetCR4() : 0;
        bool const         fLegacyPTs = !(cr4 & X86_CR4_PAE);
        /* INVLPG requires a 486+; for big ranges a full CR3 reload is cheaper
           unless global pages are enabled (which a reload wouldn't flush). */
        bool const         fUseInvlPg = (g_uBs3CpuDetected & BS3CPU_TYPE_MASK) >= BS3CPU_80486
                                     && (   cb < UINT64_C(16)*PAGE_SIZE
                                         || (cr4 & X86_CR4_PGE));
        unsigned           cEntries;
        int                rc;

        /*
         * Adjust the range parameters.
         */
        cb += uFlat & PAGE_OFFSET_MASK;
        cb = RT_ALIGN_64(cb, PAGE_SIZE);
        uFlat &= ~(uint64_t)PAGE_OFFSET_MASK;

        /* Never let the caller touch the physical address bits. */
        fSet &= ~X86_PTE_PAE_PG_MASK;
        fClear &= ~X86_PTE_PAE_PG_MASK;

        BS3PAGING_DPRINTF1(("Bs3PagingProtect: uFlat=%RX64 cb=%RX64 fSet=%RX64 fClear=%RX64 %s %s\n", uFlat, cb, fSet, fClear,
                            fLegacyPTs ? "legacy" : "pae/amd64", fUseInvlPg ? "invlpg" : "reload-cr3"));
        if (fLegacyPTs)
        {
            /*
             * Legacy page tables.
             */
            /* NOTE(review): the (uint32_t) cast makes an exact multiple of
               4GiB terminate immediately - presumably harmless since legacy
               paging only spans 4GiB anyway; confirm. */
            while ((uint32_t)cb > 0)
            {
                PX86PTE pPte = BS3_CMN_FAR_NM(bs3PagingGetLegacyPte)(cr3, (uint32_t)uFlat, fUseInvlPg, &rc);
                if (!pPte)
                    return rc;

                /* Process PTEs to the end of the current page table, then
                   re-resolve for the next one. */
                cEntries = X86_PG_ENTRIES - ((uFlat >> X86_PT_SHIFT) & X86_PT_MASK);
                while (cEntries-- > 0 && cb > 0)
                {
                    pPte->u &= ~(uint32_t)fClear;
                    pPte->u |= (uint32_t)fSet;
                    if (fUseInvlPg)
                        ASMInvalidatePage(uFlat);

                    pPte++;
                    uFlat += PAGE_SIZE;
                    cb -= PAGE_SIZE;
                }
            }
        }
        else
        {
            /*
             * Long mode or PAE page tables (at this level they are the same).
             */
            while (cb > 0)
            {
                PX86PTEPAE pPte = BS3_CMN_FAR_NM(bs3PagingGetPaePte)(cr3, g_bBs3CurrentMode, uFlat, fUseInvlPg, &rc);
                if (!pPte)
                    return rc;

                /* Process PTEs to the end of the current page table, then
                   re-resolve for the next one. */
                cEntries = X86_PG_ENTRIES - ((uFlat >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK);
                while (cEntries-- > 0 && cb > 0)
                {
                    pPte->u &= ~fClear;
                    pPte->u |= fSet;
                    if (fUseInvlPg)
                        ASMInvalidatePage(uFlat);

                    pPte++;
                    uFlat += PAGE_SIZE;
                    cb -= PAGE_SIZE;
                }
            }
        }

        /*
         * Flush the TLB if we didn't use INVLPG above.
         */
        BS3PAGING_DPRINTF2(("Bs3PagingProtect: reloading cr3=%RX32\n", (uint32_t)cr3));
        /* NOTE(review): the condition is deliberately commented out - CR3 is
           always reloaded, even when INVLPG was used. */
        //if (!fUseInvlPg)
        ASMSetCR3(cr3);
        BS3PAGING_DPRINTF2(("Bs3PagingProtect: reloaded cr3=%RX32\n", (uint32_t)cr3));
    }
#if ARCH_BITS == 16
    /*
     * We cannot do this stuff directly in v8086 mode, so switch to 16-bit
     * protected mode and do it from there.
     */
    else
        return Bs3SwitchFromV86To16BitAndCallC((FPFNBS3FAR)Bs3PagingProtect_f16, sizeof(uint64_t) * 4, uFlat, cb, fSet, fClear);
#endif
    return VINF_SUCCESS;
}
352
353
354#undef Bs3PagingProtectPtr
355BS3_CMN_DEF(int, Bs3PagingProtectPtr,(void *pv, size_t cb, uint64_t fSet, uint64_t fClear))
356{
357#if ARCH_BITS == 16
358 return BS3_CMN_NM(Bs3PagingProtect)(Bs3SelPtrToFlat(pv), cb, fSet, fClear);
359#else
360 return BS3_CMN_NM(Bs3PagingProtect)((uintptr_t)pv, cb, fSet, fClear);
361#endif
362}
363
364
365#undef Bs3PagingGetPte
366BS3_CMN_DEF(void BS3_FAR *, Bs3PagingGetPte,(uint64_t uFlat, int *prc))
367{
368 RTCCUINTXREG const cr3 = ASMGetCR3();
369 RTCCUINTXREG const cr4 = g_uBs3CpuDetected & BS3CPU_F_CPUID ? ASMGetCR4() : 0;
370 bool const fLegacyPTs = !(cr4 & X86_CR4_PAE);
371 bool const fUseInvlPg = (g_uBs3CpuDetected & BS3CPU_TYPE_MASK) >= BS3CPU_80486;
372 int rc;
373 if (!prc)
374 prc = &rc;
375 if (!fLegacyPTs)
376 return BS3_CMN_FAR_NM(bs3PagingGetPaePte)(cr3, g_bBs3CurrentMode, uFlat, fUseInvlPg, prc);
377 if (uFlat < _4G)
378 return BS3_CMN_FAR_NM(bs3PagingGetLegacyPte)(cr3, (uint32_t)uFlat, fUseInvlPg, prc);
379 *prc = VERR_OUT_OF_RANGE;
380 return NULL;
381}
382
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette