VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst-armv8.cpp.h@ 108856

Last change on this file since 108856 was 108856, checked in by vboxsync, 5 weeks ago

VMM/PGM: Move some code for the ARMv8 page table walking around in order to be able to create standalone testcase, bugref:10388

/* $Id: PGMAllGst-armv8.cpp.h 108856 2025-04-04 17:36:38Z vboxsync $ */
/** @file
 * PGM - Page Manager, ARMv8 Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*
 *
 * Mode criteria:
 *    - MMU enabled/disabled.
 *    - TCR_EL1.TG0 (granule size for TTBR0_EL1).
 *    - TCR_EL1.TG1 (granule size for TTBR1_EL1).
 *    - TCR_EL1.T0SZ (address space size for TTBR0_EL1).
 *    - TCR_EL1.T1SZ (address space size for TTBR1_EL1).
 *    - TCR_EL1.IPS (intermediate physical address size).
 *    - TCR_EL1.TBI0 (ignore top address byte for TTBR0_EL1).
 *    - TCR_EL1.TBI1 (ignore top address byte for TTBR1_EL1).
 *    - TCR_EL1.HPD0 (hierarchical permission disables for TTBR0_EL1).
 *    - TCR_EL1.HPD1 (hierarchical permission disables for TTBR1_EL1).
 *    - More?
 *
 * Other relevant modifiers:
 *    - TCR_EL1.HA - hardware access bit.
 *    - TCR_EL1.HD - hardware dirty bit.
 *    - ++
 *
 * Each privileged EL (1,2,3) has its own TCR_ELx and TTBR[01]_ELx registers,
 * so they should all have their own separate modes.  To keep this simpler,
 * there is a separate mode for TTBR0_ELx and one for TTBR1_ELx.  The top-level
 * functions determine which of the roots to use and call template (C++)
 * functions that take it from there.  Using the preprocessor function template
 * approach is _not_ desirable here.
 *
 */
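/*
 * Illustrative sketch (editorial note, not part of the original source): the
 * note above says the top-level functions pick the translation root and hand
 * off to the templated workers.  Assuming the architectural rule that VA bit
 * 55 selects the TTBR1_EL1 half, a minimal dispatcher could look roughly like
 * the snippet below; the function name and the hard-coded template arguments
 * (4KiB granule, initial lookup level 1, TBI and EPD clear) are hypothetical.
 *
 * @code
 *    int pgmGstWalkDispatchSketch(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
 *    {
 *        if (GCPtr & RT_BIT_64(55))   // upper VA range -> TTBR1_EL1
 *            return pgmGstWalkWorker<false, 1, ARMV8_TCR_EL1_AARCH64_TG1_4KB, false, false>(pVCpu, GCPtr, pWalk, NULL);
 *        return pgmGstWalkWorker<true,  1, ARMV8_TCR_EL1_AARCH64_TG1_4KB, false, false>(pVCpu, GCPtr, pWalk, NULL);
 *    }
 * @endcode
 */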

/*
 * Common helpers.
 * Common helpers.
 * Common helpers.
 */

DECLINLINE(int) pgmGstWalkReturnNotPresent(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
{
    NOREF(pVCpu);
    pWalk->fNotPresent = true;
    pWalk->uLevel      = uLevel;
    pWalk->fFailed     = PGM_WALKFAIL_NOT_PRESENT
                       | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


DECLINLINE(int) pgmGstWalkReturnBadPhysAddr(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->fBadPhysAddr = true;
    pWalk->uLevel       = uLevel;
    pWalk->fFailed      = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS
                        | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


DECLINLINE(int) pgmGstWalkReturnRsvdError(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint8_t uLevel)
{
    NOREF(pVCpu);
    pWalk->fRsvdError = true;
    pWalk->uLevel     = uLevel;
    pWalk->fFailed    = PGM_WALKFAIL_RESERVED_BITS
                      | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


DECLINLINE(int) pgmGstWalkFastReturnNotPresent(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint8_t uLevel)
{
    RT_NOREF(pVCpu);
    pWalk->fFailed = PGM_WALKFAIL_NOT_PRESENT | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PAGE_TABLE_NOT_PRESENT;
}


DECLINLINE(int) pgmGstWalkFastReturnBadPhysAddr(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint8_t uLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); RT_NOREF(pVCpu, rc);
    pWalk->fFailed = PGM_WALKFAIL_BAD_PHYSICAL_ADDRESS | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}


DECLINLINE(int) pgmGstWalkFastReturnRsvdError(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint8_t uLevel)
{
    RT_NOREF(pVCpu);
    pWalk->fFailed = PGM_WALKFAIL_RESERVED_BITS | ((uint32_t)uLevel << PGM_WALKFAIL_LEVEL_SHIFT);
    return VERR_RESERVED_PAGE_TABLE_BITS;
}


/*
 * Special no paging variant.
 * Special no paging variant.
 * Special no paging variant.
 */

static PGM_CTX_DECL(int) PGM_CTX(pgm,GstNoneGetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
    RT_NOREF(pVCpu);

    RT_ZERO(*pWalk);
    pWalk->fSucceeded = true;
    pWalk->GCPtr      = GCPtr;
    pWalk->GCPhys     = GCPtr & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US; /** @todo */
    return VINF_SUCCESS;
}


static PGM_CTX_DECL(int) PGM_CTX(pgm,GstNoneQueryPageFast)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalk)
{
    RT_NOREF(pVCpu, fFlags);

    pWalk->GCPtr        = GCPtr;
    pWalk->GCPhys       = GCPtr;
    pWalk->GCPhysNested = 0;
    pWalk->fInfo        = PGM_WALKINFO_SUCCEEDED;
    pWalk->fFailed      = PGM_WALKFAIL_SUCCESS;
    pWalk->fEffective   = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D; /** @todo */
    return VINF_SUCCESS;
}


static PGM_CTX_DECL(int) PGM_CTX(pgm,GstNoneModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /* Ignore. */
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask);
    return VINF_SUCCESS;
}


static PGM_CTX_DECL(int) PGM_CTX(pgm,GstNoneWalk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    RT_NOREF(pVCpu, GCPtr, pWalk);
    pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
    return VERR_PGM_NOT_USED_IN_MODE;
}


static PGM_CTX_DECL(int) PGM_CTX(pgm,GstNoneEnter)(PVMCPUCC pVCpu)
{
    /* Nothing to do. */
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;
}


static PGM_CTX_DECL(int) PGM_CTX(pgm,GstNoneExit)(PVMCPUCC pVCpu)
{
    /* Nothing to do. */
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;
}


/*
 * Template variants for actual paging modes.
 * Template variants for actual paging modes.
 * Template variants for actual paging modes.
 */
template<bool a_fTtbr0, uint8_t a_InitialLookupLvl, uint8_t a_GranuleSz, bool a_fTbi, bool a_fEpd>
DECL_FORCE_INLINE(int) pgmGstWalkWorker(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    RT_NOREF(pGstWalk); /** @todo */

    /*
     * Initial lookup level 3 is not valid and only instantiated because we need two
     * bits for the lookup level when creating the index and have to fill the slots.
     */
    if RT_CONSTEXPR_IF(a_InitialLookupLvl == 3)
    {
        AssertReleaseFailed();
        return VERR_PGM_MODE_IPE;
    }
    else
    {
        uint8_t const bEl = CPUMGetGuestEL(pVCpu);

        uint64_t fLookupMask;
        if RT_CONSTEXPR_IF(a_fTtbr0 == true)
            fLookupMask = pVCpu->pgm.s.afLookupMaskTtbr0[bEl];
        else
            fLookupMask = pVCpu->pgm.s.afLookupMaskTtbr1[bEl];

        RTGCPHYS  GCPhysPt = CPUMGetEffectiveTtbr(pVCpu, GCPtr);
        uint64_t *pu64Pt   = NULL;
        uint64_t  uPt;
        int       rc;
        if RT_CONSTEXPR_IF(a_InitialLookupLvl == 0)
        {
            rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
            if (RT_SUCCESS(rc)) { /* probable */ }
            else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 0, rc);

            uPt = pu64Pt[(GCPtr >> 39) & fLookupMask];
            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_VALID) { /* probable */ }
            else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 0);

            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_TBL_OR_PG) { /* probable */ }
            else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 0); /** @todo Only supported if TCR_EL1.DS is set. */

            /* All nine bits from now on. */
            fLookupMask = RT_BIT_64(9) - 1;
            GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
        }

        if RT_CONSTEXPR_IF(a_InitialLookupLvl <= 1)
        {
            rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
            if (RT_SUCCESS(rc)) { /* probable */ }
            else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 1, rc);

            uPt = pu64Pt[(GCPtr >> 30) & fLookupMask];
            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_VALID) { /* probable */ }
            else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 1);

            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_TBL_OR_PG) { /* probable */ }
            else
            {
                /* Block descriptor (1G page). */
                pWalk->GCPtr       = GCPtr;
                pWalk->fSucceeded  = true;
                pWalk->GCPhys      = (RTGCPHYS)(uPt & UINT64_C(0xffffc0000000)) | (GCPtr & (RTGCPTR)(_1G - 1));
                pWalk->fGigantPage = true;
                return VINF_SUCCESS;
            }

            /* All nine bits from now on. */
            fLookupMask = RT_BIT_64(9) - 1;
            GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
        }

        if RT_CONSTEXPR_IF(a_InitialLookupLvl <= 2)
        {
            rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
            if (RT_SUCCESS(rc)) { /* probable */ }
            else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 2, rc);

            uPt = pu64Pt[(GCPtr >> 21) & fLookupMask];
            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_VALID) { /* probable */ }
            else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 2);

            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_TBL_OR_PG) { /* probable */ }
            else
            {
                /* Block descriptor (2M page). */
                pWalk->GCPtr      = GCPtr;
                pWalk->fSucceeded = true;
                pWalk->GCPhys     = (RTGCPHYS)(uPt & UINT64_C(0xffffffe00000)) | (GCPtr & (RTGCPTR)(_2M - 1));
                pWalk->fBigPage   = true;
                return VINF_SUCCESS;
            }

            /* All nine bits from now on. */
            fLookupMask = RT_BIT_64(9) - 1;
            GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
        }

        AssertCompile(a_InitialLookupLvl <= 3);

        /* Next level. */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkReturnBadPhysAddr(pVCpu, pWalk, 3, rc);

        uPt = pu64Pt[(GCPtr & UINT64_C(0x1ff000)) >> 12];
        if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_VALID) { /* probable */ }
        else return pgmGstWalkReturnNotPresent(pVCpu, pWalk, 3);

        if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_TBL_OR_PG) { /* probable */ }
        else return pgmGstWalkReturnRsvdError(pVCpu, pWalk, 3); /* No block descriptors. */

        pWalk->GCPtr      = GCPtr;
        pWalk->fSucceeded = true;
        pWalk->GCPhys     = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000)) | (GCPtr & (RTGCPTR)(_4K - 1));
        return VINF_SUCCESS;
    }
}
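/*
 * Worked example (editorial note, not in the original source): with the 4KiB
 * granule the walker above slices the virtual address into four 9-bit table
 * indices plus a 12-bit page offset.  For an assumed GCPtr of
 * 0x0000008080604567 the individual lookups resolve as follows:
 *
 * @code
 *    RTGCPTR const  GCPtr  = UINT64_C(0x0000008080604567);
 *    unsigned const idxL0  = (GCPtr >> 39) & 0x1ff;   // 1 - level 0 table index
 *    unsigned const idxL1  = (GCPtr >> 30) & 0x1ff;   // 2 - level 1 table index
 *    unsigned const idxL2  = (GCPtr >> 21) & 0x1ff;   // 3 - level 2 table index
 *    unsigned const idxL3  = (GCPtr >> 12) & 0x1ff;   // 4 - level 3 table index
 *    unsigned const offPg  = GCPtr & 0xfff;           // 0x567 - offset into the 4KiB page
 * @endcode
 *
 * Only the initial lookup uses the variable fLookupMask; every later level
 * uses the full nine bits (RT_BIT_64(9) - 1), as the code above notes.
 */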

template<bool a_fTtbr0, uint8_t a_InitialLookupLvl, uint8_t a_GranuleSz, bool a_fTbi, bool a_fEpd>
static PGM_CTX_DECL(int) PGM_CTX(pgm,GstGetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
    return pgmGstWalkWorker<a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd>(pVCpu, GCPtr, pWalk, NULL /*pGstWalk*/);
}


template<bool a_fTtbr0, uint8_t a_InitialLookupLvl, uint8_t a_GranuleSz, bool a_fTbi, bool a_fEpd>
static PGM_CTX_DECL(int) PGM_CTX(pgm,GstQueryPageFast)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalk)
{
    RT_NOREF(fFlags); /** @todo */

    /*
     * Initial lookup level 3 is not valid and only instantiated because we need two
     * bits for the lookup level when creating the index and have to fill the slots.
     */
    if RT_CONSTEXPR_IF(a_InitialLookupLvl == 3)
    {
        AssertReleaseFailed();
        return VERR_PGM_MODE_IPE;
    }
    else
    {
        uint8_t const bEl = CPUMGetGuestEL(pVCpu);

        uint64_t fLookupMask;
        if RT_CONSTEXPR_IF(a_fTtbr0 == true)
            fLookupMask = pVCpu->pgm.s.afLookupMaskTtbr0[bEl];
        else
            fLookupMask = pVCpu->pgm.s.afLookupMaskTtbr1[bEl];

        RTGCPHYS  GCPhysPt = CPUMGetEffectiveTtbr(pVCpu, GCPtr);
        uint64_t *pu64Pt   = NULL;
        uint64_t  uPt;
        int       rc;
        if RT_CONSTEXPR_IF(a_InitialLookupLvl == 0)
        {
            rc = pgmPhysGCPhys2CCPtrLockless(pVCpu, GCPhysPt, (void **)&pu64Pt);
            if (RT_SUCCESS(rc)) { /* probable */ }
            else return pgmGstWalkFastReturnBadPhysAddr(pVCpu, pWalk, 0, rc);

            uPt = pu64Pt[(GCPtr >> 39) & fLookupMask];
            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_VALID) { /* probable */ }
            else return pgmGstWalkFastReturnNotPresent(pVCpu, pWalk, 0);

            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_TBL_OR_PG) { /* probable */ }
            else return pgmGstWalkFastReturnRsvdError(pVCpu, pWalk, 0); /** @todo Only supported if TCR_EL1.DS is set. */

            /* All nine bits from now on. */
            fLookupMask = RT_BIT_64(9) - 1;
            GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
        }

        if RT_CONSTEXPR_IF(a_InitialLookupLvl <= 1)
        {
            rc = pgmPhysGCPhys2CCPtrLockless(pVCpu, GCPhysPt, (void **)&pu64Pt);
            if (RT_SUCCESS(rc)) { /* probable */ }
            else return pgmGstWalkFastReturnBadPhysAddr(pVCpu, pWalk, 1, rc);

            uPt = pu64Pt[(GCPtr >> 30) & fLookupMask];
            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_VALID) { /* probable */ }
            else return pgmGstWalkFastReturnNotPresent(pVCpu, pWalk, 1);

            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_TBL_OR_PG) { /* probable */ }
            else
            {
                /* Block descriptor (1G page). */
                pWalk->GCPtr  = GCPtr;
                pWalk->fInfo  = PGM_WALKINFO_SUCCEEDED | PGM_WALKINFO_GIGANTIC_PAGE;
                pWalk->GCPhys = (RTGCPHYS)(uPt & UINT64_C(0xffffc0000000)) | (GCPtr & (RTGCPTR)(_1G - 1));
                return VINF_SUCCESS;
            }

            /* All nine bits from now on. */
            fLookupMask = RT_BIT_64(9) - 1;
            GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
        }

        if RT_CONSTEXPR_IF(a_InitialLookupLvl <= 2)
        {
            rc = pgmPhysGCPhys2CCPtrLockless(pVCpu, GCPhysPt, (void **)&pu64Pt);
            if (RT_SUCCESS(rc)) { /* probable */ }
            else return pgmGstWalkFastReturnBadPhysAddr(pVCpu, pWalk, 2, rc);

            uPt = pu64Pt[(GCPtr >> 21) & fLookupMask];
            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_VALID) { /* probable */ }
            else return pgmGstWalkFastReturnNotPresent(pVCpu, pWalk, 2);

            if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_TBL_OR_PG) { /* probable */ }
            else
            {
                /* Block descriptor (2M page). */
                pWalk->GCPtr  = GCPtr;
                pWalk->fInfo  = PGM_WALKINFO_SUCCEEDED | PGM_WALKINFO_BIG_PAGE;
                pWalk->GCPhys = (RTGCPHYS)(uPt & UINT64_C(0xffffffe00000)) | (GCPtr & (RTGCPTR)(_2M - 1));
                return VINF_SUCCESS;
            }

            /* All nine bits from now on. */
            fLookupMask = RT_BIT_64(9) - 1;
            GCPhysPt = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000));
        }

        AssertCompile(a_InitialLookupLvl <= 3);

        /* Next level. */
        rc = pgmPhysGCPhys2CCPtrLockless(pVCpu, GCPhysPt, (void **)&pu64Pt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return pgmGstWalkFastReturnBadPhysAddr(pVCpu, pWalk, 3, rc);

        uPt = pu64Pt[(GCPtr & UINT64_C(0x1ff000)) >> 12];
        if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_VALID) { /* probable */ }
        else return pgmGstWalkFastReturnNotPresent(pVCpu, pWalk, 3);

        if (uPt & ARMV8_VMSA64_TBL_ENTRY_F_TBL_OR_PG) { /* probable */ }
        else return pgmGstWalkFastReturnRsvdError(pVCpu, pWalk, 3); /* No block descriptors. */

        pWalk->GCPtr  = GCPtr;
        pWalk->fInfo  = PGM_WALKINFO_SUCCEEDED;
        pWalk->GCPhys = (RTGCPHYS)(uPt & UINT64_C(0xfffffffff000)) | (GCPtr & (RTGCPTR)(_4K - 1));
        return VINF_SUCCESS;
    }
}


template<bool a_fTtbr0, uint8_t a_InitialLookupLvl, uint8_t a_GranuleSz, bool a_fTbi, bool a_fEpd>
static PGM_CTX_DECL(int) PGM_CTX(pgm,GstModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /** @todo Ignore for now. */
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask);
    return VINF_SUCCESS;
}


template<bool a_fTtbr0, uint8_t a_InitialLookupLvl, uint8_t a_GranuleSz, bool a_fTbi, bool a_fEpd>
static PGM_CTX_DECL(int) PGM_CTX(pgm,GstWalk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
{
    pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
    return pgmGstWalkWorker<a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd>(pVCpu, GCPtr, pWalk, pGstWalk);
}


template<bool a_fTtbr0, uint8_t a_InitialLookupLvl, uint8_t a_GranuleSz, bool a_fTbi, bool a_fEpd>
static PGM_CTX_DECL(int) PGM_CTX(pgm,GstEnter)(PVMCPUCC pVCpu)
{
    /* Nothing to do for now. */
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;
}


template<bool a_fTtbr0, uint8_t a_InitialLookupLvl, uint8_t a_GranuleSz, bool a_fTbi, bool a_fEpd>
static PGM_CTX_DECL(int) PGM_CTX(pgm,GstExit)(PVMCPUCC pVCpu)
{
    /* Nothing to do for now. */
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Guest mode data array.
 */
PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
{
    { UINT32_MAX, NULL, NULL, NULL, NULL, NULL }, /* 0 */
    {
        PGM_TYPE_NONE,
        PGM_CTX(pgm,GstNoneGetPage),
        PGM_CTX(pgm,GstNoneQueryPageFast),
        PGM_CTX(pgm,GstNoneModifyPage),
        PGM_CTX(pgm,GstNoneWalk),
        PGM_CTX(pgm,GstNoneEnter),
        PGM_CTX(pgm,GstNoneExit),
    },

#define PGM_MODE_TYPE_CREATE(a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd) \
    (2 + (  (a_fEpd ? RT_BIT_32(6) : 0) \
          | (a_fTbi ? RT_BIT_32(5) : 0) \
          | (a_GranuleSz << 3) \
          | (a_InitialLookupLvl << 1) \
          | (a_fTtbr0 ? RT_BIT_32(0) : 0) ))

#define PGM_MODE_CREATE_EX(a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd) \
    { \
        PGM_MODE_TYPE_CREATE(a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd), \
        PGM_CTX(pgm,GstGetPage)<a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd>, \
        PGM_CTX(pgm,GstQueryPageFast)<a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd>, \
        PGM_CTX(pgm,GstModifyPage)<a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd>, \
        PGM_CTX(pgm,GstWalk)<a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd>, \
        PGM_CTX(pgm,GstEnter)<a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd>, \
        PGM_CTX(pgm,GstExit)<a_fTtbr0, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd> \
    }

#define PGM_MODE_CREATE_TTBR(a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd) \
    PGM_MODE_CREATE_EX(false, a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd), \
    PGM_MODE_CREATE_EX(true,  a_InitialLookupLvl, a_GranuleSz, a_fTbi, a_fEpd)

#define PGM_MODE_CREATE_LOOKUP_LVL(a_GranuleSz, a_fTbi, a_fEpd) \
    PGM_MODE_CREATE_TTBR(0, a_GranuleSz, a_fTbi, a_fEpd), \
    PGM_MODE_CREATE_TTBR(1, a_GranuleSz, a_fTbi, a_fEpd), \
    PGM_MODE_CREATE_TTBR(2, a_GranuleSz, a_fTbi, a_fEpd), \
    PGM_MODE_CREATE_TTBR(3, a_GranuleSz, a_fTbi, a_fEpd) /* Invalid */

#define PGM_MODE_CREATE_GRANULE_SZ(a_fTbi, a_fEpd) \
    PGM_MODE_CREATE_LOOKUP_LVL(ARMV8_TCR_EL1_AARCH64_TG1_INVALID, a_fTbi, a_fEpd), \
    PGM_MODE_CREATE_LOOKUP_LVL(ARMV8_TCR_EL1_AARCH64_TG1_16KB,    a_fTbi, a_fEpd), \
    PGM_MODE_CREATE_LOOKUP_LVL(ARMV8_TCR_EL1_AARCH64_TG1_4KB,     a_fTbi, a_fEpd), \
    PGM_MODE_CREATE_LOOKUP_LVL(ARMV8_TCR_EL1_AARCH64_TG1_64KB,    a_fTbi, a_fEpd)

#define PGM_MODE_CREATE_TBI(a_fEpd) \
    PGM_MODE_CREATE_GRANULE_SZ(false, a_fEpd), \
    PGM_MODE_CREATE_GRANULE_SZ(true,  a_fEpd)

    /* Recursive expansion for the win, this will blow up to 128 entries covering all possible modes. */
    PGM_MODE_CREATE_TBI(false),
    PGM_MODE_CREATE_TBI(true)

#undef PGM_MODE_CREATE_TBI
#undef PGM_MODE_CREATE_GRANULE_SZ
#undef PGM_MODE_CREATE_LOOKUP_LVL
#undef PGM_MODE_CREATE_TTBR
#undef PGM_MODE_CREATE_EX
};
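/*
 * Worked example (editorial note, not in the original source): the mode index
 * packs the five template parameters into bits [6:0] and offsets the result by
 * two to skip the invalid and PGM_TYPE_NONE slots.  Assuming the architectural
 * TG1 encoding of 2 for the 4KiB granule (ARMV8_TCR_EL1_AARCH64_TG1_4KB), a
 * TTBR0 mode starting at lookup level 1 with TBI and EPD clear lands at
 * index 21:
 *
 * @code
 *    // fTtbr0=true, initial lookup level 1, granule encoding 2 (4KiB), TBI=false, EPD=false:
 *    unsigned const idx = PGM_MODE_TYPE_CREATE(true, 1, 2, false, false);
 *    // idx = 2 + ((0 << 6) | (0 << 5) | (2 << 3) | (1 << 1) | 1) = 2 + 16 + 2 + 1 = 21
 * @endcode
 */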

template<uint8_t a_offTsz, uint8_t a_offTg, uint8_t a_offTbi, uint8_t a_offEpd, bool a_fTtbr0>
DECLINLINE(uintptr_t) pgmR3DeduceTypeFromTcr(uint64_t u64RegSctlr, uint64_t u64RegTcr, uint64_t *pfInitialLookupMask)
{
    uintptr_t idxNewGst = 0;

    /*
     * MMU enabled at all?
     * Technically this is incorrect, as we use ARMV8_SCTLR_EL1_M regardless of the EL,
     * but the bit is the same for all exception levels.
     */
    if (u64RegSctlr & ARMV8_SCTLR_EL1_M)
    {
        uint64_t const u64Tsz = (u64RegTcr >> a_offTsz) & 0x1f;
        uint64_t const u64Tg  = (u64RegTcr >> a_offTg)  & 0x3;
        bool const     fTbi   = RT_BOOL(u64RegTcr & RT_BIT_64(a_offTbi));
        bool const     fEpd   = RT_BOOL(u64RegTcr & RT_BIT_64(a_offEpd));

        /*
         * From: https://github.com/codingbelief/arm-architecture-reference-manual-for-armv8-a/blob/master/en/chapter_d4/d42_2_controlling_address_translation_stages.md
         *
         * For all translation stages:
         *     The maximum TxSZ value is 39. If TxSZ is programmed to a value larger than 39 then it is IMPLEMENTATION DEFINED whether:
         *         - The implementation behaves as if the field is programmed to 39 for all purposes other than reading back the value of the field.
         *         - Any use of the TxSZ value generates a Level 0 Translation fault for the stage of translation at which TxSZ is used.
         *
         * For a stage 1 translation:
         *     The minimum TxSZ value is 16. If TxSZ is programmed to a value smaller than 16 then it is IMPLEMENTATION DEFINED whether:
         *         - The implementation behaves as if the field were programmed to 16 for all purposes other than reading back the value of the field.
         *         - Any use of the TxSZ value generates a stage 1 Level 0 Translation fault.
         *
         * We currently choose the former for both.
         */
        uint64_t uLookupLvl;
        if (/*u64Tsz >= 16 &&*/ u64Tsz <= 24)
        {
            uLookupLvl = 0;
            if (u64Tsz >= 16)
                *pfInitialLookupMask = RT_BIT_64(24 - u64Tsz + 1) - 1;
            else
                *pfInitialLookupMask = RT_BIT_64(24 - 16 + 1) - 1;
        }
        else if (u64Tsz >= 25 && u64Tsz <= 33)
        {
            uLookupLvl = 1;
            *pfInitialLookupMask = RT_BIT_64(33 - u64Tsz + 1) - 1;
        }
        else /*if (u64Tsz >= 34 && u64Tsz <= 39)*/
        {
            uLookupLvl = 2;
            if (u64Tsz <= 39)
                *pfInitialLookupMask = RT_BIT_64(39 - u64Tsz + 1) - 1;
            else
                *pfInitialLookupMask = RT_BIT_64(39 - 39 + 1) - 1;
        }

        /* Build the index into the PGM mode callback table for the given config. */
        idxNewGst = PGM_MODE_TYPE_CREATE(a_fTtbr0, uLookupLvl, u64Tg, fTbi, fEpd);
    }
    else
        idxNewGst = PGM_TYPE_NONE;

    return idxNewGst;
}
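/*
 * Usage sketch (editorial note, not in the original source): a caller that
 * wants the TTBR0_EL1 mode index would instantiate the template with the
 * architectural TCR_EL1 field offsets (T0SZ at bit 0, EPD0 at bit 7, TG0 at
 * bit 14, TBI0 at bit 37); uSctlrEl1 and uTcrEl1 are assumed register values.
 * For T0SZ = 25 (a 39-bit VA space) the walk starts at level 1 and the initial
 * lookup mask becomes RT_BIT_64(33 - 25 + 1) - 1 = 0x1ff.
 *
 * @code
 *    uint64_t fLookupMask = 0;
 *    uintptr_t const idxNewGst = pgmR3DeduceTypeFromTcr<0, 14, 37, 7, true>(uSctlrEl1, uTcrEl1, &fLookupMask);
 *    PGMMODEDATAGST const *pModeData = &g_aPgmGuestModeData[idxNewGst];
 * @endcode
 */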