VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs-armv8.cpp@ 108832

Last change on this file since 108832 was 108791, checked in by vboxsync, 13 days ago

VMM/IEM: More ARM target work. jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.8 KB
Line 
1/* $Id: CPUMAllRegs-armv8.cpp 108791 2025-03-28 21:58:31Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters, ARMv8 variant.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_CPUM
33#include <VBox/vmm/cpum.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/pdmapic.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/hm.h>
41#include "CPUMInternal-armv8.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/err.h>
44#include <VBox/dis.h>
45#include <VBox/log.h>
46#include <VBox/vmm/hm.h>
47#include <VBox/vmm/tm.h>
48
49#include <iprt/armv8.h>
50#include <iprt/assert.h>
51#include <iprt/asm.h>
52#ifdef IN_RING3
53# include <iprt/thread.h>
54#endif
55
56
57/*********************************************************************************************************************************
58* Defined Constants And Macros *
59*********************************************************************************************************************************/
60/**
61 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
62 *
63 * @returns Pointer to the Virtual CPU.
64 * @param a_pGuestCtx Pointer to the guest context.
65 */
66#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
67
68/** @def CPUM_INT_ASSERT_NOT_EXTRN
69 * Macro for asserting that @a a_fNotExtrn are present.
70 *
71 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
72 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
73 */
74#define CPUM_INT_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
75 AssertMsg(!((a_pVCpu)->cpum.s.Guest.fExtrn & (a_fNotExtrn)), \
76 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.s.Guest.fExtrn, (a_fNotExtrn)))
77
78
/**
 * Queries the pointer to the internal CPUMCTX structure.
 *
 * @returns The CPUMCTX pointer.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @note    The returned pointer aliases CPUM internal state; the caller does
 *          not own it and must not free it.
 */
VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
{
    return &pVCpu->cpum.s.Guest;
}
89
90
91VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
92{
93 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PC);
94 return pVCpu->cpum.s.Guest.Pc.u64;
95}
96
97
/**
 * Gets the guest stack pointer value.
 *
 * @returns The stack pointer (currently always SP_EL0; see note).
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 *
 * @note    Unfinished: the banked SP register should be selected by the
 *          current exception level, so this release-asserts before returning.
 */
VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SP);
    AssertReleaseFailed(); /** @todo Exception level. */
    return pVCpu->cpum.s.Guest.aSpReg[0].u64; /* SP_EL0 - not reached; placeholder until EL selection is done. */
}
104
105
106/**
107 * Returns whether IRQs are currently masked.
108 *
109 * @returns true if IRQs are masked as indicated by the PState value.
110 * @param pVCpu The cross context virtual CPU structure.
111 */
112VMMDECL(bool) CPUMGetGuestIrqMasked(PVMCPUCC pVCpu)
113{
114 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
115 return RT_BOOL(pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_I);
116}
117
118
119/**
120 * Returns whether FIQs are currently masked.
121 *
122 * @returns true if FIQs are masked as indicated by the PState value.
123 * @param pVCpu The cross context virtual CPU structure.
124 */
125VMMDECL(bool) CPUMGetGuestFiqMasked(PVMCPUCC pVCpu)
126{
127 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
128 return RT_BOOL(pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_F);
129}
130
131
/**
 * Gets the host CPU vendor.
 *
 * @returns CPU vendor.
 * @param   pVM     The cross context VM structure.
 *
 * @note    Stub: host vendor detection is not implemented on ARMv8 yet, so
 *          this always returns CPUMCPUVENDOR_UNKNOWN.
 */
VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
{
    RT_NOREF(pVM);
    //AssertReleaseFailed();
    return CPUMCPUVENDOR_UNKNOWN;
}
144
145
/**
 * Gets the host CPU microarchitecture.
 *
 * @returns CPU microarchitecture.
 * @param   pVM     The cross context VM structure.
 *
 * @note    Not implemented on ARMv8: release-asserts before returning
 *          kCpumMicroarch_Unknown.
 */
VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
{
    RT_NOREF(pVM);
    AssertReleaseFailed();
    return kCpumMicroarch_Unknown;
}
158
159
/**
 * Gets the guest CPU vendor.
 *
 * @returns CPU vendor.
 * @param   pVM     The cross context VM structure.
 *
 * @note    Stub: guest vendor detection is not implemented on ARMv8 yet, so
 *          this always returns CPUMCPUVENDOR_UNKNOWN.
 */
VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
{
    RT_NOREF(pVM);
    //AssertReleaseFailed();
    return CPUMCPUVENDOR_UNKNOWN;
}
172
173
/**
 * Gets the guest CPU architecture.
 *
 * @returns CPU architecture (always ARM for this module).
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(CPUMARCH) CPUMGetGuestArch(PCVM pVM)
{
    RT_NOREF(pVM);
    return kCpumArch_Arm; /* Static as we are in the ARM VMM module here. */
}
185
186
/**
 * Gets the guest CPU microarchitecture.
 *
 * @returns CPU microarchitecture.
 * @param   pVM     The cross context VM structure.
 *
 * @note    Not implemented on ARMv8: release-asserts before returning
 *          kCpumMicroarch_Unknown.
 */
VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM)
{
    RT_NOREF(pVM);
    AssertReleaseFailed();
    return kCpumMicroarch_Unknown;
}
199
200
/**
 * Gets the maximum number of physical and linear address bits supported by the
 * guest.
 *
 * @param   pVM                 The cross context VM structure.
 * @param   pcPhysAddrWidth     Where to store the physical address width.
 * @param   pcLinearAddrWidth   Where to store the linear address width.
 *
 * @note    Silently returns without storing anything if either output pointer
 *          is NULL (AssertReturnVoid).
 */
VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth)
{
    AssertPtr(pVM);
    AssertReturnVoid(pcPhysAddrWidth);
    AssertReturnVoid(pcLinearAddrWidth);
    *pcPhysAddrWidth   = pVM->cpum.s.GuestFeatures.cMaxPhysAddrWidth;
    *pcLinearAddrWidth = pVM->cpum.s.GuestFeatures.cMaxLinearAddrWidth;
}
217
218
/**
 * Tests if the guest has paging enabled.
 *
 * @returns true if paging is enabled, otherwise false.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @note    Not implemented on ARMv8: release-asserts before returning false.
 *          (The original "real mode" wording was x86 copy-pasta.)
 */
VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu)
{
    RT_NOREF(pVCpu);
    AssertReleaseFailed();
    return false;
}
231
232
233/**
234 * Tests if the guest is running in 64 bits mode or not.
235 *
236 * @returns true if in 64 bits protected mode, otherwise false.
237 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
238 */
239VMMDECL(bool) CPUMIsGuestIn64BitCode(PCVMCPU pVCpu)
240{
241 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
242 return !RT_BOOL(pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4);
243}
244
245
/**
 * Context-pointer variant of CPUMIsGuestIn64BitCode.
 *
 * Recovers the VMCPU from the guest context pointer and forwards the call.
 * (The "lazy hidden CS register" wording in the original doc was x86-specific;
 * on ARMv8 this is a plain forwarder.)
 *
 * @returns true if executing AArch64 code, otherwise false.
 * @param   pCtx    Pointer to the current guest CPU context (must be the
 *                  CPUMCPU::Guest member, see CPUM_GUEST_CTX_TO_VMCPU).
 */
VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCCPUMCTX pCtx)
{
    return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
}
257
258
/**
 * Sets the specified changed flags (CPUM_CHANGED_*).
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fChangedAdd The changed flags to add (OR'ed into the existing set).
 */
VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
{
    pVCpu->cpum.s.fChanged |= fChangedAdd;
}
269
270#if 0 /* unused atm */
271
/**
 * Checks if the guest debug state is active.
 *
 * @returns boolean
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 *
 * @note    Compiled out (#if 0) - currently unused on ARMv8.
 */
VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
}
282
283
/**
 * Checks if the hyper debug state is active.
 *
 * @returns boolean
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 *
 * @note    Compiled out (#if 0) - currently unused on ARMv8.
 */
VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
}
294
295
/**
 * Mark the guest's debug state as inactive.
 *
 * Only asserts that neither debug state is marked in use; performs no other
 * work.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @todo This API doesn't make sense any more.
 *
 * @note    Compiled out (#if 0) - currently unused on ARMv8.
 */
VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
{
    Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER)));
    NOREF(pVCpu);
}
308
309#endif
310
311/**
312 * Get the current exception level of the guest.
313 *
314 * @returns Exception Level 0 - 3
315 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
316 */
317VMM_INT_DECL(uint8_t) CPUMGetGuestEL(PVMCPU pVCpu)
318{
319 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
320 Assert(!(pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4)); /* ASSUMES aarch64 mode */
321 return ARMV8_SPSR_EL2_AARCH64_GET_EL(pVCpu->cpum.s.Guest.fPState);
322}
323
324
325/**
326 * Returns whether the guest has the MMU enabled for address translation.
327 *
328 * @returns true if address translation is enabled, false if not.
329 */
330VMM_INT_DECL(bool) CPUMGetGuestMmuEnabled(PVMCPUCC pVCpu)
331{
332 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE | CPUMCTX_EXTRN_SCTLR_TCR_TTBR);
333 Assert(!(pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4)); /* ASSUMES aarch64 mode */
334 uint8_t bEl = ARMV8_SPSR_EL2_AARCH64_GET_EL(pVCpu->cpum.s.Guest.fPState);
335 if (bEl == ARMV8_AARCH64_EL_2)
336 {
337 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_EL2);
338 return RT_BOOL(pVCpu->cpum.s.Guest.SctlrEl2.u64 & ARMV8_SCTLR_EL2_M);
339 }
340
341 Assert(bEl == ARMV8_AARCH64_EL_0 || bEl == ARMV8_AARCH64_EL_1);
342 return RT_BOOL(pVCpu->cpum.s.Guest.Sctlr.u64 & ARMV8_SCTLR_EL1_M);
343}
344
345
346/**
347 * Returns the effective TTBR value for the given guest context pointer.
348 *
349 * @returns Physical base address of the translation table being used, or RTGCPHYS_MAX
350 * if MMU is disabled.
351 */
352VMM_INT_DECL(RTGCPHYS) CPUMGetEffectiveTtbr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
353{
354 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE | CPUMCTX_EXTRN_SCTLR_TCR_TTBR);
355
356 Assert(!(pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4)); /* ASSUMES aarch64 mode */
357 uint8_t bEl = ARMV8_SPSR_EL2_AARCH64_GET_EL(pVCpu->cpum.s.Guest.fPState);
358 if (bEl == ARMV8_AARCH64_EL_2)
359 {
360 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_EL2);
361 if (pVCpu->cpum.s.Guest.SctlrEl2.u64 & ARMV8_SCTLR_EL2_M)
362 return (GCPtr & RT_BIT_64(55))
363 ? ARMV8_TTBR_EL1_AARCH64_BADDR_GET(pVCpu->cpum.s.Guest.Ttbr1El2.u64)
364 : ARMV8_TTBR_EL1_AARCH64_BADDR_GET(pVCpu->cpum.s.Guest.Ttbr0El2.u64);
365 }
366 else
367 {
368 Assert(bEl == ARMV8_AARCH64_EL_0 || bEl == ARMV8_AARCH64_EL_1);
369 if (pVCpu->cpum.s.Guest.Sctlr.u64 & ARMV8_SCTLR_EL1_M)
370 return (GCPtr & RT_BIT_64(55))
371 ? ARMV8_TTBR_EL1_AARCH64_BADDR_GET(pVCpu->cpum.s.Guest.Ttbr1.u64)
372 : ARMV8_TTBR_EL1_AARCH64_BADDR_GET(pVCpu->cpum.s.Guest.Ttbr0.u64);
373 }
374
375 return RTGCPHYS_MAX;
376}
377
378
/**
 * Returns the current TCR_EL1 system register value for the given vCPU.
 *
 * @returns TCR_EL1 value
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 *
 * @note    EL1 bank only; the EL2 TCR is not consulted here.
 */
VMM_INT_DECL(uint64_t) CPUMGetTcrEl1(PVMCPUCC pVCpu)
{
    CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SCTLR_TCR_TTBR);
    return pVCpu->cpum.s.Guest.Tcr.u64;
}
390
391
392/**
393 * Returns the virtual address given in the input stripped from any potential
394 * pointer authentication code if enabled for the given vCPU.
395 *
396 * @returns Virtual address given in GCPtr stripped from any PAC (or reserved bits).
397 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
398 */
399VMM_INT_DECL(RTGCPTR) CPUMGetGCPtrPacStripped(PVMCPUCC pVCpu, RTGCPTR GCPtr)
400{
401 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SCTLR_TCR_TTBR);
402
403 /** @todo MTE support. */
404 bool fUpper = RT_BOOL(GCPtr & RT_BIT_64(55)); /* Save the determinator for upper lower range. */
405 uint8_t u8TxSz = fUpper
406 ? ARMV8_TCR_EL1_AARCH64_T1SZ_GET(pVCpu->cpum.s.Guest.Tcr.u64)
407 : ARMV8_TCR_EL1_AARCH64_T0SZ_GET(pVCpu->cpum.s.Guest.Tcr.u64);
408 RTGCPTR fNonPacMask = RT_BIT_64(64 - u8TxSz) - 1; /* Get mask of non PAC bits. */
409 RTGCPTR fSign = fUpper
410 ? ~fNonPacMask
411 : 0;
412
413 return (GCPtr & fNonPacMask)
414 | fSign;
415}
416
417
418/**
419 * Gets the current guest CPU mode.
420 *
421 * If paging mode is what you need, check out PGMGetGuestMode().
422 *
423 * @returns The CPU mode.
424 * @param pVCpu The cross context virtual CPU structure.
425 */
426VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
427{
428 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
429 if (pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4)
430 return CPUMMODE_ARMV8_AARCH32;
431
432 return CPUMMODE_ARMV8_AARCH64;
433}
434
435
436/**
437 * Figure whether the CPU is currently executing 32 or 64 bit code.
438 *
439 * @returns 32 or 64.
440 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
441 */
442VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
443{
444 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
445 if (pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4)
446 return 32;
447
448 return 64;
449}
450
451
452VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
453{
454 CPUM_INT_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PSTATE);
455 if (pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_M4)
456 {
457 if (pVCpu->cpum.s.Guest.fPState & ARMV8_SPSR_EL2_AARCH64_T)
458 return DISCPUMODE_ARMV8_T32;
459
460 return DISCPUMODE_ARMV8_A32;
461 }
462
463 return DISCPUMODE_ARMV8_A64;
464}
465
466
467/**
468 * Used to dynamically imports state residing in NEM or HM.
469 *
470 * This is a worker for the CPUM_IMPORT_EXTRN_RET() macro and various IEM ones.
471 *
472 * @returns VBox status code.
473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
474 * @param fExtrnImport The fields to import.
475 * @thread EMT(pVCpu)
476 */
477VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport)
478{
479 VMCPU_ASSERT_EMT(pVCpu);
480 if (pVCpu->cpum.s.Guest.fExtrn & fExtrnImport)
481 {
482 switch (pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
483 {
484 case CPUMCTX_EXTRN_KEEPER_NEM:
485 {
486 int rc = NEMImportStateOnDemand(pVCpu, fExtrnImport);
487 Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
488 return rc;
489 }
490
491 default:
492 AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
493 }
494 }
495 return VINF_SUCCESS;
496}
497
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette