VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUM-armv8.cpp@ 109008

Last change on this file since 109008 was 108968, checked in by vboxsync, 7 days ago

VMM,Main,Devices: Respect VBOX_VMM_TARGET_ARMV8 correctly on amd64 hosts (for IEM debugging purposes). jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 48.8 KB
Line 
1/* $Id: CPUM-armv8.cpp 108968 2025-04-14 20:45:36Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor / Manager (ARMv8 variant).
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_cpum CPUM - CPU Monitor / Manager
29 *
30 * The CPU Monitor / Manager keeps track of all the CPU registers.
 * This is the ARMv8 variant which is doing much less than its x86/AMD64
32 * counterpart due to the fact that we currently only support the NEM backends
33 * for running ARM guests. It might become complex iff we decide to implement our
34 * own hypervisor.
35 *
36 * @section sec_cpum_logging_armv8 Logging Level Assignments.
37 *
38 * Following log level assignments:
39 * - @todo
40 *
41 */
42
43
44/*********************************************************************************************************************************
45* Header Files *
46*********************************************************************************************************************************/
47#define LOG_GROUP LOG_GROUP_CPUM
48#define CPUM_WITH_NONCONST_HOST_FEATURES
49#include <VBox/vmm/cpum.h>
50#include <VBox/vmm/cpumdis.h>
51#include <VBox/vmm/pgm.h>
52#include <VBox/vmm/mm.h>
53#include <VBox/vmm/em.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/dbgf.h>
56#include <VBox/vmm/ssm.h>
57#include "CPUMInternal-armv8.h"
58#include <VBox/vmm/vm.h>
59
60#include <VBox/param.h>
61#include <VBox/dis.h>
62#include <VBox/err.h>
63#include <VBox/log.h>
64#include <iprt/assert.h>
65#include <iprt/cpuset.h>
66#include <iprt/mem.h>
67#include <iprt/mp.h>
68#include <iprt/string.h>
69#include <iprt/armv8.h>
70
71
72/*********************************************************************************************************************************
73* Defined Constants And Macros *
74*********************************************************************************************************************************/
75
76/** Internal form used by the macros. */
77#ifdef VBOX_WITH_STATISTICS
78# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
79 { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
80 { 0 }, { 0 }, { 0 }, { 0 } }
81#else
82# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
83 { a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
84#endif
85
86/** Function handlers, extended version. */
87#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
88 RINT(a_uMsr, a_uMsr, kCpumSysRegRdFn_##a_enmRdFnSuff, kCpumSysRegWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
89/** Function handlers, read-only. */
90#define MFO(a_uMsr, a_szName, a_enmRdFnSuff) \
91 RINT(a_uMsr, a_uMsr, kCpumSysRegRdFn_##a_enmRdFnSuff, kCpumSysRegWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
92/** Read-only fixed value, ignores all writes. */
93#define MVI(a_uMsr, a_szName, a_uValue) \
94 RINT(a_uMsr, a_uMsr, kCpumSysRegRdFn_FixedValue, kCpumSysRegWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
95/** Read/Write value from/to CPUMCTX. */
96#define MVRW(a_uMsr, a_szName, a_offCpum) \
97 RINT(a_uMsr, a_uMsr, kCpumSysRegRdFn_ReadCpumOff, kCpumSysRegWrFn_WriteCpumOff, (uint32_t)a_offCpum, 0, UINT64_MAX, 0, a_szName)
98
99
100/*********************************************************************************************************************************
101* Structures and Typedefs *
102*********************************************************************************************************************************/
103
104/**
105 * What kind of cpu info dump to perform.
106 */
107typedef enum CPUMDUMPTYPE
108{
109 CPUMDUMPTYPE_TERSE,
110 CPUMDUMPTYPE_DEFAULT,
111 CPUMDUMPTYPE_VERBOSE
112} CPUMDUMPTYPE;
113/** Pointer to a cpu info dump type. */
114typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
115
116
117/*********************************************************************************************************************************
118* Internal Functions *
119*********************************************************************************************************************************/
120static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
121static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
122static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM);
123static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
124static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
125static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
126static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
127static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
128
129
130/*********************************************************************************************************************************
131* Global Variables *
132*********************************************************************************************************************************/
133/** Host CPU features. */
134DECL_HIDDEN_DATA(CPUHOSTFEATURES) g_CpumHostFeatures;
135
136/**
137 * System register ranges.
138 */
139static CPUMSYSREGRANGE const g_aSysRegRanges[] =
140{
141 MFX(ARMV8_AARCH64_SYSREG_OSLAR_EL1, "OSLAR_EL1", WriteOnly, OslarEl1, 0, UINT64_C(0xfffffffffffffffe), UINT64_C(0xfffffffffffffffe)),
142 MFO(ARMV8_AARCH64_SYSREG_OSLSR_EL1, "OSLSR_EL1", OslsrEl1),
143 MVI(ARMV8_AARCH64_SYSREG_OSDLR_EL1, "OSDLR_EL1", 0),
144 MVRW(ARMV8_AARCH64_SYSREG_MDSCR_EL1, "MDSCR_EL1", RT_UOFFSETOF(CPUMCTX, Mdscr)),
145 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(0), "DBGBVR0_EL1", RT_UOFFSETOF(CPUMCTX, aBp[0].Value)),
146 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(1), "DBGBVR1_EL1", RT_UOFFSETOF(CPUMCTX, aBp[1].Value)),
147 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(2), "DBGBVR2_EL1", RT_UOFFSETOF(CPUMCTX, aBp[2].Value)),
148 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(3), "DBGBVR3_EL1", RT_UOFFSETOF(CPUMCTX, aBp[3].Value)),
149 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(4), "DBGBVR4_EL1", RT_UOFFSETOF(CPUMCTX, aBp[4].Value)),
150 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(5), "DBGBVR5_EL1", RT_UOFFSETOF(CPUMCTX, aBp[5].Value)),
151 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(6), "DBGBVR6_EL1", RT_UOFFSETOF(CPUMCTX, aBp[6].Value)),
152 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(7), "DBGBVR7_EL1", RT_UOFFSETOF(CPUMCTX, aBp[7].Value)),
153 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(8), "DBGBVR8_EL1", RT_UOFFSETOF(CPUMCTX, aBp[8].Value)),
154 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(9), "DBGBVR9_EL9", RT_UOFFSETOF(CPUMCTX, aBp[9].Value)),
155 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(10), "DBGBVR10_EL1", RT_UOFFSETOF(CPUMCTX, aBp[10].Value)),
156 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(11), "DBGBVR11_EL1", RT_UOFFSETOF(CPUMCTX, aBp[11].Value)),
157 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(12), "DBGBVR12_EL1", RT_UOFFSETOF(CPUMCTX, aBp[12].Value)),
158 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(13), "DBGBVR13_EL1", RT_UOFFSETOF(CPUMCTX, aBp[13].Value)),
159 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(14), "DBGBVR14_EL1", RT_UOFFSETOF(CPUMCTX, aBp[14].Value)),
160 MVRW(ARMV8_AARCH64_SYSREG_DBGBVRn_EL1(15), "DBGBVR15_EL1", RT_UOFFSETOF(CPUMCTX, aBp[15].Value)),
161 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(0), "DBGBCR0_EL1", RT_UOFFSETOF(CPUMCTX, aBp[0].Ctrl)),
162 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(1), "DBGBCR1_EL1", RT_UOFFSETOF(CPUMCTX, aBp[1].Ctrl)),
163 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(2), "DBGBCR2_EL1", RT_UOFFSETOF(CPUMCTX, aBp[2].Ctrl)),
164 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(3), "DBGBCR3_EL1", RT_UOFFSETOF(CPUMCTX, aBp[3].Ctrl)),
165 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(4), "DBGBCR4_EL1", RT_UOFFSETOF(CPUMCTX, aBp[4].Ctrl)),
166 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(5), "DBGBCR5_EL1", RT_UOFFSETOF(CPUMCTX, aBp[5].Ctrl)),
167 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(6), "DBGBCR6_EL1", RT_UOFFSETOF(CPUMCTX, aBp[6].Ctrl)),
168 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(7), "DBGBCR7_EL1", RT_UOFFSETOF(CPUMCTX, aBp[7].Ctrl)),
169 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(8), "DBGBCR8_EL1", RT_UOFFSETOF(CPUMCTX, aBp[8].Ctrl)),
170 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(9), "DBGBCR9_EL9", RT_UOFFSETOF(CPUMCTX, aBp[9].Ctrl)),
171 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(10), "DBGBCR10_EL1", RT_UOFFSETOF(CPUMCTX, aBp[10].Ctrl)),
172 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(11), "DBGBCR11_EL1", RT_UOFFSETOF(CPUMCTX, aBp[11].Ctrl)),
173 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(12), "DBGBCR12_EL1", RT_UOFFSETOF(CPUMCTX, aBp[12].Ctrl)),
174 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(13), "DBGBCR13_EL1", RT_UOFFSETOF(CPUMCTX, aBp[13].Ctrl)),
175 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(14), "DBGBCR14_EL1", RT_UOFFSETOF(CPUMCTX, aBp[14].Ctrl)),
176 MVRW(ARMV8_AARCH64_SYSREG_DBGBCRn_EL1(15), "DBGBCR15_EL1", RT_UOFFSETOF(CPUMCTX, aBp[15].Ctrl)),
177 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(0), "DBGWVR0_EL1", RT_UOFFSETOF(CPUMCTX, aWp[0].Value)),
178 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(1), "DBGWVR1_EL1", RT_UOFFSETOF(CPUMCTX, aWp[1].Value)),
179 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(2), "DBGWVR2_EL1", RT_UOFFSETOF(CPUMCTX, aWp[2].Value)),
180 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(3), "DBGWVR3_EL1", RT_UOFFSETOF(CPUMCTX, aWp[3].Value)),
181 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(4), "DBGWVR4_EL1", RT_UOFFSETOF(CPUMCTX, aWp[4].Value)),
182 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(5), "DBGWVR5_EL1", RT_UOFFSETOF(CPUMCTX, aWp[5].Value)),
183 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(6), "DBGWVR6_EL1", RT_UOFFSETOF(CPUMCTX, aWp[6].Value)),
184 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(7), "DBGWVR7_EL1", RT_UOFFSETOF(CPUMCTX, aWp[7].Value)),
185 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(8), "DBGWVR8_EL1", RT_UOFFSETOF(CPUMCTX, aWp[8].Value)),
186 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(9), "DBGWVR9_EL9", RT_UOFFSETOF(CPUMCTX, aWp[9].Value)),
187 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(10), "DBGWVR10_EL1", RT_UOFFSETOF(CPUMCTX, aWp[10].Value)),
188 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(11), "DBGWVR11_EL1", RT_UOFFSETOF(CPUMCTX, aWp[11].Value)),
189 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(12), "DBGWVR12_EL1", RT_UOFFSETOF(CPUMCTX, aWp[12].Value)),
190 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(13), "DBGWVR13_EL1", RT_UOFFSETOF(CPUMCTX, aWp[13].Value)),
191 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(14), "DBGWVR14_EL1", RT_UOFFSETOF(CPUMCTX, aWp[14].Value)),
192 MVRW(ARMV8_AARCH64_SYSREG_DBGWVRn_EL1(15), "DBGWVR15_EL1", RT_UOFFSETOF(CPUMCTX, aWp[15].Value)),
193 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(0), "DBGWCR0_EL1", RT_UOFFSETOF(CPUMCTX, aWp[0].Ctrl)),
194 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(1), "DBGWCR1_EL1", RT_UOFFSETOF(CPUMCTX, aWp[1].Ctrl)),
195 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(2), "DBGWCR2_EL1", RT_UOFFSETOF(CPUMCTX, aWp[2].Ctrl)),
196 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(3), "DBGWCR3_EL1", RT_UOFFSETOF(CPUMCTX, aWp[3].Ctrl)),
197 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(4), "DBGWCR4_EL1", RT_UOFFSETOF(CPUMCTX, aWp[4].Ctrl)),
198 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(5), "DBGWCR5_EL1", RT_UOFFSETOF(CPUMCTX, aWp[5].Ctrl)),
199 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(6), "DBGWCR6_EL1", RT_UOFFSETOF(CPUMCTX, aWp[6].Ctrl)),
200 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(7), "DBGWCR7_EL1", RT_UOFFSETOF(CPUMCTX, aWp[7].Ctrl)),
201 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(8), "DBGWCR8_EL1", RT_UOFFSETOF(CPUMCTX, aWp[8].Ctrl)),
202 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(9), "DBGWCR9_EL9", RT_UOFFSETOF(CPUMCTX, aWp[9].Ctrl)),
203 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(10), "DBGWCR10_EL1", RT_UOFFSETOF(CPUMCTX, aWp[10].Ctrl)),
204 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(11), "DBGWCR11_EL1", RT_UOFFSETOF(CPUMCTX, aWp[11].Ctrl)),
205 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(12), "DBGWCR12_EL1", RT_UOFFSETOF(CPUMCTX, aWp[12].Ctrl)),
206 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(13), "DBGWCR13_EL1", RT_UOFFSETOF(CPUMCTX, aWp[13].Ctrl)),
207 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(14), "DBGWCR14_EL1", RT_UOFFSETOF(CPUMCTX, aWp[14].Ctrl)),
208 MVRW(ARMV8_AARCH64_SYSREG_DBGWCRn_EL1(15), "DBGWCR15_EL1", RT_UOFFSETOF(CPUMCTX, aWp[15].Ctrl)),
209};
210
211
212/** Saved state field descriptors for CPUMCTX. */
213static const SSMFIELD g_aCpumCtxFields[] =
214{
215 SSMFIELD_ENTRY( CPUMCTX, aGRegs[0].x),
216 SSMFIELD_ENTRY( CPUMCTX, aGRegs[1].x),
217 SSMFIELD_ENTRY( CPUMCTX, aGRegs[2].x),
218 SSMFIELD_ENTRY( CPUMCTX, aGRegs[3].x),
219 SSMFIELD_ENTRY( CPUMCTX, aGRegs[4].x),
220 SSMFIELD_ENTRY( CPUMCTX, aGRegs[5].x),
221 SSMFIELD_ENTRY( CPUMCTX, aGRegs[6].x),
222 SSMFIELD_ENTRY( CPUMCTX, aGRegs[7].x),
223 SSMFIELD_ENTRY( CPUMCTX, aGRegs[8].x),
224 SSMFIELD_ENTRY( CPUMCTX, aGRegs[9].x),
225 SSMFIELD_ENTRY( CPUMCTX, aGRegs[10].x),
226 SSMFIELD_ENTRY( CPUMCTX, aGRegs[11].x),
227 SSMFIELD_ENTRY( CPUMCTX, aGRegs[12].x),
228 SSMFIELD_ENTRY( CPUMCTX, aGRegs[13].x),
229 SSMFIELD_ENTRY( CPUMCTX, aGRegs[14].x),
230 SSMFIELD_ENTRY( CPUMCTX, aGRegs[15].x),
231 SSMFIELD_ENTRY( CPUMCTX, aGRegs[16].x),
232 SSMFIELD_ENTRY( CPUMCTX, aGRegs[17].x),
233 SSMFIELD_ENTRY( CPUMCTX, aGRegs[18].x),
234 SSMFIELD_ENTRY( CPUMCTX, aGRegs[19].x),
235 SSMFIELD_ENTRY( CPUMCTX, aGRegs[20].x),
236 SSMFIELD_ENTRY( CPUMCTX, aGRegs[21].x),
237 SSMFIELD_ENTRY( CPUMCTX, aGRegs[22].x),
238 SSMFIELD_ENTRY( CPUMCTX, aGRegs[23].x),
239 SSMFIELD_ENTRY( CPUMCTX, aGRegs[24].x),
240 SSMFIELD_ENTRY( CPUMCTX, aGRegs[25].x),
241 SSMFIELD_ENTRY( CPUMCTX, aGRegs[26].x),
242 SSMFIELD_ENTRY( CPUMCTX, aGRegs[27].x),
243 SSMFIELD_ENTRY( CPUMCTX, aGRegs[28].x),
244 SSMFIELD_ENTRY( CPUMCTX, aGRegs[29].x),
245 SSMFIELD_ENTRY( CPUMCTX, aGRegs[30].x),
246 SSMFIELD_ENTRY( CPUMCTX, aVRegs[0].v),
247 SSMFIELD_ENTRY( CPUMCTX, aVRegs[1].v),
248 SSMFIELD_ENTRY( CPUMCTX, aVRegs[2].v),
249 SSMFIELD_ENTRY( CPUMCTX, aVRegs[3].v),
250 SSMFIELD_ENTRY( CPUMCTX, aVRegs[4].v),
251 SSMFIELD_ENTRY( CPUMCTX, aVRegs[5].v),
252 SSMFIELD_ENTRY( CPUMCTX, aVRegs[6].v),
253 SSMFIELD_ENTRY( CPUMCTX, aVRegs[7].v),
254 SSMFIELD_ENTRY( CPUMCTX, aVRegs[8].v),
255 SSMFIELD_ENTRY( CPUMCTX, aVRegs[9].v),
256 SSMFIELD_ENTRY( CPUMCTX, aVRegs[10].v),
257 SSMFIELD_ENTRY( CPUMCTX, aVRegs[11].v),
258 SSMFIELD_ENTRY( CPUMCTX, aVRegs[12].v),
259 SSMFIELD_ENTRY( CPUMCTX, aVRegs[13].v),
260 SSMFIELD_ENTRY( CPUMCTX, aVRegs[14].v),
261 SSMFIELD_ENTRY( CPUMCTX, aVRegs[15].v),
262 SSMFIELD_ENTRY( CPUMCTX, aVRegs[16].v),
263 SSMFIELD_ENTRY( CPUMCTX, aVRegs[17].v),
264 SSMFIELD_ENTRY( CPUMCTX, aVRegs[18].v),
265 SSMFIELD_ENTRY( CPUMCTX, aVRegs[19].v),
266 SSMFIELD_ENTRY( CPUMCTX, aVRegs[20].v),
267 SSMFIELD_ENTRY( CPUMCTX, aVRegs[21].v),
268 SSMFIELD_ENTRY( CPUMCTX, aVRegs[22].v),
269 SSMFIELD_ENTRY( CPUMCTX, aVRegs[23].v),
270 SSMFIELD_ENTRY( CPUMCTX, aVRegs[24].v),
271 SSMFIELD_ENTRY( CPUMCTX, aVRegs[25].v),
272 SSMFIELD_ENTRY( CPUMCTX, aVRegs[26].v),
273 SSMFIELD_ENTRY( CPUMCTX, aVRegs[27].v),
274 SSMFIELD_ENTRY( CPUMCTX, aVRegs[28].v),
275 SSMFIELD_ENTRY( CPUMCTX, aVRegs[29].v),
276 SSMFIELD_ENTRY( CPUMCTX, aVRegs[30].v),
277 SSMFIELD_ENTRY( CPUMCTX, aVRegs[31].v),
278 SSMFIELD_ENTRY( CPUMCTX, aSpReg[0].u64),
279 SSMFIELD_ENTRY( CPUMCTX, aSpReg[1].u64),
280 SSMFIELD_ENTRY( CPUMCTX, Pc.u64),
281 SSMFIELD_ENTRY( CPUMCTX, Spsr.u64),
282 SSMFIELD_ENTRY( CPUMCTX, Elr.u64),
283 SSMFIELD_ENTRY( CPUMCTX, Sctlr.u64),
284 SSMFIELD_ENTRY( CPUMCTX, Tcr.u64),
285 SSMFIELD_ENTRY( CPUMCTX, Ttbr0.u64),
286 SSMFIELD_ENTRY( CPUMCTX, Ttbr1.u64),
287 SSMFIELD_ENTRY( CPUMCTX, VBar.u64),
288 SSMFIELD_ENTRY( CPUMCTX, aBp[0].Ctrl.u64),
289 SSMFIELD_ENTRY( CPUMCTX, aBp[0].Value.u64),
290 SSMFIELD_ENTRY( CPUMCTX, aBp[1].Ctrl.u64),
291 SSMFIELD_ENTRY( CPUMCTX, aBp[1].Value.u64),
292 SSMFIELD_ENTRY( CPUMCTX, aBp[2].Ctrl.u64),
293 SSMFIELD_ENTRY( CPUMCTX, aBp[2].Value.u64),
294 SSMFIELD_ENTRY( CPUMCTX, aBp[3].Ctrl.u64),
295 SSMFIELD_ENTRY( CPUMCTX, aBp[3].Value.u64),
296 SSMFIELD_ENTRY( CPUMCTX, aBp[4].Ctrl.u64),
297 SSMFIELD_ENTRY( CPUMCTX, aBp[4].Value.u64),
298 SSMFIELD_ENTRY( CPUMCTX, aBp[5].Ctrl.u64),
299 SSMFIELD_ENTRY( CPUMCTX, aBp[5].Value.u64),
300 SSMFIELD_ENTRY( CPUMCTX, aBp[6].Ctrl.u64),
301 SSMFIELD_ENTRY( CPUMCTX, aBp[6].Value.u64),
302 SSMFIELD_ENTRY( CPUMCTX, aBp[7].Ctrl.u64),
303 SSMFIELD_ENTRY( CPUMCTX, aBp[7].Value.u64),
304 SSMFIELD_ENTRY( CPUMCTX, aBp[8].Ctrl.u64),
305 SSMFIELD_ENTRY( CPUMCTX, aBp[8].Value.u64),
306 SSMFIELD_ENTRY( CPUMCTX, aBp[9].Ctrl.u64),
307 SSMFIELD_ENTRY( CPUMCTX, aBp[9].Value.u64),
308 SSMFIELD_ENTRY( CPUMCTX, aBp[10].Ctrl.u64),
309 SSMFIELD_ENTRY( CPUMCTX, aBp[10].Value.u64),
310 SSMFIELD_ENTRY( CPUMCTX, aBp[11].Ctrl.u64),
311 SSMFIELD_ENTRY( CPUMCTX, aBp[11].Value.u64),
312 SSMFIELD_ENTRY( CPUMCTX, aBp[12].Ctrl.u64),
313 SSMFIELD_ENTRY( CPUMCTX, aBp[12].Value.u64),
314 SSMFIELD_ENTRY( CPUMCTX, aBp[13].Ctrl.u64),
315 SSMFIELD_ENTRY( CPUMCTX, aBp[13].Value.u64),
316 SSMFIELD_ENTRY( CPUMCTX, aBp[14].Ctrl.u64),
317 SSMFIELD_ENTRY( CPUMCTX, aBp[14].Value.u64),
318 SSMFIELD_ENTRY( CPUMCTX, aBp[15].Ctrl.u64),
319 SSMFIELD_ENTRY( CPUMCTX, aBp[15].Value.u64),
320 SSMFIELD_ENTRY( CPUMCTX, aWp[0].Ctrl.u64),
321 SSMFIELD_ENTRY( CPUMCTX, aWp[0].Value.u64),
322 SSMFIELD_ENTRY( CPUMCTX, aWp[1].Ctrl.u64),
323 SSMFIELD_ENTRY( CPUMCTX, aWp[1].Value.u64),
324 SSMFIELD_ENTRY( CPUMCTX, aWp[2].Ctrl.u64),
325 SSMFIELD_ENTRY( CPUMCTX, aWp[2].Value.u64),
326 SSMFIELD_ENTRY( CPUMCTX, aWp[3].Ctrl.u64),
327 SSMFIELD_ENTRY( CPUMCTX, aWp[3].Value.u64),
328 SSMFIELD_ENTRY( CPUMCTX, aWp[4].Ctrl.u64),
329 SSMFIELD_ENTRY( CPUMCTX, aWp[4].Value.u64),
330 SSMFIELD_ENTRY( CPUMCTX, aWp[5].Ctrl.u64),
331 SSMFIELD_ENTRY( CPUMCTX, aWp[5].Value.u64),
332 SSMFIELD_ENTRY( CPUMCTX, aWp[6].Ctrl.u64),
333 SSMFIELD_ENTRY( CPUMCTX, aWp[6].Value.u64),
334 SSMFIELD_ENTRY( CPUMCTX, aWp[7].Ctrl.u64),
335 SSMFIELD_ENTRY( CPUMCTX, aWp[7].Value.u64),
336 SSMFIELD_ENTRY( CPUMCTX, aWp[8].Ctrl.u64),
337 SSMFIELD_ENTRY( CPUMCTX, aWp[8].Value.u64),
338 SSMFIELD_ENTRY( CPUMCTX, aWp[9].Ctrl.u64),
339 SSMFIELD_ENTRY( CPUMCTX, aWp[9].Value.u64),
340 SSMFIELD_ENTRY( CPUMCTX, aWp[10].Ctrl.u64),
341 SSMFIELD_ENTRY( CPUMCTX, aWp[10].Value.u64),
342 SSMFIELD_ENTRY( CPUMCTX, aWp[11].Ctrl.u64),
343 SSMFIELD_ENTRY( CPUMCTX, aWp[11].Value.u64),
344 SSMFIELD_ENTRY( CPUMCTX, aWp[12].Ctrl.u64),
345 SSMFIELD_ENTRY( CPUMCTX, aWp[12].Value.u64),
346 SSMFIELD_ENTRY( CPUMCTX, aWp[13].Ctrl.u64),
347 SSMFIELD_ENTRY( CPUMCTX, aWp[13].Value.u64),
348 SSMFIELD_ENTRY( CPUMCTX, aWp[14].Ctrl.u64),
349 SSMFIELD_ENTRY( CPUMCTX, aWp[14].Value.u64),
350 SSMFIELD_ENTRY( CPUMCTX, aWp[15].Ctrl.u64),
351 SSMFIELD_ENTRY( CPUMCTX, aWp[15].Value.u64),
352 SSMFIELD_ENTRY( CPUMCTX, Mdscr.u64),
353 SSMFIELD_ENTRY( CPUMCTX, Apda.Low.u64),
354 SSMFIELD_ENTRY( CPUMCTX, Apda.High.u64),
355 SSMFIELD_ENTRY( CPUMCTX, Apdb.Low.u64),
356 SSMFIELD_ENTRY( CPUMCTX, Apdb.High.u64),
357 SSMFIELD_ENTRY( CPUMCTX, Apga.Low.u64),
358 SSMFIELD_ENTRY( CPUMCTX, Apga.High.u64),
359 SSMFIELD_ENTRY( CPUMCTX, Apia.Low.u64),
360 SSMFIELD_ENTRY( CPUMCTX, Apia.High.u64),
361 SSMFIELD_ENTRY( CPUMCTX, Apib.Low.u64),
362 SSMFIELD_ENTRY( CPUMCTX, Apib.High.u64),
363 SSMFIELD_ENTRY( CPUMCTX, Afsr0.u64),
364 SSMFIELD_ENTRY( CPUMCTX, Afsr1.u64),
365 SSMFIELD_ENTRY( CPUMCTX, Amair.u64),
366 SSMFIELD_ENTRY( CPUMCTX, CntKCtl.u64),
367 SSMFIELD_ENTRY( CPUMCTX, ContextIdr.u64),
368 SSMFIELD_ENTRY( CPUMCTX, Cpacr.u64),
369 SSMFIELD_ENTRY( CPUMCTX, Csselr.u64),
370 SSMFIELD_ENTRY( CPUMCTX, Esr.u64),
371 SSMFIELD_ENTRY( CPUMCTX, Far.u64),
372 SSMFIELD_ENTRY( CPUMCTX, Mair.u64),
373 SSMFIELD_ENTRY( CPUMCTX, Par.u64),
374 SSMFIELD_ENTRY( CPUMCTX, TpIdrRoEl0.u64),
375 SSMFIELD_ENTRY( CPUMCTX, aTpIdr[0].u64),
376 SSMFIELD_ENTRY( CPUMCTX, aTpIdr[1].u64),
377 SSMFIELD_ENTRY( CPUMCTX, MDccInt.u64),
378 SSMFIELD_ENTRY( CPUMCTX, fpcr),
379 SSMFIELD_ENTRY( CPUMCTX, fpsr),
380 SSMFIELD_ENTRY( CPUMCTX, fPState),
381 SSMFIELD_ENTRY( CPUMCTX, fOsLck),
382 SSMFIELD_ENTRY( CPUMCTX, CntvCtlEl0),
383 SSMFIELD_ENTRY( CPUMCTX, CntvCValEl0),
384 /** @name EL2 support:
385 * @{ */
386 SSMFIELD_ENTRY( CPUMCTX, CntHCtlEl2),
387 SSMFIELD_ENTRY( CPUMCTX, CntHpCtlEl2),
388 SSMFIELD_ENTRY( CPUMCTX, CntHpCValEl2),
389 SSMFIELD_ENTRY( CPUMCTX, CntHpTValEl2),
390 SSMFIELD_ENTRY( CPUMCTX, CntVOffEl2),
391 SSMFIELD_ENTRY( CPUMCTX, CptrEl2),
392 SSMFIELD_ENTRY( CPUMCTX, ElrEl2),
393 SSMFIELD_ENTRY( CPUMCTX, EsrEl2),
394 SSMFIELD_ENTRY( CPUMCTX, FarEl2),
395 SSMFIELD_ENTRY( CPUMCTX, HcrEl2),
396 SSMFIELD_ENTRY( CPUMCTX, HpFarEl2),
397 SSMFIELD_ENTRY( CPUMCTX, MairEl2),
398 SSMFIELD_ENTRY( CPUMCTX, MdcrEl2),
399 SSMFIELD_ENTRY( CPUMCTX, SctlrEl2),
400 SSMFIELD_ENTRY( CPUMCTX, SpsrEl2),
401 SSMFIELD_ENTRY( CPUMCTX, SpEl2),
402 SSMFIELD_ENTRY( CPUMCTX, TcrEl2),
403 SSMFIELD_ENTRY( CPUMCTX, TpidrEl2),
404 SSMFIELD_ENTRY( CPUMCTX, Ttbr0El2),
405 SSMFIELD_ENTRY( CPUMCTX, Ttbr1El2),
406 SSMFIELD_ENTRY( CPUMCTX, VBarEl2),
407 SSMFIELD_ENTRY( CPUMCTX, VMpidrEl2),
408 SSMFIELD_ENTRY( CPUMCTX, VPidrEl2),
409 SSMFIELD_ENTRY( CPUMCTX, VTcrEl2),
410 SSMFIELD_ENTRY( CPUMCTX, VTtbrEl2),
411 /** @} */
412
413 SSMFIELD_ENTRY_TERM()
414};
415
416/**
417 * Additional fields for v2
418 */
419static const SSMFIELD g_aCpumCtxFieldsV2[] =
420{
421 SSMFIELD_ENTRY( CPUMCTX, Actlr.u64),
422 SSMFIELD_ENTRY_TERM()
423};
424
425
426/**
427 * Initializes the guest system register states.
428 *
429 * @returns VBox status code.
430 * @param pVM The cross context VM structure.
431 */
432static int cpumR3InitSysRegs(PVM pVM)
433{
434 for (uint32_t i = 0; i < RT_ELEMENTS(g_aSysRegRanges); i++)
435 {
436 int rc = CPUMR3SysRegRangesInsert(pVM, &g_aSysRegRanges[i]);
437 AssertLogRelRCReturn(rc, rc);
438 }
439
440 return VINF_SUCCESS;
441}
442
443
444/**
445 * Initializes the CPUM.
446 *
447 * @returns VBox status code.
448 * @param pVM The cross context VM structure.
449 */
450VMMR3DECL(int) CPUMR3Init(PVM pVM)
451{
452 LogFlow(("CPUMR3Init\n"));
453
454 /*
455 * Assert alignment, sizes and tables.
456 */
457 AssertCompileMemberAlignment(VM, cpum.s, 32);
458 AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
459 AssertCompileSizeAlignment(CPUMCTX, 64);
460 AssertCompileMemberAlignment(VM, cpum, 64);
461 AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
462#ifdef VBOX_STRICT
463 int rc2 = cpumR3SysRegStrictInitChecks();
464 AssertRCReturn(rc2, rc2);
465#endif
466
467 pVM->cpum.s.GuestInfo.paSysRegRangesR3 = &pVM->cpum.s.GuestInfo.aSysRegRanges[0];
468 pVM->cpum.s.bResetEl = ARMV8_AARCH64_EL_1;
469
470 PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
471
472 /** @cfgm{/CPUM/ResetPcValue, string}
473 * Program counter value after a reset, sets the address of the first instruction to execute. */
474 int rc = CFGMR3QueryU64Def(pCpumCfg, "ResetPcValue", &pVM->cpum.s.u64ResetPc, 0);
475 AssertLogRelRCReturn(rc, rc);
476
477 /** @cfgm{/CPUM/NestedHWVirt, bool, false}
478 * Whether to expose the hardware virtualization (EL2) feature to the guest.
479 * The default is false, and when enabled requires a 64-bit CPU and a NEM backend
480 * supporting it.
481 */
482 bool fNestedHWVirt = false;
483 rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &fNestedHWVirt, false);
484 AssertLogRelRCReturn(rc, rc);
485 if (fNestedHWVirt)
486 pVM->cpum.s.bResetEl = ARMV8_AARCH64_EL_2;
487
488 /*
489 * Register saved state data item.
490 */
491 rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
492 NULL, cpumR3LiveExec, NULL,
493 NULL, cpumR3SaveExec, NULL,
494 cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
495 if (RT_FAILURE(rc))
496 return rc;
497
498 /*
499 * Register info handlers and registers with the debugger facility.
500 */
501 DBGFR3InfoRegisterInternalEx(pVM, "cpum", "Displays the all the cpu states.",
502 &cpumR3InfoAll, DBGFINFO_FLAGS_ALL_EMTS);
503 DBGFR3InfoRegisterInternalEx(pVM, "cpumguest", "Displays the guest cpu state.",
504 &cpumR3InfoGuest, DBGFINFO_FLAGS_ALL_EMTS);
505 DBGFR3InfoRegisterInternalEx(pVM, "cpumguestinstr", "Displays the current guest instruction.",
506 &cpumR3InfoGuestInstr, DBGFINFO_FLAGS_ALL_EMTS);
507 DBGFR3InfoRegisterInternal( pVM, "cpuid", "Displays the guest cpuid information.",
508 &cpumR3CpuIdInfo);
509 DBGFR3InfoRegisterInternal( pVM, "cpufeat", "Displays the guest features.",
510 &cpumR3CpuFeatInfo);
511
512 rc = cpumR3DbgInit(pVM);
513 if (RT_FAILURE(rc))
514 return rc;
515
516 /*
517 * Initialize the Guest system register states.
518 */
519 rc = cpumR3InitSysRegs(pVM);
520 if (RT_FAILURE(rc))
521 return rc;
522
523 /*
524 * Initialize the general guest CPU state.
525 */
526 CPUMR3Reset(pVM);
527
528 return VINF_SUCCESS;
529}
530
531
532/**
533 * Applies relocations to data and code managed by this
534 * component. This function will be called at init and
535 * whenever the VMM need to relocate it self inside the GC.
536 *
537 * The CPUM will update the addresses used by the switcher.
538 *
539 * @param pVM The cross context VM structure.
540 */
541VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
542{
543 RT_NOREF(pVM);
544}
545
546
547/**
548 * Terminates the CPUM.
549 *
550 * Termination means cleaning up and freeing all resources,
551 * the VM it self is at this point powered off or suspended.
552 *
553 * @returns VBox status code.
554 * @param pVM The cross context VM structure.
555 */
556VMMR3DECL(int) CPUMR3Term(PVM pVM)
557{
558 RT_NOREF(pVM);
559 return VINF_SUCCESS;
560}
561
562
563/**
564 * Resets a virtual CPU.
565 *
566 * Used by CPUMR3Reset and CPU hot plugging.
567 *
568 * @param pVM The cross context VM structure.
569 * @param pVCpu The cross context virtual CPU structure of the CPU that is
570 * being reset. This may differ from the current EMT.
571 */
572VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
573{
574 RT_NOREF(pVM);
575
576 /** @todo anything different for VCPU > 0? */
577 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
578
579 /*
580 * Initialize everything to ZERO first.
581 */
582 RT_BZERO(pCtx, sizeof(*pCtx));
583
584 /* Start in Supervisor mode. */
585 /** @todo Differentiate between Aarch64 and Aarch32 configuation. */
586 pCtx->fPState = ARMV8_SPSR_EL2_AARCH64_SET_EL(pVM->cpum.s.bResetEl)
587 | ARMV8_SPSR_EL2_AARCH64_SP
588 | ARMV8_SPSR_EL2_AARCH64_D
589 | ARMV8_SPSR_EL2_AARCH64_A
590 | ARMV8_SPSR_EL2_AARCH64_I
591 | ARMV8_SPSR_EL2_AARCH64_F;
592
593 pCtx->Pc.u64 = pVM->cpum.s.u64ResetPc;
594 /** @todo */
595}
596
597
598/**
599 * Resets the CPU.
600 *
601 * @param pVM The cross context VM structure.
602 */
603VMMR3DECL(void) CPUMR3Reset(PVM pVM)
604{
605 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
606 {
607 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
608 CPUMR3ResetCpu(pVM, pVCpu);
609 }
610}
611
612
613
614
615/**
616 * Pass 0 live exec callback.
617 *
618 * @returns VINF_SSM_DONT_CALL_AGAIN.
619 * @param pVM The cross context VM structure.
620 * @param pSSM The saved state handle.
621 * @param uPass The pass (0).
622 */
623static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
624{
625 AssertReturn(uPass == 0, VERR_SSM_UNEXPECTED_PASS);
626 cpumR3SaveCpuId(pVM, pSSM);
627 return VINF_SSM_DONT_CALL_AGAIN;
628}
629
630
631/**
632 * Execute state save operation.
633 *
634 * @returns VBox status code.
635 * @param pVM The cross context VM structure.
636 * @param pSSM SSM operation handle.
637 */
638static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
639{
640 /*
641 * Save.
642 */
643 SSMR3PutU32(pSSM, pVM->cCpus);
644 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
645 {
646 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
647 PCPUMCTX const pGstCtx = &pVCpu->cpum.s.Guest;
648
649 SSMR3PutStructEx(pSSM, pGstCtx, sizeof(*pGstCtx), 0, g_aCpumCtxFields, NULL);
650 SSMR3PutStructEx(pSSM, pGstCtx, sizeof(*pGstCtx), 0, g_aCpumCtxFieldsV2, NULL);
651
652 SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
653 }
654
655 cpumR3SaveCpuId(pVM, pSSM);
656 return VINF_SUCCESS;
657}
658
659
660/**
661 * @callback_method_impl{FNSSMINTLOADPREP}
662 */
663static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
664{
665 RT_NOREF(pSSM);
666 pVM->cpum.s.fPendingRestore = true;
667 return VINF_SUCCESS;
668}
669
670
671/**
672 * @callback_method_impl{FNSSMINTLOADEXEC}
673 */
674static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
675{
676 /*
677 * Validate version.
678 */
679 if ( uVersion != CPUM_SAVED_STATE_VERSION
680 && uVersion != CPUM_SAVED_STATE_VERSION_ARMV8_V1)
681 {
682 AssertMsgFailed(("cpumR3LoadExec: Invalid version uVersion=%d!\n", uVersion));
683 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
684 }
685
686 if (uPass == SSM_PASS_FINAL)
687 {
688 uint32_t cCpus;
689 int rc = SSMR3GetU32(pSSM, &cCpus); AssertRCReturn(rc, rc);
690 AssertLogRelMsgReturn(cCpus == pVM->cCpus, ("Mismatching CPU counts: saved: %u; configured: %u \n", cCpus, pVM->cCpus),
691 VERR_SSM_UNEXPECTED_DATA);
692
693 /*
694 * Do the per-CPU restoring.
695 */
696 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
697 {
698 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
699 PCPUMCTX pGstCtx = &pVCpu->cpum.s.Guest;
700
701 /*
702 * Restore the CPUMCTX structure.
703 */
704 rc = SSMR3GetStructEx(pSSM, pGstCtx, sizeof(*pGstCtx), 0, g_aCpumCtxFields, NULL);
705 AssertRCReturn(rc, rc);
706
707 if (uVersion == CPUM_SAVED_STATE_VERSION_ARMV8_V2)
708 {
709 rc = SSMR3GetStructEx(pSSM, pGstCtx, sizeof(*pGstCtx), 0, g_aCpumCtxFieldsV2, NULL);
710 AssertRCReturn(rc, rc);
711 }
712
713 /*
714 * Restore a couple of flags.
715 */
716 SSMR3GetU32(pSSM, &pVCpu->cpum.s.fChanged);
717 }
718 }
719
720 pVM->cpum.s.fPendingRestore = false;
721
722 /* Load CPUID and explode guest features. */
723 return cpumR3LoadCpuIdArmV8(pVM, pSSM, uVersion);
724}
725
726
727/**
728 * @callback_method_impl{FNSSMINTLOADDONE}
729 */
730static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
731{
732 if (RT_FAILURE(SSMR3HandleGetStatus(pSSM)))
733 return VINF_SUCCESS;
734
735 /* just check this since we can. */ /** @todo Add a SSM unit flag for indicating that it's mandatory during a restore. */
736 if (pVM->cpum.s.fPendingRestore)
737 {
738 LogRel(("CPUM: Missing state!\n"));
739 return VERR_INTERNAL_ERROR_2;
740 }
741
742 /** @todo */
743 return VINF_SUCCESS;
744}
745
746
747/**
748 * Checks if the CPUM state restore is still pending.
749 *
750 * @returns true / false.
751 * @param pVM The cross context VM structure.
752 */
753VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM)
754{
755 return pVM->cpum.s.fPendingRestore;
756}
757
758
759/**
760 * Formats the PSTATE value into mnemonics.
761 *
762 * @param pszPState Where to write the mnemonics. (Assumes sufficient buffer space.)
763 * @param fPState The PSTATE value with both guest hardware and VBox
764 * internal bits included.
765 */
766static void cpumR3InfoFormatPState(char *pszPState, uint32_t fPState)
767{
768 /*
769 * Format the flags.
770 */
771 static const struct
772 {
773 const char *pszSet; const char *pszClear; uint32_t fFlag;
774 } s_aFlags[] =
775 {
776 { "SP", "nSP", ARMV8_SPSR_EL2_AARCH64_SP },
777 { "M4", "nM4", ARMV8_SPSR_EL2_AARCH64_M4 },
778 { "T", "nT", ARMV8_SPSR_EL2_AARCH64_T },
779 { "nF", "F", ARMV8_SPSR_EL2_AARCH64_F },
780 { "nI", "I", ARMV8_SPSR_EL2_AARCH64_I },
781 { "nA", "A", ARMV8_SPSR_EL2_AARCH64_A },
782 { "nD", "D", ARMV8_SPSR_EL2_AARCH64_D },
783 { "V", "nV", ARMV8_SPSR_EL2_AARCH64_V },
784 { "C", "nC", ARMV8_SPSR_EL2_AARCH64_C },
785 { "Z", "nZ", ARMV8_SPSR_EL2_AARCH64_Z },
786 { "N", "nN", ARMV8_SPSR_EL2_AARCH64_N },
787 };
788 char *psz = pszPState;
789 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
790 {
791 const char *pszAdd = s_aFlags[i].fFlag & fPState ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
792 if (pszAdd)
793 {
794 strcpy(psz, pszAdd);
795 psz += strlen(pszAdd);
796 *psz++ = ' ';
797 }
798 }
799 psz[-1] = '\0';
800}
801
802
803/**
804 * Formats a full register dump.
805 *
806 * @param pVM The cross context VM structure.
807 * @param pCtx The context to format.
808 * @param pHlp Output functions.
809 * @param enmType The dump type.
810 */
811static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType)
812{
813 RT_NOREF(pVM);
814
815 /*
816 * Format the PSTATE.
817 */
818 char szPState[80];
819 cpumR3InfoFormatPState(&szPState[0], pCtx->fPState);
820
821 /*
822 * Format the registers.
823 */
824 switch (enmType)
825 {
826 case CPUMDUMPTYPE_TERSE:
827 if (CPUMIsGuestIn64BitCodeEx(pCtx))
828 pHlp->pfnPrintf(pHlp,
829 "x0=%016RX64 x1=%016RX64 x2=%016RX64 x3=%016RX64\n"
830 "x4=%016RX64 x5=%016RX64 x6=%016RX64 x7=%016RX64\n"
831 "x8=%016RX64 x9=%016RX64 x10=%016RX64 x11=%016RX64\n"
832 "x12=%016RX64 x13=%016RX64 x14=%016RX64 x15=%016RX64\n"
833 "x16=%016RX64 x17=%016RX64 x18=%016RX64 x19=%016RX64\n"
834 "x20=%016RX64 x21=%016RX64 x22=%016RX64 x23=%016RX64\n"
835 "x24=%016RX64 x25=%016RX64 x26=%016RX64 x27=%016RX64\n"
836 "x28=%016RX64 x29=%016RX64 x30=%016RX64\n"
837 "pc=%016RX64 pstate=%016RX64 %s\n"
838 "sp_el0=%016RX64 sp_el1=%016RX64\n",
839 pCtx->aGRegs[0], pCtx->aGRegs[1], pCtx->aGRegs[2], pCtx->aGRegs[3],
840 pCtx->aGRegs[4], pCtx->aGRegs[5], pCtx->aGRegs[6], pCtx->aGRegs[7],
841 pCtx->aGRegs[8], pCtx->aGRegs[9], pCtx->aGRegs[10], pCtx->aGRegs[11],
842 pCtx->aGRegs[12], pCtx->aGRegs[13], pCtx->aGRegs[14], pCtx->aGRegs[15],
843 pCtx->aGRegs[16], pCtx->aGRegs[17], pCtx->aGRegs[18], pCtx->aGRegs[19],
844 pCtx->aGRegs[20], pCtx->aGRegs[21], pCtx->aGRegs[22], pCtx->aGRegs[23],
845 pCtx->aGRegs[24], pCtx->aGRegs[25], pCtx->aGRegs[26], pCtx->aGRegs[27],
846 pCtx->aGRegs[28], pCtx->aGRegs[29], pCtx->aGRegs[30],
847 pCtx->Pc.u64, pCtx->fPState, szPState,
848 pCtx->aSpReg[0].u64, pCtx->aSpReg[1].u64);
849 else
850 AssertFailed();
851 break;
852
853 case CPUMDUMPTYPE_DEFAULT:
854 if (CPUMIsGuestIn64BitCodeEx(pCtx))
855 pHlp->pfnPrintf(pHlp,
856 "x0=%016RX64 x1=%016RX64 x2=%016RX64 x3=%016RX64\n"
857 "x4=%016RX64 x5=%016RX64 x6=%016RX64 x7=%016RX64\n"
858 "x8=%016RX64 x9=%016RX64 x10=%016RX64 x11=%016RX64\n"
859 "x12=%016RX64 x13=%016RX64 x14=%016RX64 x15=%016RX64\n"
860 "x16=%016RX64 x17=%016RX64 x18=%016RX64 x19=%016RX64\n"
861 "x20=%016RX64 x21=%016RX64 x22=%016RX64 x23=%016RX64\n"
862 "x24=%016RX64 x25=%016RX64 x26=%016RX64 x27=%016RX64\n"
863 "x28=%016RX64 x29=%016RX64 x30=%016RX64\n"
864 "pc=%016RX64 pstate=%016RX64 %s\n"
865 "sp_el0=%016RX64 sp_el1=%016RX64 sctlr_el1=%016RX64\n"
866 "tcr_el1=%016RX64 ttbr0_el1=%016RX64 ttbr1_el1=%016RX64\n"
867 "vbar_el1=%016RX64 elr_el1=%016RX64 esr_el1=%016RX64\n",
868 pCtx->aGRegs[0], pCtx->aGRegs[1], pCtx->aGRegs[2], pCtx->aGRegs[3],
869 pCtx->aGRegs[4], pCtx->aGRegs[5], pCtx->aGRegs[6], pCtx->aGRegs[7],
870 pCtx->aGRegs[8], pCtx->aGRegs[9], pCtx->aGRegs[10], pCtx->aGRegs[11],
871 pCtx->aGRegs[12], pCtx->aGRegs[13], pCtx->aGRegs[14], pCtx->aGRegs[15],
872 pCtx->aGRegs[16], pCtx->aGRegs[17], pCtx->aGRegs[18], pCtx->aGRegs[19],
873 pCtx->aGRegs[20], pCtx->aGRegs[21], pCtx->aGRegs[22], pCtx->aGRegs[23],
874 pCtx->aGRegs[24], pCtx->aGRegs[25], pCtx->aGRegs[26], pCtx->aGRegs[27],
875 pCtx->aGRegs[28], pCtx->aGRegs[29], pCtx->aGRegs[30],
876 pCtx->Pc.u64, pCtx->fPState, szPState,
877 pCtx->aSpReg[0].u64, pCtx->aSpReg[1].u64, pCtx->Sctlr.u64,
878 pCtx->Tcr.u64, pCtx->Ttbr0.u64, pCtx->Ttbr1.u64,
879 pCtx->VBar.u64, pCtx->Elr.u64, pCtx->Esr.u64);
880 else
881 AssertFailed();
882 break;
883
884 case CPUMDUMPTYPE_VERBOSE:
885 if (CPUMIsGuestIn64BitCodeEx(pCtx))
886 pHlp->pfnPrintf(pHlp,
887 "x0=%016RX64 x1=%016RX64 x2=%016RX64 x3=%016RX64\n"
888 "x4=%016RX64 x5=%016RX64 x6=%016RX64 x7=%016RX64\n"
889 "x8=%016RX64 x9=%016RX64 x10=%016RX64 x11=%016RX64\n"
890 "x12=%016RX64 x13=%016RX64 x14=%016RX64 x15=%016RX64\n"
891 "x16=%016RX64 x17=%016RX64 x18=%016RX64 x19=%016RX64\n"
892 "x20=%016RX64 x21=%016RX64 x22=%016RX64 x23=%016RX64\n"
893 "x24=%016RX64 x25=%016RX64 x26=%016RX64 x27=%016RX64\n"
894 "x28=%016RX64 x29=%016RX64 x30=%016RX64\n"
895 "pc=%016RX64 pstate=%016RX64 %s\n"
896 "sp_el0=%016RX64 sp_el1=%016RX64 sctlr_el1=%016RX64\n"
897 "tcr_el1=%016RX64 ttbr0_el1=%016RX64 ttbr1_el1=%016RX64\n"
898 "vbar_el1=%016RX64 elr_el1=%016RX64 esr_el1=%016RX64\n"
899 "contextidr_el1=%016RX64 tpidrr0_el0=%016RX64\n"
900 "tpidr_el0=%016RX64 tpidr_el1=%016RX64\n"
901 "far_el1=%016RX64 mair_el1=%016RX64 par_el1=%016RX64\n"
902 "cntv_ctl_el0=%016RX64 cntv_val_el0=%016RX64\n"
903 "afsr0_el1=%016RX64 afsr0_el1=%016RX64 amair_el1=%016RX64\n"
904 "cntkctl_el1=%016RX64 cpacr_el1=%016RX64 csselr_el1=%016RX64\n"
905 "mdccint_el1=%016RX64\n",
906 pCtx->aGRegs[0], pCtx->aGRegs[1], pCtx->aGRegs[2], pCtx->aGRegs[3],
907 pCtx->aGRegs[4], pCtx->aGRegs[5], pCtx->aGRegs[6], pCtx->aGRegs[7],
908 pCtx->aGRegs[8], pCtx->aGRegs[9], pCtx->aGRegs[10], pCtx->aGRegs[11],
909 pCtx->aGRegs[12], pCtx->aGRegs[13], pCtx->aGRegs[14], pCtx->aGRegs[15],
910 pCtx->aGRegs[16], pCtx->aGRegs[17], pCtx->aGRegs[18], pCtx->aGRegs[19],
911 pCtx->aGRegs[20], pCtx->aGRegs[21], pCtx->aGRegs[22], pCtx->aGRegs[23],
912 pCtx->aGRegs[24], pCtx->aGRegs[25], pCtx->aGRegs[26], pCtx->aGRegs[27],
913 pCtx->aGRegs[28], pCtx->aGRegs[29], pCtx->aGRegs[30],
914 pCtx->Pc.u64, pCtx->fPState, szPState,
915 pCtx->aSpReg[0].u64, pCtx->aSpReg[1].u64, pCtx->Sctlr.u64,
916 pCtx->Tcr.u64, pCtx->Ttbr0.u64, pCtx->Ttbr1.u64,
917 pCtx->VBar.u64, pCtx->Elr.u64, pCtx->Esr.u64,
918 pCtx->ContextIdr.u64, pCtx->TpIdrRoEl0.u64,
919 pCtx->aTpIdr[0].u64, pCtx->aTpIdr[1].u64,
920 pCtx->Far.u64, pCtx->Mair.u64, pCtx->Par.u64,
921 pCtx->CntvCtlEl0, pCtx->CntvCValEl0,
922 pCtx->Afsr0.u64, pCtx->Afsr1.u64, pCtx->Amair.u64,
923 pCtx->CntKCtl.u64, pCtx->Cpacr.u64, pCtx->Csselr.u64,
924 pCtx->MDccInt.u64);
925 else
926 AssertFailed();
927
928 pHlp->pfnPrintf(pHlp, "fpcr=%016RX64 fpsr=%016RX64\n", pCtx->fpcr, pCtx->fpsr);
929 for (unsigned i = 0; i < RT_ELEMENTS(pCtx->aVRegs); i++)
930 pHlp->pfnPrintf(pHlp,
931 i & 1
932 ? "q%u%s=%08RX32'%08RX32'%08RX32'%08RX32\n"
933 : "q%u%s=%08RX32'%08RX32'%08RX32'%08RX32 ",
934 i, i < 10 ? " " : "",
935 pCtx->aVRegs[i].au32[3],
936 pCtx->aVRegs[i].au32[2],
937 pCtx->aVRegs[i].au32[1],
938 pCtx->aVRegs[i].au32[0]);
939
940 pHlp->pfnPrintf(pHlp, "mdscr_el1=%016RX64\n", pCtx->Mdscr.u64);
941 for (unsigned i = 0; i < RT_ELEMENTS(pCtx->aBp); i++)
942 pHlp->pfnPrintf(pHlp, "DbgBp%u%s: Control=%016RX64 Value=%016RX64\n",
943 i, i < 10 ? " " : "",
944 pCtx->aBp[i].Ctrl, pCtx->aBp[i].Value);
945
946 for (unsigned i = 0; i < RT_ELEMENTS(pCtx->aWp); i++)
947 pHlp->pfnPrintf(pHlp, "DbgWp%u%s: Control=%016RX64 Value=%016RX64\n",
948 i, i < 10 ? " " : "",
949 pCtx->aWp[i].Ctrl, pCtx->aWp[i].Value);
950
951 pHlp->pfnPrintf(pHlp, "APDAKey=%016RX64'%016RX64\n", pCtx->Apda.High.u64, pCtx->Apda.Low.u64);
952 pHlp->pfnPrintf(pHlp, "APDBKey=%016RX64'%016RX64\n", pCtx->Apdb.High.u64, pCtx->Apdb.Low.u64);
953 pHlp->pfnPrintf(pHlp, "APGAKey=%016RX64'%016RX64\n", pCtx->Apga.High.u64, pCtx->Apga.Low.u64);
954 pHlp->pfnPrintf(pHlp, "APIAKey=%016RX64'%016RX64\n", pCtx->Apia.High.u64, pCtx->Apia.Low.u64);
955 pHlp->pfnPrintf(pHlp, "APIBKey=%016RX64'%016RX64\n", pCtx->Apib.High.u64, pCtx->Apib.Low.u64);
956
957 break;
958 }
959}
960
961
962/**
963 * Display all cpu states and any other cpum info.
964 *
965 * @param pVM The cross context VM structure.
966 * @param pHlp The info helper functions.
967 * @param pszArgs Arguments, ignored.
968 */
969static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
970{
971 cpumR3InfoGuest(pVM, pHlp, pszArgs);
972 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
973}
974
975
976/**
977 * Parses the info argument.
978 *
979 * The argument starts with 'verbose', 'terse' or 'default' and then
980 * continues with the comment string.
981 *
982 * @param pszArgs The pointer to the argument string.
983 * @param penmType Where to store the dump type request.
984 * @param ppszComment Where to store the pointer to the comment string.
985 */
986static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
987{
988 if (!pszArgs)
989 {
990 *penmType = CPUMDUMPTYPE_DEFAULT;
991 *ppszComment = "";
992 }
993 else
994 {
995 if (!strncmp(pszArgs, RT_STR_TUPLE("verbose")))
996 {
997 pszArgs += 7;
998 *penmType = CPUMDUMPTYPE_VERBOSE;
999 }
1000 else if (!strncmp(pszArgs, RT_STR_TUPLE("terse")))
1001 {
1002 pszArgs += 5;
1003 *penmType = CPUMDUMPTYPE_TERSE;
1004 }
1005 else if (!strncmp(pszArgs, RT_STR_TUPLE("default")))
1006 {
1007 pszArgs += 7;
1008 *penmType = CPUMDUMPTYPE_DEFAULT;
1009 }
1010 else
1011 *penmType = CPUMDUMPTYPE_DEFAULT;
1012 *ppszComment = RTStrStripL(pszArgs);
1013 }
1014}
1015
1016
1017/**
1018 * Display the guest cpu state.
1019 *
1020 * @param pVM The cross context VM structure.
1021 * @param pHlp The info helper functions.
1022 * @param pszArgs Arguments.
1023 */
1024static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1025{
1026 CPUMDUMPTYPE enmType;
1027 const char *pszComment;
1028 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1029
1030 PVMCPU pVCpu = VMMGetCpu(pVM);
1031 if (!pVCpu)
1032 pVCpu = pVM->apCpusR3[0];
1033
1034 pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
1035
1036 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1037 cpumR3InfoOne(pVM, pCtx, pHlp, enmType);
1038}
1039
1040
1041/**
1042 * Display the current guest instruction
1043 *
1044 * @param pVM The cross context VM structure.
1045 * @param pHlp The info helper functions.
1046 * @param pszArgs Arguments, ignored.
1047 */
1048static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1049{
1050 NOREF(pszArgs);
1051
1052 PVMCPU pVCpu = VMMGetCpu(pVM);
1053 if (!pVCpu)
1054 pVCpu = pVM->apCpusR3[0];
1055
1056 char szInstruction[256];
1057 szInstruction[0] = '\0';
1058 DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
1059 pHlp->pfnPrintf(pHlp, "\nCPUM%u: %s\n\n", pVCpu->idCpu, szInstruction);
1060}
1061
1062
1063/**
1064 * Called when the ring-3 init phase completes.
1065 *
1066 * @returns VBox status code.
1067 * @param pVM The cross context VM structure.
1068 * @param enmWhat Which init phase.
1069 */
1070VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1071{
1072 RT_NOREF(pVM, enmWhat);
1073 return VINF_SUCCESS;
1074}
1075
1076
1077/**
1078 * Called when the ring-0 init phases completed.
1079 *
1080 * @param pVM The cross context VM structure.
1081 */
1082VMMR3DECL(void) CPUMR3LogCpuIdAndMsrFeatures(PVM pVM)
1083{
1084 /*
1085 * Enable log buffering as we're going to log a lot of lines.
1086 */
1087 bool const fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
1088
1089 /*
1090 * Log the cpuid.
1091 */
1092 RTCPUSET OnlineSet;
1093 LogRel(("CPUM: Logical host processors: %u present, %u max, %u online, online mask: %016RX64\n",
1094 (unsigned)RTMpGetPresentCount(), (unsigned)RTMpGetCount(), (unsigned)RTMpGetOnlineCount(),
1095 RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
1096 RTCPUID cCores = RTMpGetCoreCount();
1097 if (cCores)
1098 LogRel(("CPUM: Physical host cores: %u\n", (unsigned)cCores));
1099 LogRel(("************************* CPUID dump ************************\n"));
1100 DBGFR3Info(pVM->pUVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
1101 LogRel(("\n"));
1102 DBGFR3_INFO_LOG_SAFE(pVM, "cpuid", "verbose"); /* macro */
1103 LogRel(("******************** End of CPUID dump **********************\n"));
1104
1105 LogRel(("******************** CPU feature dump ***********************\n"));
1106 DBGFR3Info(pVM->pUVM, "cpufeat", "verbose", DBGFR3InfoLogRelHlp());
1107 LogRel(("\n"));
1108 DBGFR3_INFO_LOG_SAFE(pVM, "cpufeat", "verbose"); /* macro */
1109 LogRel(("***************** End of CPU feature dump *******************\n"));
1110
1111 /*
1112 * Restore the log buffering state to what it was previously.
1113 */
1114 RTLogRelSetBuffering(fOldBuffered);
1115}
1116
#if 0 /* nobody is using these atm, they are for AMD64/darwin only */
/**
 * Marks the guest debug state as active.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @note This is used solely by NEM (hence the name) to set the correct flags here
 *       without loading the host's DRx registers, which is not possible from ring-3 anyway.
 *       The specific NEM backends have to make sure to load the correct values.
 */
VMMR3_INT_DECL(void) CPUMR3NemActivateGuestDebugState(PVMCPUCC pVCpu)
{
    /* Atomically flip the use-flags: clear hyper, set guest. */
    ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HYPER);
    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
}


/**
 * Marks the hyper debug state as active.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @note This is used solely by NEM (hence the name) to set the correct flags here
 *       without loading the host's debug registers, which is not possible from ring-3 anyway.
 *       The specific NEM backends have to make sure to load the correct values.
 */
VMMR3_INT_DECL(void) CPUMR3NemActivateHyperDebugState(PVMCPUCC pVCpu)
{
    /* Atomically flip the use-flags: clear guest, set hyper. */
    ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_GUEST);
    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
}
#endif
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette