VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUM.cpp @ 54801

Last change on this file since 54801 was 54801, checked in by vboxsync, 10 years ago: "Forgot this one again."
1/* $Id: CPUM.cpp 54801 2015-03-16 21:36:12Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_cpum CPUM - CPU Monitor / Manager
19 *
20 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
21 * also responsible for lazy FPU handling and some of the context loading
22 * in raw mode.
23 *
24 * There are three CPU contexts, the most important one is the guest one (GC).
25 * When running in raw-mode (RC) there is a special hyper context for the VMM
26 * part that floats around inside the guest address space. When running in
27 * raw-mode, CPUM also maintains a host context for saving and restoring
28 * registers across world switches. This latter is done in cooperation with the
29 * world switcher (@see pg_vmm).
30 *
31 * @see grp_cpum
32 */
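/* [Editorial sketch, not part of the original file] Ring-3 code typically
 * reaches the guest context through the CPUM API, e.g.
 *
 *     PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
 *     uint64_t uGuestRip = pCtx->rip;
 *
 * (CPUMQueryGuestCtxPtr is used further down in this file; the hyper and host
 * contexts are assumed to be internal to CPUM and the world switcher.)
 */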
33
34/*******************************************************************************
35* Header Files *
36*******************************************************************************/
37#define LOG_GROUP LOG_GROUP_CPUM
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/cpumdis.h>
40#include <VBox/vmm/cpumctx-v1_6.h>
41#include <VBox/vmm/pgm.h>
42#include <VBox/vmm/pdmapi.h>
43#include <VBox/vmm/mm.h>
44#include <VBox/vmm/em.h>
45#include <VBox/vmm/selm.h>
46#include <VBox/vmm/dbgf.h>
47#include <VBox/vmm/patm.h>
48#include <VBox/vmm/hm.h>
49#include <VBox/vmm/ssm.h>
50#include "CPUMInternal.h"
51#include <VBox/vmm/vm.h>
52
53#include <VBox/param.h>
54#include <VBox/dis.h>
55#include <VBox/err.h>
56#include <VBox/log.h>
57#include <iprt/asm-amd64-x86.h>
58#include <iprt/assert.h>
59#include <iprt/cpuset.h>
60#include <iprt/mem.h>
61#include <iprt/mp.h>
62#include <iprt/string.h>
63#include "internal/pgm.h"
64
65
66/*******************************************************************************
67* Defined Constants And Macros *
68*******************************************************************************/
69/**
70 * This was used in the saved state up to the early life of version 14.
71 *
72 * It indicates that we may have some out-of-sync hidden segment registers.
73 * It is only relevant for raw-mode.
74 */
75#define CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID RT_BIT(12)
76
77
78/*******************************************************************************
79* Structures and Typedefs *
80*******************************************************************************/
81
82/**
83 * What kind of CPU info dump to perform.
84 */
85typedef enum CPUMDUMPTYPE
86{
87 CPUMDUMPTYPE_TERSE,
88 CPUMDUMPTYPE_DEFAULT,
89 CPUMDUMPTYPE_VERBOSE
90} CPUMDUMPTYPE;
91/** Pointer to a cpu info dump type. */
92typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;
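/* [Editorial sketch] A minimal, hypothetical helper (not the original argument
 * parser) showing how an info-handler argument string could select one of the
 * dump types above: */
#if 0 /* illustrative only */
static CPUMDUMPTYPE cpumR3SketchParseDumpType(const char *pszArgs)
{
    if (pszArgs && !strcmp(pszArgs, "verbose"))
        return CPUMDUMPTYPE_VERBOSE;
    if (pszArgs && !strcmp(pszArgs, "terse"))
        return CPUMDUMPTYPE_TERSE;
    return CPUMDUMPTYPE_DEFAULT; /* no or unrecognized argument */
}
#endif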
93
94
95/*******************************************************************************
96* Internal Functions *
97*******************************************************************************/
98static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
99static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
100static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM);
101static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
102static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
103static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
104static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
105static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
106static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
107static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
108
109
110/*******************************************************************************
111* Global Variables *
112*******************************************************************************/
113/** Saved state field descriptors for CPUMCTX. */
114static const SSMFIELD g_aCpumCtxFields[] =
115{
116 SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
117 SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
118 SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
119 SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
120 SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
121 SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
122 SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
123 SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
124 SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
125 SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
126 SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
127 SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
128 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
129 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
130 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
131 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
132 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
133 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
134 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
135 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
136 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
137 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
138 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
139 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
140 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
141 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
142 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
143 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
144 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
145 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
146 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
147 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
148 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
149 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
150 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
151 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
152 SSMFIELD_ENTRY( CPUMCTX, rdi),
153 SSMFIELD_ENTRY( CPUMCTX, rsi),
154 SSMFIELD_ENTRY( CPUMCTX, rbp),
155 SSMFIELD_ENTRY( CPUMCTX, rax),
156 SSMFIELD_ENTRY( CPUMCTX, rbx),
157 SSMFIELD_ENTRY( CPUMCTX, rdx),
158 SSMFIELD_ENTRY( CPUMCTX, rcx),
159 SSMFIELD_ENTRY( CPUMCTX, rsp),
160 SSMFIELD_ENTRY( CPUMCTX, rflags),
161 SSMFIELD_ENTRY( CPUMCTX, rip),
162 SSMFIELD_ENTRY( CPUMCTX, r8),
163 SSMFIELD_ENTRY( CPUMCTX, r9),
164 SSMFIELD_ENTRY( CPUMCTX, r10),
165 SSMFIELD_ENTRY( CPUMCTX, r11),
166 SSMFIELD_ENTRY( CPUMCTX, r12),
167 SSMFIELD_ENTRY( CPUMCTX, r13),
168 SSMFIELD_ENTRY( CPUMCTX, r14),
169 SSMFIELD_ENTRY( CPUMCTX, r15),
170 SSMFIELD_ENTRY( CPUMCTX, es.Sel),
171 SSMFIELD_ENTRY( CPUMCTX, es.ValidSel),
172 SSMFIELD_ENTRY( CPUMCTX, es.fFlags),
173 SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
174 SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
175 SSMFIELD_ENTRY( CPUMCTX, es.Attr),
176 SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
177 SSMFIELD_ENTRY( CPUMCTX, cs.ValidSel),
178 SSMFIELD_ENTRY( CPUMCTX, cs.fFlags),
179 SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
180 SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
181 SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
182 SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
183 SSMFIELD_ENTRY( CPUMCTX, ss.ValidSel),
184 SSMFIELD_ENTRY( CPUMCTX, ss.fFlags),
185 SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
186 SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
187 SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
188 SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
189 SSMFIELD_ENTRY( CPUMCTX, ds.ValidSel),
190 SSMFIELD_ENTRY( CPUMCTX, ds.fFlags),
191 SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
192 SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
193 SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
194 SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
195 SSMFIELD_ENTRY( CPUMCTX, fs.ValidSel),
196 SSMFIELD_ENTRY( CPUMCTX, fs.fFlags),
197 SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
198 SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
199 SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
200 SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
201 SSMFIELD_ENTRY( CPUMCTX, gs.ValidSel),
202 SSMFIELD_ENTRY( CPUMCTX, gs.fFlags),
203 SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
204 SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
205 SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
206 SSMFIELD_ENTRY( CPUMCTX, cr0),
207 SSMFIELD_ENTRY( CPUMCTX, cr2),
208 SSMFIELD_ENTRY( CPUMCTX, cr3),
209 SSMFIELD_ENTRY( CPUMCTX, cr4),
210 SSMFIELD_ENTRY( CPUMCTX, dr[0]),
211 SSMFIELD_ENTRY( CPUMCTX, dr[1]),
212 SSMFIELD_ENTRY( CPUMCTX, dr[2]),
213 SSMFIELD_ENTRY( CPUMCTX, dr[3]),
214 SSMFIELD_ENTRY( CPUMCTX, dr[6]),
215 SSMFIELD_ENTRY( CPUMCTX, dr[7]),
216 SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
217 SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
218 SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
219 SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
220 SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
221 SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
222 SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
223 SSMFIELD_ENTRY( CPUMCTX, msrEFER),
224 SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
225 SSMFIELD_ENTRY( CPUMCTX, msrPAT),
226 SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
227 SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
228 SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
229 SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
230 /* msrApicBase is not included here, it resides in the APIC device state. */
231 SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
232 SSMFIELD_ENTRY( CPUMCTX, ldtr.ValidSel),
233 SSMFIELD_ENTRY( CPUMCTX, ldtr.fFlags),
234 SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
235 SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
236 SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
237 SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
238 SSMFIELD_ENTRY( CPUMCTX, tr.ValidSel),
239 SSMFIELD_ENTRY( CPUMCTX, tr.fFlags),
240 SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
241 SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
242 SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
243 SSMFIELD_ENTRY_TERM()
244};
245
246/** Saved state field descriptors for CPUMCTX in V4.1 before the hidden selector
247 * registers changed. */
248static const SSMFIELD g_aCpumCtxFieldsMem[] =
249{
250 SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
251 SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
252 SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
253 SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
254 SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
255 SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
256 SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
257 SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
258 SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
259 SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
260 SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
261 SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
262 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
263 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
264 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
265 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
266 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
267 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
268 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
269 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
270 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
271 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
272 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
273 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
274 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
275 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
276 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
277 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
278 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
279 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
280 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
281 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
282 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
283 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
284 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
285 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
286 SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
287 SSMFIELD_ENTRY( CPUMCTX, rdi),
288 SSMFIELD_ENTRY( CPUMCTX, rsi),
289 SSMFIELD_ENTRY( CPUMCTX, rbp),
290 SSMFIELD_ENTRY( CPUMCTX, rax),
291 SSMFIELD_ENTRY( CPUMCTX, rbx),
292 SSMFIELD_ENTRY( CPUMCTX, rdx),
293 SSMFIELD_ENTRY( CPUMCTX, rcx),
294 SSMFIELD_ENTRY( CPUMCTX, rsp),
295 SSMFIELD_ENTRY_OLD( lss_esp, sizeof(uint32_t)),
296 SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
297 SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
298 SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
299 SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
300 SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
301 SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
302 SSMFIELD_ENTRY( CPUMCTX, es.Sel),
303 SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
304 SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
305 SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
306 SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
307 SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
308 SSMFIELD_ENTRY( CPUMCTX, rflags),
309 SSMFIELD_ENTRY( CPUMCTX, rip),
310 SSMFIELD_ENTRY( CPUMCTX, r8),
311 SSMFIELD_ENTRY( CPUMCTX, r9),
312 SSMFIELD_ENTRY( CPUMCTX, r10),
313 SSMFIELD_ENTRY( CPUMCTX, r11),
314 SSMFIELD_ENTRY( CPUMCTX, r12),
315 SSMFIELD_ENTRY( CPUMCTX, r13),
316 SSMFIELD_ENTRY( CPUMCTX, r14),
317 SSMFIELD_ENTRY( CPUMCTX, r15),
318 SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
319 SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
320 SSMFIELD_ENTRY( CPUMCTX, es.Attr),
321 SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
322 SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
323 SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
324 SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
325 SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
326 SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
327 SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
328 SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
329 SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
330 SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
331 SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
332 SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
333 SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
334 SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
335 SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
336 SSMFIELD_ENTRY( CPUMCTX, cr0),
337 SSMFIELD_ENTRY( CPUMCTX, cr2),
338 SSMFIELD_ENTRY( CPUMCTX, cr3),
339 SSMFIELD_ENTRY( CPUMCTX, cr4),
340 SSMFIELD_ENTRY( CPUMCTX, dr[0]),
341 SSMFIELD_ENTRY( CPUMCTX, dr[1]),
342 SSMFIELD_ENTRY( CPUMCTX, dr[2]),
343 SSMFIELD_ENTRY( CPUMCTX, dr[3]),
344 SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
345 SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
346 SSMFIELD_ENTRY( CPUMCTX, dr[6]),
347 SSMFIELD_ENTRY( CPUMCTX, dr[7]),
348 SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
349 SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
350 SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
351 SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
352 SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
353 SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
354 SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
355 SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
356 SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
357 SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
358 SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
359 SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
360 SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
361 SSMFIELD_ENTRY( CPUMCTX, msrEFER),
362 SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
363 SSMFIELD_ENTRY( CPUMCTX, msrPAT),
364 SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
365 SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
366 SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
367 SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
368 SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
369 SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
370 SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
371 SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
372 SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
373 SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
374 SSMFIELD_ENTRY_TERM()
375};
376
377/** Saved state field descriptors for CPUMCTX_VER1_6. */
378static const SSMFIELD g_aCpumCtxFieldsV16[] =
379{
380 SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
381 SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
382 SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
383 SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
384 SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
385 SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
386 SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
387 SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
388 SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
389 SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
390 SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
391 SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
392 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
393 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
394 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
395 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
396 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
397 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
398 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
399 SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
400 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
401 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
402 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
403 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
404 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
405 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
406 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
407 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
408 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
409 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
410 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
411 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
412 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
413 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
414 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
415 SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
416 SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
417 SSMFIELD_ENTRY( CPUMCTX, rdi),
418 SSMFIELD_ENTRY( CPUMCTX, rsi),
419 SSMFIELD_ENTRY( CPUMCTX, rbp),
420 SSMFIELD_ENTRY( CPUMCTX, rax),
421 SSMFIELD_ENTRY( CPUMCTX, rbx),
422 SSMFIELD_ENTRY( CPUMCTX, rdx),
423 SSMFIELD_ENTRY( CPUMCTX, rcx),
424 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, rsp),
425 SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
426 SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
427 SSMFIELD_ENTRY_OLD( CPUMCTX, sizeof(uint64_t) /*rsp_notused*/),
428 SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
429 SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
430 SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
431 SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
432 SSMFIELD_ENTRY( CPUMCTX, es.Sel),
433 SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
434 SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
435 SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
436 SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
437 SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
438 SSMFIELD_ENTRY( CPUMCTX, rflags),
439 SSMFIELD_ENTRY( CPUMCTX, rip),
440 SSMFIELD_ENTRY( CPUMCTX, r8),
441 SSMFIELD_ENTRY( CPUMCTX, r9),
442 SSMFIELD_ENTRY( CPUMCTX, r10),
443 SSMFIELD_ENTRY( CPUMCTX, r11),
444 SSMFIELD_ENTRY( CPUMCTX, r12),
445 SSMFIELD_ENTRY( CPUMCTX, r13),
446 SSMFIELD_ENTRY( CPUMCTX, r14),
447 SSMFIELD_ENTRY( CPUMCTX, r15),
448 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, es.u64Base),
449 SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
450 SSMFIELD_ENTRY( CPUMCTX, es.Attr),
451 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, cs.u64Base),
452 SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
453 SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
454 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ss.u64Base),
455 SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
456 SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
457 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ds.u64Base),
458 SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
459 SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
460 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, fs.u64Base),
461 SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
462 SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
463 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gs.u64Base),
464 SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
465 SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
466 SSMFIELD_ENTRY( CPUMCTX, cr0),
467 SSMFIELD_ENTRY( CPUMCTX, cr2),
468 SSMFIELD_ENTRY( CPUMCTX, cr3),
469 SSMFIELD_ENTRY( CPUMCTX, cr4),
470 SSMFIELD_ENTRY_OLD( cr8, sizeof(uint64_t)),
471 SSMFIELD_ENTRY( CPUMCTX, dr[0]),
472 SSMFIELD_ENTRY( CPUMCTX, dr[1]),
473 SSMFIELD_ENTRY( CPUMCTX, dr[2]),
474 SSMFIELD_ENTRY( CPUMCTX, dr[3]),
475 SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
476 SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
477 SSMFIELD_ENTRY( CPUMCTX, dr[6]),
478 SSMFIELD_ENTRY( CPUMCTX, dr[7]),
479 SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
480 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gdtr.pGdt),
481 SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
482 SSMFIELD_ENTRY_OLD( gdtrPadding64, sizeof(uint64_t)),
483 SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
484 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, idtr.pIdt),
485 SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
486 SSMFIELD_ENTRY_OLD( idtrPadding64, sizeof(uint64_t)),
487 SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
488 SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
489 SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
490 SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
491 SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
492 SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
493 SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
494 SSMFIELD_ENTRY( CPUMCTX, msrEFER),
495 SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
496 SSMFIELD_ENTRY( CPUMCTX, msrPAT),
497 SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
498 SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
499 SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
500 SSMFIELD_ENTRY_OLD( msrFSBASE, sizeof(uint64_t)),
501 SSMFIELD_ENTRY_OLD( msrGSBASE, sizeof(uint64_t)),
502 SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
503 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ldtr.u64Base),
504 SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
505 SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
506 SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, tr.u64Base),
507 SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
508 SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
509 SSMFIELD_ENTRY_OLD( padding, sizeof(uint32_t)*2),
510 SSMFIELD_ENTRY_TERM()
511};
512
513
514/**
515 * Checks for partial/leaky FXSAVE/FXRSTOR handling on AMD CPUs.
516 *
517 * AMD K7, K8 and newer AMD CPUs do not save/restore the x87 error
518 * pointers (last instruction pointer, last data pointer, last opcode)
519 * except when the ES bit (Exception Summary) in x87 FSW (FPU Status
520 * Word) is set. Thus if we don't clear these registers there is a
521 * potential local leak of FPU state from one process using the FPU
522 * to another.
523 *
524 * See AMD Instruction Reference for FXSAVE, FXRSTOR.
525 *
526 * @param pVM Pointer to the VM.
527 */
528static void cpumR3CheckLeakyFpu(PVM pVM)
529{
530 uint32_t u32CpuVersion = ASMCpuId_EAX(1);
531 uint32_t const u32Family = u32CpuVersion >> 8;
532 if ( u32Family >= 6 /* K7 and higher */
533 && ASMIsAmdCpu())
534 {
535 uint32_t cExt = ASMCpuId_EAX(0x80000000);
536 if (ASMIsValidExtRange(cExt))
537 {
538 uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
539 if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
540 {
541 for (VMCPUID i = 0; i < pVM->cCpus; i++)
542 pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_FFXSR_LEAKY;
543 Log(("CPUMR3Init: host CPU has leaky fxsave/fxrstor behaviour\n"));
544 }
545 }
546 }
547}
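/* [Editorial sketch; assumption] The actual consumer of CPUM_USE_FFXSR_LEAKY
 * lives in the FPU save/restore path, not in this file. Conceptually, after a
 * save on a flagged CPU the x87 error pointers would be scrubbed when FSW.ES is
 * clear, since FXSAVE did not overwrite them: */
#if 0 /* illustrative only; pFpuCtx stands in for a CPUMCTX::fpu pointer */
if (   (pVCpu->cpum.s.fUseFlags & CPUM_USE_FFXSR_LEAKY)
    && !(pFpuCtx->FSW & X86_FSW_ES))
{
    pFpuCtx->FPUIP = 0; pFpuCtx->CS = 0; /* last instruction pointer */
    pFpuCtx->FPUDP = 0; pFpuCtx->DS = 0; /* last data pointer */
}
#endif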
548
549
550/**
551 * Initializes the CPUM.
552 *
553 * @returns VBox status code.
554 * @param pVM Pointer to the VM.
555 */
556VMMR3DECL(int) CPUMR3Init(PVM pVM)
557{
558 LogFlow(("CPUMR3Init\n"));
559
560 /*
561 * Assert alignment, sizes and tables.
562 */
563 AssertCompileMemberAlignment(VM, cpum.s, 32);
564 AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
565 AssertCompileSizeAlignment(CPUMCTX, 64);
566 AssertCompileSizeAlignment(CPUMCTXMSRS, 64);
567 AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
568 AssertCompileMemberAlignment(VM, cpum, 64);
569 AssertCompileMemberAlignment(VM, aCpus, 64);
570 AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
571 AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);
572#ifdef VBOX_STRICT
573 int rc2 = cpumR3MsrStrictInitChecks();
574 AssertRCReturn(rc2, rc2);
575#endif
576
577 /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
578 pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
579 Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum);
580
581
582 /* Calculate the offset from CPUMCPU to CPUM. */
583 for (VMCPUID i = 0; i < pVM->cCpus; i++)
584 {
585 PVMCPU pVCpu = &pVM->aCpus[i];
586
587 pVCpu->cpum.s.offCPUM = RT_OFFSETOF(VM, aCpus[i].cpum) - RT_OFFSETOF(VM, cpum);
588 Assert((uintptr_t)&pVCpu->cpum - pVCpu->cpum.s.offCPUM == (uintptr_t)&pVM->cpum);
589 }
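/* [Editorial note, mirroring the assert above] These offsets let code holding
 * only a per-VCPU CPUMCPU pointer recover the shared CPUM data without a VM
 * pointer:
 *     (uintptr_t)&pVM->cpum == (uintptr_t)&pVCpu->cpum - pVCpu->cpum.s.offCPUM
 */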
590
591 /*
592 * Check that the CPU supports the minimum features we require.
593 */
594 if (!ASMHasCpuId())
595 {
596 Log(("The CPU doesn't support CPUID!\n"));
597 return VERR_UNSUPPORTED_CPU;
598 }
599 ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
600 ASMCpuId_ECX_EDX(0x80000001, &pVM->cpum.s.CPUFeaturesExt.ecx, &pVM->cpum.s.CPUFeaturesExt.edx);
601
602 /* Setup the CR4 AND and OR masks used in the switcher */
603 /* Depends on the presence of FXSAVE(SSE) support on the host CPU */
604 if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
605 {
606 Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
607 /* No FXSAVE implies no SSE */
608 pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
609 pVM->cpum.s.CR4.OrMask = 0;
610 }
611 else
612 {
613 pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
614 pVM->cpum.s.CR4.OrMask = X86_CR4_OSFSXR;
615 }
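/* [Editorial sketch; assumption -- the consumer is the assembly world switcher,
 * not this file] The masks are assumed to be folded into a CR4 value in the
 * canonical AND/OR way:
 *     uCr4 = (uCr4 & pVM->cpum.s.CR4.AndMask) | pVM->cpum.s.CR4.OrMask;
 * so the OSFXSR bit is forced on when the host supports FXSAVE/FXRSTOR and is
 * never set when it doesn't. */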
616
617 if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
618 {
619 Log(("The CPU doesn't support MMX!\n"));
620 return VERR_UNSUPPORTED_CPU;
621 }
622 if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
623 {
624 Log(("The CPU doesn't support TSC!\n"));
625 return VERR_UNSUPPORTED_CPU;
626 }
627 /* Bogus on AMD? */
628 if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
629 Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));
630
631 /*
632 * Gather info about the host CPU.
633 */
634 PCPUMCPUIDLEAF paLeaves;
635 uint32_t cLeaves;
636 int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
637 AssertLogRelRCReturn(rc, rc);
638
639 rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures);
640 RTMemFree(paLeaves);
641 AssertLogRelRCReturn(rc, rc);
642 pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
643
644 /*
645 * Setup hypervisor startup values.
646 */
647
648 /*
649 * Register saved state data item.
650 */
651 rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
652 NULL, cpumR3LiveExec, NULL,
653 NULL, cpumR3SaveExec, NULL,
654 cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
655 if (RT_FAILURE(rc))
656 return rc;
657
658 /*
659 * Register info handlers and registers with the debugger facility.
660 */
661 DBGFR3InfoRegisterInternal(pVM, "cpum", "Displays the all the cpu states.", &cpumR3InfoAll);
662 DBGFR3InfoRegisterInternal(pVM, "cpumguest", "Displays the guest cpu state.", &cpumR3InfoGuest);
663 DBGFR3InfoRegisterInternal(pVM, "cpumhyper", "Displays the hypervisor cpu state.", &cpumR3InfoHyper);
664 DBGFR3InfoRegisterInternal(pVM, "cpumhost", "Displays the host cpu state.", &cpumR3InfoHost);
665 DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.", &cpumR3CpuIdInfo);
666 DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr", "Displays the current guest instruction.", &cpumR3InfoGuestInstr);
667
668 rc = cpumR3DbgInit(pVM);
669 if (RT_FAILURE(rc))
670 return rc;
671
672 /*
673 * Check if we need to workaround partial/leaky FPU handling.
674 */
675 cpumR3CheckLeakyFpu(pVM);
676
677 /*
678 * Initialize the Guest CPUID and MSR states.
679 */
680 rc = cpumR3InitCpuIdAndMsrs(pVM);
681 if (RT_FAILURE(rc))
682 return rc;
683 CPUMR3Reset(pVM);
684 return VINF_SUCCESS;
685}
686
687
688/**
689 * Applies relocations to data and code managed by this
690 * component. This function will be called at init and
691 * whenever the VMM needs to relocate itself inside the GC.
692 *
693 * The CPUM will update the addresses used by the switcher.
694 *
695 * @param pVM The VM.
696 */
697VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
698{
699 LogFlow(("CPUMR3Relocate\n"));
700
701 pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paMsrRangesR3);
702 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
703
704 /* Recheck the guest DRx values in raw-mode. */
705 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
706 CPUMRecalcHyperDRx(&pVM->aCpus[iCpu], UINT8_MAX, false);
707}
708
709
710/**
711 * Apply late CPUM property changes based on the fHWVirtEx setting
712 *
713 * @param pVM Pointer to the VM.
714 * @param fHWVirtExEnabled HWVirtEx enabled/disabled
715 */
716VMMR3DECL(void) CPUMR3SetHWVirtEx(PVM pVM, bool fHWVirtExEnabled)
717{
718 /*
719 * Workaround for missing cpuid(0) patches when leaf 4 returns GuestInfo.DefCpuId:
720 * If we fail to patch cpuid(0).eax then Linux tries to determine the number
721 * of processors from (cpuid(4).eax >> 26) + 1.
722 *
723 * Note: this code is obsolete, but let's keep it here for reference.
724 * The purpose is still valid when we artificially cap the max std id to less than 4.
725 */
726 if (!fHWVirtExEnabled)
727 {
728 Assert( (pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax & UINT32_C(0xffffc000)) == 0
729 || pVM->cpum.s.aGuestCpuIdPatmStd[0].uEax < 0x4);
730 pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax &= UINT32_C(0x00003fff);
731 }
732}
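/* [Editorial worked example; illustrative values, not from the original] With
 * cpuid(4).eax = 0x0C000121 a Linux guest would compute (0x0C000121 >> 26) + 1
 * = 4 processors; after masking with 0x00003fff the same formula yields 1. */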
733
734/**
735 * Terminates the CPUM.
736 *
737 * Termination means cleaning up and freeing all resources;
738 * the VM itself is at this point powered off or suspended.
739 *
740 * @returns VBox status code.
741 * @param pVM Pointer to the VM.
742 */
743VMMR3DECL(int) CPUMR3Term(PVM pVM)
744{
745#ifdef VBOX_WITH_CRASHDUMP_MAGIC
746 for (VMCPUID i = 0; i < pVM->cCpus; i++)
747 {
748 PVMCPU pVCpu = &pVM->aCpus[i];
749 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
750
751 memset(pVCpu->cpum.s.aMagic, 0, sizeof(pVCpu->cpum.s.aMagic));
752 pVCpu->cpum.s.uMagic = 0;
753 pCtx->dr[5] = 0;
754 }
755#else
756 NOREF(pVM);
757#endif
758 return VINF_SUCCESS;
759}
760
761
762/**
763 * Resets a virtual CPU.
764 *
765 * Used by CPUMR3Reset and CPU hot plugging.
766 *
767 * @param pVM Pointer to the cross context VM structure.
768 * @param pVCpu Pointer to the cross context virtual CPU structure of
769 * the CPU that is being reset. This may differ from the
770 * current EMT.
771 */
772VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
773{
774 /** @todo anything different for VCPU > 0? */
775 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
776
777 /*
778 * Initialize everything to ZERO first.
779 */
780 uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
781 memset(pCtx, 0, sizeof(*pCtx));
782 pVCpu->cpum.s.fUseFlags = fUseFlags;
783
784 pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET; //0x60000010
785 pCtx->eip = 0x0000fff0;
786 pCtx->edx = 0x00000600; /* P6 processor */
787 pCtx->eflags.Bits.u1Reserved0 = 1;
788
789 pCtx->cs.Sel = 0xf000;
790 pCtx->cs.ValidSel = 0xf000;
791 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
792 pCtx->cs.u64Base = UINT64_C(0xffff0000);
793 pCtx->cs.u32Limit = 0x0000ffff;
794 pCtx->cs.Attr.n.u1DescType = 1; /* code/data segment */
795 pCtx->cs.Attr.n.u1Present = 1;
796 pCtx->cs.Attr.n.u4Type = X86_SEL_TYPE_ER_ACC;
797
798 pCtx->ds.fFlags = CPUMSELREG_FLAGS_VALID;
799 pCtx->ds.u32Limit = 0x0000ffff;
800 pCtx->ds.Attr.n.u1DescType = 1; /* code/data segment */
801 pCtx->ds.Attr.n.u1Present = 1;
802 pCtx->ds.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
803
804 pCtx->es.fFlags = CPUMSELREG_FLAGS_VALID;
805 pCtx->es.u32Limit = 0x0000ffff;
806 pCtx->es.Attr.n.u1DescType = 1; /* code/data segment */
807 pCtx->es.Attr.n.u1Present = 1;
808 pCtx->es.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
809
810 pCtx->fs.fFlags = CPUMSELREG_FLAGS_VALID;
811 pCtx->fs.u32Limit = 0x0000ffff;
812 pCtx->fs.Attr.n.u1DescType = 1; /* code/data segment */
813 pCtx->fs.Attr.n.u1Present = 1;
814 pCtx->fs.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
815
816 pCtx->gs.fFlags = CPUMSELREG_FLAGS_VALID;
817 pCtx->gs.u32Limit = 0x0000ffff;
818 pCtx->gs.Attr.n.u1DescType = 1; /* code/data segment */
819 pCtx->gs.Attr.n.u1Present = 1;
820 pCtx->gs.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
821
822 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
823 pCtx->ss.u32Limit = 0x0000ffff;
824 pCtx->ss.Attr.n.u1Present = 1;
825 pCtx->ss.Attr.n.u1DescType = 1; /* code/data segment */
826 pCtx->ss.Attr.n.u4Type = X86_SEL_TYPE_RW_ACC;
827
828 pCtx->idtr.cbIdt = 0xffff;
829 pCtx->gdtr.cbGdt = 0xffff;
830
831 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
832 pCtx->ldtr.u32Limit = 0xffff;
833 pCtx->ldtr.Attr.n.u1Present = 1;
834 pCtx->ldtr.Attr.n.u4Type = X86_SEL_TYPE_SYS_LDT;
835
836 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
837 pCtx->tr.u32Limit = 0xffff;
838 pCtx->tr.Attr.n.u1Present = 1;
839 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY; /* Deduction, not properly documented by Intel. */
840
841 pCtx->dr[6] = X86_DR6_INIT_VAL;
842 pCtx->dr[7] = X86_DR7_INIT_VAL;
843
844 pCtx->fpu.FTW = 0x00; /* All empty (abbridged tag reg edition). */
845 pCtx->fpu.FCW = 0x37f;
846
847 /* Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A, Table 8-1.
848 IA-32 Processor States Following Power-up, Reset, or INIT */
849 pCtx->fpu.MXCSR = 0x1F80;
850 pCtx->fpu.MXCSR_MASK = 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really
851 supports all bits, since a zero value here should be read as 0xffbf. */
852
853 /*
854 * MSRs.
855 */
856 /* Init PAT MSR */
857 pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */
858
859 /* EFER MBZ; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State.
860 * The Intel docs don't mention it. */
861 Assert(!pCtx->msrEFER);
862
863 /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really
864 is supposed to be here, just trying to provide useful/sensible values. */
865 PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE);
866 if (pRange)
867 {
868 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
869 | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
870 | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? MSR_IA32_MISC_ENABLE_MONITOR : 0)
871 | MSR_IA32_MISC_ENABLE_FAST_STRINGS;
872 pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
873 | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
874 pRange->fWrGpMask &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
875 }
876
877 /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */
878
879 /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
880 * called from each EMT while we're getting called by CPUMR3Reset()
881 * iteratively on the same thread. Fix later. */
882#if 0 /** @todo r=bird: This we will do in TM, not here. */
883 /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
884 CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
885#endif
886
887
888 /* C-state control. Guesses. */
889 pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
890
891
892 /*
893 * Get the APIC base MSR from the APIC device. For historical reasons (saved state), the APIC base
894 * continues to reside in the APIC device and we cache it here in the VCPU for all further accesses.
895 */
896 PDMApicGetBase(pVCpu, &pCtx->msrApicBase);
897}
898
899
900/**
901 * Resets the CPU.
902 *
903 * @returns VINF_SUCCESS.
904 * @param pVM Pointer to the VM.
905 */
906VMMR3DECL(void) CPUMR3Reset(PVM pVM)
907{
908 for (VMCPUID i = 0; i < pVM->cCpus; i++)
909 {
910 CPUMR3ResetCpu(pVM, &pVM->aCpus[i]);
911
912#ifdef VBOX_WITH_CRASHDUMP_MAGIC
913 PCPUMCTX pCtx = &pVM->aCpus[i].cpum.s.Guest;
914
915 /* Magic marker for searching in crash dumps. */
916 strcpy((char *)pVM->aCpus[i].cpum.s.aMagic, "CPUMCPU Magic");
917 pVM->aCpus[i].cpum.s.uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
918 pCtx->dr[5] = UINT64_C(0xDEADBEEFDEADBEEF);
919#endif
920 }
921}
922
923
924
925
926/**
927 * Pass 0 live exec callback.
928 *
929 * @returns VINF_SSM_DONT_CALL_AGAIN.
930 * @param pVM Pointer to the VM.
931 * @param pSSM The saved state handle.
932 * @param uPass The pass (0).
933 */
934static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
935{
936 AssertReturn(uPass == 0, VERR_SSM_UNEXPECTED_PASS);
937 cpumR3SaveCpuId(pVM, pSSM);
938 return VINF_SSM_DONT_CALL_AGAIN;
939}
940
941
942/**
943 * Execute state save operation.
944 *
945 * @returns VBox status code.
946 * @param pVM Pointer to the VM.
947 * @param pSSM SSM operation handle.
948 */
949static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
950{
951 /*
952 * Save.
953 */
954 for (VMCPUID i = 0; i < pVM->cCpus; i++)
955 {
956 PVMCPU pVCpu = &pVM->aCpus[i];
957 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), 0, g_aCpumCtxFields, NULL);
958 }
959
960 SSMR3PutU32(pSSM, pVM->cCpus);
961 SSMR3PutU32(pSSM, sizeof(pVM->aCpus[0].cpum.s.GuestMsrs.msr));
962 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
963 {
964 PVMCPU pVCpu = &pVM->aCpus[iCpu];
965
966 SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), 0, g_aCpumCtxFields, NULL);
967 SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
968 SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
969 AssertCompileSizeAlignment(pVCpu->cpum.s.GuestMsrs.msr, sizeof(uint64_t));
970 SSMR3PutMem(pSSM, &pVCpu->cpum.s.GuestMsrs, sizeof(pVCpu->cpum.s.GuestMsrs.msr));
971 }
972
973 cpumR3SaveCpuId(pVM, pSSM);
974 return VINF_SUCCESS;
975}
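/* [Editorial note] Resulting saved-state unit layout, summarizing the code
 * above:
 *     cCpus  x hyper CPUMCTX                  (g_aCpumCtxFields)
 *     uint32 cCpus
 *     uint32 cbMsrs
 *     cCpus  x { guest CPUMCTX, uint32 fUseFlags, uint32 fChanged,
 *                cbMsrs bytes of MSRs }
 *     CPUID data                              (cpumR3SaveCpuId)
 */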
976
977
978/**
979 * @copydoc FNSSMINTLOADPREP
980 */
981static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
982{
983 NOREF(pSSM);
984 pVM->cpum.s.fPendingRestore = true;
985 return VINF_SUCCESS;
986}
987
988
989/**
990 * @copydoc FNSSMINTLOADEXEC
991 */
992static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
993{
994 /*
995 * Validate version.
996 */
997 if ( uVersion != CPUM_SAVED_STATE_VERSION
998 && uVersion != CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT
999 && uVersion != CPUM_SAVED_STATE_VERSION_PUT_STRUCT
1000 && uVersion != CPUM_SAVED_STATE_VERSION_MEM
1001 && uVersion != CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE
1002 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_2
1003 && uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
1004 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
1005 && uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
1006 && uVersion != CPUM_SAVED_STATE_VERSION_VER1_6)
1007 {
1008 AssertMsgFailed(("cpumR3LoadExec: Invalid version uVersion=%d!\n", uVersion));
1009 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1010 }
1011
1012 if (uPass == SSM_PASS_FINAL)
1013 {
1014 /*
1015 * Set the size of RTGCPTR for SSMR3GetGCPtr. (Only necessary for
1016 * really old SSM file versions.)
1017 */
1018 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
1019 SSMR3HandleSetGCPtrSize(pSSM, sizeof(RTGCPTR32));
1020 else if (uVersion <= CPUM_SAVED_STATE_VERSION_VER3_0)
1021 SSMR3HandleSetGCPtrSize(pSSM, HC_ARCH_BITS == 32 ? sizeof(RTGCPTR32) : sizeof(RTGCPTR));
1022
1023 uint32_t const fLoad = uVersion > CPUM_SAVED_STATE_VERSION_MEM ? 0 : SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED;
1024 PCSSMFIELD paCpumCtxFields = g_aCpumCtxFields;
1025 if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
1026 paCpumCtxFields = g_aCpumCtxFieldsV16;
1027 else if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
1028 paCpumCtxFields = g_aCpumCtxFieldsMem;
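/* [Editorial note] Summary of the selection above -- saved-state version to
 * field table:
 *     VER1_6          -> g_aCpumCtxFieldsV16 (32-bit GC pointers, zero extended)
 *     <= VERSION_MEM  -> g_aCpumCtxFieldsMem (old raw-memory layout + band-aid load flags)
 *     newer           -> g_aCpumCtxFields    (explicit field descriptors)
 */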
1029
1030 /*
1031 * Restore.
1032 */
1033 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1034 {
1035 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1036 uint64_t uCR3 = pVCpu->cpum.s.Hyper.cr3;
1037 uint64_t uRSP = pVCpu->cpum.s.Hyper.rsp; /* see VMMR3Relocate(). */
1038 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), fLoad, paCpumCtxFields, NULL);
1039 pVCpu->cpum.s.Hyper.cr3 = uCR3;
1040 pVCpu->cpum.s.Hyper.rsp = uRSP;
1041 }
1042
1043 if (uVersion >= CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR)
1044 {
1045 uint32_t cCpus;
1046 int rc = SSMR3GetU32(pSSM, &cCpus); AssertRCReturn(rc, rc);
1047 AssertLogRelMsgReturn(cCpus == pVM->cCpus, ("Mismatching CPU counts: saved: %u; configured: %u \n", cCpus, pVM->cCpus),
1048 VERR_SSM_UNEXPECTED_DATA);
1049 }
1050 AssertLogRelMsgReturn( uVersion > CPUM_SAVED_STATE_VERSION_VER2_0
1051 || pVM->cCpus == 1,
1052 ("cCpus=%u\n", pVM->cCpus),
1053 VERR_SSM_UNEXPECTED_DATA);
1054
1055 uint32_t cbMsrs = 0;
1056 if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
1057 {
1058 int rc = SSMR3GetU32(pSSM, &cbMsrs); AssertRCReturn(rc, rc);
1059 AssertLogRelMsgReturn(RT_ALIGN(cbMsrs, sizeof(uint64_t)) == cbMsrs, ("Size of MSRs is misaligned: %#x\n", cbMsrs),
1060 VERR_SSM_UNEXPECTED_DATA);
1061 AssertLogRelMsgReturn(cbMsrs <= sizeof(CPUMCTXMSRS) && cbMsrs > 0, ("Size of MSRs is out of range: %#x\n", cbMsrs),
1062 VERR_SSM_UNEXPECTED_DATA);
1063 }
1064
1065 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1066 {
1067 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1068 SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), fLoad,
1069 paCpumCtxFields, NULL);
1070 SSMR3GetU32(pSSM, &pVCpu->cpum.s.fUseFlags);
1071 SSMR3GetU32(pSSM, &pVCpu->cpum.s.fChanged);
1072 if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
1073 SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], cbMsrs);
1074 else if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_0)
1075 {
1076 SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], 2 * sizeof(uint64_t)); /* Restore two MSRs. */
1077 SSMR3Skip(pSSM, 62 * sizeof(uint64_t));
1078 }
1079
1080 /* REM and other may have cleared must-be-one fields in DR6 and
1081 DR7, fix these. */
1082 pVCpu->cpum.s.Guest.dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
1083 pVCpu->cpum.s.Guest.dr[6] |= X86_DR6_RA1_MASK;
1084 pVCpu->cpum.s.Guest.dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
1085 pVCpu->cpum.s.Guest.dr[7] |= X86_DR7_RA1_MASK;
1086 }
1087
1088 /* Older states do not have the internal selector register flags
1089 and valid selector values. Supply those. */
1090 if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
1091 {
1092 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1093 {
1094 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1095 bool const fValid = HMIsEnabled(pVM)
1096 || ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
1097 && !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID));
1098 PCPUMSELREG paSelReg = CPUMCTX_FIRST_SREG(&pVCpu->cpum.s.Guest);
1099 if (fValid)
1100 {
1101 for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
1102 {
1103 paSelReg[iSelReg].fFlags = CPUMSELREG_FLAGS_VALID;
1104 paSelReg[iSelReg].ValidSel = paSelReg[iSelReg].Sel;
1105 }
1106
1107 pVCpu->cpum.s.Guest.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1108 pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
1109 }
1110 else
1111 {
1112 for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
1113 {
1114 paSelReg[iSelReg].fFlags = 0;
1115 paSelReg[iSelReg].ValidSel = 0;
1116 }
1117
1118 /* This might not be 104% correct, but I think it's close
1119 enough for all practical purposes... (REM always loaded
1120 LDTR registers.) */
1121 pVCpu->cpum.s.Guest.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1122 pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
1123 }
1124 pVCpu->cpum.s.Guest.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1125 pVCpu->cpum.s.Guest.tr.ValidSel = pVCpu->cpum.s.Guest.tr.Sel;
1126 }
1127 }
1128
1129 /* Clear CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID. */
1130 if ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
1131 && uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
1132 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1133 pVM->aCpus[iCpu].cpum.s.fChanged &= CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
1134
1135 /*
1136 * A quick sanity check.
1137 */
1138 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1139 {
1140 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1141 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.es.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1142 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.cs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1143 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ss.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1144 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ds.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1145 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.fs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1146 AssertLogRelReturn(!(pVCpu->cpum.s.Guest.gs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
1147 }
1148 }
1149
1150 pVM->cpum.s.fPendingRestore = false;
1151
1152 /*
1153 * Guest CPUIDs.
1154 */
1155 if (uVersion > CPUM_SAVED_STATE_VERSION_VER3_0)
1156 return cpumR3LoadCpuId(pVM, pSSM, uVersion);
1157
1158 /** @todo Merge the code below into cpumR3LoadCpuId when we've found out what is
1159 * actually required. */
1160
1161 /*
1162 * Restore the CPUID leaves.
1163 *
1164 * Note that we support restoring fewer than the current number of standard
1165 * leaves because we've been allowed more in newer versions of VBox.
1166 */
1167 uint32_t cElements;
1168 int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1169 if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd))
1170 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1171 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdPatmStd[0]));
1172
1173 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1174 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmExt))
1175 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1176 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmExt[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmExt));
1177
1178 rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
1179 if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmCentaur))
1180 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1181 SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmCentaur));
1182
1183 SSMR3GetMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));
1184
1185 /*
1186 * Check that the basic CPUID information is unchanged.
1187 */
1188 /** @todo we should check the 64-bit capabilities too! */
1189 uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
1190 ASMCpuIdExSlow(0, 0, 0, 0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
1191 ASMCpuIdExSlow(1, 0, 0, 0, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
1192 uint32_t au32CpuIdSaved[8];
1193 rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
1194 if (RT_SUCCESS(rc))
1195 {
1196 /* Ignore CPU stepping. */
1197 au32CpuId[4] &= 0xfffffff0;
1198 au32CpuIdSaved[4] &= 0xfffffff0;
1199
1200 /* Ignore APIC ID (AMD specs). */
1201 au32CpuId[5] &= ~0xff000000;
1202 au32CpuIdSaved[5] &= ~0xff000000;
1203
1204 /* Ignore the number of Logical CPUs (AMD specs). */
1205 au32CpuId[5] &= ~0x00ff0000;
1206 au32CpuIdSaved[5] &= ~0x00ff0000;
1207
1208 /* Ignore some advanced capability bits that we don't expose to the guest. */
1209 au32CpuId[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
1210 | X86_CPUID_FEATURE_ECX_VMX
1211 | X86_CPUID_FEATURE_ECX_SMX
1212 | X86_CPUID_FEATURE_ECX_EST
1213 | X86_CPUID_FEATURE_ECX_TM2
1214 | X86_CPUID_FEATURE_ECX_CNTXID
1215 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1216 | X86_CPUID_FEATURE_ECX_PDCM
1217 | X86_CPUID_FEATURE_ECX_DCA
1218 | X86_CPUID_FEATURE_ECX_X2APIC
1219 );
1220 au32CpuIdSaved[6] &= ~( X86_CPUID_FEATURE_ECX_DTES64
1221 | X86_CPUID_FEATURE_ECX_VMX
1222 | X86_CPUID_FEATURE_ECX_SMX
1223 | X86_CPUID_FEATURE_ECX_EST
1224 | X86_CPUID_FEATURE_ECX_TM2
1225 | X86_CPUID_FEATURE_ECX_CNTXID
1226 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1227 | X86_CPUID_FEATURE_ECX_PDCM
1228 | X86_CPUID_FEATURE_ECX_DCA
1229 | X86_CPUID_FEATURE_ECX_X2APIC
1230 );
1231
1232 /* Make sure we don't forget to update the masks when enabling
1233 * features in the future.
1234 */
1235 AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx &
1236 ( X86_CPUID_FEATURE_ECX_DTES64
1237 | X86_CPUID_FEATURE_ECX_VMX
1238 | X86_CPUID_FEATURE_ECX_SMX
1239 | X86_CPUID_FEATURE_ECX_EST
1240 | X86_CPUID_FEATURE_ECX_TM2
1241 | X86_CPUID_FEATURE_ECX_CNTXID
1242 | X86_CPUID_FEATURE_ECX_TPRUPDATE
1243 | X86_CPUID_FEATURE_ECX_PDCM
1244 | X86_CPUID_FEATURE_ECX_DCA
1245 | X86_CPUID_FEATURE_ECX_X2APIC
1246 )));
1247 /* do the compare */
1248 if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
1249 {
1250 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
1251 LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
1252 "Saved=%.*Rhxs\n"
1253 "Real =%.*Rhxs\n",
1254 sizeof(au32CpuIdSaved), au32CpuIdSaved,
1255 sizeof(au32CpuId), au32CpuId));
1256 else
1257 {
1258 LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
1259 "Saved=%.*Rhxs\n"
1260 "Real =%.*Rhxs\n",
1261 sizeof(au32CpuIdSaved), au32CpuIdSaved,
1262 sizeof(au32CpuId), au32CpuId));
1263 rc = VERR_SSM_LOAD_CPUID_MISMATCH;
1264 }
1265 }
1266 }
1267
1268 return rc;
1269}
1270
1271
1272/**
1273 * @copydoc FNSSMINTLOADDONE
1274 */
1275static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
1276{
1277 if (RT_FAILURE(SSMR3HandleGetStatus(pSSM)))
1278 return VINF_SUCCESS;
1279
1280 /* just check this since we can. */ /** @todo Add a SSM unit flag for indicating that it's mandatory during a restore. */
1281 if (pVM->cpum.s.fPendingRestore)
1282 {
1283 LogRel(("CPUM: Missing state!\n"));
1284 return VERR_INTERNAL_ERROR_2;
1285 }
1286
1287 bool const fSupportsLongMode = VMR3IsLongModeAllowed(pVM);
1288 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1289 {
1290 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1291
1292 /* Notify PGM of the NXE states in case they've changed. */
1293 PGMNotifyNxeChanged(pVCpu, RT_BOOL(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
1294
1295 /* Cache the local APIC base from the APIC device. During init. this is done in CPUMR3ResetCpu(). */
1296 PDMApicGetBase(pVCpu, &pVCpu->cpum.s.Guest.msrApicBase);
1297
1298 /* During init. this is done in CPUMR3InitCompleted(). */
1299 if (fSupportsLongMode)
1300 pVCpu->cpum.s.fUseFlags |= CPUM_USE_SUPPORTS_LONGMODE;
1301 }
1302 return VINF_SUCCESS;
1303}
1304
1305
1306/**
1307 * Checks if the CPUM state restore is still pending.
1308 *
1309 * @returns true / false.
1310 * @param pVM Pointer to the VM.
1311 */
1312VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM)
1313{
1314 return pVM->cpum.s.fPendingRestore;
1315}
1316
1317
1318/**
1319 * Formats the EFLAGS value into mnemonics.
1320 *
1321 * @param pszEFlags Where to write the mnemonics. (Assumes sufficient buffer space.)
1322 * @param efl The EFLAGS value.
1323 */
1324static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
1325{
1326 /*
1327 * Format the flags.
1328 */
1329 static const struct
1330 {
1331 const char *pszSet; const char *pszClear; uint32_t fFlag;
1332 } s_aFlags[] =
1333 {
1334 { "vip",NULL, X86_EFL_VIP },
1335 { "vif",NULL, X86_EFL_VIF },
1336 { "ac", NULL, X86_EFL_AC },
1337 { "vm", NULL, X86_EFL_VM },
1338 { "rf", NULL, X86_EFL_RF },
1339 { "nt", NULL, X86_EFL_NT },
1340 { "ov", "nv", X86_EFL_OF },
1341 { "dn", "up", X86_EFL_DF },
1342 { "ei", "di", X86_EFL_IF },
1343 { "tf", NULL, X86_EFL_TF },
1344 { "nt", "pl", X86_EFL_SF },
1345 { "nz", "zr", X86_EFL_ZF },
1346 { "ac", "na", X86_EFL_AF },
1347 { "po", "pe", X86_EFL_PF },
1348 { "cy", "nc", X86_EFL_CF },
1349 };
1350 char *psz = pszEFlags;
1351 for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1352 {
1353 const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
1354 if (pszAdd)
1355 {
1356 strcpy(psz, pszAdd);
1357 psz += strlen(pszAdd);
1358 *psz++ = ' ';
1359 }
1360 }
1361 psz[-1] = '\0';
1362}
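/* [Editorial worked example, derived by hand from the table above]
 * efl = 0x00000246 formats as "nv up ei pl zr na pe nc", matching the classic
 * DOS DEBUG register mnemonics. */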
1363
1364
1365/**
1366 * Formats a full register dump.
1367 *
1368 * @param pVM Pointer to the VM.
1369 * @param pCtx The context to format.
1370 * @param pCtxCore The context core to format.
1371 * @param pHlp Output functions.
1372 * @param enmType The dump type.
1373 * @param pszPrefix Register name prefix.
1374 */
1375static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType,
1376 const char *pszPrefix)
1377{
1378 NOREF(pVM);
1379
1380 /*
1381 * Format the EFLAGS.
1382 */
1383 uint32_t efl = pCtxCore->eflags.u32;
1384 char szEFlags[80];
1385 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1386
1387 /*
1388 * Format the registers.
1389 */
1390 switch (enmType)
1391 {
1392 case CPUMDUMPTYPE_TERSE:
1393 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1394 pHlp->pfnPrintf(pHlp,
1395 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1396 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1397 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1398 "%sr14=%016RX64 %sr15=%016RX64\n"
1399 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1400 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1401 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1402 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1403 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1404 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1405 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
1406 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
1407 else
1408 pHlp->pfnPrintf(pHlp,
1409 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1410 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1411 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
1412 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1413 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1414 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
1415 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
1416 break;
1417
1418 case CPUMDUMPTYPE_DEFAULT:
1419 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1420 pHlp->pfnPrintf(pHlp,
1421 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1422 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1423 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1424 "%sr14=%016RX64 %sr15=%016RX64\n"
1425 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1426 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1427 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%016RX64:%04x %sldtr=%04x\n"
1428 ,
1429 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1430 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1431 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1432 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1433 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
1434 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
1435 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1436 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
1437 else
1438 pHlp->pfnPrintf(pHlp,
1439 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1440 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1441 "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
1442 "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%08RX64:%04x %sldtr=%04x\n"
1443 ,
1444 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1445 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1446 pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
1447 pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
1448 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1449 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
1450 break;
1451
1452 case CPUMDUMPTYPE_VERBOSE:
1453 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1454 pHlp->pfnPrintf(pHlp,
1455 "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
1456 "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
1457 "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
1458 "%sr14=%016RX64 %sr15=%016RX64\n"
1459 "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
1460 "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1461 "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1462 "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1463 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1464 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1465 "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
1466 "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
1467 "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
1468 "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
1469 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1470 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1471 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1472 "%sSysEnter={cs=%04llx eip=%016RX64 esp=%016RX64}\n"
1473 ,
1474 pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
1475 pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
1476 pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
1477 pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1478 pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u,
1479 pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u,
1480 pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u,
1481 pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u,
1482 pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u,
1483 pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u,
1484 pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1485 pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1], pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
1486 pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5], pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
1487 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1488 pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
1489 pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
1490 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1491 else
1492 pHlp->pfnPrintf(pHlp,
1493 "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
1494 "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
1495 "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
1496 "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
1497 "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
1498 "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
1499 "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
1500 "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
1501 "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
1502 "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1503 "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
1504 "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
1505 ,
1506 pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
1507 pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
1508 pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u, pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1],
1509 pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u, pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
1510 pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u, pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5],
1511 pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u, pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
1512 pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
1513 pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1514 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1515 pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
1516 pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
1517 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1518
1519 pHlp->pfnPrintf(pHlp,
1520 "%sFCW=%04x %sFSW=%04x %sFTW=%04x %sFOP=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
1521 "%sFPUIP=%08x %sCS=%04x %sRsrvd1=%04x %sFPUDP=%08x %sDS=%04x %sRsvrd2=%04x\n"
1522 ,
1523 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW, pszPrefix, pCtx->fpu.FOP,
1524 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK,
1525 pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsrvd1,
1526 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2
1527 );
1528 unsigned iShift = (pCtx->fpu.FSW >> 11) & 7;
1529 for (unsigned iST = 0; iST < RT_ELEMENTS(pCtx->fpu.aRegs); iST++)
1530 {
1531 unsigned iFPR = (iST + iShift) % RT_ELEMENTS(pCtx->fpu.aRegs);
1532 unsigned uTag = pCtx->fpu.FTW & (1 << iFPR) ? 1 : 0;
1533 char chSign = pCtx->fpu.aRegs[iFPR].au16[4] & 0x8000 ? '-' : '+';
1534 unsigned iInteger = (unsigned)(pCtx->fpu.aRegs[iFPR].au64[0] >> 63);
1535 uint64_t u64Fraction = pCtx->fpu.aRegs[iFPR].au64[0] & UINT64_C(0x7fffffffffffffff);
1536 unsigned uExponent = pCtx->fpu.aRegs[iFPR].au16[4] & 0x7fff;
1537 /** @todo This isn't entirely correct and needs more work! */
1538 pHlp->pfnPrintf(pHlp,
1539 "%sST(%u)=%sFPR%u={%04RX16'%08RX32'%08RX32} t%d %c%u.%022llu ^ %u",
1540 pszPrefix, iST, pszPrefix, iFPR,
1541 pCtx->fpu.aRegs[iFPR].au16[4], pCtx->fpu.aRegs[iFPR].au32[1], pCtx->fpu.aRegs[iFPR].au32[0],
1542 uTag, chSign, iInteger, u64Fraction, uExponent);
1543 if (pCtx->fpu.aRegs[iFPR].au16[5] || pCtx->fpu.aRegs[iFPR].au16[6] || pCtx->fpu.aRegs[iFPR].au16[7])
1544 pHlp->pfnPrintf(pHlp, " res={%04RX16,%04RX16,%04RX16}\n",
1545 pCtx->fpu.aRegs[iFPR].au16[5], pCtx->fpu.aRegs[iFPR].au16[6], pCtx->fpu.aRegs[iFPR].au16[7]);
1546 else
1547 pHlp->pfnPrintf(pHlp, "\n");
1548 }
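 /* Worked example (editor's note, not in the original source): the FSW
  * TOP field (bits 11-13) supplies iShift, so with TOP=5 the loop above
  * maps ST(0)->FPR5, ST(1)->FPR6, ST(2)->FPR7 and ST(3)->FPR0, i.e.
  * iFPR = (iST + iShift) % 8. */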
1549 for (unsigned iXMM = 0; iXMM < RT_ELEMENTS(pCtx->fpu.aXMM); iXMM++)
1550 pHlp->pfnPrintf(pHlp,
1551 iXMM & 1
1552 ? "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32\n"
1553 : "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32 ",
1554 pszPrefix, iXMM, iXMM < 10 ? " " : "",
1555 pCtx->fpu.aXMM[iXMM].au32[3],
1556 pCtx->fpu.aXMM[iXMM].au32[2],
1557 pCtx->fpu.aXMM[iXMM].au32[1],
1558 pCtx->fpu.aXMM[iXMM].au32[0]);
1559 for (unsigned i = 0; i < RT_ELEMENTS(pCtx->fpu.au32RsrvdRest); i++)
1560 if (pCtx->fpu.au32RsrvdRest[i])
1561 pHlp->pfnPrintf(pHlp, "%sRsrvdRest[i]=%RX32 (offset=%#x)\n",
1562 pszPrefix, i, pCtx->fpu.au32RsrvdRest[i], RT_OFFSETOF(X86FXSTATE, au32RsrvdRest[i]) );
1563
1564 pHlp->pfnPrintf(pHlp,
1565 "%sEFER =%016RX64\n"
1566 "%sPAT =%016RX64\n"
1567 "%sSTAR =%016RX64\n"
1568 "%sCSTAR =%016RX64\n"
1569 "%sLSTAR =%016RX64\n"
1570 "%sSFMASK =%016RX64\n"
1571 "%sKERNELGSBASE =%016RX64\n",
1572 pszPrefix, pCtx->msrEFER,
1573 pszPrefix, pCtx->msrPAT,
1574 pszPrefix, pCtx->msrSTAR,
1575 pszPrefix, pCtx->msrCSTAR,
1576 pszPrefix, pCtx->msrLSTAR,
1577 pszPrefix, pCtx->msrSFMASK,
1578 pszPrefix, pCtx->msrKERNELGSBASE);
1579 break;
1580 }
1581}
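/* Editor's note: these dump routines are normally reached through the DBGF
 * info handlers that CPUM registers, e.g. DBGFR3Info(pVM->pUVM, "cpumguest",
 * "verbose", pHlp) or the corresponding debugger command; see cpumR3InfoGuest
 * and friends below. */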
1582
1583
1584/**
1585 * Display all cpu states and any other cpum info.
1586 *
1587 * @param pVM Pointer to the VM.
1588 * @param pHlp The info helper functions.
1589 * @param pszArgs Arguments, ignored.
1590 */
1591static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1592{
1593 cpumR3InfoGuest(pVM, pHlp, pszArgs);
1594 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
1595 cpumR3InfoHyper(pVM, pHlp, pszArgs);
1596 cpumR3InfoHost(pVM, pHlp, pszArgs);
1597}
1598
1599
1600/**
1601 * Parses the info argument.
1602 *
1603 * The argument starts with 'verbose', 'terse' or 'default' and then
1604 * continues with the comment string.
1605 *
1606 * @param pszArgs The pointer to the argument string.
1607 * @param penmType Where to store the dump type request.
1608 * @param ppszComment Where to store the pointer to the comment string.
1609 */
1610static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
1611{
1612 if (!pszArgs)
1613 {
1614 *penmType = CPUMDUMPTYPE_DEFAULT;
1615 *ppszComment = "";
1616 }
1617 else
1618 {
1619 if (!strncmp(pszArgs, RT_STR_TUPLE("verbose")))
1620 {
1621 pszArgs += 7;
1622 *penmType = CPUMDUMPTYPE_VERBOSE;
1623 }
1624 else if (!strncmp(pszArgs, RT_STR_TUPLE("terse")))
1625 {
1626 pszArgs += 5;
1627 *penmType = CPUMDUMPTYPE_TERSE;
1628 }
1629 else if (!strncmp(pszArgs, RT_STR_TUPLE("default")))
1630 {
1631 pszArgs += 7;
1632 *penmType = CPUMDUMPTYPE_DEFAULT;
1633 }
1634 else
1635 *penmType = CPUMDUMPTYPE_DEFAULT;
1636 *ppszComment = RTStrStripL(pszArgs);
1637 }
1638}
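/* Example (editor's sketch): given pszArgs = "verbose from REM", the parser
 * above returns *penmType = CPUMDUMPTYPE_VERBOSE and *ppszComment = "from REM"
 * (RTStrStripL drops the leading blank); an unrecognized prefix falls back to
 * CPUMDUMPTYPE_DEFAULT with the whole stripped string as the comment. */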
1639
1640
1641/**
1642 * Display the guest cpu state.
1643 *
1644 * @param pVM Pointer to the VM.
1645 * @param pHlp The info helper functions.
1646 * @param pszArgs Arguments, ignored.
1647 */
1648static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1649{
1650 CPUMDUMPTYPE enmType;
1651 const char *pszComment;
1652 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1653
1654 /** @todo SMP support! */
1655 PVMCPU pVCpu = VMMGetCpu(pVM);
1656 if (!pVCpu)
1657 pVCpu = &pVM->aCpus[0];
1658
1659 pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
1660
1661 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1662 cpumR3InfoOne(pVM, pCtx, CPUMCTX2CORE(pCtx), pHlp, enmType, "");
1663}
1664
1665
1666/**
1667 * Display the current guest instruction.
1668 *
1669 * @param pVM Pointer to the VM.
1670 * @param pHlp The info helper functions.
1671 * @param pszArgs Arguments, ignored.
1672 */
1673static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1674{
1675 NOREF(pszArgs);
1676
1677 /** @todo SMP support! */
1678 PVMCPU pVCpu = VMMGetCpu(pVM);
1679 if (!pVCpu)
1680 pVCpu = &pVM->aCpus[0];
1681
1682 char szInstruction[256];
1683 szInstruction[0] = '\0';
1684 DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
1685 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
1686}
1687
1688
1689/**
1690 * Display the hypervisor cpu state.
1691 *
1692 * @param pVM Pointer to the VM.
1693 * @param pHlp The info helper functions.
1694 * @param pszArgs Arguments, ignored.
1695 */
1696static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1697{
1698 CPUMDUMPTYPE enmType;
1699 const char *pszComment;
1700 /** @todo SMP */
1701 PVMCPU pVCpu = &pVM->aCpus[0];
1702
1703 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1704 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
1705 cpumR3InfoOne(pVM, &pVCpu->cpum.s.Hyper, CPUMCTX2CORE(&pVCpu->cpum.s.Hyper), pHlp, enmType, ".");
1706 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
1707}
1708
1709
1710/**
1711 * Display the host cpu state.
1712 *
1713 * @param pVM Pointer to the VM.
1714 * @param pHlp The info helper functions.
1715 * @param pszArgs Arguments, ignored.
1716 */
1717static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1718{
1719 CPUMDUMPTYPE enmType;
1720 const char *pszComment;
1721 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1722 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
1723
1724 /*
1725 * Format the EFLAGS.
1726 */
1727 /** @todo SMP */
1728 PCPUMHOSTCTX pCtx = &pVM->aCpus[0].cpum.s.Host;
1729#if HC_ARCH_BITS == 32
1730 uint32_t efl = pCtx->eflags.u32;
1731#else
1732 uint64_t efl = pCtx->rflags;
1733#endif
1734 char szEFlags[80];
1735 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1736
1737 /*
1738 * Format the registers.
1739 */
1740#if HC_ARCH_BITS == 32
1741# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1742 if (!(pCtx->efer & MSR_K6_EFER_LMA))
1743# endif
1744 {
1745 pHlp->pfnPrintf(pHlp,
1746 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
1747 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
1748 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
1749 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
1750 "dr[0]=%08RX64 dr[1]=%08RX64x dr[2]=%08RX64 dr[3]=%08RX64x dr[6]=%08RX64 dr[7]=%08RX64\n"
1751 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1752 ,
1753 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
1754 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
1755 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
1756 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
1757 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
1758 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->ldtr,
1759 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1760 }
1761# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1762 else
1763# endif
1764#endif
1765#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1766 {
1767 pHlp->pfnPrintf(pHlp,
1768 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
1769 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
1770 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
1771 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
1772 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1773 "r14=%016RX64 r15=%016RX64\n"
1774 "iopl=%d %31s\n"
1775 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
1776 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
1777 "cr4=%016RX64 ldtr=%04x tr=%04x\n"
1778 "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
1779 "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
1780 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
1781 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1782 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
1783 ,
1784 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
1785 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
1786 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
1787 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
1788 pCtx->r11, pCtx->r12, pCtx->r13,
1789 pCtx->r14, pCtx->r15,
1790 X86_EFL_GET_IOPL(efl), szEFlags,
1791 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
1792 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
1793 pCtx->cr4, pCtx->ldtr, pCtx->tr,
1794 pCtx->dr0, pCtx->dr1, pCtx->dr2,
1795 pCtx->dr3, pCtx->dr6, pCtx->dr7,
1796 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
1797 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
1798 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
1799 }
1800#endif
1801}
1802
1803/**
1804 * Structure used when disassembling instructions in DBGF.
1805 * This is used so the reader function can get the stuff it needs.
1806 */
1807typedef struct CPUMDISASSTATE
1808{
1809 /** Pointer to the CPU structure. */
1810 PDISCPUSTATE pCpu;
1811 /** Pointer to the VM. */
1812 PVM pVM;
1813 /** Pointer to the VMCPU. */
1814 PVMCPU pVCpu;
1815 /** Pointer to the first byte in the segment. */
1816 RTGCUINTPTR GCPtrSegBase;
1817 /** Pointer to the byte after the end of the segment. (might have wrapped!) */
1818 RTGCUINTPTR GCPtrSegEnd;
1819 /** The size of the segment minus 1. */
1820 RTGCUINTPTR cbSegLimit;
1821 /** Pointer to the current page - R3 Ptr. */
1822 void const *pvPageR3;
1823 /** Pointer to the current page - GC Ptr. */
1824 RTGCPTR pvPageGC;
1825 /** The lock information that PGMPhysReleasePageMappingLock needs. */
1826 PGMPAGEMAPLOCK PageMapLock;
1827 /** Whether the PageMapLock is valid or not. */
1828 bool fLocked;
1829 /** 64 bits mode or not. */
1830 bool f64Bits;
1831} CPUMDISASSTATE, *PCPUMDISASSTATE;
1832
1833
1834/**
1835 * @callback_method_impl{FNDISREADBYTES}
1836 */
1837static DECLCALLBACK(int) cpumR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
1838{
1839 PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pDis->pvUser;
1840 for (;;)
1841 {
1842 RTGCUINTPTR GCPtr = pDis->uInstrAddr + offInstr + pState->GCPtrSegBase;
1843
1844 /*
1845 * Need to update the page translation?
1846 */
1847 if ( !pState->pvPageR3
1848 || (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
1849 {
1850 int rc = VINF_SUCCESS;
1851
1852 /* translate the address */
1853 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
1854 if ( !HMIsEnabled(pState->pVM)
1855 && MMHyperIsInsideArea(pState->pVM, pState->pvPageGC))
1856 {
1857 pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC);
1858 if (!pState->pvPageR3)
1859 rc = VERR_INVALID_POINTER;
1860 }
1861 else
1862 {
1863 /* Release mapping lock previously acquired. */
1864 if (pState->fLocked)
1865 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
1866 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->pvPageGC, &pState->pvPageR3, &pState->PageMapLock);
1867 pState->fLocked = RT_SUCCESS_NP(rc);
1868 }
1869 if (RT_FAILURE(rc))
1870 {
1871 pState->pvPageR3 = NULL;
1872 return rc;
1873 }
1874 }
1875
1876 /*
1877 * Check the segment limit.
1878 */
1879 if (!pState->f64Bits && pDis->uInstrAddr + offInstr > pState->cbSegLimit)
1880 return VERR_OUT_OF_SELECTOR_BOUNDS;
1881
1882 /*
1883 * Calc how much we can read.
1884 */
1885 uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
1886 if (!pState->f64Bits)
1887 {
1888 RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
1889 if (cb > cbSeg && cbSeg)
1890 cb = cbSeg;
1891 }
1892 if (cb > cbMaxRead)
1893 cb = cbMaxRead;
1894
1895 /*
1896 * Read and advance or exit.
1897 */
1898 memcpy(&pDis->abInstr[offInstr], (uint8_t *)pState->pvPageR3 + (GCPtr & PAGE_OFFSET_MASK), cb);
1899 offInstr += (uint8_t)cb;
1900 if (cb >= cbMinRead)
1901 {
1902 pDis->cbCachedInstr = offInstr;
1903 return VINF_SUCCESS;
1904 }
1905 cbMinRead -= (uint8_t)cb;
1906 cbMaxRead -= (uint8_t)cb;
1907 }
1908}
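/* Editor's note: the read loop above also copes with instructions straddling
 * a page boundary: the first pass copies the bytes up to the end of the page,
 * the next iteration remaps the following page and copies the remainder until
 * at least cbMinRead bytes have been fetched. */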
1909
1910
1911/**
1912 * Disassemble an instruction and return the information in the provided structure.
1913 *
1914 * @returns VBox status code.
1915 * @param pVM Pointer to the VM.
1916 * @param pVCpu Pointer to the VMCPU.
1917 * @param pCtx Pointer to the guest CPU context.
1918 * @param GCPtrPC Program counter (relative to CS) to disassemble from.
1919 * @param pCpu Disassembly state.
1920 * @param pszPrefix String prefix for logging (debug only).
1921 *
1922 */
1923VMMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
1924{
1925 CPUMDISASSTATE State;
1926 int rc;
1927
1928 const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
1929 State.pCpu = pCpu;
1930 State.pvPageGC = 0;
1931 State.pvPageR3 = NULL;
1932 State.pVM = pVM;
1933 State.pVCpu = pVCpu;
1934 State.fLocked = false;
1935 State.f64Bits = false;
1936
1937 /*
1938 * Get selector information.
1939 */
1940 DISCPUMODE enmDisCpuMode;
1941 if ( (pCtx->cr0 & X86_CR0_PE)
1942 && pCtx->eflags.Bits.u1VM == 0)
1943 {
1944 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
1945 {
1946# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1947 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtx->cs);
1948# endif
1949 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
1950 return VERR_CPUM_HIDDEN_CS_LOAD_ERROR;
1951 }
1952 State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->cs.Attr.n.u1Long;
1953 State.GCPtrSegBase = pCtx->cs.u64Base;
1954 State.GCPtrSegEnd = pCtx->cs.u32Limit + 1 + (RTGCUINTPTR)pCtx->cs.u64Base;
1955 State.cbSegLimit = pCtx->cs.u32Limit;
1956 enmDisCpuMode = (State.f64Bits)
1957 ? DISCPUMODE_64BIT
1958 : pCtx->cs.Attr.n.u1DefBig
1959 ? DISCPUMODE_32BIT
1960 : DISCPUMODE_16BIT;
1961 }
1962 else
1963 {
1964 /* real or V86 mode */
1965 enmDisCpuMode = DISCPUMODE_16BIT;
1966 State.GCPtrSegBase = pCtx->cs.Sel * 16;
1967 State.GCPtrSegEnd = 0xFFFFFFFF;
1968 State.cbSegLimit = 0xFFFFFFFF;
1969 }
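 /* Example (editor's note): in real mode a CS selector of 0x1234 gives a
  * segment base of 0x12340 (Sel * 16); the 0xFFFFFFFF values above
  * effectively disable the segment limit checks. */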
1970
1971 /*
1972 * Disassemble the instruction.
1973 */
1974 uint32_t cbInstr;
1975#ifndef LOG_ENABLED
1976 rc = DISInstrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State, pCpu, &cbInstr);
1977 if (RT_SUCCESS(rc))
1978 {
1979#else
1980 char szOutput[160];
1981 rc = DISInstrToStrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State,
1982 pCpu, &cbInstr, szOutput, sizeof(szOutput));
1983 if (RT_SUCCESS(rc))
1984 {
1985 /* log it */
1986 if (pszPrefix)
1987 Log(("%s-CPU%d: %s", pszPrefix, pVCpu->idCpu, szOutput));
1988 else
1989 Log(("%s", szOutput));
1990#endif
1991 rc = VINF_SUCCESS;
1992 }
1993 else
1994 Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%RGv rc=%Rrc\n", pCtx->cs.Sel, GCPtrPC, rc));
1995
1996 /* Release mapping lock acquired in cpumR3DisasInstrRead. */
1997 if (State.fLocked)
1998 PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
1999
2000 return rc;
2001}
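/*
 * Example call (editor's sketch; the variable names are illustrative):
 *     DISCPUSTATE Cpu;
 *     int rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "EMU");
 * On success, Cpu describes the instruction at CS:RIP (opcode, operands,
 * length) and, with logging enabled, the disassembly is logged with the
 * "EMU" prefix.
 */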
2002
2003
2004
2005/**
2006 * API for controlling a few of the CPU features found in CR4.
2007 *
2008 * Currently only X86_CR4_TSD is accepted as input.
2009 *
2010 * @returns VBox status code.
2011 *
2012 * @param pVM Pointer to the VM.
2013 * @param fOr The CR4 OR mask.
2014 * @param fAnd The CR4 AND mask.
2015 */
2016VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
2017{
2018 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
2019 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
2020
2021 pVM->cpum.s.CR4.OrMask &= fAnd;
2022 pVM->cpum.s.CR4.OrMask |= fOr;
2023
2024 return VINF_SUCCESS;
2025}
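/*
 * Usage sketch (editor's addition): to make RDTSC trap at CPL > 0, set
 * CR4.TSD while leaving the other mask bits alone:
 *     CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~(RTHCUINTREG)0);
 * and clear it again with:
 *     CPUMR3SetCR4Feature(pVM, 0, ~(RTHCUINTREG)X86_CR4_TSD);
 */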
2026
2027
2028/**
2029 * Enters REM, gets and resets the changed flags (CPUM_CHANGED_*).
2030 *
2031 * Only REM should ever call this function!
2032 *
2033 * @returns The changed flags.
2034 * @param pVCpu Pointer to the VMCPU.
2035 * @param puCpl Where to return the current privilege level (CPL).
2036 */
2037VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl)
2038{
2039 Assert(!pVCpu->cpum.s.fRawEntered);
2040 Assert(!pVCpu->cpum.s.fRemEntered);
2041
2042 /*
2043 * Get the CPL first.
2044 */
2045 *puCpl = CPUMGetGuestCPL(pVCpu);
2046
2047 /*
2048 * Get and reset the flags.
2049 */
2050 uint32_t fFlags = pVCpu->cpum.s.fChanged;
2051 pVCpu->cpum.s.fChanged = 0;
2052
2053 /** @todo change the switcher to use the fChanged flags. */
2054 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
2055 {
2056 fFlags |= CPUM_CHANGED_FPU_REM;
2057 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
2058 }
2059
2060 pVCpu->cpum.s.fRemEntered = true;
2061 return fFlags;
2062}
2063
2064
2065/**
2066 * Leaves REM.
2067 *
2068 * @param pVCpu Pointer to the VMCPU.
2069 * @param fNoOutOfSyncSels This is @c false if there are out-of-sync
2070 * selector registers.
2071 */
2072VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels)
2073{
2074 Assert(!pVCpu->cpum.s.fRawEntered);
2075 Assert(pVCpu->cpum.s.fRemEntered);
2076
2077 pVCpu->cpum.s.fRemEntered = false;
2078}
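/*
 * Usage sketch (editor's addition): REM is expected to bracket its register
 * import with these two calls:
 *     uint32_t uCpl;
 *     uint32_t fChanged = CPUMR3RemEnter(pVCpu, &uCpl);
 *     // sync the CPUM_CHANGED_* flagged state into REM...
 *     CPUMR3RemLeave(pVCpu, true); // no out-of-sync selectors
 */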
2079
2080
2081/**
2082 * Called when the ring-3 init phase completes.
2083 *
2084 * @returns VBox status code.
2085 * @param pVM Pointer to the VM.
2086 */
2087VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM)
2088{
2089 /*
2090 * Figure out if the guest uses 32-bit or 64-bit FPU state at runtime for 64-bit capable VMs.
2091 * Only applicable/used on 64-bit hosts; see CPUMR0A.asm and @bugref{7138}.
2092 */
2093 bool const fSupportsLongMode = VMR3IsLongModeAllowed(pVM);
2094 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2095 {
2096 PVMCPU pVCpu = &pVM->aCpus[i];
2097
2098 /* Cache the APIC base (from the APIC device) once it has been initialized. */
2099 PDMApicGetBase(pVCpu, &pVCpu->cpum.s.Guest.msrApicBase);
2100 Log(("CPUMR3InitCompleted pVM=%p APIC base[%u]=%RX64\n", pVM, (unsigned)i, pVCpu->cpum.s.Guest.msrApicBase));
2101
2102 /* While loading a saved state we fix it up in cpumR3LoadDone(). */
2103 if (fSupportsLongMode)
2104 pVCpu->cpum.s.fUseFlags |= CPUM_USE_SUPPORTS_LONGMODE;
2105 }
2106 return VINF_SUCCESS;
2107}
2108
2109
2110/**
2111 * Called when the ring-0 init phase completes.
2112 *
2113 * @param pVM Pointer to the VM.
2114 */
2115VMMR3DECL(void) CPUMR3LogCpuIds(PVM pVM)
2116{
2117 /*
2118 * Log the cpuid.
2119 */
2120 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2121 RTCPUSET OnlineSet;
2122 LogRel(("Logical host processors: %u present, %u max, %u online, online mask: %016RX64\n",
2123 (unsigned)RTMpGetPresentCount(), (unsigned)RTMpGetCount(), (unsigned)RTMpGetOnlineCount(),
2124 RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
2125 RTCPUID cCores = RTMpGetCoreCount();
2126 if (cCores)
2127 LogRel(("Physical host cores: %u\n", (unsigned)cCores));
2128 LogRel(("************************* CPUID dump ************************\n"));
2129 DBGFR3Info(pVM->pUVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
2130 LogRel(("\n"));
2131 DBGFR3_INFO_LOG(pVM, "cpuid", "verbose"); /* macro */
2132 RTLogRelSetBuffering(fOldBuffered);
2133 LogRel(("******************** End of CPUID dump **********************\n"));
2134}
2135