VirtualBox

source: vbox/trunk/include/VBox/vmm/cpum.h@80293

Last change on this file since 80293 was 80293, checked in by vboxsync, 5 years ago

VMM(CPUM),DevPcBios: Added CPUM methods for getting the guest (and host) microarchitecture so DevPcBios doesn't need to include vm.h. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 104.0 KB
/** @file
 * CPUM - CPU Monitor(/ Manager).
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef VBOX_INCLUDED_vmm_cpum_h
#define VBOX_INCLUDED_vmm_cpum_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <iprt/x86.h>
#include <VBox/types.h>
#ifdef RT_ARCH_AMD64
# include <VBox/vmm/cpumctx.h>
#endif
#include <VBox/vmm/stam.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/vmm/hm_vmx.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_cpum      The CPU Monitor / Manager API
 * @ingroup grp_vmm
 * @{
 */

/**
 * CPUID feature to set or clear.
 */
typedef enum CPUMCPUIDFEATURE
{
    CPUMCPUIDFEATURE_INVALID = 0,
    /** The APIC feature bit. (Std+Ext)
     * Note! There is a per-CPU flag for masking this CPUID feature bit when the
     *       APICBASE.ENABLED bit is zero.  So, this feature is only set/cleared
     *       at VM construction time like all the others.  This was not the case
     *       prior to 5.1. */
    CPUMCPUIDFEATURE_APIC,
    /** The sysenter/sysexit feature bit. (Std) */
    CPUMCPUIDFEATURE_SEP,
    /** The SYSCALL/SYSRET feature bit (64-bit mode only for Intel CPUs). (Ext) */
    CPUMCPUIDFEATURE_SYSCALL,
    /** The PAE feature bit. (Std+Ext) */
    CPUMCPUIDFEATURE_PAE,
    /** The NX feature bit. (Ext) */
    CPUMCPUIDFEATURE_NX,
    /** The LAHF/SAHF feature bit (64-bit mode only). (Ext) */
    CPUMCPUIDFEATURE_LAHF,
    /** The LONG MODE feature bit. (Ext) */
    CPUMCPUIDFEATURE_LONG_MODE,
    /** The PAT feature bit. (Std+Ext) */
    CPUMCPUIDFEATURE_PAT,
    /** The x2APIC feature bit. (Std) */
    CPUMCPUIDFEATURE_X2APIC,
    /** The RDTSCP feature bit. (Ext) */
    CPUMCPUIDFEATURE_RDTSCP,
    /** The Hypervisor Present bit. (Std) */
    CPUMCPUIDFEATURE_HVP,
    /** The MWait Extensions bits. (Std) */
    CPUMCPUIDFEATURE_MWAIT_EXTS,
    /** The speculation control feature bits. (StExt) */
    CPUMCPUIDFEATURE_SPEC_CTRL,
    /** 32bit hackishness. */
    CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
} CPUMCPUIDFEATURE;
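
/*
 * Illustrative usage sketch (not part of the original header): features from
 * the enum above are set/cleared at VM construction time using the
 * CPUMR3*GuestCpuIdFeature functions declared further down in this file.
 * Assuming a valid ring-3 PVM handle:
 *
 * @code
 *     CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);  // expose NX/XD
 *     if (!CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX))
 *         LogRel(("CPUM: NX could not be exposed to the guest\n"));
 * @endcode
 */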

/**
 * CPU Vendor.
 */
typedef enum CPUMCPUVENDOR
{
    CPUMCPUVENDOR_INVALID = 0,
    CPUMCPUVENDOR_INTEL,
    CPUMCPUVENDOR_AMD,
    CPUMCPUVENDOR_VIA,
    CPUMCPUVENDOR_CYRIX,
    CPUMCPUVENDOR_SHANGHAI,
    CPUMCPUVENDOR_UNKNOWN,
    /** 32bit hackishness. */
    CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
} CPUMCPUVENDOR;
105
106/**
107 * X86 and AMD64 CPU microarchitectures and in processor generations.
108 *
109 * @remarks The separation here is sometimes a little bit too finely grained,
110 * and the differences is more like processor generation than micro
111 * arch. This can be useful, so we'll provide functions for getting at
112 * more coarse grained info.
113 */
typedef enum CPUMMICROARCH
{
    kCpumMicroarch_Invalid = 0,

    kCpumMicroarch_Intel_First,

    kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
    kCpumMicroarch_Intel_80186,
    kCpumMicroarch_Intel_80286,
    kCpumMicroarch_Intel_80386,
    kCpumMicroarch_Intel_80486,
    kCpumMicroarch_Intel_P5,

    kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6_II,
    kCpumMicroarch_Intel_P6_III,

    kCpumMicroarch_Intel_P6_M_Banias,
    kCpumMicroarch_Intel_P6_M_Dothan,
    kCpumMicroarch_Intel_Core_Yonah,        /**< Core, also known as Enhanced Pentium M. */

    kCpumMicroarch_Intel_Core2_First,
    kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First, /**< 65nm, Merom/Conroe/Kentsfield/Tigerton */
    kCpumMicroarch_Intel_Core2_Penryn,      /**< 45nm, Penryn/Wolfdale/Yorkfield/Harpertown */
    kCpumMicroarch_Intel_Core2_End,

    kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Westmere,
    kCpumMicroarch_Intel_Core7_SandyBridge,
    kCpumMicroarch_Intel_Core7_IvyBridge,
    kCpumMicroarch_Intel_Core7_Haswell,
    kCpumMicroarch_Intel_Core7_Broadwell,
    kCpumMicroarch_Intel_Core7_Skylake,
    kCpumMicroarch_Intel_Core7_KabyLake,
    kCpumMicroarch_Intel_Core7_CoffeeLake,
    kCpumMicroarch_Intel_Core7_WhiskeyLake,
    kCpumMicroarch_Intel_Core7_CascadeLake,
    kCpumMicroarch_Intel_Core7_CannonLake,
    kCpumMicroarch_Intel_Core7_IceLake,
    kCpumMicroarch_Intel_Core7_TigerLake,
    kCpumMicroarch_Intel_Core7_End,

    kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Lincroft,     /**< Second generation Bonnell (44nm). */
    kCpumMicroarch_Intel_Atom_Saltwell,     /**< 32nm shrink of Bonnell. */
    kCpumMicroarch_Intel_Atom_Silvermont,   /**< 22nm */
    kCpumMicroarch_Intel_Atom_Airmount,     /**< 14nm */
    kCpumMicroarch_Intel_Atom_Goldmont,     /**< 14nm */
    kCpumMicroarch_Intel_Atom_GoldmontPlus, /**< 14nm */
    kCpumMicroarch_Intel_Atom_Unknown,
    kCpumMicroarch_Intel_Atom_End,


    kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsFerry = kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsCorner,
    kCpumMicroarch_Intel_Phi_KnightsLanding,
    kCpumMicroarch_Intel_Phi_KnightsHill,
    kCpumMicroarch_Intel_Phi_KnightsMill,
    kCpumMicroarch_Intel_Phi_End,

    kCpumMicroarch_Intel_P6_Core_Atom_End,

    kCpumMicroarch_Intel_NB_First,
    kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
    kCpumMicroarch_Intel_NB_Northwood,      /**< 130nm */
    kCpumMicroarch_Intel_NB_Prescott,       /**< 90nm */
    kCpumMicroarch_Intel_NB_Prescott2M,     /**< 90nm */
    kCpumMicroarch_Intel_NB_CedarMill,      /**< 65nm */
    kCpumMicroarch_Intel_NB_Gallatin,       /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
    kCpumMicroarch_Intel_NB_Unknown,
    kCpumMicroarch_Intel_NB_End,

    kCpumMicroarch_Intel_Unknown,
    kCpumMicroarch_Intel_End,

    kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am386,
    kCpumMicroarch_AMD_Am486,
    kCpumMicroarch_AMD_Am486Enh,            /**< Covers Am5x86 as well. */
    kCpumMicroarch_AMD_K5,
    kCpumMicroarch_AMD_K6,

    kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Spitfire,
    kCpumMicroarch_AMD_K7_Thunderbird,
    kCpumMicroarch_AMD_K7_Morgan,
    kCpumMicroarch_AMD_K7_Thoroughbred,
    kCpumMicroarch_AMD_K7_Barton,
    kCpumMicroarch_AMD_K7_Unknown,
    kCpumMicroarch_AMD_K7_End,

    kCpumMicroarch_AMD_K8_First,
    kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
    kCpumMicroarch_AMD_K8_90nm,             /**< 90nm shrink */
    kCpumMicroarch_AMD_K8_90nm_DualCore,    /**< 90nm with two cores. */
    kCpumMicroarch_AMD_K8_90nm_AMDV,        /**< 90nm with AMD-V (usually) and two cores (usually). */
    kCpumMicroarch_AMD_K8_65nm,             /**< 65nm shrink. */
    kCpumMicroarch_AMD_K8_End,

    kCpumMicroarch_AMD_K10,
    kCpumMicroarch_AMD_K10_Lion,
    kCpumMicroarch_AMD_K10_Llano,
    kCpumMicroarch_AMD_Bobcat,
    kCpumMicroarch_AMD_Jaguar,

    kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Piledriver,
    kCpumMicroarch_AMD_15h_Steamroller,     /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Excavator,       /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Unknown,
    kCpumMicroarch_AMD_15h_End,

    kCpumMicroarch_AMD_16h_First,
    kCpumMicroarch_AMD_16h_End,

    kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_End,

    kCpumMicroarch_AMD_Unknown,
    kCpumMicroarch_AMD_End,

    kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C2,
    kCpumMicroarch_Centaur_C3,
    kCpumMicroarch_VIA_C3_M2,
    kCpumMicroarch_VIA_C3_C5A,              /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
    kCpumMicroarch_VIA_C3_C5B,              /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
    kCpumMicroarch_VIA_C3_C5C,              /**< 130nm Ezra - C3, Eden ESP. */
    kCpumMicroarch_VIA_C3_C5N,              /**< 130nm Ezra-T - C3. */
    kCpumMicroarch_VIA_C3_C5XL,             /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
    kCpumMicroarch_VIA_C3_C5P,              /**< 130nm Nehemiah+ - C3. */
    kCpumMicroarch_VIA_C7_C5J,              /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
    kCpumMicroarch_VIA_Isaiah,
    kCpumMicroarch_VIA_Unknown,
    kCpumMicroarch_VIA_End,

    kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_M1,
    kCpumMicroarch_Cyrix_MediaGX,
    kCpumMicroarch_Cyrix_MediaGXm,
    kCpumMicroarch_Cyrix_M2,
    kCpumMicroarch_Cyrix_Unknown,
    kCpumMicroarch_Cyrix_End,

    kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V30,
    kCpumMicroarch_NEC_End,

    kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Wudaokou = kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Unknown,
    kCpumMicroarch_Shanghai_End,

    kCpumMicroarch_Unknown,

    kCpumMicroarch_32BitHack = 0x7fffffff
} CPUMMICROARCH;


/** Predicate macro for catching netburst CPUs. */
#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)

/** Predicate macro for catching Core7 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)

/** Predicate macro for catching Core 2 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE2(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core2_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core2_End)

/** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
#define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)

/** Predicate macro for catching AMD Family 0Fh CPUs (aka K8). */
#define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)

/** Predicate macro for catching AMD Family 10H CPUs (aka K10). */
#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch)    ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)

/** Predicate macro for catching AMD Family 11H CPUs (aka Lion). */
#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch)    ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)

/** Predicate macro for catching AMD Family 12H CPUs (aka Llano). */
#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch)    ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)

/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat). */
#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch)    ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)

/** Predicate macro for catching AMD Family 15H CPUs (Bulldozer and its
 * descendants). */
#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)

/** Predicate macro for catching AMD Family 16H CPUs. */
#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)

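/*
 * Illustrative usage sketch (not part of the original header): the predicate
 * macros above are typically fed the value returned by CPUMGetGuestMicroarch()
 * or CPUMGetHostMicroarch() (both declared below), e.g. to restrict a quirk to
 * one processor generation.  Assuming a valid PVM handle:
 *
 * @code
 *     CPUMMICROARCH const enmMicroarch = CPUMGetGuestMicroarch(pVM);
 *     if (CPUMMICROARCH_IS_INTEL_NETBURST(enmMicroarch))
 *     {
 *         // NetBurst (Pentium 4) guest CPU profile.
 *     }
 *     else if (CPUMMICROARCH_IS_AMD_FAM_15H(enmMicroarch))
 *     {
 *         // Bulldozer-derived guest CPU profile.
 *     }
 * @endcode
 */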


/**
 * CPUID leaf.
 *
 * @remarks This structure is used by the patch manager and is therefore
 *          more or less set in stone.
 */
typedef struct CPUMCPUIDLEAF
{
    /** The leaf number. */
    uint32_t    uLeaf;
    /** The sub-leaf number. */
    uint32_t    uSubLeaf;
    /** Sub-leaf mask.  This is 0 when sub-leaves aren't used. */
    uint32_t    fSubLeafMask;

    /** The EAX value. */
    uint32_t    uEax;
    /** The EBX value. */
    uint32_t    uEbx;
    /** The ECX value. */
    uint32_t    uEcx;
    /** The EDX value. */
    uint32_t    uEdx;

    /** Flags. */
    uint32_t    fFlags;
} CPUMCPUIDLEAF;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMCPUIDLEAF, 32);
#endif
/** Pointer to a CPUID leaf. */
typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
/** Pointer to a const CPUID leaf. */
typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;

/** @name CPUMCPUIDLEAF::fFlags
 * @{ */
/** Indicates a working Intel leaf 0xb, where the lower 8 ECX bits are not
 * modified and EDX contains the extended APIC ID. */
#define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES    RT_BIT_32(0)
/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID            RT_BIT_32(1)
/** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE            RT_BIT_32(2)
/** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC               RT_BIT_32(3)
/** Mask of the valid flags. */
#define CPUMCPUIDLEAF_F_VALID_MASK                  UINT32_C(0xf)
/** @} */
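
/*
 * Illustrative sketch (register values are made up): a CPUMCPUIDLEAF entry
 * pairs the leaf/sub-leaf coordinates with the four register values and the
 * fFlags handling hints above.  Standard leaf 1, for instance, carries the
 * initial APIC ID in EBX[31:24] and the APIC feature bit in EDX, so it would
 * be tagged with both APIC related flags:
 *
 * @code
 *     CPUMCPUIDLEAF Leaf1;
 *     Leaf1.uLeaf        = UINT32_C(0x00000001);
 *     Leaf1.uSubLeaf     = 0;
 *     Leaf1.fSubLeafMask = 0;                    // no sub-leaves
 *     Leaf1.uEax         = UINT32_C(0x000306a9); // family/model/stepping
 *     Leaf1.uEbx         = UINT32_C(0x00100800); // initial APIC ID in bits 31:24
 *     Leaf1.uEcx         = UINT32_C(0x77fafbff); // feature bits
 *     Leaf1.uEdx         = UINT32_C(0xbfebfbff); // feature bits, incl. APIC
 *     Leaf1.fFlags       = CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_CONTAINS_APIC;
 * @endcode
 */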

/**
 * Method used to deal with unknown CPUID leaves.
 * @remarks Used in patch code.
 */
typedef enum CPUMUNKNOWNCPUID
{
    /** Invalid zero value. */
    CPUMUNKNOWNCPUID_INVALID = 0,
    /** Use given default values (DefCpuId). */
    CPUMUNKNOWNCPUID_DEFAULTS,
    /** Return the last standard leaf.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF,
    /** Return the last standard leaf, with ecx observed.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
    /** The register values are passed through unmodified. */
    CPUMUNKNOWNCPUID_PASSTHRU,
    /** End of valid values. */
    CPUMUNKNOWNCPUID_END,
    /** Ensure 32-bit type. */
    CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
} CPUMUNKNOWNCPUID;
/** Pointer to unknown CPUID leaf method. */
typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;
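
/*
 * Simplified sketch of the dispatch this enum drives (not the actual VMM
 * code; DefCpuId, LastStdLeaf and pResult are placeholder names):
 *
 * @code
 *     switch (enmUnknownMethod)
 *     {
 *         case CPUMUNKNOWNCPUID_DEFAULTS:
 *             *pResult = DefCpuId;               // fixed default values
 *             break;
 *         case CPUMUNKNOWNCPUID_LAST_STD_LEAF:   // Sandy Bridge behavior
 *         case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:
 *             *pResult = LastStdLeaf;            // the ECX variant also observes the sub-leaf
 *             break;
 *         case CPUMUNKNOWNCPUID_PASSTHRU:
 *             break;                             // leave the register values untouched
 *         default:
 *             AssertFailed();
 *     }
 * @endcode
 */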


/**
 * The register set returned by a CPUID operation.
 */
typedef struct CPUMCPUID
{
    uint32_t uEax;
    uint32_t uEbx;
    uint32_t uEcx;
    uint32_t uEdx;
} CPUMCPUID;
/** Pointer to a CPUID register set. */
typedef CPUMCPUID *PCPUMCPUID;
/** Pointer to a const CPUID register set. */
typedef const CPUMCPUID *PCCPUMCPUID;
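
/*
 * Illustrative sketch: on the host side a CPUMCPUID is conveniently filled in
 * with IPRT's ASMCpuId() (assuming iprt/asm-amd64-x86.h is included); for the
 * guest view, use CPUMGetGuestCpuId() declared further down:
 *
 * @code
 *     CPUMCPUID Result;
 *     ASMCpuId(1, &Result.uEax, &Result.uEbx, &Result.uEcx, &Result.uEdx); // host leaf 1
 * @endcode
 */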


/**
 * MSR read functions.
 */
typedef enum CPUMMSRRDFN
{
    /** Invalid zero value. */
    kCpumMsrRdFn_Invalid = 0,
    /** Return the CPUMMSRRANGE::uValue. */
    kCpumMsrRdFn_FixedValue,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be used in pair with
     * kCpumMsrWrFn_MsrAlias. */
    kCpumMsrRdFn_MsrAlias,
    /** Write only register, GP all read attempts. */
    kCpumMsrRdFn_WriteOnly,

    kCpumMsrRdFn_Ia32P5McAddr,
    kCpumMsrRdFn_Ia32P5McType,
    kCpumMsrRdFn_Ia32TimestampCounter,
    kCpumMsrRdFn_Ia32PlatformId,            /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32ApicBase,
    kCpumMsrRdFn_Ia32FeatureControl,
    kCpumMsrRdFn_Ia32BiosSignId,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32SmmMonitorCtl,
    kCpumMsrRdFn_Ia32PmcN,
    kCpumMsrRdFn_Ia32MonitorFilterLineSize,
    kCpumMsrRdFn_Ia32MPerf,
    kCpumMsrRdFn_Ia32APerf,
    kCpumMsrRdFn_Ia32MtrrCap,               /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrRdFn_Ia32MtrrDefType,
    kCpumMsrRdFn_Ia32Pat,
    kCpumMsrRdFn_Ia32SysEnterCs,
    kCpumMsrRdFn_Ia32SysEnterEsp,
    kCpumMsrRdFn_Ia32SysEnterEip,
    kCpumMsrRdFn_Ia32McgCap,
    kCpumMsrRdFn_Ia32McgStatus,
    kCpumMsrRdFn_Ia32McgCtl,
    kCpumMsrRdFn_Ia32DebugCtl,
    kCpumMsrRdFn_Ia32SmrrPhysBase,
    kCpumMsrRdFn_Ia32SmrrPhysMask,
    kCpumMsrRdFn_Ia32PlatformDcaCap,
    kCpumMsrRdFn_Ia32CpuDcaCap,
    kCpumMsrRdFn_Ia32Dca0Cap,
    kCpumMsrRdFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrRdFn_Ia32PerfStatus,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32PerfCtl,               /**< Range value returned. */
    kCpumMsrRdFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32PerfCapabilities,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32FixedCtrCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalStatus,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32PerfGlobalCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrRdFn_Ia32PebsEnable,
    kCpumMsrRdFn_Ia32ClockModulation,       /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermInterrupt,        /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermStatus,           /**< Range value returned. */
    kCpumMsrRdFn_Ia32Therm2Ctl,             /**< Range value returned. */
    kCpumMsrRdFn_Ia32MiscEnable,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrRdFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32DsArea,
    kCpumMsrRdFn_Ia32TscDeadline,
    kCpumMsrRdFn_Ia32X2ApicN,
    kCpumMsrRdFn_Ia32DebugInterface,
    kCpumMsrRdFn_Ia32VmxBasic,              /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxPinbasedCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcbasedCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxExitCtls,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEntryCtls,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxMisc,               /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmcsEnum,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcBasedCtls2,     /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEptVpidCap,         /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTruePinbasedCtls,   /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls,  /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueExitCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueEntryCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmFunc,             /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32SpecCtrl,
    kCpumMsrRdFn_Ia32ArchCapabilities,

    kCpumMsrRdFn_Amd64Efer,
    kCpumMsrRdFn_Amd64SyscallTarget,
    kCpumMsrRdFn_Amd64LongSyscallTarget,
    kCpumMsrRdFn_Amd64CompSyscallTarget,
    kCpumMsrRdFn_Amd64SyscallFlagMask,
    kCpumMsrRdFn_Amd64FsBase,
    kCpumMsrRdFn_Amd64GsBase,
    kCpumMsrRdFn_Amd64KernelGsBase,
    kCpumMsrRdFn_Amd64TscAux,

    kCpumMsrRdFn_IntelEblCrPowerOn,
    kCpumMsrRdFn_IntelI7CoreThreadCount,
    kCpumMsrRdFn_IntelP4EbcHardPowerOn,
    kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
    kCpumMsrRdFn_IntelP4EbcFrequencyId,
    kCpumMsrRdFn_IntelP6FsbFrequency,       /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPlatformInfo,
    kCpumMsrRdFn_IntelFlexRatio,            /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPkgCStConfigControl,
    kCpumMsrRdFn_IntelPmgIoCaptureBase,
    kCpumMsrRdFn_IntelLastBranchFromToN,
    kCpumMsrRdFn_IntelLastBranchFromN,
    kCpumMsrRdFn_IntelLastBranchToN,
    kCpumMsrRdFn_IntelLastBranchTos,
    kCpumMsrRdFn_IntelBblCrCtl,
    kCpumMsrRdFn_IntelBblCrCtl3,
    kCpumMsrRdFn_IntelI7TemperatureTarget,  /**< Range value returned. */
    kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
    kCpumMsrRdFn_IntelI7MiscPwrMgmt,
    kCpumMsrRdFn_IntelP6CrN,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrRdFn_IntelI7SandyAesNiCtl,
    kCpumMsrRdFn_IntelI7TurboRatioLimit,    /**< Returns range value. */
    kCpumMsrRdFn_IntelI7LbrSelect,
    kCpumMsrRdFn_IntelI7SandyErrorControl,
    kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
    kCpumMsrRdFn_IntelI7PowerCtl,
    kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
    kCpumMsrRdFn_IntelI7PebsLdLat,
    kCpumMsrRdFn_IntelI7PkgCnResidencyN,    /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7CoreCnResidencyN,   /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyVrMiscConfig,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgC2Residency,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPerfStatus,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerInfo,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerInfo,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0Policy,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PerfStatus,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1PowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1Policy,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpNominal,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
    kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
    kCpumMsrRdFn_IntelI7UncCBoxConfig,
    kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
    kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrRdFn_IntelI7SmiCount,
    kCpumMsrRdFn_IntelCore2EmttmCrTablesN,  /**< Range value returned. */
    kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrRdFn_IntelCore1ExtConfig,
    kCpumMsrRdFn_IntelCore1DtsCalControl,
    kCpumMsrRdFn_IntelCore2PeciControl,
    kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,

    kCpumMsrRdFn_P6LastBranchFromIp,
    kCpumMsrRdFn_P6LastBranchToIp,
    kCpumMsrRdFn_P6LastIntFromIp,
    kCpumMsrRdFn_P6LastIntToIp,

    kCpumMsrRdFn_AmdFam15hTscRate,
    kCpumMsrRdFn_AmdFam15hLwpCfg,
    kCpumMsrRdFn_AmdFam15hLwpCbAddr,
    kCpumMsrRdFn_AmdFam10hMc4MiscN,
    kCpumMsrRdFn_AmdK8PerfCtlN,
    kCpumMsrRdFn_AmdK8PerfCtrN,
    kCpumMsrRdFn_AmdK8SysCfg,               /**< Range value returned. */
    kCpumMsrRdFn_AmdK8HwCr,
    kCpumMsrRdFn_AmdK8IorrBaseN,
    kCpumMsrRdFn_AmdK8IorrMaskN,
    kCpumMsrRdFn_AmdK8TopOfMemN,
    kCpumMsrRdFn_AmdK8NbCfg1,
    kCpumMsrRdFn_AmdK8McXcptRedir,
    kCpumMsrRdFn_AmdK8CpuNameN,
    kCpumMsrRdFn_AmdK8HwThermalCtrl,        /**< Range value returned. */
    kCpumMsrRdFn_AmdK8SwThermalCtrl,
    kCpumMsrRdFn_AmdK8FidVidControl,        /**< Range value returned. */
    kCpumMsrRdFn_AmdK8FidVidStatus,         /**< Range value returned. */
    kCpumMsrRdFn_AmdK8McCtlMaskN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrRdFn_AmdK8IntPendingMessage,
    kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrRdFn_AmdFam10hPStateCurLimit,   /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateN,          /**< Returns range value. This isn't a register index! */
    kCpumMsrRdFn_AmdFam10hCofVidControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCofVidStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrRdFn_AmdK8SmmBase,
    kCpumMsrRdFn_AmdK8SmmAddr,
    kCpumMsrRdFn_AmdK8SmmMask,
    kCpumMsrRdFn_AmdK8VmCr,
    kCpumMsrRdFn_AmdK8IgnNe,
    kCpumMsrRdFn_AmdK8SmmCtl,
    kCpumMsrRdFn_AmdK8VmHSavePa,
    kCpumMsrRdFn_AmdFam10hVmLockKey,
    kCpumMsrRdFn_AmdFam10hSmmLockKey,
    kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
    kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrRdFn_AmdK7MicrocodeCtl,         /**< Returns range value. */
    kCpumMsrRdFn_AmdK7ClusterIdMaybe,       /**< Returns range value. */
    kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrRdFn_AmdK8PatchLevel,           /**< Returns range value. */
    kCpumMsrRdFn_AmdK7DebugStatusMaybe,
    kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
    kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
    kCpumMsrRdFn_AmdK7NodeId,
    kCpumMsrRdFn_AmdK7DrXAddrMaskN,         /**< Takes register index. */
    kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrRdFn_AmdK7LoadStoreCfg,
    kCpumMsrRdFn_AmdK7InstrCacheCfg,
    kCpumMsrRdFn_AmdK7DataCacheCfg,
    kCpumMsrRdFn_AmdK7BusUnitCfg,
    kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
    kCpumMsrRdFn_AmdFam15hFpuCfg,
    kCpumMsrRdFn_AmdFam15hDecoderCfg,
    kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
    kCpumMsrRdFn_AmdFam15hExecUnitCfg,
    kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
    kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrRdFn_AmdFam10hIbsOpRip,
    kCpumMsrRdFn_AmdFam10hIbsOpData,
    kCpumMsrRdFn_AmdFam10hIbsOpData2,
    kCpumMsrRdFn_AmdFam10hIbsOpData3,
    kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsCtl,
    kCpumMsrRdFn_AmdFam14hIbsBrTarget,

    kCpumMsrRdFn_Gim,

    /** End of valid MSR read function indexes. */
    kCpumMsrRdFn_End
} CPUMMSRRDFN;

/**
 * MSR write functions.
 */
typedef enum CPUMMSRWRFN
{
    /** Invalid zero value. */
    kCpumMsrWrFn_Invalid = 0,
    /** Writes are ignored, the fWrGpMask is observed though. */
    kCpumMsrWrFn_IgnoreWrite,
    /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
    kCpumMsrWrFn_ReadOnly,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be used in pair with
     * kCpumMsrRdFn_MsrAlias. */
    kCpumMsrWrFn_MsrAlias,

    kCpumMsrWrFn_Ia32P5McAddr,
    kCpumMsrWrFn_Ia32P5McType,
    kCpumMsrWrFn_Ia32TimestampCounter,
    kCpumMsrWrFn_Ia32ApicBase,
    kCpumMsrWrFn_Ia32FeatureControl,
    kCpumMsrWrFn_Ia32BiosSignId,
    kCpumMsrWrFn_Ia32BiosUpdateTrigger,
    kCpumMsrWrFn_Ia32SmmMonitorCtl,
    kCpumMsrWrFn_Ia32PmcN,
    kCpumMsrWrFn_Ia32MonitorFilterLineSize,
    kCpumMsrWrFn_Ia32MPerf,
    kCpumMsrWrFn_Ia32APerf,
    kCpumMsrWrFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrWrFn_Ia32MtrrDefType,
    kCpumMsrWrFn_Ia32Pat,
    kCpumMsrWrFn_Ia32SysEnterCs,
    kCpumMsrWrFn_Ia32SysEnterEsp,
    kCpumMsrWrFn_Ia32SysEnterEip,
    kCpumMsrWrFn_Ia32McgStatus,
    kCpumMsrWrFn_Ia32McgCtl,
    kCpumMsrWrFn_Ia32DebugCtl,
    kCpumMsrWrFn_Ia32SmrrPhysBase,
    kCpumMsrWrFn_Ia32SmrrPhysMask,
    kCpumMsrWrFn_Ia32PlatformDcaCap,
    kCpumMsrWrFn_Ia32Dca0Cap,
    kCpumMsrWrFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrWrFn_Ia32PerfStatus,
    kCpumMsrWrFn_Ia32PerfCtl,
    kCpumMsrWrFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32PerfCapabilities,
    kCpumMsrWrFn_Ia32FixedCtrCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalStatus,
    kCpumMsrWrFn_Ia32PerfGlobalCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrWrFn_Ia32PebsEnable,
    kCpumMsrWrFn_Ia32ClockModulation,
    kCpumMsrWrFn_Ia32ThermInterrupt,
    kCpumMsrWrFn_Ia32ThermStatus,
    kCpumMsrWrFn_Ia32Therm2Ctl,
    kCpumMsrWrFn_Ia32MiscEnable,
    kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrWrFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32DsArea,
    kCpumMsrWrFn_Ia32TscDeadline,
    kCpumMsrWrFn_Ia32X2ApicN,
    kCpumMsrWrFn_Ia32DebugInterface,
    kCpumMsrWrFn_Ia32SpecCtrl,
    kCpumMsrWrFn_Ia32PredCmd,
    kCpumMsrWrFn_Ia32FlushCmd,

    kCpumMsrWrFn_Amd64Efer,
    kCpumMsrWrFn_Amd64SyscallTarget,
    kCpumMsrWrFn_Amd64LongSyscallTarget,
    kCpumMsrWrFn_Amd64CompSyscallTarget,
    kCpumMsrWrFn_Amd64SyscallFlagMask,
    kCpumMsrWrFn_Amd64FsBase,
    kCpumMsrWrFn_Amd64GsBase,
    kCpumMsrWrFn_Amd64KernelGsBase,
    kCpumMsrWrFn_Amd64TscAux,
    kCpumMsrWrFn_IntelEblCrPowerOn,
    kCpumMsrWrFn_IntelP4EbcHardPowerOn,
    kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
    kCpumMsrWrFn_IntelP4EbcFrequencyId,
    kCpumMsrWrFn_IntelFlexRatio,
    kCpumMsrWrFn_IntelPkgCStConfigControl,
    kCpumMsrWrFn_IntelPmgIoCaptureBase,
    kCpumMsrWrFn_IntelLastBranchFromToN,
    kCpumMsrWrFn_IntelLastBranchFromN,
    kCpumMsrWrFn_IntelLastBranchToN,
    kCpumMsrWrFn_IntelLastBranchTos,
    kCpumMsrWrFn_IntelBblCrCtl,
    kCpumMsrWrFn_IntelBblCrCtl3,
    kCpumMsrWrFn_IntelI7TemperatureTarget,
    kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
    kCpumMsrWrFn_IntelI7MiscPwrMgmt,
    kCpumMsrWrFn_IntelP6CrN,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrWrFn_IntelI7SandyAesNiCtl,
    kCpumMsrWrFn_IntelI7TurboRatioLimit,
    kCpumMsrWrFn_IntelI7LbrSelect,
    kCpumMsrWrFn_IntelI7SandyErrorControl,
    kCpumMsrWrFn_IntelI7PowerCtl,
    kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
    kCpumMsrWrFn_IntelI7PebsLdLat,
    kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
    kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
    kCpumMsrWrFn_IntelI7SandyRaplPowerUnit, /**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
    kCpumMsrWrFn_IntelI7SandyPkgC2Residency, /**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
    kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0Policy,
    kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp1Policy,
    kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
    kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
    kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
    kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
    kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrWrFn_IntelCore1ExtConfig,
    kCpumMsrWrFn_IntelCore1DtsCalControl,
    kCpumMsrWrFn_IntelCore2PeciControl,

    kCpumMsrWrFn_P6LastIntFromIp,
    kCpumMsrWrFn_P6LastIntToIp,

    kCpumMsrWrFn_AmdFam15hTscRate,
    kCpumMsrWrFn_AmdFam15hLwpCfg,
    kCpumMsrWrFn_AmdFam15hLwpCbAddr,
    kCpumMsrWrFn_AmdFam10hMc4MiscN,
    kCpumMsrWrFn_AmdK8PerfCtlN,
    kCpumMsrWrFn_AmdK8PerfCtrN,
    kCpumMsrWrFn_AmdK8SysCfg,
    kCpumMsrWrFn_AmdK8HwCr,
    kCpumMsrWrFn_AmdK8IorrBaseN,
    kCpumMsrWrFn_AmdK8IorrMaskN,
    kCpumMsrWrFn_AmdK8TopOfMemN,
    kCpumMsrWrFn_AmdK8NbCfg1,
    kCpumMsrWrFn_AmdK8McXcptRedir,
    kCpumMsrWrFn_AmdK8CpuNameN,
    kCpumMsrWrFn_AmdK8HwThermalCtrl,
    kCpumMsrWrFn_AmdK8SwThermalCtrl,
    kCpumMsrWrFn_AmdK8FidVidControl,
    kCpumMsrWrFn_AmdK8McCtlMaskN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrWrFn_AmdK8IntPendingMessage,
    kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrWrFn_AmdFam10hPStateControl,
    kCpumMsrWrFn_AmdFam10hPStateStatus,
    kCpumMsrWrFn_AmdFam10hPStateN,
    kCpumMsrWrFn_AmdFam10hCofVidControl,
    kCpumMsrWrFn_AmdFam10hCofVidStatus,
    kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrWrFn_AmdK8SmmBase,
    kCpumMsrWrFn_AmdK8SmmAddr,
    kCpumMsrWrFn_AmdK8SmmMask,
    kCpumMsrWrFn_AmdK8VmCr,
    kCpumMsrWrFn_AmdK8IgnNe,
    kCpumMsrWrFn_AmdK8SmmCtl,
    kCpumMsrWrFn_AmdK8VmHSavePa,
    kCpumMsrWrFn_AmdFam10hVmLockKey,
    kCpumMsrWrFn_AmdFam10hSmmLockKey,
    kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
    kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrWrFn_AmdK7MicrocodeCtl,
    kCpumMsrWrFn_AmdK7ClusterIdMaybe,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrWrFn_AmdK8PatchLoader,
    kCpumMsrWrFn_AmdK7DebugStatusMaybe,
    kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
    kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
    kCpumMsrWrFn_AmdK7NodeId,
    kCpumMsrWrFn_AmdK7DrXAddrMaskN,         /**< Takes register index. */
    kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrWrFn_AmdK7LoadStoreCfg,
    kCpumMsrWrFn_AmdK7InstrCacheCfg,
    kCpumMsrWrFn_AmdK7DataCacheCfg,
    kCpumMsrWrFn_AmdK7BusUnitCfg,
    kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
    kCpumMsrWrFn_AmdFam15hFpuCfg,
    kCpumMsrWrFn_AmdFam15hDecoderCfg,
    kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
    kCpumMsrWrFn_AmdFam15hExecUnitCfg,
    kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
    kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrWrFn_AmdFam10hIbsOpRip,
    kCpumMsrWrFn_AmdFam10hIbsOpData,
    kCpumMsrWrFn_AmdFam10hIbsOpData2,
    kCpumMsrWrFn_AmdFam10hIbsOpData3,
    kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsCtl,
    kCpumMsrWrFn_AmdFam14hIbsBrTarget,

    kCpumMsrWrFn_Gim,

    /** End of valid MSR write function indexes. */
    kCpumMsrWrFn_End
} CPUMMSRWRFN;

/**
 * MSR range.
 */
typedef struct CPUMMSRRANGE
{
    /** The first MSR. [0] */
    uint32_t    uFirst;
    /** The last MSR. [4] */
    uint32_t    uLast;
    /** The read function (CPUMMSRRDFN). [8] */
    uint16_t    enmRdFn;
    /** The write function (CPUMMSRWRFN). [10] */
    uint16_t    enmWrFn;
    /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
     * UINT16_MAX if not used by the read and write functions. [12] */
    uint16_t    offCpumCpu;
    /** Reserved for future hacks. [14] */
    uint16_t    fReserved;
    /** The init/read value. [16]
     * When enmRdFn is kCpumMsrRdFn_INIT_VALUE, this is the value returned on RDMSR.
     * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
     * offset into CPUM. */
    uint64_t    uValue;
    /** The bits to ignore when writing. [24] */
    uint64_t    fWrIgnMask;
    /** The bits that will cause a GP(0) when writing. [32]
     * This is always checked prior to calling the write function.  Using
     * UINT64_MAX effectively marks the MSR as read-only. */
    uint64_t    fWrGpMask;
    /** The register name, if applicable. [40] */
    char        szName[56];

#ifdef VBOX_WITH_STATISTICS
    /** The number of reads. */
    STAMCOUNTER cReads;
    /** The number of writes. */
    STAMCOUNTER cWrites;
    /** The number of times ignored bits were written. */
    STAMCOUNTER cIgnoredBits;
    /** The number of GPs generated. */
    STAMCOUNTER cGps;
#endif
} CPUMMSRRANGE;
#ifndef VBOX_FOR_DTRACE_LIB
# ifdef VBOX_WITH_STATISTICS
AssertCompileSize(CPUMMSRRANGE, 128);
# else
AssertCompileSize(CPUMMSRRANGE, 96);
# endif
#endif
/** Pointer to an MSR range. */
typedef CPUMMSRRANGE *PCPUMMSRRANGE;
/** Pointer to a const MSR range. */
typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
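
/*
 * Illustrative sketch (a made-up entry, not from the real MSR tables): a range
 * binds [uFirst, uLast] to read/write methods from the enums above.  A
 * register that reads as a fixed value and silently ignores all writes could
 * be described like this:
 *
 * @code
 *     CPUMMSRRANGE Range;
 *     RT_ZERO(Range);
 *     Range.uFirst     = UINT32_C(0x0000017f);    // single MSR: first == last
 *     Range.uLast      = UINT32_C(0x0000017f);
 *     Range.enmRdFn    = kCpumMsrRdFn_FixedValue; // reads return Range.uValue
 *     Range.enmWrFn    = kCpumMsrWrFn_IgnoreWrite;
 *     Range.offCpumCpu = UINT16_MAX;              // no CPUMCPU backing field
 *     Range.uValue     = 0;
 *     Range.fWrIgnMask = UINT64_MAX;              // ignore all written bits
 *     Range.fWrGpMask  = 0;                       // never GP(0) on writes
 *     strcpy(Range.szName, "EXAMPLE_MSR");
 * @endcode
 */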


/**
 * MSRs which are required while exploding features.
 */
typedef struct CPUMMSRS
{
    union
    {
        VMXMSRS     vmx;
        SVMMSRS     svm;
    } hwvirt;
} CPUMMSRS;
/** Pointer to a CPUMMSRS struct. */
typedef CPUMMSRS *PCPUMMSRS;
/** Pointer to a const CPUMMSRS struct. */
typedef CPUMMSRS const *PCCPUMMSRS;


/**
 * CPU features and quirks.
 * This is mostly exploded CPUID info.
 */
typedef struct CPUMFEATURES
{
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t         enmCpuVendor;
    /** The CPU family. */
    uint8_t         uFamily;
    /** The CPU model. */
    uint8_t         uModel;
    /** The CPU stepping. */
    uint8_t         uStepping;
    /** The microarchitecture. */
#ifndef VBOX_FOR_DTRACE_LIB
    CPUMMICROARCH   enmMicroarch;
#else
    uint32_t        enmMicroarch;
#endif
    /** The maximum physical address width of the CPU. */
    uint8_t         cMaxPhysAddrWidth;
    /** The maximum linear address width of the CPU. */
    uint8_t         cMaxLinearAddrWidth;
    /** Max size of the extended state (or FPU state if no XSAVE). */
    uint16_t        cbMaxExtendedState;

    /** Supports MSRs. */
    uint32_t        fMsr : 1;
    /** Supports the page size extension (4/2 MB pages). */
    uint32_t        fPse : 1;
    /** Supports 36-bit page size extension (4 MB pages can map memory above
     * 4GB). */
    uint32_t        fPse36 : 1;
    /** Supports physical address extension (PAE). */
    uint32_t        fPae : 1;
    /** Page attribute table (PAT) support (page level cache control). */
    uint32_t        fPat : 1;
    /** Supports the FXSAVE and FXRSTOR instructions. */
    uint32_t        fFxSaveRstor : 1;
    /** Supports the XSAVE and XRSTOR instructions. */
    uint32_t        fXSaveRstor : 1;
    /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
    uint32_t        fOpSysXSaveRstor : 1;
    /** Supports MMX. */
    uint32_t        fMmx : 1;
    /** Supports AMD extensions to MMX instructions. */
    uint32_t        fAmdMmxExts : 1;
    /** Supports SSE. */
    uint32_t        fSse : 1;
    /** Supports SSE2. */
    uint32_t        fSse2 : 1;
    /** Supports SSE3. */
    uint32_t        fSse3 : 1;
    /** Supports SSSE3. */
    uint32_t        fSsse3 : 1;
    /** Supports SSE4.1. */
    uint32_t        fSse41 : 1;
    /** Supports SSE4.2. */
    uint32_t        fSse42 : 1;
    /** Supports AVX. */
    uint32_t        fAvx : 1;
    /** Supports AVX2. */
    uint32_t        fAvx2 : 1;
    /** Supports AVX512 foundation. */
    uint32_t        fAvx512Foundation : 1;
    /** Supports RDTSC. */
    uint32_t        fTsc : 1;
    /** Intel SYSENTER/SYSEXIT support. */
    uint32_t        fSysEnter : 1;
    /** First generation APIC. */
    uint32_t        fApic : 1;
    /** Second generation APIC. */
    uint32_t        fX2Apic : 1;
    /** Hypervisor present. */
    uint32_t        fHypervisorPresent : 1;
    /** MWAIT & MONITOR instructions supported. */
    uint32_t        fMonitorMWait : 1;
    /** MWAIT Extensions present. */
    uint32_t        fMWaitExtensions : 1;
    /** Supports CMPXCHG16B in 64-bit mode. */
    uint32_t        fMovCmpXchg16b : 1;
    /** Supports CLFLUSH. */
    uint32_t        fClFlush : 1;
    /** Supports CLFLUSHOPT. */
    uint32_t        fClFlushOpt : 1;
    /** Supports IA32_PRED_CMD.IBPB. */
    uint32_t        fIbpb : 1;
    /** Supports IA32_SPEC_CTRL.IBRS. */
    uint32_t        fIbrs : 1;
    /** Supports IA32_SPEC_CTRL.STIBP. */
    uint32_t        fStibp : 1;
    /** Supports IA32_FLUSH_CMD. */
    uint32_t        fFlushCmd : 1;
    /** Supports IA32_ARCH_CAP. */
    uint32_t        fArchCap : 1;
    /** Supports MD_CLEAR functionality (VERW, IA32_FLUSH_CMD). */
    uint32_t        fMdsClear : 1;
    /** Supports PCID. */
    uint32_t        fPcid : 1;
    /** Supports INVPCID. */
    uint32_t        fInvpcid : 1;
    /** Supports read/write FSGSBASE instructions. */
    uint32_t        fFsGsBase : 1;

    /** Supports AMD 3DNow instructions. */
    uint32_t        f3DNow : 1;
    /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
    uint32_t        f3DNowPrefetch : 1;

    /** AMD64: Supports long mode. */
    uint32_t        fLongMode : 1;
    /** AMD64: SYSCALL/SYSRET support. */
    uint32_t        fSysCall : 1;
    /** AMD64: No-execute page table bit. */
    uint32_t        fNoExecute : 1;
    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
    uint32_t        fLahfSahf : 1;
    /** AMD64: Supports RDTSCP. */
    uint32_t        fRdTscP : 1;
    /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
    uint32_t        fMovCr8In32Bit : 1;
    /** AMD64: Supports XOP (similar to VEX3/AVX). */
    uint32_t        fXop : 1;

    /** Indicates that FPU instruction and data pointers may leak.
     * This generally applies to recent AMD CPUs, where the FPU IP and DP
     * pointers are only saved and restored if an exception is pending. */
    uint32_t        fLeakyFxSR : 1;

    /** AMD64: Supports AMD SVM. */
    uint32_t        fSvm : 1;

    /** Support for Intel VMX. */
    uint32_t        fVmx : 1;

    /** Indicates that speculative execution control CPUID bits and MSRs are exposed.
     * The details are different for Intel and AMD but both have similar
     * functionality. */
    uint32_t        fSpeculationControl : 1;

    /** MSR_IA32_ARCH_CAPABILITIES: RDCL_NO (bit 0).
     * @remarks Only safe use after CPUM ring-0 init! */
    uint32_t        fArchRdclNo : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: IBRS_ALL (bit 1).
     * @remarks Only safe use after CPUM ring-0 init! */
    uint32_t        fArchIbrsAll : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: RSB Override (bit 2).
     * @remarks Only safe use after CPUM ring-0 init! */
    uint32_t        fArchRsbOverride : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: SKIP_L1DFL_VMENTRY (bit 3).
     * @remarks Only safe use after CPUM ring-0 init! */
    uint32_t        fArchVmmNeedNotFlushL1d : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: MDS_NO (bit 4).
     * @remarks Only safe use after CPUM ring-0 init! */
    uint32_t        fArchMdsNo : 1;

    /** Alignment padding / reserved for future use. */
    uint32_t        fPadding : 8;

    /** SVM: Supports Nested-paging. */
    uint32_t        fSvmNestedPaging : 1;
    /** SVM: Support LBR (Last Branch Record) virtualization. */
    uint32_t        fSvmLbrVirt : 1;
    /** SVM: Supports SVM lock. */
    uint32_t        fSvmSvmLock : 1;
    /** SVM: Supports Next RIP save. */
    uint32_t        fSvmNextRipSave : 1;
    /** SVM: Supports TSC rate MSR. */
    uint32_t        fSvmTscRateMsr : 1;
    /** SVM: Supports VMCB clean bits. */
    uint32_t        fSvmVmcbClean : 1;
    /** SVM: Supports Flush-by-ASID. */
    uint32_t        fSvmFlusbByAsid : 1;
    /** SVM: Supports decode assist. */
    uint32_t        fSvmDecodeAssists : 1;
    /** SVM: Supports Pause filter. */
    uint32_t        fSvmPauseFilter : 1;
    /** SVM: Supports Pause filter threshold. */
    uint32_t        fSvmPauseFilterThreshold : 1;
    /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
    uint32_t        fSvmAvic : 1;
    /** SVM: Supports Virtualized VMSAVE/VMLOAD. */
    uint32_t        fSvmVirtVmsaveVmload : 1;
    /** SVM: Supports VGIF (Virtual Global Interrupt Flag). */
    uint32_t        fSvmVGif : 1;
    /** SVM: Padding / reserved for future features. */
    uint32_t        fSvmPadding0 : 19;
    /** SVM: Maximum supported ASID. */
    uint32_t        uSvmMaxAsid;

    /** VMX: Maximum physical address width. */
    uint8_t         cVmxMaxPhysAddrWidth;
    /** VMX: Padding / reserved for future. */
    uint8_t         abVmxPadding[3];
    /** VMX: Padding / reserved for future. */
    uint32_t        fVmxPadding0;

    /** @name VMX basic controls.
     * @{ */
    /** VMX: Supports INS/OUTS VM-exit instruction info. */
    uint32_t        fVmxInsOutInfo : 1;
    /** @} */

    /** @name VMX Pin-based controls.
     * @{ */
    /** VMX: Supports external interrupt VM-exit. */
    uint32_t        fVmxExtIntExit : 1;
    /** VMX: Supports NMI VM-exit. */
    uint32_t        fVmxNmiExit : 1;
    /** VMX: Supports Virtual NMIs. */
    uint32_t        fVmxVirtNmi : 1;
    /** VMX: Supports preemption timer. */
    uint32_t        fVmxPreemptTimer : 1;
    /** VMX: Supports posted interrupts. */
    uint32_t        fVmxPostedInt : 1;
    /** @} */

    /** @name VMX Processor-based controls.
     * @{ */
    /** VMX: Supports Interrupt-window exiting. */
    uint32_t        fVmxIntWindowExit : 1;
    /** VMX: Supports TSC offsetting. */
    uint32_t        fVmxTscOffsetting : 1;
    /** VMX: Supports HLT exiting. */
    uint32_t        fVmxHltExit : 1;
    /** VMX: Supports INVLPG exiting. */
    uint32_t        fVmxInvlpgExit : 1;
    /** VMX: Supports MWAIT exiting. */
    uint32_t        fVmxMwaitExit : 1;
    /** VMX: Supports RDPMC exiting. */
    uint32_t        fVmxRdpmcExit : 1;
    /** VMX: Supports RDTSC exiting. */
    uint32_t        fVmxRdtscExit : 1;
    /** VMX: Supports CR3-load exiting. */
    uint32_t        fVmxCr3LoadExit : 1;
    /** VMX: Supports CR3-store exiting. */
    uint32_t        fVmxCr3StoreExit : 1;
    /** VMX: Supports CR8-load exiting. */
    uint32_t        fVmxCr8LoadExit : 1;
    /** VMX: Supports CR8-store exiting. */
    uint32_t        fVmxCr8StoreExit : 1;
    /** VMX: Supports TPR shadow. */
    uint32_t        fVmxUseTprShadow : 1;
    /** VMX: Supports NMI-window exiting. */
    uint32_t        fVmxNmiWindowExit : 1;
    /** VMX: Supports Mov-DRx exiting. */
    uint32_t        fVmxMovDRxExit : 1;
    /** VMX: Supports Unconditional I/O exiting. */
    uint32_t        fVmxUncondIoExit : 1;
    /** VMX: Supports I/O bitmaps. */
    uint32_t        fVmxUseIoBitmaps : 1;
    /** VMX: Supports Monitor Trap Flag. */
    uint32_t        fVmxMonitorTrapFlag : 1;
    /** VMX: Supports MSR bitmap. */
    uint32_t        fVmxUseMsrBitmaps : 1;
    /** VMX: Supports MONITOR exiting. */
    uint32_t        fVmxMonitorExit : 1;
    /** VMX: Supports PAUSE exiting. */
    uint32_t        fVmxPauseExit : 1;
    /** VMX: Supports secondary processor-based VM-execution controls. */
    uint32_t        fVmxSecondaryExecCtls : 1;
    /** @} */

    /** @name VMX Secondary processor-based controls.
     * @{ */
    /** VMX: Supports virtualize-APIC access. */
    uint32_t        fVmxVirtApicAccess : 1;
    /** VMX: Supports EPT (Extended Page Tables). */
    uint32_t        fVmxEpt : 1;
    /** VMX: Supports descriptor-table exiting. */
    uint32_t        fVmxDescTableExit : 1;
    /** VMX: Supports RDTSCP. */
    uint32_t        fVmxRdtscp : 1;
    /** VMX: Supports virtualize-x2APIC mode. */
    uint32_t        fVmxVirtX2ApicMode : 1;
    /** VMX: Supports VPID. */
    uint32_t        fVmxVpid : 1;
    /** VMX: Supports WBINVD exiting. */
    uint32_t        fVmxWbinvdExit : 1;
    /** VMX: Supports Unrestricted guest. */
    uint32_t        fVmxUnrestrictedGuest : 1;
    /** VMX: Supports APIC-register virtualization. */
    uint32_t        fVmxApicRegVirt : 1;
    /** VMX: Supports virtual-interrupt delivery. */
    uint32_t        fVmxVirtIntDelivery : 1;
    /** VMX: Supports Pause-loop exiting. */
    uint32_t        fVmxPauseLoopExit : 1;
    /** VMX: Supports RDRAND exiting. */
    uint32_t        fVmxRdrandExit : 1;
    /** VMX: Supports INVPCID. */
    uint32_t        fVmxInvpcid : 1;
    /** VMX: Supports VM functions. */
    uint32_t        fVmxVmFunc : 1;
    /** VMX: Supports VMCS shadowing. */
    uint32_t        fVmxVmcsShadowing : 1;
    /** VMX: Supports RDSEED exiting. */
    uint32_t        fVmxRdseedExit : 1;
    /** VMX: Supports PML. */
    uint32_t        fVmxPml : 1;
    /** VMX: Supports EPT-violations \#VE. */
    uint32_t        fVmxEptXcptVe : 1;
    /** VMX: Supports XSAVES/XRSTORS. */
    uint32_t        fVmxXsavesXrstors : 1;
    /** VMX: Supports TSC scaling. */
    uint32_t        fVmxUseTscScaling : 1;
    /** @} */

    /** @name VMX VM-entry controls.
     * @{ */
    /** VMX: Supports load-debug controls on VM-entry. */
    uint32_t        fVmxEntryLoadDebugCtls : 1;
    /** VMX: Supports IA32e mode guest. */
    uint32_t        fVmxIa32eModeGuest : 1;
    /** VMX: Supports load guest EFER MSR on VM-entry. */
    uint32_t        fVmxEntryLoadEferMsr : 1;
    /** VMX: Supports load guest PAT MSR on VM-entry. */
    uint32_t        fVmxEntryLoadPatMsr : 1;
    /** @} */

    /** @name VMX VM-exit controls.
     * @{ */
    /** VMX: Supports save debug controls on VM-exit. */
    uint32_t        fVmxExitSaveDebugCtls : 1;
    /** VMX: Supports host-address space size. */
    uint32_t        fVmxHostAddrSpaceSize : 1;
    /** VMX: Supports acknowledge external interrupt on VM-exit. */
    uint32_t        fVmxExitAckExtInt : 1;
    /** VMX: Supports save guest PAT MSR on VM-exit. */
    uint32_t        fVmxExitSavePatMsr : 1;
    /** VMX: Supports load host PAT MSR on VM-exit. */
    uint32_t        fVmxExitLoadPatMsr : 1;
    /** VMX: Supports save guest EFER MSR on VM-exit. */
    uint32_t        fVmxExitSaveEferMsr : 1;
    /** VMX: Supports load host EFER MSR on VM-exit. */
    uint32_t        fVmxExitLoadEferMsr : 1;
    /** VMX: Supports save VMX preemption timer on VM-exit. */
    uint32_t        fVmxSavePreemptTimer : 1;
    /** @} */

    /** @name VMX Miscellaneous data.
     * @{ */
    /** VMX: Supports storing EFER.LMA into IA32e-mode guest field on VM-exit. */
    uint32_t        fVmxExitSaveEferLma : 1;
    /** VMX: Whether Intel PT (Processor Trace) is supported in VMX mode or not. */
    uint32_t        fVmxIntelPt : 1;
    /** VMX: Supports VMWRITE to any valid VMCS field incl. read-only fields, otherwise
     * VMWRITE cannot modify read-only VM-exit information fields. */
    uint32_t        fVmxVmwriteAll : 1;
    /** VMX: Supports injection of software interrupts, ICEBP on VM-entry for zero
     * length instructions. */
    uint32_t        fVmxEntryInjectSoftInt : 1;
    /** @} */

    /** VMX: Padding / reserved for future features. */
    uint32_t        fVmxPadding1 : 1;
    uint32_t        fVmxPadding2;
} CPUMFEATURES;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMFEATURES, 48);
#endif
/** Pointer to a CPU feature structure. */
typedef CPUMFEATURES *PCPUMFEATURES;
/** Pointer to a const CPU feature structure. */
typedef CPUMFEATURES const *PCCPUMFEATURES;
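
/*
 * Illustrative sketch: code holding a feature structure (e.g. the guest or
 * host features maintained by CPUM; how the pointer is obtained is outside
 * this excerpt) gates optional paths on the bit-fields above.  The helper
 * name below is hypothetical:
 *
 * @code
 *     static void cpumExampleCheckFeatures(PCCPUMFEATURES pFeatures)
 *     {
 *         if (pFeatures->fSpeculationControl)
 *         {
 *             // IBPB/IBRS/STIBP style controls are exposed; the exact MSR
 *             // interface differs between Intel and AMD.
 *         }
 *         if (pFeatures->fXSaveRstor && pFeatures->fOpSysXSaveRstor)
 *         {
 *             // XSAVE is supported and enabled in CR4 (host only).
 *         }
 *     }
 * @endcode
 */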


#ifndef VBOX_FOR_DTRACE_LIB

/** @name Guest Register Getters.
 * @{ */
VMMDECL(void)           CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR);
VMMDECL(RTGCPTR)        CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit);
VMMDECL(RTSEL)          CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden);
VMMDECL(RTSEL)          CPUMGetGuestLDTR(PCVMCPU pVCpu);
VMMDECL(RTSEL)          CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
VMMDECL(uint64_t)       CPUMGetGuestCR0(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestCR2(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestCR3(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestCR4(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestCR8(PCVMCPUCC pVCpu);
VMMDECL(int)            CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue);
VMMDECL(uint32_t)       CPUMGetGuestEFlags(PCVMCPU pVCpu);
VMMDECL(uint32_t)       CPUMGetGuestEIP(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestRIP(PCVMCPU pVCpu);
VMMDECL(uint32_t)       CPUMGetGuestEAX(PCVMCPU pVCpu);
VMMDECL(uint32_t)       CPUMGetGuestEBX(PCVMCPU pVCpu);
VMMDECL(uint32_t)       CPUMGetGuestECX(PCVMCPU pVCpu);
VMMDECL(uint32_t)       CPUMGetGuestEDX(PCVMCPU pVCpu);
VMMDECL(uint32_t)       CPUMGetGuestESI(PCVMCPU pVCpu);
VMMDECL(uint32_t)       CPUMGetGuestEDI(PCVMCPU pVCpu);
VMMDECL(uint32_t)       CPUMGetGuestESP(PCVMCPU pVCpu);
VMMDECL(uint32_t)       CPUMGetGuestEBP(PCVMCPU pVCpu);
VMMDECL(RTSEL)          CPUMGetGuestCS(PCVMCPU pVCpu);
VMMDECL(RTSEL)          CPUMGetGuestDS(PCVMCPU pVCpu);
VMMDECL(RTSEL)          CPUMGetGuestES(PCVMCPU pVCpu);
VMMDECL(RTSEL)          CPUMGetGuestFS(PCVMCPU pVCpu);
VMMDECL(RTSEL)          CPUMGetGuestGS(PCVMCPU pVCpu);
VMMDECL(RTSEL)          CPUMGetGuestSS(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestFlatPC(PVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestFlatSP(PVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestDR0(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestDR1(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestDR2(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestDR3(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestDR6(PCVMCPU pVCpu);
VMMDECL(uint64_t)       CPUMGetGuestDR7(PCVMCPU pVCpu);
VMMDECL(int)            CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
VMMDECL(void)           CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t iLeaf, uint32_t iSubLeaf,
                                          uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
VMMDECL(uint64_t)       CPUMGetGuestEFER(PCVMCPU pVCpu);
VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32MtrrCap(PCVMCPU pVCpu);
VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32SmmMonitorCtl(PCVMCPU pVCpu);
VMMDECL(VBOXSTRICTRC)   CPUMQueryGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *puValue);
VMMDECL(VBOXSTRICTRC)   CPUMSetGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t uValue);
VMMDECL(CPUMCPUVENDOR)  CPUMGetGuestCpuVendor(PVM pVM);
VMMDECL(CPUMMICROARCH)  CPUMGetGuestMicroarch(PCVM pVM);
VMMDECL(CPUMCPUVENDOR)  CPUMGetHostCpuVendor(PVM pVM);
VMMDECL(CPUMMICROARCH)  CPUMGetHostMicroarch(PCVM pVM);
/** @} */
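
/*
 * Illustrative sketch: reading guest state with the getters above, assuming a
 * valid PVMCPU/PVMCPUCC in a context where the state is available.
 * CPUMQueryGuestMsr() returns a strict status code that must be checked
 * before the value is used (MSR_IA32_TSC comes from iprt/x86.h):
 *
 * @code
 *     uint64_t const uCr0  = CPUMGetGuestCR0(pVCpu);
 *     uint64_t const uEfer = CPUMGetGuestEFER(pVCpu);
 *     uint64_t       uTsc  = 0;
 *     VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_IA32_TSC, &uTsc);
 *     if (rcStrict != VINF_SUCCESS)
 *     {
 *         // Raise #GP(0) or defer to ring-3, depending on the status code.
 *     }
 * @endcode
 */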

/** @name Guest Register Setters.
 * @{ */
VMMDECL(int)        CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
VMMDECL(int)        CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
VMMDECL(int)        CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
VMMDECL(int)        CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
VMMDECL(int)        CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0);
VMMDECL(int)        CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
VMMDECL(int)        CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
VMMDECL(int)        CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
VMMDECL(int)        CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0);
VMMDECL(int)        CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1);
VMMDECL(int)        CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2);
VMMDECL(int)        CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3);
VMMDECL(int)        CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
VMMDECL(int)        CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7);
VMMDECL(int)        CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value);
VMM_INT_DECL(int)   CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue);
VMMDECL(int)        CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
VMMDECL(int)        CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
VMMDECL(int)        CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
VMMDECL(int)        CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
VMMDECL(int)        CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
VMMDECL(int)        CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
VMMDECL(int)        CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
VMMDECL(int)        CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
VMMDECL(int)        CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
VMMDECL(int)        CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
VMMDECL(int)        CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
VMMDECL(int)        CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
VMMDECL(int)        CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
VMMDECL(int)        CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
VMMDECL(int)        CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
VMMDECL(int)        CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
VMMDECL(void)       CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
VMMR3_INT_DECL(void)    CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
VMMR3_INT_DECL(void)    CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
VMMR3_INT_DECL(bool)    CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
VMMDECL(bool)       CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
VMMDECL(void)       CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
VMM_INT_DECL(void)      CPUMSetGuestTscAux(PVMCPUCC pVCpu, uint64_t uValue);
VMM_INT_DECL(uint64_t)  CPUMGetGuestTscAux(PVMCPUCC pVCpu);
VMM_INT_DECL(void)      CPUMSetGuestSpecCtrl(PVMCPUCC pVCpu, uint64_t uValue);
VMM_INT_DECL(uint64_t)  CPUMGetGuestSpecCtrl(PVMCPUCC pVCpu);
VMM_INT_DECL(uint64_t)  CPUMGetGuestCR4ValidMask(PVM pVM);
/** @} */
1465
1466
1467/** @name Misc Guest Predicate Functions.
1468 * @{ */
1469VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
1470VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu);
1471VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu);
1472VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu);
1473VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu);
1474VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu);
1475VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu);
1476VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu);
1477VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu);
1478VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu);
1479VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu);
1480/** @} */
1481
1482/** @name Nested Hardware-Virtualization Helpers.
1483 * @{ */
1484VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu);
1485VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu);
1486VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1487VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1488
1489/* SVM helpers. */
1490VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1491VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1492VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx);
1493VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx);
1494VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
1495VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1496 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
1497 PSVMIOIOEXITINFO pIoExitInfo);
1498VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
1499
1500/* VMX helpers. */
1501VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField);
1502VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess);
1503VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3);
1504VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc);
1505VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr);
1506VMM_INT_DECL(bool) CPUMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
1507 uint8_t cbAccess);
1508/** @} */
1509
1510/** @name Externalized State Helpers.
1511 * @{ */
1512/** @def CPUM_ASSERT_NOT_EXTRN
1513 * Macro for asserting that the state specified by @a a_fNotExtrn is present.
1514 *
1515 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1516 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
1517 *
1518 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1519 */
1520#define CPUM_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
1521 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fNotExtrn)), \
1522 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
1523
1524/** @def CPUM_IMPORT_EXTRN_RET
1525 * Macro for making sure the state specified by @a a_fExtrnImport is present,
1526 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1527 *
1528 * Returns from the calling function if CPUMImportGuestStateOnDemand() fails.
1529 *
1530 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1531 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1532 * @thread EMT(a_pVCpu)
1533 *
1534 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1535 */
1536#define CPUM_IMPORT_EXTRN_RET(a_pVCpu, a_fExtrnImport) \
1537 do { \
1538 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1539 { /* already present, consider this likely */ } \
1540 else \
1541 { \
1542 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1543 AssertRCReturn(rcCpumImport, rcCpumImport); \
1544 } \
1545 } while (0)
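
/* Example (illustrative sketch): importing the segment registers on demand in
 * a function that returns a VBox status code; the macro itself returns the
 * failure status if the import fails.
 *
 *     CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
 *     RTSEL const uCsSel = pVCpu->cpum.GstCtx.cs.Sel;
 */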
1546
1547/** @def CPUM_IMPORT_EXTRN_RCSTRICT
1548 * Macro for making sure the state specified by @a a_fExtrnImport is present,
1549 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1550 *
1551 * Will update @a a_rcStrict if CPUMImportGuestStateOnDemand() fails.
1552 *
1553 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1554 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1555 * @param a_rcStrict Strict status code variable to update on failure.
1556 * @thread EMT(a_pVCpu)
1557 *
1558 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1559 */
1560#define CPUM_IMPORT_EXTRN_RCSTRICT(a_pVCpu, a_fExtrnImport, a_rcStrict) \
1561 do { \
1562 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1563 { /* already present, consider this likely */ } \
1564 else \
1565 { \
1566 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1567 AssertStmt(RT_SUCCESS(rcCpumImport) || RT_FAILURE_NP(a_rcStrict), a_rcStrict = rcCpumImport); \
1568 } \
1569 } while (0)
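
/* Example (illustrative): the strict-status variant for code paths that track
 * a VBOXSTRICTRC locally rather than returning immediately on failure.
 *
 *     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *     CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR3, rcStrict);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         // ... use pVCpu->cpum.GstCtx.cr3 ...
 *     }
 */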
1570
1571VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport);
1572/** @} */
1573
1574#if !defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS) && defined(RT_ARCH_AMD64)
1575
1576/**
1577 * Gets valid CR0 bits for the guest.
1578 *
1579 * @returns Valid CR0 bits.
1580 */
1581DECLINLINE(uint64_t) CPUMGetGuestCR0ValidMask(void)
1582{
1583 return ( X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
1584 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
1585 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG);
1586}
1587
1588/**
1589 * Tests if the guest is running in real mode or not.
1590 *
1591 * @returns true if in real mode, otherwise false.
1592 * @param pCtx Current CPU context.
1593 */
1594DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCCPUMCTX pCtx)
1595{
1596 return !(pCtx->cr0 & X86_CR0_PE);
1597}
1598
1599/**
1600 * Tests if the guest is running in real or virtual 8086 mode.
1601 *
1602 * @returns @c true if it is, @c false if not.
1603 * @param pCtx Current CPU context.
1604 */
1605DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCCPUMCTX pCtx)
1606{
1607 return !(pCtx->cr0 & X86_CR0_PE)
1608 || pCtx->eflags.Bits.u1VM; /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
1609}
1610
1611/**
1612 * Tests if the guest is running in virtual 8086 mode.
1613 *
1614 * @returns @c true if it is, @c false if not.
1615 * @param pCtx Current CPU context.
1616 */
1617DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCCPUMCTX pCtx)
1618{
1619 return (pCtx->eflags.Bits.u1VM == 1);
1620}
1621
1622/**
1623 * Tests if the guest is running in paged protected mode or not.
1624 *
1625 * @returns true if in paged protected mode, otherwise false.
1626 * @param pCtx Current CPU context.
1627 */
1628DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
1629{
1630 return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1631}
1632
1633/**
1634 * Tests if the guest is running in long mode or not.
1635 *
1636 * @returns true if in long mode, otherwise false.
1637 * @param pCtx Current CPU context.
1638 */
1639DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx)
1640{
1641 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1642}
1643
1644VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
1645
1646/**
1647 * Tests if the guest is running in 64-bit mode or not.
1648 *
1649 * @returns true if in 64-bit mode, otherwise false.
1650 * @param pCtx Current CPU context.
1651 */
1652DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
1653{
1654 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
1655 return false;
1656 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
1657 return CPUMIsGuestIn64BitCodeSlow(pCtx);
1658 return pCtx->cs.Attr.n.u1Long;
1659}
1660
1661/**
1662 * Tests if the guest has paging enabled or not.
1663 *
1664 * @returns true if paging is enabled, otherwise false.
1665 * @param pCtx Current CPU context.
1666 */
1667DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCCPUMCTX pCtx)
1668{
1669 return !!(pCtx->cr0 & X86_CR0_PG);
1670}
1671
1672/**
1673 * Tests if the guest is running in PAE mode or not.
1674 *
1675 * @returns true if in PAE mode, otherwise false.
1676 * @param pCtx Current CPU context.
1677 */
1678DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCCPUMCTX pCtx)
1679{
1680 /* We check EFER.LMA rather than EFER.LME because LMA is only set once the CPU has actually
1681 entered paging with long mode enabled; PAE mode here means PAE paging outside long mode. */
1682 return ( (pCtx->cr4 & X86_CR4_PAE)
1683 && CPUMIsGuestPagingEnabledEx(pCtx)
1684 && !(pCtx->msrEFER & MSR_K6_EFER_LMA));
1685}
1686
1687/**
1688 * Tests if the guest has AMD SVM enabled or not.
1689 *
1690 * @returns true if SVM is enabled, otherwise false.
1691 * @param pCtx Current CPU context.
1692 */
1693DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
1694{
1695 return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
1696}
1697
1698/**
1699 * Tests if the guest has Intel VT-x enabled or not.
1700 *
1701 * @returns true if VMX is enabled, otherwise false.
1702 * @param pCtx Current CPU context.
1703 */
1704DECLINLINE(bool) CPUMIsGuestVmxEnabled(PCCPUMCTX pCtx)
1705{
1706 return RT_BOOL(pCtx->cr4 & X86_CR4_VMXE);
1707}
1708
1709/**
1710 * Returns the guest's global-interrupt (GIF) flag.
1711 *
1712 * @returns true when global-interrupts are enabled, otherwise false.
1713 * @param pCtx Current CPU context.
1714 */
1715DECLINLINE(bool) CPUMGetGuestGif(PCCPUMCTX pCtx)
1716{
1717 return pCtx->hwvirt.fGif;
1718}
1719
1720/**
1721 * Sets the guest's global-interrupt flag (GIF).
1722 *
1723 * @param pCtx Current CPU context.
1724 * @param fGif The value to set.
1725 */
1726DECLINLINE(void) CPUMSetGuestGif(PCPUMCTX pCtx, bool fGif)
1727{
1728 pCtx->hwvirt.fGif = fGif;
1729}
1730
1731/**
1732 * Checks if we are executing inside an SVM nested hardware-virtualized guest.
1733 *
1734 * @returns @c true if in SVM nested-guest mode, @c false otherwise.
1735 * @param pCtx Current CPU context.
1736 */
1737DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
1738{
1739 /*
1740 * With AMD-V, the VMRUN intercept is a pre-requisite to entering SVM guest-mode.
1741 * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
1742 */
1743#ifndef IN_RC
1744 if ( pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM
1745 || !(pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
1746 return false;
1747 return true;
1748#else
1749 NOREF(pCtx);
1750 return false;
1751#endif
1752}
1753
1754/**
1755 * Checks if the guest is in VMX non-root operation.
1756 *
1757 * @returns @c true if in VMX non-root operation, @c false otherwise.
1758 * @param pCtx Current CPU context.
1759 */
1760DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx)
1761{
1762#ifndef IN_RC
1763 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1764 return false;
1765 Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
1766 return pCtx->hwvirt.vmx.fInVmxNonRootMode;
1767#else
1768 NOREF(pCtx);
1769 return false;
1770#endif
1771}
1772
1773/**
1774 * Checks if we are executing inside an SVM or VMX nested hardware-virtualized
1775 * guest.
1776 *
1777 * @returns @c true if in nested-guest mode, @c false otherwise.
1778 * @param pCtx Current CPU context.
1779 */
1780DECLINLINE(bool) CPUMIsGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
1781{
1782 return CPUMIsGuestInVmxNonRootMode(pCtx) || CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
1783}
1784
1785/**
1786 * Checks if the guest is in VMX root operation.
1787 *
1788 * @returns @c true if in VMX root operation, @c false otherwise.
1789 * @param pCtx Current CPU context.
1790 */
1791DECLINLINE(bool) CPUMIsGuestInVmxRootMode(PCCPUMCTX pCtx)
1792{
1793#ifndef IN_RC
1794 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1795 return false;
1796 return pCtx->hwvirt.vmx.fInVmxRootMode;
1797#else
1798 NOREF(pCtx);
1799 return false;
1800#endif
1801}
1802
1803# ifndef IN_RC
1804
1805/**
1806 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
1807 * active.
1808 *
1809 * @returns @c true if the intercept is set, @c false otherwise.
1810 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1811 * @param pCtx Pointer to the context.
1812 * @param fIntercept The SVM control/instruction intercept, see
1813 * SVM_CTRL_INTERCEPT_*.
1814 */
1815DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fIntercept)
1816{
1817 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1818 return false;
1819 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1820 Assert(pVmcb);
1821 uint64_t u64Intercepts;
1822 if (!HMGetGuestSvmCtrlIntercepts(pVCpu, &u64Intercepts))
1823 u64Intercepts = pVmcb->ctrl.u64InterceptCtrl;
1824 return RT_BOOL(u64Intercepts & fIntercept);
1825}
1826
1827/**
1828 * Checks if the nested-guest VMCB has the specified CR read intercept active.
1829 *
1830 * @returns @c true if the intercept is set, @c false otherwise.
1831 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1832 * @param pCtx Pointer to the context.
1833 * @param uCr The CR register number (0 to 15).
1834 */
1835DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1836{
1837 Assert(uCr < 16);
1838 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1839 return false;
1840 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1841 Assert(pVmcb);
1842 uint16_t u16Intercepts;
1843 if (!HMGetGuestSvmReadCRxIntercepts(pVCpu, &u16Intercepts))
1844 u16Intercepts = pVmcb->ctrl.u16InterceptRdCRx;
1845 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
1846}
1847
1848/**
1849 * Checks if the nested-guest VMCB has the specified CR write intercept active.
1850 *
1851 * @returns @c true if the intercept is set, @c false otherwise.
1852 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1853 * @param pCtx Pointer to the context.
1854 * @param uCr The CR register number (0 to 15).
1855 */
1856DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1857{
1858 Assert(uCr < 16);
1859 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1860 return false;
1861 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1862 Assert(pVmcb);
1863 uint16_t u16Intercepts;
1864 if (!HMGetGuestSvmWriteCRxIntercepts(pVCpu, &u16Intercepts))
1865 u16Intercepts = pVmcb->ctrl.u16InterceptWrCRx;
1866 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
1867}
1868
1869/**
1870 * Checks if the nested-guest VMCB has the specified DR read intercept active.
1871 *
1872 * @returns @c true if the intercept is set, @c false otherwise.
1873 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1874 * @param pCtx Pointer to the context.
1875 * @param uDr The DR register number (0 to 15).
1876 */
1877DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1878{
1879 Assert(uDr < 16);
1880 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1881 return false;
1882 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1883 Assert(pVmcb);
1884 uint16_t u16Intercepts;
1885 if (!HMGetGuestSvmReadDRxIntercepts(pVCpu, &u16Intercepts))
1886 u16Intercepts = pVmcb->ctrl.u16InterceptRdDRx;
1887 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
1888}
1889
1890/**
1891 * Checks if the nested-guest VMCB has the specified DR write intercept active.
1892 *
1893 * @returns @c true if in intercept is set, @c false otherwise.
1894 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1895 * @param pCtx Pointer to the context.
1896 * @param uDr The DR register number (0 to 15).
1897 */
1898DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1899{
1900 Assert(uDr < 16);
1901 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1902 return false;
1903 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1904 Assert(pVmcb);
1905 uint16_t u16Intercepts;
1906 if (!HMGetGuestSvmWriteDRxIntercepts(pVCpu, &u16Intercepts))
1907 u16Intercepts = pVmcb->ctrl.u16InterceptWrDRx;
1908 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
1909}
1910
1911/**
1912 * Checks if the nested-guest VMCB has the specified exception intercept active.
1913 *
1914 * @returns @c true if the intercept is active, @c false otherwise.
1915 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1916 * @param pCtx Pointer to the context.
1917 * @param uVector The exception / interrupt vector.
1918 */
1919DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
1920{
1921 Assert(uVector <= X86_XCPT_LAST);
1922 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1923 return false;
1924 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1925 Assert(pVmcb);
1926 uint32_t u32Intercepts;
1927 if (!HMGetGuestSvmXcptIntercepts(pVCpu, &u32Intercepts))
1928 u32Intercepts = pVmcb->ctrl.u32InterceptXcpt;
1929 return RT_BOOL(u32Intercepts & RT_BIT(uVector));
1930}
1931
1932/**
1933 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
1934 *
1935 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
1936 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1937 * @param pCtx Pointer to the context.
1938 *
1939 * @remarks Should only be called when SVM feature is exposed to the guest.
1940 */
1941DECLINLINE(bool) CPUMIsGuestSvmVirtIntrMasking(PCVMCPU pVCpu, PCCPUMCTX pCtx)
1942{
1943 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1944 return false;
1945 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1946 Assert(pVmcb);
1947 bool fVIntrMasking;
1948 if (!HMGetGuestSvmVirtIntrMasking(pVCpu, &fVIntrMasking))
1949 fVIntrMasking = pVmcb->ctrl.IntCtrl.n.u1VIntrMasking;
1950 return fVIntrMasking;
1951}
1952
1953/**
1954 * Checks if the nested-guest VMCB has nested-paging enabled.
1955 *
1956 * @returns @c true if nested-paging is enabled, @c false otherwise.
1957 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1958 * @param pCtx Pointer to the context.
1959 *
1960 * @remarks Should only be called when SVM feature is exposed to the guest.
1961 */
1962DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
1963{
1964 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1965 return false;
1966 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1967 Assert(pVmcb);
1968 bool fNestedPaging;
1969 if (!HMGetGuestSvmNestedPaging(pVCpu, &fNestedPaging))
1970 fNestedPaging = pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging;
1971 return fNestedPaging;
1972}
1973
1974/**
1975 * Gets the nested-guest VMCB pause-filter count.
1976 *
1977 * @returns The pause-filter count.
1978 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1979 * @param pCtx Pointer to the context.
1980 *
1981 * @remarks Should only be called when SVM feature is exposed to the guest.
1982 */
1983DECLINLINE(uint16_t) CPUMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, PCCPUMCTX pCtx)
1984{
1985 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1986 return 0; /* Zero, not 'false': the return type is uint16_t. */
1987 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1988 Assert(pVmcb);
1989 uint16_t u16PauseFilterCount;
1990 if (!HMGetGuestSvmPauseFilterCount(pVCpu, &u16PauseFilterCount))
1991 u16PauseFilterCount = pVmcb->ctrl.u16PauseFilterCount;
1992 return u16PauseFilterCount;
1993}
1994
1995/**
1996 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
1997 *
1998 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1999 * @param pCtx Pointer to the context.
2000 * @param cbInstr The length of the current instruction in bytes.
2001 *
2002 * @remarks Should only be called when SVM feature is exposed to the guest.
2003 */
2004DECLINLINE(void) CPUMGuestSvmUpdateNRip(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbInstr)
2005{
2006 RT_NOREF(pVCpu);
2007 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
2008 PSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2009 Assert(pVmcb);
2010 pVmcb->ctrl.u64NextRIP = pCtx->rip + cbInstr;
2011}
2012
2013/**
2014 * Checks whether any of the given Pin-based VM-execution controls is set when
2015 * executing a nested-guest.
2016 *
2017 * @returns @c true if set, @c false otherwise.
2018 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2019 * @param pCtx Pointer to the context.
2020 * @param uPinCtls The Pin-based VM-execution controls to check.
2021 *
2022 * @remarks This does not check if all given controls are set if more than one
2023 * control is passed in @a uPinCtls.
2024 */
2025DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uPinCtls)
2026{
2027 RT_NOREF(pVCpu);
2028 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2029 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2030 Assert(pVmcs);
2031 return RT_BOOL(pVmcs->u32PinCtls & uPinCtls);
2032}
2033
2034/**
2035 * Checks whether any of the given Processor-based VM-execution controls is set
2036 * when executing a nested-guest.
2037 *
2038 * @returns @c true if set, @c false otherwise.
2039 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2040 * @param pCtx Pointer to the context.
2041 * @param uProcCtls The Processor-based VM-execution controls to check.
2042 *
2043 * @remarks This does not check if all given controls are set if more than one
2044 * control is passed in @a uProcCtls.
2045 */
2046DECLINLINE(bool) CPUMIsGuestVmxProcCtlsSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uProcCtls)
2047{
2048 RT_NOREF(pVCpu);
2049 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2050 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2051 Assert(pVmcs);
2052 return RT_BOOL(pVmcs->u32ProcCtls & uProcCtls);
2053}
2054
2055/**
2056 * Checks whether any of the given Secondary Processor-based VM-execution
2057 * controls is set when executing a nested-guest.
2058 *
2059 * @returns @c true if set, @c false otherwise.
2060 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2061 * @param pCtx Pointer to the context.
2062 * @param uProcCtls2 The Secondary Processor-based VM-execution controls to
2063 * check.
2064 *
2065 * @remarks This does not check if all given controls are set if more than one
2066 * control is passed in @a uProcCtls2.
2067 */
2068DECLINLINE(bool) CPUMIsGuestVmxProcCtls2Set(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uProcCtls2)
2069{
2070 RT_NOREF(pVCpu);
2071 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2072 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2073 Assert(pVmcs);
2074 return RT_BOOL(pVmcs->u32ProcCtls2 & uProcCtls2);
2075}
2076
2077/**
2078 * Checks whether any of the given VM-exit controls is set when executing a
2079 * nested-guest.
2080 *
2081 * @returns @c true if set, @c false otherwise.
2082 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2083 * @param pCtx Pointer to the context.
2084 * @param uExitCtls The VM-exit controls to check.
2085 *
2086 * @remarks This does not check if all given controls are set if more than one
2087 * control is passed in @a uExitCtls.
2088 */
2089DECLINLINE(bool) CPUMIsGuestVmxExitCtlsSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uExitCtls)
2090{
2091 RT_NOREF(pVCpu);
2092 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2093 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2094 Assert(pVmcs);
2095 return RT_BOOL(pVmcs->u32ExitCtls & uExitCtls);
2096}
2097
2098/**
2099 * Checks whether any of the given VM-entry controls is set when executing a
2100 * nested-guest.
2101 *
2102 * @returns @c true if set, @c false otherwise.
2103 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2104 * @param pCtx Pointer to the context.
2105 * @param uEntryCtls The VM-entry controls to check.
2106 *
2107 * @remarks This does not check if all given controls are set if more than one
2108 * control is passed in @a uEntryCtls.
2109 */
2110DECLINLINE(bool) CPUMIsGuestVmxEntryCtlsSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uEntryCtls)
2111{
2112 RT_NOREF(pVCpu);
2113 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2114 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2115 Assert(pVmcs);
2116 return RT_BOOL(pVmcs->u32EntryCtls & uEntryCtls);
2117}
2118
2119/**
2120 * Checks whether the given exception causes a VM-exit.
2121 *
2122 * The exception types include hardware exceptions, software exceptions (#BP, #OF)
2123 * and privileged software exceptions (#DB generated by INT1/ICEBP).
2124 *
2125 * Software interrupts do -not- cause VM-exits and hence must not be used with this
2126 * function.
2127 *
2128 * @returns @c true if the exception causes a VM-exit, @c false otherwise.
2129 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2130 * @param pCtx Pointer to the context.
2131 * @param uVector The exception vector.
2132 * @param uErrCode The error code associated with the exception. Pass 0 if not
2133 * applicable.
2134 */
2135DECLINLINE(bool) CPUMIsGuestVmxXcptInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector, uint32_t uErrCode)
2136{
2137 Assert(uVector <= X86_XCPT_LAST);
2138
2139 RT_NOREF(pVCpu);
2140 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2141 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2142 Assert(pVmcs);
2143
2144 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
2145 if (uVector == X86_XCPT_NMI)
2146 return RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
2147
2148 /* Page-faults are subject to masking using its error code. */
2149 uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
2150 if (uVector == X86_XCPT_PF)
2151 {
2152 uint32_t const fXcptPFMask = pVmcs->u32XcptPFMask;
2153 uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
2154 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
2155 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
2156 }
2157
2158 /* Consult the exception bitmap for all other exceptions. */
2159 if (fXcptBitmap & RT_BIT(uVector))
2160 return true;
2161 return false;
2162}
2163
2164/**
2165 * Implements VMSucceed for VMX instruction success.
2166 *
2167 * @param pCtx Pointer to the context.
2168 */
2169DECLINLINE(void) CPUMSetGuestVmxVmSucceed(PCPUMCTX pCtx)
2170{
2171 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2172}
2173
2174/**
2175 * Implements VMFailInvalid for VMX instruction failure.
2176 *
2177 * @param pCtx Pointer to the context.
2178 */
2179DECLINLINE(void) CPUMSetGuestVmxVmFailInvalid(PCPUMCTX pCtx)
2180{
2181 pCtx->eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2182 pCtx->eflags.u32 |= X86_EFL_CF;
2183}
2184
2185/**
2186 * Implements VMFailValid for VMX instruction failure.
2187 *
2188 * @param pCtx Pointer to the context.
2189 * @param enmInsErr The VM instruction error.
2190 */
2191DECLINLINE(void) CPUMSetGuestVmxVmFailValid(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2192{
2193 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2194 pCtx->eflags.u32 |= X86_EFL_ZF;
2195 pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32RoVmInstrError = enmInsErr;
2196}
2197
2198/**
2199 * Implements VMFail for VMX instruction failure.
2200 *
2201 * @param pCtx Pointer to the context.
2202 * @param enmInsErr The VM instruction error.
2203 */
2204DECLINLINE(void) CPUMSetGuestVmxVmFail(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2205{
2206 if (pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
2207 CPUMSetGuestVmxVmFailValid(pCtx, enmInsErr);
2208 else
2209 CPUMSetGuestVmxVmFailInvalid(pCtx);
2210}
2211
2212/**
2213 * Returns the guest-physical address of the APIC-access page when executing a
2214 * nested-guest.
2215 *
2216 * @returns The APIC-access page guest-physical address.
2217 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2218 * @param pCtx Pointer to the context.
2219 */
2220DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2221{
2222 RT_NOREF(pVCpu);
2223 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2224 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2225 Assert(pVmcs);
2226 return pVmcs->u64AddrApicAccess.u;
2227}
2228
2229/**
2230 * Gets the nested-guest CR0 subject to the guest/host mask and the read-shadow.
2231 *
2232 * @returns The nested-guest CR0.
2233 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2234 * @param pCtx Pointer to the context.
2235 * @param fGstHostMask The CR0 guest/host mask to use.
2236 */
2237DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr0(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fGstHostMask)
2238{
2239 /*
2240 * For each CR0 bit owned by the host, the corresponding bit from the
2241 * CR0 read shadow is loaded. For each CR0 bit that is not owned by the host,
2242 * the corresponding bit from the guest CR0 is loaded.
2243 *
2244 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2245 */
2246 RT_NOREF(pVCpu);
2247 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2248 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2249 Assert(pVmcs);
2250 uint64_t const uGstCr0 = pCtx->cr0;
2251 uint64_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2252 return (fReadShadow & fGstHostMask) | (uGstCr0 & ~fGstHostMask);
2253}
2254
2255/**
2256 * Gets the nested-guest CR4 subject to the guest/host mask and the read-shadow.
2257 *
2258 * @returns The nested-guest CR4.
2259 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2260 * @param pCtx Pointer to the context.
2261 * @param fGstHostMask The CR4 guest/host mask to use.
2262 */
2263DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr4(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fGstHostMask)
2264{
2265 /*
2266 * For each CR4 bit owned by the host, the corresponding bit from the
2267 * CR4 read shadow is loaded. For each CR4 bit that is not owned by the host,
2268 * the corresponding bit from the guest CR4 is loaded.
2269 *
2270 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2271 */
2272 RT_NOREF(pVCpu);
2273 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2274 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2275 Assert(pVmcs);
2276 uint64_t const uGstCr4 = pCtx->cr4;
2277 uint64_t const fReadShadow = pVmcs->u64Cr4ReadShadow.u;
2278 return (fReadShadow & fGstHostMask) | (uGstCr4 & ~fGstHostMask);
2279}
2280
2281/**
2282 * Checks whether the LMSW access causes a VM-exit or not.
2283 *
2284 * @returns @c true if the LMSW access causes a VM-exit, @c false otherwise.
2285 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2286 * @param pCtx Pointer to the context.
2287 * @param uNewMsw The LMSW source operand (the Machine Status Word).
2288 */
2289DECLINLINE(bool) CPUMIsGuestVmxLmswInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t uNewMsw)
2290{
2291 /*
2292 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
2293 *
2294 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
2295 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2296 */
2297 RT_NOREF(pVCpu);
2298 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2299 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2300 Assert(pVmcs);
2301
2302 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
2303 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2304
2305 /*
2306 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
2307 * CR0.PE case first, before the rest of the bits in the MSW.
2308 *
2309 * If CR0.PE is owned by the host and CR0.PE differs between the
2310 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
2311 */
2312 if ( (fGstHostMask & X86_CR0_PE)
2313 && (uNewMsw & X86_CR0_PE)
2314 && !(fReadShadow & X86_CR0_PE))
2315 return true;
2316
2317 /*
2318 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
2319 * bits differ between the MSW (source operand) and the read-shadow, we must
2320 * cause a VM-exit.
2321 */
2322 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2323 if ((fReadShadow & fGstHostLmswMask) != (uNewMsw & fGstHostLmswMask))
2324 return true;
2325
2326 return false;
2327}
2328
2329/**
2330 * Checks whether the Mov-to-CR0/CR4 access causes a VM-exit or not.
2331 *
2332 * @returns @c true if the Mov CRX access causes a VM-exit, @c false otherwise.
2333 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2334 * @param pCtx Pointer to the context.
2335 * @param iCrReg The control register number (must be 0 or 4).
2336 * @param uNewCrX The CR0/CR4 value being written.
2337 */
2338DECLINLINE(bool) CPUMIsGuestVmxMovToCr0Cr4InterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t iCrReg, uint64_t uNewCrX)
2339{
2340 /*
2341 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
2342 * corresponding bits differ between the source operand and the read-shadow,
2343 * we must cause a VM-exit.
2344 *
2345 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2346 */
2347 RT_NOREF(pVCpu);
2348 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2349 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2350 Assert(pVmcs);
2351 Assert(iCrReg == 0 || iCrReg == 4);
2352
2353 uint64_t fGstHostMask;
2354 uint64_t fReadShadow;
2355 if (iCrReg == 0)
2356 {
2357 fGstHostMask = pVmcs->u64Cr0Mask.u;
2358 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2359 }
2360 else
2361 {
2362 fGstHostMask = pVmcs->u64Cr4Mask.u;
2363 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
2364 }
2365
2366 if ((fReadShadow & fGstHostMask) != (uNewCrX & fGstHostMask))
2367 {
2368 Assert(fGstHostMask != 0);
2369 return true;
2370 }
2371
2372 return false;
2373}
2374
2375/**
2376 * Returns whether the guest has an active, current VMCS.
2377 *
2378 * @returns @c true if the guest has an active, current VMCS, @c false otherwise.
2379 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2380 * @param pCtx Pointer to the context.
2381 */
2382DECLINLINE(bool) CPUMIsGuestVmxCurrentVmcsValid(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2383{
2384 RT_NOREF(pVCpu);
2385 RTGCPHYS const GCPhysVmcs = pCtx->hwvirt.vmx.GCPhysVmcs;
2386 return RT_BOOL(GCPhysVmcs != NIL_RTGCPHYS);
2387}
2388
2389/**
2390 * Gets the nested-guest virtual-APIC page.
2391 *
2392 * @returns The virtual-APIC page.
2393 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2394 * @param pCtx Pointer to the context.
2395 * @param pHCPhysVirtApicPage Where to store the host-physical address of the
2396 * virtual-APIC page.
2397 */
2398DECLINLINE(void *) CPUMGetGuestVmxVirtApicPage(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTHCPHYS pHCPhysVirtApicPage)
2399{
2400 RT_NOREF(pVCpu);
2401 Assert(pHCPhysVirtApicPage);
2402 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
2403 *pHCPhysVirtApicPage = pCtx->hwvirt.vmx.HCPhysVirtApicPage;
2404 return pCtx->hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
2405}
2406
2407# endif /* !IN_RC */
2408
2409/**
2410 * Checks whether the VMX nested-guest is in a state to receive physical (APIC)
2411 * interrupts.
2412 *
2413 * @returns @c true if it's ready, @c false otherwise.
2414 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2415 * @param pCtx The guest-CPU context.
2416 */
2417DECLINLINE(bool) CPUMIsGuestVmxPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2418{
2419#ifdef IN_RC
2420 RT_NOREF2(pVCpu, pCtx);
2421 AssertReleaseFailedReturn(false);
2422#else
2423 RT_NOREF(pVCpu);
2424 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2425 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2426#endif
2427}
2428
2429/**
2430 * Checks whether the VMX nested-guest is blocking virtual-NMIs.
2431 *
2432 * @returns @c true if it's blocked, @c false otherwise.
2433 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2434 * @param pCtx The guest-CPU context.
2435 */
2436DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2437{
2438#ifdef IN_RC
2439 RT_NOREF2(pVCpu, pCtx);
2440 AssertReleaseFailedReturn(false);
2441#else
2442 /*
2443 * Return the state of virtual-NMI blocking, if we are executing a
2444 * VMX nested-guest with virtual-NMIs enabled.
2445 */
2446 RT_NOREF(pVCpu);
2447 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2448 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI));
2449 return pCtx->hwvirt.vmx.fVirtNmiBlocking;
2450#endif
2451}
2452
2453/**
2454 * Sets or clears VMX nested-guest virtual-NMI blocking.
2455 *
2456 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2457 * @param pCtx The guest-CPU context.
2458 * @param fBlocking Whether virtual-NMI blocking is in effect or not.
2459 */
2460DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCVMCPU pVCpu, PCPUMCTX pCtx, bool fBlocking)
2461{
2462#ifdef IN_RC
2463 RT_NOREF3(pVCpu, pCtx, fBlocking);
2464 AssertReleaseFailedReturnVoid();
2465#else
2466 RT_NOREF(pVCpu);
2467 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2468 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI));
2469 pCtx->hwvirt.vmx.fVirtNmiBlocking = fBlocking;
2470#endif
2471}
2472
2473/**
2474 * Checks whether the VMX nested-guest is in a state to receive virtual interrupts
2475 * (those injected with the "virtual-interrupt delivery" feature).
2476 *
2477 * @returns @c true if it's ready, @c false otherwise.
2478 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2479 * @param pCtx The guest-CPU context.
2480 */
2481DECLINLINE(bool) CPUMIsGuestVmxVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2482{
2483#ifdef IN_RC
2484 RT_NOREF2(pVCpu, pCtx);
2485 AssertReleaseFailedReturn(false);
2486#else
2487 RT_NOREF(pVCpu);
2488 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2489 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2490#endif
2491}
2492
2493#endif /* !IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS && RT_ARCH_AMD64 */
2494
2495/** @} */
2496
2497
2498/** @name Hypervisor Register Getters.
2499 * @{ */
2500VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu);
2501VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu);
2502VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu);
2503VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu);
2504VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu);
2505VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu);
2506VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
2507/** @} */
2508
2509/** @name Hypervisor Register Setters.
2510 * @{ */
2511VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
2512VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
2513VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
2514VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
2515VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
2516VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
2517VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
2518VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg, bool fForceHyper);
2519/** @} */
2520
2521VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
2522#ifdef VBOX_INCLUDED_vmm_cpumctx_h
2523VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu);
2524#endif
2525VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu);
2526
2527/** @name Changed flags.
2528 * These flags are used to keep track of which important registers have
2529 * been changed since they were last reset. The only component allowed
2530 * to clear them is REM!
2531 * @{
2532 */
2533#define CPUM_CHANGED_FPU_REM RT_BIT(0)
2534#define CPUM_CHANGED_CR0 RT_BIT(1)
2535#define CPUM_CHANGED_CR4 RT_BIT(2)
2536#define CPUM_CHANGED_GLOBAL_TLB_FLUSH RT_BIT(3)
2537#define CPUM_CHANGED_CR3 RT_BIT(4)
2538#define CPUM_CHANGED_GDTR RT_BIT(5)
2539#define CPUM_CHANGED_IDTR RT_BIT(6)
2540#define CPUM_CHANGED_LDTR RT_BIT(7)
2541#define CPUM_CHANGED_TR RT_BIT(8) /**< Currently unused. */
2542#define CPUM_CHANGED_SYSENTER_MSR RT_BIT(9)
2543#define CPUM_CHANGED_HIDDEN_SEL_REGS RT_BIT(10) /**< Currently unused. */
2544#define CPUM_CHANGED_CPUID RT_BIT(11)
2545#define CPUM_CHANGED_ALL ( CPUM_CHANGED_FPU_REM \
2546 | CPUM_CHANGED_CR0 \
2547 | CPUM_CHANGED_CR4 \
2548 | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
2549 | CPUM_CHANGED_CR3 \
2550 | CPUM_CHANGED_GDTR \
2551 | CPUM_CHANGED_IDTR \
2552 | CPUM_CHANGED_LDTR \
2553 | CPUM_CHANGED_TR \
2554 | CPUM_CHANGED_SYSENTER_MSR \
2555 | CPUM_CHANGED_HIDDEN_SEL_REGS \
2556 | CPUM_CHANGED_CPUID )
2557/** @} */
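
/* Example (illustrative): flagging CR3 and a global TLB flush after a paging
 * mode change so that REM resynchronizes, using CPUMSetChangedFlags()
 * declared below.
 *
 *     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3 | CPUM_CHANGED_GLOBAL_TLB_FLUSH);
 */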
2558
2559VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
2560VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl);
2561VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels);
2562VMMDECL(bool) CPUMSupportsXSave(PVM pVM);
2563VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM);
2564VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM);
2565VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
2566VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
2567VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
2568VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
2569VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu);
2570VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
2571VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
2572VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu);
2573VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu);
2574VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu);
2575VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu);
2576VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
2577VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM);
2578VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM);
2579VMMDECL(uint64_t) CPUMGetGuestEferMsrValidMask(PVM pVM);
2580VMMDECL(int) CPUMIsGuestEferMsrWriteValid(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
2581 uint64_t *puValidEfer);
2582VMMDECL(void) CPUMSetGuestEferMsrNoChecks(PVMCPUCC pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
2583VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue);
2584
2585
2586/** Guest CPU interruptibility level, see CPUMGetGuestInterruptibility(). */
2587typedef enum CPUMINTERRUPTIBILITY
2588{
2589 CPUMINTERRUPTIBILITY_INVALID = 0,
2590 CPUMINTERRUPTIBILITY_UNRESTRAINED,
2591 CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
2592 CPUMINTERRUPTIBILITY_INT_DISABLED,
2593 CPUMINTERRUPTIBILITY_INT_INHIBITED,
2594 CPUMINTERRUPTIBILITY_NMI_INHIBIT,
2595 CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
2596 CPUMINTERRUPTIBILITY_END,
2597 CPUMINTERRUPTIBILITY_32BIT_HACK = 0x7fffffff
2598} CPUMINTERRUPTIBILITY;
2599
2600VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
2601VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu);
2602VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock);
2603
2604/** @name Typical scalable bus frequency values.
2605 * @{ */
2606/** Special internal value indicating that we don't know the frequency.
2607 * @internal */
2608#define CPUM_SBUSFREQ_UNKNOWN UINT64_C(1)
2609#define CPUM_SBUSFREQ_100MHZ UINT64_C(100000000)
2610#define CPUM_SBUSFREQ_133MHZ UINT64_C(133333333)
2611#define CPUM_SBUSFREQ_167MHZ UINT64_C(166666666)
2612#define CPUM_SBUSFREQ_200MHZ UINT64_C(200000000)
2613#define CPUM_SBUSFREQ_267MHZ UINT64_C(266666666)
2614#define CPUM_SBUSFREQ_333MHZ UINT64_C(333333333)
2615#define CPUM_SBUSFREQ_400MHZ UINT64_C(400000000)
2616/** @} */
2617
2618
2619#ifdef IN_RING3
2620/** @defgroup grp_cpum_r3 The CPUM ring-3 API
2621 * @{
2622 */
2623
2624VMMR3DECL(int) CPUMR3Init(PVM pVM);
2625VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
2626VMMR3DECL(void) CPUMR3LogCpuIdAndMsrFeatures(PVM pVM);
2627VMMR3DECL(void) CPUMR3Relocate(PVM pVM);
2628VMMR3DECL(int) CPUMR3Term(PVM pVM);
2629VMMR3DECL(void) CPUMR3Reset(PVM pVM);
2630VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
2631VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM);
2632VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
2633
2634VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
2635VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
2636VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
2637 uint8_t bModel, uint8_t bStepping);
2638VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch);
2639VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
2640VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
2641VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
2642VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
2643VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor);
2644VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void);
2645
2646VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
2647
2648/** @} */
2649#endif /* IN_RING3 */
2650
2651#ifdef IN_RING0
2652/** @defgroup grp_cpum_r0 The CPUM ring-0 API
2653 * @{
2654 */
2655VMMR0_INT_DECL(int) CPUMR0ModuleInit(void);
2656VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void);
2657VMMR0_INT_DECL(int) CPUMR0InitVM(PVMCC pVM);
2658DECLASM(void) CPUMR0RegisterVCpuThread(PVMCPUCC pVCpu);
2659DECLASM(void) CPUMR0TouchHostFpu(void);
2660VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVMCC pVM, PVMCPUCC pVCpu);
2661VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVMCC pVM, PVMCPUCC pVCpu);
2662VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu);
2663VMMR0_INT_DECL(int) CPUMR0SaveHostDebugState(PVMCC pVM, PVMCPUCC pVCpu);
2664VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu, bool fDr6);
2665VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPUCC pVCpu, bool fDr6);
2666
2667VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPUCC pVCpu, bool fDr6);
2668VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPUCC pVCpu, bool fDr6);
2669#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
2670VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPUCC pVCpu, uint32_t iHostCpuSet);
2671#endif
2672
2673/** @} */
2674#endif /* IN_RING0 */
2675
2676/** @defgroup grp_cpum_rz The CPUM raw-mode and ring-0 context API
2677 * @{
2678 */
2679VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPUCC pVCpu);
2680VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPUCC pVCpu);
2681VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPUCC pVCpu);
2682VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPUCC pVCpu);
2683VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPUCC pVCpu);
2684/** @} */
2685
2686
2687#endif /* !VBOX_FOR_DTRACE_LIB */
2688/** @} */
2689RT_C_DECLS_END
2690
2691
2692#endif /* !VBOX_INCLUDED_vmm_cpum_h */
2693