source: vbox/trunk/include/VBox/vmm/cpum.h@87542

Last change on this file since 87542 was 87523, checked in by vboxsync, 4 years ago: VMM/CPUM/HM: No else after return.

/** @file
 * CPUM - CPU Monitor(/ Manager).
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef VBOX_INCLUDED_vmm_cpum_h
#define VBOX_INCLUDED_vmm_cpum_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <iprt/x86.h>
#include <VBox/types.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_ARM64)
# include <VBox/vmm/cpumctx.h>
#endif
#include <VBox/vmm/stam.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/vmm/hm_vmx.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_cpum      The CPU Monitor / Manager API
 * @ingroup grp_vmm
 * @{
 */

/**
 * CPUID feature to set or clear.
 */
typedef enum CPUMCPUIDFEATURE
{
    CPUMCPUIDFEATURE_INVALID = 0,
    /** The APIC feature bit. (Std+Ext)
     * Note! There is a per-cpu flag for masking this CPUID feature bit when the
     *       APICBASE.ENABLED bit is zero.  So, this feature is only set/cleared
     *       at VM construction time like all the others.  This wasn't always the
     *       case; it is new with 5.1. */
    CPUMCPUIDFEATURE_APIC,
    /** The sysenter/sysexit feature bit. (Std) */
    CPUMCPUIDFEATURE_SEP,
    /** The SYSCALL/SYSRET feature bit (64-bit mode only on Intel CPUs). (Ext) */
    CPUMCPUIDFEATURE_SYSCALL,
    /** The PAE feature bit. (Std+Ext) */
    CPUMCPUIDFEATURE_PAE,
    /** The NX feature bit. (Ext) */
    CPUMCPUIDFEATURE_NX,
    /** The LAHF/SAHF feature bit (64-bit mode only). (Ext) */
    CPUMCPUIDFEATURE_LAHF,
    /** The LONG MODE feature bit. (Ext) */
    CPUMCPUIDFEATURE_LONG_MODE,
    /** The PAT feature bit. (Std+Ext) */
    CPUMCPUIDFEATURE_PAT,
    /** The x2APIC feature bit. (Std) */
    CPUMCPUIDFEATURE_X2APIC,
    /** The RDTSCP feature bit. (Ext) */
    CPUMCPUIDFEATURE_RDTSCP,
    /** The Hypervisor Present bit. (Std) */
    CPUMCPUIDFEATURE_HVP,
    /** The MWait Extensions bits. (Std) */
    CPUMCPUIDFEATURE_MWAIT_EXTS,
    /** The speculation control feature bits. (Std+Ext) */
    CPUMCPUIDFEATURE_SPEC_CTRL,
    /** 32bit hackishness. */
    CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
} CPUMCPUIDFEATURE;
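
/* Usage sketch (illustrative, not part of the original header): features in
 * this enum are toggled per-VM at construction time.  This assumes the ring-3
 * CPUMR3SetGuestCpuIdFeature / CPUMR3ClearGuestCpuIdFeature APIs, which live
 * in CPUM but are outside this excerpt.
 * @code
 *     // Expose NX to the guest and hide RDTSCP (error handling omitted).
 *     CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 *     CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
 * @endcode
 */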

/**
 * CPU Vendor.
 */
typedef enum CPUMCPUVENDOR
{
    CPUMCPUVENDOR_INVALID = 0,
    CPUMCPUVENDOR_INTEL,
    CPUMCPUVENDOR_AMD,
    CPUMCPUVENDOR_VIA,
    CPUMCPUVENDOR_CYRIX,
    CPUMCPUVENDOR_SHANGHAI,
    CPUMCPUVENDOR_HYGON,
    CPUMCPUVENDOR_UNKNOWN,
    /** 32bit hackishness. */
    CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
} CPUMCPUVENDOR;


/**
 * X86 and AMD64 CPU microarchitectures and processor generations.
 *
 * @remarks The separation here is sometimes a little bit too finely grained,
 *          and the differences are often more a matter of processor generation
 *          than microarchitecture.  This can still be useful, so we'll provide
 *          functions for getting at more coarse-grained info.
 */
typedef enum CPUMMICROARCH
{
    kCpumMicroarch_Invalid = 0,

    kCpumMicroarch_Intel_First,

    kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
    kCpumMicroarch_Intel_80186,
    kCpumMicroarch_Intel_80286,
    kCpumMicroarch_Intel_80386,
    kCpumMicroarch_Intel_80486,
    kCpumMicroarch_Intel_P5,

    kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6_II,
    kCpumMicroarch_Intel_P6_III,

    kCpumMicroarch_Intel_P6_M_Banias,
    kCpumMicroarch_Intel_P6_M_Dothan,
    kCpumMicroarch_Intel_Core_Yonah,        /**< Core, also known as Enhanced Pentium M. */

    kCpumMicroarch_Intel_Core2_First,
    kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First, /**< 65nm, Merom/Conroe/Kentsfield/Tigerton */
    kCpumMicroarch_Intel_Core2_Penryn,      /**< 45nm, Penryn/Wolfdale/Yorkfield/Harpertown */
    kCpumMicroarch_Intel_Core2_End,

    kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Westmere,
    kCpumMicroarch_Intel_Core7_SandyBridge,
    kCpumMicroarch_Intel_Core7_IvyBridge,
    kCpumMicroarch_Intel_Core7_Haswell,
    kCpumMicroarch_Intel_Core7_Broadwell,
    kCpumMicroarch_Intel_Core7_Skylake,
    kCpumMicroarch_Intel_Core7_KabyLake,
    kCpumMicroarch_Intel_Core7_CoffeeLake,
    kCpumMicroarch_Intel_Core7_WhiskeyLake,
    kCpumMicroarch_Intel_Core7_CascadeLake,
    kCpumMicroarch_Intel_Core7_CannonLake,
    kCpumMicroarch_Intel_Core7_IceLake,
    kCpumMicroarch_Intel_Core7_TigerLake,
    kCpumMicroarch_Intel_Core7_End,

    kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Lincroft,     /**< Second generation Bonnell (44nm). */
    kCpumMicroarch_Intel_Atom_Saltwell,     /**< 32nm shrink of Bonnell. */
    kCpumMicroarch_Intel_Atom_Silvermont,   /**< 22nm */
    kCpumMicroarch_Intel_Atom_Airmount,     /**< 14nm */
    kCpumMicroarch_Intel_Atom_Goldmont,     /**< 14nm */
    kCpumMicroarch_Intel_Atom_GoldmontPlus, /**< 14nm */
    kCpumMicroarch_Intel_Atom_Unknown,
    kCpumMicroarch_Intel_Atom_End,


    kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsFerry = kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsCorner,
    kCpumMicroarch_Intel_Phi_KnightsLanding,
    kCpumMicroarch_Intel_Phi_KnightsHill,
    kCpumMicroarch_Intel_Phi_KnightsMill,
    kCpumMicroarch_Intel_Phi_End,

    kCpumMicroarch_Intel_P6_Core_Atom_End,

    kCpumMicroarch_Intel_NB_First,
    kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
    kCpumMicroarch_Intel_NB_Northwood,      /**< 130nm */
    kCpumMicroarch_Intel_NB_Prescott,       /**< 90nm */
    kCpumMicroarch_Intel_NB_Prescott2M,     /**< 90nm */
    kCpumMicroarch_Intel_NB_CedarMill,      /**< 65nm */
    kCpumMicroarch_Intel_NB_Gallatin,       /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
    kCpumMicroarch_Intel_NB_Unknown,
    kCpumMicroarch_Intel_NB_End,

    kCpumMicroarch_Intel_Unknown,
    kCpumMicroarch_Intel_End,

    kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am386,
    kCpumMicroarch_AMD_Am486,
    kCpumMicroarch_AMD_Am486Enh,            /**< Covers Am5x86 as well. */
    kCpumMicroarch_AMD_K5,
    kCpumMicroarch_AMD_K6,

    kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Spitfire,
    kCpumMicroarch_AMD_K7_Thunderbird,
    kCpumMicroarch_AMD_K7_Morgan,
    kCpumMicroarch_AMD_K7_Thoroughbred,
    kCpumMicroarch_AMD_K7_Barton,
    kCpumMicroarch_AMD_K7_Unknown,
    kCpumMicroarch_AMD_K7_End,

    kCpumMicroarch_AMD_K8_First,
    kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
    kCpumMicroarch_AMD_K8_90nm,             /**< 90nm shrink */
    kCpumMicroarch_AMD_K8_90nm_DualCore,    /**< 90nm with two cores. */
    kCpumMicroarch_AMD_K8_90nm_AMDV,        /**< 90nm with AMD-V (usually) and two cores (usually). */
    kCpumMicroarch_AMD_K8_65nm,             /**< 65nm shrink. */
    kCpumMicroarch_AMD_K8_End,

    kCpumMicroarch_AMD_K10,
    kCpumMicroarch_AMD_K10_Lion,
    kCpumMicroarch_AMD_K10_Llano,
    kCpumMicroarch_AMD_Bobcat,
    kCpumMicroarch_AMD_Jaguar,

    kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Piledriver,
    kCpumMicroarch_AMD_15h_Steamroller,     /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Excavator,       /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Unknown,
    kCpumMicroarch_AMD_15h_End,

    kCpumMicroarch_AMD_16h_First,
    kCpumMicroarch_AMD_16h_End,

    kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_End,

    kCpumMicroarch_AMD_Unknown,
    kCpumMicroarch_AMD_End,

    kCpumMicroarch_Hygon_First,
    kCpumMicroarch_Hygon_Dhyana = kCpumMicroarch_Hygon_First,
    kCpumMicroarch_Hygon_Unknown,
    kCpumMicroarch_Hygon_End,

    kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C2,
    kCpumMicroarch_Centaur_C3,
    kCpumMicroarch_VIA_C3_M2,
    kCpumMicroarch_VIA_C3_C5A,              /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
    kCpumMicroarch_VIA_C3_C5B,              /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
    kCpumMicroarch_VIA_C3_C5C,              /**< 130nm Ezra - C3, Eden ESP. */
    kCpumMicroarch_VIA_C3_C5N,              /**< 130nm Ezra-T - C3. */
    kCpumMicroarch_VIA_C3_C5XL,             /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
    kCpumMicroarch_VIA_C3_C5P,              /**< 130nm Nehemiah+ - C3. */
    kCpumMicroarch_VIA_C7_C5J,              /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
    kCpumMicroarch_VIA_Isaiah,
    kCpumMicroarch_VIA_Unknown,
    kCpumMicroarch_VIA_End,

    kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Wudaokou = kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Unknown,
    kCpumMicroarch_Shanghai_End,

    kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_M1,
    kCpumMicroarch_Cyrix_MediaGX,
    kCpumMicroarch_Cyrix_MediaGXm,
    kCpumMicroarch_Cyrix_M2,
    kCpumMicroarch_Cyrix_Unknown,
    kCpumMicroarch_Cyrix_End,

    kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V30,
    kCpumMicroarch_NEC_End,

    kCpumMicroarch_Unknown,

    kCpumMicroarch_32BitHack = 0x7fffffff
} CPUMMICROARCH;


/** Predicate macro for catching NetBurst CPUs. */
#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)

/** Predicate macro for catching Core7 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)

/** Predicate macro for catching Core 2 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE2(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core2_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core2_End)

/** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
#define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)

/** Predicate macro for catching AMD Family 0Fh CPUs (aka K8). */
#define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)

/** Predicate macro for catching AMD Family 10H CPUs (aka K10). */
#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)

/** Predicate macro for catching AMD Family 11H CPUs (aka Lion). */
#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)

/** Predicate macro for catching AMD Family 12H CPUs (aka Llano). */
#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)

/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat). */
#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)

/** Predicate macro for catching AMD Family 15H CPUs (Bulldozer and its
 *  descendants). */
#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)

/** Predicate macro for catching AMD Family 16H CPUs. */
#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)

/** Predicate macro for catching AMD Zen Family CPUs. */
#define CPUMMICROARCH_IS_AMD_FAM_ZEN(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_Zen_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_Zen_End)
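
/* Usage sketch (illustrative, not part of the original header): the predicates
 * simply range-check a CPUMMICROARCH value, here obtained via the
 * CPUMGetGuestMicroarch() accessor declared further down in this file; the
 * erratum itself is hypothetical.
 * @code
 *     CPUMMICROARCH const enmMicroarch = CPUMGetGuestMicroarch(pVM);
 *     if (CPUMMICROARCH_IS_AMD_FAM_0FH(enmMicroarch))
 *     {
 *         // Apply a K8-only workaround here.
 *     }
 * @endcode
 */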


/**
 * CPUID leaf.
 *
 * @remarks This structure is used by the patch manager and is therefore
 *          more or less set in stone.
 */
typedef struct CPUMCPUIDLEAF
{
    /** The leaf number. */
    uint32_t uLeaf;
    /** The sub-leaf number. */
    uint32_t uSubLeaf;
    /** Sub-leaf mask.  This is 0 when sub-leaves aren't used. */
    uint32_t fSubLeafMask;

    /** The EAX value. */
    uint32_t uEax;
    /** The EBX value. */
    uint32_t uEbx;
    /** The ECX value. */
    uint32_t uEcx;
    /** The EDX value. */
    uint32_t uEdx;

    /** Flags. */
    uint32_t fFlags;
} CPUMCPUIDLEAF;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMCPUIDLEAF, 32);
#endif
/** Pointer to a CPUID leaf. */
typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
/** Pointer to a const CPUID leaf. */
typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;

/** @name CPUMCPUIDLEAF::fFlags
 * @{ */
/** Indicates a working Intel leaf 0xb where the lower 8 ECX bits are not
 *  modified and EDX contains the extended APIC ID. */
#define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES    RT_BIT_32(0)
/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID            RT_BIT_32(1)
/** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE            RT_BIT_32(2)
/** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC               RT_BIT_32(3)
/** Mask of the valid flags. */
#define CPUMCPUIDLEAF_F_VALID_MASK                  UINT32_C(0xf)
/** @} */
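
/* Illustrative sketch (not part of the original header): a statically
 * initialized leaf using designated initializers.  The leaf-1 values shown are
 * made up for the example; X86_CPUID_FEATURE_EDX_APIC comes from iprt/x86.h,
 * which is included above.
 * @code
 *     static CPUMCPUIDLEAF const s_Leaf1 =
 *     {
 *         .uLeaf        = 0x00000001,
 *         .uSubLeaf     = 0,
 *         .fSubLeafMask = 0,                          // No sub-leaves.
 *         .uEax         = 0x000306a9,                 // Fam/model/stepping (example).
 *         .uEbx         = 0,
 *         .uEcx         = 0,
 *         .uEdx         = X86_CPUID_FEATURE_EDX_APIC,
 *         .fFlags       = CPUMCPUIDLEAF_F_CONTAINS_APIC
 *     };
 * @endcode
 */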

/**
 * Method used to deal with unknown CPUID leaves.
 * @remarks Used in patch code.
 */
typedef enum CPUMUNKNOWNCPUID
{
    /** Invalid zero value. */
    CPUMUNKNOWNCPUID_INVALID = 0,
    /** Use given default values (DefCpuId). */
    CPUMUNKNOWNCPUID_DEFAULTS,
    /** Return the last standard leaf.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF,
    /** Return the last standard leaf, with ecx observed.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
    /** The register values are passed through unmodified. */
    CPUMUNKNOWNCPUID_PASSTHRU,
    /** End of valid values. */
    CPUMUNKNOWNCPUID_END,
    /** Ensure 32-bit type. */
    CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
} CPUMUNKNOWNCPUID;
/** Pointer to unknown CPUID leaf method. */
typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;
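
/* Illustrative sketch (not part of the original header): how a CPUID
 * interceptor might dispatch on this method.  The surrounding handler and the
 * DefCpuId variable are hypothetical.
 * @code
 *     switch (enmUnknownMethod)
 *     {
 *         case CPUMUNKNOWNCPUID_DEFAULTS:
 *             *pCpuId = DefCpuId;    // Hand back the configured defaults.
 *             break;
 *         case CPUMUNKNOWNCPUID_PASSTHRU:
 *             break;                 // Leave the register values untouched.
 *         default:
 *             break;                 // LAST_STD_LEAF variants need leaf lookup.
 *     }
 * @endcode
 */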


/**
 * The register set returned by a CPUID operation.
 */
typedef struct CPUMCPUID
{
    uint32_t uEax;
    uint32_t uEbx;
    uint32_t uEcx;
    uint32_t uEdx;
} CPUMCPUID;
/** Pointer to a CPUID leaf. */
typedef CPUMCPUID *PCPUMCPUID;
/** Pointer to a const CPUID leaf. */
typedef const CPUMCPUID *PCCPUMCPUID;
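
/* Illustrative sketch (not part of the original header): filling a CPUMCPUID
 * with the host's leaf 0 values.  This assumes the IPRT ASMCpuId helper from
 * iprt/asm-amd64-x86.h, which this header does not itself include.
 * @code
 *     #include <iprt/asm-amd64-x86.h>
 *
 *     CPUMCPUID Leaf0;
 *     ASMCpuId(0, &Leaf0.uEax, &Leaf0.uEbx, &Leaf0.uEcx, &Leaf0.uEdx);
 *     // EBX/EDX/ECX now hold the vendor string, e.g. "GenuineIntel".
 * @endcode
 */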


/**
 * MSR read functions.
 */
typedef enum CPUMMSRRDFN
{
    /** Invalid zero value. */
    kCpumMsrRdFn_Invalid = 0,
    /** Return the CPUMMSRRANGE::uValue. */
    kCpumMsrRdFn_FixedValue,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be used in pair with
     * kCpumMsrWrFn_MsrAlias. */
    kCpumMsrRdFn_MsrAlias,
    /** Write-only register; GP on all read attempts. */
    kCpumMsrRdFn_WriteOnly,

    kCpumMsrRdFn_Ia32P5McAddr,
    kCpumMsrRdFn_Ia32P5McType,
    kCpumMsrRdFn_Ia32TimestampCounter,
    kCpumMsrRdFn_Ia32PlatformId,            /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32ApicBase,
    kCpumMsrRdFn_Ia32FeatureControl,
    kCpumMsrRdFn_Ia32BiosSignId,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32SmmMonitorCtl,
    kCpumMsrRdFn_Ia32PmcN,
    kCpumMsrRdFn_Ia32MonitorFilterLineSize,
    kCpumMsrRdFn_Ia32MPerf,
    kCpumMsrRdFn_Ia32APerf,
    kCpumMsrRdFn_Ia32MtrrCap,               /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrRdFn_Ia32MtrrDefType,
    kCpumMsrRdFn_Ia32Pat,
    kCpumMsrRdFn_Ia32SysEnterCs,
    kCpumMsrRdFn_Ia32SysEnterEsp,
    kCpumMsrRdFn_Ia32SysEnterEip,
    kCpumMsrRdFn_Ia32McgCap,
    kCpumMsrRdFn_Ia32McgStatus,
    kCpumMsrRdFn_Ia32McgCtl,
    kCpumMsrRdFn_Ia32DebugCtl,
    kCpumMsrRdFn_Ia32SmrrPhysBase,
    kCpumMsrRdFn_Ia32SmrrPhysMask,
    kCpumMsrRdFn_Ia32PlatformDcaCap,
    kCpumMsrRdFn_Ia32CpuDcaCap,
    kCpumMsrRdFn_Ia32Dca0Cap,
    kCpumMsrRdFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrRdFn_Ia32PerfStatus,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32PerfCtl,               /**< Range value returned. */
    kCpumMsrRdFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32PerfCapabilities,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32FixedCtrCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalStatus,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32PerfGlobalCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrRdFn_Ia32PebsEnable,
    kCpumMsrRdFn_Ia32ClockModulation,       /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermInterrupt,        /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermStatus,           /**< Range value returned. */
    kCpumMsrRdFn_Ia32Therm2Ctl,             /**< Range value returned. */
    kCpumMsrRdFn_Ia32MiscEnable,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrRdFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32DsArea,
    kCpumMsrRdFn_Ia32TscDeadline,
    kCpumMsrRdFn_Ia32X2ApicN,
    kCpumMsrRdFn_Ia32DebugInterface,
    kCpumMsrRdFn_Ia32VmxBasic,              /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxPinbasedCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcbasedCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxExitCtls,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEntryCtls,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxMisc,               /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmcsEnum,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcBasedCtls2,     /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEptVpidCap,         /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTruePinbasedCtls,   /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls,  /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueExitCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueEntryCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmFunc,             /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32SpecCtrl,
    kCpumMsrRdFn_Ia32ArchCapabilities,

    kCpumMsrRdFn_Amd64Efer,
    kCpumMsrRdFn_Amd64SyscallTarget,
    kCpumMsrRdFn_Amd64LongSyscallTarget,
    kCpumMsrRdFn_Amd64CompSyscallTarget,
    kCpumMsrRdFn_Amd64SyscallFlagMask,
    kCpumMsrRdFn_Amd64FsBase,
    kCpumMsrRdFn_Amd64GsBase,
    kCpumMsrRdFn_Amd64KernelGsBase,
    kCpumMsrRdFn_Amd64TscAux,

    kCpumMsrRdFn_IntelEblCrPowerOn,
    kCpumMsrRdFn_IntelI7CoreThreadCount,
    kCpumMsrRdFn_IntelP4EbcHardPowerOn,
    kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
    kCpumMsrRdFn_IntelP4EbcFrequencyId,
    kCpumMsrRdFn_IntelP6FsbFrequency,       /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPlatformInfo,
    kCpumMsrRdFn_IntelFlexRatio,            /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPkgCStConfigControl,
    kCpumMsrRdFn_IntelPmgIoCaptureBase,
    kCpumMsrRdFn_IntelLastBranchFromToN,
    kCpumMsrRdFn_IntelLastBranchFromN,
    kCpumMsrRdFn_IntelLastBranchToN,
    kCpumMsrRdFn_IntelLastBranchTos,
    kCpumMsrRdFn_IntelBblCrCtl,
    kCpumMsrRdFn_IntelBblCrCtl3,
    kCpumMsrRdFn_IntelI7TemperatureTarget,  /**< Range value returned. */
    kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
    kCpumMsrRdFn_IntelI7MiscPwrMgmt,
    kCpumMsrRdFn_IntelP6CrN,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrRdFn_IntelI7SandyAesNiCtl,
    kCpumMsrRdFn_IntelI7TurboRatioLimit,    /**< Returns range value. */
    kCpumMsrRdFn_IntelI7LbrSelect,
    kCpumMsrRdFn_IntelI7SandyErrorControl,
    kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
    kCpumMsrRdFn_IntelI7PowerCtl,
    kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
    kCpumMsrRdFn_IntelI7PebsLdLat,
    kCpumMsrRdFn_IntelI7PkgCnResidencyN,    /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7CoreCnResidencyN,   /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyVrMiscConfig,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgC2Residency,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPerfStatus,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerInfo,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerInfo,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0Policy,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PerfStatus,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1PowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1Policy,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpNominal,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
    kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
    kCpumMsrRdFn_IntelI7UncCBoxConfig,
    kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
    kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrRdFn_IntelI7SmiCount,
    kCpumMsrRdFn_IntelCore2EmttmCrTablesN,  /**< Range value returned. */
    kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrRdFn_IntelCore1ExtConfig,
    kCpumMsrRdFn_IntelCore1DtsCalControl,
    kCpumMsrRdFn_IntelCore2PeciControl,
    kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,

    kCpumMsrRdFn_P6LastBranchFromIp,
    kCpumMsrRdFn_P6LastBranchToIp,
    kCpumMsrRdFn_P6LastIntFromIp,
    kCpumMsrRdFn_P6LastIntToIp,

    kCpumMsrRdFn_AmdFam15hTscRate,
    kCpumMsrRdFn_AmdFam15hLwpCfg,
    kCpumMsrRdFn_AmdFam15hLwpCbAddr,
    kCpumMsrRdFn_AmdFam10hMc4MiscN,
    kCpumMsrRdFn_AmdK8PerfCtlN,
    kCpumMsrRdFn_AmdK8PerfCtrN,
    kCpumMsrRdFn_AmdK8SysCfg,               /**< Range value returned. */
    kCpumMsrRdFn_AmdK8HwCr,
    kCpumMsrRdFn_AmdK8IorrBaseN,
    kCpumMsrRdFn_AmdK8IorrMaskN,
    kCpumMsrRdFn_AmdK8TopOfMemN,
    kCpumMsrRdFn_AmdK8NbCfg1,
    kCpumMsrRdFn_AmdK8McXcptRedir,
    kCpumMsrRdFn_AmdK8CpuNameN,
    kCpumMsrRdFn_AmdK8HwThermalCtrl,        /**< Range value returned. */
    kCpumMsrRdFn_AmdK8SwThermalCtrl,
    kCpumMsrRdFn_AmdK8FidVidControl,        /**< Range value returned. */
    kCpumMsrRdFn_AmdK8FidVidStatus,         /**< Range value returned. */
    kCpumMsrRdFn_AmdK8McCtlMaskN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrRdFn_AmdK8IntPendingMessage,
    kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrRdFn_AmdFam10hPStateCurLimit,   /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateN,          /**< Returns range value.  This isn't a register index! */
    kCpumMsrRdFn_AmdFam10hCofVidControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCofVidStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrRdFn_AmdK8SmmBase,
    kCpumMsrRdFn_AmdK8SmmAddr,
    kCpumMsrRdFn_AmdK8SmmMask,
    kCpumMsrRdFn_AmdK8VmCr,
    kCpumMsrRdFn_AmdK8IgnNe,
    kCpumMsrRdFn_AmdK8SmmCtl,
    kCpumMsrRdFn_AmdK8VmHSavePa,
    kCpumMsrRdFn_AmdFam10hVmLockKey,
    kCpumMsrRdFn_AmdFam10hSmmLockKey,
    kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
    kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrRdFn_AmdK7MicrocodeCtl,         /**< Returns range value. */
    kCpumMsrRdFn_AmdK7ClusterIdMaybe,       /**< Returns range value. */
    kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrRdFn_AmdK8PatchLevel,           /**< Returns range value. */
    kCpumMsrRdFn_AmdK7DebugStatusMaybe,
    kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
    kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
    kCpumMsrRdFn_AmdK7NodeId,
    kCpumMsrRdFn_AmdK7DrXAddrMaskN,         /**< Takes register index. */
    kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrRdFn_AmdK7LoadStoreCfg,
    kCpumMsrRdFn_AmdK7InstrCacheCfg,
    kCpumMsrRdFn_AmdK7DataCacheCfg,
    kCpumMsrRdFn_AmdK7BusUnitCfg,
    kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
    kCpumMsrRdFn_AmdFam15hFpuCfg,
    kCpumMsrRdFn_AmdFam15hDecoderCfg,
    kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
    kCpumMsrRdFn_AmdFam15hExecUnitCfg,
    kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
    kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrRdFn_AmdFam10hIbsOpRip,
    kCpumMsrRdFn_AmdFam10hIbsOpData,
    kCpumMsrRdFn_AmdFam10hIbsOpData2,
    kCpumMsrRdFn_AmdFam10hIbsOpData3,
    kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsCtl,
    kCpumMsrRdFn_AmdFam14hIbsBrTarget,

    kCpumMsrRdFn_Gim,

    /** End of valid MSR read function indexes. */
    kCpumMsrRdFn_End
} CPUMMSRRDFN;

/**
 * MSR write functions.
 */
typedef enum CPUMMSRWRFN
{
    /** Invalid zero value. */
    kCpumMsrWrFn_Invalid = 0,
    /** Writes are ignored, the fWrGpMask is observed though. */
    kCpumMsrWrFn_IgnoreWrite,
    /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
    kCpumMsrWrFn_ReadOnly,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be used in pair with
     * kCpumMsrRdFn_MsrAlias. */
    kCpumMsrWrFn_MsrAlias,

    kCpumMsrWrFn_Ia32P5McAddr,
    kCpumMsrWrFn_Ia32P5McType,
    kCpumMsrWrFn_Ia32TimestampCounter,
    kCpumMsrWrFn_Ia32ApicBase,
    kCpumMsrWrFn_Ia32FeatureControl,
    kCpumMsrWrFn_Ia32BiosSignId,
    kCpumMsrWrFn_Ia32BiosUpdateTrigger,
    kCpumMsrWrFn_Ia32SmmMonitorCtl,
    kCpumMsrWrFn_Ia32PmcN,
    kCpumMsrWrFn_Ia32MonitorFilterLineSize,
    kCpumMsrWrFn_Ia32MPerf,
    kCpumMsrWrFn_Ia32APerf,
    kCpumMsrWrFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrWrFn_Ia32MtrrDefType,
    kCpumMsrWrFn_Ia32Pat,
    kCpumMsrWrFn_Ia32SysEnterCs,
    kCpumMsrWrFn_Ia32SysEnterEsp,
    kCpumMsrWrFn_Ia32SysEnterEip,
    kCpumMsrWrFn_Ia32McgStatus,
    kCpumMsrWrFn_Ia32McgCtl,
    kCpumMsrWrFn_Ia32DebugCtl,
    kCpumMsrWrFn_Ia32SmrrPhysBase,
    kCpumMsrWrFn_Ia32SmrrPhysMask,
    kCpumMsrWrFn_Ia32PlatformDcaCap,
    kCpumMsrWrFn_Ia32Dca0Cap,
    kCpumMsrWrFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrWrFn_Ia32PerfStatus,
    kCpumMsrWrFn_Ia32PerfCtl,
    kCpumMsrWrFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32PerfCapabilities,
    kCpumMsrWrFn_Ia32FixedCtrCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalStatus,
    kCpumMsrWrFn_Ia32PerfGlobalCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrWrFn_Ia32PebsEnable,
    kCpumMsrWrFn_Ia32ClockModulation,
    kCpumMsrWrFn_Ia32ThermInterrupt,
    kCpumMsrWrFn_Ia32ThermStatus,
    kCpumMsrWrFn_Ia32Therm2Ctl,
    kCpumMsrWrFn_Ia32MiscEnable,
    kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrWrFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32DsArea,
    kCpumMsrWrFn_Ia32TscDeadline,
    kCpumMsrWrFn_Ia32X2ApicN,
    kCpumMsrWrFn_Ia32DebugInterface,
    kCpumMsrWrFn_Ia32SpecCtrl,
    kCpumMsrWrFn_Ia32PredCmd,
    kCpumMsrWrFn_Ia32FlushCmd,

    kCpumMsrWrFn_Amd64Efer,
    kCpumMsrWrFn_Amd64SyscallTarget,
    kCpumMsrWrFn_Amd64LongSyscallTarget,
    kCpumMsrWrFn_Amd64CompSyscallTarget,
    kCpumMsrWrFn_Amd64SyscallFlagMask,
    kCpumMsrWrFn_Amd64FsBase,
    kCpumMsrWrFn_Amd64GsBase,
    kCpumMsrWrFn_Amd64KernelGsBase,
    kCpumMsrWrFn_Amd64TscAux,
    kCpumMsrWrFn_IntelEblCrPowerOn,
    kCpumMsrWrFn_IntelP4EbcHardPowerOn,
    kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
    kCpumMsrWrFn_IntelP4EbcFrequencyId,
    kCpumMsrWrFn_IntelFlexRatio,
    kCpumMsrWrFn_IntelPkgCStConfigControl,
    kCpumMsrWrFn_IntelPmgIoCaptureBase,
    kCpumMsrWrFn_IntelLastBranchFromToN,
    kCpumMsrWrFn_IntelLastBranchFromN,
    kCpumMsrWrFn_IntelLastBranchToN,
    kCpumMsrWrFn_IntelLastBranchTos,
    kCpumMsrWrFn_IntelBblCrCtl,
    kCpumMsrWrFn_IntelBblCrCtl3,
    kCpumMsrWrFn_IntelI7TemperatureTarget,
    kCpumMsrWrFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
    kCpumMsrWrFn_IntelI7MiscPwrMgmt,
    kCpumMsrWrFn_IntelP6CrN,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrWrFn_IntelI7SandyAesNiCtl,
    kCpumMsrWrFn_IntelI7TurboRatioLimit,
    kCpumMsrWrFn_IntelI7LbrSelect,
    kCpumMsrWrFn_IntelI7SandyErrorControl,
    kCpumMsrWrFn_IntelI7PowerCtl,
    kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
    kCpumMsrWrFn_IntelI7PebsLdLat,
    kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
    kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
    kCpumMsrWrFn_IntelI7SandyRaplPowerUnit, /**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
    kCpumMsrWrFn_IntelI7SandyPkgC2Residency,/**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
    kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0Policy,
    kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp1Policy,
    kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
    kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
    kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
    kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
    kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrWrFn_IntelCore1ExtConfig,
    kCpumMsrWrFn_IntelCore1DtsCalControl,
    kCpumMsrWrFn_IntelCore2PeciControl,

    kCpumMsrWrFn_P6LastIntFromIp,
    kCpumMsrWrFn_P6LastIntToIp,

    kCpumMsrWrFn_AmdFam15hTscRate,
    kCpumMsrWrFn_AmdFam15hLwpCfg,
    kCpumMsrWrFn_AmdFam15hLwpCbAddr,
    kCpumMsrWrFn_AmdFam10hMc4MiscN,
    kCpumMsrWrFn_AmdK8PerfCtlN,
    kCpumMsrWrFn_AmdK8PerfCtrN,
    kCpumMsrWrFn_AmdK8SysCfg,
    kCpumMsrWrFn_AmdK8HwCr,
    kCpumMsrWrFn_AmdK8IorrBaseN,
    kCpumMsrWrFn_AmdK8IorrMaskN,
    kCpumMsrWrFn_AmdK8TopOfMemN,
    kCpumMsrWrFn_AmdK8NbCfg1,
    kCpumMsrWrFn_AmdK8McXcptRedir,
    kCpumMsrWrFn_AmdK8CpuNameN,
    kCpumMsrWrFn_AmdK8HwThermalCtrl,
    kCpumMsrWrFn_AmdK8SwThermalCtrl,
    kCpumMsrWrFn_AmdK8FidVidControl,
    kCpumMsrWrFn_AmdK8McCtlMaskN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrWrFn_AmdK8IntPendingMessage,
    kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrWrFn_AmdFam10hPStateControl,
    kCpumMsrWrFn_AmdFam10hPStateStatus,
    kCpumMsrWrFn_AmdFam10hPStateN,
    kCpumMsrWrFn_AmdFam10hCofVidControl,
    kCpumMsrWrFn_AmdFam10hCofVidStatus,
    kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrWrFn_AmdK8SmmBase,
    kCpumMsrWrFn_AmdK8SmmAddr,
    kCpumMsrWrFn_AmdK8SmmMask,
    kCpumMsrWrFn_AmdK8VmCr,
    kCpumMsrWrFn_AmdK8IgnNe,
    kCpumMsrWrFn_AmdK8SmmCtl,
    kCpumMsrWrFn_AmdK8VmHSavePa,
    kCpumMsrWrFn_AmdFam10hVmLockKey,
    kCpumMsrWrFn_AmdFam10hSmmLockKey,
    kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
    kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrWrFn_AmdK7MicrocodeCtl,
    kCpumMsrWrFn_AmdK7ClusterIdMaybe,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrWrFn_AmdK8PatchLoader,
    kCpumMsrWrFn_AmdK7DebugStatusMaybe,
    kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
    kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
    kCpumMsrWrFn_AmdK7NodeId,
    kCpumMsrWrFn_AmdK7DrXAddrMaskN,         /**< Takes register index. */
    kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrWrFn_AmdK7LoadStoreCfg,
    kCpumMsrWrFn_AmdK7InstrCacheCfg,
    kCpumMsrWrFn_AmdK7DataCacheCfg,
    kCpumMsrWrFn_AmdK7BusUnitCfg,
    kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
    kCpumMsrWrFn_AmdFam15hFpuCfg,
    kCpumMsrWrFn_AmdFam15hDecoderCfg,
    kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
    kCpumMsrWrFn_AmdFam15hExecUnitCfg,
    kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
    kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrWrFn_AmdFam10hIbsOpRip,
    kCpumMsrWrFn_AmdFam10hIbsOpData,
    kCpumMsrWrFn_AmdFam10hIbsOpData2,
    kCpumMsrWrFn_AmdFam10hIbsOpData3,
    kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsCtl,
    kCpumMsrWrFn_AmdFam14hIbsBrTarget,

    kCpumMsrWrFn_Gim,

    /** End of valid MSR write function indexes. */
    kCpumMsrWrFn_End
} CPUMMSRWRFN;

/**
 * MSR range.
 */
typedef struct CPUMMSRRANGE
{
    /** The first MSR. [0] */
    uint32_t uFirst;
    /** The last MSR. [4] */
    uint32_t uLast;
    /** The read function (CPUMMSRRDFN). [8] */
    uint16_t enmRdFn;
    /** The write function (CPUMMSRWRFN). [10] */
    uint16_t enmWrFn;
    /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
     * UINT16_MAX if not used by the read and write functions. [12] */
    uint16_t offCpumCpu;
    /** Reserved for future hacks. [14] */
    uint16_t fReserved;
    /** The init/read value. [16]
     * When enmRdFn is kCpumMsrRdFn_FixedValue, this is the value returned on RDMSR.
     * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
     * offset into CPUM. */
    uint64_t uValue;
    /** The bits to ignore when writing. [24] */
    uint64_t fWrIgnMask;
    /** The bits that will cause a GP(0) when writing. [32]
     * This is always checked prior to calling the write function.  Using
     * UINT64_MAX effectively marks the MSR as read-only. */
    uint64_t fWrGpMask;
    /** The register name, if applicable. [40] */
    char szName[56];

#ifdef VBOX_WITH_STATISTICS
    /** The number of reads. */
    STAMCOUNTER cReads;
    /** The number of writes. */
    STAMCOUNTER cWrites;
    /** The number of times ignored bits were written. */
    STAMCOUNTER cIgnoredBits;
    /** The number of GPs generated. */
    STAMCOUNTER cGps;
#endif
} CPUMMSRRANGE;
#ifndef VBOX_FOR_DTRACE_LIB
# ifdef VBOX_WITH_STATISTICS
AssertCompileSize(CPUMMSRRANGE, 128);
# else
AssertCompileSize(CPUMMSRRANGE, 96);
# endif
#endif
/** Pointer to an MSR range. */
typedef CPUMMSRRANGE *PCPUMMSRRANGE;
/** Pointer to a const MSR range. */
typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
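
/* Illustrative sketch (not part of the original header): a statically
 * initialized range covering one fixed-value MSR.  The MSR number and name
 * are made up; the field meanings follow the structure documentation above.
 * @code
 *     static CPUMMSRRANGE const s_ExampleRange =
 *     {
 *         .uFirst     = 0x0000008b,               // First MSR in the range...
 *         .uLast      = 0x0000008b,               // ...and the last (single MSR).
 *         .enmRdFn    = kCpumMsrRdFn_FixedValue,  // RDMSR returns uValue.
 *         .enmWrFn    = kCpumMsrWrFn_IgnoreWrite, // WRMSR is silently dropped.
 *         .offCpumCpu = UINT16_MAX,               // No CPUMCPU backing field.
 *         .fReserved  = 0,
 *         .uValue     = 0,                        // Value returned on reads.
 *         .fWrIgnMask = UINT64_MAX,               // Ignore all written bits.
 *         .fWrGpMask  = 0,                        // Never GP(0) on writes.
 *         .szName     = "EXAMPLE_MSR"
 *     };
 * @endcode
 */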


/**
 * MSRs which are required while exploding features.
 */
typedef struct CPUMMSRS
{
    union
    {
        VMXMSRS vmx;
        SVMMSRS svm;
    } hwvirt;
} CPUMMSRS;
/** Pointer to a CPUMMSRS struct. */
typedef CPUMMSRS *PCPUMMSRS;
/** Pointer to a const CPUMMSRS struct. */
typedef CPUMMSRS const *PCCPUMMSRS;


/**
 * CPU features and quirks.
 * This is mostly exploded CPUID info.
 */
typedef struct CPUMFEATURES
{
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t enmCpuVendor;
    /** The CPU family. */
    uint8_t uFamily;
    /** The CPU model. */
    uint8_t uModel;
    /** The CPU stepping. */
    uint8_t uStepping;
    /** The microarchitecture. */
#ifndef VBOX_FOR_DTRACE_LIB
    CPUMMICROARCH enmMicroarch;
#else
    uint32_t enmMicroarch;
#endif
    /** The maximum physical address width of the CPU. */
    uint8_t cMaxPhysAddrWidth;
    /** The maximum linear address width of the CPU. */
    uint8_t cMaxLinearAddrWidth;
    /** Max size of the extended state (or FPU state if no XSAVE). */
    uint16_t cbMaxExtendedState;

    /** Supports MSRs. */
    uint32_t fMsr : 1;
    /** Supports the page size extension (4/2 MB pages). */
    uint32_t fPse : 1;
    /** Supports 36-bit page size extension (4 MB pages can map memory above
     * 4GB). */
    uint32_t fPse36 : 1;
    /** Supports physical address extension (PAE). */
    uint32_t fPae : 1;
    /** Page attribute table (PAT) support (page level cache control). */
    uint32_t fPat : 1;
    /** Supports the FXSAVE and FXRSTOR instructions. */
    uint32_t fFxSaveRstor : 1;
    /** Supports the XSAVE and XRSTOR instructions. */
    uint32_t fXSaveRstor : 1;
    /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
    uint32_t fOpSysXSaveRstor : 1;
    /** Supports MMX. */
    uint32_t fMmx : 1;
    /** Supports AMD extensions to MMX instructions. */
    uint32_t fAmdMmxExts : 1;
    /** Supports SSE. */
    uint32_t fSse : 1;
    /** Supports SSE2. */
    uint32_t fSse2 : 1;
    /** Supports SSE3. */
    uint32_t fSse3 : 1;
    /** Supports SSSE3. */
    uint32_t fSsse3 : 1;
    /** Supports SSE4.1. */
    uint32_t fSse41 : 1;
    /** Supports SSE4.2. */
    uint32_t fSse42 : 1;
    /** Supports AVX. */
    uint32_t fAvx : 1;
    /** Supports AVX2. */
    uint32_t fAvx2 : 1;
    /** Supports AVX512 foundation. */
    uint32_t fAvx512Foundation : 1;
    /** Supports RDTSC. */
    uint32_t fTsc : 1;
    /** Intel SYSENTER/SYSEXIT support */
    uint32_t fSysEnter : 1;
    /** First generation APIC. */
    uint32_t fApic : 1;
    /** Second generation APIC. */
    uint32_t fX2Apic : 1;
    /** Hypervisor present. */
    uint32_t fHypervisorPresent : 1;
    /** MWAIT & MONITOR instructions supported. */
    uint32_t fMonitorMWait : 1;
    /** MWAIT Extensions present. */
    uint32_t fMWaitExtensions : 1;
    /** Supports CMPXCHG16B in 64-bit mode. */
    uint32_t fMovCmpXchg16b : 1;
    /** Supports CLFLUSH. */
    uint32_t fClFlush : 1;
    /** Supports CLFLUSHOPT. */
    uint32_t fClFlushOpt : 1;
    /** Supports IA32_PRED_CMD.IBPB. */
    uint32_t fIbpb : 1;
    /** Supports IA32_SPEC_CTRL.IBRS. */
    uint32_t fIbrs : 1;
    /** Supports IA32_SPEC_CTRL.STIBP. */
    uint32_t fStibp : 1;
    /** Supports IA32_FLUSH_CMD. */
    uint32_t fFlushCmd : 1;
    /** Supports IA32_ARCH_CAP. */
    uint32_t fArchCap : 1;
    /** Supports MD_CLEAR functionality (VERW, IA32_FLUSH_CMD). */
    uint32_t fMdsClear : 1;
    /** Supports PCID. */
    uint32_t fPcid : 1;
    /** Supports INVPCID. */
    uint32_t fInvpcid : 1;
    /** Supports read/write FSGSBASE instructions. */
    uint32_t fFsGsBase : 1;

    /** Supports AMD 3DNow instructions. */
    uint32_t f3DNow : 1;
    /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
    uint32_t f3DNowPrefetch : 1;

    /** AMD64: Supports long mode. */
    uint32_t fLongMode : 1;
    /** AMD64: SYSCALL/SYSRET support. */
    uint32_t fSysCall : 1;
    /** AMD64: No-execute page table bit. */
    uint32_t fNoExecute : 1;
    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
    uint32_t fLahfSahf : 1;
    /** AMD64: Supports RDTSCP. */
    uint32_t fRdTscP : 1;
    /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
    uint32_t fMovCr8In32Bit : 1;
    /** AMD64: Supports XOP (similar to VEX3/AVX). */
    uint32_t fXop : 1;
    /** Indicates that FPU instruction and data pointers may leak.
     * This generally applies to recent AMD CPUs, where the FPU IP and DP pointers
     * are only saved and restored if an exception is pending. */
    uint32_t fLeakyFxSR : 1;

    /** AMD64: Supports AMD SVM. */
    uint32_t fSvm : 1;

    /** Support for Intel VMX. */
    uint32_t fVmx : 1;

    /** Indicates that speculative execution control CPUID bits and MSRs are exposed.
     * The details are different for Intel and AMD but both have similar
     * functionality. */
    uint32_t fSpeculationControl : 1;

    /** MSR_IA32_ARCH_CAPABILITIES: RDCL_NO (bit 0).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchRdclNo : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: IBRS_ALL (bit 1).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchIbrsAll : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: RSB Override (bit 2).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchRsbOverride : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: SKIP_L1DFL_VMENTRY (bit 3).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchVmmNeedNotFlushL1d : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: MDS_NO (bit 4).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchMdsNo : 1;

    /** Alignment padding / reserved for future use. */
    uint32_t fPadding : 8;

    /** SVM: Supports Nested-paging. */
    uint32_t fSvmNestedPaging : 1;
    /** SVM: Support LBR (Last Branch Record) virtualization. */
    uint32_t fSvmLbrVirt : 1;
    /** SVM: Supports SVM lock. */
    uint32_t fSvmSvmLock : 1;
    /** SVM: Supports Next RIP save. */
    uint32_t fSvmNextRipSave : 1;
    /** SVM: Supports TSC rate MSR. */
    uint32_t fSvmTscRateMsr : 1;
    /** SVM: Supports VMCB clean bits. */
    uint32_t fSvmVmcbClean : 1;
    /** SVM: Supports Flush-by-ASID. */
    uint32_t fSvmFlusbByAsid : 1;
    /** SVM: Supports decode assist. */
    uint32_t fSvmDecodeAssists : 1;
    /** SVM: Supports Pause filter. */
    uint32_t fSvmPauseFilter : 1;
    /** SVM: Supports Pause filter threshold. */
    uint32_t fSvmPauseFilterThreshold : 1;
    /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
    uint32_t fSvmAvic : 1;
    /** SVM: Supports Virtualized VMSAVE/VMLOAD. */
    uint32_t fSvmVirtVmsaveVmload : 1;
    /** SVM: Supports VGIF (Virtual Global Interrupt Flag). */
    uint32_t fSvmVGif : 1;
    /** SVM: Supports GMET (Guest Mode Execute Trap Extension). */
    uint32_t fSvmGmet : 1;
    /** SVM: Padding / reserved for future features. */
    uint32_t fSvmPadding0 : 18;
    /** SVM: Maximum supported ASID. */
    uint32_t uSvmMaxAsid;

    /** VMX: Maximum physical address width. */
    uint8_t cVmxMaxPhysAddrWidth;
    /** VMX: Padding / reserved for future. */
    uint8_t abVmxPadding[3];
    /** VMX: Padding / reserved for future. */
    uint32_t fVmxPadding0;

    /** @name VMX basic controls.
     * @{ */
    /** VMX: Supports INS/OUTS VM-exit instruction info. */
    uint32_t fVmxInsOutInfo : 1;
    /** @} */

    /** @name VMX Pin-based controls.
     * @{ */
    /** VMX: Supports external interrupt VM-exit. */
    uint32_t fVmxExtIntExit : 1;
    /** VMX: Supports NMI VM-exit. */
    uint32_t fVmxNmiExit : 1;
    /** VMX: Supports Virtual NMIs. */
    uint32_t fVmxVirtNmi : 1;
    /** VMX: Supports preemption timer. */
    uint32_t fVmxPreemptTimer : 1;
    /** VMX: Supports posted interrupts. */
    uint32_t fVmxPostedInt : 1;
    /** @} */

    /** @name VMX Processor-based controls.
     * @{ */
    /** VMX: Supports Interrupt-window exiting. */
    uint32_t fVmxIntWindowExit : 1;
    /** VMX: Supports TSC offsetting. */
    uint32_t fVmxTscOffsetting : 1;
    /** VMX: Supports HLT exiting. */
    uint32_t fVmxHltExit : 1;
    /** VMX: Supports INVLPG exiting. */
    uint32_t fVmxInvlpgExit : 1;
    /** VMX: Supports MWAIT exiting. */
    uint32_t fVmxMwaitExit : 1;
    /** VMX: Supports RDPMC exiting. */
    uint32_t fVmxRdpmcExit : 1;
    /** VMX: Supports RDTSC exiting. */
    uint32_t fVmxRdtscExit : 1;
    /** VMX: Supports CR3-load exiting. */
    uint32_t fVmxCr3LoadExit : 1;
    /** VMX: Supports CR3-store exiting. */
    uint32_t fVmxCr3StoreExit : 1;
    /** VMX: Supports CR8-load exiting. */
    uint32_t fVmxCr8LoadExit : 1;
    /** VMX: Supports CR8-store exiting. */
    uint32_t fVmxCr8StoreExit : 1;
    /** VMX: Supports TPR shadow. */
    uint32_t fVmxUseTprShadow : 1;
    /** VMX: Supports NMI-window exiting. */
    uint32_t fVmxNmiWindowExit : 1;
    /** VMX: Supports Mov-DRx exiting. */
    uint32_t fVmxMovDRxExit : 1;
    /** VMX: Supports Unconditional I/O exiting. */
    uint32_t fVmxUncondIoExit : 1;
    /** VMX: Supports I/O bitmaps. */
    uint32_t fVmxUseIoBitmaps : 1;
    /** VMX: Supports Monitor Trap Flag. */
    uint32_t fVmxMonitorTrapFlag : 1;
    /** VMX: Supports MSR bitmap. */
    uint32_t fVmxUseMsrBitmaps : 1;
    /** VMX: Supports MONITOR exiting. */
    uint32_t fVmxMonitorExit : 1;
    /** VMX: Supports PAUSE exiting. */
    uint32_t fVmxPauseExit : 1;
    /** VMX: Supports secondary processor-based VM-execution controls. */
    uint32_t fVmxSecondaryExecCtls : 1;
    /** @} */

    /** @name VMX Secondary processor-based controls.
     * @{ */
    /** VMX: Supports virtualize-APIC access. */
    uint32_t fVmxVirtApicAccess : 1;
    /** VMX: Supports EPT (Extended Page Tables). */
    uint32_t fVmxEpt : 1;
    /** VMX: Supports descriptor-table exiting. */
    uint32_t fVmxDescTableExit : 1;
    /** VMX: Supports RDTSCP. */
    uint32_t fVmxRdtscp : 1;
    /** VMX: Supports virtualize-x2APIC mode. */
    uint32_t fVmxVirtX2ApicMode : 1;
    /** VMX: Supports VPID. */
    uint32_t fVmxVpid : 1;
    /** VMX: Supports WBINVD exiting. */
    uint32_t fVmxWbinvdExit : 1;
    /** VMX: Supports Unrestricted guest. */
    uint32_t fVmxUnrestrictedGuest : 1;
    /** VMX: Supports APIC-register virtualization. */
    uint32_t fVmxApicRegVirt : 1;
    /** VMX: Supports virtual-interrupt delivery. */
    uint32_t fVmxVirtIntDelivery : 1;
    /** VMX: Supports Pause-loop exiting. */
    uint32_t fVmxPauseLoopExit : 1;
    /** VMX: Supports RDRAND exiting. */
    uint32_t fVmxRdrandExit : 1;
    /** VMX: Supports INVPCID. */
    uint32_t fVmxInvpcid : 1;
    /** VMX: Supports VM functions. */
    uint32_t fVmxVmFunc : 1;
    /** VMX: Supports VMCS shadowing. */
    uint32_t fVmxVmcsShadowing : 1;
    /** VMX: Supports RDSEED exiting. */
    uint32_t fVmxRdseedExit : 1;
    /** VMX: Supports PML. */
    uint32_t fVmxPml : 1;
    /** VMX: Supports EPT-violations \#VE. */
    uint32_t fVmxEptXcptVe : 1;
    /** VMX: Supports XSAVES/XRSTORS. */
    uint32_t fVmxXsavesXrstors : 1;
    /** VMX: Supports TSC scaling. */
    uint32_t fVmxUseTscScaling : 1;
    /** @} */

    /** @name VMX VM-entry controls.
     * @{ */
    /** VMX: Supports load-debug controls on VM-entry. */
    uint32_t fVmxEntryLoadDebugCtls : 1;
    /** VMX: Supports IA32e mode guest. */
    uint32_t fVmxIa32eModeGuest : 1;
    /** VMX: Supports load guest EFER MSR on VM-entry. */
    uint32_t fVmxEntryLoadEferMsr : 1;
    /** VMX: Supports load guest PAT MSR on VM-entry. */
    uint32_t fVmxEntryLoadPatMsr : 1;
    /** @} */

    /** @name VMX VM-exit controls.
     * @{ */
    /** VMX: Supports save debug controls on VM-exit. */
    uint32_t fVmxExitSaveDebugCtls : 1;
    /** VMX: Supports host-address space size. */
    uint32_t fVmxHostAddrSpaceSize : 1;
    /** VMX: Supports acknowledge external interrupt on VM-exit. */
    uint32_t fVmxExitAckExtInt : 1;
    /** VMX: Supports save guest PAT MSR on VM-exit. */
    uint32_t fVmxExitSavePatMsr : 1;
    /** VMX: Supports load host PAT MSR on VM-exit. */
    uint32_t fVmxExitLoadPatMsr : 1;
    /** VMX: Supports save guest EFER MSR on VM-exit. */
    uint32_t fVmxExitSaveEferMsr : 1;
    /** VMX: Supports load host EFER MSR on VM-exit. */
    uint32_t fVmxExitLoadEferMsr : 1;
    /** VMX: Supports save VMX preemption timer on VM-exit. */
    uint32_t fVmxSavePreemptTimer : 1;
    /** @} */

    /** @name VMX Miscellaneous data.
     * @{ */
    /** VMX: Supports storing EFER.LMA into IA32e-mode guest field on VM-exit. */
    uint32_t fVmxExitSaveEferLma : 1;
    /** VMX: Whether Intel PT (Processor Trace) is supported in VMX mode or not. */
    uint32_t fVmxIntelPt : 1;
    /** VMX: Supports VMWRITE to any valid VMCS field incl. read-only fields, otherwise
     * VMWRITE cannot modify read-only VM-exit information fields. */
    uint32_t fVmxVmwriteAll : 1;
    /** VMX: Supports injection of software interrupts, ICEBP on VM-entry for zero
     * length instructions. */
    uint32_t fVmxEntryInjectSoftInt : 1;
    /** @} */

    /** VMX: Padding / reserved for future features. */
    uint32_t fVmxPadding1 : 1;
    uint32_t fVmxPadding2;
} CPUMFEATURES;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMFEATURES, 48);
#endif
/** Pointer to a CPU feature structure. */
typedef CPUMFEATURES *PCPUMFEATURES;
/** Pointer to a const CPU feature structure. */
typedef CPUMFEATURES const *PCCPUMFEATURES;
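
/* Illustrative sketch (not part of the original header): the bit-fields make
 * feature tests cheap once a CPUMFEATURES instance is at hand; how the
 * pointer is obtained is outside this excerpt, so it is simply a parameter.
 * @code
 *     static bool canUseNestedVmxWithEpt(PCCPUMFEATURES pFeatures)
 *     {
 *         // Require VMX itself plus EPT and unrestricted guest execution.
 *         return pFeatures->fVmx
 *             && pFeatures->fVmxEpt
 *             && pFeatures->fVmxUnrestrictedGuest;
 *     }
 * @endcode
 */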


/**
 * CPU database entry.
 */
typedef struct CPUMDBENTRY
{
    /** The CPU name. */
    const char *pszName;
    /** The full CPU name. */
    const char *pszFullName;
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t enmVendor;
    /** The CPU family. */
    uint8_t uFamily;
    /** The CPU model. */
    uint8_t uModel;
    /** The CPU stepping. */
    uint8_t uStepping;
    /** The microarchitecture. */
    CPUMMICROARCH enmMicroarch;
    /** Scalable bus frequency used for reporting other frequencies. */
    uint64_t uScalableBusFreq;
    /** Flags - CPUMDB_F_XXX. */
    uint32_t fFlags;
    /** The maximum physical address width of the CPU.  This should correspond to
     * the value in CPUID leaf 0x80000008 when present. */
    uint8_t cMaxPhysAddrWidth;
    /** The MXCSR mask. */
    uint32_t fMxCsrMask;
    /** Pointer to an array of CPUID leaves. */
    PCCPUMCPUIDLEAF paCpuIdLeaves;
    /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
    uint32_t cCpuIdLeaves;
    /** The method used to deal with unknown CPUID leaves. */
    CPUMUNKNOWNCPUID enmUnknownCpuId;
    /** The default unknown CPUID value. */
    CPUMCPUID DefUnknownCpuId;

    /** MSR mask.  Several microarchitectures ignore the higher bits of ECX in
     * the RDMSR and WRMSR instructions. */
    uint32_t fMsrMask;

    /** The number of ranges in the table pointed to by paMsrRanges. */
    uint32_t cMsrRanges;
    /** MSR ranges for this CPU. */
    PCCPUMMSRRANGE paMsrRanges;
} CPUMDBENTRY;
/** Pointer to a const CPU database entry. */
typedef CPUMDBENTRY const *PCCPUMDBENTRY;

/** @name CPUMDB_F_XXX - CPUMDBENTRY::fFlags
 * @{ */
/** Should execute all in IEM.
 * @todo Implement this - currently done in Main... */
#define CPUMDB_F_EXECUTE_ALL_IN_IEM RT_BIT_32(0)
/** @} */



#ifndef VBOX_FOR_DTRACE_LIB

/** @name Guest Register Getters.
 * @{ */
VMMDECL(void)       CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR);
VMMDECL(RTGCPTR)    CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit);
VMMDECL(RTSEL)      CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden);
VMMDECL(RTSEL)      CPUMGetGuestLDTR(PCVMCPU pVCpu);
VMMDECL(RTSEL)      CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
VMMDECL(uint64_t)   CPUMGetGuestCR0(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestCR2(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestCR3(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestCR4(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestCR8(PCVMCPUCC pVCpu);
VMMDECL(int)        CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue);
VMMDECL(uint32_t)   CPUMGetGuestEFlags(PCVMCPU pVCpu);
VMMDECL(uint32_t)   CPUMGetGuestEIP(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestRIP(PCVMCPU pVCpu);
VMMDECL(uint32_t)   CPUMGetGuestEAX(PCVMCPU pVCpu);
VMMDECL(uint32_t)   CPUMGetGuestEBX(PCVMCPU pVCpu);
VMMDECL(uint32_t)   CPUMGetGuestECX(PCVMCPU pVCpu);
VMMDECL(uint32_t)   CPUMGetGuestEDX(PCVMCPU pVCpu);
VMMDECL(uint32_t)   CPUMGetGuestESI(PCVMCPU pVCpu);
VMMDECL(uint32_t)   CPUMGetGuestEDI(PCVMCPU pVCpu);
VMMDECL(uint32_t)   CPUMGetGuestESP(PCVMCPU pVCpu);
VMMDECL(uint32_t)   CPUMGetGuestEBP(PCVMCPU pVCpu);
VMMDECL(RTSEL)      CPUMGetGuestCS(PCVMCPU pVCpu);
VMMDECL(RTSEL)      CPUMGetGuestDS(PCVMCPU pVCpu);
VMMDECL(RTSEL)      CPUMGetGuestES(PCVMCPU pVCpu);
VMMDECL(RTSEL)      CPUMGetGuestFS(PCVMCPU pVCpu);
VMMDECL(RTSEL)      CPUMGetGuestGS(PCVMCPU pVCpu);
VMMDECL(RTSEL)      CPUMGetGuestSS(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestFlatPC(PVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestFlatSP(PVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestDR0(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestDR1(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestDR2(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestDR3(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestDR6(PCVMCPU pVCpu);
VMMDECL(uint64_t)   CPUMGetGuestDR7(PCVMCPU pVCpu);
VMMDECL(int)        CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
VMMDECL(void)       CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t iLeaf, uint32_t iSubLeaf,
                                      uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
VMMDECL(uint64_t)   CPUMGetGuestEFER(PCVMCPU pVCpu);
VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32MtrrCap(PCVMCPU pVCpu);
VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32SmmMonitorCtl(PCVMCPU pVCpu);
VMMDECL(VBOXSTRICTRC)   CPUMQueryGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *puValue);
VMMDECL(VBOXSTRICTRC)   CPUMSetGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t uValue);
VMMDECL(CPUMCPUVENDOR)  CPUMGetGuestCpuVendor(PVM pVM);
VMMDECL(CPUMMICROARCH)  CPUMGetGuestMicroarch(PCVM pVM);
VMMDECL(CPUMCPUVENDOR)  CPUMGetHostCpuVendor(PVM pVM);
VMMDECL(CPUMMICROARCH)  CPUMGetHostMicroarch(PCVM pVM);
/** @} */
1486
1487/** @name Guest Register Setters.
1488 * @{ */
1489VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1490VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1491VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
1492VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
1493VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0);
1494VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
1495VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
1496VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
1497VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0);
1498VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1);
1499VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2);
1500VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3);
1501VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
1502VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7);
1503VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value);
1504VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue);
1505VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
1506VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
1507VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
1508VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
1509VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
1510VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
1511VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
1512VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
1513VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
1514VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
1515VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
1516VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
1517VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
1518VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
1519VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
1520VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
1521VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
1522VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1523VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1524VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1525VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
1526VMMDECL(void) CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
1527VMM_INT_DECL(void) CPUMSetGuestTscAux(PVMCPUCC pVCpu, uint64_t uValue);
1528VMM_INT_DECL(uint64_t) CPUMGetGuestTscAux(PVMCPUCC pVCpu);
1529VMM_INT_DECL(void) CPUMSetGuestSpecCtrl(PVMCPUCC pVCpu, uint64_t uValue);
1530VMM_INT_DECL(uint64_t) CPUMGetGuestSpecCtrl(PVMCPUCC pVCpu);
1531VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM);
1532/** @} */
1533
1534
1535/** @name Misc Guest Predicate Functions.
1536 * @{ */
1537VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
1538VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu);
1539VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu);
1540VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu);
1541VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu);
1542VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu);
1543VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu);
1544VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu);
1545VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu);
1546VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu);
1547VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu);
1548/** @} */
1549
1550/** @name Nested Hardware-Virtualization Helpers.
1551 * @{ */
1552VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu);
1553VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu);
1554VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1555VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1556
1557/* SVM helpers. */
1558VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1559VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1560VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx);
1561VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx);
1562VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
1563VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1564 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
1565 PSVMIOIOEXITINFO pIoExitInfo);
1566VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
1567
1568/* VMX helpers. */
1569VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField);
1570VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess);
1571VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3);
1572VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc);
1573VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick);
1574VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu);
1575VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr);
1576/** @} */
1577
1578/** @name Externalized State Helpers.
1579 * @{ */
1580/** @def CPUM_ASSERT_NOT_EXTRN
1581 * Macro for asserting that the state specified by @a a_fNotExtrn is present.
1582 *
1583 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1584 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
1585 *
1586 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1587 */
1588#define CPUM_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
1589 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fNotExtrn)), \
1590 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
1591
1592/** @def CPUM_IMPORT_EXTRN_RET
1593 * Macro for making sure the state specified by @a fExtrnImport is present,
1594 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1595 *
1596 * Will return from the calling function if CPUMImportGuestStateOnDemand() fails.
1597 *
1598 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1599 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1600 * @thread EMT(a_pVCpu)
1601 *
1602 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1603 */
1604#define CPUM_IMPORT_EXTRN_RET(a_pVCpu, a_fExtrnImport) \
1605 do { \
1606 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1607 { /* already present, consider this likely */ } \
1608 else \
1609 { \
1610 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1611 AssertRCReturn(rcCpumImport, rcCpumImport); \
1612 } \
1613 } while (0)
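/* Illustrative usage sketch (hypothetical handler name; the CPUMCTX_EXTRN_XXX
   flags are real); on import failure the macro returns from the caller:

       static int hmExitHandlerExample(PVMCPUCC pVCpu)
       {
           CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
           LogFlow(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
           return VINF_SUCCESS;
       }
 */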
1614
1615/** @def CPUM_IMPORT_EXTRN_RCSTRICT
1616 * Macro for making sure the state specified by @a fExtrnImport is present,
1617 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1618 *
1619 * Will update a_rcStrict if CPUMImportGuestStateOnDemand() fails.
1620 *
1621 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1622 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1623 * @param a_rcStrict Strict status code variable to update on failure.
1624 * @thread EMT(a_pVCpu)
1625 *
1626 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1627 */
1628#define CPUM_IMPORT_EXTRN_RCSTRICT(a_pVCpu, a_fExtrnImport, a_rcStrict) \
1629 do { \
1630 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1631 { /* already present, consider this likely */ } \
1632 else \
1633 { \
1634 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1635 AssertStmt(RT_SUCCESS(rcCpumImport) || RT_FAILURE_NP(a_rcStrict), a_rcStrict = rcCpumImport); \
1636 } \
1637 } while (0)
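/* Sketch of the strict-status variant (illustrative; assumes a local
   VBOXSTRICTRC variable as in typical callers):

       VBOXSTRICTRC rcStrict = VINF_SUCCESS;
       CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0, rcStrict);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
 */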
1638
1639VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport);
1640/** @} */
1641
1642#if (!defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS) && defined(RT_ARCH_AMD64)) || defined(DOXYGEN_RUNNING)
1643 /** @name Inlined Guest Getter and Predicate Functions.
1644 * @{ */
1645
1646/**
1647 * Gets valid CR0 bits for the guest.
1648 *
1649 * @returns Valid CR0 bits.
1650 */
1651DECLINLINE(uint64_t) CPUMGetGuestCR0ValidMask(void)
1652{
1653 return ( X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
1654 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
1655 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG);
1656}
1657
1658/**
1659 * Tests if the guest is running in real mode or not.
1660 *
1661 * @returns true if in real mode, otherwise false.
1662 * @param pCtx Current CPU context.
1663 */
1664DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCCPUMCTX pCtx)
1665{
1666 return !(pCtx->cr0 & X86_CR0_PE);
1667}
1668
1669/**
1670 * Tests if the guest is running in real or virtual 8086 mode.
1671 *
1672 * @returns @c true if it is, @c false if not.
1673 * @param pCtx Current CPU context.
1674 */
1675DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCCPUMCTX pCtx)
1676{
1677 return !(pCtx->cr0 & X86_CR0_PE)
1678 || pCtx->eflags.Bits.u1VM; /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
1679}
1680
1681/**
1682 * Tests if the guest is running in virtual 8086 mode.
1683 *
1684 * @returns @c true if it is, @c false if not.
1685 * @param pCtx Current CPU context.
1686 */
1687DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCCPUMCTX pCtx)
1688{
1689 return (pCtx->eflags.Bits.u1VM == 1);
1690}
1691
1692/**
1693 * Tests if the guest is running in paged protected mode or not.
1694 *
1695 * @returns true if in paged protected mode, otherwise false.
1696 * @param pCtx Current CPU context.
1697 */
1698DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
1699{
1700 return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1701}
1702
1703/**
1704 * Tests if the guest is running in long mode or not.
1705 *
1706 * @returns true if in long mode, otherwise false.
1707 * @param pCtx Current CPU context.
1708 */
1709DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx)
1710{
1711 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1712}
1713
1714VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
1715
1716/**
1717 * Tests if the guest is running in 64 bits mode or not.
1718 *
1719 * @returns true if in 64 bits protected mode, otherwise false.
1720 * @param pCtx Current CPU context.
1721 */
1722DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
1723{
1724 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
1725 return false;
1726 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
1727 return CPUMIsGuestIn64BitCodeSlow(pCtx);
1728 return pCtx->cs.Attr.n.u1Long;
1729}
1730
1731/**
1732 * Tests if the guest has paging enabled or not.
1733 *
1734 * @returns true if paging is enabled, otherwise false.
1735 * @param pCtx Current CPU context.
1736 */
1737DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCCPUMCTX pCtx)
1738{
1739 return !!(pCtx->cr0 & X86_CR0_PG);
1740}
1741
1742/**
1743 * Tests if the guest is running in PAE mode or not.
1744 *
1745 * @returns true if in PAE mode, otherwise false.
1746 * @param pCtx Current CPU context.
1747 */
1748DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCCPUMCTX pCtx)
1749{
1750 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1751 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1752 return ( (pCtx->cr4 & X86_CR4_PAE)
1753 && CPUMIsGuestPagingEnabledEx(pCtx)
1754 && !(pCtx->msrEFER & MSR_K6_EFER_LMA));
1755}
1756
1757/**
1758 * Tests if the guest has AMD SVM enabled or not.
1759 *
1760 * @returns true if SVM is enabled, otherwise false.
1761 * @param pCtx Current CPU context.
1762 */
1763DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
1764{
1765 return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
1766}
1767
1768/**
1769 * Tests if the guest has Intel VT-x enabled or not.
1770 *
1771 * @returns true if VMX is enabled, otherwise false.
1772 * @param pCtx Current CPU context.
1773 */
1774DECLINLINE(bool) CPUMIsGuestVmxEnabled(PCCPUMCTX pCtx)
1775{
1776 return RT_BOOL(pCtx->cr4 & X86_CR4_VMXE);
1777}
1778
1779/**
1780 * Returns the guest's global-interrupt (GIF) flag.
1781 *
1782 * @returns true when global-interrupts are enabled, otherwise false.
1783 * @param pCtx Current CPU context.
1784 */
1785DECLINLINE(bool) CPUMGetGuestGif(PCCPUMCTX pCtx)
1786{
1787 return pCtx->hwvirt.fGif;
1788}
1789
1790/**
1791 * Sets the guest's global-interrupt flag (GIF).
1792 *
1793 * @param pCtx Current CPU context.
1794 * @param fGif The value to set.
1795 */
1796DECLINLINE(void) CPUMSetGuestGif(PCPUMCTX pCtx, bool fGif)
1797{
1798 pCtx->hwvirt.fGif = fGif;
1799}
1800
1801/**
1802 * Checks if we are executing inside an SVM nested hardware-virtualized guest.
1803 *
1804 * @returns @c true if in SVM nested-guest mode, @c false otherwise.
1805 * @param pCtx Current CPU context.
1806 */
1807DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
1808{
1809 /*
1810 * With AMD-V, the VMRUN intercept is a pre-requisite to entering SVM guest-mode.
1811 * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
1812 */
1813#ifndef IN_RC
1814 if ( pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM
1815 || !(pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
1816 return false;
1817 return true;
1818#else
1819 NOREF(pCtx);
1820 return false;
1821#endif
1822}
1823
1824/**
1825 * Checks if the guest is in VMX non-root operation.
1826 *
1827 * @returns @c true if in VMX non-root operation, @c false otherwise.
1828 * @param pCtx Current CPU context.
1829 */
1830DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx)
1831{
1832#ifndef IN_RC
1833 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1834 return false;
1835 Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
1836 return pCtx->hwvirt.vmx.fInVmxNonRootMode;
1837#else
1838 NOREF(pCtx);
1839 return false;
1840#endif
1841}
1842
1843/**
1844 * Checks if we are executing inside an SVM or VMX nested hardware-virtualized
1845 * guest.
1846 *
1847 * @returns @c true if in nested-guest mode, @c false otherwise.
1848 * @param pCtx Current CPU context.
1849 */
1850DECLINLINE(bool) CPUMIsGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
1851{
1852 return CPUMIsGuestInVmxNonRootMode(pCtx) || CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
1853}
1854
1855/**
1856 * Checks if the guest is in VMX root operation.
1857 *
1858 * @returns @c true if in VMX root operation, @c false otherwise.
1859 * @param pCtx Current CPU context.
1860 */
1861DECLINLINE(bool) CPUMIsGuestInVmxRootMode(PCCPUMCTX pCtx)
1862{
1863#ifndef IN_RC
1864 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1865 return false;
1866 return pCtx->hwvirt.vmx.fInVmxRootMode;
1867#else
1868 NOREF(pCtx);
1869 return false;
1870#endif
1871}
1872
1873# ifndef IN_RC
1874
1875/**
1876 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
1877 * active.
1878 *
1879 * @returns @c true if the intercept is set, @c false otherwise.
1880 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1881 * @param pCtx Pointer to the context.
1882 * @param fIntercept The SVM control/instruction intercept, see
1883 * SVM_CTRL_INTERCEPT_*.
1884 */
1885DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fIntercept)
1886{
1887 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1888 return false;
1889 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1890 Assert(pVmcb);
1891 uint64_t u64Intercepts;
1892 if (!HMGetGuestSvmCtrlIntercepts(pVCpu, &u64Intercepts))
1893 u64Intercepts = pVmcb->ctrl.u64InterceptCtrl;
1894 return RT_BOOL(u64Intercepts & fIntercept);
1895}
1896
1897/**
1898 * Checks if the nested-guest VMCB has the specified CR read intercept active.
1899 *
1900 * @returns @c true if the intercept is set, @c false otherwise.
1901 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1902 * @param pCtx Pointer to the context.
1903 * @param uCr The CR register number (0 to 15).
1904 */
1905DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1906{
1907 Assert(uCr < 16);
1908 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1909 return false;
1910 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1911 Assert(pVmcb);
1912 uint16_t u16Intercepts;
1913 if (!HMGetGuestSvmReadCRxIntercepts(pVCpu, &u16Intercepts))
1914 u16Intercepts = pVmcb->ctrl.u16InterceptRdCRx;
1915 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
1916}
1917
1918/**
1919 * Checks if the nested-guest VMCB has the specified CR write intercept active.
1920 *
1921 * @returns @c true if the intercept is set, @c false otherwise.
1922 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1923 * @param pCtx Pointer to the context.
1924 * @param uCr The CR register number (0 to 15).
1925 */
1926DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1927{
1928 Assert(uCr < 16);
1929 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1930 return false;
1931 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1932 Assert(pVmcb);
1933 uint16_t u16Intercepts;
1934 if (!HMGetGuestSvmWriteCRxIntercepts(pVCpu, &u16Intercepts))
1935 u16Intercepts = pVmcb->ctrl.u16InterceptWrCRx;
1936 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
1937}
1938
1939/**
1940 * Checks if the nested-guest VMCB has the specified DR read intercept active.
1941 *
1942 * @returns @c true if the intercept is set, @c false otherwise.
1943 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1944 * @param pCtx Pointer to the context.
1945 * @param uDr The DR register number (0 to 15).
1946 */
1947DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1948{
1949 Assert(uDr < 16);
1950 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1951 return false;
1952 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1953 Assert(pVmcb);
1954 uint16_t u16Intercepts;
1955 if (!HMGetGuestSvmReadDRxIntercepts(pVCpu, &u16Intercepts))
1956 u16Intercepts = pVmcb->ctrl.u16InterceptRdDRx;
1957 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
1958}
1959
1960/**
1961 * Checks if the nested-guest VMCB has the specified DR write intercept active.
1962 *
1963 * @returns @c true if the intercept is set, @c false otherwise.
1964 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1965 * @param pCtx Pointer to the context.
1966 * @param uDr The DR register number (0 to 15).
1967 */
1968DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1969{
1970 Assert(uDr < 16);
1971 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1972 return false;
1973 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1974 Assert(pVmcb);
1975 uint16_t u16Intercepts;
1976 if (!HMGetGuestSvmWriteDRxIntercepts(pVCpu, &u16Intercepts))
1977 u16Intercepts = pVmcb->ctrl.u16InterceptWrDRx;
1978 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
1979}
1980
1981/**
1982 * Checks if the nested-guest VMCB has the specified exception intercept active.
1983 *
1984 * @returns @c true if the intercept is active, @c false otherwise.
1985 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1986 * @param pCtx Pointer to the context.
1987 * @param uVector The exception / interrupt vector.
1988 */
1989DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
1990{
1991 Assert(uVector <= X86_XCPT_LAST);
1992 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1993 return false;
1994 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1995 Assert(pVmcb);
1996 uint32_t u32Intercepts;
1997 if (!HMGetGuestSvmXcptIntercepts(pVCpu, &u32Intercepts))
1998 u32Intercepts = pVmcb->ctrl.u32InterceptXcpt;
1999 return RT_BOOL(u32Intercepts & RT_BIT(uVector));
2000}
2001
2002/**
2003 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
2004 *
2005 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
2006 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2007 * @param pCtx Pointer to the context.
2008 *
2009 * @remarks Should only be called when SVM feature is exposed to the guest.
2010 */
2011DECLINLINE(bool) CPUMIsGuestSvmVirtIntrMasking(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2012{
2013 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2014 return false;
2015 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2016 Assert(pVmcb);
2017 bool fVIntrMasking;
2018 if (!HMGetGuestSvmVirtIntrMasking(pVCpu, &fVIntrMasking))
2019 fVIntrMasking = pVmcb->ctrl.IntCtrl.n.u1VIntrMasking;
2020 return fVIntrMasking;
2021}
2022
2023/**
2024 * Checks if the nested-guest VMCB has nested-paging enabled.
2025 *
2026 * @returns @c true if nested-paging is enabled, @c false otherwise.
2027 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2028 * @param pCtx Pointer to the context.
2029 *
2030 * @remarks Should only be called when SVM feature is exposed to the guest.
2031 */
2032DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2033{
2034 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2035 return false;
2036 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2037 Assert(pVmcb);
2038 bool fNestedPaging;
2039 if (!HMGetGuestSvmNestedPaging(pVCpu, &fNestedPaging))
2040 fNestedPaging = pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging;
2041 return fNestedPaging;
2042}
2043
2044/**
2045 * Gets the nested-guest VMCB pause-filter count.
2046 *
2047 * @returns The pause-filter count.
2048 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2049 * @param pCtx Pointer to the context.
2050 *
2051 * @remarks Should only be called when SVM feature is exposed to the guest.
2052 */
2053DECLINLINE(uint16_t) CPUMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2054{
2055 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2056 return 0; /* a count, not a bool */
2057 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2058 Assert(pVmcb);
2059 uint16_t u16PauseFilterCount;
2060 if (!HMGetGuestSvmPauseFilterCount(pVCpu, &u16PauseFilterCount))
2061 u16PauseFilterCount = pVmcb->ctrl.u16PauseFilterCount;
2062 return u16PauseFilterCount;
2063}
2064
2065/**
2066 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
2067 *
2068 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2069 * @param pCtx Pointer to the context.
2070 * @param cbInstr The length of the current instruction in bytes.
2071 *
2072 * @remarks Should only be called when SVM feature is exposed to the guest.
2073 */
2074DECLINLINE(void) CPUMGuestSvmUpdateNRip(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbInstr)
2075{
2076 RT_NOREF(pVCpu);
2077 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
2078 PSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2079 Assert(pVmcb);
2080 pVmcb->ctrl.u64NextRIP = pCtx->rip + cbInstr;
2081}
2082
2083/**
2084 * Checks whether any of the given Pin-based VM-execution controls is set when
2085 * executing a nested-guest.
2086 *
2087 * @returns @c true if set, @c false otherwise.
2088 * @param pCtx Pointer to the context.
2089 * @param uPinCtls The Pin-based VM-execution controls to check.
2090 *
2091 * @remarks This does not check if all given controls are set if more than one
2092 * control is passed in @a uPinCtls.
2093 */
2094DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls)
2095{
2096 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2097 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2098 Assert(pVmcs);
2099 return RT_BOOL(pVmcs->u32PinCtls & uPinCtls);
2100}
2101
2102/**
2103 * Checks whether any of the given Processor-based VM-execution controls is set
2104 * when executing a nested-guest.
2105 *
2106 * @returns @c true if set, @c false otherwise.
2107 * @param pCtx Pointer to the context.
2108 * @param uProcCtls The Processor-based VM-execution controls to check.
2109 *
2110 * @remarks This does not check if all given controls are set if more than one
2111 * control is passed in @a uProcCtls.
2112 */
2113DECLINLINE(bool) CPUMIsGuestVmxProcCtlsSet(PCCPUMCTX pCtx, uint32_t uProcCtls)
2114{
2115 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2116 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2117 Assert(pVmcs);
2118 return RT_BOOL(pVmcs->u32ProcCtls & uProcCtls);
2119}
2120
2121/**
2122 * Checks whether any of the given Secondary Processor-based VM-execution
2123 * controls is set when executing a nested-guest.
2124 *
2125 * @returns @c true if set, @c false otherwise.
2126 * @param pCtx Pointer to the context.
2127 * @param uProcCtls2 The Secondary Processor-based VM-execution controls to
2128 * check.
2129 *
2130 * @remarks This does not check if all given controls are set if more than one
2131 * control is passed in @a uProcCtls2.
2132 */
2133DECLINLINE(bool) CPUMIsGuestVmxProcCtls2Set(PCCPUMCTX pCtx, uint32_t uProcCtls2)
2134{
2135 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2136 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2137 Assert(pVmcs);
2138 return RT_BOOL(pVmcs->u32ProcCtls2 & uProcCtls2);
2139}
2140
2141/**
2142 * Checks whether any of the given VM-exit controls is set when executing a
2143 * nested-guest.
2144 *
2145 * @returns @c true if set, @c false otherwise.
2146 * @param pCtx Pointer to the context.
2147 * @param uExitCtls The VM-exit controls to check.
2148 *
2149 * @remarks This does not check if all given controls are set if more than one
2150 * control is passed in @a uExitCtls.
2151 */
2152DECLINLINE(bool) CPUMIsGuestVmxExitCtlsSet(PCCPUMCTX pCtx, uint32_t uExitCtls)
2153{
2154 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2155 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2156 Assert(pVmcs);
2157 return RT_BOOL(pVmcs->u32ExitCtls & uExitCtls);
2158}
2159
2160/**
2161 * Checks whether any of the given VM-entry controls is set when executing a
2162 * nested-guest.
2163 *
2164 * @returns @c true if set, @c false otherwise.
2165 * @param pCtx Pointer to the context.
2166 * @param uEntryCtls The VM-entry controls to check.
2167 *
2168 * @remarks This does not check if all given controls are set if more than one
2169 * control is passed in @a uEntryCtls.
2170 */
2171DECLINLINE(bool) CPUMIsGuestVmxEntryCtlsSet(PCCPUMCTX pCtx, uint32_t uEntryCtls)
2172{
2173 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2174 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2175 Assert(pVmcs);
2176 return RT_BOOL(pVmcs->u32EntryCtls & uEntryCtls);
2177}
2178
2179/**
2180 * Checks whether events injected in the nested-guest are subject to VM-exit checks.
2181 *
2182 * @returns @c true if set, @c false otherwise.
2183 * @param pCtx Pointer to the context.
2184 */
2185DECLINLINE(bool) CPUMIsGuestVmxInterceptEvents(PCCPUMCTX pCtx)
2186{
2187 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2188 return pCtx->hwvirt.vmx.fInterceptEvents;
2189}
2190
2191/**
2192 * Sets whether events injected in the nested-guest are subject to VM-exit checks.
2193 *
2194 * @param pCtx Pointer to the context.
2195 * @param fInterceptEvents Whether to subject injected events to VM-exits or not.
2196 */
2197DECLINLINE(void) CPUMSetGuestVmxInterceptEvents(PCPUMCTX pCtx, bool fInterceptEvents)
2198{
2199 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2200 pCtx->hwvirt.vmx.fInterceptEvents = fInterceptEvents;
2201}
2202
2203/**
2204 * Checks whether the given exception causes a VM-exit.
2205 *
2206 * The exception types include hardware exceptions, software exceptions (#BP, #OF)
2207 * and privileged software exceptions (#DB generated by INT1/ICEBP).
2208 *
2209 * Software interrupts do -not- cause VM-exits and hence must not be used with this
2210 * function.
2211 *
2212 * @returns @c true if the exception causes a VM-exit, @c false otherwise.
2213 * @param pCtx Pointer to the context.
2214 * @param uVector The exception vector.
2215 * @param uErrCode The error code associated with the exception. Pass 0 if not
2216 * applicable.
2217 */
2218DECLINLINE(bool) CPUMIsGuestVmxXcptInterceptSet(PCCPUMCTX pCtx, uint8_t uVector, uint32_t uErrCode)
2219{
2220 Assert(uVector <= X86_XCPT_LAST);
2221
2222 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2223 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2224 Assert(pVmcs);
2225
2226 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
2227 if (uVector == X86_XCPT_NMI)
2228 return RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
2229
2230 /* Page-faults are subject to masking using their error code. */
2231 uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
2232 if (uVector == X86_XCPT_PF)
2233 {
2234 uint32_t const fXcptPFMask = pVmcs->u32XcptPFMask;
2235 uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
2236 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
2237 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
2238 }
2239
2240 /* Consult the exception bitmap for all other exceptions. */
2241 if (fXcptBitmap & RT_BIT(uVector))
2242 return true;
2243 return false;
2244}
2245
2246/**
2247 * Implements VMSucceed for VMX instruction success.
2248 *
2249 * @param pCtx Pointer to the context.
2250 */
2251DECLINLINE(void) CPUMSetGuestVmxVmSucceed(PCPUMCTX pCtx)
2252{
2253 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2254}
2255
2256/**
2257 * Implements VMFailInvalid for VMX instruction failure.
2258 *
2259 * @param pCtx Pointer to the context.
2260 */
2261DECLINLINE(void) CPUMSetGuestVmxVmFailInvalid(PCPUMCTX pCtx)
2262{
2263 pCtx->eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2264 pCtx->eflags.u32 |= X86_EFL_CF;
2265}
2266
2267/**
2268 * Implements VMFailValid for VMX instruction failure.
2269 *
2270 * @param pCtx Pointer to the context.
2271 * @param enmInsErr The VM instruction error.
2272 */
2273DECLINLINE(void) CPUMSetGuestVmxVmFailValid(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2274{
2275 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2276 pCtx->eflags.u32 |= X86_EFL_ZF;
2277 pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32RoVmInstrError = enmInsErr;
2278}
2279
2280/**
2281 * Implements VMFail for VMX instruction failure.
2282 *
2283 * @param pCtx Pointer to the context.
2284 * @param enmInsErr The VM instruction error.
2285 */
2286DECLINLINE(void) CPUMSetGuestVmxVmFail(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2287{
2288 if (pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
2289 CPUMSetGuestVmxVmFailValid(pCtx, enmInsErr);
2290 else
2291 CPUMSetGuestVmxVmFailInvalid(pCtx);
2292}
2293
2294/**
2295 * Returns the guest-physical address of the APIC-access page when executing a
2296 * nested-guest.
2297 *
2298 * @returns The APIC-access page guest-physical address.
2299 * @param pCtx Pointer to the context.
2300 */
2301DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCCPUMCTX pCtx)
2302{
2303 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2304 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2305 Assert(pVmcs);
2306 return pVmcs->u64AddrApicAccess.u;
2307}
2308
2309/**
2310 * Gets the nested-guest CR0 subject to the guest/host mask and the read-shadow.
2311 *
2312 * @returns The nested-guest CR0.
2313 * @param pCtx Pointer to the context.
2314 * @param fGstHostMask The CR0 guest/host mask to use.
2315 */
2316DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr0(PCCPUMCTX pCtx, uint64_t fGstHostMask)
2317{
2318 /*
2319 * For each CR0 bit owned by the host, the corresponding bit from the
2320 * CR0 read shadow is loaded. For each CR0 bit that is not owned by the host,
2321 * the corresponding bit from the guest CR0 is loaded.
2322 *
2323 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2324 */
2325 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2326 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2327 Assert(pVmcs);
2328 uint64_t const uGstCr0 = pCtx->cr0;
2329 uint64_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2330 return (fReadShadow & fGstHostMask) | (uGstCr0 & ~fGstHostMask);
2331}
2332
2333/**
2334 * Gets the nested-guest CR4 subject to the guest/host mask and the read-shadow.
2335 *
2336 * @returns The nested-guest CR4.
2337 * @param pCtx Pointer to the context.
2338 * @param fGstHostMask The CR4 guest/host mask to use.
2339 */
2340DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr4(PCCPUMCTX pCtx, uint64_t fGstHostMask)
2341{
2342 /*
2343 * For each CR4 bit owned by the host, the corresponding bit from the
2344 * CR4 read shadow is loaded. For each CR4 bit that is not owned by the host,
2345 * the corresponding bit from the guest CR4 is loaded.
2346 *
2347 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2348 */
2349 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2350 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2351 Assert(pVmcs);
2352 uint64_t const uGstCr4 = pCtx->cr4;
2353 uint64_t const fReadShadow = pVmcs->u64Cr4ReadShadow.u;
2354 return (fReadShadow & fGstHostMask) | (uGstCr4 & ~fGstHostMask);
2355}
2356
2357/**
2358 * Checks whether the LMSW access causes a VM-exit or not.
2359 *
2360 * @returns @c true if the LMSW access causes a VM-exit, @c false otherwise.
2361 * @param pCtx Pointer to the context.
2362 * @param uNewMsw The LMSW source operand (the Machine Status Word).
2363 */
2364DECLINLINE(bool) CPUMIsGuestVmxLmswInterceptSet(PCCPUMCTX pCtx, uint16_t uNewMsw)
2365{
2366 /*
2367 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
2368 *
2369 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
2370 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2371 */
2372 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2373 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2374 Assert(pVmcs);
2375
2376 uint32_t const fGstHostMask = (uint32_t)pVmcs->u64Cr0Mask.u;
2377 uint32_t const fReadShadow = (uint32_t)pVmcs->u64Cr0ReadShadow.u;
2378
2379 /*
2380 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
2381 * CR0.PE case first, before the rest of the bits in the MSW.
2382 *
2383 * If CR0.PE is owned by the host and CR0.PE differs between the
2384 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
2385 */
2386 if ( (fGstHostMask & X86_CR0_PE)
2387 && (uNewMsw & X86_CR0_PE)
2388 && !(fReadShadow & X86_CR0_PE))
2389 return true;
2390
2391 /*
2392 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
2393 * bits differ between the MSW (source operand) and the read-shadow, we must
2394 * cause a VM-exit.
2395 */
2396 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2397 if ((fReadShadow & fGstHostLmswMask) != (uNewMsw & fGstHostLmswMask))
2398 return true;
2399
2400 return false;
2401}
2402
2403/**
2404 * Checks whether the Mov-to-CR0/CR4 access causes a VM-exit or not.
2405 *
2406 * @returns @c true if the Mov CRX access causes a VM-exit, @c false otherwise.
2407 * @param pCtx Pointer to the context.
2408 * @param iCrReg The control register number (must be 0 or 4).
2409 * @param uNewCrX The CR0/CR4 value being written.
2410 */
2411DECLINLINE(bool) CPUMIsGuestVmxMovToCr0Cr4InterceptSet(PCCPUMCTX pCtx, uint8_t iCrReg, uint64_t uNewCrX)
2412{
2413 /*
2414 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
2415 * corresponding bits differ between the source operand and the read-shadow,
2416 * we must cause a VM-exit.
2417 *
2418 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2419 */
2420 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2421 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2422 Assert(pVmcs);
2423 Assert(iCrReg == 0 || iCrReg == 4);
2424
2425 uint64_t fGstHostMask;
2426 uint64_t fReadShadow;
2427 if (iCrReg == 0)
2428 {
2429 fGstHostMask = pVmcs->u64Cr0Mask.u;
2430 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2431 }
2432 else
2433 {
2434 fGstHostMask = pVmcs->u64Cr4Mask.u;
2435 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
2436 }
2437
2438 if ((fReadShadow & fGstHostMask) != (uNewCrX & fGstHostMask))
2439 {
2440 Assert(fGstHostMask != 0);
2441 return true;
2442 }
2443
2444 return false;
2445}
2446
2447/**
2448 * Returns whether the guest has an active, current VMCS.
2449 *
2450 * @returns @c true if the guest has an active, current VMCS, @c false otherwise.
2451 * @param pCtx Pointer to the context.
2452 */
2453DECLINLINE(bool) CPUMIsGuestVmxCurrentVmcsValid(PCCPUMCTX pCtx)
2454{
2455 RTGCPHYS const GCPhysVmcs = pCtx->hwvirt.vmx.GCPhysVmcs;
2456 return RT_BOOL(GCPhysVmcs != NIL_RTGCPHYS);
2457}
2458
2459/**
2460 * Gets the nested-guest virtual-APIC page.
2461 *
2462 * @returns The virtual-APIC page.
2463 * @param pCtx Pointer to the context.
2464 * @param pHCPhysVirtApicPage Where to store the host-physical address of the
2465 * virtual-APIC page.
2466 */
2467DECLINLINE(void *) CPUMGetGuestVmxVirtApicPage(PCCPUMCTX pCtx, PRTHCPHYS pHCPhysVirtApicPage)
2468{
2469 Assert(pHCPhysVirtApicPage);
2470 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
2471 *pHCPhysVirtApicPage = pCtx->hwvirt.vmx.HCPhysVirtApicPage;
2472 return pCtx->hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
2473}
2474
2475# endif /* !IN_RC */
2476
2477/**
2478 * Checks whether the VMX nested-guest is in a state to receive physical (APIC)
2479 * interrupts.
2480 *
2481 * @returns @c true if it's ready, @c false otherwise.
2482 * @param pCtx The guest-CPU context.
2483 */
2484DECLINLINE(bool) CPUMIsGuestVmxPhysIntrEnabled(PCCPUMCTX pCtx)
2485{
2486#ifdef IN_RC
2487 AssertReleaseFailedReturn(false);
2488#else
2489 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2490 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
2491 return true;
2492 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2493#endif
2494}
2495
2496/**
2497 * Checks whether the VMX nested-guest is blocking virtual-NMIs.
2498 *
2499 * @returns @c true if it's blocked, @c false otherwise.
2500 * @param pCtx The guest-CPU context.
2501 */
2502DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx)
2503{
2504#ifdef IN_RC
2505 RT_NOREF(pCtx);
2506 AssertReleaseFailedReturn(false);
2507#else
2508 /*
2509 * Return the state of virtual-NMI blocking, if we are executing a
2510 * VMX nested-guest with virtual-NMIs enabled.
2511 */
2512 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2513 Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
2514 return pCtx->hwvirt.vmx.fVirtNmiBlocking;
2515#endif
2516}
2517
2518/**
2519 * Sets or clears VMX nested-guest virtual-NMI blocking.
2520 *
2521 * @param pCtx The guest-CPU context.
2522 * @param fBlocking Whether virtual-NMI blocking is in effect or not.
2523 */
2524DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking)
2525{
2526#ifdef IN_RC
2527 RT_NOREF2(pCtx, fBlocking);
2528 AssertReleaseFailedReturnVoid();
2529#else
2530 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2531 Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
2532 pCtx->hwvirt.vmx.fVirtNmiBlocking = fBlocking;
2533#endif
2534}
2535
2536/**
2537 * Checks whether the VMX nested-guest is in a state to receive virtual interrupts
2538 * (those injected with the "virtual-interrupt delivery" feature).
2539 *
2540 * @returns @c true if it's ready, @c false otherwise.
2541 * @param pCtx The guest-CPU context.
2542 */
2543DECLINLINE(bool) CPUMIsGuestVmxVirtIntrEnabled(PCCPUMCTX pCtx)
2544{
2545#ifdef IN_RC
2546 RT_NOREF(pCtx);
2547 AssertReleaseFailedReturn(false);
2548#else
2549 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2550 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2551#endif
2552}
2553
2554/** @} */
2555#endif /* !IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS && RT_ARCH_AMD64 */
2556
2557
2558
2559/** @name Hypervisor Register Getters.
2560 * @{ */
2561VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu);
2562VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu);
2563VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu);
2564VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu);
2565VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu);
2566VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu);
2567VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
2568/** @} */
2569
2570/** @name Hypervisor Register Setters.
2571 * @{ */
2572VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
2573VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
2574VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
2575VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
2576VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
2577VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
2578VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
2579VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg);
2580/** @} */
2581
2582VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
2583#ifdef VBOX_INCLUDED_vmm_cpumctx_h
2584VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu);
2585#endif
2586VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu);
2587
2588/** @name Changed flags.
2589 * These flags are used to keep track of which important registers
2590 * have been changed since they were last reset. The only one allowed
2591 * to clear them is REM!
2592 *
2593 * @todo This is obsolete, but remains as it will be refactored for coordinating
2594 * IEM and NEM/HM later. Probably.
2595 * @{
2596 */
2597#define CPUM_CHANGED_FPU_REM RT_BIT(0)
2598#define CPUM_CHANGED_CR0 RT_BIT(1)
2599#define CPUM_CHANGED_CR4 RT_BIT(2)
2600#define CPUM_CHANGED_GLOBAL_TLB_FLUSH RT_BIT(3)
2601#define CPUM_CHANGED_CR3 RT_BIT(4)
2602#define CPUM_CHANGED_GDTR RT_BIT(5)
2603#define CPUM_CHANGED_IDTR RT_BIT(6)
2604#define CPUM_CHANGED_LDTR RT_BIT(7)
2605 #define CPUM_CHANGED_TR RT_BIT(8) /**< Currently unused. */
2606#define CPUM_CHANGED_SYSENTER_MSR RT_BIT(9)
2607 #define CPUM_CHANGED_HIDDEN_SEL_REGS RT_BIT(10) /**< Currently unused. */
2608#define CPUM_CHANGED_CPUID RT_BIT(11)
2609#define CPUM_CHANGED_ALL ( CPUM_CHANGED_FPU_REM \
2610 | CPUM_CHANGED_CR0 \
2611 | CPUM_CHANGED_CR4 \
2612 | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
2613 | CPUM_CHANGED_CR3 \
2614 | CPUM_CHANGED_GDTR \
2615 | CPUM_CHANGED_IDTR \
2616 | CPUM_CHANGED_LDTR \
2617 | CPUM_CHANGED_TR \
2618 | CPUM_CHANGED_SYSENTER_MSR \
2619 | CPUM_CHANGED_HIDDEN_SEL_REGS \
2620 | CPUM_CHANGED_CPUID )
2621/** @} */
2622
2623VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
2624VMMDECL(bool) CPUMSupportsXSave(PVM pVM);
2625VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM);
2626VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM);
2627VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
2628VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
2629VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
2630VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
2631VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
2632VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
2633VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu);
2634VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu);
2635VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu);
2636VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
2637VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM);
2638VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM);
2639VMMDECL(uint64_t) CPUMGetGuestEferMsrValidMask(PVM pVM);
2640VMMDECL(int) CPUMIsGuestEferMsrWriteValid(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
2641 uint64_t *puValidEfer);
2642VMMDECL(void) CPUMSetGuestEferMsrNoChecks(PVMCPUCC pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
2643VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue);
2644
2645
2646/** Guest CPU interruptibility level, see CPUMGetGuestInterruptibility(). */
2647typedef enum CPUMINTERRUPTIBILITY
2648{
2649 CPUMINTERRUPTIBILITY_INVALID = 0,
2650 CPUMINTERRUPTIBILITY_UNRESTRAINED,
2651 CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
2652 CPUMINTERRUPTIBILITY_INT_DISABLED,
2653 CPUMINTERRUPTIBILITY_INT_INHIBITED,
2654 CPUMINTERRUPTIBILITY_NMI_INHIBIT,
2655 CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
2656 CPUMINTERRUPTIBILITY_END,
2657 CPUMINTERRUPTIBILITY_32BIT_HACK = 0x7fffffff
2658} CPUMINTERRUPTIBILITY;
2659
2660VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
2661VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu);
2662VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock);
2663
2664/** @name Typical scalable bus frequency values.
2665 * @{ */
2666/** Special internal value indicating that we don't know the frequency.
2667 * @internal */
2668#define CPUM_SBUSFREQ_UNKNOWN UINT64_C(1)
2669#define CPUM_SBUSFREQ_100MHZ UINT64_C(100000000)
2670#define CPUM_SBUSFREQ_133MHZ UINT64_C(133333333)
2671#define CPUM_SBUSFREQ_167MHZ UINT64_C(166666666)
2672#define CPUM_SBUSFREQ_200MHZ UINT64_C(200000000)
2673#define CPUM_SBUSFREQ_267MHZ UINT64_C(266666666)
2674#define CPUM_SBUSFREQ_333MHZ UINT64_C(333333333)
2675#define CPUM_SBUSFREQ_400MHZ UINT64_C(400000000)
2676/** @} */
2677
2678
2679#ifdef IN_RING3
2680/** @defgroup grp_cpum_r3 The CPUM ring-3 API
2681 * @{
2682 */
2683
2684VMMR3DECL(int) CPUMR3Init(PVM pVM);
2685VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
2686VMMR3DECL(void) CPUMR3LogCpuIdAndMsrFeatures(PVM pVM);
2687VMMR3DECL(void) CPUMR3Relocate(PVM pVM);
2688VMMR3DECL(int) CPUMR3Term(PVM pVM);
2689VMMR3DECL(void) CPUMR3Reset(PVM pVM);
2690VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
2691VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM);
2692VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
2693
2694VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
2695VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
2696VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
2697 uint8_t bModel, uint8_t bStepping);
2698VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch);
2699VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
2700VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
2701VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
2702VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
2703VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor);
2704VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void);
2705
2706VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
2707
2708VMMR3DECL(uint32_t) CPUMR3DbGetEntries(void);
2709/** Pointer to CPUMR3DbGetEntries. */
2710typedef DECLCALLBACKPTR(uint32_t, PFNCPUMDBGETENTRIES, (void));
2711VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByIndex(uint32_t idxCpuDb);
2712/** Pointer to CPUMR3DbGetEntryByIndex. */
2713typedef DECLCALLBACKPTR(PCCPUMDBENTRY, PFNCPUMDBGETENTRYBYINDEX, (uint32_t idxCpuDb));
2714VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByName(const char *pszName);
2715/** Pointer to CPUMR3DbGetEntryByName. */
2716typedef DECLCALLBACKPTR(PCCPUMDBENTRY, PFNCPUMDBGETENTRYBYNAME, (const char *pszName));
2717/** @} */
2718#endif /* IN_RING3 */
2719
2720#ifdef IN_RING0
2721/** @defgroup grp_cpum_r0 The CPUM ring-0 API
2722 * @{
2723 */
2724VMMR0_INT_DECL(int) CPUMR0ModuleInit(void);
2725VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void);
2726VMMR0_INT_DECL(int) CPUMR0InitVM(PVMCC pVM);
2727DECLASM(void) CPUMR0RegisterVCpuThread(PVMCPUCC pVCpu);
2728DECLASM(void) CPUMR0TouchHostFpu(void);
2729VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVMCC pVM, PVMCPUCC pVCpu);
2730VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVMCC pVM, PVMCPUCC pVCpu);
2731VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu);
2732VMMR0_INT_DECL(int) CPUMR0SaveHostDebugState(PVMCC pVM, PVMCPUCC pVCpu);
2733VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu, bool fDr6);
2734VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPUCC pVCpu, bool fDr6);
2735
2736VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPUCC pVCpu, bool fDr6);
2737VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPUCC pVCpu, bool fDr6);
2738#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
2739VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPUCC pVCpu, uint32_t iHostCpuSet);
2740#endif
2741
2742/** @} */
2743#endif /* IN_RING0 */
2744
2745/** @defgroup grp_cpum_rz The CPUM raw-mode and ring-0 context API
2746 * @{
2747 */
2748VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPUCC pVCpu);
2749VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPUCC pVCpu);
2750VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPUCC pVCpu);
2751VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPUCC pVCpu);
2752VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPUCC pVCpu);
2753/** @} */
2754
2755
2756#endif /* !VBOX_FOR_DTRACE_LIB */
2757/** @} */
2758RT_C_DECLS_END
2759
2760
2761#endif /* !VBOX_INCLUDED_vmm_cpum_h */
2762