VirtualBox

source: vbox/trunk/include/VBox/vmm/cpum.h@95248

Last change on this file since 95248 was 95248, checked in by vboxsync, 2 years ago

VMM/CPUM: Started changing the way we advertise SYSCALL, SEP, NX, and others as well as deduplicating the code for enabling 64-bit guest support (long mode). Also, the SYSCALL CPUID bit is now correctly suppressed when not in 64-bit mode on Intel CPUs. bugref:9898

/** @file
 * CPUM - CPU Monitor(/ Manager).
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef VBOX_INCLUDED_vmm_cpum_h
#define VBOX_INCLUDED_vmm_cpum_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <iprt/x86.h>
#include <VBox/types.h>
#include <VBox/vmm/cpumctx.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/vmm/hm_vmx.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_cpum      The CPU Monitor / Manager API
 * @ingroup grp_vmm
 * @{
 */

/**
 * CPUID feature to set or clear.
 */
typedef enum CPUMCPUIDFEATURE
{
    CPUMCPUIDFEATURE_INVALID = 0,
    /** The APIC feature bit. (Std+Ext)
     * Note! There is a per-cpu flag for masking this CPUID feature bit when the
     *       APICBASE.ENABLED bit is zero.  So, this feature is only set/cleared
     *       at VM construction time like all the others.  This wasn't always the
     *       case; it is new with 5.1. */
    CPUMCPUIDFEATURE_APIC,
    /** The sysenter/sysexit feature bit. (Std) */
    CPUMCPUIDFEATURE_SEP,
    /** The SYSCALL/SYSRET feature bit (64-bit mode only for Intel CPUs). (Ext) */
    CPUMCPUIDFEATURE_SYSCALL,
    /** The PAE feature bit. (Std+Ext) */
    CPUMCPUIDFEATURE_PAE,
    /** The NX feature bit. (Ext) */
    CPUMCPUIDFEATURE_NX,
    /** The LAHF/SAHF feature bit (64-bit mode only). (Ext) */
    CPUMCPUIDFEATURE_LAHF,
    /** The LONG MODE feature bit. (Ext) */
    CPUMCPUIDFEATURE_LONG_MODE,
    /** The x2APIC feature bit. (Std) */
    CPUMCPUIDFEATURE_X2APIC,
    /** The RDTSCP feature bit. (Ext) */
    CPUMCPUIDFEATURE_RDTSCP,
    /** The Hypervisor Present bit. (Std) */
    CPUMCPUIDFEATURE_HVP,
    /** The speculation control feature bits. (Std+Ext) */
    CPUMCPUIDFEATURE_SPEC_CTRL,
    /** 32bit hackishness. */
    CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
} CPUMCPUIDFEATURE;
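
/** @par Example (illustrative)
 * A minimal sketch of advertising a feature to the guest at VM construction
 * time.  It assumes the ring-3 CPUMR3SetGuestCpuIdFeature API; the exact entry
 * point and signature may differ between VirtualBox versions:
 * @code
 *      // Expose NX to the guest; must happen during VM construction.
 *      CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 * @endcode
 */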

/**
 * CPU Vendor.
 */
typedef enum CPUMCPUVENDOR
{
    CPUMCPUVENDOR_INVALID = 0,
    CPUMCPUVENDOR_INTEL,
    CPUMCPUVENDOR_AMD,
    CPUMCPUVENDOR_VIA,
    CPUMCPUVENDOR_CYRIX,
    CPUMCPUVENDOR_SHANGHAI,
    CPUMCPUVENDOR_HYGON,
    CPUMCPUVENDOR_UNKNOWN,
    /** 32bit hackishness. */
    CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
} CPUMCPUVENDOR;


/**
 * X86 and AMD64 CPU microarchitectures and processor generations.
 *
 * @remarks The separation here is sometimes a little bit too finely grained,
 *          and the differences are more like processor generation than micro-
 *          architecture.  This can be useful, so we'll provide functions for
 *          getting at more coarse grained info.
 */
typedef enum CPUMMICROARCH
{
    kCpumMicroarch_Invalid = 0,

    kCpumMicroarch_Intel_First,

    kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
    kCpumMicroarch_Intel_80186,
    kCpumMicroarch_Intel_80286,
    kCpumMicroarch_Intel_80386,
    kCpumMicroarch_Intel_80486,
    kCpumMicroarch_Intel_P5,

    kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6_II,
    kCpumMicroarch_Intel_P6_III,

    kCpumMicroarch_Intel_P6_M_Banias,
    kCpumMicroarch_Intel_P6_M_Dothan,
    kCpumMicroarch_Intel_Core_Yonah,            /**< Core, also known as Enhanced Pentium M. */

    kCpumMicroarch_Intel_Core2_First,
    kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First,    /**< 65nm, Merom/Conroe/Kentsfield/Tigerton */
    kCpumMicroarch_Intel_Core2_Penryn,          /**< 45nm, Penryn/Wolfdale/Yorkfield/Harpertown */
    kCpumMicroarch_Intel_Core2_End,

    kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Westmere,
    kCpumMicroarch_Intel_Core7_SandyBridge,
    kCpumMicroarch_Intel_Core7_IvyBridge,
    kCpumMicroarch_Intel_Core7_Haswell,
    kCpumMicroarch_Intel_Core7_Broadwell,
    kCpumMicroarch_Intel_Core7_Skylake,
    kCpumMicroarch_Intel_Core7_KabyLake,
    kCpumMicroarch_Intel_Core7_CoffeeLake,
    kCpumMicroarch_Intel_Core7_WhiskeyLake,
    kCpumMicroarch_Intel_Core7_CascadeLake,
    kCpumMicroarch_Intel_Core7_CannonLake,      /**< Limited 10nm. */
    kCpumMicroarch_Intel_Core7_CometLake,       /**< 10th gen, 14nm desktop + high power mobile. */
    kCpumMicroarch_Intel_Core7_IceLake,         /**< 10th gen, 10nm mobile and some Xeons. Actually 'Sunny Cove' march. */
    kCpumMicroarch_Intel_Core7_SunnyCove = kCpumMicroarch_Intel_Core7_IceLake,
    kCpumMicroarch_Intel_Core7_RocketLake,      /**< 11th gen, 14nm desktop + high power mobile. Aka 'Cypress Cove', backport of 'Willow Cove' to 14nm. */
    kCpumMicroarch_Intel_Core7_CypressCove = kCpumMicroarch_Intel_Core7_RocketLake,
    kCpumMicroarch_Intel_Core7_TigerLake,       /**< 11th gen, 10nm mobile. Actually 'Willow Cove' march. */
    kCpumMicroarch_Intel_Core7_WillowCove = kCpumMicroarch_Intel_Core7_TigerLake,
    kCpumMicroarch_Intel_Core7_AlderLake,       /**< 12th gen, 10nm all platforms(?). */
    kCpumMicroarch_Intel_Core7_SapphireRapids,  /**< 12th? gen, 10nm server? */
    kCpumMicroarch_Intel_Core7_End,

    kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Lincroft,         /**< Second generation bonnell (44nm). */
    kCpumMicroarch_Intel_Atom_Saltwell,         /**< 32nm shrink of Bonnell. */
    kCpumMicroarch_Intel_Atom_Silvermont,       /**< 22nm */
    kCpumMicroarch_Intel_Atom_Airmount,         /**< 14nm */
    kCpumMicroarch_Intel_Atom_Goldmont,         /**< 14nm */
    kCpumMicroarch_Intel_Atom_GoldmontPlus,     /**< 14nm */
    kCpumMicroarch_Intel_Atom_Unknown,
    kCpumMicroarch_Intel_Atom_End,


    kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsFerry = kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsCorner,
    kCpumMicroarch_Intel_Phi_KnightsLanding,
    kCpumMicroarch_Intel_Phi_KnightsHill,
    kCpumMicroarch_Intel_Phi_KnightsMill,
    kCpumMicroarch_Intel_Phi_End,

    kCpumMicroarch_Intel_P6_Core_Atom_End,

    kCpumMicroarch_Intel_NB_First,
    kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First,     /**< 180nm */
    kCpumMicroarch_Intel_NB_Northwood,          /**< 130nm */
    kCpumMicroarch_Intel_NB_Prescott,           /**< 90nm */
    kCpumMicroarch_Intel_NB_Prescott2M,         /**< 90nm */
    kCpumMicroarch_Intel_NB_CedarMill,          /**< 65nm */
    kCpumMicroarch_Intel_NB_Gallatin,           /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
    kCpumMicroarch_Intel_NB_Unknown,
    kCpumMicroarch_Intel_NB_End,

    kCpumMicroarch_Intel_Unknown,
    kCpumMicroarch_Intel_End,

    kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am386,
    kCpumMicroarch_AMD_Am486,
    kCpumMicroarch_AMD_Am486Enh,                /**< Covers Am5x86 as well. */
    kCpumMicroarch_AMD_K5,
    kCpumMicroarch_AMD_K6,

    kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Spitfire,
    kCpumMicroarch_AMD_K7_Thunderbird,
    kCpumMicroarch_AMD_K7_Morgan,
    kCpumMicroarch_AMD_K7_Thoroughbred,
    kCpumMicroarch_AMD_K7_Barton,
    kCpumMicroarch_AMD_K7_Unknown,
    kCpumMicroarch_AMD_K7_End,

    kCpumMicroarch_AMD_K8_First,
    kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First,  /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
    kCpumMicroarch_AMD_K8_90nm,                 /**< 90nm shrink */
    kCpumMicroarch_AMD_K8_90nm_DualCore,        /**< 90nm with two cores. */
    kCpumMicroarch_AMD_K8_90nm_AMDV,            /**< 90nm with AMD-V (usually) and two cores (usually). */
    kCpumMicroarch_AMD_K8_65nm,                 /**< 65nm shrink. */
    kCpumMicroarch_AMD_K8_End,

    kCpumMicroarch_AMD_K10,
    kCpumMicroarch_AMD_K10_Lion,
    kCpumMicroarch_AMD_K10_Llano,
    kCpumMicroarch_AMD_Bobcat,
    kCpumMicroarch_AMD_Jaguar,

    kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Piledriver,
    kCpumMicroarch_AMD_15h_Steamroller,         /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Excavator,           /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Unknown,
    kCpumMicroarch_AMD_15h_End,

    kCpumMicroarch_AMD_16h_First,
    kCpumMicroarch_AMD_16h_End,

    kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_End,

    kCpumMicroarch_AMD_Unknown,
    kCpumMicroarch_AMD_End,

    kCpumMicroarch_Hygon_First,
    kCpumMicroarch_Hygon_Dhyana = kCpumMicroarch_Hygon_First,
    kCpumMicroarch_Hygon_Unknown,
    kCpumMicroarch_Hygon_End,

    kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C2,
    kCpumMicroarch_Centaur_C3,
    kCpumMicroarch_VIA_C3_M2,
    kCpumMicroarch_VIA_C3_C5A,                  /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
    kCpumMicroarch_VIA_C3_C5B,                  /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
    kCpumMicroarch_VIA_C3_C5C,                  /**< 130nm Ezra - C3, Eden ESP. */
    kCpumMicroarch_VIA_C3_C5N,                  /**< 130nm Ezra-T - C3. */
    kCpumMicroarch_VIA_C3_C5XL,                 /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
    kCpumMicroarch_VIA_C3_C5P,                  /**< 130nm Nehemiah+ - C3. */
    kCpumMicroarch_VIA_C7_C5J,                  /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
    kCpumMicroarch_VIA_Isaiah,
    kCpumMicroarch_VIA_Unknown,
    kCpumMicroarch_VIA_End,

    kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Wudaokou = kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Unknown,
    kCpumMicroarch_Shanghai_End,

    kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_M1,
    kCpumMicroarch_Cyrix_MediaGX,
    kCpumMicroarch_Cyrix_MediaGXm,
    kCpumMicroarch_Cyrix_M2,
    kCpumMicroarch_Cyrix_Unknown,
    kCpumMicroarch_Cyrix_End,

    kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V30,
    kCpumMicroarch_NEC_End,

    kCpumMicroarch_Unknown,

    kCpumMicroarch_32BitHack = 0x7fffffff
} CPUMMICROARCH;


/** Predicate macro for catching netburst CPUs. */
#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)

/** Predicate macro for catching Core7 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)

/** Predicate macro for catching Core 2 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE2(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core2_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core2_End)

/** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
#define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)

/** Predicate macro for catching AMD Family 0Fh CPUs (aka K8). */
#define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)

/** Predicate macro for catching AMD Family 10H CPUs (aka K10). */
#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)

/** Predicate macro for catching AMD Family 11H CPUs (aka Lion). */
#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)

/** Predicate macro for catching AMD Family 12H CPUs (aka Llano). */
#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)

/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat). */
#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)

/** Predicate macro for catching AMD Family 15H CPUs (Bulldozer and its
 * descendants). */
#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)

/** Predicate macro for catching AMD Family 16H CPUs. */
#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)

/** Predicate macro for catching AMD Zen Family CPUs. */
#define CPUMMICROARCH_IS_AMD_FAM_ZEN(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_Zen_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_Zen_End)

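/** @par Example (illustrative)
 * The range predicates above are plain integer comparisons, so they can be used
 * directly in conditions.  A hypothetical quirk check (fNeedsNetburstQuirk is a
 * made-up flag) might read:
 * @code
 *      if (CPUMMICROARCH_IS_INTEL_NETBURST(pDbEntry->enmMicroarch))
 *          fNeedsNetburstQuirk = true;
 * @endcode
 */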

/**
 * CPUID leaf.
 *
 * @remarks This structure is used by the patch manager and is therefore
 *          more or less set in stone.
 */
typedef struct CPUMCPUIDLEAF
{
    /** The leaf number. */
    uint32_t uLeaf;
    /** The sub-leaf number. */
    uint32_t uSubLeaf;
    /** Sub-leaf mask.  This is 0 when sub-leaves aren't used. */
    uint32_t fSubLeafMask;

    /** The EAX value. */
    uint32_t uEax;
    /** The EBX value. */
    uint32_t uEbx;
    /** The ECX value. */
    uint32_t uEcx;
    /** The EDX value. */
    uint32_t uEdx;

    /** Flags. */
    uint32_t fFlags;
} CPUMCPUIDLEAF;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMCPUIDLEAF, 32);
#endif
/** Pointer to a CPUID leaf. */
typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
/** Pointer to a const CPUID leaf. */
typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;

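/** @par Example (illustrative)
 * A simplified sketch of how a leaf array is meant to be searched; the real
 * lookup lives inside CPUM, this only illustrates the role of fSubLeafMask
 * (sub-leaf bits outside the mask are ignored when matching):
 * @code
 *      static PCCPUMCPUIDLEAF
 *      cpumFindLeafSketch(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf)
 *      {
 *          for (uint32_t i = 0; i < cLeaves; i++)
 *              if (   paLeaves[i].uLeaf == uLeaf
 *                  && paLeaves[i].uSubLeaf == (uSubLeaf & paLeaves[i].fSubLeafMask))
 *                  return &paLeaves[i];
 *          return NULL; // fall back to the unknown-leaf method (see CPUMUNKNOWNCPUID).
 *      }
 * @endcode
 */
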
/** @name CPUMCPUIDLEAF::fFlags
 * @{ */
/** Indicates a working Intel leaf 0xb, where the lower 8 ECX bits are not
 * modified and EDX contains the extended APIC ID. */
#define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES    RT_BIT_32(0)
/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID            RT_BIT_32(1)
/** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE            RT_BIT_32(2)
/** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC               RT_BIT_32(3)
/** Mask of the valid flags. */
#define CPUMCPUIDLEAF_F_VALID_MASK                  UINT32_C(0xf)
/** @} */

/**
 * Method used to deal with unknown CPUID leaves.
 * @remarks Used in patch code.
 */
typedef enum CPUMUNKNOWNCPUID
{
    /** Invalid zero value. */
    CPUMUNKNOWNCPUID_INVALID = 0,
    /** Use given default values (DefCpuId). */
    CPUMUNKNOWNCPUID_DEFAULTS,
    /** Return the last standard leaf.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF,
    /** Return the last standard leaf, with ecx observed.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
    /** The register values are passed through unmodified. */
    CPUMUNKNOWNCPUID_PASSTHRU,
    /** End of valid values. */
    CPUMUNKNOWNCPUID_END,
    /** Ensure 32-bit type. */
    CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
} CPUMUNKNOWNCPUID;
/** Pointer to unknown CPUID leaf method. */
typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;
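
/** @par Example (illustrative)
 * A simplified sketch of how the methods are meant to be applied when the guest
 * queries a leaf CPUM has no entry for (the real handling is inside CPUM;
 * DefCpuId and LastStdLeaf stand in for the configured values):
 * @code
 *      switch (enmUnknownMethod)
 *      {
 *          case CPUMUNKNOWNCPUID_DEFAULTS:
 *              *pResult = DefCpuId;        // fixed default register values
 *              break;
 *          case CPUMUNKNOWNCPUID_LAST_STD_LEAF:
 *          case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:
 *              *pResult = LastStdLeaf;     // mimic observed Sandy Bridge behavior
 *              break;
 *          case CPUMUNKNOWNCPUID_PASSTHRU:
 *              break;                      // leave the register values untouched
 *          default:
 *              AssertFailed();
 *      }
 * @endcode
 */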


/**
 * The register set returned by a CPUID operation.
 */
typedef struct CPUMCPUID
{
    uint32_t uEax;
    uint32_t uEbx;
    uint32_t uEcx;
    uint32_t uEdx;
} CPUMCPUID;
/** Pointer to a CPUID leaf. */
typedef CPUMCPUID *PCPUMCPUID;
/** Pointer to a const CPUID leaf. */
typedef const CPUMCPUID *PCCPUMCPUID;
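
/** @par Example (illustrative)
 * Filling a CPUMCPUID from the real CPU using IPRT's ASMCpuId (from
 * iprt/asm-amd64-x86.h); shown for illustration only, as consumers normally get
 * these values from CPUM rather than from raw CPUID:
 * @code
 *      CPUMCPUID Leaf1;
 *      ASMCpuId(1, &Leaf1.uEax, &Leaf1.uEbx, &Leaf1.uEcx, &Leaf1.uEdx);
 * @endcode
 */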


/**
 * MSR read functions.
 */
typedef enum CPUMMSRRDFN
{
    /** Invalid zero value. */
    kCpumMsrRdFn_Invalid = 0,
    /** Return the CPUMMSRRANGE::uValue. */
    kCpumMsrRdFn_FixedValue,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be used in pair with
     * kCpumMsrWrFn_MsrAlias. */
    kCpumMsrRdFn_MsrAlias,
    /** Write-only register; GPs all read attempts. */
    kCpumMsrRdFn_WriteOnly,

    kCpumMsrRdFn_Ia32P5McAddr,
    kCpumMsrRdFn_Ia32P5McType,
    kCpumMsrRdFn_Ia32TimestampCounter,
    kCpumMsrRdFn_Ia32PlatformId,                /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32ApicBase,
    kCpumMsrRdFn_Ia32FeatureControl,
    kCpumMsrRdFn_Ia32BiosSignId,                /**< Range value returned. */
    kCpumMsrRdFn_Ia32SmmMonitorCtl,
    kCpumMsrRdFn_Ia32PmcN,
    kCpumMsrRdFn_Ia32MonitorFilterLineSize,
    kCpumMsrRdFn_Ia32MPerf,
    kCpumMsrRdFn_Ia32APerf,
    kCpumMsrRdFn_Ia32MtrrCap,                   /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32MtrrPhysBaseN,             /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrPhysMaskN,             /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrFixed,                 /**< Takes CPUMCPU offset. */
    kCpumMsrRdFn_Ia32MtrrDefType,
    kCpumMsrRdFn_Ia32Pat,
    kCpumMsrRdFn_Ia32SysEnterCs,
    kCpumMsrRdFn_Ia32SysEnterEsp,
    kCpumMsrRdFn_Ia32SysEnterEip,
    kCpumMsrRdFn_Ia32McgCap,
    kCpumMsrRdFn_Ia32McgStatus,
    kCpumMsrRdFn_Ia32McgCtl,
    kCpumMsrRdFn_Ia32DebugCtl,
    kCpumMsrRdFn_Ia32SmrrPhysBase,
    kCpumMsrRdFn_Ia32SmrrPhysMask,
    kCpumMsrRdFn_Ia32PlatformDcaCap,
    kCpumMsrRdFn_Ia32CpuDcaCap,
    kCpumMsrRdFn_Ia32Dca0Cap,
    kCpumMsrRdFn_Ia32PerfEvtSelN,               /**< Range value indicates the register number. */
    kCpumMsrRdFn_Ia32PerfStatus,                /**< Range value returned. */
    kCpumMsrRdFn_Ia32PerfCtl,                   /**< Range value returned. */
    kCpumMsrRdFn_Ia32FixedCtrN,                 /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32PerfCapabilities,          /**< Takes reference value. */
    kCpumMsrRdFn_Ia32FixedCtrCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalStatus,          /**< Takes reference value. */
    kCpumMsrRdFn_Ia32PerfGlobalCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrRdFn_Ia32PebsEnable,
    kCpumMsrRdFn_Ia32ClockModulation,           /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermInterrupt,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermStatus,               /**< Range value returned. */
    kCpumMsrRdFn_Ia32Therm2Ctl,                 /**< Range value returned. */
    kCpumMsrRdFn_Ia32MiscEnable,                /**< Range value returned. */
    kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN,      /**< Takes bank number. */
    kCpumMsrRdFn_Ia32McNCtl2,                   /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32DsArea,
    kCpumMsrRdFn_Ia32TscDeadline,
    kCpumMsrRdFn_Ia32X2ApicN,
    kCpumMsrRdFn_Ia32DebugInterface,
    kCpumMsrRdFn_Ia32VmxBasic,                  /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxPinbasedCtls,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcbasedCtls,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxExitCtls,               /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEntryCtls,              /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxMisc,                   /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed0,              /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed1,              /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed0,              /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed1,              /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmcsEnum,               /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcBasedCtls2,         /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEptVpidCap,             /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTruePinbasedCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueExitCtls,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueEntryCtls,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmFunc,                 /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32SpecCtrl,
    kCpumMsrRdFn_Ia32ArchCapabilities,

    kCpumMsrRdFn_Amd64Efer,
    kCpumMsrRdFn_Amd64SyscallTarget,
    kCpumMsrRdFn_Amd64LongSyscallTarget,
    kCpumMsrRdFn_Amd64CompSyscallTarget,
    kCpumMsrRdFn_Amd64SyscallFlagMask,
    kCpumMsrRdFn_Amd64FsBase,
    kCpumMsrRdFn_Amd64GsBase,
    kCpumMsrRdFn_Amd64KernelGsBase,
    kCpumMsrRdFn_Amd64TscAux,

    kCpumMsrRdFn_IntelEblCrPowerOn,
    kCpumMsrRdFn_IntelI7CoreThreadCount,
    kCpumMsrRdFn_IntelP4EbcHardPowerOn,
    kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
    kCpumMsrRdFn_IntelP4EbcFrequencyId,
    kCpumMsrRdFn_IntelP6FsbFrequency,           /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPlatformInfo,
    kCpumMsrRdFn_IntelFlexRatio,                /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPkgCStConfigControl,
    kCpumMsrRdFn_IntelPmgIoCaptureBase,
    kCpumMsrRdFn_IntelLastBranchFromToN,
    kCpumMsrRdFn_IntelLastBranchFromN,
    kCpumMsrRdFn_IntelLastBranchToN,
    kCpumMsrRdFn_IntelLastBranchTos,
    kCpumMsrRdFn_IntelBblCrCtl,
    kCpumMsrRdFn_IntelBblCrCtl3,
    kCpumMsrRdFn_IntelI7TemperatureTarget,      /**< Range value returned. */
    kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,    /**< Takes register number. */
    kCpumMsrRdFn_IntelI7MiscPwrMgmt,
    kCpumMsrRdFn_IntelP6CrN,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrRdFn_IntelI7SandyAesNiCtl,
    kCpumMsrRdFn_IntelI7TurboRatioLimit,        /**< Returns range value. */
    kCpumMsrRdFn_IntelI7LbrSelect,
    kCpumMsrRdFn_IntelI7SandyErrorControl,
    kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,   /**< Returns range value. */
    kCpumMsrRdFn_IntelI7PowerCtl,
    kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
    kCpumMsrRdFn_IntelI7PebsLdLat,
    kCpumMsrRdFn_IntelI7PkgCnResidencyN,        /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7CoreCnResidencyN,       /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyVrMiscConfig,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyRaplPowerUnit,     /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN,        /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgC2Residency,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerLimit,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPerfStatus,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerInfo,       /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerLimit,     /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPerfStatus,     /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerInfo,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PowerLimit,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0Policy,          /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PerfStatus,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1PowerLimit,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1Policy,          /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpNominal,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1,     /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2,     /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
    kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
    kCpumMsrRdFn_IntelI7UncCBoxConfig,
    kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
    kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrRdFn_IntelI7SmiCount,
    kCpumMsrRdFn_IntelCore2EmttmCrTablesN,      /**< Range value returned. */
    kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrRdFn_IntelCore1ExtConfig,
    kCpumMsrRdFn_IntelCore1DtsCalControl,
    kCpumMsrRdFn_IntelCore2PeciControl,
    kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,

    kCpumMsrRdFn_P6LastBranchFromIp,
    kCpumMsrRdFn_P6LastBranchToIp,
    kCpumMsrRdFn_P6LastIntFromIp,
    kCpumMsrRdFn_P6LastIntToIp,

    kCpumMsrRdFn_AmdFam15hTscRate,
    kCpumMsrRdFn_AmdFam15hLwpCfg,
    kCpumMsrRdFn_AmdFam15hLwpCbAddr,
    kCpumMsrRdFn_AmdFam10hMc4MiscN,
    kCpumMsrRdFn_AmdK8PerfCtlN,
    kCpumMsrRdFn_AmdK8PerfCtrN,
    kCpumMsrRdFn_AmdK8SysCfg,                   /**< Range value returned. */
    kCpumMsrRdFn_AmdK8HwCr,
    kCpumMsrRdFn_AmdK8IorrBaseN,
    kCpumMsrRdFn_AmdK8IorrMaskN,
    kCpumMsrRdFn_AmdK8TopOfMemN,
    kCpumMsrRdFn_AmdK8NbCfg1,
    kCpumMsrRdFn_AmdK8McXcptRedir,
    kCpumMsrRdFn_AmdK8CpuNameN,
    kCpumMsrRdFn_AmdK8HwThermalCtrl,            /**< Range value returned. */
    kCpumMsrRdFn_AmdK8SwThermalCtrl,
    kCpumMsrRdFn_AmdK8FidVidControl,            /**< Range value returned. */
    kCpumMsrRdFn_AmdK8FidVidStatus,             /**< Range value returned. */
    kCpumMsrRdFn_AmdK8McCtlMaskN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrRdFn_AmdK8IntPendingMessage,
    kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrRdFn_AmdFam10hPStateCurLimit,       /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateControl,        /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateStatus,         /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateN,              /**< Returns range value.  This isn't a register index! */
    kCpumMsrRdFn_AmdFam10hCofVidControl,        /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCofVidStatus,         /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrRdFn_AmdK8SmmBase,
    kCpumMsrRdFn_AmdK8SmmAddr,
    kCpumMsrRdFn_AmdK8SmmMask,
    kCpumMsrRdFn_AmdK8VmCr,
    kCpumMsrRdFn_AmdK8IgnNe,
    kCpumMsrRdFn_AmdK8SmmCtl,
    kCpumMsrRdFn_AmdK8VmHSavePa,
    kCpumMsrRdFn_AmdFam10hVmLockKey,
    kCpumMsrRdFn_AmdFam10hSmmLockKey,
    kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
    kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrRdFn_AmdK7MicrocodeCtl,             /**< Returns range value. */
    kCpumMsrRdFn_AmdK7ClusterIdMaybe,           /**< Returns range value. */
    kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrRdFn_AmdK8PatchLevel,               /**< Returns range value. */
    kCpumMsrRdFn_AmdK7DebugStatusMaybe,
    kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
    kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
    kCpumMsrRdFn_AmdK7NodeId,
    kCpumMsrRdFn_AmdK7DrXAddrMaskN,             /**< Takes register index. */
    kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrRdFn_AmdK7LoadStoreCfg,
    kCpumMsrRdFn_AmdK7InstrCacheCfg,
    kCpumMsrRdFn_AmdK7DataCacheCfg,
    kCpumMsrRdFn_AmdK7BusUnitCfg,
    kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
    kCpumMsrRdFn_AmdFam15hFpuCfg,
    kCpumMsrRdFn_AmdFam15hDecoderCfg,
    kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
    kCpumMsrRdFn_AmdFam15hExecUnitCfg,
    kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
    kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrRdFn_AmdFam10hIbsOpRip,
    kCpumMsrRdFn_AmdFam10hIbsOpData,
    kCpumMsrRdFn_AmdFam10hIbsOpData2,
    kCpumMsrRdFn_AmdFam10hIbsOpData3,
    kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsCtl,
    kCpumMsrRdFn_AmdFam14hIbsBrTarget,

    kCpumMsrRdFn_Gim,

    /** End of valid MSR read function indexes. */
    kCpumMsrRdFn_End
} CPUMMSRRDFN;

/**
 * MSR write functions.
 */
typedef enum CPUMMSRWRFN
{
    /** Invalid zero value. */
    kCpumMsrWrFn_Invalid = 0,
    /** Writes are ignored; the fWrGpMask is observed though. */
    kCpumMsrWrFn_IgnoreWrite,
    /** Writes cause GP(0) to be raised; the fWrGpMask should be UINT64_MAX. */
    kCpumMsrWrFn_ReadOnly,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be used in pair with
     * kCpumMsrRdFn_MsrAlias. */
    kCpumMsrWrFn_MsrAlias,

    kCpumMsrWrFn_Ia32P5McAddr,
    kCpumMsrWrFn_Ia32P5McType,
    kCpumMsrWrFn_Ia32TimestampCounter,
    kCpumMsrWrFn_Ia32ApicBase,
    kCpumMsrWrFn_Ia32FeatureControl,
    kCpumMsrWrFn_Ia32BiosSignId,
    kCpumMsrWrFn_Ia32BiosUpdateTrigger,
    kCpumMsrWrFn_Ia32SmmMonitorCtl,
    kCpumMsrWrFn_Ia32PmcN,
    kCpumMsrWrFn_Ia32MonitorFilterLineSize,
    kCpumMsrWrFn_Ia32MPerf,
    kCpumMsrWrFn_Ia32APerf,
    kCpumMsrWrFn_Ia32MtrrPhysBaseN,             /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrPhysMaskN,             /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrFixed,                 /**< Takes CPUMCPU offset. */
    kCpumMsrWrFn_Ia32MtrrDefType,
    kCpumMsrWrFn_Ia32Pat,
    kCpumMsrWrFn_Ia32SysEnterCs,
    kCpumMsrWrFn_Ia32SysEnterEsp,
    kCpumMsrWrFn_Ia32SysEnterEip,
    kCpumMsrWrFn_Ia32McgStatus,
    kCpumMsrWrFn_Ia32McgCtl,
    kCpumMsrWrFn_Ia32DebugCtl,
    kCpumMsrWrFn_Ia32SmrrPhysBase,
    kCpumMsrWrFn_Ia32SmrrPhysMask,
    kCpumMsrWrFn_Ia32PlatformDcaCap,
    kCpumMsrWrFn_Ia32Dca0Cap,
    kCpumMsrWrFn_Ia32PerfEvtSelN,               /**< Range value indicates the register number. */
    kCpumMsrWrFn_Ia32PerfStatus,
    kCpumMsrWrFn_Ia32PerfCtl,
    kCpumMsrWrFn_Ia32FixedCtrN,                 /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32PerfCapabilities,
    kCpumMsrWrFn_Ia32FixedCtrCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalStatus,
    kCpumMsrWrFn_Ia32PerfGlobalCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrWrFn_Ia32PebsEnable,
    kCpumMsrWrFn_Ia32ClockModulation,
    kCpumMsrWrFn_Ia32ThermInterrupt,
    kCpumMsrWrFn_Ia32ThermStatus,
    kCpumMsrWrFn_Ia32Therm2Ctl,
    kCpumMsrWrFn_Ia32MiscEnable,
    kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN,      /**< Takes bank number. */
    kCpumMsrWrFn_Ia32McNCtl2,                   /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32DsArea,
    kCpumMsrWrFn_Ia32TscDeadline,
    kCpumMsrWrFn_Ia32X2ApicN,
    kCpumMsrWrFn_Ia32DebugInterface,
    kCpumMsrWrFn_Ia32SpecCtrl,
    kCpumMsrWrFn_Ia32PredCmd,
    kCpumMsrWrFn_Ia32FlushCmd,

    kCpumMsrWrFn_Amd64Efer,
    kCpumMsrWrFn_Amd64SyscallTarget,
    kCpumMsrWrFn_Amd64LongSyscallTarget,
    kCpumMsrWrFn_Amd64CompSyscallTarget,
    kCpumMsrWrFn_Amd64SyscallFlagMask,
    kCpumMsrWrFn_Amd64FsBase,
    kCpumMsrWrFn_Amd64GsBase,
    kCpumMsrWrFn_Amd64KernelGsBase,
    kCpumMsrWrFn_Amd64TscAux,
    kCpumMsrWrFn_IntelEblCrPowerOn,
    kCpumMsrWrFn_IntelP4EbcHardPowerOn,
    kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
    kCpumMsrWrFn_IntelP4EbcFrequencyId,
    kCpumMsrWrFn_IntelFlexRatio,
    kCpumMsrWrFn_IntelPkgCStConfigControl,
    kCpumMsrWrFn_IntelPmgIoCaptureBase,
    kCpumMsrWrFn_IntelLastBranchFromToN,
    kCpumMsrWrFn_IntelLastBranchFromN,
    kCpumMsrWrFn_IntelLastBranchToN,
    kCpumMsrWrFn_IntelLastBranchTos,
    kCpumMsrWrFn_IntelBblCrCtl,
    kCpumMsrWrFn_IntelBblCrCtl3,
    kCpumMsrWrFn_IntelI7TemperatureTarget,
    kCpumMsrWrFn_IntelI7MsrOffCoreResponseN,    /**< Takes register number. */
    kCpumMsrWrFn_IntelI7MiscPwrMgmt,
    kCpumMsrWrFn_IntelP6CrN,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrWrFn_IntelI7SandyAesNiCtl,
    kCpumMsrWrFn_IntelI7TurboRatioLimit,
    kCpumMsrWrFn_IntelI7LbrSelect,
    kCpumMsrWrFn_IntelI7SandyErrorControl,
    kCpumMsrWrFn_IntelI7PowerCtl,
    kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
    kCpumMsrWrFn_IntelI7PebsLdLat,
    kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
    kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
    kCpumMsrWrFn_IntelI7SandyRaplPowerUnit,     /**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
    kCpumMsrWrFn_IntelI7SandyPkgC2Residency,    /**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
    kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0Policy,
    kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp1Policy,
    kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
    kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
    kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
    kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
    kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrWrFn_IntelCore1ExtConfig,
    kCpumMsrWrFn_IntelCore1DtsCalControl,
    kCpumMsrWrFn_IntelCore2PeciControl,

    kCpumMsrWrFn_P6LastIntFromIp,
    kCpumMsrWrFn_P6LastIntToIp,

    kCpumMsrWrFn_AmdFam15hTscRate,
    kCpumMsrWrFn_AmdFam15hLwpCfg,
    kCpumMsrWrFn_AmdFam15hLwpCbAddr,
    kCpumMsrWrFn_AmdFam10hMc4MiscN,
    kCpumMsrWrFn_AmdK8PerfCtlN,
    kCpumMsrWrFn_AmdK8PerfCtrN,
    kCpumMsrWrFn_AmdK8SysCfg,
    kCpumMsrWrFn_AmdK8HwCr,
    kCpumMsrWrFn_AmdK8IorrBaseN,
    kCpumMsrWrFn_AmdK8IorrMaskN,
    kCpumMsrWrFn_AmdK8TopOfMemN,
    kCpumMsrWrFn_AmdK8NbCfg1,
    kCpumMsrWrFn_AmdK8McXcptRedir,
    kCpumMsrWrFn_AmdK8CpuNameN,
    kCpumMsrWrFn_AmdK8HwThermalCtrl,
    kCpumMsrWrFn_AmdK8SwThermalCtrl,
    kCpumMsrWrFn_AmdK8FidVidControl,
    kCpumMsrWrFn_AmdK8McCtlMaskN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrWrFn_AmdK8IntPendingMessage,
    kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrWrFn_AmdFam10hPStateControl,
    kCpumMsrWrFn_AmdFam10hPStateStatus,
    kCpumMsrWrFn_AmdFam10hPStateN,
    kCpumMsrWrFn_AmdFam10hCofVidControl,
    kCpumMsrWrFn_AmdFam10hCofVidStatus,
    kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrWrFn_AmdK8SmmBase,
    kCpumMsrWrFn_AmdK8SmmAddr,
    kCpumMsrWrFn_AmdK8SmmMask,
    kCpumMsrWrFn_AmdK8VmCr,
    kCpumMsrWrFn_AmdK8IgnNe,
    kCpumMsrWrFn_AmdK8SmmCtl,
    kCpumMsrWrFn_AmdK8VmHSavePa,
    kCpumMsrWrFn_AmdFam10hVmLockKey,
    kCpumMsrWrFn_AmdFam10hSmmLockKey,
    kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
    kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrWrFn_AmdK7MicrocodeCtl,
    kCpumMsrWrFn_AmdK7ClusterIdMaybe,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrWrFn_AmdK8PatchLoader,
    kCpumMsrWrFn_AmdK7DebugStatusMaybe,
    kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
    kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
    kCpumMsrWrFn_AmdK7NodeId,
    kCpumMsrWrFn_AmdK7DrXAddrMaskN,             /**< Takes register index. */
    kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrWrFn_AmdK7LoadStoreCfg,
    kCpumMsrWrFn_AmdK7InstrCacheCfg,
    kCpumMsrWrFn_AmdK7DataCacheCfg,
    kCpumMsrWrFn_AmdK7BusUnitCfg,
    kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
    kCpumMsrWrFn_AmdFam15hFpuCfg,
    kCpumMsrWrFn_AmdFam15hDecoderCfg,
    kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
    kCpumMsrWrFn_AmdFam15hExecUnitCfg,
    kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
    kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrWrFn_AmdFam10hIbsOpRip,
    kCpumMsrWrFn_AmdFam10hIbsOpData,
    kCpumMsrWrFn_AmdFam10hIbsOpData2,
    kCpumMsrWrFn_AmdFam10hIbsOpData3,
    kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsCtl,
    kCpumMsrWrFn_AmdFam14hIbsBrTarget,

    kCpumMsrWrFn_Gim,

    /** End of valid MSR write function indexes. */
    kCpumMsrWrFn_End
} CPUMMSRWRFN;

/**
 * MSR range.
 */
typedef struct CPUMMSRRANGE
{
    /** The first MSR. [0] */
    uint32_t uFirst;
    /** The last MSR. [4] */
    uint32_t uLast;
    /** The read function (CPUMMSRRDFN). [8] */
    uint16_t enmRdFn;
    /** The write function (CPUMMSRWRFN). [10] */
    uint16_t enmWrFn;
    /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
     * UINT16_MAX if not used by the read and write functions. [12] */
    uint32_t offCpumCpu : 24;
    /** Reserved for future hacks. [15] */
    uint32_t fReserved : 8;
    /** The init/read value. [16]
     * When enmRdFn is kCpumMsrRdFn_INIT_VALUE, this is the value returned on RDMSR.
     * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
     * offset into CPUM. */
    uint64_t uValue;
    /** The bits to ignore when writing. [24] */
    uint64_t fWrIgnMask;
    /** The bits that will cause a GP(0) when writing. [32]
     * This is always checked prior to calling the write function.  Using
     * UINT64_MAX effectively marks the MSR as read-only. */
    uint64_t fWrGpMask;
    /** The register name, if applicable. [40] */
    char szName[56];

    /** The number of reads. */
    STAMCOUNTER cReads;
    /** The number of writes. */
    STAMCOUNTER cWrites;
    /** The number of times ignored bits were written. */
    STAMCOUNTER cIgnoredBits;
    /** The number of GPs generated. */
    STAMCOUNTER cGps;
} CPUMMSRRANGE;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMMSRRANGE, 128);
#endif
/** Pointer to an MSR range. */
typedef CPUMMSRRANGE *PCPUMMSRRANGE;
/** Pointer to a const MSR range. */
typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
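
/** @par Example (illustrative)
 * A simplified sketch of how the write masks are meant to gate a WRMSR before
 * the range's write function runs (the real dispatcher lives in CPUM and does
 * more, e.g. statistics and reporting of ignored bits):
 * @code
 *      VBOXSTRICTRC cpumMsrWriteSketch(PCPUMMSRRANGE pRange, uint64_t uValue)
 *      {
 *          if (uValue & pRange->fWrGpMask)     // fWrGpMask bits always raise GP(0);
 *              return VERR_CPUM_RAISE_GP_0;    // UINT64_MAX makes the MSR read-only.
 *          uValue &= ~pRange->fWrIgnMask;      // fWrIgnMask bits are silently dropped,
 *          return VINF_SUCCESS;                // ...then dispatch on pRange->enmWrFn.
 *      }
 * @endcode
 */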


/**
 * MSRs which are required while exploding features.
 */
typedef struct CPUMMSRS
{
    union
    {
        VMXMSRS vmx;
        SVMMSRS svm;
    } hwvirt;
} CPUMMSRS;
/** Pointer to a CPUMMSRS struct. */
typedef CPUMMSRS *PCPUMMSRS;
/** Pointer to a const CPUMMSRS struct. */
typedef CPUMMSRS const *PCCPUMMSRS;


/**
 * CPU features and quirks.
 * This is mostly exploded CPUID info.
 */
typedef struct CPUMFEATURES
{
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t enmCpuVendor;
    /** The CPU family. */
    uint8_t uFamily;
    /** The CPU model. */
    uint8_t uModel;
    /** The CPU stepping. */
    uint8_t uStepping;
    /** The microarchitecture. */
#ifndef VBOX_FOR_DTRACE_LIB
    CPUMMICROARCH enmMicroarch;
#else
    uint32_t enmMicroarch;
#endif
    /** The maximum physical address width of the CPU. */
    uint8_t cMaxPhysAddrWidth;
    /** The maximum linear address width of the CPU. */
    uint8_t cMaxLinearAddrWidth;
    /** Max size of the extended state (or FPU state if no XSAVE). */
    uint16_t cbMaxExtendedState;

    /** Supports MSRs. */
    uint32_t fMsr : 1;
    /** Supports the page size extension (4/2 MB pages). */
    uint32_t fPse : 1;
    /** Supports 36-bit page size extension (4 MB pages can map memory above
     * 4GB). */
    uint32_t fPse36 : 1;
    /** Supports physical address extension (PAE). */
    uint32_t fPae : 1;
    /** Supports page-global extension (PGE). */
    uint32_t fPge : 1;
    /** Page attribute table (PAT) support (page level cache control). */
    uint32_t fPat : 1;
    /** Supports the FXSAVE and FXRSTOR instructions. */
    uint32_t fFxSaveRstor : 1;
    /** Supports the XSAVE and XRSTOR instructions. */
    uint32_t fXSaveRstor : 1;
    /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
    uint32_t fOpSysXSaveRstor : 1;
    /** Supports MMX. */
    uint32_t fMmx : 1;
    /** Supports AMD extensions to MMX instructions. */
    uint32_t fAmdMmxExts : 1;
    /** Supports SSE. */
    uint32_t fSse : 1;
    /** Supports SSE2. */
    uint32_t fSse2 : 1;
    /** Supports SSE3. */
    uint32_t fSse3 : 1;
    /** Supports SSSE3. */
    uint32_t fSsse3 : 1;
    /** Supports SSE4.1. */
    uint32_t fSse41 : 1;
    /** Supports SSE4.2. */
    uint32_t fSse42 : 1;
    /** Supports AVX. */
    uint32_t fAvx : 1;
    /** Supports AVX2. */
    uint32_t fAvx2 : 1;
    /** Supports AVX512 foundation. */
    uint32_t fAvx512Foundation : 1;
    /** Supports RDTSC. */
    uint32_t fTsc : 1;
    /** Intel SYSENTER/SYSEXIT support. */
    uint32_t fSysEnter : 1;
    /** First generation APIC. */
    uint32_t fApic : 1;
    /** Second generation APIC. */
    uint32_t fX2Apic : 1;
    /** Hypervisor present. */
    uint32_t fHypervisorPresent : 1;
    /** MWAIT & MONITOR instructions supported. */
    uint32_t fMonitorMWait : 1;
    /** MWAIT Extensions present. */
    uint32_t fMWaitExtensions : 1;
    /** Supports CMPXCHG16B in 64-bit mode. */
    uint32_t fMovCmpXchg16b : 1;
    /** Supports CLFLUSH. */
    uint32_t fClFlush : 1;
    /** Supports CLFLUSHOPT. */
    uint32_t fClFlushOpt : 1;
    /** Supports IA32_PRED_CMD.IBPB. */
    uint32_t fIbpb : 1;
    /** Supports IA32_SPEC_CTRL.IBRS. */
    uint32_t fIbrs : 1;
    /** Supports IA32_SPEC_CTRL.STIBP. */
    uint32_t fStibp : 1;
    /** Supports IA32_FLUSH_CMD. */
    uint32_t fFlushCmd : 1;
    /** Supports IA32_ARCH_CAP. */
    uint32_t fArchCap : 1;
    /** Supports MD_CLEAR functionality (VERW, IA32_FLUSH_CMD). */
    uint32_t fMdsClear : 1;
    /** Supports PCID. */
    uint32_t fPcid : 1;
    /** Supports INVPCID. */
    uint32_t fInvpcid : 1;
    /** Supports read/write FSGSBASE instructions. */
    uint32_t fFsGsBase : 1;
    /** Supports BMI1 instructions (ANDN, BEXTR, BLSI, BLSMSK, BLSR, and TZCNT). */
    uint32_t fBmi1 : 1;
    /** Supports BMI2 instructions (BZHI, MULX, PDEP, PEXT, RORX, SARX, SHRX,
     * and SHLX). */
    uint32_t fBmi2 : 1;
    /** Supports the RDRAND instruction. */
    uint32_t fRdRand : 1;
    /** Supports the RDSEED instruction. */
    uint32_t fRdSeed : 1;

    /** Supports AMD 3DNow instructions. */
    uint32_t f3DNow : 1;
    /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
    uint32_t f3DNowPrefetch : 1;

    /** AMD64: Supports long mode. */
    uint32_t fLongMode : 1;
    /** AMD64: SYSCALL/SYSRET support. */
    uint32_t fSysCall : 1;
    /** AMD64: No-execute page table bit. */
    uint32_t fNoExecute : 1;
    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
    uint32_t fLahfSahf : 1;
    /** AMD64: Supports RDTSCP. */
    uint32_t fRdTscP : 1;
    /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
    uint32_t fMovCr8In32Bit : 1;
    /** AMD64: Supports XOP (similar to VEX3/AVX). */
    uint32_t fXop : 1;
    /** AMD64: Supports ABM, i.e. the LZCNT instruction. */
    uint32_t fAbm : 1;
    /** AMD64: Supports TBM (BEXTR, BLCFILL, BLCI, BLCIC, BLCMSK, BLCS,
     * BLSFILL, BLSIC, T1MSKC, and TZMSK). */
    uint32_t fTbm : 1;

    /** Indicates that FPU instruction and data pointers may leak.
     * This generally applies to recent AMD CPUs, where the FPU IP and DP pointers
     * are only saved and restored if an exception is pending. */
    uint32_t fLeakyFxSR : 1;

    /** AMD64: Supports AMD SVM. */
    uint32_t fSvm : 1;

    /** Support for Intel VMX. */
    uint32_t fVmx : 1;

    /** Indicates that speculative execution control CPUID bits and MSRs are exposed.
     * The details are different for Intel and AMD but both have similar
     * functionality. */
    uint32_t fSpeculationControl : 1;

    /** MSR_IA32_ARCH_CAPABILITIES: RDCL_NO (bit 0).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchRdclNo : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: IBRS_ALL (bit 1).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchIbrsAll : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: RSB Override (bit 2).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchRsbOverride : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: SKIP_L1DFL_VMENTRY (bit 3).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchVmmNeedNotFlushL1d : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: MDS_NO (bit 5).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchMdsNo : 1;

    /** Alignment padding / reserved for future use (96 bits total, plus 12 bytes
     * prior to the bit fields -> total of 24 bytes). */
    uint32_t fPadding0 : 1;
    uint32_t fPadding1;


    /** @name SVM
     * @{ */
    /** SVM: Supports Nested-paging. */
    uint32_t fSvmNestedPaging : 1;
    /** SVM: Supports LBR (Last Branch Record) virtualization. */
    uint32_t fSvmLbrVirt : 1;
    /** SVM: Supports SVM lock. */
    uint32_t fSvmSvmLock : 1;
    /** SVM: Supports Next RIP save. */
    uint32_t fSvmNextRipSave : 1;
    /** SVM: Supports TSC rate MSR. */
    uint32_t fSvmTscRateMsr : 1;
    /** SVM: Supports VMCB clean bits. */
    uint32_t fSvmVmcbClean : 1;
    /** SVM: Supports Flush-by-ASID. */
    uint32_t fSvmFlusbByAsid : 1;
    /** SVM: Supports decode assists. */
    uint32_t fSvmDecodeAssists : 1;
    /** SVM: Supports Pause filter. */
    uint32_t fSvmPauseFilter : 1;
    /** SVM: Supports Pause filter threshold. */
    uint32_t fSvmPauseFilterThreshold : 1;
    /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
    uint32_t fSvmAvic : 1;
    /** SVM: Supports Virtualized VMSAVE/VMLOAD. */
    uint32_t fSvmVirtVmsaveVmload : 1;
    /** SVM: Supports VGIF (Virtual Global Interrupt Flag). */
    uint32_t fSvmVGif : 1;
    /** SVM: Supports GMET (Guest Mode Execute Trap Extension). */
    uint32_t fSvmGmet : 1;
    /** SVM: Padding / reserved for future features (64 bits total w/ max ASID). */
    uint32_t fSvmPadding0 : 18;
    /** SVM: Maximum supported ASID. */
    uint32_t uSvmMaxAsid;
    /** @} */


    /** VMX: Maximum physical address width. */
    uint32_t cVmxMaxPhysAddrWidth : 8;

    /** @name VMX basic controls.
     * @{ */
    /** VMX: Supports INS/OUTS VM-exit instruction info. */
    uint32_t fVmxInsOutInfo : 1;
    /** @} */

    /** @name VMX Pin-based controls.
     * @{ */
    /** VMX: Supports external interrupt VM-exit. */
    uint32_t fVmxExtIntExit : 1;
    /** VMX: Supports NMI VM-exit. */
    uint32_t fVmxNmiExit : 1;
    /** VMX: Supports Virtual NMIs. */
    uint32_t fVmxVirtNmi : 1;
    /** VMX: Supports preemption timer. */
    uint32_t fVmxPreemptTimer : 1;
    /** VMX: Supports posted interrupts. */
    uint32_t fVmxPostedInt : 1;
    /** @} */

    /** @name VMX Processor-based controls.
     * @{ */
    /** VMX: Supports Interrupt-window exiting. */
    uint32_t fVmxIntWindowExit : 1;
    /** VMX: Supports TSC offsetting. */
    uint32_t fVmxTscOffsetting : 1;
    /** VMX: Supports HLT exiting. */
    uint32_t fVmxHltExit : 1;
    /** VMX: Supports INVLPG exiting. */
    uint32_t fVmxInvlpgExit : 1;
    /** VMX: Supports MWAIT exiting. */
    uint32_t fVmxMwaitExit : 1;
    /** VMX: Supports RDPMC exiting. */
    uint32_t fVmxRdpmcExit : 1;
    /** VMX: Supports RDTSC exiting. */
    uint32_t fVmxRdtscExit : 1;
    /** VMX: Supports CR3-load exiting. */
    uint32_t fVmxCr3LoadExit : 1;
    /** VMX: Supports CR3-store exiting. */
    uint32_t fVmxCr3StoreExit : 1;
    /** VMX: Supports tertiary processor-based VM-execution controls. */
    uint32_t fVmxTertiaryExecCtls : 1;
    /** VMX: Supports CR8-load exiting. */
    uint32_t fVmxCr8LoadExit : 1;
    /** VMX: Supports CR8-store exiting. */
    uint32_t fVmxCr8StoreExit : 1;
    /** VMX: Supports TPR shadow. */
    uint32_t fVmxUseTprShadow : 1;
    /** VMX: Supports NMI-window exiting. */
    uint32_t fVmxNmiWindowExit : 1;
    /** VMX: Supports Mov-DRx exiting. */
    uint32_t fVmxMovDRxExit : 1;
    /** VMX: Supports Unconditional I/O exiting. */
    uint32_t fVmxUncondIoExit : 1;
    /** VMX: Supports I/O bitmaps. */
    uint32_t fVmxUseIoBitmaps : 1;
    /** VMX: Supports Monitor Trap Flag. */
    uint32_t fVmxMonitorTrapFlag : 1;
    /** VMX: Supports MSR bitmap. */
    uint32_t fVmxUseMsrBitmaps : 1;
    /** VMX: Supports MONITOR exiting. */
    uint32_t fVmxMonitorExit : 1;
    /** VMX: Supports PAUSE exiting. */
    uint32_t fVmxPauseExit : 1;
    /** VMX: Supports secondary processor-based VM-execution controls. */
    uint32_t fVmxSecondaryExecCtls : 1;
    /** @} */

    /** @name VMX Secondary processor-based controls.
     * @{ */
    /** VMX: Supports virtualize-APIC access. */
    uint32_t fVmxVirtApicAccess : 1;
    /** VMX: Supports EPT (Extended Page Tables). */
    uint32_t fVmxEpt : 1;
    /** VMX: Supports descriptor-table exiting. */
    uint32_t fVmxDescTableExit : 1;
    /** VMX: Supports RDTSCP. */
    uint32_t fVmxRdtscp : 1;
    /** VMX: Supports virtualize-x2APIC mode. */
    uint32_t fVmxVirtX2ApicMode : 1;
    /** VMX: Supports VPID. */
    uint32_t fVmxVpid : 1;
    /** VMX: Supports WBINVD exiting. */
    uint32_t fVmxWbinvdExit : 1;
    /** VMX: Supports Unrestricted guest. */
    uint32_t fVmxUnrestrictedGuest : 1;
    /** VMX: Supports APIC-register virtualization. */
    uint32_t fVmxApicRegVirt : 1;
    /** VMX: Supports virtual-interrupt delivery. */
    uint32_t fVmxVirtIntDelivery : 1;
    /** VMX: Supports Pause-loop exiting. */
    uint32_t fVmxPauseLoopExit : 1;
    /** VMX: Supports RDRAND exiting. */
    uint32_t fVmxRdrandExit : 1;
    /** VMX: Supports INVPCID. */
    uint32_t fVmxInvpcid : 1;
    /** VMX: Supports VM functions. */
    uint32_t fVmxVmFunc : 1;
    /** VMX: Supports VMCS shadowing. */
    uint32_t fVmxVmcsShadowing : 1;
    /** VMX: Supports RDSEED exiting. */
    uint32_t fVmxRdseedExit : 1;
    /** VMX: Supports PML. */
    uint32_t fVmxPml : 1;
    /** VMX: Supports EPT-violations \#VE. */
    uint32_t fVmxEptXcptVe : 1;
    /** VMX: Supports conceal VMX from PT. */
    uint32_t fVmxConcealVmxFromPt : 1;
    /** VMX: Supports XSAVES/XRSTORS. */
    uint32_t fVmxXsavesXrstors : 1;
    /** VMX: Supports mode-based execute control for EPT. */
    uint32_t fVmxModeBasedExecuteEpt : 1;
    /** VMX: Supports sub-page write permissions for EPT. */
    uint32_t fVmxSppEpt : 1;
    /** VMX: Supports Intel PT to output guest-physical addresses for EPT. */
    uint32_t fVmxPtEpt : 1;
    /** VMX: Supports TSC scaling. */
    uint32_t fVmxUseTscScaling : 1;
    /** VMX: Supports TPAUSE, UMONITOR, or UMWAIT. */
    uint32_t fVmxUserWaitPause : 1;
    /** VMX: Supports enclave (ENCLV) exiting. */
    uint32_t fVmxEnclvExit : 1;
    /** @} */

    /** @name VMX Tertiary processor-based controls.
     * @{ */
    /** VMX: Supports LOADIWKEY exiting. */
    uint32_t fVmxLoadIwKeyExit : 1;
    /** @} */

    /** @name VMX VM-entry controls.
     * @{ */
    /** VMX: Supports load-debug controls on VM-entry. */
    uint32_t fVmxEntryLoadDebugCtls : 1;
    /** VMX: Supports IA32e mode guest. */
    uint32_t fVmxIa32eModeGuest : 1;
    /** VMX: Supports load guest EFER MSR on VM-entry. */
    uint32_t fVmxEntryLoadEferMsr : 1;
    /** VMX: Supports load guest PAT MSR on VM-entry. */
    uint32_t fVmxEntryLoadPatMsr : 1;
    /** @} */

    /** @name VMX VM-exit controls.
     * @{ */
    /** VMX: Supports save debug controls on VM-exit. */
    uint32_t fVmxExitSaveDebugCtls : 1;
    /** VMX: Supports host-address space size. */
    uint32_t fVmxHostAddrSpaceSize : 1;
    /** VMX: Supports acknowledge external interrupt on VM-exit. */
    uint32_t fVmxExitAckExtInt : 1;
    /** VMX: Supports save guest PAT MSR on VM-exit. */
    uint32_t fVmxExitSavePatMsr : 1;
    /** VMX: Supports load host PAT MSR on VM-exit. */
    uint32_t fVmxExitLoadPatMsr : 1;
    /** VMX: Supports save guest EFER MSR on VM-exit. */
    uint32_t fVmxExitSaveEferMsr : 1;
    /** VMX: Supports load host EFER MSR on VM-exit. */
    uint32_t fVmxExitLoadEferMsr : 1;
    /** VMX: Supports save VMX preemption timer on VM-exit. */
    uint32_t fVmxSavePreemptTimer : 1;
    /** @} */

    /** @name VMX Miscellaneous data.
     * @{ */
    /** VMX: Supports storing EFER.LMA into IA32e-mode guest field on VM-exit. */
    uint32_t fVmxExitSaveEferLma : 1;
    /** VMX: Whether Intel PT (Processor Trace) is supported in VMX mode or not. */
    uint32_t fVmxPt : 1;
    /** VMX: Supports VMWRITE to any valid VMCS field incl. read-only fields, otherwise
     * VMWRITE cannot modify read-only VM-exit information fields. */
    uint32_t fVmxVmwriteAll : 1;
    /** VMX: Supports injection of software interrupts, ICEBP on VM-entry for zero
     * length instructions. */
    uint32_t fVmxEntryInjectSoftInt : 1;
    /** @} */

    /** VMX: Padding / reserved for future features. */
    uint32_t fVmxPadding0 : 17;
    /** VMX: Padding / reserved for future, making it a total of 128 bits. */
    uint32_t fVmxPadding1;
} CPUMFEATURES;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMFEATURES, 48);
#endif
/** Pointer to a CPU feature structure. */
typedef CPUMFEATURES *PCPUMFEATURES;
/** Pointer to a const CPU feature structure. */
typedef CPUMFEATURES const *PCCPUMFEATURES;
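
/** @par Example (illustrative)
 * Feature structures are handed out as pointers; a consumer sketch, assuming
 * pFeatures was obtained from CPUM for the relevant CPU profile:
 * @code
 *      bool const fCanUseXsave = pFeatures->fXSaveRstor && pFeatures->fOpSysXSaveRstor;
 * @endcode
 */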
1408
1409/**
1410 * Chameleon wrapper structure for the host CPU features.
1411 *
1412 * This is used for the globally readable g_CpumHostFeatures variable, which is
1413 * initialized once during VMMR0 load for ring-0 and during CPUMR3Init in
1414 * ring-3. To reflect this immutability after load/init, we use this wrapper
1415 * structure to switch it between const and non-const depending on the context.
1416 * Only two files sees it as non-const (CPUMR0.cpp and CPUM.cpp).
1417 */
1418typedef struct CPUHOSTFEATURES
1419{
1420 CPUMFEATURES
1421#ifndef CPUM_WITH_NONCONST_HOST_FEATURES
1422 const
1423#endif
1424 s;
1425} CPUHOSTFEATURES;
1426/** Pointer to a const host CPU feature structure. */
1427typedef CPUHOSTFEATURES const *PCCPUHOSTFEATURES;
1428
1429/** Host CPU features.
1430 * @note In ring-3, only valid after CPUMR3Init. In ring-0, valid after
1431 * module init. */
1432extern CPUHOSTFEATURES g_CpumHostFeatures;
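/* Example (editorial sketch, not part of the original header): reading a host
 * feature bit after initialization. Outside CPUM.cpp/CPUMR0.cpp the 's' member
 * is const, so consumers get a read-only view; the LogRel line is illustrative
 * only.
 * @code
 *      PCCPUMFEATURES pHostFeatures = &g_CpumHostFeatures.s;
 *      if (pHostFeatures->fVmxVmwriteAll)
 *          LogRel(("Host VMX allows VMWRITE to read-only VMCS fields\n"));
 * @endcode */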
1433
1434
1435/**
1436 * CPU database entry.
1437 */
1438typedef struct CPUMDBENTRY
1439{
1440 /** The CPU name. */
1441 const char *pszName;
1442 /** The full CPU name. */
1443 const char *pszFullName;
1444 /** The CPU vendor (CPUMCPUVENDOR). */
1445 uint8_t enmVendor;
1446 /** The CPU family. */
1447 uint8_t uFamily;
1448 /** The CPU model. */
1449 uint8_t uModel;
1450 /** The CPU stepping. */
1451 uint8_t uStepping;
1452 /** The microarchitecture. */
1453 CPUMMICROARCH enmMicroarch;
1454 /** Scalable bus frequency used for reporting other frequencies. */
1455 uint64_t uScalableBusFreq;
1456 /** Flags - CPUMDB_F_XXX. */
1457 uint32_t fFlags;
1458 /** The maximum physical address width of the CPU. This should correspond to
1459 * the value in CPUID leaf 0x80000008 when present. */
1460 uint8_t cMaxPhysAddrWidth;
1461 /** The MXCSR mask. */
1462 uint32_t fMxCsrMask;
1463 /** Pointer to an array of CPUID leaves. */
1464 PCCPUMCPUIDLEAF paCpuIdLeaves;
1465 /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
1466 uint32_t cCpuIdLeaves;
1467 /** The method used to deal with unknown CPUID leaves. */
1468 CPUMUNKNOWNCPUID enmUnknownCpuId;
1469 /** The default unknown CPUID value. */
1470 CPUMCPUID DefUnknownCpuId;
1471
1472 /** MSR mask. Several microarchitectures ignore the higher bits of ECX in
1473 * the RDMSR and WRMSR instructions. */
1474 uint32_t fMsrMask;
1475
1476 /** The number of ranges in the table pointed to by paMsrRanges. */
1477 uint32_t cMsrRanges;
1478 /** MSR ranges for this CPU. */
1479 PCCPUMMSRRANGE paMsrRanges;
1480} CPUMDBENTRY;
1481/** Pointer to a const CPU database entry. */
1482typedef CPUMDBENTRY const *PCCPUMDBENTRY;
1483
1484/** @name CPUMDB_F_XXX - CPUDBENTRY::fFlags
1485 * @{ */
1486/** Should execute all in IEM.
1487 * @todo Implement this - currently done in Main... */
1488#define CPUMDB_F_EXECUTE_ALL_IN_IEM RT_BIT_32(0)
1489/** @} */
1490
1491
1492
1493#ifndef VBOX_FOR_DTRACE_LIB
1494
1495#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
1496VMMDECL(int) CPUMCpuIdCollectLeavesX86(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
1497VMMDECL(CPUMCPUVENDOR) CPUMCpuIdDetectX86VendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
1498#endif
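/* Example (editorial sketch): feeding raw CPUID leaf 0 output to the vendor
 * detector. ASMCpuId() is assumed here to be the IPRT wrapper from
 * iprt/asm-amd64-x86.h; EAX of leaf 0 is passed along with the EBX/ECX/EDX
 * vendor-string registers.
 * @code
 *      uint32_t uEax, uEbx, uEcx, uEdx;
 *      ASMCpuId(0, &uEax, &uEbx, &uEcx, &uEdx);
 *      CPUMCPUVENDOR const enmVendor = CPUMCpuIdDetectX86VendorEx(uEax, uEbx, uEcx, uEdx);
 *      bool const fIsIntel = enmVendor == CPUMCPUVENDOR_INTEL;
 * @endcode */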
1499
1500/** @name Guest Register Getters.
1501 * @{ */
1502VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR);
1503VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit);
1504VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden);
1505VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu);
1506VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
1507VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu);
1508VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu);
1509VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu);
1510VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu);
1511VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu);
1512VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue);
1513VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu);
1514VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu);
1515VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu);
1516VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu);
1517VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu);
1518VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu);
1519VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu);
1520VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu);
1521VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu);
1522VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu);
1523VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu);
1524VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu);
1525VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu);
1526VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu);
1527VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu);
1528VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu);
1529VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu);
1530VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu);
1531VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu);
1532VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu);
1533VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu);
1534VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu);
1535VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu);
1536VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu);
1537VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu);
1538VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
1539VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t iLeaf, uint32_t iSubLeaf, int f64BitMode,
1540 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
1541VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu);
1542VMM_INT_DECL(uint64_t) CPUMGetGuestIa32FeatCtrl(PCVMCPUCC pVCpu);
1543VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PCVMCPU pVCpu);
1544VMM_INT_DECL(uint64_t) CPUMGetGuestIa32SmmMonitorCtl(PCVMCPUCC pVCpu);
1545VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxEptVpidCap(PCVMCPUCC pVCpu);
1546VMMDECL(VBOXSTRICTRC) CPUMQueryGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *puValue);
1547VMMDECL(VBOXSTRICTRC) CPUMSetGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t uValue);
1548VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM);
1549VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM);
1550VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth);
1551VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM);
1552VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM);
1553/** @} */
1554
1555/** @name Guest Register Setters.
1556 * @{ */
1557VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1558VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1559VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
1560VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
1561VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0);
1562VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
1563VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
1564VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
1565VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0);
1566VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1);
1567VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2);
1568VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3);
1569VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
1570VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7);
1571VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value);
1572VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue);
1573VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
1574VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
1575VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
1576VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
1577VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
1578VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
1579VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
1580VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
1581VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
1582VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
1583VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
1584VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
1585VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
1586VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
1587VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
1588VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
1589VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
1590VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1591VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1592VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1593VMMR3_INT_DECL(void) CPUMR3CpuIdEnable64BitGuests(PVM pVM);
1594VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
1595VMMDECL(void) CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
1596VMM_INT_DECL(void) CPUMSetGuestTscAux(PVMCPUCC pVCpu, uint64_t uValue);
1597VMM_INT_DECL(uint64_t) CPUMGetGuestTscAux(PVMCPUCC pVCpu);
1598VMM_INT_DECL(void) CPUMSetGuestSpecCtrl(PVMCPUCC pVCpu, uint64_t uValue);
1599VMM_INT_DECL(uint64_t) CPUMGetGuestSpecCtrl(PVMCPUCC pVCpu);
1600VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM);
1601VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes);
1602VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes);
1603/** @} */
1604
1605
1606/** @name Misc Guest Predicate Functions.
1607 * @{ */
1608VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
1609VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu);
1610VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu);
1611VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu);
1612VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu);
1613VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu);
1614VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu);
1615VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu);
1616VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu);
1617VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu);
1618VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu);
1619/** @} */
1620
1621/** @name Nested Hardware-Virtualization Helpers.
1622 * @{ */
1623VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu);
1624VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu);
1625VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1626VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1627
1628/* SVM helpers. */
1629VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1630VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1631VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx);
1632VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx);
1633VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
1634VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1635 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
1636 PSVMIOIOEXITINFO pIoExitInfo);
1637VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
1638
1639/* VMX helpers. */
1640VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField);
1641VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess);
1642VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3);
1643VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc);
1644VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick);
1645VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu);
1646VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr);
1647VMM_INT_DECL(bool) CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu);
1648VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu);
1649VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu);
1650/** @} */
1651
1652/** @name Externalized State Helpers.
1653 * @{ */
1654/** @def CPUM_ASSERT_NOT_EXTRN
1655 * Macro for asserting that the state specified by @a a_fNotExtrn is present.
1656 *
1657 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1658 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
1659 *
1660 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1661 */
1662#define CPUM_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
1663 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fNotExtrn)), \
1664 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
1665
1666/** @def CPUM_IMPORT_EXTRN_RET
1667 * Macro for making sure the state specified by @a fExtrnImport is present,
1668 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1669 *
1670 * Will return if CPUMImportGuestStateOnDemand() fails.
1671 *
1672 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1673 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1674 * @thread EMT(a_pVCpu)
1675 *
1676 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1677 */
1678#define CPUM_IMPORT_EXTRN_RET(a_pVCpu, a_fExtrnImport) \
1679 do { \
1680 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1681 { /* already present, consider this likely */ } \
1682 else \
1683 { \
1684 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1685 AssertRCReturn(rcCpumImport, rcCpumImport); \
1686 } \
1687 } while (0)
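/* Example (editorial sketch): a typical exit-handler prologue using the two
 * macros above. The handler name is hypothetical; CPUMCTX_EXTRN_RIP and
 * CPUMCTX_EXTRN_CR0 come from cpumctx.h, and the compilation unit must define
 * VMCPU_INCL_CPUM_GST_CTX as noted in the remarks.
 * @code
 *      static VBOXSTRICTRC hmExitHandlerFoo(PVMCPUCC pVCpu)
 *      {
 *          CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CR0);
 *          CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
 *          uint64_t const uGuestRip = pVCpu->cpum.GstCtx.rip; // now safe to read
 *          RT_NOREF(uGuestRip);
 *          return VINF_SUCCESS;
 *      }
 * @endcode */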
1688
1689/** @def CPUM_IMPORT_EXTRN_RCSTRICT
1690 * Macro for making sure the state specified by @a fExtrnImport is present,
1691 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1692 *
1693 * Will update a_rcStrict if CPUMImportGuestStateOnDemand() fails.
1694 *
1695 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1696 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1697 * @param a_rcStrict Strict status code variable to update on failure.
1698 * @thread EMT(a_pVCpu)
1699 *
1700 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1701 */
1702#define CPUM_IMPORT_EXTRN_RCSTRICT(a_pVCpu, a_fExtrnImport, a_rcStrict) \
1703 do { \
1704 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1705 { /* already present, consider this likely */ } \
1706 else \
1707 { \
1708 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1709 AssertStmt(RT_SUCCESS(rcCpumImport) || RT_FAILURE_NP(a_rcStrict), a_rcStrict = rcCpumImport); \
1710 } \
1711 } while (0)
1712
1713VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport);
1714/** @} */
1715
1716#if !defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS) || defined(DOXYGEN_RUNNING)
1717/** @name Inlined Guest Getters and predicates Functions.
1718 * @{ */
1719
1720/**
1721 * Gets valid CR0 bits for the guest.
1722 *
1723 * @returns Valid CR0 bits.
1724 */
1725DECLINLINE(uint64_t) CPUMGetGuestCR0ValidMask(void)
1726{
1727 return ( X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
1728 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
1729 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG);
1730}
1731
1732/**
1733 * Tests if the guest is running in real mode or not.
1734 *
1735 * @returns true if in real mode, otherwise false.
1736 * @param pCtx Current CPU context.
1737 */
1738DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCCPUMCTX pCtx)
1739{
1740 return !(pCtx->cr0 & X86_CR0_PE);
1741}
1742
1743/**
1744 * Tests if the guest is running in real or virtual 8086 mode.
1745 *
1746 * @returns @c true if it is, @c false if not.
1747 * @param pCtx Current CPU context.
1748 */
1749DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCCPUMCTX pCtx)
1750{
1751 return !(pCtx->cr0 & X86_CR0_PE)
1752 || pCtx->eflags.Bits.u1VM; /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
1753}
1754
1755/**
1756 * Tests if the guest is running in virtual 8086 mode.
1757 *
1758 * @returns @c true if it is, @c false if not.
1759 * @param pCtx Current CPU context.
1760 */
1761DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCCPUMCTX pCtx)
1762{
1763 return (pCtx->eflags.Bits.u1VM == 1);
1764}
1765
1766/**
1767 * Tests if the guest is running in paged protected mode or not.
1768 *
1769 * @returns true if in paged protected mode, otherwise false.
1770 * @param pCtx Current CPU context.
1771 */
1772DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
1773{
1774 return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1775}
1776
1777/**
1778 * Tests if the guest is running in long mode or not.
1779 *
1780 * @returns true if in long mode, otherwise false.
1781 * @param pCtx Current CPU context.
1782 */
1783DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx)
1784{
1785 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1786}
1787
1788VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
1789
1790/**
1791 * Tests if the guest is running in 64 bits mode or not.
1792 *
1793 * @returns true if in 64 bits protected mode, otherwise false.
1794 * @param pCtx Current CPU context.
1795 */
1796DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
1797{
1798 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
1799 return false;
1800 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
1801 return CPUMIsGuestIn64BitCodeSlow(pCtx);
1802 return pCtx->cs.Attr.n.u1Long;
1803}
1804
1805/**
1806 * Tests if the guest has paging enabled or not.
1807 *
1808 * @returns true if paging is enabled, otherwise false.
1809 * @param pCtx Current CPU context.
1810 */
1811DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCCPUMCTX pCtx)
1812{
1813 return !!(pCtx->cr0 & X86_CR0_PG);
1814}
1815
1816/**
1817 * Tests if PAE paging is enabled given the relevant control registers.
1818 *
1819 * @returns @c true if in PAE mode, @c false otherwise.
1820 * @param uCr0 The CR0 value.
1821 * @param uCr4 The CR4 value.
1822 * @param uEferMsr The EFER value.
1823 */
1824DECLINLINE(bool) CPUMIsPaePagingEnabled(uint64_t uCr0, uint64_t uCr4, uint64_t uEferMsr)
1825{
1826 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1827 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1828 return ( (uCr4 & X86_CR4_PAE)
1829 && (uCr0 & X86_CR0_PG)
1830 && !(uEferMsr & MSR_K6_EFER_LMA));
1831}
1832
1833/**
1834 * Tests if the guest is running in PAE mode or not.
1835 *
1836 * @returns @c true if in PAE mode, @c false otherwise.
1837 * @param pCtx Current CPU context.
1838 */
1839DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCCPUMCTX pCtx)
1840{
1841 return CPUMIsPaePagingEnabled(pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1842}
1843
1844/**
1845 * Tests if the guest has AMD SVM enabled or not.
1846 *
1847 * @returns true if SVM is enabled, otherwise false.
1848 * @param pCtx Current CPU context.
1849 */
1850DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
1851{
1852 return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
1853}
1854
1855/**
1856 * Tests if the guest has Intel VT-x enabled or not.
1857 *
1858 * @returns true if VMX is enabled, otherwise false.
1859 * @param pCtx Current CPU context.
1860 */
1861DECLINLINE(bool) CPUMIsGuestVmxEnabled(PCCPUMCTX pCtx)
1862{
1863 return RT_BOOL(pCtx->cr4 & X86_CR4_VMXE);
1864}
1865
1866/**
1867 * Returns the guest's global-interrupt (GIF) flag.
1868 *
1869 * @returns true when global-interrupts are enabled, otherwise false.
1870 * @param pCtx Current CPU context.
1871 */
1872DECLINLINE(bool) CPUMGetGuestGif(PCCPUMCTX pCtx)
1873{
1874 return pCtx->hwvirt.fGif;
1875}
1876
1877/**
1878 * Sets the guest's global-interrupt flag (GIF).
1879 *
1880 * @param pCtx Current CPU context.
1881 * @param fGif The value to set.
1882 */
1883DECLINLINE(void) CPUMSetGuestGif(PCPUMCTX pCtx, bool fGif)
1884{
1885 pCtx->hwvirt.fGif = fGif;
1886}
1887
1888/**
1889 * Checks if we are executing inside an SVM nested hardware-virtualized guest.
1890 *
1891 * @returns @c true if in SVM nested-guest mode, @c false otherwise.
1892 * @param pCtx Current CPU context.
1893 */
1894DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
1895{
1896 /*
1897 * With AMD-V, the VMRUN intercept is a pre-requisite to entering SVM guest-mode.
1898 * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
1899 */
1900#ifndef IN_RC
1901 if ( pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM
1902 || !(pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
1903 return false;
1904 return true;
1905#else
1906 NOREF(pCtx);
1907 return false;
1908#endif
1909}
1910
1911/**
1912 * Checks if the guest is in VMX non-root operation.
1913 *
1914 * @returns @c true if in VMX non-root operation, @c false otherwise.
1915 * @param pCtx Current CPU context.
1916 */
1917DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx)
1918{
1919#ifndef IN_RC
1920 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1921 return false;
1922 Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
1923 return pCtx->hwvirt.vmx.fInVmxNonRootMode;
1924#else
1925 NOREF(pCtx);
1926 return false;
1927#endif
1928}
1929
1930/**
1931 * Checks if we are executing inside an SVM or VMX nested hardware-virtualized
1932 * guest.
1933 *
1934 * @returns @c true if in nested-guest mode, @c false otherwise.
1935 * @param pCtx Current CPU context.
1936 */
1937DECLINLINE(bool) CPUMIsGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
1938{
1939 return CPUMIsGuestInVmxNonRootMode(pCtx) || CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
1940}
1941
1942/**
1943 * Checks if the guest is in VMX root operation.
1944 *
1945 * @returns @c true if in VMX root operation, @c false otherwise.
1946 * @param pCtx Current CPU context.
1947 */
1948DECLINLINE(bool) CPUMIsGuestInVmxRootMode(PCCPUMCTX pCtx)
1949{
1950#ifndef IN_RC
1951 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1952 return false;
1953 return pCtx->hwvirt.vmx.fInVmxRootMode;
1954#else
1955 NOREF(pCtx);
1956 return false;
1957#endif
1958}
1959
1960# ifndef IN_RC
1961
1962/**
1963 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
1964 * active.
1965 *
1966 * @returns @c true if the intercept is set, @c false otherwise.
1967 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1968 * @param pCtx Current CPU context.
1969 * @param fIntercept The SVM control/instruction intercept, see
1970 * SVM_CTRL_INTERCEPT_*.
1971 */
1972DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fIntercept)
1973{
1974 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1975 return false;
1976 uint64_t u64Intercepts;
1977 if (!HMGetGuestSvmCtrlIntercepts(pVCpu, &u64Intercepts))
1978 u64Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl;
1979 return RT_BOOL(u64Intercepts & fIntercept);
1980}
1981
1982/**
1983 * Checks if the nested-guest VMCB has the specified CR read intercept active.
1984 *
1985 * @returns @c true if the intercept is set, @c false otherwise.
1986 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1987 * @param pCtx Current CPU context.
1988 * @param uCr The CR register number (0 to 15).
1989 */
1990DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1991{
1992 Assert(uCr < 16);
1993 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1994 return false;
1995 uint16_t u16Intercepts;
1996 if (!HMGetGuestSvmReadCRxIntercepts(pVCpu, &u16Intercepts))
1997 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptRdCRx;
1998 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
1999}
2000
2001/**
2002 * Checks if the nested-guest VMCB has the specified CR write intercept active.
2003 *
2004 * @returns @c true if the intercept is set, @c false otherwise.
2005 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2006 * @param pCtx Current CPU context.
2007 * @param uCr The CR register number (0 to 15).
2008 */
2009DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
2010{
2011 Assert(uCr < 16);
2012 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2013 return false;
2014 uint16_t u16Intercepts;
2015 if (!HMGetGuestSvmWriteCRxIntercepts(pVCpu, &u16Intercepts))
2016 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptWrCRx;
2017 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
2018}
2019
2020/**
2021 * Checks if the nested-guest VMCB has the specified DR read intercept active.
2022 *
2023 * @returns @c true if the intercept is set, @c false otherwise.
2024 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2025 * @param pCtx Current CPU context.
2026 * @param uDr The DR register number (0 to 15).
2027 */
2028DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
2029{
2030 Assert(uDr < 16);
2031 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2032 return false;
2033 uint16_t u16Intercepts;
2034 if (!HMGetGuestSvmReadDRxIntercepts(pVCpu, &u16Intercepts))
2035 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptRdDRx;
2036 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
2037}
2038
2039/**
2040 * Checks if the nested-guest VMCB has the specified DR write intercept active.
2041 *
2042 * @returns @c true if the intercept is set, @c false otherwise.
2043 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2044 * @param pCtx Current CPU context.
2045 * @param uDr The DR register number (0 to 15).
2046 */
2047DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
2048{
2049 Assert(uDr < 16);
2050 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2051 return false;
2052 uint16_t u16Intercepts;
2053 if (!HMGetGuestSvmWriteDRxIntercepts(pVCpu, &u16Intercepts))
2054 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptWrDRx;
2055 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
2056}
2057
2058/**
2059 * Checks if the nested-guest VMCB has the specified exception intercept active.
2060 *
2061 * @returns @c true if the intercept is active, @c false otherwise.
2062 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2063 * @param pCtx Current CPU context.
2064 * @param uVector The exception / interrupt vector.
2065 */
2066DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
2067{
2068 Assert(uVector <= X86_XCPT_LAST);
2069 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2070 return false;
2071 uint32_t u32Intercepts;
2072 if (!HMGetGuestSvmXcptIntercepts(pVCpu, &u32Intercepts))
2073 u32Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u32InterceptXcpt;
2074 return RT_BOOL(u32Intercepts & RT_BIT(uVector));
2075}
2076
2077/**
2078 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
2079 *
2080 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
2081 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2082 * @param pCtx Current CPU context.
2083 *
2084 * @remarks Should only be called when SVM feature is exposed to the guest.
2085 */
2086DECLINLINE(bool) CPUMIsGuestSvmVirtIntrMasking(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2087{
2088 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2089 return false;
2090 bool fVIntrMasking;
2091 if (!HMGetGuestSvmVirtIntrMasking(pVCpu, &fVIntrMasking))
2092 fVIntrMasking = pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u1VIntrMasking;
2093 return fVIntrMasking;
2094}
2095
2096/**
2097 * Checks if the nested-guest VMCB has nested-paging enabled.
2098 *
2099 * @returns @c true if nested-paging is enabled, @c false otherwise.
2100 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2101 * @param pCtx Current CPU context.
2102 *
2103 * @remarks Should only be called when SVM feature is exposed to the guest.
2104 */
2105DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2106{
2107 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2108 return false;
2109 bool fNestedPaging;
2110 if (!HMGetGuestSvmNestedPaging(pVCpu, &fNestedPaging))
2111 fNestedPaging = pCtx->hwvirt.svm.Vmcb.ctrl.NestedPagingCtrl.n.u1NestedPaging;
2112 return fNestedPaging;
2113}
2114
2115/**
2116 * Gets the nested-guest VMCB pause-filter count.
2117 *
2118 * @returns The pause-filter count.
2119 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2120 * @param pCtx Current CPU context.
2121 *
2122 * @remarks Should only be called when SVM feature is exposed to the guest.
2123 */
2124DECLINLINE(uint16_t) CPUMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2125{
2126 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2127         return 0;
2128 uint16_t u16PauseFilterCount;
2129 if (!HMGetGuestSvmPauseFilterCount(pVCpu, &u16PauseFilterCount))
2130 u16PauseFilterCount = pCtx->hwvirt.svm.Vmcb.ctrl.u16PauseFilterCount;
2131 return u16PauseFilterCount;
2132}
2133
2134/**
2135 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
2136 *
2137 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2138 * @param pCtx Current CPU context.
2139 * @param cbInstr The length of the current instruction in bytes.
2140 *
2141 * @remarks Should only be called when SVM feature is exposed to the guest.
2142 */
2143DECLINLINE(void) CPUMGuestSvmUpdateNRip(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr)
2144{
2145 RT_NOREF(pVCpu);
2146 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
2147 pCtx->hwvirt.svm.Vmcb.ctrl.u64NextRIP = pCtx->rip + cbInstr;
2148}
2149
2150/**
2151 * Checks whether one of the given Pin-based VM-execution controls are set when
2152 * executing a nested-guest.
2153 *
2154 * @returns @c true if set, @c false otherwise.
2155 * @param pCtx Current CPU context.
2156 * @param uPinCtls The Pin-based VM-execution controls to check.
2157 *
2158 * @remarks This does not check if all given controls are set if more than one
2159 * control is passed in @a uPinCtls.
2160 */
2161DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls)
2162{
2163 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2164 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & uPinCtls);
2165}
2166
2167/**
2168 * Checks whether one of the given Processor-based VM-execution controls are set
2169 * when executing a nested-guest.
2170 *
2171 * @returns @c true if set, @c false otherwise.
2172 * @param pCtx Current CPU context.
2173 * @param uProcCtls The Processor-based VM-execution controls to check.
2174 *
2175 * @remarks This does not check if all given controls are set if more than one
2176 * control is passed in @a uProcCtls.
2177 */
2178DECLINLINE(bool) CPUMIsGuestVmxProcCtlsSet(PCCPUMCTX pCtx, uint32_t uProcCtls)
2179{
2180 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2181 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls & uProcCtls);
2182}
2183
2184/**
2185 * Checks whether one of the given Secondary Processor-based VM-execution controls
2186 * are set when executing a nested-guest.
2187 *
2188 * @returns @c true if set, @c false otherwise.
2189 * @param pCtx Current CPU context.
2190 * @param uProcCtls2 The Secondary Processor-based VM-execution controls to
2191 * check.
2192 *
2193 * @remarks This does not check if all given controls are set if more than one
2194 * control is passed in @a uProcCtls2.
2195 */
2196DECLINLINE(bool) CPUMIsGuestVmxProcCtls2Set(PCCPUMCTX pCtx, uint32_t uProcCtls2)
2197{
2198 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2199 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls2 & uProcCtls2);
2200}
2201
2202/**
2203 * Checks whether one of the given Tertiary Processor-based VM-execution controls
2204 * are set when executing a nested-guest.
2205 *
2206 * @returns @c true if set, @c false otherwise.
2207 * @param pCtx Current CPU context.
2208 * @param uProcCtls3 The Tertiary Processor-based VM-execution controls to
2209 * check.
2210 *
2211 * @remarks This does not check if all given controls are set if more than one
2212 * control is passed in @a uProcCtls3.
2213 */
2214DECLINLINE(bool) CPUMIsGuestVmxProcCtls3Set(PCCPUMCTX pCtx, uint64_t uProcCtls3)
2215{
2216 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2217 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u64ProcCtls3.u & uProcCtls3);
2218}
2219
2220/**
2221 * Checks whether one of the given VM-exit controls are set when executing a
2222 * nested-guest.
2223 *
2224 * @returns @c true if set, @c false otherwise.
2225 * @param pCtx Current CPU context.
2226 * @param uExitCtls The VM-exit controls to check.
2227 *
2228 * @remarks This does not check if all given controls are set if more than one
2229 * control is passed in @a uExitCtls.
2230 */
2231DECLINLINE(bool) CPUMIsGuestVmxExitCtlsSet(PCCPUMCTX pCtx, uint32_t uExitCtls)
2232{
2233 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2234 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ExitCtls & uExitCtls);
2235}
2236
2237/**
2238 * Checks whether one of the given VM-entry controls are set when executing a
2239 * nested-guest.
2240 *
2241 * @returns @c true if set, @c false otherwise.
2242 * @param pCtx Current CPU context.
2243 * @param uEntryCtls The VM-entry controls to check.
2244 *
2245 * @remarks This does not check if all given controls are set if more than one
2246 * control is passed in @a uEntryCtls.
2247 */
2248DECLINLINE(bool) CPUMIsGuestVmxEntryCtlsSet(PCCPUMCTX pCtx, uint32_t uEntryCtls)
2249{
2250 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2251 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32EntryCtls & uEntryCtls);
2252}
2253
2254/**
2255 * Checks whether events injected in the nested-guest are subject to VM-exit checks.
2256 *
2257 * @returns @c true if set, @c false otherwise.
2258 * @param pCtx Current CPU context.
2259 */
2260DECLINLINE(bool) CPUMIsGuestVmxInterceptEvents(PCCPUMCTX pCtx)
2261{
2262 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2263 return pCtx->hwvirt.vmx.fInterceptEvents;
2264}
2265
2266/**
2267 * Sets whether events injected in the nested-guest are subject to VM-exit checks.
2268 *
2269 * @param pCtx Current CPU context.
2270 * @param fInterceptEvents Whether to subject injected events to VM-exits or not.
2271 */
2272DECLINLINE(void) CPUMSetGuestVmxInterceptEvents(PCPUMCTX pCtx, bool fInterceptEvents)
2273{
2274 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2275 pCtx->hwvirt.vmx.fInterceptEvents = fInterceptEvents;
2276}
2277
2278/**
2279 * Checks whether the given exception causes a VM-exit.
2280 *
2281 * The exception types include hardware exceptions, software exceptions (#BP, #OF)
2282 * and privileged software exceptions (#DB generated by INT1/ICEBP).
2283 *
2284 * Software interrupts do -not- cause VM-exits and hence must not be used with this
2285 * function.
2286 *
2287 * @returns @c true if the exception causes a VM-exit, @c false otherwise.
2288 * @param pCtx Current CPU context.
2289 * @param uVector The exception vector.
2290 * @param uErrCode The error code associated with the exception. Pass 0 if not
2291 * applicable.
2292 */
2293DECLINLINE(bool) CPUMIsGuestVmxXcptInterceptSet(PCCPUMCTX pCtx, uint8_t uVector, uint32_t uErrCode)
2294{
2295 Assert(uVector <= X86_XCPT_LAST);
2296
2297 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2298
2299 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
2300 if (uVector == X86_XCPT_NMI)
2301 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
2302
2303 /* Page-faults are subject to masking using its error code. */
2304 uint32_t fXcptBitmap = pCtx->hwvirt.vmx.Vmcs.u32XcptBitmap;
2305 if (uVector == X86_XCPT_PF)
2306 {
2307 uint32_t const fXcptPFMask = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMask;
2308 uint32_t const fXcptPFMatch = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMatch;
2309 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
2310 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
2311 }
2312
2313 /* Consult the exception bitmap for all other exceptions. */
2314 if (fXcptBitmap & RT_BIT(uVector))
2315 return true;
2316 return false;
2317}
2318
2319
2320/**
2321 * Checks whether the guest is in VMX non-root mode and using EPT paging.
2322 *
2323 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
2324 * @param pCtx Current CPU context.
2325 */
2326DECLINLINE(bool) CPUMIsGuestVmxEptPagingEnabledEx(PCCPUMCTX pCtx)
2327{
2328 return CPUMIsGuestInVmxNonRootMode(pCtx)
2329 && CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_EPT);
2330}
2331
2332
2333/**
2334 * Implements VMSucceed for VMX instruction success.
2335 *
2336 * @param pCtx Current CPU context.
2337 */
2338DECLINLINE(void) CPUMSetGuestVmxVmSucceed(PCPUMCTX pCtx)
2339{
2340 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2341}
2342
2343/**
2344 * Implements VMFailInvalid for VMX instruction failure.
2345 *
2346 * @param pCtx Current CPU context.
2347 */
2348DECLINLINE(void) CPUMSetGuestVmxVmFailInvalid(PCPUMCTX pCtx)
2349{
2350 pCtx->eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2351 pCtx->eflags.u32 |= X86_EFL_CF;
2352}
2353
2354/**
2355 * Implements VMFailValid for VMX instruction failure.
2356 *
2357 * @param pCtx Current CPU context.
2358 * @param enmInsErr The VM instruction error.
2359 */
2360DECLINLINE(void) CPUMSetGuestVmxVmFailValid(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2361{
2362 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2363 pCtx->eflags.u32 |= X86_EFL_ZF;
2364 pCtx->hwvirt.vmx.Vmcs.u32RoVmInstrError = enmInsErr;
2365}
2366
2367/**
2368 * Implements VMFail for VMX instruction failure.
2369 *
2370 * @param pCtx Current CPU context.
2371 * @param enmInsErr The VM instruction error.
2372 */
2373DECLINLINE(void) CPUMSetGuestVmxVmFail(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2374{
2375 if (pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
2376 CPUMSetGuestVmxVmFailValid(pCtx, enmInsErr);
2377 else
2378 CPUMSetGuestVmxVmFailInvalid(pCtx);
2379}
2380
2381/**
2382 * Returns the guest-physical address of the APIC-access page when executing a
2383 * nested-guest.
2384 *
2385 * @returns The APIC-access page guest-physical address.
2386 * @param pCtx Current CPU context.
2387 */
2388DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddrEx(PCCPUMCTX pCtx)
2389{
2390 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2391 return pCtx->hwvirt.vmx.Vmcs.u64AddrApicAccess.u;
2392}
2393
2394/**
2395 * Gets the nested-guest CR0 subject to the guest/host mask and the read-shadow.
2396 *
2397 * @returns The nested-guest CR0.
2398 * @param pCtx Current CPU context.
2399 * @param fGstHostMask The CR0 guest/host mask to use.
2400 */
2401DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr0(PCCPUMCTX pCtx, uint64_t fGstHostMask)
2402{
2403 /*
2404 * For each CR0 bit owned by the host, the corresponding bit from the
2405 * CR0 read shadow is loaded. For each CR0 bit that is not owned by the host,
2406 * the corresponding bit from the guest CR0 is loaded.
2407 *
2408 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2409 */
2410 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2411 uint64_t const uGstCr0 = pCtx->cr0;
2412 uint64_t const fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2413 return (fReadShadow & fGstHostMask) | (uGstCr0 & ~fGstHostMask);
2414}
2415
2416/**
2417 * Gets the nested-guest CR4 subject to the guest/host mask and the read-shadow.
2418 *
2419 * @returns The nested-guest CR4.
2420 * @param pCtx Current CPU context.
2421 * @param fGstHostMask The CR4 guest/host mask to use.
2422 */
2423DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr4(PCCPUMCTX pCtx, uint64_t fGstHostMask)
2424{
2425 /*
2426 * For each CR4 bit owned by the host, the corresponding bit from the
2427 * CR4 read shadow is loaded. For each CR4 bit that is not owned by the host,
2428 * the corresponding bit from the guest CR4 is loaded.
2429 *
2430 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2431 */
2432 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2433 uint64_t const uGstCr4 = pCtx->cr4;
2434 uint64_t const fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
2435 return (fReadShadow & fGstHostMask) | (uGstCr4 & ~fGstHostMask);
2436}
2437
2438/**
2439 * Checks whether the LMSW access causes a VM-exit or not.
2440 *
2441 * @returns @c true if the LMSW access causes a VM-exit, @c false otherwise.
2442 * @param pCtx Current CPU context.
2443 * @param uNewMsw The LMSW source operand (the Machine Status Word).
2444 */
2445DECLINLINE(bool) CPUMIsGuestVmxLmswInterceptSet(PCCPUMCTX pCtx, uint16_t uNewMsw)
2446{
2447 /*
2448 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
2449 *
2450 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
2451 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2452 */
2453 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2454
2455 uint32_t const fGstHostMask = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
2456 uint32_t const fReadShadow = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2457
2458 /*
2459 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
2460 * CR0.PE case first, before the rest of the bits in the MSW.
2461 *
2462 * If CR0.PE is owned by the host and CR0.PE differs between the
2463 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
2464 */
2465 if ( (fGstHostMask & X86_CR0_PE)
2466 && (uNewMsw & X86_CR0_PE)
2467 && !(fReadShadow & X86_CR0_PE))
2468 return true;
2469
2470 /*
2471 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
2472 * bits differ between the MSW (source operand) and the read-shadow, we must
2473 * cause a VM-exit.
2474 */
2475 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2476 if ((fReadShadow & fGstHostLmswMask) != (uNewMsw & fGstHostLmswMask))
2477 return true;
2478
2479 return false;
2480}
2481
2482/**
2483 * Checks whether the Mov-to-CR0/CR4 access causes a VM-exit or not.
2484 *
2485 * @returns @c true if the Mov CRX access causes a VM-exit, @c false otherwise.
2486 * @param pCtx Current CPU context.
2487 * @param iCrReg The control register number (must be 0 or 4).
2488 * @param uNewCrX The CR0/CR4 value being written.
2489 */
2490DECLINLINE(bool) CPUMIsGuestVmxMovToCr0Cr4InterceptSet(PCCPUMCTX pCtx, uint8_t iCrReg, uint64_t uNewCrX)
2491{
2492 /*
2493 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
2494 * corresponding bits differ between the source operand and the read-shadow,
2495 * we must cause a VM-exit.
2496 *
2497 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2498 */
2499 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2500 Assert(iCrReg == 0 || iCrReg == 4);
2501
2502 uint64_t fGstHostMask;
2503 uint64_t fReadShadow;
2504 if (iCrReg == 0)
2505 {
2506 fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
2507 fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2508 }
2509 else
2510 {
2511 fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr4Mask.u;
2512 fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
2513 }
2514
2515 if ((fReadShadow & fGstHostMask) != (uNewCrX & fGstHostMask))
2516 {
2517 Assert(fGstHostMask != 0);
2518 return true;
2519 }
2520
2521 return false;
2522}
2523
2524/**
2525 * Returns whether the guest has an active, current VMCS.
2526 *
2527 * @returns @c true if the guest has an active, current VMCS, @c false otherwise.
2528 * @param pCtx Current CPU context.
2529 */
2530DECLINLINE(bool) CPUMIsGuestVmxCurrentVmcsValid(PCCPUMCTX pCtx)
2531{
2532 RTGCPHYS const GCPhysVmcs = pCtx->hwvirt.vmx.GCPhysVmcs;
2533 return RT_BOOL(GCPhysVmcs != NIL_RTGCPHYS);
2534}
2535
2536# endif /* !IN_RC */
2537
2538/**
2539 * Checks whether the VMX nested-guest is in a state to receive physical (APIC)
2540 * interrupts.
2541 *
2542 * @returns @c true if it's ready, @c false otherwise.
2543 * @param pCtx The guest-CPU context.
2544 */
2545DECLINLINE(bool) CPUMIsGuestVmxPhysIntrEnabled(PCCPUMCTX pCtx)
2546{
2547#ifdef IN_RC
2548 AssertReleaseFailedReturn(false);
2549#else
2550 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2551 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
2552 return true;
2553 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2554#endif
2555}
2556
2557/**
2558 * Checks whether the VMX nested-guest is blocking virtual-NMIs.
2559 *
2560 * @returns @c true if it's blocked, @c false otherwise.
2561 * @param pCtx The guest-CPU context.
2562 */
2563DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx)
2564{
2565#ifdef IN_RC
2566 RT_NOREF(pCtx);
2567 AssertReleaseFailedReturn(false);
2568#else
2569 /*
2570 * Return the state of virtual-NMI blocking, if we are executing a
2571 * VMX nested-guest with virtual-NMIs enabled.
2572 */
2573 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2574 Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
2575 return pCtx->hwvirt.vmx.fVirtNmiBlocking;
2576#endif
2577}
2578
2579/**
2580 * Sets or clears VMX nested-guest virtual-NMI blocking.
2581 *
2582 * @param pCtx The guest-CPU context.
2583 * @param fBlocking Whether virtual-NMI blocking is in effect or not.
2584 */
2585DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking)
2586{
2587#ifdef IN_RC
2588 RT_NOREF2(pCtx, fBlocking);
2589 AssertReleaseFailedReturnVoid();
2590#else
2591 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2592 Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
2593 pCtx->hwvirt.vmx.fVirtNmiBlocking = fBlocking;
2594#endif
2595}
2596
2597/**
2598 * Checks whether the VMX nested-guest is in a state to receive virtual interrupts
2599 * (those injected with the "virtual-interrupt delivery" feature).
2600 *
2601 * @returns @c true if it's ready, @c false otherwise.
2602 * @param pCtx The guest-CPU context.
2603 */
2604DECLINLINE(bool) CPUMIsGuestVmxVirtIntrEnabled(PCCPUMCTX pCtx)
2605{
2606#ifdef IN_RC
2607     RT_NOREF(pCtx);
2608 AssertReleaseFailedReturn(false);
2609#else
2610 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2611 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2612#endif
2613}
2614
2615/** @} */
2616#endif /* !IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS || DOXYGEN_RUNNING */
2617
2618
2619
2620/** @name Hypervisor Register Getters.
2621 * @{ */
2622VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu);
2623VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu);
2624VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu);
2625VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu);
2626VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu);
2627VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu);
2628VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
2629/** @} */
2630
2631/** @name Hypervisor Register Setters.
2632 * @{ */
2633VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
2634VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
2635VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
2636VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
2637VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
2638VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
2639VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
2640VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg);
2641/** @} */
2642
2643VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
2644#ifdef VBOX_INCLUDED_vmm_cpumctx_h
2645VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu);
2646#endif
2647VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu);
2648
2649/** @name Changed flags.
2650 * These flags are used to keep track of which important registers have
2651 * been changed since they were last reset. The only one allowed
2652 * to clear them is REM!
2653 *
2654 * @todo This is obsolete, but remains as it will be refactored for coordinating
2655 * IEM and NEM/HM later. Probably.
2656 * @{
2657 */
2658#define CPUM_CHANGED_FPU_REM RT_BIT(0)
2659#define CPUM_CHANGED_CR0 RT_BIT(1)
2660#define CPUM_CHANGED_CR4 RT_BIT(2)
2661#define CPUM_CHANGED_GLOBAL_TLB_FLUSH RT_BIT(3)
2662#define CPUM_CHANGED_CR3 RT_BIT(4)
2663#define CPUM_CHANGED_GDTR RT_BIT(5)
2664#define CPUM_CHANGED_IDTR RT_BIT(6)
2665#define CPUM_CHANGED_LDTR RT_BIT(7)
2666 #define CPUM_CHANGED_TR RT_BIT(8) /**< Currently unused. */
2667 #define CPUM_CHANGED_SYSENTER_MSR RT_BIT(9)
2668 #define CPUM_CHANGED_HIDDEN_SEL_REGS RT_BIT(10) /**< Currently unused. */
2669#define CPUM_CHANGED_CPUID RT_BIT(11)
2670#define CPUM_CHANGED_ALL ( CPUM_CHANGED_FPU_REM \
2671 | CPUM_CHANGED_CR0 \
2672 | CPUM_CHANGED_CR4 \
2673 | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
2674 | CPUM_CHANGED_CR3 \
2675 | CPUM_CHANGED_GDTR \
2676 | CPUM_CHANGED_IDTR \
2677 | CPUM_CHANGED_LDTR \
2678 | CPUM_CHANGED_TR \
2679 | CPUM_CHANGED_SYSENTER_MSR \
2680 | CPUM_CHANGED_HIDDEN_SEL_REGS \
2681 | CPUM_CHANGED_CPUID )
2682/** @} */
2683
2684VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
2685VMMDECL(bool) CPUMSupportsXSave(PVM pVM);
2686VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM);
2687VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM);
2688VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
2689VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
2690VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
2691VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
2692VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
2693VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
2694VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu);
2695VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu);
2696VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu);
2697VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
2698VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM);
2699VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM);
2700VMMDECL(uint64_t) CPUMGetGuestEferMsrValidMask(PVM pVM);
2701VMMDECL(int) CPUMIsGuestEferMsrWriteValid(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
2702 uint64_t *puValidEfer);
2703VMMDECL(void) CPUMSetGuestEferMsrNoChecks(PVMCPUCC pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
2704VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue);
2705
2706
2707/** Guest CPU interruptibility level, see CPUMGetGuestInterruptibility(). */
2708typedef enum CPUMINTERRUPTIBILITY
2709{
2710 CPUMINTERRUPTIBILITY_INVALID = 0,
2711 CPUMINTERRUPTIBILITY_UNRESTRAINED,
2712 CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
2713 CPUMINTERRUPTIBILITY_INT_DISABLED,
2714 CPUMINTERRUPTIBILITY_INT_INHIBITED,
2715 CPUMINTERRUPTIBILITY_NMI_INHIBIT,
2716 CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
2717 CPUMINTERRUPTIBILITY_END,
2718 CPUMINTERRUPTIBILITY_32BIT_HACK = 0x7fffffff
2719} CPUMINTERRUPTIBILITY;
2720
2721VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
2722VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu);
2723VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock);
2724
2725/** @name Typical scalable bus frequency values.
2726 * @{ */
2727/** Special internal value indicating that we don't know the frequency.
2728 * @internal */
2729#define CPUM_SBUSFREQ_UNKNOWN UINT64_C(1)
2730#define CPUM_SBUSFREQ_100MHZ UINT64_C(100000000)
2731#define CPUM_SBUSFREQ_133MHZ UINT64_C(133333333)
2732#define CPUM_SBUSFREQ_167MHZ UINT64_C(166666666)
2733#define CPUM_SBUSFREQ_200MHZ UINT64_C(200000000)
2734#define CPUM_SBUSFREQ_267MHZ UINT64_C(266666666)
2735#define CPUM_SBUSFREQ_333MHZ UINT64_C(333333333)
2736#define CPUM_SBUSFREQ_400MHZ UINT64_C(400000000)
2737/** @} */
2738
2739
2740#ifdef IN_RING3
2741/** @defgroup grp_cpum_r3 The CPUM ring-3 API
2742 * @{
2743 */
2744
2745VMMR3DECL(int) CPUMR3Init(PVM pVM);
2746VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
2747VMMR3DECL(void) CPUMR3LogCpuIdAndMsrFeatures(PVM pVM);
2748VMMR3DECL(void) CPUMR3Relocate(PVM pVM);
2749VMMR3DECL(int) CPUMR3Term(PVM pVM);
2750VMMR3DECL(void) CPUMR3Reset(PVM pVM);
2751VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
2752VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM);
2753VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
2754
2755VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
2756VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
2757VMMR3_INT_DECL(PCCPUMCPUIDLEAF) CPUMR3CpuIdGetPtr(PVM pVM, uint32_t *pcLeaves);
2758VMMDECL(CPUMMICROARCH) CPUMCpuIdDetermineX86MicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
2759 uint8_t bModel, uint8_t bStepping);
2760VMMDECL(const char *) CPUMMicroarchName(CPUMMICROARCH enmMicroarch);
2761VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
2762VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
2763VMMR3DECL(const char *) CPUMCpuVendorName(CPUMCPUVENDOR enmVendor);
2764#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
2765VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void);
2766#endif
2767
2768VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
2769
2770VMMR3DECL(uint32_t) CPUMR3DbGetEntries(void);
2771/** Pointer to CPUMR3DbGetEntries. */
2772typedef DECLCALLBACKPTR(uint32_t, PFNCPUMDBGETENTRIES, (void));
2773VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByIndex(uint32_t idxCpuDb);
2774/** Pointer to CPUMR3DbGetEntryByIndex. */
2775typedef DECLCALLBACKPTR(PCCPUMDBENTRY, PFNCPUMDBGETENTRYBYINDEX, (uint32_t idxCpuDb));
2776VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByName(const char *pszName);
2777/** Pointer to CPUMR3DbGetEntryByName. */
2778typedef DECLCALLBACKPTR(PCCPUMDBENTRY, PFNCPUMDBGETENTRYBYNAME, (const char *pszName));
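/* Example (editorial sketch): enumerating the built-in CPU database from a
 * ring-3 tool; the LogRel format string is illustrative.
 * @code
 *      uint32_t const cEntries = CPUMR3DbGetEntries();
 *      for (uint32_t i = 0; i < cEntries; i++)
 *      {
 *          PCCPUMDBENTRY pEntry = CPUMR3DbGetEntryByIndex(i);
 *          LogRel(("%u: %s (%s)\n", i, pEntry->pszName, pEntry->pszFullName));
 *      }
 * @endcode */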
2779
2780VMMR3_INT_DECL(void) CPUMR3NemActivateGuestDebugState(PVMCPUCC pVCpu);
2781VMMR3_INT_DECL(void) CPUMR3NemActivateHyperDebugState(PVMCPUCC pVCpu);
2782/** @} */
2783#endif /* IN_RING3 */
2784
2785#ifdef IN_RING0
2786/** @defgroup grp_cpum_r0 The CPUM ring-0 API
2787 * @{
2788 */
2789VMMR0_INT_DECL(int) CPUMR0ModuleInit(void);
2790VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void);
2791VMMR0_INT_DECL(void) CPUMR0InitPerVMData(PGVM pGVM);
2792VMMR0_INT_DECL(int) CPUMR0InitVM(PVMCC pVM);
2793DECLASM(void) CPUMR0RegisterVCpuThread(PVMCPUCC pVCpu);
2794DECLASM(void) CPUMR0TouchHostFpu(void);
2795VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVMCC pVM, PVMCPUCC pVCpu);
2796VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVMCC pVM, PVMCPUCC pVCpu);
2797VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu);
2798VMMR0_INT_DECL(int) CPUMR0SaveHostDebugState(PVMCC pVM, PVMCPUCC pVCpu);
2799VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu, bool fDr6);
2800VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPUCC pVCpu, bool fDr6);
2801
2802VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPUCC pVCpu, bool fDr6);
2803VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPUCC pVCpu, bool fDr6);
2804/** @} */
2805#endif /* IN_RING0 */
2806
2807/** @defgroup grp_cpum_rz The CPUM raw-mode and ring-0 context API
2808 * @{
2809 */
2810VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPUCC pVCpu);
2811VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPUCC pVCpu);
2812VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPUCC pVCpu);
2813VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPUCC pVCpu);
2814VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPUCC pVCpu);
2815/** @} */
2816
2817
2818#endif /* !VBOX_FOR_DTRACE_LIB */
2819/** @} */
2820RT_C_DECLS_END
2821
2822
2823#endif /* !VBOX_INCLUDED_vmm_cpum_h */
2824