/** @file
 * CPUM - CPU Monitor(/ Manager).
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef VBOX_INCLUDED_vmm_cpum_h
#define VBOX_INCLUDED_vmm_cpum_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <iprt/x86.h>
#include <VBox/types.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_ARM64)
# include <VBox/vmm/cpumctx.h>
#endif
#include <VBox/vmm/stam.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/vmm/hm_vmx.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_cpum The CPU Monitor / Manager API
 * @ingroup grp_vmm
 * @{
 */

/**
 * CPUID feature to set or clear.
 */
typedef enum CPUMCPUIDFEATURE
{
    CPUMCPUIDFEATURE_INVALID = 0,
    /** The APIC feature bit. (Std+Ext)
     * Note! There is a per-CPU flag for masking this CPUID feature bit when the
     *       APICBASE.ENABLED bit is zero.  So, this feature is only set/cleared
     *       at VM construction time like all the others.  This was not always
     *       the case; it changed in 5.1. */
    CPUMCPUIDFEATURE_APIC,
    /** The sysenter/sysexit feature bit. (Std) */
    CPUMCPUIDFEATURE_SEP,
    /** The SYSCALL/SYSEXIT feature bit (64-bit mode only for Intel CPUs). (Ext) */
    CPUMCPUIDFEATURE_SYSCALL,
    /** The PAE feature bit. (Std+Ext) */
    CPUMCPUIDFEATURE_PAE,
    /** The NX feature bit. (Ext) */
    CPUMCPUIDFEATURE_NX,
    /** The LAHF/SAHF feature bit (64-bit mode only). (Ext) */
    CPUMCPUIDFEATURE_LAHF,
    /** The LONG MODE feature bit. (Ext) */
    CPUMCPUIDFEATURE_LONG_MODE,
    /** The x2APIC feature bit. (Std) */
    CPUMCPUIDFEATURE_X2APIC,
    /** The RDTSCP feature bit. (Ext) */
    CPUMCPUIDFEATURE_RDTSCP,
    /** The Hypervisor Present bit. (Std) */
    CPUMCPUIDFEATURE_HVP,
    /** The speculation control feature bits. (Std+Ext) */
    CPUMCPUIDFEATURE_SPEC_CTRL,
    /** 32bit hackishness. */
    CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
} CPUMCPUIDFEATURE;

/**
 * CPU Vendor.
 */
typedef enum CPUMCPUVENDOR
{
    CPUMCPUVENDOR_INVALID = 0,
    CPUMCPUVENDOR_INTEL,
    CPUMCPUVENDOR_AMD,
    CPUMCPUVENDOR_VIA,
    CPUMCPUVENDOR_CYRIX,
    CPUMCPUVENDOR_SHANGHAI,
    CPUMCPUVENDOR_HYGON,
    CPUMCPUVENDOR_UNKNOWN,
    /** 32bit hackishness. */
    CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
} CPUMCPUVENDOR;


/**
 * X86 and AMD64 CPU microarchitectures and processor generations.
 *
 * @remarks The separation here is sometimes a little bit too finely grained,
 *          and the difference is often more one of processor generation than
 *          of microarchitecture.  This can still be useful, so functions are
 *          provided for getting at more coarse-grained info.
 */
typedef enum CPUMMICROARCH
{
    kCpumMicroarch_Invalid = 0,

    kCpumMicroarch_Intel_First,

    kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
    kCpumMicroarch_Intel_80186,
    kCpumMicroarch_Intel_80286,
    kCpumMicroarch_Intel_80386,
    kCpumMicroarch_Intel_80486,
    kCpumMicroarch_Intel_P5,

    kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6_II,
    kCpumMicroarch_Intel_P6_III,

    kCpumMicroarch_Intel_P6_M_Banias,
    kCpumMicroarch_Intel_P6_M_Dothan,
    kCpumMicroarch_Intel_Core_Yonah,    /**< Core, also known as Enhanced Pentium M. */

    kCpumMicroarch_Intel_Core2_First,
    kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First,    /**< 65nm, Merom/Conroe/Kentsfield/Tigerton */
    kCpumMicroarch_Intel_Core2_Penryn,  /**< 45nm, Penryn/Wolfdale/Yorkfield/Harpertown */
    kCpumMicroarch_Intel_Core2_End,

    kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Westmere,
    kCpumMicroarch_Intel_Core7_SandyBridge,
    kCpumMicroarch_Intel_Core7_IvyBridge,
    kCpumMicroarch_Intel_Core7_Haswell,
    kCpumMicroarch_Intel_Core7_Broadwell,
    kCpumMicroarch_Intel_Core7_Skylake,
    kCpumMicroarch_Intel_Core7_KabyLake,
    kCpumMicroarch_Intel_Core7_CoffeeLake,
    kCpumMicroarch_Intel_Core7_WhiskeyLake,
    kCpumMicroarch_Intel_Core7_CascadeLake,
    kCpumMicroarch_Intel_Core7_CannonLake,  /**< Limited 10nm. */
    kCpumMicroarch_Intel_Core7_CometLake,   /**< 10th gen, 14nm desktop + high power mobile. */
    kCpumMicroarch_Intel_Core7_IceLake,     /**< 10th gen, 10nm mobile and some Xeons. Actually 'Sunny Cove' march. */
    kCpumMicroarch_Intel_Core7_SunnyCove = kCpumMicroarch_Intel_Core7_IceLake,
    kCpumMicroarch_Intel_Core7_RocketLake,  /**< 11th gen, 14nm desktop + high power mobile. Aka 'Cypress Cove', backport of 'Willow Cove' to 14nm. */
    kCpumMicroarch_Intel_Core7_CypressCove = kCpumMicroarch_Intel_Core7_RocketLake,
    kCpumMicroarch_Intel_Core7_TigerLake,   /**< 11th gen, 10nm mobile. Actually 'Willow Cove' march. */
    kCpumMicroarch_Intel_Core7_WillowCove = kCpumMicroarch_Intel_Core7_TigerLake,
    kCpumMicroarch_Intel_Core7_AlderLake,   /**< 12th gen, 10nm all platforms(?). */
    kCpumMicroarch_Intel_Core7_SapphireRapids,  /**< 12th? gen, 10nm server? */
    kCpumMicroarch_Intel_Core7_End,

    kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Lincroft,     /**< Second generation Bonnell (45nm). */
    kCpumMicroarch_Intel_Atom_Saltwell,     /**< 32nm shrink of Bonnell. */
    kCpumMicroarch_Intel_Atom_Silvermont,   /**< 22nm */
    kCpumMicroarch_Intel_Atom_Airmount,     /**< 14nm */
    kCpumMicroarch_Intel_Atom_Goldmont,     /**< 14nm */
    kCpumMicroarch_Intel_Atom_GoldmontPlus, /**< 14nm */
    kCpumMicroarch_Intel_Atom_Unknown,
    kCpumMicroarch_Intel_Atom_End,


    kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsFerry = kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsCorner,
    kCpumMicroarch_Intel_Phi_KnightsLanding,
    kCpumMicroarch_Intel_Phi_KnightsHill,
    kCpumMicroarch_Intel_Phi_KnightsMill,
    kCpumMicroarch_Intel_Phi_End,

    kCpumMicroarch_Intel_P6_Core_Atom_End,

    kCpumMicroarch_Intel_NB_First,
    kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
    kCpumMicroarch_Intel_NB_Northwood,  /**< 130nm */
    kCpumMicroarch_Intel_NB_Prescott,   /**< 90nm */
    kCpumMicroarch_Intel_NB_Prescott2M, /**< 90nm */
    kCpumMicroarch_Intel_NB_CedarMill,  /**< 65nm */
    kCpumMicroarch_Intel_NB_Gallatin,   /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
    kCpumMicroarch_Intel_NB_Unknown,
    kCpumMicroarch_Intel_NB_End,

    kCpumMicroarch_Intel_Unknown,
    kCpumMicroarch_Intel_End,

    kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am386,
    kCpumMicroarch_AMD_Am486,
    kCpumMicroarch_AMD_Am486Enh,        /**< Covers Am5x86 as well. */
    kCpumMicroarch_AMD_K5,
    kCpumMicroarch_AMD_K6,

    kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Spitfire,
    kCpumMicroarch_AMD_K7_Thunderbird,
    kCpumMicroarch_AMD_K7_Morgan,
    kCpumMicroarch_AMD_K7_Thoroughbred,
    kCpumMicroarch_AMD_K7_Barton,
    kCpumMicroarch_AMD_K7_Unknown,
    kCpumMicroarch_AMD_K7_End,

    kCpumMicroarch_AMD_K8_First,
    kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
    kCpumMicroarch_AMD_K8_90nm,          /**< 90nm shrink */
    kCpumMicroarch_AMD_K8_90nm_DualCore, /**< 90nm with two cores. */
    kCpumMicroarch_AMD_K8_90nm_AMDV,     /**< 90nm with AMD-V (usually) and two cores (usually). */
    kCpumMicroarch_AMD_K8_65nm,          /**< 65nm shrink. */
    kCpumMicroarch_AMD_K8_End,

    kCpumMicroarch_AMD_K10,
    kCpumMicroarch_AMD_K10_Lion,
    kCpumMicroarch_AMD_K10_Llano,
    kCpumMicroarch_AMD_Bobcat,
    kCpumMicroarch_AMD_Jaguar,

    kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Piledriver,
    kCpumMicroarch_AMD_15h_Steamroller, /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Excavator,   /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Unknown,
    kCpumMicroarch_AMD_15h_End,

    kCpumMicroarch_AMD_16h_First,
    kCpumMicroarch_AMD_16h_End,

    kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_End,

    kCpumMicroarch_AMD_Unknown,
    kCpumMicroarch_AMD_End,

    kCpumMicroarch_Hygon_First,
    kCpumMicroarch_Hygon_Dhyana = kCpumMicroarch_Hygon_First,
    kCpumMicroarch_Hygon_Unknown,
    kCpumMicroarch_Hygon_End,

    kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C2,
    kCpumMicroarch_Centaur_C3,
    kCpumMicroarch_VIA_C3_M2,
    kCpumMicroarch_VIA_C3_C5A,  /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
    kCpumMicroarch_VIA_C3_C5B,  /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
    kCpumMicroarch_VIA_C3_C5C,  /**< 130nm Ezra - C3, Eden ESP. */
    kCpumMicroarch_VIA_C3_C5N,  /**< 130nm Ezra-T - C3. */
    kCpumMicroarch_VIA_C3_C5XL, /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
    kCpumMicroarch_VIA_C3_C5P,  /**< 130nm Nehemiah+ - C3. */
    kCpumMicroarch_VIA_C7_C5J,  /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
    kCpumMicroarch_VIA_Isaiah,
    kCpumMicroarch_VIA_Unknown,
    kCpumMicroarch_VIA_End,

    kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Wudaokou = kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Unknown,
    kCpumMicroarch_Shanghai_End,

    kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_M1,
    kCpumMicroarch_Cyrix_MediaGX,
    kCpumMicroarch_Cyrix_MediaGXm,
    kCpumMicroarch_Cyrix_M2,
    kCpumMicroarch_Cyrix_Unknown,
    kCpumMicroarch_Cyrix_End,

    kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V30,
    kCpumMicroarch_NEC_End,

    kCpumMicroarch_Unknown,

    kCpumMicroarch_32BitHack = 0x7fffffff
} CPUMMICROARCH;


/** Predicate macro for catching NetBurst CPUs. */
#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)

/** Predicate macro for catching Core7 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)

/** Predicate macro for catching Core 2 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE2(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core2_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core2_End)

/** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
#define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)

/** Predicate macro for catching AMD Family 0Fh CPUs (aka K8). */
#define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)

/** Predicate macro for catching AMD Family 10H CPUs (aka K10). */
#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)

/** Predicate macro for catching AMD Family 11H CPUs (aka Lion). */
#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)

/** Predicate macro for catching AMD Family 12H CPUs (aka Llano). */
#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)

/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat). */
#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)

/** Predicate macro for catching AMD Family 15H CPUs (Bulldozer and its
 * descendants). */
#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)

/** Predicate macro for catching AMD Family 16H CPUs. */
#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)

/** Predicate macro for catching AMD Zen Family CPUs. */
#define CPUMMICROARCH_IS_AMD_FAM_ZEN(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_Zen_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_Zen_End)

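/*
 * Usage sketch (illustrative, not part of the original sources): the
 * predicates above are typically used to gate CPU-specific quirks on the
 * detected microarchitecture.  CPUMGetGuestMicroarch() is declared further
 * down in this header; the helper name below is hypothetical.
 *
 *     static bool cpumExampleIsNetBurstGuest(PCVM pVM)
 *     {
 *         CPUMMICROARCH const enmMicroarch = CPUMGetGuestMicroarch(pVM);
 *         return CPUMMICROARCH_IS_INTEL_NETBURST(enmMicroarch);
 *     }
 */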

/**
 * CPUID leaf.
 *
 * @remarks This structure is used by the patch manager and is therefore
 *          more or less set in stone.
 */
typedef struct CPUMCPUIDLEAF
{
    /** The leaf number. */
    uint32_t uLeaf;
    /** The sub-leaf number. */
    uint32_t uSubLeaf;
    /** Sub-leaf mask. This is 0 when sub-leaves aren't used. */
    uint32_t fSubLeafMask;

    /** The EAX value. */
    uint32_t uEax;
    /** The EBX value. */
    uint32_t uEbx;
    /** The ECX value. */
    uint32_t uEcx;
    /** The EDX value. */
    uint32_t uEdx;

    /** Flags. */
    uint32_t fFlags;
} CPUMCPUIDLEAF;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMCPUIDLEAF, 32);
#endif
/** Pointer to a CPUID leaf. */
typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
/** Pointer to a const CPUID leaf. */
typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;

/** @name CPUMCPUIDLEAF::fFlags
 * @{ */
/** Indicates a working Intel leaf 0xb, where the lower 8 ECX bits are not
 * modified and EDX contains the extended APIC ID. */
#define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES RT_BIT_32(0)
/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID RT_BIT_32(1)
/** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE RT_BIT_32(2)
/** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC RT_BIT_32(3)
/** Mask of the valid flags. */
#define CPUMCPUIDLEAF_F_VALID_MASK UINT32_C(0xf)
/** @} */
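
/*
 * Illustrative sketch (not the actual CPUM lookup code): how uLeaf, uSubLeaf
 * and fSubLeafMask are meant to combine when searching an array of
 * CPUMCPUIDLEAF entries.  A zero mask makes the entry match any sub-leaf
 * value.  The helper name is hypothetical.
 *
 *     static PCCPUMCPUIDLEAF cpumExampleLookupLeaf(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
 *                                                  uint32_t uLeaf, uint32_t uSubLeaf)
 *     {
 *         for (uint32_t i = 0; i < cLeaves; i++)
 *             if (   paLeaves[i].uLeaf == uLeaf
 *                 && paLeaves[i].uSubLeaf == (uSubLeaf & paLeaves[i].fSubLeafMask))
 *                 return &paLeaves[i];
 *         return NULL; // caller falls back to the CPUMUNKNOWNCPUID method below
 *     }
 */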

/**
 * Method used to deal with unknown CPUID leaves.
 * @remarks Used in patch code.
 */
typedef enum CPUMUNKNOWNCPUID
{
    /** Invalid zero value. */
    CPUMUNKNOWNCPUID_INVALID = 0,
    /** Use given default values (DefCpuId). */
    CPUMUNKNOWNCPUID_DEFAULTS,
    /** Return the last standard leaf.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF,
    /** Return the last standard leaf, with ecx observed.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
    /** The register values are passed through unmodified. */
    CPUMUNKNOWNCPUID_PASSTHRU,
    /** End of valid values. */
    CPUMUNKNOWNCPUID_END,
    /** Ensure 32-bit type. */
    CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
} CPUMUNKNOWNCPUID;
/** Pointer to unknown CPUID leaf method. */
typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;


/**
 * The register set returned by a CPUID operation.
 */
typedef struct CPUMCPUID
{
    uint32_t uEax;
    uint32_t uEbx;
    uint32_t uEcx;
    uint32_t uEdx;
} CPUMCPUID;
/** Pointer to a CPUID leaf. */
typedef CPUMCPUID *PCPUMCPUID;
/** Pointer to a const CPUID leaf. */
typedef const CPUMCPUID *PCCPUMCPUID;

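/*
 * Illustrative sketch (hypothetical helper, not the real implementation): how
 * the CPUMUNKNOWNCPUID methods above could be applied when the guest queries
 * a leaf that is not in the table.  pDefCpuId would be the configured default
 * values and pLastStd the highest standard leaf.
 *
 *     static void cpumExampleUnknownLeaf(CPUMUNKNOWNCPUID enmMethod, CPUMCPUID const *pDefCpuId,
 *                                        CPUMCPUID const *pLastStd, CPUMCPUID *pResult)
 *     {
 *         switch (enmMethod)
 *         {
 *             case CPUMUNKNOWNCPUID_DEFAULTS:
 *                 *pResult = *pDefCpuId;      // use the given default values
 *                 break;
 *             case CPUMUNKNOWNCPUID_LAST_STD_LEAF:
 *             case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX:
 *                 *pResult = *pLastStd;       // mimic observed Sandy Bridge behavior
 *                 break;
 *             case CPUMUNKNOWNCPUID_PASSTHRU:
 *             default:
 *                 break;                      // leave the register values unmodified
 *         }
 *     }
 */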

/**
 * MSR read functions.
 */
typedef enum CPUMMSRRDFN
{
    /** Invalid zero value. */
    kCpumMsrRdFn_Invalid = 0,
    /** Return the CPUMMSRRANGE::uValue. */
    kCpumMsrRdFn_FixedValue,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be used in pair with
     * kCpumMsrWrFn_MsrAlias. */
    kCpumMsrRdFn_MsrAlias,
    /** Write-only register; all read attempts cause GP(0). */
    kCpumMsrRdFn_WriteOnly,

    kCpumMsrRdFn_Ia32P5McAddr,
    kCpumMsrRdFn_Ia32P5McType,
    kCpumMsrRdFn_Ia32TimestampCounter,
    kCpumMsrRdFn_Ia32PlatformId,            /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32ApicBase,
    kCpumMsrRdFn_Ia32FeatureControl,
    kCpumMsrRdFn_Ia32BiosSignId,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32SmmMonitorCtl,
    kCpumMsrRdFn_Ia32PmcN,
    kCpumMsrRdFn_Ia32MonitorFilterLineSize,
    kCpumMsrRdFn_Ia32MPerf,
    kCpumMsrRdFn_Ia32APerf,
    kCpumMsrRdFn_Ia32MtrrCap,               /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrRdFn_Ia32MtrrDefType,
    kCpumMsrRdFn_Ia32Pat,
    kCpumMsrRdFn_Ia32SysEnterCs,
    kCpumMsrRdFn_Ia32SysEnterEsp,
    kCpumMsrRdFn_Ia32SysEnterEip,
    kCpumMsrRdFn_Ia32McgCap,
    kCpumMsrRdFn_Ia32McgStatus,
    kCpumMsrRdFn_Ia32McgCtl,
    kCpumMsrRdFn_Ia32DebugCtl,
    kCpumMsrRdFn_Ia32SmrrPhysBase,
    kCpumMsrRdFn_Ia32SmrrPhysMask,
    kCpumMsrRdFn_Ia32PlatformDcaCap,
    kCpumMsrRdFn_Ia32CpuDcaCap,
    kCpumMsrRdFn_Ia32Dca0Cap,
    kCpumMsrRdFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrRdFn_Ia32PerfStatus,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32PerfCtl,               /**< Range value returned. */
    kCpumMsrRdFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32PerfCapabilities,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32FixedCtrCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalStatus,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32PerfGlobalCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrRdFn_Ia32PebsEnable,
    kCpumMsrRdFn_Ia32ClockModulation,       /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermInterrupt,        /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermStatus,           /**< Range value returned. */
    kCpumMsrRdFn_Ia32Therm2Ctl,             /**< Range value returned. */
    kCpumMsrRdFn_Ia32MiscEnable,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrRdFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32DsArea,
    kCpumMsrRdFn_Ia32TscDeadline,
    kCpumMsrRdFn_Ia32X2ApicN,
    kCpumMsrRdFn_Ia32DebugInterface,
    kCpumMsrRdFn_Ia32VmxBasic,              /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxPinbasedCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcbasedCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxExitCtls,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEntryCtls,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxMisc,               /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmcsEnum,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcBasedCtls2,     /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEptVpidCap,         /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTruePinbasedCtls,   /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls,  /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueExitCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueEntryCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmFunc,             /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32SpecCtrl,
    kCpumMsrRdFn_Ia32ArchCapabilities,

    kCpumMsrRdFn_Amd64Efer,
    kCpumMsrRdFn_Amd64SyscallTarget,
    kCpumMsrRdFn_Amd64LongSyscallTarget,
    kCpumMsrRdFn_Amd64CompSyscallTarget,
    kCpumMsrRdFn_Amd64SyscallFlagMask,
    kCpumMsrRdFn_Amd64FsBase,
    kCpumMsrRdFn_Amd64GsBase,
    kCpumMsrRdFn_Amd64KernelGsBase,
    kCpumMsrRdFn_Amd64TscAux,

    kCpumMsrRdFn_IntelEblCrPowerOn,
    kCpumMsrRdFn_IntelI7CoreThreadCount,
    kCpumMsrRdFn_IntelP4EbcHardPowerOn,
    kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
    kCpumMsrRdFn_IntelP4EbcFrequencyId,
    kCpumMsrRdFn_IntelP6FsbFrequency,       /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPlatformInfo,
    kCpumMsrRdFn_IntelFlexRatio,            /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPkgCStConfigControl,
    kCpumMsrRdFn_IntelPmgIoCaptureBase,
    kCpumMsrRdFn_IntelLastBranchFromToN,
    kCpumMsrRdFn_IntelLastBranchFromN,
    kCpumMsrRdFn_IntelLastBranchToN,
    kCpumMsrRdFn_IntelLastBranchTos,
    kCpumMsrRdFn_IntelBblCrCtl,
    kCpumMsrRdFn_IntelBblCrCtl3,
    kCpumMsrRdFn_IntelI7TemperatureTarget,  /**< Range value returned. */
    kCpumMsrRdFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
    kCpumMsrRdFn_IntelI7MiscPwrMgmt,
    kCpumMsrRdFn_IntelP6CrN,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrRdFn_IntelI7SandyAesNiCtl,
    kCpumMsrRdFn_IntelI7TurboRatioLimit,    /**< Returns range value. */
    kCpumMsrRdFn_IntelI7LbrSelect,
    kCpumMsrRdFn_IntelI7SandyErrorControl,
    kCpumMsrRdFn_IntelI7VirtualLegacyWireCap, /**< Returns range value. */
    kCpumMsrRdFn_IntelI7PowerCtl,
    kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
    kCpumMsrRdFn_IntelI7PebsLdLat,
    kCpumMsrRdFn_IntelI7PkgCnResidencyN,    /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7CoreCnResidencyN,   /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7SandyVrCurrentConfig, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyVrMiscConfig,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPerfStatus,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerInfo,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramEnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerInfo,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0Policy,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PerfStatus,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1PowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1Policy,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpNominal, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
    kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
    kCpumMsrRdFn_IntelI7UncCBoxConfig,
    kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
    kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrRdFn_IntelI7SmiCount,
    kCpumMsrRdFn_IntelCore2EmttmCrTablesN,  /**< Range value returned. */
    kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrRdFn_IntelCore1ExtConfig,
    kCpumMsrRdFn_IntelCore1DtsCalControl,
    kCpumMsrRdFn_IntelCore2PeciControl,
    kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,

    kCpumMsrRdFn_P6LastBranchFromIp,
    kCpumMsrRdFn_P6LastBranchToIp,
    kCpumMsrRdFn_P6LastIntFromIp,
    kCpumMsrRdFn_P6LastIntToIp,

    kCpumMsrRdFn_AmdFam15hTscRate,
    kCpumMsrRdFn_AmdFam15hLwpCfg,
    kCpumMsrRdFn_AmdFam15hLwpCbAddr,
    kCpumMsrRdFn_AmdFam10hMc4MiscN,
    kCpumMsrRdFn_AmdK8PerfCtlN,
    kCpumMsrRdFn_AmdK8PerfCtrN,
    kCpumMsrRdFn_AmdK8SysCfg,               /**< Range value returned. */
    kCpumMsrRdFn_AmdK8HwCr,
    kCpumMsrRdFn_AmdK8IorrBaseN,
    kCpumMsrRdFn_AmdK8IorrMaskN,
    kCpumMsrRdFn_AmdK8TopOfMemN,
    kCpumMsrRdFn_AmdK8NbCfg1,
    kCpumMsrRdFn_AmdK8McXcptRedir,
    kCpumMsrRdFn_AmdK8CpuNameN,
    kCpumMsrRdFn_AmdK8HwThermalCtrl,        /**< Range value returned. */
    kCpumMsrRdFn_AmdK8SwThermalCtrl,
    kCpumMsrRdFn_AmdK8FidVidControl,        /**< Range value returned. */
    kCpumMsrRdFn_AmdK8FidVidStatus,         /**< Range value returned. */
    kCpumMsrRdFn_AmdK8McCtlMaskN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrRdFn_AmdK8IntPendingMessage,
    kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrRdFn_AmdFam10hPStateCurLimit,   /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateN,          /**< Returns range value. This isn't a register index! */
    kCpumMsrRdFn_AmdFam10hCofVidControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCofVidStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrRdFn_AmdK8SmmBase,
    kCpumMsrRdFn_AmdK8SmmAddr,
    kCpumMsrRdFn_AmdK8SmmMask,
    kCpumMsrRdFn_AmdK8VmCr,
    kCpumMsrRdFn_AmdK8IgnNe,
    kCpumMsrRdFn_AmdK8SmmCtl,
    kCpumMsrRdFn_AmdK8VmHSavePa,
    kCpumMsrRdFn_AmdFam10hVmLockKey,
    kCpumMsrRdFn_AmdFam10hSmmLockKey,
    kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
    kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrRdFn_AmdK7MicrocodeCtl,         /**< Returns range value. */
    kCpumMsrRdFn_AmdK7ClusterIdMaybe,       /**< Returns range value. */
    kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrRdFn_AmdK8PatchLevel,           /**< Returns range value. */
    kCpumMsrRdFn_AmdK7DebugStatusMaybe,
    kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
    kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
    kCpumMsrRdFn_AmdK7NodeId,
    kCpumMsrRdFn_AmdK7DrXAddrMaskN,         /**< Takes register index. */
    kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrRdFn_AmdK7LoadStoreCfg,
    kCpumMsrRdFn_AmdK7InstrCacheCfg,
    kCpumMsrRdFn_AmdK7DataCacheCfg,
    kCpumMsrRdFn_AmdK7BusUnitCfg,
    kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
    kCpumMsrRdFn_AmdFam15hFpuCfg,
    kCpumMsrRdFn_AmdFam15hDecoderCfg,
    kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
    kCpumMsrRdFn_AmdFam15hExecUnitCfg,
    kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
    kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrRdFn_AmdFam10hIbsOpRip,
    kCpumMsrRdFn_AmdFam10hIbsOpData,
    kCpumMsrRdFn_AmdFam10hIbsOpData2,
    kCpumMsrRdFn_AmdFam10hIbsOpData3,
    kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsCtl,
    kCpumMsrRdFn_AmdFam14hIbsBrTarget,

    kCpumMsrRdFn_Gim,

    /** End of valid MSR read function indexes. */
    kCpumMsrRdFn_End
} CPUMMSRRDFN;

/**
 * MSR write functions.
 */
typedef enum CPUMMSRWRFN
{
    /** Invalid zero value. */
    kCpumMsrWrFn_Invalid = 0,
    /** Writes are ignored; the fWrGpMask is still observed, though. */
    kCpumMsrWrFn_IgnoreWrite,
    /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
    kCpumMsrWrFn_ReadOnly,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be used in pair with
     * kCpumMsrRdFn_MsrAlias. */
    kCpumMsrWrFn_MsrAlias,

    kCpumMsrWrFn_Ia32P5McAddr,
    kCpumMsrWrFn_Ia32P5McType,
    kCpumMsrWrFn_Ia32TimestampCounter,
    kCpumMsrWrFn_Ia32ApicBase,
    kCpumMsrWrFn_Ia32FeatureControl,
    kCpumMsrWrFn_Ia32BiosSignId,
    kCpumMsrWrFn_Ia32BiosUpdateTrigger,
    kCpumMsrWrFn_Ia32SmmMonitorCtl,
    kCpumMsrWrFn_Ia32PmcN,
    kCpumMsrWrFn_Ia32MonitorFilterLineSize,
    kCpumMsrWrFn_Ia32MPerf,
    kCpumMsrWrFn_Ia32APerf,
    kCpumMsrWrFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrWrFn_Ia32MtrrDefType,
    kCpumMsrWrFn_Ia32Pat,
    kCpumMsrWrFn_Ia32SysEnterCs,
    kCpumMsrWrFn_Ia32SysEnterEsp,
    kCpumMsrWrFn_Ia32SysEnterEip,
    kCpumMsrWrFn_Ia32McgStatus,
    kCpumMsrWrFn_Ia32McgCtl,
    kCpumMsrWrFn_Ia32DebugCtl,
    kCpumMsrWrFn_Ia32SmrrPhysBase,
    kCpumMsrWrFn_Ia32SmrrPhysMask,
    kCpumMsrWrFn_Ia32PlatformDcaCap,
    kCpumMsrWrFn_Ia32Dca0Cap,
    kCpumMsrWrFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrWrFn_Ia32PerfStatus,
    kCpumMsrWrFn_Ia32PerfCtl,
    kCpumMsrWrFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32PerfCapabilities,
    kCpumMsrWrFn_Ia32FixedCtrCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalStatus,
    kCpumMsrWrFn_Ia32PerfGlobalCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrWrFn_Ia32PebsEnable,
    kCpumMsrWrFn_Ia32ClockModulation,
    kCpumMsrWrFn_Ia32ThermInterrupt,
    kCpumMsrWrFn_Ia32ThermStatus,
    kCpumMsrWrFn_Ia32Therm2Ctl,
    kCpumMsrWrFn_Ia32MiscEnable,
    kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrWrFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32DsArea,
    kCpumMsrWrFn_Ia32TscDeadline,
    kCpumMsrWrFn_Ia32X2ApicN,
    kCpumMsrWrFn_Ia32DebugInterface,
    kCpumMsrWrFn_Ia32SpecCtrl,
    kCpumMsrWrFn_Ia32PredCmd,
    kCpumMsrWrFn_Ia32FlushCmd,

    kCpumMsrWrFn_Amd64Efer,
    kCpumMsrWrFn_Amd64SyscallTarget,
    kCpumMsrWrFn_Amd64LongSyscallTarget,
    kCpumMsrWrFn_Amd64CompSyscallTarget,
    kCpumMsrWrFn_Amd64SyscallFlagMask,
    kCpumMsrWrFn_Amd64FsBase,
    kCpumMsrWrFn_Amd64GsBase,
    kCpumMsrWrFn_Amd64KernelGsBase,
    kCpumMsrWrFn_Amd64TscAux,
    kCpumMsrWrFn_IntelEblCrPowerOn,
    kCpumMsrWrFn_IntelP4EbcHardPowerOn,
    kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
    kCpumMsrWrFn_IntelP4EbcFrequencyId,
    kCpumMsrWrFn_IntelFlexRatio,
    kCpumMsrWrFn_IntelPkgCStConfigControl,
    kCpumMsrWrFn_IntelPmgIoCaptureBase,
    kCpumMsrWrFn_IntelLastBranchFromToN,
    kCpumMsrWrFn_IntelLastBranchFromN,
    kCpumMsrWrFn_IntelLastBranchToN,
    kCpumMsrWrFn_IntelLastBranchTos,
    kCpumMsrWrFn_IntelBblCrCtl,
    kCpumMsrWrFn_IntelBblCrCtl3,
    kCpumMsrWrFn_IntelI7TemperatureTarget,
    kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
    kCpumMsrWrFn_IntelI7MiscPwrMgmt,
    kCpumMsrWrFn_IntelP6CrN,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrWrFn_IntelI7SandyAesNiCtl,
    kCpumMsrWrFn_IntelI7TurboRatioLimit,
    kCpumMsrWrFn_IntelI7LbrSelect,
    kCpumMsrWrFn_IntelI7SandyErrorControl,
    kCpumMsrWrFn_IntelI7PowerCtl,
    kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
    kCpumMsrWrFn_IntelI7PebsLdLat,
    kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
    kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
    kCpumMsrWrFn_IntelI7SandyRaplPowerUnit, /**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
    kCpumMsrWrFn_IntelI7SandyPkgC2Residency, /**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
    kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0Policy,
    kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp1Policy,
    kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
    kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
    kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
    kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
    kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrWrFn_IntelCore1ExtConfig,
    kCpumMsrWrFn_IntelCore1DtsCalControl,
    kCpumMsrWrFn_IntelCore2PeciControl,

    kCpumMsrWrFn_P6LastIntFromIp,
    kCpumMsrWrFn_P6LastIntToIp,

    kCpumMsrWrFn_AmdFam15hTscRate,
    kCpumMsrWrFn_AmdFam15hLwpCfg,
    kCpumMsrWrFn_AmdFam15hLwpCbAddr,
    kCpumMsrWrFn_AmdFam10hMc4MiscN,
    kCpumMsrWrFn_AmdK8PerfCtlN,
    kCpumMsrWrFn_AmdK8PerfCtrN,
    kCpumMsrWrFn_AmdK8SysCfg,
    kCpumMsrWrFn_AmdK8HwCr,
    kCpumMsrWrFn_AmdK8IorrBaseN,
    kCpumMsrWrFn_AmdK8IorrMaskN,
    kCpumMsrWrFn_AmdK8TopOfMemN,
    kCpumMsrWrFn_AmdK8NbCfg1,
    kCpumMsrWrFn_AmdK8McXcptRedir,
    kCpumMsrWrFn_AmdK8CpuNameN,
    kCpumMsrWrFn_AmdK8HwThermalCtrl,
    kCpumMsrWrFn_AmdK8SwThermalCtrl,
    kCpumMsrWrFn_AmdK8FidVidControl,
    kCpumMsrWrFn_AmdK8McCtlMaskN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrWrFn_AmdK8IntPendingMessage,
    kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrWrFn_AmdFam10hPStateControl,
    kCpumMsrWrFn_AmdFam10hPStateStatus,
    kCpumMsrWrFn_AmdFam10hPStateN,
    kCpumMsrWrFn_AmdFam10hCofVidControl,
    kCpumMsrWrFn_AmdFam10hCofVidStatus,
    kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrWrFn_AmdK8SmmBase,
    kCpumMsrWrFn_AmdK8SmmAddr,
    kCpumMsrWrFn_AmdK8SmmMask,
    kCpumMsrWrFn_AmdK8VmCr,
    kCpumMsrWrFn_AmdK8IgnNe,
    kCpumMsrWrFn_AmdK8SmmCtl,
    kCpumMsrWrFn_AmdK8VmHSavePa,
    kCpumMsrWrFn_AmdFam10hVmLockKey,
    kCpumMsrWrFn_AmdFam10hSmmLockKey,
    kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
    kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrWrFn_AmdK7MicrocodeCtl,
    kCpumMsrWrFn_AmdK7ClusterIdMaybe,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrWrFn_AmdK8PatchLoader,
    kCpumMsrWrFn_AmdK7DebugStatusMaybe,
    kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
    kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
    kCpumMsrWrFn_AmdK7NodeId,
    kCpumMsrWrFn_AmdK7DrXAddrMaskN,         /**< Takes register index. */
    kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrWrFn_AmdK7LoadStoreCfg,
    kCpumMsrWrFn_AmdK7InstrCacheCfg,
    kCpumMsrWrFn_AmdK7DataCacheCfg,
    kCpumMsrWrFn_AmdK7BusUnitCfg,
    kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
    kCpumMsrWrFn_AmdFam15hFpuCfg,
    kCpumMsrWrFn_AmdFam15hDecoderCfg,
    kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
    kCpumMsrWrFn_AmdFam15hExecUnitCfg,
    kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
    kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrWrFn_AmdFam10hIbsOpRip,
    kCpumMsrWrFn_AmdFam10hIbsOpData,
    kCpumMsrWrFn_AmdFam10hIbsOpData2,
    kCpumMsrWrFn_AmdFam10hIbsOpData3,
    kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsCtl,
    kCpumMsrWrFn_AmdFam14hIbsBrTarget,

    kCpumMsrWrFn_Gim,

    /** End of valid MSR write function indexes. */
    kCpumMsrWrFn_End
} CPUMMSRWRFN;

/**
 * MSR range.
 */
typedef struct CPUMMSRRANGE
{
    /** The first MSR. [0] */
    uint32_t uFirst;
    /** The last MSR. [4] */
    uint32_t uLast;
    /** The read function (CPUMMSRRDFN). [8] */
    uint16_t enmRdFn;
    /** The write function (CPUMMSRWRFN). [10] */
    uint16_t enmWrFn;
    /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
     * UINT24_MAX if not used by the read and write functions. [12] */
    uint32_t offCpumCpu : 24;
    /** Reserved for future hacks. [15] */
    uint32_t fReserved : 8;
    /** The init/read value. [16]
     * When enmRdFn is kCpumMsrRdFn_FixedValue, this is the value returned on RDMSR.
     * offCpumCpu must be UINT24_MAX in that case, otherwise it must be a valid
     * offset into CPUMCPU. */
    uint64_t uValue;
    /** The bits to ignore when writing. [24] */
    uint64_t fWrIgnMask;
    /** The bits that will cause a GP(0) when writing. [32]
     * This is always checked prior to calling the write function.  Using
     * UINT64_MAX effectively marks the MSR as read-only. */
    uint64_t fWrGpMask;
    /** The register name, if applicable. [40] */
    char szName[56];

    /** The number of reads. */
    STAMCOUNTER cReads;
    /** The number of writes. */
    STAMCOUNTER cWrites;
    /** The number of times ignored bits were written. */
    STAMCOUNTER cIgnoredBits;
    /** The number of GPs generated. */
    STAMCOUNTER cGps;
} CPUMMSRRANGE;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMMSRRANGE, 128);
#endif
/** Pointer to an MSR range. */
typedef CPUMMSRRANGE *PCPUMMSRRANGE;
/** Pointer to a const MSR range. */
typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;

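/*
 * Illustrative example (not an entry from the actual MSR tables): a single
 * read-only MSR backed by a fixed value.  The fWrGpMask check happens before
 * any write function is called, so UINT64_MAX makes every write attempt
 * raise GP(0).  The MSR index and name here are made up.
 *
 *     static CPUMMSRRANGE const s_ExampleMsrRange =
 *     {
 *         .uFirst     = 0x00001234,               // hypothetical MSR index
 *         .uLast      = 0x00001234,               // single-MSR range
 *         .enmRdFn    = kCpumMsrRdFn_FixedValue,  // RDMSR returns .uValue
 *         .enmWrFn    = kCpumMsrWrFn_ReadOnly,
 *         .offCpumCpu = 0xffffff,                 // UINT24_MAX: no CPUMCPU backing
 *         .fReserved  = 0,
 *         .uValue     = 0,
 *         .fWrIgnMask = 0,
 *         .fWrGpMask  = UINT64_MAX,               // GP(0) on any write attempt
 *         .szName     = "EXAMPLE_MSR",
 *     };
 */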

/**
 * MSRs which are required while exploding features.
 */
typedef struct CPUMMSRS
{
    union
    {
        VMXMSRS vmx;
        SVMMSRS svm;
    } hwvirt;
} CPUMMSRS;
/** Pointer to a CPUMMSRS struct. */
typedef CPUMMSRS *PCPUMMSRS;
/** Pointer to a const CPUMMSRS struct. */
typedef CPUMMSRS const *PCCPUMMSRS;


/**
 * CPU features and quirks.
 * This is mostly exploded CPUID info.
 */
typedef struct CPUMFEATURES
{
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t enmCpuVendor;
    /** The CPU family. */
    uint8_t uFamily;
    /** The CPU model. */
    uint8_t uModel;
    /** The CPU stepping. */
    uint8_t uStepping;
    /** The microarchitecture. */
#ifndef VBOX_FOR_DTRACE_LIB
    CPUMMICROARCH enmMicroarch;
#else
    uint32_t enmMicroarch;
#endif
    /** The maximum physical address width of the CPU. */
    uint8_t cMaxPhysAddrWidth;
    /** The maximum linear address width of the CPU. */
    uint8_t cMaxLinearAddrWidth;
    /** Max size of the extended state (or FPU state if no XSAVE). */
    uint16_t cbMaxExtendedState;

    /** Supports MSRs. */
    uint32_t fMsr : 1;
    /** Supports the page size extension (4/2 MB pages). */
    uint32_t fPse : 1;
    /** Supports 36-bit page size extension (4 MB pages can map memory above
     * 4GB). */
    uint32_t fPse36 : 1;
    /** Supports physical address extension (PAE). */
    uint32_t fPae : 1;
    /** Supports page-global extension (PGE). */
    uint32_t fPge : 1;
    /** Page attribute table (PAT) support (page level cache control). */
    uint32_t fPat : 1;
    /** Supports the FXSAVE and FXRSTOR instructions. */
    uint32_t fFxSaveRstor : 1;
    /** Supports the XSAVE and XRSTOR instructions. */
    uint32_t fXSaveRstor : 1;
    /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
    uint32_t fOpSysXSaveRstor : 1;
    /** Supports MMX. */
    uint32_t fMmx : 1;
    /** Supports AMD extensions to MMX instructions. */
    uint32_t fAmdMmxExts : 1;
    /** Supports SSE. */
    uint32_t fSse : 1;
    /** Supports SSE2. */
    uint32_t fSse2 : 1;
    /** Supports SSE3. */
    uint32_t fSse3 : 1;
    /** Supports SSSE3. */
    uint32_t fSsse3 : 1;
    /** Supports SSE4.1. */
    uint32_t fSse41 : 1;
    /** Supports SSE4.2. */
    uint32_t fSse42 : 1;
    /** Supports AVX. */
    uint32_t fAvx : 1;
    /** Supports AVX2. */
    uint32_t fAvx2 : 1;
    /** Supports AVX512 foundation. */
    uint32_t fAvx512Foundation : 1;
    /** Supports RDTSC. */
    uint32_t fTsc : 1;
    /** Intel SYSENTER/SYSEXIT support */
    uint32_t fSysEnter : 1;
    /** First generation APIC. */
    uint32_t fApic : 1;
    /** Second generation APIC. */
    uint32_t fX2Apic : 1;
    /** Hypervisor present. */
    uint32_t fHypervisorPresent : 1;
    /** MWAIT & MONITOR instructions supported. */
    uint32_t fMonitorMWait : 1;
    /** MWAIT Extensions present. */
    uint32_t fMWaitExtensions : 1;
    /** Supports CMPXCHG16B in 64-bit mode. */
    uint32_t fMovCmpXchg16b : 1;
    /** Supports CLFLUSH. */
    uint32_t fClFlush : 1;
    /** Supports CLFLUSHOPT. */
    uint32_t fClFlushOpt : 1;
    /** Supports IA32_PRED_CMD.IBPB. */
    uint32_t fIbpb : 1;
    /** Supports IA32_SPEC_CTRL.IBRS. */
    uint32_t fIbrs : 1;
    /** Supports IA32_SPEC_CTRL.STIBP. */
    uint32_t fStibp : 1;
    /** Supports IA32_FLUSH_CMD. */
    uint32_t fFlushCmd : 1;
    /** Supports IA32_ARCH_CAP. */
    uint32_t fArchCap : 1;
    /** Supports MD_CLEAR functionality (VERW, IA32_FLUSH_CMD). */
    uint32_t fMdsClear : 1;
    /** Supports PCID. */
    uint32_t fPcid : 1;
    /** Supports INVPCID. */
    uint32_t fInvpcid : 1;
    /** Supports read/write FSGSBASE instructions. */
    uint32_t fFsGsBase : 1;

    /** Supports AMD 3DNow instructions. */
    uint32_t f3DNow : 1;
    /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
    uint32_t f3DNowPrefetch : 1;

    /** AMD64: Supports long mode. */
    uint32_t fLongMode : 1;
    /** AMD64: SYSCALL/SYSRET support. */
    uint32_t fSysCall : 1;
    /** AMD64: No-execute page table bit. */
    uint32_t fNoExecute : 1;
    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
    uint32_t fLahfSahf : 1;
    /** AMD64: Supports RDTSCP. */
    uint32_t fRdTscP : 1;
    /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
    uint32_t fMovCr8In32Bit : 1;
    /** AMD64: Supports XOP (similar to VEX3/AVX). */
    uint32_t fXop : 1;

    /** Indicates that FPU instruction and data pointers may leak.
     * This generally applies to recent AMD CPUs, where the FPU IP and DP pointers
     * are only saved and restored if an exception is pending. */
    uint32_t fLeakyFxSR : 1;

    /** AMD64: Supports AMD SVM. */
    uint32_t fSvm : 1;

    /** Support for Intel VMX. */
    uint32_t fVmx : 1;

    /** Indicates that speculative execution control CPUID bits and MSRs are exposed.
     * The details are different for Intel and AMD but both have similar
     * functionality. */
    uint32_t fSpeculationControl : 1;

    /** MSR_IA32_ARCH_CAPABILITIES: RDCL_NO (bit 0).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchRdclNo : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: IBRS_ALL (bit 1).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchIbrsAll : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: RSB Override (bit 2).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchRsbOverride : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: SKIP_L1DFL_VMENTRY (bit 3), i.e. the VMM
     * need not flush the L1D cache on VM entry.
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchVmmNeedNotFlushL1d : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: MDS_NO (bit 4).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchMdsNo : 1;

    /** Alignment padding / reserved for future use. */
    uint32_t fPadding : 7;

    /** SVM: Supports Nested-paging. */
    uint32_t fSvmNestedPaging : 1;
    /** SVM: Support LBR (Last Branch Record) virtualization. */
    uint32_t fSvmLbrVirt : 1;
    /** SVM: Supports SVM lock. */
    uint32_t fSvmSvmLock : 1;
    /** SVM: Supports Next RIP save. */
    uint32_t fSvmNextRipSave : 1;
    /** SVM: Supports TSC rate MSR. */
    uint32_t fSvmTscRateMsr : 1;
    /** SVM: Supports VMCB clean bits. */
    uint32_t fSvmVmcbClean : 1;
    /** SVM: Supports Flush-by-ASID. */
    uint32_t fSvmFlusbByAsid : 1;
    /** SVM: Supports decode assist. */
    uint32_t fSvmDecodeAssists : 1;
    /** SVM: Supports Pause filter. */
    uint32_t fSvmPauseFilter : 1;
    /** SVM: Supports Pause filter threshold. */
    uint32_t fSvmPauseFilterThreshold : 1;
    /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
    uint32_t fSvmAvic : 1;
    /** SVM: Supports Virtualized VMSAVE/VMLOAD. */
    uint32_t fSvmVirtVmsaveVmload : 1;
    /** SVM: Supports VGIF (Virtual Global Interrupt Flag). */
    uint32_t fSvmVGif : 1;
    /** SVM: Supports GMET (Guest Mode Execute Trap Extension). */
    uint32_t fSvmGmet : 1;
    /** SVM: Padding / reserved for future features. */
    uint32_t fSvmPadding0 : 18;
    /** SVM: Maximum supported ASID. */
    uint32_t uSvmMaxAsid;

    /** VMX: Maximum physical address width. */
    uint8_t cVmxMaxPhysAddrWidth;
    /** VMX: Padding / reserved for future. */
    uint8_t abVmxPadding[3];
    /** VMX: Padding / reserved for future. */
    uint32_t fVmxPadding0;

    /** @name VMX basic controls.
     * @{ */
    /** VMX: Supports INS/OUTS VM-exit instruction info. */
    uint32_t fVmxInsOutInfo : 1;
    /** @} */

    /** @name VMX Pin-based controls.
     * @{ */
    /** VMX: Supports external interrupt VM-exit. */
    uint32_t fVmxExtIntExit : 1;
    /** VMX: Supports NMI VM-exit. */
    uint32_t fVmxNmiExit : 1;
    /** VMX: Supports Virtual NMIs. */
    uint32_t fVmxVirtNmi : 1;
    /** VMX: Supports preemption timer. */
    uint32_t fVmxPreemptTimer : 1;
    /** VMX: Supports posted interrupts. */
    uint32_t fVmxPostedInt : 1;
    /** @} */

    /** @name VMX Processor-based controls.
     * @{ */
    /** VMX: Supports Interrupt-window exiting. */
    uint32_t fVmxIntWindowExit : 1;
    /** VMX: Supports TSC offsetting. */
    uint32_t fVmxTscOffsetting : 1;
    /** VMX: Supports HLT exiting. */
    uint32_t fVmxHltExit : 1;
    /** VMX: Supports INVLPG exiting. */
    uint32_t fVmxInvlpgExit : 1;
    /** VMX: Supports MWAIT exiting. */
    uint32_t fVmxMwaitExit : 1;
    /** VMX: Supports RDPMC exiting. */
    uint32_t fVmxRdpmcExit : 1;
    /** VMX: Supports RDTSC exiting. */
    uint32_t fVmxRdtscExit : 1;
    /** VMX: Supports CR3-load exiting. */
    uint32_t fVmxCr3LoadExit : 1;
    /** VMX: Supports CR3-store exiting. */
    uint32_t fVmxCr3StoreExit : 1;
    /** VMX: Supports tertiary processor-based VM-execution controls. */
    uint32_t fVmxTertiaryExecCtls : 1;
    /** VMX: Supports CR8-load exiting. */
    uint32_t fVmxCr8LoadExit : 1;
    /** VMX: Supports CR8-store exiting. */
    uint32_t fVmxCr8StoreExit : 1;
    /** VMX: Supports TPR shadow. */
    uint32_t fVmxUseTprShadow : 1;
    /** VMX: Supports NMI-window exiting. */
    uint32_t fVmxNmiWindowExit : 1;
    /** VMX: Supports Mov-DRx exiting. */
    uint32_t fVmxMovDRxExit : 1;
    /** VMX: Supports Unconditional I/O exiting. */
    uint32_t fVmxUncondIoExit : 1;
    /** VMX: Supports I/O bitmaps. */
    uint32_t fVmxUseIoBitmaps : 1;
    /** VMX: Supports Monitor Trap Flag. */
    uint32_t fVmxMonitorTrapFlag : 1;
    /** VMX: Supports MSR bitmap. */
    uint32_t fVmxUseMsrBitmaps : 1;
    /** VMX: Supports MONITOR exiting. */
    uint32_t fVmxMonitorExit : 1;
    /** VMX: Supports PAUSE exiting. */
    uint32_t fVmxPauseExit : 1;
    /** VMX: Supports secondary processor-based VM-execution controls. */
    uint32_t fVmxSecondaryExecCtls : 1;
    /** @} */

    /** @name VMX Secondary processor-based controls.
     * @{ */
    /** VMX: Supports virtualize-APIC access. */
    uint32_t fVmxVirtApicAccess : 1;
    /** VMX: Supports EPT (Extended Page Tables). */
    uint32_t fVmxEpt : 1;
    /** VMX: Supports descriptor-table exiting. */
    uint32_t fVmxDescTableExit : 1;
    /** VMX: Supports RDTSCP. */
    uint32_t fVmxRdtscp : 1;
    /** VMX: Supports virtualize-x2APIC mode. */
    uint32_t fVmxVirtX2ApicMode : 1;
    /** VMX: Supports VPID. */
    uint32_t fVmxVpid : 1;
    /** VMX: Supports WBINVD exiting. */
    uint32_t fVmxWbinvdExit : 1;
    /** VMX: Supports Unrestricted guest. */
    uint32_t fVmxUnrestrictedGuest : 1;
    /** VMX: Supports APIC-register virtualization. */
    uint32_t fVmxApicRegVirt : 1;
    /** VMX: Supports virtual-interrupt delivery. */
    uint32_t fVmxVirtIntDelivery : 1;
    /** VMX: Supports Pause-loop exiting. */
    uint32_t fVmxPauseLoopExit : 1;
    /** VMX: Supports RDRAND exiting. */
    uint32_t fVmxRdrandExit : 1;
    /** VMX: Supports INVPCID. */
    uint32_t fVmxInvpcid : 1;
    /** VMX: Supports VM functions. */
    uint32_t fVmxVmFunc : 1;
    /** VMX: Supports VMCS shadowing. */
    uint32_t fVmxVmcsShadowing : 1;
    /** VMX: Supports RDSEED exiting. */
    uint32_t fVmxRdseedExit : 1;
    /** VMX: Supports PML. */
    uint32_t fVmxPml : 1;
    /** VMX: Supports EPT-violations \#VE. */
    uint32_t fVmxEptXcptVe : 1;
    /** VMX: Supports XSAVES/XRSTORS. */
    uint32_t fVmxXsavesXrstors : 1;
    /** VMX: Supports TSC scaling. */
    uint32_t fVmxUseTscScaling : 1;
    /** @} */

    /** @name VMX Tertiary processor-based controls.
     * @{ */
    /** VMX: Supports LOADIWKEY exiting. */
    uint32_t fVmxLoadIwKeyExit : 1;
    /** @} */

    /** @name VMX VM-entry controls.
     * @{ */
    /** VMX: Supports load-debug controls on VM-entry. */
    uint32_t fVmxEntryLoadDebugCtls : 1;
    /** VMX: Supports IA32e mode guest. */
    uint32_t fVmxIa32eModeGuest : 1;
    /** VMX: Supports load guest EFER MSR on VM-entry. */
    uint32_t fVmxEntryLoadEferMsr : 1;
    /** VMX: Supports load guest PAT MSR on VM-entry. */
    uint32_t fVmxEntryLoadPatMsr : 1;
    /** @} */

    /** @name VMX VM-exit controls.
     * @{ */
    /** VMX: Supports save debug controls on VM-exit. */
    uint32_t fVmxExitSaveDebugCtls : 1;
    /** VMX: Supports host-address space size. */
    uint32_t fVmxHostAddrSpaceSize : 1;
    /** VMX: Supports acknowledge external interrupt on VM-exit. */
    uint32_t fVmxExitAckExtInt : 1;
    /** VMX: Supports save guest PAT MSR on VM-exit. */
    uint32_t fVmxExitSavePatMsr : 1;
    /** VMX: Supports load host PAT MSR on VM-exit. */
    uint32_t fVmxExitLoadPatMsr : 1;
    /** VMX: Supports save guest EFER MSR on VM-exit. */
    uint32_t fVmxExitSaveEferMsr : 1;
    /** VMX: Supports load host EFER MSR on VM-exit. */
    uint32_t fVmxExitLoadEferMsr : 1;
    /** VMX: Supports save VMX preemption timer on VM-exit. */
    uint32_t fVmxSavePreemptTimer : 1;
    /** @} */

    /** @name VMX Miscellaneous data.
     * @{ */
    /** VMX: Supports storing EFER.LMA into IA32e-mode guest field on VM-exit. */
    uint32_t fVmxExitSaveEferLma : 1;
    /** VMX: Whether Intel PT (Processor Trace) is supported in VMX mode or not. */
    uint32_t fVmxIntelPt : 1;
    /** VMX: Supports VMWRITE to any valid VMCS field incl. read-only fields, otherwise
     * VMWRITE cannot modify read-only VM-exit information fields. */
    uint32_t fVmxVmwriteAll : 1;
    /** VMX: Supports injection of software interrupts and ICEBP on VM-entry for
     * zero-length instructions. */
    uint32_t fVmxEntryInjectSoftInt : 1;
    /** @} */

    /** VMX: Padding / reserved for future features. */
    uint32_t fVmxPadding1 : 31;
} CPUMFEATURES;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMFEATURES, 48);
#endif
/** Pointer to a CPU feature structure. */
typedef CPUMFEATURES *PCPUMFEATURES;
/** Pointer to a const CPU feature structure. */
typedef CPUMFEATURES const *PCCPUMFEATURES;

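/*
 * Usage sketch (illustrative): CPUMFEATURES is consumed by testing the
 * exploded bits directly, e.g. gating a nested-VMX code path on both VMX
 * and EPT support.  The helper below is a hypothetical example, not code
 * from the VMM.
 *
 *     static bool cpumExampleCanUseNestedEpt(PCCPUMFEATURES pHostFeatures)
 *     {
 *         return pHostFeatures->fVmx
 *             && pHostFeatures->fVmxSecondaryExecCtls
 *             && pHostFeatures->fVmxEpt;
 *     }
 */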

/**
 * CPU database entry.
 */
typedef struct CPUMDBENTRY
{
    /** The CPU name. */
    const char *pszName;
    /** The full CPU name. */
    const char *pszFullName;
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t enmVendor;
    /** The CPU family. */
    uint8_t uFamily;
    /** The CPU model. */
    uint8_t uModel;
    /** The CPU stepping. */
    uint8_t uStepping;
    /** The microarchitecture. */
    CPUMMICROARCH enmMicroarch;
    /** Scalable bus frequency used for reporting other frequencies. */
    uint64_t uScalableBusFreq;
    /** Flags - CPUMDB_F_XXX. */
    uint32_t fFlags;
    /** The maximum physical address width of the CPU.  This should correspond to
     * the value in CPUID leaf 0x80000008 when present. */
    uint8_t cMaxPhysAddrWidth;
    /** The MXCSR mask. */
    uint32_t fMxCsrMask;
    /** Pointer to an array of CPUID leaves. */
    PCCPUMCPUIDLEAF paCpuIdLeaves;
    /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
    uint32_t cCpuIdLeaves;
    /** The method used to deal with unknown CPUID leaves. */
    CPUMUNKNOWNCPUID enmUnknownCpuId;
    /** The default unknown CPUID value. */
    CPUMCPUID DefUnknownCpuId;

    /** MSR mask. Several microarchitectures ignore the higher bits of ECX in
     * the RDMSR and WRMSR instructions. */
    uint32_t fMsrMask;

    /** The number of ranges in the table pointed to by paMsrRanges. */
    uint32_t cMsrRanges;
    /** MSR ranges for this CPU. */
    PCCPUMMSRRANGE paMsrRanges;
} CPUMDBENTRY;
/** Pointer to a const CPU database entry. */
typedef CPUMDBENTRY const *PCCPUMDBENTRY;

/** @name CPUMDB_F_XXX - CPUMDBENTRY::fFlags
 * @{ */
/** Should execute all in IEM.
 * @todo Implement this - currently done in Main... */
#define CPUMDB_F_EXECUTE_ALL_IN_IEM RT_BIT_32(0)
/** @} */



#ifndef VBOX_FOR_DTRACE_LIB

/** @name Guest Register Getters.
 * @{ */
VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR);
VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit);
VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden);
VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu);
VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue);
VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu);
VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t iLeaf, uint32_t iSubLeaf,
                                uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu);
VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PCVMCPU pVCpu);
VMM_INT_DECL(uint64_t) CPUMGetGuestIa32SmmMonitorCtl(PCVMCPU pVCpu);
VMMDECL(VBOXSTRICTRC) CPUMQueryGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *puValue);
VMMDECL(VBOXSTRICTRC) CPUMSetGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t uValue);
VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM);
VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM);
VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth);
VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM);
VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM);
/** @} */

/** @name Guest Register Setters.
 * @{ */
VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0);
VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
1504VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0);
1505VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1);
1506VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2);
1507VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3);
1508VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
1509VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7);
1510VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value);
1511VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue);
1512VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
1513VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
1514VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
1515VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
1516VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
1517VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
1518VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
1519VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
1520VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
1521VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
1522VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
1523VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
1524VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
1525VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
1526VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
1527VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
1528VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
1529VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1530VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1531VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1532VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
1533VMMDECL(void) CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
1534VMM_INT_DECL(void) CPUMSetGuestTscAux(PVMCPUCC pVCpu, uint64_t uValue);
1535VMM_INT_DECL(uint64_t) CPUMGetGuestTscAux(PVMCPUCC pVCpu);
1536VMM_INT_DECL(void) CPUMSetGuestSpecCtrl(PVMCPUCC pVCpu, uint64_t uValue);
1537VMM_INT_DECL(uint64_t) CPUMGetGuestSpecCtrl(PVMCPUCC pVCpu);
1538VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM);
1539VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes);
1540/** @} */
1541
1542
1543/** @name Misc Guest Predicate Functions.
1544 * @{ */
1545VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
1546VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu);
1547VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu);
1548VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu);
1549VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu);
1550VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu);
1551VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu);
1552VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu);
1553VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu);
1554VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu);
1555VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu);
1556/** @} */
1557
1558/** @name Nested Hardware-Virtualization Helpers.
1559 * @{ */
1560VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu);
1561VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu);
1562VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1563VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1564
1565/* SVM helpers. */
1566VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1567VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1568VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx);
1569VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx);
1570VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
1571VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1572 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
1573 PSVMIOIOEXITINFO pIoExitInfo);
1574VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
1575
1576/* VMX helpers. */
1577VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField);
1578VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess);
1579VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3);
1580VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc);
1581VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick);
1582VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu);
1583VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr);
1584/** @} */
1585
1586/** @name Externalized State Helpers.
1587 * @{ */
1588/** @def CPUM_ASSERT_NOT_EXTRN
1589 * Macro for asserting that the state specified by @a a_fNotExtrn is present.
1590 *
1591 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1592 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
1593 *
1594 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1595 */
1596#define CPUM_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
1597 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fNotExtrn)), \
1598 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
1599
1600/** @def CPUM_IMPORT_EXTRN_RET
1601 * Macro for making sure the state specified by @a a_fExtrnImport is present,
1602 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1603 *
1604 * Will return if CPUMImportGuestStateOnDemand() fails.
1605 *
1606 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1607 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1608 * @thread EMT(a_pVCpu)
1609 *
1610 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1611 */
1612#define CPUM_IMPORT_EXTRN_RET(a_pVCpu, a_fExtrnImport) \
1613 do { \
1614 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1615 { /* already present, consider this likely */ } \
1616 else \
1617 { \
1618 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1619 AssertRCReturn(rcCpumImport, rcCpumImport); \
1620 } \
1621 } while (0)
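/* A minimal usage sketch for the macro above (hypothetical exit handler;
 * assumes VMCPU_INCL_CPUM_GST_CTX is defined so GstCtx is visible):
 * @code
 * static VBOXSTRICTRC hypotheticalHltExitHandler(PVMCPUCC pVCpu)
 * {
 *     // Make sure RIP and RFLAGS are present before using them; the macro
 *     // imports them on demand and returns on failure.
 *     CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *     pVCpu->cpum.GstCtx.rip += 1; // skip the one-byte HLT
 *     return pVCpu->cpum.GstCtx.eflags.Bits.u1IF ? VINF_EM_HALT : VINF_SUCCESS;
 * }
 * @endcode
 */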
1622
1623/** @def CPUM_IMPORT_EXTRN_RCSTRICT
1624 * Macro for making sure the state specified by @a a_fExtrnImport is present,
1625 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1626 *
1627 * Will update a_rcStrict if CPUMImportGuestStateOnDemand() fails.
1628 *
1629 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1630 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1631 * @param a_rcStrict Strict status code variable to update on failure.
1632 * @thread EMT(a_pVCpu)
1633 *
1634 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1635 */
1636#define CPUM_IMPORT_EXTRN_RCSTRICT(a_pVCpu, a_fExtrnImport, a_rcStrict) \
1637 do { \
1638 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1639 { /* already present, consider this likely */ } \
1640 else \
1641 { \
1642 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1643 AssertStmt(RT_SUCCESS(rcCpumImport) || RT_FAILURE_NP(a_rcStrict), a_rcStrict = rcCpumImport); \
1644 } \
1645 } while (0)
1646
1647VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport);
1648/** @} */
1649
1650#if (!defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS) && defined(RT_ARCH_AMD64)) || defined(DOXYGEN_RUNNING)
1651/** @name Inlined Guest Getters and predicates Functions.
1652 * @{ */
1653
1654/**
1655 * Gets valid CR0 bits for the guest.
1656 *
1657 * @returns Valid CR0 bits.
1658 */
1659DECLINLINE(uint64_t) CPUMGetGuestCR0ValidMask(void)
1660{
1661 return ( X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
1662 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
1663 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG);
1664}
1665
1666/**
1667 * Tests if the guest is running in real mode or not.
1668 *
1669 * @returns true if in real mode, otherwise false.
1670 * @param pCtx Current CPU context.
1671 */
1672DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCCPUMCTX pCtx)
1673{
1674 return !(pCtx->cr0 & X86_CR0_PE);
1675}
1676
1677/**
1678 * Tests if the guest is running in real or virtual 8086 mode.
1679 *
1680 * @returns @c true if it is, @c false if not.
1681 * @param pCtx Current CPU context.
1682 */
1683DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCCPUMCTX pCtx)
1684{
1685 return !(pCtx->cr0 & X86_CR0_PE)
1686 || pCtx->eflags.Bits.u1VM; /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
1687}
1688
1689/**
1690 * Tests if the guest is running in virtual 8086 mode.
1691 *
1692 * @returns @c true if it is, @c false if not.
1693 * @param pCtx Current CPU context.
1694 */
1695DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCCPUMCTX pCtx)
1696{
1697 return (pCtx->eflags.Bits.u1VM == 1);
1698}
1699
1700/**
1701 * Tests if the guest is running in paged protected mode or not.
1702 *
1703 * @returns true if in paged protected mode, otherwise false.
1704 * @param pCtx Current CPU context.
1705 */
1706DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
1707{
1708 return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1709}
1710
1711/**
1712 * Tests if the guest is running in long mode or not.
1713 *
1714 * @returns true if in long mode, otherwise false.
1715 * @param pCtx Current CPU context.
1716 */
1717DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx)
1718{
1719 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1720}
1721
1722VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
1723
1724/**
1725 * Tests if the guest is running in 64 bits mode or not.
1726 *
1727 * @returns true if in 64 bits protected mode, otherwise false.
1728 * @param pCtx Current CPU context.
1729 */
1730DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
1731{
1732 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
1733 return false;
1734 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
1735 return CPUMIsGuestIn64BitCodeSlow(pCtx);
1736 return pCtx->cs.Attr.n.u1Long;
1737}
1738
1739/**
1740 * Tests if the guest has paging enabled or not.
1741 *
1742 * @returns true if paging is enabled, otherwise false.
1743 * @param pCtx Current CPU context.
1744 */
1745DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCCPUMCTX pCtx)
1746{
1747 return !!(pCtx->cr0 & X86_CR0_PG);
1748}
1749
1750/**
1751 * Tests if the guest is running in PAE mode or not.
1752 *
1753 * @returns true if in PAE mode, otherwise false.
1754 * @param pCtx Current CPU context.
1755 */
1756DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCCPUMCTX pCtx)
1757{
1758 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1759 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1760 return ( (pCtx->cr4 & X86_CR4_PAE)
1761 && CPUMIsGuestPagingEnabledEx(pCtx)
1762 && !(pCtx->msrEFER & MSR_K6_EFER_LMA));
1763}
1764
1765/**
1766 * Tests if the guest has AMD SVM enabled or not.
1767 *
1768 * @returns true if SVM is enabled, otherwise false.
1769 * @param pCtx Current CPU context.
1770 */
1771DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
1772{
1773 return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
1774}
1775
1776/**
1777 * Tests if the guest has Intel VT-x enabled or not.
1778 *
1779 * @returns true if VMX is enabled, otherwise false.
1780 * @param pCtx Current CPU context.
1781 */
1782DECLINLINE(bool) CPUMIsGuestVmxEnabled(PCCPUMCTX pCtx)
1783{
1784 return RT_BOOL(pCtx->cr4 & X86_CR4_VMXE);
1785}
1786
1787/**
1788 * Returns the guest's global-interrupt (GIF) flag.
1789 *
1790 * @returns true when global-interrupts are enabled, otherwise false.
1791 * @param pCtx Current CPU context.
1792 */
1793DECLINLINE(bool) CPUMGetGuestGif(PCCPUMCTX pCtx)
1794{
1795 return pCtx->hwvirt.fGif;
1796}
1797
1798/**
1799 * Sets the guest's global-interrupt flag (GIF).
1800 *
1801 * @param pCtx Current CPU context.
1802 * @param fGif The value to set.
1803 */
1804DECLINLINE(void) CPUMSetGuestGif(PCPUMCTX pCtx, bool fGif)
1805{
1806 pCtx->hwvirt.fGif = fGif;
1807}
1808
1809/**
1810 * Checks if we are executing inside an SVM nested hardware-virtualized guest.
1811 *
1812 * @returns @c true if in SVM nested-guest mode, @c false otherwise.
1813 * @param pCtx Current CPU context.
1814 */
1815DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
1816{
1817 /*
1818 * With AMD-V, the VMRUN intercept is a pre-requisite to entering SVM guest-mode.
1819 * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
1820 */
1821#ifndef IN_RC
1822 if ( pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM
1823 || !(pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
1824 return false;
1825 return true;
1826#else
1827 NOREF(pCtx);
1828 return false;
1829#endif
1830}
1831
1832/**
1833 * Checks if the guest is in VMX non-root operation.
1834 *
1835 * @returns @c true if in VMX non-root operation, @c false otherwise.
1836 * @param pCtx Current CPU context.
1837 */
1838DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx)
1839{
1840#ifndef IN_RC
1841 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1842 return false;
1843 Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
1844 return pCtx->hwvirt.vmx.fInVmxNonRootMode;
1845#else
1846 NOREF(pCtx);
1847 return false;
1848#endif
1849}
1850
1851/**
1852 * Checks if we are executing inside an SVM or VMX nested hardware-virtualized
1853 * guest.
1854 *
1855 * @returns @c true if in nested-guest mode, @c false otherwise.
1856 * @param pCtx Current CPU context.
1857 */
1858DECLINLINE(bool) CPUMIsGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
1859{
1860 return CPUMIsGuestInVmxNonRootMode(pCtx) || CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
1861}
1862
1863/**
1864 * Checks if the guest is in VMX root operation.
1865 *
1866 * @returns @c true if in VMX root operation, @c false otherwise.
1867 * @param pCtx Current CPU context.
1868 */
1869DECLINLINE(bool) CPUMIsGuestInVmxRootMode(PCCPUMCTX pCtx)
1870{
1871#ifndef IN_RC
1872 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1873 return false;
1874 return pCtx->hwvirt.vmx.fInVmxRootMode;
1875#else
1876 NOREF(pCtx);
1877 return false;
1878#endif
1879}
1880
1881# ifndef IN_RC
1882
1883/**
1884 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
1885 * active.
1886 *
1887 * @returns @c true if the intercept is set, @c false otherwise.
1888 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1889 * @param pCtx Pointer to the context.
1890 * @param fIntercept The SVM control/instruction intercept, see
1891 * SVM_CTRL_INTERCEPT_*.
1892 */
1893DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fIntercept)
1894{
1895 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1896 return false;
1897 uint64_t u64Intercepts;
1898 if (!HMGetGuestSvmCtrlIntercepts(pVCpu, &u64Intercepts))
1899 u64Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl;
1900 return RT_BOOL(u64Intercepts & fIntercept);
1901}
1902
1903/**
1904 * Checks if the nested-guest VMCB has the specified CR read intercept active.
1905 *
1906 * @returns @c true if the intercept is set, @c false otherwise.
1907 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1908 * @param pCtx Pointer to the context.
1909 * @param uCr The CR register number (0 to 15).
1910 */
1911DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1912{
1913 Assert(uCr < 16);
1914 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1915 return false;
1916 uint16_t u16Intercepts;
1917 if (!HMGetGuestSvmReadCRxIntercepts(pVCpu, &u16Intercepts))
1918 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptRdCRx;
1919 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
1920}
1921
1922/**
1923 * Checks if the nested-guest VMCB has the specified CR write intercept active.
1924 *
1925 * @returns @c true if the intercept is set, @c false otherwise.
1926 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1927 * @param pCtx Pointer to the context.
1928 * @param uCr The CR register number (0 to 15).
1929 */
1930DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1931{
1932 Assert(uCr < 16);
1933 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1934 return false;
1935 uint16_t u16Intercepts;
1936 if (!HMGetGuestSvmWriteCRxIntercepts(pVCpu, &u16Intercepts))
1937 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptWrCRx;
1938 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
1939}
1940
1941/**
1942 * Checks if the nested-guest VMCB has the specified DR read intercept active.
1943 *
1944 * @returns @c true if the intercept is set, @c false otherwise.
1945 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1946 * @param pCtx Pointer to the context.
1947 * @param uDr The DR register number (0 to 15).
1948 */
1949DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1950{
1951 Assert(uDr < 16);
1952 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1953 return false;
1954 uint16_t u16Intercepts;
1955 if (!HMGetGuestSvmReadDRxIntercepts(pVCpu, &u16Intercepts))
1956 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptRdDRx;
1957 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
1958}
1959
1960/**
1961 * Checks if the nested-guest VMCB has the specified DR write intercept active.
1962 *
1963 * @returns @c true if the intercept is set, @c false otherwise.
1964 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1965 * @param pCtx Pointer to the context.
1966 * @param uDr The DR register number (0 to 15).
1967 */
1968DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1969{
1970 Assert(uDr < 16);
1971 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1972 return false;
1973 uint16_t u16Intercepts;
1974 if (!HMGetGuestSvmWriteDRxIntercepts(pVCpu, &u16Intercepts))
1975 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptWrDRx;
1976 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
1977}
1978
1979/**
1980 * Checks if the nested-guest VMCB has the specified exception intercept active.
1981 *
1982 * @returns @c true if the intercept is active, @c false otherwise.
1983 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1984 * @param pCtx Pointer to the context.
1985 * @param uVector The exception / interrupt vector.
1986 */
1987DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
1988{
1989 Assert(uVector <= X86_XCPT_LAST);
1990 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1991 return false;
1992 uint32_t u32Intercepts;
1993 if (!HMGetGuestSvmXcptIntercepts(pVCpu, &u32Intercepts))
1994 u32Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u32InterceptXcpt;
1995 return RT_BOOL(u32Intercepts & RT_BIT(uVector));
1996}
1997
1998/**
1999 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
2000 *
2001 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
2002 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2003 * @param pCtx Pointer to the context.
2004 *
2005 * @remarks Should only be called when the SVM feature is exposed to the guest.
2006 */
2007DECLINLINE(bool) CPUMIsGuestSvmVirtIntrMasking(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2008{
2009 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2010 return false;
2011 bool fVIntrMasking;
2012 if (!HMGetGuestSvmVirtIntrMasking(pVCpu, &fVIntrMasking))
2013 fVIntrMasking = pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u1VIntrMasking;
2014 return fVIntrMasking;
2015}
2016
2017/**
2018 * Checks if the nested-guest VMCB has nested-paging enabled.
2019 *
2020 * @returns @c true if nested-paging is enabled, @c false otherwise.
2021 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2022 * @param pCtx Pointer to the context.
2023 *
2024 * @remarks Should only be called when the SVM feature is exposed to the guest.
2025 */
2026DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2027{
2028 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2029 return false;
2030 bool fNestedPaging;
2031 if (!HMGetGuestSvmNestedPaging(pVCpu, &fNestedPaging))
2032 fNestedPaging = pCtx->hwvirt.svm.Vmcb.ctrl.NestedPagingCtrl.n.u1NestedPaging;
2033 return fNestedPaging;
2034}
2035
2036/**
2037 * Gets the nested-guest VMCB pause-filter count.
2038 *
2039 * @returns The pause-filter count.
2040 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2041 * @param pCtx Pointer to the context.
2042 *
2043 * @remarks Should only be called when the SVM feature is exposed to the guest.
2044 */
2045DECLINLINE(uint16_t) CPUMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2046{
2047 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2048 return 0;
2049 uint16_t u16PauseFilterCount;
2050 if (!HMGetGuestSvmPauseFilterCount(pVCpu, &u16PauseFilterCount))
2051 u16PauseFilterCount = pCtx->hwvirt.svm.Vmcb.ctrl.u16PauseFilterCount;
2052 return u16PauseFilterCount;
2053}
2054
2055/**
2056 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
2057 *
2058 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2059 * @param pCtx Pointer to the context.
2060 * @param cbInstr The length of the current instruction in bytes.
2061 *
2062 * @remarks Should only be called when the SVM feature is exposed to the guest.
2063 */
2064DECLINLINE(void) CPUMGuestSvmUpdateNRip(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr)
2065{
2066 RT_NOREF(pVCpu);
2067 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
2068 pCtx->hwvirt.svm.Vmcb.ctrl.u64NextRIP = pCtx->rip + cbInstr;
2069}
2070
2071/**
2072 * Checks whether any of the given Pin-based VM-execution controls is set when
2073 * executing a nested-guest.
2074 *
2075 * @returns @c true if set, @c false otherwise.
2076 * @param pCtx Pointer to the context.
2077 * @param uPinCtls The Pin-based VM-execution controls to check.
2078 *
2079 * @remarks This does not check if all given controls are set if more than one
2080 * control is passed in @a uPinCtls.
2081 */
2082DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls)
2083{
2084 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2085 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & uPinCtls);
2086}
2087
2088/**
2089 * Checks whether any of the given Processor-based VM-execution controls is set
2090 * when executing a nested-guest.
2091 *
2092 * @returns @c true if set, @c false otherwise.
2093 * @param pCtx Pointer to the context.
2094 * @param uProcCtls The Processor-based VM-execution controls to check.
2095 *
2096 * @remarks This does not check if all given controls are set if more than one
2097 * control is passed in @a uProcCtls.
2098 */
2099DECLINLINE(bool) CPUMIsGuestVmxProcCtlsSet(PCCPUMCTX pCtx, uint32_t uProcCtls)
2100{
2101 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2102 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls & uProcCtls);
2103}
2104
2105/**
2106 * Checks whether any of the given Secondary Processor-based VM-execution controls
2107 * is set when executing a nested-guest.
2108 *
2109 * @returns @c true if set, @c false otherwise.
2110 * @param pCtx Pointer to the context.
2111 * @param uProcCtls2 The Secondary Processor-based VM-execution controls to
2112 * check.
2113 *
2114 * @remarks This does not check if all given controls are set if more than one
2115 * control is passed in @a uProcCtls2.
2116 */
2117DECLINLINE(bool) CPUMIsGuestVmxProcCtls2Set(PCCPUMCTX pCtx, uint32_t uProcCtls2)
2118{
2119 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2120 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls2 & uProcCtls2);
2121}
2122
2123/**
2124 * Checks whether any of the given Tertiary Processor-based VM-execution controls
2125 * is set when executing a nested-guest.
2126 *
2127 * @returns @c true if set, @c false otherwise.
2128 * @param pCtx Pointer to the context.
2129 * @param uProcCtls3 The Tertiary Processor-based VM-execution controls to
2130 * check.
2131 *
2132 * @remarks This does not check if all given controls are set if more than one
2133 * control is passed in @a uProcCtls3.
2134 */
2135DECLINLINE(bool) CPUMIsGuestVmxProcCtls3Set(PCCPUMCTX pCtx, uint64_t uProcCtls3)
2136{
2137 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2138 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u64ProcCtls3.u & uProcCtls3);
2139}
2140
2141/**
2142 * Checks whether any of the given VM-exit controls is set when executing a
2143 * nested-guest.
2144 *
2145 * @returns @c true if set, @c false otherwise.
2146 * @param pCtx Pointer to the context.
2147 * @param uExitCtls The VM-exit controls to check.
2148 *
2149 * @remarks This does not check if all given controls are set if more than one
2150 * control is passed in @a uExitCtls.
2151 */
2152DECLINLINE(bool) CPUMIsGuestVmxExitCtlsSet(PCCPUMCTX pCtx, uint32_t uExitCtls)
2153{
2154 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2155 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ExitCtls & uExitCtls);
2156}
2157
2158/**
2159 * Checks whether any of the given VM-entry controls is set when executing a
2160 * nested-guest.
2161 *
2162 * @returns @c true if set, @c false otherwise.
2163 * @param pCtx Pointer to the context.
2164 * @param uEntryCtls The VM-entry controls to check.
2165 *
2166 * @remarks This does not check if all given controls are set if more than one
2167 * control is passed in @a uEntryCtls.
2168 */
2169DECLINLINE(bool) CPUMIsGuestVmxEntryCtlsSet(PCCPUMCTX pCtx, uint32_t uEntryCtls)
2170{
2171 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2172 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32EntryCtls & uEntryCtls);
2173}
2174
2175/**
2176 * Checks whether events injected in the nested-guest are subject to VM-exit checks.
2177 *
2178 * @returns @c true if set, @c false otherwise.
2179 * @param pCtx Pointer to the context.
2180 */
2181DECLINLINE(bool) CPUMIsGuestVmxInterceptEvents(PCCPUMCTX pCtx)
2182{
2183 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2184 return pCtx->hwvirt.vmx.fInterceptEvents;
2185}
2186
2187/**
2188 * Sets whether events injected in the nested-guest are subject to VM-exit checks.
2189 *
2190 * @param pCtx Pointer to the context.
2191 * @param fInterceptEvents Whether to subject injected events to VM-exits or not.
2192 */
2193DECLINLINE(void) CPUMSetGuestVmxInterceptEvents(PCPUMCTX pCtx, bool fInterceptEvents)
2194{
2195 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2196 pCtx->hwvirt.vmx.fInterceptEvents = fInterceptEvents;
2197}
2198
2199/**
2200 * Checks whether the given exception causes a VM-exit.
2201 *
2202 * The exception types include hardware exceptions, software exceptions (#BP, #OF)
2203 * and privileged software exceptions (#DB generated by INT1/ICEBP).
2204 *
2205 * Software interrupts do -not- cause VM-exits and hence must not be used with this
2206 * function.
2207 *
2208 * @returns @c true if the exception causes a VM-exit, @c false otherwise.
2209 * @param pCtx Pointer to the context.
2210 * @param uVector The exception vector.
2211 * @param uErrCode The error code associated with the exception. Pass 0 if not
2212 * applicable.
2213 */
2214DECLINLINE(bool) CPUMIsGuestVmxXcptInterceptSet(PCCPUMCTX pCtx, uint8_t uVector, uint32_t uErrCode)
2215{
2216 Assert(uVector <= X86_XCPT_LAST);
2217
2218 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2219
2220 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
2221 if (uVector == X86_XCPT_NMI)
2222 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
2223
2224 /* Page-faults are subject to masking using their error code. */
2225 uint32_t fXcptBitmap = pCtx->hwvirt.vmx.Vmcs.u32XcptBitmap;
2226 if (uVector == X86_XCPT_PF)
2227 {
2228 uint32_t const fXcptPFMask = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMask;
2229 uint32_t const fXcptPFMatch = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMatch;
2230 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
2231 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
2232 }
2233
2234 /* Consult the exception bitmap for all other exceptions. */
2235 if (fXcptBitmap & RT_BIT(uVector))
2236 return true;
2237 return false;
2238}
2239
2240/**
2241 * Implements VMSucceed for VMX instruction success.
2242 *
2243 * @param pCtx Pointer to the context.
2244 */
2245DECLINLINE(void) CPUMSetGuestVmxVmSucceed(PCPUMCTX pCtx)
2246{
2247 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2248}
2249
2250/**
2251 * Implements VMFailInvalid for VMX instruction failure.
2252 *
2253 * @param pCtx Pointer to the context.
2254 */
2255DECLINLINE(void) CPUMSetGuestVmxVmFailInvalid(PCPUMCTX pCtx)
2256{
2257 pCtx->eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2258 pCtx->eflags.u32 |= X86_EFL_CF;
2259}
2260
2261/**
2262 * Implements VMFailValid for VMX instruction failure.
2263 *
2264 * @param pCtx Pointer to the context.
2265 * @param enmInsErr The VM instruction error.
2266 */
2267DECLINLINE(void) CPUMSetGuestVmxVmFailValid(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2268{
2269 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2270 pCtx->eflags.u32 |= X86_EFL_ZF;
2271 pCtx->hwvirt.vmx.Vmcs.u32RoVmInstrError = enmInsErr;
2272}
2273
2274/**
2275 * Implements VMFail for VMX instruction failure.
2276 *
2277 * @param pCtx Pointer to the context.
2278 * @param enmInsErr The VM instruction error.
2279 */
2280DECLINLINE(void) CPUMSetGuestVmxVmFail(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2281{
2282 if (pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
2283 CPUMSetGuestVmxVmFailValid(pCtx, enmInsErr);
2284 else
2285 CPUMSetGuestVmxVmFailInvalid(pCtx);
2286}
2287
2288/**
2289 * Returns the guest-physical address of the APIC-access page when executing a
2290 * nested-guest.
2291 *
2292 * @returns The APIC-access page guest-physical address.
2293 * @param pCtx Pointer to the context.
2294 */
2295DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCCPUMCTX pCtx)
2296{
2297 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2298 return pCtx->hwvirt.vmx.Vmcs.u64AddrApicAccess.u;
2299}
2300
2301/**
2302 * Gets the nested-guest CR0 subject to the guest/host mask and the read-shadow.
2303 *
2304 * @returns The nested-guest CR0.
2305 * @param pCtx Pointer to the context.
2306 * @param fGstHostMask The CR0 guest/host mask to use.
2307 */
2308DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr0(PCCPUMCTX pCtx, uint64_t fGstHostMask)
2309{
2310 /*
2311 * For each CR0 bit owned by the host, the corresponding bit from the
2312 * CR0 read shadow is loaded. For each CR0 bit that is not owned by the host,
2313 * the corresponding bit from the guest CR0 is loaded.
2314 *
2315 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2316 */
2317 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2318 uint64_t const uGstCr0 = pCtx->cr0;
2319 uint64_t const fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2320 return (fReadShadow & fGstHostMask) | (uGstCr0 & ~fGstHostMask);
2321}
2322
2323/**
2324 * Gets the nested-guest CR4 subject to the guest/host mask and the read-shadow.
2325 *
2326 * @returns The nested-guest CR4.
2327 * @param pCtx Pointer to the context.
2328 * @param fGstHostMask The CR4 guest/host mask to use.
2329 */
2330DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr4(PCCPUMCTX pCtx, uint64_t fGstHostMask)
2331{
2332 /*
2333 * For each CR4 bit owned by the host, the corresponding bit from the
2334 * CR4 read shadow is loaded. For each CR4 bit that is not owned by the host,
2335 * the corresponding bit from the guest CR4 is loaded.
2336 *
2337 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2338 */
2339 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2340 uint64_t const uGstCr4 = pCtx->cr4;
2341 uint64_t const fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
2342 return (fReadShadow & fGstHostMask) | (uGstCr4 & ~fGstHostMask);
2343}
2344
2345/**
2346 * Checks whether the LMSW access causes a VM-exit or not.
2347 *
2348 * @returns @c true if the LMSW access causes a VM-exit, @c false otherwise.
2349 * @param pCtx Pointer to the context.
2350 * @param uNewMsw The LMSW source operand (the Machine Status Word).
2351 */
2352DECLINLINE(bool) CPUMIsGuestVmxLmswInterceptSet(PCCPUMCTX pCtx, uint16_t uNewMsw)
2353{
2354 /*
2355 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
2356 *
2357 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
2358 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2359 */
2360 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2361
2362 uint32_t const fGstHostMask = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
2363 uint32_t const fReadShadow = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2364
2365 /*
2366 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
2367 * CR0.PE case first, before the rest of the bits in the MSW.
2368 *
2369 * If CR0.PE is owned by the host and CR0.PE differs between the
2370 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
2371 */
2372 if ( (fGstHostMask & X86_CR0_PE)
2373 && (uNewMsw & X86_CR0_PE)
2374 && !(fReadShadow & X86_CR0_PE))
2375 return true;
2376
2377 /*
2378 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
2379 * bits differ between the MSW (source operand) and the read-shadow, we must
2380 * cause a VM-exit.
2381 */
2382 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2383 if ((fReadShadow & fGstHostLmswMask) != (uNewMsw & fGstHostLmswMask))
2384 return true;
2385
2386 return false;
2387}
2388
2389/**
2390 * Checks whether the Mov-to-CR0/CR4 access causes a VM-exit or not.
2391 *
2392 * @returns @c true if the Mov CRX access causes a VM-exit, @c false otherwise.
2393 * @param pCtx Pointer to the context.
2394 * @param iCrReg The control register number (must be 0 or 4).
2395 * @param uNewCrX The CR0/CR4 value being written.
2396 */
2397DECLINLINE(bool) CPUMIsGuestVmxMovToCr0Cr4InterceptSet(PCCPUMCTX pCtx, uint8_t iCrReg, uint64_t uNewCrX)
2398{
2399 /*
2400 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
2401 * corresponding bits differ between the source operand and the read-shadow,
2402 * we must cause a VM-exit.
2403 *
2404 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2405 */
2406 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2407 Assert(iCrReg == 0 || iCrReg == 4);
2408
2409 uint64_t fGstHostMask;
2410 uint64_t fReadShadow;
2411 if (iCrReg == 0)
2412 {
2413 fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
2414 fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2415 }
2416 else
2417 {
2418 fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr4Mask.u;
2419 fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
2420 }
2421
2422 if ((fReadShadow & fGstHostMask) != (uNewCrX & fGstHostMask))
2423 {
2424 Assert(fGstHostMask != 0);
2425 return true;
2426 }
2427
2428 return false;
2429}
2430
2431/**
2432 * Returns whether the guest has an active, current VMCS.
2433 *
2434 * @returns @c true if the guest has an active, current VMCS, @c false otherwise.
2435 * @param pCtx Pointer to the context.
2436 */
2437DECLINLINE(bool) CPUMIsGuestVmxCurrentVmcsValid(PCCPUMCTX pCtx)
2438{
2439 RTGCPHYS const GCPhysVmcs = pCtx->hwvirt.vmx.GCPhysVmcs;
2440 return RT_BOOL(GCPhysVmcs != NIL_RTGCPHYS);
2441}
2442
2443/**
2444 * Gets the nested-guest virtual-APIC page.
2445 *
2446 * @returns The virtual-APIC page.
2447 * @param pCtx Pointer to the context.
2448 * @param pHCPhysVirtApicPage Where to store the host-physical address of the
2449 * virtual-APIC page.
2450 */
2451DECLINLINE(void *) CPUMGetGuestVmxVirtApicPage(PCCPUMCTX pCtx, PRTHCPHYS pHCPhysVirtApicPage)
2452{
2453 Assert(pHCPhysVirtApicPage);
2454 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
2455 *pHCPhysVirtApicPage = pCtx->hwvirt.vmx.HCPhysVirtApicPage;
2456 return pCtx->hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
2457}
2458
2459# endif /* !IN_RC */
2460
2461/**
2462 * Checks whether the VMX nested-guest is in a state to receive physical (APIC)
2463 * interrupts.
2464 *
2465 * @returns @c true if it's ready, @c false otherwise.
2466 * @param pCtx The guest-CPU context.
2467 */
2468DECLINLINE(bool) CPUMIsGuestVmxPhysIntrEnabled(PCCPUMCTX pCtx)
2469{
2470#ifdef IN_RC
2471 AssertReleaseFailedReturn(false);
2472#else
2473 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2474 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
2475 return true;
2476 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2477#endif
2478}
2479
2480/**
2481 * Checks whether the VMX nested-guest is blocking virtual-NMIs.
2482 *
2483 * @returns @c true if it's blocked, @c false otherwise.
2484 * @param pCtx The guest-CPU context.
2485 */
2486DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx)
2487{
2488#ifdef IN_RC
2489 RT_NOREF(pCtx);
2490 AssertReleaseFailedReturn(false);
2491#else
2492 /*
2493 * Return the state of virtual-NMI blocking, if we are executing a
2494 * VMX nested-guest with virtual-NMIs enabled.
2495 */
2496 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2497 Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
2498 return pCtx->hwvirt.vmx.fVirtNmiBlocking;
2499#endif
2500}
2501
2502/**
2503 * Sets or clears VMX nested-guest virtual-NMI blocking.
2504 *
2505 * @param pCtx The guest-CPU context.
2506 * @param fBlocking Whether virtual-NMI blocking is in effect or not.
2507 */
2508DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking)
2509{
2510#ifdef IN_RC
2511 RT_NOREF2(pCtx, fBlocking);
2512 AssertReleaseFailedReturnVoid();
2513#else
2514 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2515 Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
2516 pCtx->hwvirt.vmx.fVirtNmiBlocking = fBlocking;
2517#endif
2518}
2519
2520/**
2521 * Checks whether the VMX nested-guest is in a state to receive virtual interrupts
2522 * (those injected with the "virtual-interrupt delivery" feature).
2523 *
2524 * @returns @c true if it's ready, @c false otherwise.
2525 * @param pCtx The guest-CPU context.
2526 */
2527DECLINLINE(bool) CPUMIsGuestVmxVirtIntrEnabled(PCCPUMCTX pCtx)
2528{
2529#ifdef IN_RC
2530 RT_NOREF(pCtx);
2531 AssertReleaseFailedReturn(false);
2532#else
2533 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2534 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2535#endif
2536}
2537
2538/** @} */
2539#endif /* !IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS && RT_ARCH_AMD64 */
2540
2541
2542
2543/** @name Hypervisor Register Getters.
2544 * @{ */
2545VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu);
2546VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu);
2547VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu);
2548VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu);
2549VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu);
2550VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu);
2551VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
2552/** @} */
2553
2554/** @name Hypervisor Register Setters.
2555 * @{ */
2556VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
2557VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
2558VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
2559VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
2560VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
2561VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
2562VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
2563VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg);
2564/** @} */
2565
2566VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
2567#ifdef VBOX_INCLUDED_vmm_cpumctx_h
2568VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu);
2569#endif
2570VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu);
2571
2572/** @name Changed flags.
2573 * These flags are used to keep track of which important registers have
2574 * been changed since they were last reset. The only one allowed
2575 * to clear them is REM!
2576 *
2577 * @todo This is obsolete, but remains as it will be refactored for coordinating
2578 * IEM and NEM/HM later. Probably.
2579 * @{
2580 */
2581#define CPUM_CHANGED_FPU_REM RT_BIT(0)
2582#define CPUM_CHANGED_CR0 RT_BIT(1)
2583#define CPUM_CHANGED_CR4 RT_BIT(2)
2584#define CPUM_CHANGED_GLOBAL_TLB_FLUSH RT_BIT(3)
2585#define CPUM_CHANGED_CR3 RT_BIT(4)
2586#define CPUM_CHANGED_GDTR RT_BIT(5)
2587#define CPUM_CHANGED_IDTR RT_BIT(6)
2588#define CPUM_CHANGED_LDTR RT_BIT(7)
2589 #define CPUM_CHANGED_TR RT_BIT(8) /**< Currently unused. */
2590#define CPUM_CHANGED_SYSENTER_MSR RT_BIT(9)
2591 #define CPUM_CHANGED_HIDDEN_SEL_REGS RT_BIT(10) /**< Currently unused. */
2592#define CPUM_CHANGED_CPUID RT_BIT(11)
2593#define CPUM_CHANGED_ALL ( CPUM_CHANGED_FPU_REM \
2594 | CPUM_CHANGED_CR0 \
2595 | CPUM_CHANGED_CR4 \
2596 | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
2597 | CPUM_CHANGED_CR3 \
2598 | CPUM_CHANGED_GDTR \
2599 | CPUM_CHANGED_IDTR \
2600 | CPUM_CHANGED_LDTR \
2601 | CPUM_CHANGED_TR \
2602 | CPUM_CHANGED_SYSENTER_MSR \
2603 | CPUM_CHANGED_HIDDEN_SEL_REGS \
2604 | CPUM_CHANGED_CPUID )
2605/** @} */
2606
2607VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
2608VMMDECL(bool) CPUMSupportsXSave(PVM pVM);
2609VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM);
2610VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM);
2611VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
2612VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
2613VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
2614VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
2615VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
2616VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
2617VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu);
2618VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu);
2619VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu);
2620VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
2621VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM);
2622VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM);
2623VMMDECL(uint64_t) CPUMGetGuestEferMsrValidMask(PVM pVM);
2624VMMDECL(int) CPUMIsGuestEferMsrWriteValid(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
2625 uint64_t *puValidEfer);
2626VMMDECL(void) CPUMSetGuestEferMsrNoChecks(PVMCPUCC pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
2627VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue);
2628
2629
2630/** Guest CPU interruptibility level, see CPUMGetGuestInterruptibility(). */
2631typedef enum CPUMINTERRUPTIBILITY
2632{
2633 CPUMINTERRUPTIBILITY_INVALID = 0,
2634 CPUMINTERRUPTIBILITY_UNRESTRAINED,
2635 CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
2636 CPUMINTERRUPTIBILITY_INT_DISABLED,
2637 CPUMINTERRUPTIBILITY_INT_INHIBITED,
2638 CPUMINTERRUPTIBILITY_NMI_INHIBIT,
2639 CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
2640 CPUMINTERRUPTIBILITY_END,
2641 CPUMINTERRUPTIBILITY_32BIT_HACK = 0x7fffffff
2642} CPUMINTERRUPTIBILITY;
2643
2644VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
2645VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu);
2646VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock);
2647
2648/** @name Typical scalable bus frequency values.
2649 * @{ */
2650/** Special internal value indicating that we don't know the frequency.
2651 * @internal */
2652#define CPUM_SBUSFREQ_UNKNOWN UINT64_C(1)
2653#define CPUM_SBUSFREQ_100MHZ UINT64_C(100000000)
2654#define CPUM_SBUSFREQ_133MHZ UINT64_C(133333333)
2655#define CPUM_SBUSFREQ_167MHZ UINT64_C(166666666)
2656#define CPUM_SBUSFREQ_200MHZ UINT64_C(200000000)
2657#define CPUM_SBUSFREQ_267MHZ UINT64_C(266666666)
2658#define CPUM_SBUSFREQ_333MHZ UINT64_C(333333333)
2659#define CPUM_SBUSFREQ_400MHZ UINT64_C(400000000)
2660/** @} */
2661
2662
2663#ifdef IN_RING3
2664/** @defgroup grp_cpum_r3 The CPUM ring-3 API
2665 * @{
2666 */
2667
2668VMMR3DECL(int) CPUMR3Init(PVM pVM);
2669VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
2670VMMR3DECL(void) CPUMR3LogCpuIdAndMsrFeatures(PVM pVM);
2671VMMR3DECL(void) CPUMR3Relocate(PVM pVM);
2672VMMR3DECL(int) CPUMR3Term(PVM pVM);
2673VMMR3DECL(void) CPUMR3Reset(PVM pVM);
2674VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
2675VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM);
2676VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
2677
2678VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
2679VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
2680VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
2681 uint8_t bModel, uint8_t bStepping);
2682VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch);
2683VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
2684VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
2685VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
2686VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
2687VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor);
2688VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void);
2689
2690VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
2691
2692VMMR3DECL(uint32_t) CPUMR3DbGetEntries(void);
2693/** Pointer to CPUMR3DbGetEntries. */
2694typedef DECLCALLBACKPTR(uint32_t, PFNCPUMDBGETENTRIES, (void));
2695VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByIndex(uint32_t idxCpuDb);
2696/** Pointer to CPUMR3DbGetEntryByIndex. */
2697typedef DECLCALLBACKPTR(PCCPUMDBENTRY, PFNCPUMDBGETENTRYBYINDEX, (uint32_t idxCpuDb));
2698VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByName(const char *pszName);
2699/** Pointer to CPUMR3DbGetEntryByName. */
2700typedef DECLCALLBACKPTR(PCCPUMDBENTRY, PFNCPUMDBGETENTRYBYNAME, (const char *pszName));
2701/** @} */
2702#endif /* IN_RING3 */
2703
2704#ifdef IN_RING0
2705/** @defgroup grp_cpum_r0 The CPUM ring-0 API
2706 * @{
2707 */
2708VMMR0_INT_DECL(int) CPUMR0ModuleInit(void);
2709VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void);
2710VMMR0_INT_DECL(int) CPUMR0InitVM(PVMCC pVM);
2711DECLASM(void) CPUMR0RegisterVCpuThread(PVMCPUCC pVCpu);
2712DECLASM(void) CPUMR0TouchHostFpu(void);
2713VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVMCC pVM, PVMCPUCC pVCpu);
2714VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVMCC pVM, PVMCPUCC pVCpu);
2715VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu);
2716VMMR0_INT_DECL(int) CPUMR0SaveHostDebugState(PVMCC pVM, PVMCPUCC pVCpu);
2717VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu, bool fDr6);
2718VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPUCC pVCpu, bool fDr6);
2719
2720VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPUCC pVCpu, bool fDr6);
2721VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPUCC pVCpu, bool fDr6);
2722#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
2723VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPUCC pVCpu, uint32_t iHostCpuSet);
2724#endif
2725
2726/** @} */
2727#endif /* IN_RING0 */
2728
2729/** @defgroup grp_cpum_rz The CPUM raw-mode and ring-0 context API
2730 * @{
2731 */
2732VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPUCC pVCpu);
2733VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPUCC pVCpu);
2734VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPUCC pVCpu);
2735VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPUCC pVCpu);
2736VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPUCC pVCpu);
2737/** @} */
2738
2739
2740#endif /* !VBOX_FOR_DTRACE_LIB */
2741/** @} */
2742RT_C_DECLS_END
2743
2744
2745#endif /* !VBOX_INCLUDED_vmm_cpum_h */
2746