VirtualBox

source: vbox/trunk/include/VBox/vmm/cpum-x86-amd64.h@ 98970

Last change on this file since 98970 was 98970, checked in by vboxsync, 21 months ago

VMM: More ARMv8 x86/amd64 separation work, bugref:10385

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 119.4 KB
Line 
1/** @file
2 * CPUM - CPU Monitor(/ Manager).
3 */
4
5/*
6 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
7 *
8 * This file is part of VirtualBox base platform packages, as
9 * available from https://www.virtualbox.org.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, in version 3 of the
14 * License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <https://www.gnu.org/licenses>.
23 *
24 * The contents of this file may alternatively be used under the terms
25 * of the Common Development and Distribution License Version 1.0
26 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
27 * in the VirtualBox distribution, in which case the provisions of the
28 * CDDL are applicable instead of those of the GPL.
29 *
30 * You may elect to license modified versions of this file under the
31 * terms and conditions of either the GPL or the CDDL or both.
32 *
33 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
34 */
35
36#ifndef VBOX_INCLUDED_vmm_cpum_x86_amd64_h
37#define VBOX_INCLUDED_vmm_cpum_x86_amd64_h
38#ifndef RT_WITHOUT_PRAGMA_ONCE
39# pragma once
40#endif
41
42#include <iprt/x86.h>
43#include <VBox/vmm/hm_svm.h>
44#include <VBox/vmm/hm_vmx.h>
45
46RT_C_DECLS_BEGIN
47
48/** @defgroup grp_cpum The CPU Monitor / Manager API
49 * @ingroup grp_vmm
50 * @{
51 */
52
53/**
54 * CPUID feature to set or clear.
55 */
56typedef enum CPUMCPUIDFEATURE
57{
58 CPUMCPUIDFEATURE_INVALID = 0,
59 /** The APIC feature bit. (Std+Ext)
60 * Note! There is a per-cpu flag for masking this CPUID feature bit when the
61 * APICBASE.ENABLED bit is zero. So, this feature is only set/cleared
62 * at VM construction time like all the others. This didn't used to be
63 * that way, this is new with 5.1. */
64 CPUMCPUIDFEATURE_APIC,
65 /** The sysenter/sysexit feature bit. (Std) */
66 CPUMCPUIDFEATURE_SEP,
67 /** The SYSCALL/SYSEXIT feature bit (64 bits mode only for Intel CPUs). (Ext) */
68 CPUMCPUIDFEATURE_SYSCALL,
69 /** The PAE feature bit. (Std+Ext) */
70 CPUMCPUIDFEATURE_PAE,
71 /** The NX feature bit. (Ext) */
72 CPUMCPUIDFEATURE_NX,
73 /** The LAHF/SAHF feature bit (64 bits mode only). (Ext) */
74 CPUMCPUIDFEATURE_LAHF,
75 /** The LONG MODE feature bit. (Ext) */
76 CPUMCPUIDFEATURE_LONG_MODE,
77 /** The x2APIC feature bit. (Std) */
78 CPUMCPUIDFEATURE_X2APIC,
79 /** The RDTSCP feature bit. (Ext) */
80 CPUMCPUIDFEATURE_RDTSCP,
81 /** The Hypervisor Present bit. (Std) */
82 CPUMCPUIDFEATURE_HVP,
83 /** The speculation control feature bits. (StExt) */
84 CPUMCPUIDFEATURE_SPEC_CTRL,
85 /** 32bit hackishness. */
86 CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
87} CPUMCPUIDFEATURE;
88
89/**
90 * CPU Vendor.
91 */
92typedef enum CPUMCPUVENDOR
93{
94 CPUMCPUVENDOR_INVALID = 0,
95 CPUMCPUVENDOR_INTEL,
96 CPUMCPUVENDOR_AMD,
97 CPUMCPUVENDOR_VIA,
98 CPUMCPUVENDOR_CYRIX,
99 CPUMCPUVENDOR_SHANGHAI,
100 CPUMCPUVENDOR_HYGON,
101 CPUMCPUVENDOR_UNKNOWN,
102 /** 32bit hackishness. */
103 CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
104} CPUMCPUVENDOR;
105
106
107/**
108 * X86 and AMD64 CPU microarchitectures and in processor generations.
109 *
110 * @remarks The separation here is sometimes a little bit too finely grained,
111 * and the differences is more like processor generation than micro
112 * arch. This can be useful, so we'll provide functions for getting at
113 * more coarse grained info.
114 */
115typedef enum CPUMMICROARCH
116{
117 kCpumMicroarch_Invalid = 0,
118
119 kCpumMicroarch_Intel_First,
120
121 kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
122 kCpumMicroarch_Intel_80186,
123 kCpumMicroarch_Intel_80286,
124 kCpumMicroarch_Intel_80386,
125 kCpumMicroarch_Intel_80486,
126 kCpumMicroarch_Intel_P5,
127
128 kCpumMicroarch_Intel_P6_Core_Atom_First,
129 kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
130 kCpumMicroarch_Intel_P6_II,
131 kCpumMicroarch_Intel_P6_III,
132
133 kCpumMicroarch_Intel_P6_M_Banias,
134 kCpumMicroarch_Intel_P6_M_Dothan,
135 kCpumMicroarch_Intel_Core_Yonah, /**< Core, also known as Enhanced Pentium M. */
136
137 kCpumMicroarch_Intel_Core2_First,
138 kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First, /**< 65nm, Merom/Conroe/Kentsfield/Tigerton */
139 kCpumMicroarch_Intel_Core2_Penryn, /**< 45nm, Penryn/Wolfdale/Yorkfield/Harpertown */
140 kCpumMicroarch_Intel_Core2_End,
141
142 kCpumMicroarch_Intel_Core7_First,
143 kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
144 kCpumMicroarch_Intel_Core7_Westmere,
145 kCpumMicroarch_Intel_Core7_SandyBridge,
146 kCpumMicroarch_Intel_Core7_IvyBridge,
147 kCpumMicroarch_Intel_Core7_Haswell,
148 kCpumMicroarch_Intel_Core7_Broadwell,
149 kCpumMicroarch_Intel_Core7_Skylake,
150 kCpumMicroarch_Intel_Core7_KabyLake,
151 kCpumMicroarch_Intel_Core7_CoffeeLake,
152 kCpumMicroarch_Intel_Core7_WhiskeyLake,
153 kCpumMicroarch_Intel_Core7_CascadeLake,
154 kCpumMicroarch_Intel_Core7_CannonLake, /**< Limited 10nm. */
155 kCpumMicroarch_Intel_Core7_CometLake, /**< 10th gen, 14nm desktop + high power mobile. */
156 kCpumMicroarch_Intel_Core7_IceLake, /**< 10th gen, 10nm mobile and some Xeons. Actually 'Sunny Cove' march. */
157 kCpumMicroarch_Intel_Core7_SunnyCove = kCpumMicroarch_Intel_Core7_IceLake,
158 kCpumMicroarch_Intel_Core7_RocketLake, /**< 11th gen, 14nm desktop + high power mobile. Aka 'Cypress Cove', backport of 'Willow Cove' to 14nm. */
159 kCpumMicroarch_Intel_Core7_CypressCove = kCpumMicroarch_Intel_Core7_RocketLake,
160 kCpumMicroarch_Intel_Core7_TigerLake, /**< 11th gen, 10nm mobile. Actually 'Willow Cove' march. */
161 kCpumMicroarch_Intel_Core7_WillowCove = kCpumMicroarch_Intel_Core7_TigerLake,
162 kCpumMicroarch_Intel_Core7_AlderLake, /**< 12th gen, 10nm all platforms(?). */
163 kCpumMicroarch_Intel_Core7_SapphireRapids, /**< 12th? gen, 10nm server? */
164 kCpumMicroarch_Intel_Core7_End,
165
166 kCpumMicroarch_Intel_Atom_First,
167 kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
168 kCpumMicroarch_Intel_Atom_Lincroft, /**< Second generation bonnell (44nm). */
169 kCpumMicroarch_Intel_Atom_Saltwell, /**< 32nm shrink of Bonnell. */
170 kCpumMicroarch_Intel_Atom_Silvermont, /**< 22nm */
171 kCpumMicroarch_Intel_Atom_Airmount, /**< 14nm */
172 kCpumMicroarch_Intel_Atom_Goldmont, /**< 14nm */
173 kCpumMicroarch_Intel_Atom_GoldmontPlus, /**< 14nm */
174 kCpumMicroarch_Intel_Atom_Unknown,
175 kCpumMicroarch_Intel_Atom_End,
176
177
178 kCpumMicroarch_Intel_Phi_First,
179 kCpumMicroarch_Intel_Phi_KnightsFerry = kCpumMicroarch_Intel_Phi_First,
180 kCpumMicroarch_Intel_Phi_KnightsCorner,
181 kCpumMicroarch_Intel_Phi_KnightsLanding,
182 kCpumMicroarch_Intel_Phi_KnightsHill,
183 kCpumMicroarch_Intel_Phi_KnightsMill,
184 kCpumMicroarch_Intel_Phi_End,
185
186 kCpumMicroarch_Intel_P6_Core_Atom_End,
187
188 kCpumMicroarch_Intel_NB_First,
189 kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
190 kCpumMicroarch_Intel_NB_Northwood, /**< 130nm */
191 kCpumMicroarch_Intel_NB_Prescott, /**< 90nm */
192 kCpumMicroarch_Intel_NB_Prescott2M, /**< 90nm */
193 kCpumMicroarch_Intel_NB_CedarMill, /**< 65nm */
194 kCpumMicroarch_Intel_NB_Gallatin, /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
195 kCpumMicroarch_Intel_NB_Unknown,
196 kCpumMicroarch_Intel_NB_End,
197
198 kCpumMicroarch_Intel_Unknown,
199 kCpumMicroarch_Intel_End,
200
201 kCpumMicroarch_AMD_First,
202 kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
203 kCpumMicroarch_AMD_Am386,
204 kCpumMicroarch_AMD_Am486,
205 kCpumMicroarch_AMD_Am486Enh, /**< Covers Am5x86 as well. */
206 kCpumMicroarch_AMD_K5,
207 kCpumMicroarch_AMD_K6,
208
209 kCpumMicroarch_AMD_K7_First,
210 kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
211 kCpumMicroarch_AMD_K7_Spitfire,
212 kCpumMicroarch_AMD_K7_Thunderbird,
213 kCpumMicroarch_AMD_K7_Morgan,
214 kCpumMicroarch_AMD_K7_Thoroughbred,
215 kCpumMicroarch_AMD_K7_Barton,
216 kCpumMicroarch_AMD_K7_Unknown,
217 kCpumMicroarch_AMD_K7_End,
218
219 kCpumMicroarch_AMD_K8_First,
220 kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
221 kCpumMicroarch_AMD_K8_90nm, /**< 90nm shrink */
222 kCpumMicroarch_AMD_K8_90nm_DualCore, /**< 90nm with two cores. */
223 kCpumMicroarch_AMD_K8_90nm_AMDV, /**< 90nm with AMD-V (usually) and two cores (usually). */
224 kCpumMicroarch_AMD_K8_65nm, /**< 65nm shrink. */
225 kCpumMicroarch_AMD_K8_End,
226
227 kCpumMicroarch_AMD_K10,
228 kCpumMicroarch_AMD_K10_Lion,
229 kCpumMicroarch_AMD_K10_Llano,
230 kCpumMicroarch_AMD_Bobcat,
231 kCpumMicroarch_AMD_Jaguar,
232
233 kCpumMicroarch_AMD_15h_First,
234 kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
235 kCpumMicroarch_AMD_15h_Piledriver,
236 kCpumMicroarch_AMD_15h_Steamroller, /**< Yet to be released, might have different family. */
237 kCpumMicroarch_AMD_15h_Excavator, /**< Yet to be released, might have different family. */
238 kCpumMicroarch_AMD_15h_Unknown,
239 kCpumMicroarch_AMD_15h_End,
240
241 kCpumMicroarch_AMD_16h_First,
242 kCpumMicroarch_AMD_16h_End,
243
244 kCpumMicroarch_AMD_Zen_First,
245 kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
246 kCpumMicroarch_AMD_Zen_End,
247
248 kCpumMicroarch_AMD_Unknown,
249 kCpumMicroarch_AMD_End,
250
251 kCpumMicroarch_Hygon_First,
252 kCpumMicroarch_Hygon_Dhyana = kCpumMicroarch_Hygon_First,
253 kCpumMicroarch_Hygon_Unknown,
254 kCpumMicroarch_Hygon_End,
255
256 kCpumMicroarch_VIA_First,
257 kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
258 kCpumMicroarch_Centaur_C2,
259 kCpumMicroarch_Centaur_C3,
260 kCpumMicroarch_VIA_C3_M2,
261 kCpumMicroarch_VIA_C3_C5A, /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
262 kCpumMicroarch_VIA_C3_C5B, /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
263 kCpumMicroarch_VIA_C3_C5C, /**< 130nm Ezra - C3, Eden ESP. */
264 kCpumMicroarch_VIA_C3_C5N, /**< 130nm Ezra-T - C3. */
265 kCpumMicroarch_VIA_C3_C5XL, /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
266 kCpumMicroarch_VIA_C3_C5P, /**< 130nm Nehemiah+ - C3. */
267 kCpumMicroarch_VIA_C7_C5J, /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
268 kCpumMicroarch_VIA_Isaiah,
269 kCpumMicroarch_VIA_Unknown,
270 kCpumMicroarch_VIA_End,
271
272 kCpumMicroarch_Shanghai_First,
273 kCpumMicroarch_Shanghai_Wudaokou = kCpumMicroarch_Shanghai_First,
274 kCpumMicroarch_Shanghai_Unknown,
275 kCpumMicroarch_Shanghai_End,
276
277 kCpumMicroarch_Cyrix_First,
278 kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
279 kCpumMicroarch_Cyrix_M1,
280 kCpumMicroarch_Cyrix_MediaGX,
281 kCpumMicroarch_Cyrix_MediaGXm,
282 kCpumMicroarch_Cyrix_M2,
283 kCpumMicroarch_Cyrix_Unknown,
284 kCpumMicroarch_Cyrix_End,
285
286 kCpumMicroarch_NEC_First,
287 kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
288 kCpumMicroarch_NEC_V30,
289 kCpumMicroarch_NEC_End,
290
291 kCpumMicroarch_Unknown,
292
293 kCpumMicroarch_32BitHack = 0x7fffffff
294} CPUMMICROARCH;
295
296
297/** Predicate macro for catching netburst CPUs. */
298#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
299 ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)
300
301/** Predicate macro for catching Core7 CPUs. */
302#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
303 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)
304
305/** Predicate macro for catching Core 2 CPUs. */
306#define CPUMMICROARCH_IS_INTEL_CORE2(a_enmMicroarch) \
307 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core2_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core2_End)
308
309/** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
310#define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
311 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)
312
313/** Predicate macro for catching AMD Family OFh CPUs (aka K8). */
314#define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
315 ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)
316
317/** Predicate macro for catching AMD Family 10H CPUs (aka K10). */
318#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)
319
320/** Predicate macro for catching AMD Family 11H CPUs (aka Lion). */
321#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)
322
323/** Predicate macro for catching AMD Family 12H CPUs (aka Llano). */
324#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)
325
326/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat). */
327#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)
328
329/** Predicate macro for catching AMD Family 15H CPUs (bulldozer and it's
330 * decendants). */
331#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
332 ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)
333
334/** Predicate macro for catching AMD Family 16H CPUs. */
335#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
336 ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)
337
338/** Predicate macro for catching AMD Zen Family CPUs. */
339#define CPUMMICROARCH_IS_AMD_FAM_ZEN(a_enmMicroarch) \
340 ((a_enmMicroarch) >= kCpumMicroarch_AMD_Zen_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_Zen_End)
341
342
343/**
344 * CPUID leaf.
345 *
346 * @remarks This structure is used by the patch manager and is therefore
347 * more or less set in stone.
348 */
349typedef struct CPUMCPUIDLEAF
350{
351 /** The leaf number. */
352 uint32_t uLeaf;
353 /** The sub-leaf number. */
354 uint32_t uSubLeaf;
355 /** Sub-leaf mask. This is 0 when sub-leaves aren't used. */
356 uint32_t fSubLeafMask;
357
358 /** The EAX value. */
359 uint32_t uEax;
360 /** The EBX value. */
361 uint32_t uEbx;
362 /** The ECX value. */
363 uint32_t uEcx;
364 /** The EDX value. */
365 uint32_t uEdx;
366
367 /** Flags. */
368 uint32_t fFlags;
369} CPUMCPUIDLEAF;
370#ifndef VBOX_FOR_DTRACE_LIB
371AssertCompileSize(CPUMCPUIDLEAF, 32);
372#endif
373/** Pointer to a CPUID leaf. */
374typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
375/** Pointer to a const CPUID leaf. */
376typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;
377
378/** @name CPUMCPUIDLEAF::fFlags
379 * @{ */
380/** Indicates working intel leaf 0xb where the lower 8 ECX bits are not modified
381 * and EDX containing the extended APIC ID. */
382#define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES RT_BIT_32(0)
383/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
384#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID RT_BIT_32(1)
385/** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
386#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE RT_BIT_32(2)
387/** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
388#define CPUMCPUIDLEAF_F_CONTAINS_APIC RT_BIT_32(3)
389/** Mask of the valid flags. */
390#define CPUMCPUIDLEAF_F_VALID_MASK UINT32_C(0xf)
391/** @} */
392
393/**
394 * Method used to deal with unknown CPUID leaves.
395 * @remarks Used in patch code.
396 */
397typedef enum CPUMUNKNOWNCPUID
398{
399 /** Invalid zero value. */
400 CPUMUNKNOWNCPUID_INVALID = 0,
401 /** Use given default values (DefCpuId). */
402 CPUMUNKNOWNCPUID_DEFAULTS,
403 /** Return the last standard leaf.
404 * Intel Sandy Bridge has been observed doing this. */
405 CPUMUNKNOWNCPUID_LAST_STD_LEAF,
406 /** Return the last standard leaf, with ecx observed.
407 * Intel Sandy Bridge has been observed doing this. */
408 CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
409 /** The register values are passed thru unmodified. */
410 CPUMUNKNOWNCPUID_PASSTHRU,
411 /** End of valid value. */
412 CPUMUNKNOWNCPUID_END,
413 /** Ensure 32-bit type. */
414 CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
415} CPUMUNKNOWNCPUID;
416/** Pointer to unknown CPUID leaf method. */
417typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;
418
419
420/**
421 * The register set returned by a CPUID operation.
422 */
423typedef struct CPUMCPUID
424{
425 uint32_t uEax;
426 uint32_t uEbx;
427 uint32_t uEcx;
428 uint32_t uEdx;
429} CPUMCPUID;
430/** Pointer to a CPUID leaf. */
431typedef CPUMCPUID *PCPUMCPUID;
432/** Pointer to a const CPUID leaf. */
433typedef const CPUMCPUID *PCCPUMCPUID;
434
435
436/**
437 * MSR read functions.
438 */
439typedef enum CPUMMSRRDFN
440{
441 /** Invalid zero value. */
442 kCpumMsrRdFn_Invalid = 0,
443 /** Return the CPUMMSRRANGE::uValue. */
444 kCpumMsrRdFn_FixedValue,
445 /** Alias to the MSR range starting at the MSR given by
446 * CPUMMSRRANGE::uValue. Must be used in pair with
447 * kCpumMsrWrFn_MsrAlias. */
448 kCpumMsrRdFn_MsrAlias,
449 /** Write only register, GP all read attempts. */
450 kCpumMsrRdFn_WriteOnly,
451
452 kCpumMsrRdFn_Ia32P5McAddr,
453 kCpumMsrRdFn_Ia32P5McType,
454 kCpumMsrRdFn_Ia32TimestampCounter,
455 kCpumMsrRdFn_Ia32PlatformId, /**< Takes real CPU value for reference. */
456 kCpumMsrRdFn_Ia32ApicBase,
457 kCpumMsrRdFn_Ia32FeatureControl,
458 kCpumMsrRdFn_Ia32BiosSignId, /**< Range value returned. */
459 kCpumMsrRdFn_Ia32SmmMonitorCtl,
460 kCpumMsrRdFn_Ia32PmcN,
461 kCpumMsrRdFn_Ia32MonitorFilterLineSize,
462 kCpumMsrRdFn_Ia32MPerf,
463 kCpumMsrRdFn_Ia32APerf,
464 kCpumMsrRdFn_Ia32MtrrCap, /**< Takes real CPU value for reference. */
465 kCpumMsrRdFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
466 kCpumMsrRdFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
467 kCpumMsrRdFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
468 kCpumMsrRdFn_Ia32MtrrDefType,
469 kCpumMsrRdFn_Ia32Pat,
470 kCpumMsrRdFn_Ia32SysEnterCs,
471 kCpumMsrRdFn_Ia32SysEnterEsp,
472 kCpumMsrRdFn_Ia32SysEnterEip,
473 kCpumMsrRdFn_Ia32McgCap,
474 kCpumMsrRdFn_Ia32McgStatus,
475 kCpumMsrRdFn_Ia32McgCtl,
476 kCpumMsrRdFn_Ia32DebugCtl,
477 kCpumMsrRdFn_Ia32SmrrPhysBase,
478 kCpumMsrRdFn_Ia32SmrrPhysMask,
479 kCpumMsrRdFn_Ia32PlatformDcaCap,
480 kCpumMsrRdFn_Ia32CpuDcaCap,
481 kCpumMsrRdFn_Ia32Dca0Cap,
482 kCpumMsrRdFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
483 kCpumMsrRdFn_Ia32PerfStatus, /**< Range value returned. */
484 kCpumMsrRdFn_Ia32PerfCtl, /**< Range value returned. */
485 kCpumMsrRdFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
486 kCpumMsrRdFn_Ia32PerfCapabilities, /**< Takes reference value. */
487 kCpumMsrRdFn_Ia32FixedCtrCtrl,
488 kCpumMsrRdFn_Ia32PerfGlobalStatus, /**< Takes reference value. */
489 kCpumMsrRdFn_Ia32PerfGlobalCtrl,
490 kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
491 kCpumMsrRdFn_Ia32PebsEnable,
492 kCpumMsrRdFn_Ia32ClockModulation, /**< Range value returned. */
493 kCpumMsrRdFn_Ia32ThermInterrupt, /**< Range value returned. */
494 kCpumMsrRdFn_Ia32ThermStatus, /**< Range value returned. */
495 kCpumMsrRdFn_Ia32Therm2Ctl, /**< Range value returned. */
496 kCpumMsrRdFn_Ia32MiscEnable, /**< Range value returned. */
497 kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
498 kCpumMsrRdFn_Ia32McNCtl2, /**< Takes register number of start of range. */
499 kCpumMsrRdFn_Ia32DsArea,
500 kCpumMsrRdFn_Ia32TscDeadline,
501 kCpumMsrRdFn_Ia32X2ApicN,
502 kCpumMsrRdFn_Ia32DebugInterface,
503 kCpumMsrRdFn_Ia32VmxBasic, /**< Takes real value as reference. */
504 kCpumMsrRdFn_Ia32VmxPinbasedCtls, /**< Takes real value as reference. */
505 kCpumMsrRdFn_Ia32VmxProcbasedCtls, /**< Takes real value as reference. */
506 kCpumMsrRdFn_Ia32VmxExitCtls, /**< Takes real value as reference. */
507 kCpumMsrRdFn_Ia32VmxEntryCtls, /**< Takes real value as reference. */
508 kCpumMsrRdFn_Ia32VmxMisc, /**< Takes real value as reference. */
509 kCpumMsrRdFn_Ia32VmxCr0Fixed0, /**< Takes real value as reference. */
510 kCpumMsrRdFn_Ia32VmxCr0Fixed1, /**< Takes real value as reference. */
511 kCpumMsrRdFn_Ia32VmxCr4Fixed0, /**< Takes real value as reference. */
512 kCpumMsrRdFn_Ia32VmxCr4Fixed1, /**< Takes real value as reference. */
513 kCpumMsrRdFn_Ia32VmxVmcsEnum, /**< Takes real value as reference. */
514 kCpumMsrRdFn_Ia32VmxProcBasedCtls2, /**< Takes real value as reference. */
515 kCpumMsrRdFn_Ia32VmxEptVpidCap, /**< Takes real value as reference. */
516 kCpumMsrRdFn_Ia32VmxTruePinbasedCtls, /**< Takes real value as reference. */
517 kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls, /**< Takes real value as reference. */
518 kCpumMsrRdFn_Ia32VmxTrueExitCtls, /**< Takes real value as reference. */
519 kCpumMsrRdFn_Ia32VmxTrueEntryCtls, /**< Takes real value as reference. */
520 kCpumMsrRdFn_Ia32VmxVmFunc, /**< Takes real value as reference. */
521 kCpumMsrRdFn_Ia32SpecCtrl,
522 kCpumMsrRdFn_Ia32ArchCapabilities,
523
524 kCpumMsrRdFn_Amd64Efer,
525 kCpumMsrRdFn_Amd64SyscallTarget,
526 kCpumMsrRdFn_Amd64LongSyscallTarget,
527 kCpumMsrRdFn_Amd64CompSyscallTarget,
528 kCpumMsrRdFn_Amd64SyscallFlagMask,
529 kCpumMsrRdFn_Amd64FsBase,
530 kCpumMsrRdFn_Amd64GsBase,
531 kCpumMsrRdFn_Amd64KernelGsBase,
532 kCpumMsrRdFn_Amd64TscAux,
533
534 kCpumMsrRdFn_IntelEblCrPowerOn,
535 kCpumMsrRdFn_IntelI7CoreThreadCount,
536 kCpumMsrRdFn_IntelP4EbcHardPowerOn,
537 kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
538 kCpumMsrRdFn_IntelP4EbcFrequencyId,
539 kCpumMsrRdFn_IntelP6FsbFrequency, /**< Takes real value as reference. */
540 kCpumMsrRdFn_IntelPlatformInfo,
541 kCpumMsrRdFn_IntelFlexRatio, /**< Takes real value as reference. */
542 kCpumMsrRdFn_IntelPkgCStConfigControl,
543 kCpumMsrRdFn_IntelPmgIoCaptureBase,
544 kCpumMsrRdFn_IntelLastBranchFromToN,
545 kCpumMsrRdFn_IntelLastBranchFromN,
546 kCpumMsrRdFn_IntelLastBranchToN,
547 kCpumMsrRdFn_IntelLastBranchTos,
548 kCpumMsrRdFn_IntelBblCrCtl,
549 kCpumMsrRdFn_IntelBblCrCtl3,
550 kCpumMsrRdFn_IntelI7TemperatureTarget, /**< Range value returned. */
551 kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
552 kCpumMsrRdFn_IntelI7MiscPwrMgmt,
553 kCpumMsrRdFn_IntelP6CrN,
554 kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
555 kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
556 kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
557 kCpumMsrRdFn_IntelI7SandyAesNiCtl,
558 kCpumMsrRdFn_IntelI7TurboRatioLimit, /**< Returns range value. */
559 kCpumMsrRdFn_IntelI7LbrSelect,
560 kCpumMsrRdFn_IntelI7SandyErrorControl,
561 kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
562 kCpumMsrRdFn_IntelI7PowerCtl,
563 kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
564 kCpumMsrRdFn_IntelI7PebsLdLat,
565 kCpumMsrRdFn_IntelI7PkgCnResidencyN, /**< Takes C-state number. */
566 kCpumMsrRdFn_IntelI7CoreCnResidencyN, /**< Takes C-state number. */
567 kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
568 kCpumMsrRdFn_IntelI7SandyVrMiscConfig, /**< Takes real value as reference. */
569 kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */
570 kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN, /**< Takes real value as reference. */
571 kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
572 kCpumMsrRdFn_IntelI7RaplPkgPowerLimit, /**< Takes real value as reference. */
573 kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
574 kCpumMsrRdFn_IntelI7RaplPkgPerfStatus, /**< Takes real value as reference. */
575 kCpumMsrRdFn_IntelI7RaplPkgPowerInfo, /**< Takes real value as reference. */
576 kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */
577 kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
578 kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */
579 kCpumMsrRdFn_IntelI7RaplDramPowerInfo, /**< Takes real value as reference. */
580 kCpumMsrRdFn_IntelI7RaplPp0PowerLimit, /**< Takes real value as reference. */
581 kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
582 kCpumMsrRdFn_IntelI7RaplPp0Policy, /**< Takes real value as reference. */
583 kCpumMsrRdFn_IntelI7RaplPp0PerfStatus, /**< Takes real value as reference. */
584 kCpumMsrRdFn_IntelI7RaplPp1PowerLimit, /**< Takes real value as reference. */
585 kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
586 kCpumMsrRdFn_IntelI7RaplPp1Policy, /**< Takes real value as reference. */
587 kCpumMsrRdFn_IntelI7IvyConfigTdpNominal, /**< Takes real value as reference. */
588 kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1, /**< Takes real value as reference. */
589 kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2, /**< Takes real value as reference. */
590 kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
591 kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
592 kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
593 kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
594 kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
595 kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
596 kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
597 kCpumMsrRdFn_IntelI7UncCBoxConfig,
598 kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
599 kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
600 kCpumMsrRdFn_IntelI7SmiCount,
601 kCpumMsrRdFn_IntelCore2EmttmCrTablesN, /**< Range value returned. */
602 kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
603 kCpumMsrRdFn_IntelCore1ExtConfig,
604 kCpumMsrRdFn_IntelCore1DtsCalControl,
605 kCpumMsrRdFn_IntelCore2PeciControl,
606 kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,
607
608 kCpumMsrRdFn_P6LastBranchFromIp,
609 kCpumMsrRdFn_P6LastBranchToIp,
610 kCpumMsrRdFn_P6LastIntFromIp,
611 kCpumMsrRdFn_P6LastIntToIp,
612
613 kCpumMsrRdFn_AmdFam15hTscRate,
614 kCpumMsrRdFn_AmdFam15hLwpCfg,
615 kCpumMsrRdFn_AmdFam15hLwpCbAddr,
616 kCpumMsrRdFn_AmdFam10hMc4MiscN,
617 kCpumMsrRdFn_AmdK8PerfCtlN,
618 kCpumMsrRdFn_AmdK8PerfCtrN,
619 kCpumMsrRdFn_AmdK8SysCfg, /**< Range value returned. */
620 kCpumMsrRdFn_AmdK8HwCr,
621 kCpumMsrRdFn_AmdK8IorrBaseN,
622 kCpumMsrRdFn_AmdK8IorrMaskN,
623 kCpumMsrRdFn_AmdK8TopOfMemN,
624 kCpumMsrRdFn_AmdK8NbCfg1,
625 kCpumMsrRdFn_AmdK8McXcptRedir,
626 kCpumMsrRdFn_AmdK8CpuNameN,
627 kCpumMsrRdFn_AmdK8HwThermalCtrl, /**< Range value returned. */
628 kCpumMsrRdFn_AmdK8SwThermalCtrl,
629 kCpumMsrRdFn_AmdK8FidVidControl, /**< Range value returned. */
630 kCpumMsrRdFn_AmdK8FidVidStatus, /**< Range value returned. */
631 kCpumMsrRdFn_AmdK8McCtlMaskN,
632 kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
633 kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
634 kCpumMsrRdFn_AmdK8IntPendingMessage,
635 kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
636 kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
637 kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
638 kCpumMsrRdFn_AmdFam10hPStateCurLimit, /**< Returns range value. */
639 kCpumMsrRdFn_AmdFam10hPStateControl, /**< Returns range value. */
640 kCpumMsrRdFn_AmdFam10hPStateStatus, /**< Returns range value. */
641 kCpumMsrRdFn_AmdFam10hPStateN, /**< Returns range value. This isn't an register index! */
642 kCpumMsrRdFn_AmdFam10hCofVidControl, /**< Returns range value. */
643 kCpumMsrRdFn_AmdFam10hCofVidStatus, /**< Returns range value. */
644 kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
645 kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
646 kCpumMsrRdFn_AmdK8SmmBase,
647 kCpumMsrRdFn_AmdK8SmmAddr,
648 kCpumMsrRdFn_AmdK8SmmMask,
649 kCpumMsrRdFn_AmdK8VmCr,
650 kCpumMsrRdFn_AmdK8IgnNe,
651 kCpumMsrRdFn_AmdK8SmmCtl,
652 kCpumMsrRdFn_AmdK8VmHSavePa,
653 kCpumMsrRdFn_AmdFam10hVmLockKey,
654 kCpumMsrRdFn_AmdFam10hSmmLockKey,
655 kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
656 kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
657 kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
658 kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
659 kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
660 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
661 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
662 kCpumMsrRdFn_AmdK7MicrocodeCtl, /**< Returns range value. */
663 kCpumMsrRdFn_AmdK7ClusterIdMaybe, /**< Returns range value. */
664 kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
665 kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
666 kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
667 kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
668 kCpumMsrRdFn_AmdK8PatchLevel, /**< Returns range value. */
669 kCpumMsrRdFn_AmdK7DebugStatusMaybe,
670 kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
671 kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
672 kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
673 kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
674 kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
675 kCpumMsrRdFn_AmdK7NodeId,
676 kCpumMsrRdFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
677 kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
678 kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
679 kCpumMsrRdFn_AmdK7LoadStoreCfg,
680 kCpumMsrRdFn_AmdK7InstrCacheCfg,
681 kCpumMsrRdFn_AmdK7DataCacheCfg,
682 kCpumMsrRdFn_AmdK7BusUnitCfg,
683 kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
684 kCpumMsrRdFn_AmdFam15hFpuCfg,
685 kCpumMsrRdFn_AmdFam15hDecoderCfg,
686 kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
687 kCpumMsrRdFn_AmdFam15hCombUnitCfg,
688 kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
689 kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
690 kCpumMsrRdFn_AmdFam15hExecUnitCfg,
691 kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
692 kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
693 kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
694 kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
695 kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
696 kCpumMsrRdFn_AmdFam10hIbsOpRip,
697 kCpumMsrRdFn_AmdFam10hIbsOpData,
698 kCpumMsrRdFn_AmdFam10hIbsOpData2,
699 kCpumMsrRdFn_AmdFam10hIbsOpData3,
700 kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
701 kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
702 kCpumMsrRdFn_AmdFam10hIbsCtl,
703 kCpumMsrRdFn_AmdFam14hIbsBrTarget,
704
705 kCpumMsrRdFn_Gim,
706
707 /** End of valid MSR read function indexes. */
708 kCpumMsrRdFn_End
709} CPUMMSRRDFN;
710
711/**
712 * MSR write functions.
713 */
714typedef enum CPUMMSRWRFN
715{
716 /** Invalid zero value. */
717 kCpumMsrWrFn_Invalid = 0,
718 /** Writes are ignored, the fWrGpMask is observed though. */
719 kCpumMsrWrFn_IgnoreWrite,
720 /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
721 kCpumMsrWrFn_ReadOnly,
722 /** Alias to the MSR range starting at the MSR given by
723 * CPUMMSRRANGE::uValue. Must be used in pair with
724 * kCpumMsrRdFn_MsrAlias. */
725 kCpumMsrWrFn_MsrAlias,
726
727 kCpumMsrWrFn_Ia32P5McAddr,
728 kCpumMsrWrFn_Ia32P5McType,
729 kCpumMsrWrFn_Ia32TimestampCounter,
730 kCpumMsrWrFn_Ia32ApicBase,
731 kCpumMsrWrFn_Ia32FeatureControl,
732 kCpumMsrWrFn_Ia32BiosSignId,
733 kCpumMsrWrFn_Ia32BiosUpdateTrigger,
734 kCpumMsrWrFn_Ia32SmmMonitorCtl,
735 kCpumMsrWrFn_Ia32PmcN,
736 kCpumMsrWrFn_Ia32MonitorFilterLineSize,
737 kCpumMsrWrFn_Ia32MPerf,
738 kCpumMsrWrFn_Ia32APerf,
739 kCpumMsrWrFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
740 kCpumMsrWrFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
741 kCpumMsrWrFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
742 kCpumMsrWrFn_Ia32MtrrDefType,
743 kCpumMsrWrFn_Ia32Pat,
744 kCpumMsrWrFn_Ia32SysEnterCs,
745 kCpumMsrWrFn_Ia32SysEnterEsp,
746 kCpumMsrWrFn_Ia32SysEnterEip,
747 kCpumMsrWrFn_Ia32McgStatus,
748 kCpumMsrWrFn_Ia32McgCtl,
749 kCpumMsrWrFn_Ia32DebugCtl,
750 kCpumMsrWrFn_Ia32SmrrPhysBase,
751 kCpumMsrWrFn_Ia32SmrrPhysMask,
752 kCpumMsrWrFn_Ia32PlatformDcaCap,
753 kCpumMsrWrFn_Ia32Dca0Cap,
754 kCpumMsrWrFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
755 kCpumMsrWrFn_Ia32PerfStatus,
756 kCpumMsrWrFn_Ia32PerfCtl,
757 kCpumMsrWrFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
758 kCpumMsrWrFn_Ia32PerfCapabilities,
759 kCpumMsrWrFn_Ia32FixedCtrCtrl,
760 kCpumMsrWrFn_Ia32PerfGlobalStatus,
761 kCpumMsrWrFn_Ia32PerfGlobalCtrl,
762 kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
763 kCpumMsrWrFn_Ia32PebsEnable,
764 kCpumMsrWrFn_Ia32ClockModulation,
765 kCpumMsrWrFn_Ia32ThermInterrupt,
766 kCpumMsrWrFn_Ia32ThermStatus,
767 kCpumMsrWrFn_Ia32Therm2Ctl,
768 kCpumMsrWrFn_Ia32MiscEnable,
769 kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
770 kCpumMsrWrFn_Ia32McNCtl2, /**< Takes register number of start of range. */
771 kCpumMsrWrFn_Ia32DsArea,
772 kCpumMsrWrFn_Ia32TscDeadline,
773 kCpumMsrWrFn_Ia32X2ApicN,
774 kCpumMsrWrFn_Ia32DebugInterface,
775 kCpumMsrWrFn_Ia32SpecCtrl,
776 kCpumMsrWrFn_Ia32PredCmd,
777 kCpumMsrWrFn_Ia32FlushCmd,
778
779 kCpumMsrWrFn_Amd64Efer,
780 kCpumMsrWrFn_Amd64SyscallTarget,
781 kCpumMsrWrFn_Amd64LongSyscallTarget,
782 kCpumMsrWrFn_Amd64CompSyscallTarget,
783 kCpumMsrWrFn_Amd64SyscallFlagMask,
784 kCpumMsrWrFn_Amd64FsBase,
785 kCpumMsrWrFn_Amd64GsBase,
786 kCpumMsrWrFn_Amd64KernelGsBase,
787 kCpumMsrWrFn_Amd64TscAux,
788 kCpumMsrWrFn_IntelEblCrPowerOn,
789 kCpumMsrWrFn_IntelP4EbcHardPowerOn,
790 kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
791 kCpumMsrWrFn_IntelP4EbcFrequencyId,
792 kCpumMsrWrFn_IntelFlexRatio,
793 kCpumMsrWrFn_IntelPkgCStConfigControl,
794 kCpumMsrWrFn_IntelPmgIoCaptureBase,
795 kCpumMsrWrFn_IntelLastBranchFromToN,
796 kCpumMsrWrFn_IntelLastBranchFromN,
797 kCpumMsrWrFn_IntelLastBranchToN,
798 kCpumMsrWrFn_IntelLastBranchTos,
799 kCpumMsrWrFn_IntelBblCrCtl,
800 kCpumMsrWrFn_IntelBblCrCtl3,
801 kCpumMsrWrFn_IntelI7TemperatureTarget,
802 kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
803 kCpumMsrWrFn_IntelI7MiscPwrMgmt,
804 kCpumMsrWrFn_IntelP6CrN,
805 kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
806 kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
807 kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
808 kCpumMsrWrFn_IntelI7SandyAesNiCtl,
809 kCpumMsrWrFn_IntelI7TurboRatioLimit,
810 kCpumMsrWrFn_IntelI7LbrSelect,
811 kCpumMsrWrFn_IntelI7SandyErrorControl,
812 kCpumMsrWrFn_IntelI7PowerCtl,
813 kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
814 kCpumMsrWrFn_IntelI7PebsLdLat,
815 kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
816 kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
817 kCpumMsrWrFn_IntelI7SandyRaplPowerUnit, /**< R/O but found writable bits on a Silvermont CPU here. */
818 kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
819 kCpumMsrWrFn_IntelI7SandyPkgC2Residency, /**< R/O but found writable bits on a Silvermont CPU here. */
820 kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
821 kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
822 kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
823 kCpumMsrWrFn_IntelI7RaplPp0Policy,
824 kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
825 kCpumMsrWrFn_IntelI7RaplPp1Policy,
826 kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
827 kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
828 kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
829 kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
830 kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
831 kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
832 kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
833 kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
834 kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
835 kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
836 kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
837 kCpumMsrWrFn_IntelCore1ExtConfig,
838 kCpumMsrWrFn_IntelCore1DtsCalControl,
839 kCpumMsrWrFn_IntelCore2PeciControl,
840
841 kCpumMsrWrFn_P6LastIntFromIp,
842 kCpumMsrWrFn_P6LastIntToIp,
843
844 kCpumMsrWrFn_AmdFam15hTscRate,
845 kCpumMsrWrFn_AmdFam15hLwpCfg,
846 kCpumMsrWrFn_AmdFam15hLwpCbAddr,
847 kCpumMsrWrFn_AmdFam10hMc4MiscN,
848 kCpumMsrWrFn_AmdK8PerfCtlN,
849 kCpumMsrWrFn_AmdK8PerfCtrN,
850 kCpumMsrWrFn_AmdK8SysCfg,
851 kCpumMsrWrFn_AmdK8HwCr,
852 kCpumMsrWrFn_AmdK8IorrBaseN,
853 kCpumMsrWrFn_AmdK8IorrMaskN,
854 kCpumMsrWrFn_AmdK8TopOfMemN,
855 kCpumMsrWrFn_AmdK8NbCfg1,
856 kCpumMsrWrFn_AmdK8McXcptRedir,
857 kCpumMsrWrFn_AmdK8CpuNameN,
858 kCpumMsrWrFn_AmdK8HwThermalCtrl,
859 kCpumMsrWrFn_AmdK8SwThermalCtrl,
860 kCpumMsrWrFn_AmdK8FidVidControl,
861 kCpumMsrWrFn_AmdK8McCtlMaskN,
862 kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
863 kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
864 kCpumMsrWrFn_AmdK8IntPendingMessage,
865 kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
866 kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
867 kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
868 kCpumMsrWrFn_AmdFam10hPStateControl,
869 kCpumMsrWrFn_AmdFam10hPStateStatus,
870 kCpumMsrWrFn_AmdFam10hPStateN,
871 kCpumMsrWrFn_AmdFam10hCofVidControl,
872 kCpumMsrWrFn_AmdFam10hCofVidStatus,
873 kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
874 kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
875 kCpumMsrWrFn_AmdK8SmmBase,
876 kCpumMsrWrFn_AmdK8SmmAddr,
877 kCpumMsrWrFn_AmdK8SmmMask,
878 kCpumMsrWrFn_AmdK8VmCr,
879 kCpumMsrWrFn_AmdK8IgnNe,
880 kCpumMsrWrFn_AmdK8SmmCtl,
881 kCpumMsrWrFn_AmdK8VmHSavePa,
882 kCpumMsrWrFn_AmdFam10hVmLockKey,
883 kCpumMsrWrFn_AmdFam10hSmmLockKey,
884 kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
885 kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
886 kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
887 kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
888 kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
889 kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
890 kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
891 kCpumMsrWrFn_AmdK7MicrocodeCtl,
892 kCpumMsrWrFn_AmdK7ClusterIdMaybe,
893 kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
894 kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
895 kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
896 kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
897 kCpumMsrWrFn_AmdK8PatchLoader,
898 kCpumMsrWrFn_AmdK7DebugStatusMaybe,
899 kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
900 kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
901 kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
902 kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
903 kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
904 kCpumMsrWrFn_AmdK7NodeId,
905 kCpumMsrWrFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
906 kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
907 kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
908 kCpumMsrWrFn_AmdK7LoadStoreCfg,
909 kCpumMsrWrFn_AmdK7InstrCacheCfg,
910 kCpumMsrWrFn_AmdK7DataCacheCfg,
911 kCpumMsrWrFn_AmdK7BusUnitCfg,
912 kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
913 kCpumMsrWrFn_AmdFam15hFpuCfg,
914 kCpumMsrWrFn_AmdFam15hDecoderCfg,
915 kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
916 kCpumMsrWrFn_AmdFam15hCombUnitCfg,
917 kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
918 kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
919 kCpumMsrWrFn_AmdFam15hExecUnitCfg,
920 kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
921 kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
922 kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
923 kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
924 kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
925 kCpumMsrWrFn_AmdFam10hIbsOpRip,
926 kCpumMsrWrFn_AmdFam10hIbsOpData,
927 kCpumMsrWrFn_AmdFam10hIbsOpData2,
928 kCpumMsrWrFn_AmdFam10hIbsOpData3,
929 kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
930 kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
931 kCpumMsrWrFn_AmdFam10hIbsCtl,
932 kCpumMsrWrFn_AmdFam14hIbsBrTarget,
933
934 kCpumMsrWrFn_Gim,
935
936 /** End of valid MSR write function indexes. */
937 kCpumMsrWrFn_End
938} CPUMMSRWRFN;
939
940/**
941 * MSR range.
942 */
943typedef struct CPUMMSRRANGE
944{
945 /** The first MSR. [0] */
946 uint32_t uFirst;
947 /** The last MSR. [4] */
948 uint32_t uLast;
949 /** The read function (CPUMMSRRDFN). [8] */
950 uint16_t enmRdFn;
951 /** The write function (CPUMMSRWRFN). [10] */
952 uint16_t enmWrFn;
953 /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
954 * UINT16_MAX if not used by the read and write functions. [12] */
955 uint32_t offCpumCpu : 24;
956 /** Reserved for future hacks. [15] */
957 uint32_t fReserved : 8;
958 /** The init/read value. [16]
959 * When enmRdFn is kCpumMsrRdFn_INIT_VALUE, this is the value returned on RDMSR.
960 * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
961 * offset into CPUM. */
962 uint64_t uValue;
963 /** The bits to ignore when writing. [24] */
964 uint64_t fWrIgnMask;
965 /** The bits that will cause a GP(0) when writing. [32]
966 * This is always checked prior to calling the write function. Using
967 * UINT64_MAX effectively marks the MSR as read-only. */
968 uint64_t fWrGpMask;
969 /** The register name, if applicable. [40] */
970 char szName[56];
971
972 /** The number of reads. */
973 STAMCOUNTER cReads;
974 /** The number of writes. */
975 STAMCOUNTER cWrites;
976 /** The number of times ignored bits were written. */
977 STAMCOUNTER cIgnoredBits;
978 /** The number of GPs generated. */
979 STAMCOUNTER cGps;
980} CPUMMSRRANGE;
981#ifndef VBOX_FOR_DTRACE_LIB
982AssertCompileSize(CPUMMSRRANGE, 128);
983#endif
984/** Pointer to an MSR range. */
985typedef CPUMMSRRANGE *PCPUMMSRRANGE;
986/** Pointer to a const MSR range. */
987typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
988
989
990/**
991 * MSRs which are required while exploding features.
992 */
993typedef struct CPUMMSRS
994{
995 union
996 {
997 VMXMSRS vmx;
998 SVMMSRS svm;
999 } hwvirt;
1000} CPUMMSRS;
1001/** Pointer to an CPUMMSRS struct. */
1002typedef CPUMMSRS *PCPUMMSRS;
1003/** Pointer to a const CPUMMSRS struct. */
1004typedef CPUMMSRS const *PCCPUMMSRS;
1005
1006
1007/**
1008 * CPU features and quirks.
1009 * This is mostly exploded CPUID info.
1010 */
1011typedef struct CPUMFEATURES
1012{
1013 /** The CPU vendor (CPUMCPUVENDOR). */
1014 uint8_t enmCpuVendor;
1015 /** The CPU family. */
1016 uint8_t uFamily;
1017 /** The CPU model. */
1018 uint8_t uModel;
1019 /** The CPU stepping. */
1020 uint8_t uStepping;
1021 /** The microarchitecture. */
1022#ifndef VBOX_FOR_DTRACE_LIB
1023 CPUMMICROARCH enmMicroarch;
1024#else
1025 uint32_t enmMicroarch;
1026#endif
1027 /** The maximum physical address width of the CPU. */
1028 uint8_t cMaxPhysAddrWidth;
1029 /** The maximum linear address width of the CPU. */
1030 uint8_t cMaxLinearAddrWidth;
1031 /** Max size of the extended state (or FPU state if no XSAVE). */
1032 uint16_t cbMaxExtendedState;
1033
1034 /** Supports MSRs. */
1035 uint32_t fMsr : 1;
1036 /** Supports the page size extension (4/2 MB pages). */
1037 uint32_t fPse : 1;
1038 /** Supports 36-bit page size extension (4 MB pages can map memory above
1039 * 4GB). */
1040 uint32_t fPse36 : 1;
1041 /** Supports physical address extension (PAE). */
1042 uint32_t fPae : 1;
1043 /** Supports page-global extension (PGE). */
1044 uint32_t fPge : 1;
1045 /** Page attribute table (PAT) support (page level cache control). */
1046 uint32_t fPat : 1;
1047 /** Supports the FXSAVE and FXRSTOR instructions. */
1048 uint32_t fFxSaveRstor : 1;
1049 /** Supports the XSAVE and XRSTOR instructions. */
1050 uint32_t fXSaveRstor : 1;
1051 /** Supports the XSAVEOPT instruction. */
1052 uint32_t fXSaveOpt : 1;
1053 /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
1054 uint32_t fOpSysXSaveRstor : 1;
1055 /** Supports MMX. */
1056 uint32_t fMmx : 1;
1057 /** Supports AMD extensions to MMX instructions. */
1058 uint32_t fAmdMmxExts : 1;
1059 /** Supports SSE. */
1060 uint32_t fSse : 1;
1061 /** Supports SSE2. */
1062 uint32_t fSse2 : 1;
1063 /** Supports SSE3. */
1064 uint32_t fSse3 : 1;
1065 /** Supports SSSE3. */
1066 uint32_t fSsse3 : 1;
1067 /** Supports SSE4.1. */
1068 uint32_t fSse41 : 1;
1069 /** Supports SSE4.2. */
1070 uint32_t fSse42 : 1;
1071 /** Supports AVX. */
1072 uint32_t fAvx : 1;
1073 /** Supports AVX2. */
1074 uint32_t fAvx2 : 1;
1075 /** Supports AVX512 foundation. */
1076 uint32_t fAvx512Foundation : 1;
1077 /** Supports RDTSC. */
1078 uint32_t fTsc : 1;
1079 /** Intel SYSENTER/SYSEXIT support */
1080 uint32_t fSysEnter : 1;
1081 /** First generation APIC. */
1082 uint32_t fApic : 1;
1083 /** Second generation APIC. */
1084 uint32_t fX2Apic : 1;
1085 /** Hypervisor present. */
1086 uint32_t fHypervisorPresent : 1;
1087 /** MWAIT & MONITOR instructions supported. */
1088 uint32_t fMonitorMWait : 1;
1089 /** MWAIT Extensions present. */
1090 uint32_t fMWaitExtensions : 1;
1091 /** Supports CMPXCHG16B in 64-bit mode. */
1092 uint32_t fMovCmpXchg16b : 1;
1093 /** Supports CLFLUSH. */
1094 uint32_t fClFlush : 1;
1095 /** Supports CLFLUSHOPT. */
1096 uint32_t fClFlushOpt : 1;
1097 /** Supports IA32_PRED_CMD.IBPB. */
1098 uint32_t fIbpb : 1;
1099 /** Supports IA32_SPEC_CTRL.IBRS. */
1100 uint32_t fIbrs : 1;
1101 /** Supports IA32_SPEC_CTRL.STIBP. */
1102 uint32_t fStibp : 1;
1103 /** Supports IA32_FLUSH_CMD. */
1104 uint32_t fFlushCmd : 1;
1105 /** Supports IA32_ARCH_CAP. */
1106 uint32_t fArchCap : 1;
1107 /** Supports MD_CLEAR functionality (VERW, IA32_FLUSH_CMD). */
1108 uint32_t fMdsClear : 1;
1109 /** Supports PCID. */
1110 uint32_t fPcid : 1;
1111 /** Supports INVPCID. */
1112 uint32_t fInvpcid : 1;
1113 /** Supports read/write FSGSBASE instructions. */
1114 uint32_t fFsGsBase : 1;
1115 /** Supports BMI1 instructions (ANDN, BEXTR, BLSI, BLSMSK, BLSR, and TZCNT). */
1116 uint32_t fBmi1 : 1;
1117 /** Supports BMI2 instructions (BZHI, MULX, PDEP, PEXT, RORX, SARX, SHRX,
1118 * and SHLX). */
1119 uint32_t fBmi2 : 1;
1120 /** Supports POPCNT instruction. */
1121 uint32_t fPopCnt : 1;
1122 /** Supports RDRAND instruction. */
1123 uint32_t fRdRand : 1;
1124 /** Supports RDSEED instruction. */
1125 uint32_t fRdSeed : 1;
1126 /** Supports Hardware Lock Elision (HLE). */
1127 uint32_t fHle : 1;
1128 /** Supports Restricted Transactional Memory (RTM - XBEGIN, XEND, XABORT). */
1129 uint32_t fRtm : 1;
1130 /** Supports PCLMULQDQ instruction. */
1131 uint32_t fPclMul : 1;
1132 /** Supports AES-NI (six AESxxx instructions). */
1133 uint32_t fAesNi : 1;
1134 /** Support MOVBE instruction. */
1135 uint32_t fMovBe : 1;
1136 /** Support SHA instructions. */
1137 uint32_t fSha : 1;
1138 /** Support ADX instructions. */
1139 uint32_t fAdx : 1;
1140
1141 /** Supports AMD 3DNow instructions. */
1142 uint32_t f3DNow : 1;
1143 /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
1144 uint32_t f3DNowPrefetch : 1;
1145
1146 /** AMD64: Supports long mode. */
1147 uint32_t fLongMode : 1;
1148 /** AMD64: SYSCALL/SYSRET support. */
1149 uint32_t fSysCall : 1;
1150 /** AMD64: No-execute page table bit. */
1151 uint32_t fNoExecute : 1;
1152 /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
1153 uint32_t fLahfSahf : 1;
1154 /** AMD64: Supports RDTSCP. */
1155 uint32_t fRdTscP : 1;
1156 /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
1157 uint32_t fMovCr8In32Bit : 1;
1158 /** AMD64: Supports XOP (similar to VEX3/AVX). */
1159 uint32_t fXop : 1;
1160 /** AMD64: Supports ABM, i.e. the LZCNT instruction. */
1161 uint32_t fAbm : 1;
1162 /** AMD64: Supports TBM (BEXTR, BLCFILL, BLCI, BLCIC, BLCMSK, BLCS,
1163 * BLSFILL, BLSIC, T1MSKC, and TZMSK). */
1164 uint32_t fTbm : 1;
1165
1166 /** Indicates that FPU instruction and data pointers may leak.
1167 * This generally applies to recent AMD CPUs, where the FPU IP and DP pointer
1168 * is only saved and restored if an exception is pending. */
1169 uint32_t fLeakyFxSR : 1;
1170
1171 /** AMD64: Supports AMD SVM. */
1172 uint32_t fSvm : 1;
1173
1174 /** Support for Intel VMX. */
1175 uint32_t fVmx : 1;
1176
1177 /** Indicates that speculative execution control CPUID bits and MSRs are exposed.
1178 * The details are different for Intel and AMD but both have similar
1179 * functionality. */
1180 uint32_t fSpeculationControl : 1;
1181
1182 /** MSR_IA32_ARCH_CAPABILITIES: RDCL_NO (bit 0).
1183 * @remarks Only safe use after CPUM ring-0 init! */
1184 uint32_t fArchRdclNo : 1;
1185 /** MSR_IA32_ARCH_CAPABILITIES: IBRS_ALL (bit 1).
1186 * @remarks Only safe use after CPUM ring-0 init! */
1187 uint32_t fArchIbrsAll : 1;
1188 /** MSR_IA32_ARCH_CAPABILITIES: RSB Override (bit 2).
1189 * @remarks Only safe use after CPUM ring-0 init! */
1190 uint32_t fArchRsbOverride : 1;
1191 /** MSR_IA32_ARCH_CAPABILITIES: RSB Override (bit 3).
1192 * @remarks Only safe use after CPUM ring-0 init! */
1193 uint32_t fArchVmmNeedNotFlushL1d : 1;
1194 /** MSR_IA32_ARCH_CAPABILITIES: MDS_NO (bit 4).
1195 * @remarks Only safe use after CPUM ring-0 init! */
1196 uint32_t fArchMdsNo : 1;
1197
1198 /** Alignment padding / reserved for future use (96 bits total, plus 12 bytes
1199 * prior to the bit fields -> total of 24 bytes) */
1200 uint32_t fPadding0 : 24;
1201
1202
1203 /** @name SVM
1204 * @{ */
1205 /** SVM: Supports Nested-paging. */
1206 uint32_t fSvmNestedPaging : 1;
1207 /** SVM: Support LBR (Last Branch Record) virtualization. */
1208 uint32_t fSvmLbrVirt : 1;
1209 /** SVM: Supports SVM lock. */
1210 uint32_t fSvmSvmLock : 1;
1211 /** SVM: Supports Next RIP save. */
1212 uint32_t fSvmNextRipSave : 1;
1213 /** SVM: Supports TSC rate MSR. */
1214 uint32_t fSvmTscRateMsr : 1;
1215 /** SVM: Supports VMCB clean bits. */
1216 uint32_t fSvmVmcbClean : 1;
1217 /** SVM: Supports Flush-by-ASID. */
1218 uint32_t fSvmFlusbByAsid : 1;
1219 /** SVM: Supports decode assist. */
1220 uint32_t fSvmDecodeAssists : 1;
1221 /** SVM: Supports Pause filter. */
1222 uint32_t fSvmPauseFilter : 1;
1223 /** SVM: Supports Pause filter threshold. */
1224 uint32_t fSvmPauseFilterThreshold : 1;
1225 /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
1226 uint32_t fSvmAvic : 1;
1227 /** SVM: Supports Virtualized VMSAVE/VMLOAD. */
1228 uint32_t fSvmVirtVmsaveVmload : 1;
1229 /** SVM: Supports VGIF (Virtual Global Interrupt Flag). */
1230 uint32_t fSvmVGif : 1;
1231 /** SVM: Supports GMET (Guest Mode Execute Trap Extension). */
1232 uint32_t fSvmGmet : 1;
1233 /** SVM: Supports SSSCheck (SVM Supervisor Shadow Stack). */
1234 uint32_t fSvmSSSCheck : 1;
1235 /** SVM: Supports SPEC_CTRL virtualization. */
1236 uint32_t fSvmSpecCtrl : 1;
1237 /** SVM: Supports HOST_MCE_OVERRIDE. */
1238 uint32_t fSvmHostMceOverride : 1;
1239 /** SVM: Supports TlbiCtl (INVLPGB/TLBSYNC in VMCB and TLBSYNC intercept). */
1240 uint32_t fSvmTlbiCtl : 1;
1241 /** SVM: Padding / reserved for future features (64 bits total w/ max ASID). */
1242 uint32_t fSvmPadding0 : 14;
1243 /** SVM: Maximum supported ASID. */
1244 uint32_t uSvmMaxAsid;
1245 /** @} */
1246
1247
1248 /** VMX: Maximum physical address width. */
1249 uint32_t cVmxMaxPhysAddrWidth : 8;
1250
1251 /** @name VMX basic controls.
1252 * @{ */
1253 /** VMX: Supports INS/OUTS VM-exit instruction info. */
1254 uint32_t fVmxInsOutInfo : 1;
1255 /** @} */
1256
1257 /** @name VMX Pin-based controls.
1258 * @{ */
1259 /** VMX: Supports external interrupt VM-exit. */
1260 uint32_t fVmxExtIntExit : 1;
1261 /** VMX: Supports NMI VM-exit. */
1262 uint32_t fVmxNmiExit : 1;
1263 /** VMX: Supports Virtual NMIs. */
1264 uint32_t fVmxVirtNmi : 1;
1265 /** VMX: Supports preemption timer. */
1266 uint32_t fVmxPreemptTimer : 1;
1267 /** VMX: Supports posted interrupts. */
1268 uint32_t fVmxPostedInt : 1;
1269 /** @} */
1270
1271 /** @name VMX Processor-based controls.
1272 * @{ */
1273 /** VMX: Supports Interrupt-window exiting. */
1274 uint32_t fVmxIntWindowExit : 1;
1275 /** VMX: Supports TSC offsetting. */
1276 uint32_t fVmxTscOffsetting : 1;
1277 /** VMX: Supports HLT exiting. */
1278 uint32_t fVmxHltExit : 1;
1279 /** VMX: Supports INVLPG exiting. */
1280 uint32_t fVmxInvlpgExit : 1;
1281 /** VMX: Supports MWAIT exiting. */
1282 uint32_t fVmxMwaitExit : 1;
1283 /** VMX: Supports RDPMC exiting. */
1284 uint32_t fVmxRdpmcExit : 1;
1285 /** VMX: Supports RDTSC exiting. */
1286 uint32_t fVmxRdtscExit : 1;
1287 /** VMX: Supports CR3-load exiting. */
1288 uint32_t fVmxCr3LoadExit : 1;
1289 /** VMX: Supports CR3-store exiting. */
1290 uint32_t fVmxCr3StoreExit : 1;
1291 /** VMX: Supports tertiary processor-based VM-execution controls. */
1292 uint32_t fVmxTertiaryExecCtls : 1;
1293 /** VMX: Supports CR8-load exiting. */
1294 uint32_t fVmxCr8LoadExit : 1;
1295 /** VMX: Supports CR8-store exiting. */
1296 uint32_t fVmxCr8StoreExit : 1;
1297 /** VMX: Supports TPR shadow. */
1298 uint32_t fVmxUseTprShadow : 1;
1299 /** VMX: Supports NMI-window exiting. */
1300 uint32_t fVmxNmiWindowExit : 1;
1301 /** VMX: Supports Mov-DRx exiting. */
1302 uint32_t fVmxMovDRxExit : 1;
1303 /** VMX: Supports Unconditional I/O exiting. */
1304 uint32_t fVmxUncondIoExit : 1;
1305 /** VMX: Supportgs I/O bitmaps. */
1306 uint32_t fVmxUseIoBitmaps : 1;
1307 /** VMX: Supports Monitor Trap Flag. */
1308 uint32_t fVmxMonitorTrapFlag : 1;
1309 /** VMX: Supports MSR bitmap. */
1310 uint32_t fVmxUseMsrBitmaps : 1;
1311 /** VMX: Supports MONITOR exiting. */
1312 uint32_t fVmxMonitorExit : 1;
1313 /** VMX: Supports PAUSE exiting. */
1314 uint32_t fVmxPauseExit : 1;
1315 /** VMX: Supports secondary processor-based VM-execution controls. */
1316 uint32_t fVmxSecondaryExecCtls : 1;
1317 /** @} */
1318
1319 /** @name VMX Secondary processor-based controls.
1320 * @{ */
1321 /** VMX: Supports virtualize-APIC access. */
1322 uint32_t fVmxVirtApicAccess : 1;
1323 /** VMX: Supports EPT (Extended Page Tables). */
1324 uint32_t fVmxEpt : 1;
1325 /** VMX: Supports descriptor-table exiting. */
1326 uint32_t fVmxDescTableExit : 1;
1327 /** VMX: Supports RDTSCP. */
1328 uint32_t fVmxRdtscp : 1;
1329 /** VMX: Supports virtualize-x2APIC mode. */
1330 uint32_t fVmxVirtX2ApicMode : 1;
1331 /** VMX: Supports VPID. */
1332 uint32_t fVmxVpid : 1;
1333 /** VMX: Supports WBIND exiting. */
1334 uint32_t fVmxWbinvdExit : 1;
1335 /** VMX: Supports Unrestricted guest. */
1336 uint32_t fVmxUnrestrictedGuest : 1;
1337 /** VMX: Supports APIC-register virtualization. */
1338 uint32_t fVmxApicRegVirt : 1;
1339 /** VMX: Supports virtual-interrupt delivery. */
1340 uint32_t fVmxVirtIntDelivery : 1;
1341 /** VMX: Supports Pause-loop exiting. */
1342 uint32_t fVmxPauseLoopExit : 1;
1343 /** VMX: Supports RDRAND exiting. */
1344 uint32_t fVmxRdrandExit : 1;
1345 /** VMX: Supports INVPCID. */
1346 uint32_t fVmxInvpcid : 1;
1347 /** VMX: Supports VM functions. */
1348 uint32_t fVmxVmFunc : 1;
1349 /** VMX: Supports VMCS shadowing. */
1350 uint32_t fVmxVmcsShadowing : 1;
1351 /** VMX: Supports RDSEED exiting. */
1352 uint32_t fVmxRdseedExit : 1;
1353 /** VMX: Supports PML. */
1354 uint32_t fVmxPml : 1;
1355 /** VMX: Supports EPT-violations \#VE. */
1356 uint32_t fVmxEptXcptVe : 1;
1357 /** VMX: Supports conceal VMX from PT. */
1358 uint32_t fVmxConcealVmxFromPt : 1;
1359 /** VMX: Supports XSAVES/XRSTORS. */
1360 uint32_t fVmxXsavesXrstors : 1;
1361 /** VMX: Supports mode-based execute control for EPT. */
1362 uint32_t fVmxModeBasedExecuteEpt : 1;
1363 /** VMX: Supports sub-page write permissions for EPT. */
1364 uint32_t fVmxSppEpt : 1;
1365 /** VMX: Supports Intel PT to output guest-physical addresses for EPT. */
1366 uint32_t fVmxPtEpt : 1;
1367 /** VMX: Supports TSC scaling. */
1368 uint32_t fVmxUseTscScaling : 1;
1369 /** VMX: Supports TPAUSE, UMONITOR, or UMWAIT. */
1370 uint32_t fVmxUserWaitPause : 1;
1371 /** VMX: Supports enclave (ENCLV) exiting. */
1372 uint32_t fVmxEnclvExit : 1;
1373 /** @} */
1374
1375 /** @name VMX Tertiary processor-based controls.
1376 * @{ */
1377 /** VMX: Supports LOADIWKEY exiting. */
1378 uint32_t fVmxLoadIwKeyExit : 1;
1379 /** @} */
1380
1381 /** @name VMX VM-entry controls.
1382 * @{ */
1383 /** VMX: Supports load-debug controls on VM-entry. */
1384 uint32_t fVmxEntryLoadDebugCtls : 1;
1385 /** VMX: Supports IA32e mode guest. */
1386 uint32_t fVmxIa32eModeGuest : 1;
1387 /** VMX: Supports load guest EFER MSR on VM-entry. */
1388 uint32_t fVmxEntryLoadEferMsr : 1;
1389 /** VMX: Supports load guest PAT MSR on VM-entry. */
1390 uint32_t fVmxEntryLoadPatMsr : 1;
1391 /** @} */
1392
1393 /** @name VMX VM-exit controls.
1394 * @{ */
1395 /** VMX: Supports save debug controls on VM-exit. */
1396 uint32_t fVmxExitSaveDebugCtls : 1;
1397 /** VMX: Supports host-address space size. */
1398 uint32_t fVmxHostAddrSpaceSize : 1;
1399 /** VMX: Supports acknowledge external interrupt on VM-exit. */
1400 uint32_t fVmxExitAckExtInt : 1;
1401 /** VMX: Supports save guest PAT MSR on VM-exit. */
1402 uint32_t fVmxExitSavePatMsr : 1;
1403 /** VMX: Supports load hsot PAT MSR on VM-exit. */
1404 uint32_t fVmxExitLoadPatMsr : 1;
1405 /** VMX: Supports save guest EFER MSR on VM-exit. */
1406 uint32_t fVmxExitSaveEferMsr : 1;
1407 /** VMX: Supports load host EFER MSR on VM-exit. */
1408 uint32_t fVmxExitLoadEferMsr : 1;
1409 /** VMX: Supports save VMX preemption timer on VM-exit. */
1410 uint32_t fVmxSavePreemptTimer : 1;
1411 /** VMX: Supports secondary VM-exit controls. */
1412 uint32_t fVmxSecondaryExitCtls : 1;
1413 /** @} */
1414
1415 /** @name VMX Miscellaneous data.
1416 * @{ */
1417 /** VMX: Supports storing EFER.LMA into IA32e-mode guest field on VM-exit. */
1418 uint32_t fVmxExitSaveEferLma : 1;
1419 /** VMX: Whether Intel PT (Processor Trace) is supported in VMX mode or not. */
1420 uint32_t fVmxPt : 1;
1421 /** VMX: Supports VMWRITE to any valid VMCS field incl. read-only fields, otherwise
1422 * VMWRITE cannot modify read-only VM-exit information fields. */
1423 uint32_t fVmxVmwriteAll : 1;
1424 /** VMX: Supports injection of software interrupts, ICEBP on VM-entry for zero
1425 * length instructions. */
1426 uint32_t fVmxEntryInjectSoftInt : 1;
1427 /** @} */
1428
1429 /** VMX: Padding / reserved for future features. */
1430 uint32_t fVmxPadding0 : 16;
1431 /** VMX: Padding / reserved for future, making it a total of 128 bits. */
1432 uint32_t fVmxPadding1;
1433} CPUMFEATURES;
1434#ifndef VBOX_FOR_DTRACE_LIB
1435AssertCompileSize(CPUMFEATURES, 48);
1436#endif
1437/** Pointer to a CPU feature structure. */
1438typedef CPUMFEATURES *PCPUMFEATURES;
1439/** Pointer to a const CPU feature structure. */
1440typedef CPUMFEATURES const *PCCPUMFEATURES;
1441
1442/**
1443 * Chameleon wrapper structure for the host CPU features.
1444 *
1445 * This is used for the globally readable g_CpumHostFeatures variable, which is
1446 * initialized once during VMMR0 load for ring-0 and during CPUMR3Init in
1447 * ring-3. To reflect this immutability after load/init, we use this wrapper
1448 * structure to switch it between const and non-const depending on the context.
1449 * Only two files sees it as non-const (CPUMR0.cpp and CPUM.cpp).
1450 */
1451typedef struct CPUHOSTFEATURES
1452{
1453 CPUMFEATURES
1454#ifndef CPUM_WITH_NONCONST_HOST_FEATURES
1455 const
1456#endif
1457 s;
1458} CPUHOSTFEATURES;
1459/** Pointer to a const host CPU feature structure. */
1460typedef CPUHOSTFEATURES const *PCCPUHOSTFEATURES;
1461
1462/** Host CPU features.
1463 * @note In ring-3, only valid after CPUMR3Init. In ring-0, valid after
1464 * module init. */
1465extern CPUHOSTFEATURES g_CpumHostFeatures;
1466
1467
1468/**
1469 * CPU database entry.
1470 */
1471typedef struct CPUMDBENTRY
1472{
1473 /** The CPU name. */
1474 const char *pszName;
1475 /** The full CPU name. */
1476 const char *pszFullName;
1477 /** The CPU vendor (CPUMCPUVENDOR). */
1478 uint8_t enmVendor;
1479 /** The CPU family. */
1480 uint8_t uFamily;
1481 /** The CPU model. */
1482 uint8_t uModel;
1483 /** The CPU stepping. */
1484 uint8_t uStepping;
1485 /** The microarchitecture. */
1486 CPUMMICROARCH enmMicroarch;
1487 /** Scalable bus frequency used for reporting other frequencies. */
1488 uint64_t uScalableBusFreq;
1489 /** Flags - CPUMDB_F_XXX. */
1490 uint32_t fFlags;
1491 /** The maximum physical address with of the CPU. This should correspond to
1492 * the value in CPUID leaf 0x80000008 when present. */
1493 uint8_t cMaxPhysAddrWidth;
1494 /** The MXCSR mask. */
1495 uint32_t fMxCsrMask;
1496 /** Pointer to an array of CPUID leaves. */
1497 PCCPUMCPUIDLEAF paCpuIdLeaves;
1498 /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
1499 uint32_t cCpuIdLeaves;
1500 /** The method used to deal with unknown CPUID leaves. */
1501 CPUMUNKNOWNCPUID enmUnknownCpuId;
1502 /** The default unknown CPUID value. */
1503 CPUMCPUID DefUnknownCpuId;
1504
1505 /** MSR mask. Several microarchitectures ignore the higher bits of ECX in
1506 * the RDMSR and WRMSR instructions. */
1507 uint32_t fMsrMask;
1508
1509 /** The number of ranges in the table pointed to b paMsrRanges. */
1510 uint32_t cMsrRanges;
1511 /** MSR ranges for this CPU. */
1512 PCCPUMMSRRANGE paMsrRanges;
1513} CPUMDBENTRY;
1514/** Pointer to a const CPU database entry. */
1515typedef CPUMDBENTRY const *PCCPUMDBENTRY;
1516
1517/** @name CPUMDB_F_XXX - CPUDBENTRY::fFlags
1518 * @{ */
1519/** Should execute all in IEM.
1520 * @todo Implement this - currently done in Main... */
1521#define CPUMDB_F_EXECUTE_ALL_IN_IEM RT_BIT_32(0)
1522/** @} */
1523
1524
1525
1526#ifndef VBOX_FOR_DTRACE_LIB
1527
1528#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
1529VMMDECL(int) CPUMCpuIdCollectLeavesX86(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
1530VMMDECL(CPUMCPUVENDOR) CPUMCpuIdDetectX86VendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
1531#endif
1532
1533VMM_INT_DECL(bool) CPUMAssertGuestRFlagsCookie(PVM pVM, PVMCPU pVCpu);
1534
1535
1536/** @name Guest Register Getters.
1537 * @{ */
1538VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR);
1539VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit);
1540VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden);
1541VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu);
1542VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
1543VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu);
1544VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu);
1545VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu);
1546VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu);
1547VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPUCC pVCpu);
1548VMMDECL(int) CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue);
1549VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu);
1550VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu);
1551VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu);
1552VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu);
1553VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu);
1554VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu);
1555VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu);
1556VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu);
1557VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu);
1558VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu);
1559VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu);
1560VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu);
1561VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu);
1562VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu);
1563VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu);
1564VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu);
1565VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu);
1566VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu);
1567VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu);
1568VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu);
1569VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu);
1570VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu);
1571VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu);
1572VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
1573VMMDECL(void) CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t iLeaf, uint32_t iSubLeaf, int f64BitMode,
1574 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
1575VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu);
1576VMM_INT_DECL(uint64_t) CPUMGetGuestIa32FeatCtrl(PCVMCPUCC pVCpu);
1577VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PCVMCPU pVCpu);
1578VMM_INT_DECL(uint64_t) CPUMGetGuestIa32SmmMonitorCtl(PCVMCPUCC pVCpu);
1579VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxEptVpidCap(PCVMCPUCC pVCpu);
1580VMMDECL(VBOXSTRICTRC) CPUMQueryGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *puValue);
1581VMMDECL(VBOXSTRICTRC) CPUMSetGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t uValue);
1582VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM);
1583VMMDECL(CPUMMICROARCH) CPUMGetGuestMicroarch(PCVM pVM);
1584VMMDECL(void) CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth);
1585/** @} */
1586
1587/** @name Guest Register Setters.
1588 * @{ */
1589VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1590VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1591VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
1592VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
1593VMMDECL(int) CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0);
1594VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
1595VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
1596VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
1597VMMDECL(int) CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0);
1598VMMDECL(int) CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1);
1599VMMDECL(int) CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2);
1600VMMDECL(int) CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3);
1601VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
1602VMMDECL(int) CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7);
1603VMMDECL(int) CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value);
1604VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue);
1605VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
1606VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
1607VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
1608VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
1609VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
1610VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
1611VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
1612VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
1613VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
1614VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
1615VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
1616VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
1617VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
1618VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
1619VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
1620VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
1621VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
1622VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1623VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1624VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1625VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
1626VMMDECL(void) CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
1627VMM_INT_DECL(void) CPUMSetGuestTscAux(PVMCPUCC pVCpu, uint64_t uValue);
1628VMM_INT_DECL(uint64_t) CPUMGetGuestTscAux(PVMCPUCC pVCpu);
1629VMM_INT_DECL(void) CPUMSetGuestSpecCtrl(PVMCPUCC pVCpu, uint64_t uValue);
1630VMM_INT_DECL(uint64_t) CPUMGetGuestSpecCtrl(PVMCPUCC pVCpu);
1631VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM);
1632VMM_INT_DECL(void) CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes);
1633VMM_INT_DECL(void) CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes);
1634/** @} */
1635
1636
1637/** @name Misc Guest Predicate Functions.
1638 * @{ */
1639VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu);
1640VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu);
1641VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu);
1642VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu);
1643VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu);
1644VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu);
1645VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu);
1646VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu);
1647VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu);
1648VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu);
1649/** @} */
1650
1651/** @name Nested Hardware-Virtualization Helpers.
1652 * @{ */
1653VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu);
1654VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu);
1655VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1656VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
1657
1658/* SVM helpers. */
1659VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1660VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1661VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx);
1662VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx);
1663VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
1664VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1665 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
1666 PSVMIOIOEXITINFO pIoExitInfo);
1667VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
1668
1669/* VMX helpers. */
1670VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField);
1671VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess);
1672VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3);
1673VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc);
1674VMM_INT_DECL(int) CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick);
1675VMM_INT_DECL(int) CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu);
1676VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr);
1677VMM_INT_DECL(bool) CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu);
1678VMM_INT_DECL(bool) CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu);
1679VMM_INT_DECL(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu);
1680/** @} */
1681
1682#if !defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS) || defined(DOXYGEN_RUNNING)
1683/** @name Inlined Guest Getters and predicates Functions.
1684 * @{ */
1685
1686/**
1687 * Gets valid CR0 bits for the guest.
1688 *
1689 * @returns Valid CR0 bits.
1690 */
1691DECLINLINE(uint64_t) CPUMGetGuestCR0ValidMask(void)
1692{
1693 return ( X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
1694 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
1695 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG);
1696}
1697
1698/**
1699 * Tests if the guest is running in real mode or not.
1700 *
1701 * @returns true if in real mode, otherwise false.
1702 * @param pCtx Current CPU context.
1703 */
1704DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCCPUMCTX pCtx)
1705{
1706 return !(pCtx->cr0 & X86_CR0_PE);
1707}
1708
1709/**
1710 * Tests if the guest is running in real or virtual 8086 mode.
1711 *
1712 * @returns @c true if it is, @c false if not.
1713 * @param pCtx Current CPU context.
1714 */
1715DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCCPUMCTX pCtx)
1716{
1717 return !(pCtx->cr0 & X86_CR0_PE)
1718 || pCtx->eflags.Bits.u1VM; /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
1719}
1720
1721/**
1722 * Tests if the guest is running in virtual 8086 mode.
1723 *
1724 * @returns @c true if it is, @c false if not.
1725 * @param pCtx Current CPU context.
1726 */
1727DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCCPUMCTX pCtx)
1728{
1729 return (pCtx->eflags.Bits.u1VM == 1);
1730}
1731
1732/**
1733 * Tests if the guest is running in paged protected or not.
1734 *
1735 * @returns true if in paged protected mode, otherwise false.
1736 * @param pCtx Current CPU context.
1737 */
1738DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
1739{
1740 return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1741}
1742
1743/**
1744 * Tests if the guest is running in long mode or not.
1745 *
1746 * @returns true if in long mode, otherwise false.
1747 * @param pCtx Current CPU context.
1748 */
1749DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx)
1750{
1751 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1752}
1753
1754VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
1755
1756/**
1757 * Tests if the guest is running in 64 bits mode or not.
1758 *
1759 * @returns true if in 64 bits protected mode, otherwise false.
1760 * @param pCtx Current CPU context.
1761 */
1762DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
1763{
1764 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
1765 return false;
1766 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
1767 return CPUMIsGuestIn64BitCodeSlow(pCtx);
1768 return pCtx->cs.Attr.n.u1Long;
1769}
1770
1771/**
1772 * Tests if the guest has paging enabled or not.
1773 *
1774 * @returns true if paging is enabled, otherwise false.
1775 * @param pCtx Current CPU context.
1776 */
1777DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCCPUMCTX pCtx)
1778{
1779 return !!(pCtx->cr0 & X86_CR0_PG);
1780}
1781
1782/**
1783 * Tests if PAE paging is enabled given the relevant control registers.
1784 *
1785 * @returns @c true if in PAE mode, @c false otherwise.
1786 * @param uCr0 The CR0 value.
1787 * @param uCr4 The CR4 value.
1788 * @param uEferMsr The EFER value.
1789 */
1790DECLINLINE(bool) CPUMIsPaePagingEnabled(uint64_t uCr0, uint64_t uCr4, uint64_t uEferMsr)
1791{
1792 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1793 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1794 return ( (uCr4 & X86_CR4_PAE)
1795 && (uCr0 & X86_CR0_PG)
1796 && !(uEferMsr & MSR_K6_EFER_LMA));
1797}
1798
1799/**
1800 * Tests if the guest is running in PAE mode or not.
1801 *
1802 * @returns @c true if in PAE mode, @c false otherwise.
1803 * @param pCtx Current CPU context.
1804 */
1805DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCCPUMCTX pCtx)
1806{
1807 return CPUMIsPaePagingEnabled(pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1808}
1809
1810/**
1811 * Tests if the guest has AMD SVM enabled or not.
1812 *
1813 * @returns true if SMV is enabled, otherwise false.
1814 * @param pCtx Current CPU context.
1815 */
1816DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
1817{
1818 return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
1819}
1820
1821/**
1822 * Tests if the guest has Intel VT-x enabled or not.
1823 *
1824 * @returns true if VMX is enabled, otherwise false.
1825 * @param pCtx Current CPU context.
1826 */
1827DECLINLINE(bool) CPUMIsGuestVmxEnabled(PCCPUMCTX pCtx)
1828{
1829 return RT_BOOL(pCtx->cr4 & X86_CR4_VMXE);
1830}
1831
1832/**
1833 * Returns the guest's global-interrupt (GIF) flag.
1834 *
1835 * @returns true when global-interrupts are enabled, otherwise false.
1836 * @param pCtx Current CPU context.
1837 */
1838DECLINLINE(bool) CPUMGetGuestGif(PCCPUMCTX pCtx)
1839{
1840 return pCtx->hwvirt.fGif;
1841}
1842
1843/**
1844 * Sets the guest's global-interrupt flag (GIF).
1845 *
1846 * @param pCtx Current CPU context.
1847 * @param fGif The value to set.
1848 */
1849DECLINLINE(void) CPUMSetGuestGif(PCPUMCTX pCtx, bool fGif)
1850{
1851 pCtx->hwvirt.fGif = fGif;
1852}
1853
1854/**
1855 * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS.
1856 *
1857 * This also inhibit NMIs, except perhaps for nested guests.
1858 *
1859 * @returns true if interrupts are inhibited by interrupt shadow, false if not.
1860 * @param pCtx Current guest CPU context.
1861 * @note Requires pCtx->rip to be up to date.
1862 * @note Does NOT clear CPUMCTX_INHIBIT_SHADOW when CPUMCTX::uRipInhibitInt
1863 * differs from CPUMCTX::rip.
1864 */
1865DECLINLINE(bool) CPUMIsInInterruptShadow(PCCPUMCTX pCtx)
1866{
1867 if (!(pCtx->eflags.uBoth & CPUMCTX_INHIBIT_SHADOW))
1868 return false;
1869
1870 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
1871 return pCtx->uRipInhibitInt == pCtx->rip;
1872}
1873
1874/**
1875 * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS,
1876 * updating the state if stale.
1877 *
1878 * This also inhibit NMIs, except perhaps for nested guests.
1879 *
1880 * @retval true if interrupts are inhibited by interrupt shadow.
1881 * @retval false if not.
1882 * @param pCtx Current guest CPU context.
1883 * @note Requires pCtx->rip to be up to date.
1884 */
1885DECLINLINE(bool) CPUMIsInInterruptShadowWithUpdate(PCPUMCTX pCtx)
1886{
1887 if (!(pCtx->eflags.uBoth & CPUMCTX_INHIBIT_SHADOW))
1888 return false;
1889
1890 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
1891 if (pCtx->uRipInhibitInt == pCtx->rip)
1892 return true;
1893
1894 pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
1895 return false;
1896}
1897
1898/**
1899 * Checks if we're in an "interrupt shadow" due to a POP SS or MOV SS
1900 * instruction.
1901 *
1902 * This also inhibit NMIs, except perhaps for nested guests.
1903 *
1904 * @retval true if interrupts are inhibited due to POP/MOV SS.
1905 * @retval false if not.
1906 * @param pCtx Current guest CPU context.
1907 * @note Requires pCtx->rip to be up to date.
1908 * @note Does NOT clear CPUMCTX_INHIBIT_SHADOW when CPUMCTX::uRipInhibitInt
1909 * differs from CPUMCTX::rip.
1910 * @note Both CPUMIsInInterruptShadowAfterSti() and this function may return
1911 * true depending on the execution engine being used.
1912 */
1913DECLINLINE(bool) CPUMIsInInterruptShadowAfterSs(PCCPUMCTX pCtx)
1914{
1915 if (!(pCtx->eflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS))
1916 return false;
1917
1918 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
1919 return pCtx->uRipInhibitInt == pCtx->rip;
1920}
1921
1922/**
1923 * Checks if we're in an "interrupt shadow" due to an STI instruction.
1924 *
1925 * This also inhibit NMIs, except perhaps for nested guests.
1926 *
1927 * @retval true if interrupts are inhibited due to STI.
1928 * @retval false if not.
1929 * @param pCtx Current guest CPU context.
1930 * @note Requires pCtx->rip to be up to date.
1931 * @note Does NOT clear CPUMCTX_INHIBIT_SHADOW when CPUMCTX::uRipInhibitInt
1932 * differs from CPUMCTX::rip.
1933 * @note Both CPUMIsInInterruptShadowAfterSs() and this function may return
1934 * true depending on the execution engine being used.
1935 */
1936DECLINLINE(bool) CPUMIsInInterruptShadowAfterSti(PCCPUMCTX pCtx)
1937{
1938 if (!(pCtx->eflags.uBoth & CPUMCTX_INHIBIT_SHADOW_STI))
1939 return false;
1940
1941 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
1942 return pCtx->uRipInhibitInt == pCtx->rip;
1943}
1944
1945/**
1946 * Sets the "interrupt shadow" flag, after a STI, POP SS or MOV SS instruction.
1947 *
1948 * @param pCtx Current guest CPU context.
1949 * @note Requires pCtx->rip to be up to date.
1950 */
1951DECLINLINE(void) CPUMSetInInterruptShadow(PCPUMCTX pCtx)
1952{
1953 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
1954 pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_SHADOW;
1955 pCtx->uRipInhibitInt = pCtx->rip;
1956}
1957
1958/**
1959 * Sets the "interrupt shadow" flag, after a STI, POP SS or MOV SS instruction,
1960 * extended version.
1961 *
1962 * @param pCtx Current guest CPU context.
1963 * @param rip The RIP for which it is inhibited.
1964 */
1965DECLINLINE(void) CPUMSetInInterruptShadowEx(PCPUMCTX pCtx, uint64_t rip)
1966{
1967 pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_SHADOW;
1968 pCtx->uRipInhibitInt = rip;
1969}
1970
1971/**
1972 * Sets the "interrupt shadow" flag after a POP SS or MOV SS instruction.
1973 *
1974 * @param pCtx Current guest CPU context.
1975 * @note Requires pCtx->rip to be up to date.
1976 */
1977DECLINLINE(void) CPUMSetInInterruptShadowSs(PCPUMCTX pCtx)
1978{
1979 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
1980 pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_SHADOW_SS;
1981 pCtx->uRipInhibitInt = pCtx->rip;
1982}
1983
1984/**
1985 * Sets the "interrupt shadow" flag after an STI instruction.
1986 *
1987 * @param pCtx Current guest CPU context.
1988 * @note Requires pCtx->rip to be up to date.
1989 */
1990DECLINLINE(void) CPUMSetInInterruptShadowSti(PCPUMCTX pCtx)
1991{
1992 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
1993 pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_SHADOW_STI;
1994 pCtx->uRipInhibitInt = pCtx->rip;
1995}
1996
1997/**
1998 * Clears the "interrupt shadow" flag.
1999 *
2000 * @param pCtx Current guest CPU context.
2001 */
2002DECLINLINE(void) CPUMClearInterruptShadow(PCPUMCTX pCtx)
2003{
2004 pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
2005}
2006
2007/**
2008 * Update the "interrupt shadow" flag.
2009 *
2010 * @param pCtx Current guest CPU context.
2011 * @param fInhibited The new state.
2012 * @note Requires pCtx->rip to be up to date.
2013 */
2014DECLINLINE(void) CPUMUpdateInterruptShadow(PCPUMCTX pCtx, bool fInhibited)
2015{
2016 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
2017 if (!fInhibited)
2018 pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
2019 else
2020 {
2021 pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_SHADOW;
2022 pCtx->uRipInhibitInt = pCtx->rip;
2023 }
2024}
2025
2026/**
2027 * Update the "interrupt shadow" flag, extended version.
2028 *
2029 * @returns fInhibited.
2030 * @param pCtx Current guest CPU context.
2031 * @param fInhibited The new state.
2032 * @param rip The RIP for which it is inhibited.
2033 */
2034DECLINLINE(bool) CPUMUpdateInterruptShadowEx(PCPUMCTX pCtx, bool fInhibited, uint64_t rip)
2035{
2036 if (!fInhibited)
2037 pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
2038 else
2039 {
2040 pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_SHADOW;
2041 pCtx->uRipInhibitInt = rip;
2042 }
2043 return fInhibited;
2044}
2045
2046/**
2047 * Update the two "interrupt shadow" flags separately, extended version.
2048 *
2049 * @param pCtx Current guest CPU context.
2050 * @param fInhibitedBySs The new state for the MOV SS & POP SS aspect.
2051 * @param fInhibitedBySti The new state for the STI aspect.
2052 * @param rip The RIP for which it is inhibited.
2053 */
2054DECLINLINE(void) CPUMUpdateInterruptShadowSsStiEx(PCPUMCTX pCtx, bool fInhibitedBySs, bool fInhibitedBySti, uint64_t rip)
2055{
2056 if (!(fInhibitedBySs | fInhibitedBySti))
2057 pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
2058 else
2059 {
2060 pCtx->eflags.uBoth |= (fInhibitedBySs ? CPUMCTX_INHIBIT_SHADOW_SS : UINT32_C(0))
2061 | (fInhibitedBySti ? CPUMCTX_INHIBIT_SHADOW_STI : UINT32_C(0));
2062 pCtx->uRipInhibitInt = rip;
2063 }
2064}
2065
2066/* VMX forward declarations used by extended function versions: */
2067DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx);
2068DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls);
2069DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx);
2070DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking);
2071
2072/**
2073 * Checks whether interrupts, include NMIs, are inhibited by pending NMI
2074 * delivery.
2075 *
2076 * This only checks the inhibit mask.
2077 *
2078 * @retval true if interrupts are inhibited by NMI handling.
2079 * @retval false if interrupts are not inhibited by NMI handling.
2080 * @param pCtx Current guest CPU context.
2081 */
2082DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmi(PCCPUMCTX pCtx)
2083{
2084 return (pCtx->eflags.uBoth & CPUMCTX_INHIBIT_NMI) != 0;
2085}
2086
2087/**
2088 * Extended version of CPUMAreInterruptsInhibitedByNmi() that takes VMX non-root
2089 * mode into account when check whether interrupts are inhibited by NMI.
2090 *
2091 * @retval true if interrupts are inhibited by NMI handling.
2092 * @retval false if interrupts are not inhibited by NMI handling.
2093 * @param pCtx Current guest CPU context.
2094 */
2095DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmiEx(PCCPUMCTX pCtx)
2096{
2097 /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
2098 if ( !CPUMIsGuestInVmxNonRootMode(pCtx)
2099 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
2100 return CPUMAreInterruptsInhibitedByNmi(pCtx);
2101 return CPUMIsGuestVmxVirtNmiBlocking(pCtx);
2102}
2103
2104/**
2105 * Marks interrupts, include NMIs, as inhibited by pending NMI delivery.
2106 *
2107 * @param pCtx Current guest CPU context.
2108 */
2109DECLINLINE(void) CPUMSetInterruptInhibitingByNmi(PCPUMCTX pCtx)
2110{
2111 pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_NMI;
2112}
2113
2114/**
2115 * Extended version of CPUMSetInterruptInhibitingByNmi() that takes VMX non-root
2116 * mode into account when marking interrupts as inhibited by NMI.
2117 *
2118 * @param pCtx Current guest CPU context.
2119 */
2120DECLINLINE(void) CPUMSetInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
2121{
2122 /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
2123 if ( !CPUMIsGuestInVmxNonRootMode(pCtx)
2124 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
2125 CPUMSetInterruptInhibitingByNmi(pCtx);
2126 else
2127 CPUMSetGuestVmxVirtNmiBlocking(pCtx, true);
2128}
2129
2130/**
2131 * Marks interrupts, include NMIs, as no longer inhibited by pending NMI
2132 * delivery.
2133 *
2134 * @param pCtx Current guest CPU context.
2135 */
2136DECLINLINE(void) CPUMClearInterruptInhibitingByNmi(PCPUMCTX pCtx)
2137{
2138 pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_NMI;
2139}
2140
2141/**
2142 * Extended version of CPUMClearInterruptInhibitingByNmi() that takes VMX
2143 * non-root mode into account when doing the updating.
2144 *
2145 * @param pCtx Current guest CPU context.
2146 */
2147DECLINLINE(void) CPUMClearInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
2148{
2149 /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
2150 if ( !CPUMIsGuestInVmxNonRootMode(pCtx)
2151 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
2152 CPUMClearInterruptInhibitingByNmi(pCtx);
2153 else
2154 CPUMSetGuestVmxVirtNmiBlocking(pCtx, false);
2155}
2156
2157/**
2158 * Update whether interrupts, include NMIs, are inhibited by pending NMI
2159 * delivery.
2160 *
2161 * @param pCtx Current guest CPU context.
2162 * @param fInhibited The new state.
2163 */
2164DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmi(PCPUMCTX pCtx, bool fInhibited)
2165{
2166 if (!fInhibited)
2167 pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_NMI;
2168 else
2169 pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_NMI;
2170}
2171
2172/**
2173 * Extended version of CPUMUpdateInterruptInhibitingByNmi() that takes VMX
2174 * non-root mode into account when doing the updating.
2175 *
2176 * @param pCtx Current guest CPU context.
2177 * @param fInhibited The new state.
2178 */
2179DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmiEx(PCPUMCTX pCtx, bool fInhibited)
2180{
2181 /*
2182 * Set the state of guest-NMI blocking in any of the following cases:
2183 * - We're not executing a nested-guest.
2184 * - We're executing an SVM nested-guest[1].
2185 * - We're executing a VMX nested-guest without virtual-NMIs enabled.
2186 *
2187 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
2188 * SVM hypervisors must track NMI blocking themselves by intercepting
2189 * the IRET instruction after injection of an NMI.
2190 */
2191 if ( !CPUMIsGuestInVmxNonRootMode(pCtx)
2192 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
2193 CPUMUpdateInterruptInhibitingByNmi(pCtx, fInhibited);
2194 /*
2195 * Set the state of virtual-NMI blocking, if we are executing a
2196 * VMX nested-guest with virtual-NMIs enabled.
2197 */
2198 else
2199 CPUMSetGuestVmxVirtNmiBlocking(pCtx, fInhibited);
2200}
2201
2202
2203/**
2204 * Checks if we are executing inside an SVM nested hardware-virtualized guest.
2205 *
2206 * @returns @c true if in SVM nested-guest mode, @c false otherwise.
2207 * @param pCtx Current CPU context.
2208 */
2209DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
2210{
2211 /*
2212 * With AMD-V, the VMRUN intercept is a pre-requisite to entering SVM guest-mode.
2213 * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
2214 */
2215#ifndef IN_RC
2216 if ( pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM
2217 || !(pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
2218 return false;
2219 return true;
2220#else
2221 NOREF(pCtx);
2222 return false;
2223#endif
2224}
2225
2226/**
2227 * Checks if the guest is in VMX non-root operation.
2228 *
2229 * @returns @c true if in VMX non-root operation, @c false otherwise.
2230 * @param pCtx Current CPU context.
2231 */
2232DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx)
2233{
2234#ifndef IN_RC
2235 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
2236 return false;
2237 Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
2238 return pCtx->hwvirt.vmx.fInVmxNonRootMode;
2239#else
2240 NOREF(pCtx);
2241 return false;
2242#endif
2243}
2244
2245/**
2246 * Checks if we are executing inside an SVM or VMX nested hardware-virtualized
2247 * guest.
2248 *
2249 * @returns @c true if in nested-guest mode, @c false otherwise.
2250 * @param pCtx Current CPU context.
2251 */
2252DECLINLINE(bool) CPUMIsGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
2253{
2254#if 0
2255 return CPUMIsGuestInVmxNonRootMode(pCtx) || CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
2256#else
2257 if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_NONE)
2258 return false;
2259 if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX)
2260 {
2261 Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
2262 return pCtx->hwvirt.vmx.fInVmxNonRootMode;
2263 }
2264 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
2265 return RT_BOOL(pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN);
2266#endif
2267}
2268
2269/**
2270 * Checks if we are executing inside an SVM or VMX nested hardware-virtualized
2271 * guest.
2272 *
2273 * @retval CPUMHWVIRT_NONE if not in SVM or VMX non-root mode.
2274 * @retval CPUMHWVIRT_VMX if in VMX non-root mode.
2275 * @retval CPUMHWVIRT_SVM if in SVM non-root mode.
2276 * @param pCtx Current CPU context.
2277 */
2278DECLINLINE(CPUMHWVIRT) CPUMGetGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
2279{
2280 if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_NONE)
2281 return CPUMHWVIRT_NONE;
2282 if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX)
2283 {
2284 Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
2285 return pCtx->hwvirt.vmx.fInVmxNonRootMode ? CPUMHWVIRT_VMX : CPUMHWVIRT_NONE;
2286 }
2287 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
2288 return pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN ? CPUMHWVIRT_SVM : CPUMHWVIRT_NONE;
2289}
2290
2291/**
2292 * Checks if the guest is in VMX root operation.
2293 *
2294 * @returns @c true if in VMX root operation, @c false otherwise.
2295 * @param pCtx Current CPU context.
2296 */
2297DECLINLINE(bool) CPUMIsGuestInVmxRootMode(PCCPUMCTX pCtx)
2298{
2299#ifndef IN_RC
2300 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
2301 return false;
2302 return pCtx->hwvirt.vmx.fInVmxRootMode;
2303#else
2304 NOREF(pCtx);
2305 return false;
2306#endif
2307}
2308
2309# ifndef IN_RC
2310
2311/**
2312 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
2313 * active.
2314 *
2315 * @returns @c true if in intercept is set, @c false otherwise.
2316 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2317 * @param pCtx Current CPU context.
2318 * @param fIntercept The SVM control/instruction intercept, see
2319 * SVM_CTRL_INTERCEPT_*.
2320 */
2321DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fIntercept)
2322{
2323 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2324 return false;
2325 uint64_t u64Intercepts;
2326 if (!HMGetGuestSvmCtrlIntercepts(pVCpu, &u64Intercepts))
2327 u64Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl;
2328 return RT_BOOL(u64Intercepts & fIntercept);
2329}
2330
2331/**
2332 * Checks if the nested-guest VMCB has the specified CR read intercept active.
2333 *
2334 * @returns @c true if in intercept is set, @c false otherwise.
2335 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2336 * @param pCtx Current CPU context.
2337 * @param uCr The CR register number (0 to 15).
2338 */
2339DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
2340{
2341 Assert(uCr < 16);
2342 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2343 return false;
2344 uint16_t u16Intercepts;
2345 if (!HMGetGuestSvmReadCRxIntercepts(pVCpu, &u16Intercepts))
2346 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptRdCRx;
2347 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
2348}
2349
2350/**
2351 * Checks if the nested-guest VMCB has the specified CR write intercept active.
2352 *
2353 * @returns @c true if in intercept is set, @c false otherwise.
2354 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2355 * @param pCtx Current CPU context.
2356 * @param uCr The CR register number (0 to 15).
2357 */
2358DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
2359{
2360 Assert(uCr < 16);
2361 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2362 return false;
2363 uint16_t u16Intercepts;
2364 if (!HMGetGuestSvmWriteCRxIntercepts(pVCpu, &u16Intercepts))
2365 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptWrCRx;
2366 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
2367}
2368
2369/**
2370 * Checks if the nested-guest VMCB has the specified DR read intercept active.
2371 *
2372 * @returns @c true if in intercept is set, @c false otherwise.
2373 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2374 * @param pCtx Current CPU context.
2375 * @param uDr The DR register number (0 to 15).
2376 */
2377DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
2378{
2379 Assert(uDr < 16);
2380 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2381 return false;
2382 uint16_t u16Intercepts;
2383 if (!HMGetGuestSvmReadDRxIntercepts(pVCpu, &u16Intercepts))
2384 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptRdDRx;
2385 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
2386}
2387
2388/**
2389 * Checks if the nested-guest VMCB has the specified DR write intercept active.
2390 *
2391 * @returns @c true if in intercept is set, @c false otherwise.
2392 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2393 * @param pCtx Current CPU context.
2394 * @param uDr The DR register number (0 to 15).
2395 */
2396DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
2397{
2398 Assert(uDr < 16);
2399 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2400 return false;
2401 uint16_t u16Intercepts;
2402 if (!HMGetGuestSvmWriteDRxIntercepts(pVCpu, &u16Intercepts))
2403 u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptWrDRx;
2404 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
2405}
2406
2407/**
2408 * Checks if the nested-guest VMCB has the specified exception intercept active.
2409 *
2410 * @returns @c true if in intercept is active, @c false otherwise.
2411 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2412 * @param pCtx Current CPU context.
2413 * @param uVector The exception / interrupt vector.
2414 */
2415DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
2416{
2417 Assert(uVector <= X86_XCPT_LAST);
2418 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2419 return false;
2420 uint32_t u32Intercepts;
2421 if (!HMGetGuestSvmXcptIntercepts(pVCpu, &u32Intercepts))
2422 u32Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u32InterceptXcpt;
2423 return RT_BOOL(u32Intercepts & RT_BIT(uVector));
2424}
2425
2426/**
2427 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
2428 *
2429 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
2430 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2431 * @param pCtx Current CPU context.
2432 *
2433 * @remarks Should only be called when SVM feature is exposed to the guest.
2434 */
2435DECLINLINE(bool) CPUMIsGuestSvmVirtIntrMasking(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2436{
2437 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2438 return false;
2439 bool fVIntrMasking;
2440 if (!HMGetGuestSvmVirtIntrMasking(pVCpu, &fVIntrMasking))
2441 fVIntrMasking = pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u1VIntrMasking;
2442 return fVIntrMasking;
2443}
2444
2445/**
2446 * Checks if the nested-guest VMCB has nested-paging enabled.
2447 *
2448 * @returns @c true if nested-paging is enabled, @c false otherwise.
2449 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2450 * @param pCtx Current CPU context.
2451 *
2452 * @remarks Should only be called when SVM feature is exposed to the guest.
2453 */
2454DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2455{
2456 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2457 return false;
2458 bool fNestedPaging;
2459 if (!HMGetGuestSvmNestedPaging(pVCpu, &fNestedPaging))
2460 fNestedPaging = pCtx->hwvirt.svm.Vmcb.ctrl.NestedPagingCtrl.n.u1NestedPaging;
2461 return fNestedPaging;
2462}
2463
2464/**
2465 * Gets the nested-guest VMCB pause-filter count.
2466 *
2467 * @returns The pause-filter count.
2468 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2469 * @param pCtx Current CPU context.
2470 *
2471 * @remarks Should only be called when SVM feature is exposed to the guest.
2472 */
2473DECLINLINE(uint16_t) CPUMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2474{
2475 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
2476 return false;
2477 uint16_t u16PauseFilterCount;
2478 if (!HMGetGuestSvmPauseFilterCount(pVCpu, &u16PauseFilterCount))
2479 u16PauseFilterCount = pCtx->hwvirt.svm.Vmcb.ctrl.u16PauseFilterCount;
2480 return u16PauseFilterCount;
2481}
2482
2483/**
2484 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
2485 *
2486 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2487 * @param pCtx Current CPU context.
2488 * @param cbInstr The length of the current instruction in bytes.
2489 *
2490 * @remarks Should only be called when SVM feature is exposed to the guest.
2491 */
2492DECLINLINE(void) CPUMGuestSvmUpdateNRip(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr)
2493{
2494 RT_NOREF(pVCpu);
2495 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
2496 pCtx->hwvirt.svm.Vmcb.ctrl.u64NextRIP = pCtx->rip + cbInstr;
2497}
2498
2499/**
2500 * Checks whether one of the given Pin-based VM-execution controls are set when
2501 * executing a nested-guest.
2502 *
2503 * @returns @c true if set, @c false otherwise.
2504 * @param pCtx Current CPU context.
2505 * @param uPinCtls The Pin-based VM-execution controls to check.
2506 *
2507 * @remarks This does not check if all given controls are set if more than one
2508 * control is passed in @a uPinCtl.
2509 */
2510DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls)
2511{
2512 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2513 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & uPinCtls);
2514}
2515
2516/**
2517 * Checks whether one of the given Processor-based VM-execution controls are set
2518 * when executing a nested-guest.
2519 *
2520 * @returns @c true if set, @c false otherwise.
2521 * @param pCtx Current CPU context.
2522 * @param uProcCtls The Processor-based VM-execution controls to check.
2523 *
2524 * @remarks This does not check if all given controls are set if more than one
2525 * control is passed in @a uProcCtls.
2526 */
2527DECLINLINE(bool) CPUMIsGuestVmxProcCtlsSet(PCCPUMCTX pCtx, uint32_t uProcCtls)
2528{
2529 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2530 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls & uProcCtls);
2531}
2532
2533/**
2534 * Checks whether one of the given Secondary Processor-based VM-execution controls
2535 * are set when executing a nested-guest.
2536 *
2537 * @returns @c true if set, @c false otherwise.
2538 * @param pCtx Current CPU context.
2539 * @param uProcCtls2 The Secondary Processor-based VM-execution controls to
2540 * check.
2541 *
2542 * @remarks This does not check if all given controls are set if more than one
2543 * control is passed in @a uProcCtls2.
2544 */
2545DECLINLINE(bool) CPUMIsGuestVmxProcCtls2Set(PCCPUMCTX pCtx, uint32_t uProcCtls2)
2546{
2547 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2548 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls2 & uProcCtls2);
2549}
2550
2551/**
2552 * Checks whether one of the given Tertiary Processor-based VM-execution controls
2553 * are set when executing a nested-guest.
2554 *
2555 * @returns @c true if set, @c false otherwise.
2556 * @param pCtx Current CPU context.
2557 * @param uProcCtls3 The Tertiary Processor-based VM-execution controls to
2558 * check.
2559 *
2560 * @remarks This does not check if all given controls are set if more than one
2561 * control is passed in @a uProcCtls3.
2562 */
2563DECLINLINE(bool) CPUMIsGuestVmxProcCtls3Set(PCCPUMCTX pCtx, uint64_t uProcCtls3)
2564{
2565 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2566 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u64ProcCtls3.u & uProcCtls3);
2567}
2568
2569/**
2570 * Checks whether one of the given VM-exit controls are set when executing a
2571 * nested-guest.
2572 *
2573 * @returns @c true if set, @c false otherwise.
2574 * @param pCtx Current CPU context.
2575 * @param uExitCtls The VM-exit controls to check.
2576 *
2577 * @remarks This does not check if all given controls are set if more than one
2578 * control is passed in @a uExitCtls.
2579 */
2580DECLINLINE(bool) CPUMIsGuestVmxExitCtlsSet(PCCPUMCTX pCtx, uint32_t uExitCtls)
2581{
2582 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2583 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ExitCtls & uExitCtls);
2584}
2585
2586/**
2587 * Checks whether one of the given VM-entry controls are set when executing a
2588 * nested-guest.
2589 *
2590 * @returns @c true if set, @c false otherwise.
2591 * @param pCtx Current CPU context.
2592 * @param uEntryCtls The VM-entry controls to check.
2593 *
2594 * @remarks This does not check if all given controls are set if more than one
2595 * control is passed in @a uEntryCtls.
2596 */
2597DECLINLINE(bool) CPUMIsGuestVmxEntryCtlsSet(PCCPUMCTX pCtx, uint32_t uEntryCtls)
2598{
2599 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2600 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32EntryCtls & uEntryCtls);
2601}
2602
2603/**
2604 * Checks whether events injected in the nested-guest are subject to VM-exit checks.
2605 *
2606 * @returns @c true if set, @c false otherwise.
2607 * @param pCtx Current CPU context.
2608 */
2609DECLINLINE(bool) CPUMIsGuestVmxInterceptEvents(PCCPUMCTX pCtx)
2610{
2611 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2612 return pCtx->hwvirt.vmx.fInterceptEvents;
2613}
2614
2615/**
2616 * Sets whether events injected in the nested-guest are subject to VM-exit checks.
2617 *
2618 * @param pCtx Current CPU context.
2619 * @param fIntercept Whether to subject injected events to VM-exits or not.
2620 */
2621DECLINLINE(void) CPUMSetGuestVmxInterceptEvents(PCPUMCTX pCtx, bool fInterceptEvents)
2622{
2623 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2624 pCtx->hwvirt.vmx.fInterceptEvents = fInterceptEvents;
2625}
2626
2627/**
2628 * Checks whether the given exception causes a VM-exit.
2629 *
2630 * The exception type include hardware exceptions, software exceptions (#BP, #OF)
2631 * and privileged software exceptions (#DB generated by INT1/ICEBP).
2632 *
2633 * Software interrupts do -not- cause VM-exits and hence must not be used with this
2634 * function.
2635 *
2636 * @returns @c true if the exception causes a VM-exit, @c false otherwise.
2637 * @param pCtx Current CPU context.
2638 * @param uVector The exception vector.
2639 * @param uErrCode The error code associated with the exception. Pass 0 if not
2640 * applicable.
2641 */
2642DECLINLINE(bool) CPUMIsGuestVmxXcptInterceptSet(PCCPUMCTX pCtx, uint8_t uVector, uint32_t uErrCode)
2643{
2644 Assert(uVector <= X86_XCPT_LAST);
2645
2646 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2647
2648 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
2649 if (uVector == X86_XCPT_NMI)
2650 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
2651
2652 /* Page-faults are subject to masking using its error code. */
2653 uint32_t fXcptBitmap = pCtx->hwvirt.vmx.Vmcs.u32XcptBitmap;
2654 if (uVector == X86_XCPT_PF)
2655 {
2656 uint32_t const fXcptPFMask = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMask;
2657 uint32_t const fXcptPFMatch = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMatch;
2658 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
2659 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
2660 }
2661
2662 /* Consult the exception bitmap for all other exceptions. */
2663 if (fXcptBitmap & RT_BIT(uVector))
2664 return true;
2665 return false;
2666}
2667
2668
2669/**
2670 * Checks whether the guest is in VMX non-root mode and using EPT paging.
2671 *
2672 * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
2673 * @param pCtx Current CPU context.
2674 */
2675DECLINLINE(bool) CPUMIsGuestVmxEptPagingEnabledEx(PCCPUMCTX pCtx)
2676{
2677 return CPUMIsGuestInVmxNonRootMode(pCtx)
2678 && CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_EPT);
2679}
2680
2681
2682/**
2683 * Implements VMSucceed for VMX instruction success.
2684 *
2685 * @param pCtx Current CPU context.
2686 */
2687DECLINLINE(void) CPUMSetGuestVmxVmSucceed(PCPUMCTX pCtx)
2688{
2689 pCtx->eflags.uBoth &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2690}
2691
2692/**
2693 * Implements VMFailInvalid for VMX instruction failure.
2694 *
2695 * @param pCtx Current CPU context.
2696 */
2697DECLINLINE(void) CPUMSetGuestVmxVmFailInvalid(PCPUMCTX pCtx)
2698{
2699 pCtx->eflags.uBoth &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2700 pCtx->eflags.uBoth |= X86_EFL_CF;
2701}
2702
2703/**
2704 * Implements VMFailValid for VMX instruction failure.
2705 *
2706 * @param pCtx Current CPU context.
2707 * @param enmInsErr The VM instruction error.
2708 */
2709DECLINLINE(void) CPUMSetGuestVmxVmFailValid(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2710{
2711 pCtx->eflags.uBoth &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2712 pCtx->eflags.uBoth |= X86_EFL_ZF;
2713 pCtx->hwvirt.vmx.Vmcs.u32RoVmInstrError = enmInsErr;
2714}
2715
2716/**
2717 * Implements VMFail for VMX instruction failure.
2718 *
2719 * @param pCtx Current CPU context.
2720 * @param enmInsErr The VM instruction error.
2721 */
2722DECLINLINE(void) CPUMSetGuestVmxVmFail(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2723{
2724 if (pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
2725 CPUMSetGuestVmxVmFailValid(pCtx, enmInsErr);
2726 else
2727 CPUMSetGuestVmxVmFailInvalid(pCtx);
2728}
2729
2730/**
2731 * Returns the guest-physical address of the APIC-access page when executing a
2732 * nested-guest.
2733 *
2734 * @returns The APIC-access page guest-physical address.
2735 * @param pCtx Current CPU context.
2736 */
2737DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddrEx(PCCPUMCTX pCtx)
2738{
2739 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2740 return pCtx->hwvirt.vmx.Vmcs.u64AddrApicAccess.u;
2741}
2742
2743/**
2744 * Gets the nested-guest CR0 subject to the guest/host mask and the read-shadow.
2745 *
2746 * @returns The nested-guest CR0.
2747 * @param pCtx Current CPU context.
2748 * @param fGstHostMask The CR0 guest/host mask to use.
2749 */
2750DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr0(PCCPUMCTX pCtx, uint64_t fGstHostMask)
2751{
2752 /*
2753 * For each CR0 bit owned by the host, the corresponding bit from the
2754 * CR0 read shadow is loaded. For each CR0 bit that is not owned by the host,
2755 * the corresponding bit from the guest CR0 is loaded.
2756 *
2757 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2758 */
2759 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2760 uint64_t const uGstCr0 = pCtx->cr0;
2761 uint64_t const fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2762 return (fReadShadow & fGstHostMask) | (uGstCr0 & ~fGstHostMask);
2763}
2764
2765/**
2766 * Gets the nested-guest CR4 subject to the guest/host mask and the read-shadow.
2767 *
2768 * @returns The nested-guest CR4.
2769 * @param pCtx Current CPU context.
2770 * @param fGstHostMask The CR4 guest/host mask to use.
2771 */
2772DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr4(PCCPUMCTX pCtx, uint64_t fGstHostMask)
2773{
2774 /*
2775 * For each CR4 bit owned by the host, the corresponding bit from the
2776 * CR4 read shadow is loaded. For each CR4 bit that is not owned by the host,
2777 * the corresponding bit from the guest CR4 is loaded.
2778 *
2779 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2780 */
2781 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2782 uint64_t const uGstCr4 = pCtx->cr4;
2783 uint64_t const fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
2784 return (fReadShadow & fGstHostMask) | (uGstCr4 & ~fGstHostMask);
2785}
2786
2787/**
2788 * Checks whether the LMSW access causes a VM-exit or not.
2789 *
2790 * @returns @c true if the LMSW access causes a VM-exit, @c false otherwise.
2791 * @param pCtx Current CPU context.
2792 * @param uNewMsw The LMSW source operand (the Machine Status Word).
2793 */
2794DECLINLINE(bool) CPUMIsGuestVmxLmswInterceptSet(PCCPUMCTX pCtx, uint16_t uNewMsw)
2795{
2796 /*
2797 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
2798 *
2799 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
2800 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2801 */
2802 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2803
2804 uint32_t const fGstHostMask = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
2805 uint32_t const fReadShadow = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2806
2807 /*
2808 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
2809 * CR0.PE case first, before the rest of the bits in the MSW.
2810 *
2811 * If CR0.PE is owned by the host and CR0.PE differs between the
2812 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
2813 */
2814 if ( (fGstHostMask & X86_CR0_PE)
2815 && (uNewMsw & X86_CR0_PE)
2816 && !(fReadShadow & X86_CR0_PE))
2817 return true;
2818
2819 /*
2820 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
2821 * bits differ between the MSW (source operand) and the read-shadow, we must
2822 * cause a VM-exit.
2823 */
2824 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2825 if ((fReadShadow & fGstHostLmswMask) != (uNewMsw & fGstHostLmswMask))
2826 return true;
2827
2828 return false;
2829}
2830
2831/**
2832 * Checks whether the Mov-to-CR0/CR4 access causes a VM-exit or not.
2833 *
2834 * @returns @c true if the Mov CRX access causes a VM-exit, @c false otherwise.
2835 * @param pCtx Current CPU context.
2836 * @param iCrReg The control register number (must be 0 or 4).
2837 * @param uNewCrX The CR0/CR4 value being written.
2838 */
2839DECLINLINE(bool) CPUMIsGuestVmxMovToCr0Cr4InterceptSet(PCCPUMCTX pCtx, uint8_t iCrReg, uint64_t uNewCrX)
2840{
2841 /*
2842 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
2843 * corresponding bits differ between the source operand and the read-shadow,
2844 * we must cause a VM-exit.
2845 *
2846 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2847 */
2848 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2849 Assert(iCrReg == 0 || iCrReg == 4);
2850
2851 uint64_t fGstHostMask;
2852 uint64_t fReadShadow;
2853 if (iCrReg == 0)
2854 {
2855 fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
2856 fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2857 }
2858 else
2859 {
2860 fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr4Mask.u;
2861 fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
2862 }
2863
2864 if ((fReadShadow & fGstHostMask) != (uNewCrX & fGstHostMask))
2865 {
2866 Assert(fGstHostMask != 0);
2867 return true;
2868 }
2869
2870 return false;
2871}
2872
2873/**
2874 * Returns whether the guest has an active, current VMCS.
2875 *
2876 * @returns @c true if the guest has an active, current VMCS, @c false otherwise.
2877 * @param pCtx Current CPU context.
2878 */
2879DECLINLINE(bool) CPUMIsGuestVmxCurrentVmcsValid(PCCPUMCTX pCtx)
2880{
2881 return pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS;
2882}
2883
2884# endif /* !IN_RC */
2885
2886/**
2887 * Checks whether the VMX nested-guest is in a state to receive physical (APIC)
2888 * interrupts.
2889 *
2890 * @returns @c true if it's ready, @c false otherwise.
2891 * @param pCtx The guest-CPU context.
2892 */
2893DECLINLINE(bool) CPUMIsGuestVmxPhysIntrEnabled(PCCPUMCTX pCtx)
2894{
2895#ifdef IN_RC
2896 AssertReleaseFailedReturn(false);
2897#else
2898 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2899 if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
2900 return true;
2901 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2902#endif
2903}
2904
2905/**
2906 * Checks whether the VMX nested-guest is blocking virtual-NMIs.
2907 *
2908 * @returns @c true if it's blocked, @c false otherwise.
2909 * @param pCtx The guest-CPU context.
2910 */
2911DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx)
2912{
2913#ifdef IN_RC
2914 RT_NOREF(pCtx);
2915 AssertReleaseFailedReturn(false);
2916#else
2917 /*
2918 * Return the state of virtual-NMI blocking, if we are executing a
2919 * VMX nested-guest with virtual-NMIs enabled.
2920 */
2921 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2922 Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
2923 return pCtx->hwvirt.vmx.fVirtNmiBlocking;
2924#endif
2925}
2926
2927/**
2928 * Sets or clears VMX nested-guest virtual-NMI blocking.
2929 *
2930 * @param pCtx The guest-CPU context.
2931 * @param fBlocking Whether virtual-NMI blocking is in effect or not.
2932 */
2933DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking)
2934{
2935#ifdef IN_RC
2936 RT_NOREF2(pCtx, fBlocking);
2937 AssertReleaseFailedReturnVoid();
2938#else
2939 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2940 Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
2941 pCtx->hwvirt.vmx.fVirtNmiBlocking = fBlocking;
2942#endif
2943}
2944
2945/**
2946 * Checks whether the VMX nested-guest is in a state to receive virtual interrupts
2947 * (those injected with the "virtual-interrupt delivery" feature).
2948 *
2949 * @returns @c true if it's ready, @c false otherwise.
2950 * @param pCtx The guest-CPU context.
2951 */
2952DECLINLINE(bool) CPUMIsGuestVmxVirtIntrEnabled(PCCPUMCTX pCtx)
2953{
2954#ifdef IN_RC
2955 RT_NOREF2(pCtx);
2956 AssertReleaseFailedReturn(false);
2957#else
2958 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2959 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2960#endif
2961}
2962
2963/** @} */
2964#endif /* !IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS || DOXYGEN_RUNNING */
2965
2966
2967
2968/** @name Hypervisor Register Getters.
2969 * @{ */
2970VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu);
2971VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu);
2972VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu);
2973VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu);
2974VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu);
2975VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu);
2976VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
2977/** @} */
2978
2979/** @name Hypervisor Register Setters.
2980 * @{ */
2981VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
2982VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
2983VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
2984VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
2985VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
2986VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
2987VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
2988VMMDECL(int) CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg);
2989/** @} */
2990
2991VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
2992#ifdef VBOX_INCLUDED_vmm_cpumctx_h
2993VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu);
2994#endif
2995
2996/** @name Changed flags.
2997 * These flags are used to keep track of which important register that
2998 * have been changed since last they were reset. The only one allowed
2999 * to clear them is REM!
3000 *
3001 * @todo This is obsolete, but remains as it will be refactored for coordinating
3002 * IEM and NEM/HM later. Probably.
3003 * @{
3004 */
3005#define CPUM_CHANGED_FPU_REM RT_BIT(0)
3006#define CPUM_CHANGED_CR0 RT_BIT(1)
3007#define CPUM_CHANGED_CR4 RT_BIT(2)
3008#define CPUM_CHANGED_GLOBAL_TLB_FLUSH RT_BIT(3)
3009#define CPUM_CHANGED_CR3 RT_BIT(4)
3010#define CPUM_CHANGED_GDTR RT_BIT(5)
3011#define CPUM_CHANGED_IDTR RT_BIT(6)
3012#define CPUM_CHANGED_LDTR RT_BIT(7)
3013#define CPUM_CHANGED_TR RT_BIT(8) /**@< Currently unused. */
3014#define CPUM_CHANGED_SYSENTER_MSR RT_BIT(9)
3015#define CPUM_CHANGED_HIDDEN_SEL_REGS RT_BIT(10) /**@< Currently unused. */
3016#define CPUM_CHANGED_CPUID RT_BIT(11)
3017#define CPUM_CHANGED_ALL ( CPUM_CHANGED_FPU_REM \
3018 | CPUM_CHANGED_CR0 \
3019 | CPUM_CHANGED_CR4 \
3020 | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
3021 | CPUM_CHANGED_CR3 \
3022 | CPUM_CHANGED_GDTR \
3023 | CPUM_CHANGED_IDTR \
3024 | CPUM_CHANGED_LDTR \
3025 | CPUM_CHANGED_TR \
3026 | CPUM_CHANGED_SYSENTER_MSR \
3027 | CPUM_CHANGED_HIDDEN_SEL_REGS \
3028 | CPUM_CHANGED_CPUID )
3029/** @} */
3030
3031VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
3032VMMDECL(bool) CPUMSupportsXSave(PVM pVM);
3033VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM);
3034VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM);
3035VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
3036VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
3037VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
3038VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
3039VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
3040VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
3041VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu);
3042VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu);
3043VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu);
3044VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
3045VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM);
3046VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM);
3047VMMDECL(uint64_t) CPUMGetGuestEferMsrValidMask(PVM pVM);
3048VMMDECL(int) CPUMIsGuestEferMsrWriteValid(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
3049 uint64_t *puValidEfer);
3050VMMDECL(void) CPUMSetGuestEferMsrNoChecks(PVMCPUCC pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
3051VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue);
3052
3053
3054/** Guest CPU interruptibility level, see CPUMGetGuestInterruptibility(). */
3055typedef enum CPUMINTERRUPTIBILITY
3056{
3057 CPUMINTERRUPTIBILITY_INVALID = 0,
3058 CPUMINTERRUPTIBILITY_UNRESTRAINED,
3059 CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
3060 CPUMINTERRUPTIBILITY_INT_DISABLED,
3061 CPUMINTERRUPTIBILITY_INT_INHIBITED, /**< @todo rename as it inhibits NMIs too. */
3062 CPUMINTERRUPTIBILITY_NMI_INHIBIT,
3063 CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
3064 CPUMINTERRUPTIBILITY_END,
3065 CPUMINTERRUPTIBILITY_32BIT_HACK = 0x7fffffff
3066} CPUMINTERRUPTIBILITY;
3067
3068VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
3069
3070/** @name Typical scalable bus frequency values.
3071 * @{ */
3072/** Special internal value indicating that we don't know the frequency.
3073 * @internal */
3074#define CPUM_SBUSFREQ_UNKNOWN UINT64_C(1)
3075#define CPUM_SBUSFREQ_100MHZ UINT64_C(100000000)
3076#define CPUM_SBUSFREQ_133MHZ UINT64_C(133333333)
3077#define CPUM_SBUSFREQ_167MHZ UINT64_C(166666666)
3078#define CPUM_SBUSFREQ_200MHZ UINT64_C(200000000)
3079#define CPUM_SBUSFREQ_267MHZ UINT64_C(266666666)
3080#define CPUM_SBUSFREQ_333MHZ UINT64_C(333333333)
3081#define CPUM_SBUSFREQ_400MHZ UINT64_C(400000000)
3082/** @} */
3083
3084
3085#ifdef IN_RING3
3086/** @defgroup grp_cpum_r3 The CPUM ring-3 API
3087 * @{
3088 */
3089
3090VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
3091
3092VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
3093VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
3094VMMR3_INT_DECL(PCCPUMCPUIDLEAF) CPUMR3CpuIdGetPtr(PVM pVM, uint32_t *pcLeaves);
3095VMMDECL(CPUMMICROARCH) CPUMCpuIdDetermineX86MicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
3096 uint8_t bModel, uint8_t bStepping);
3097VMMDECL(const char *) CPUMMicroarchName(CPUMMICROARCH enmMicroarch);
3098VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
3099VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
3100#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
3101VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void);
3102#endif
3103
3104VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
3105
3106VMMR3DECL(uint32_t) CPUMR3DbGetEntries(void);
3107/** Pointer to CPUMR3DbGetEntries. */
3108typedef DECLCALLBACKPTR(uint32_t, PFNCPUMDBGETENTRIES, (void));
3109VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByIndex(uint32_t idxCpuDb);
3110/** Pointer to CPUMR3DbGetEntryByIndex. */
3111typedef DECLCALLBACKPTR(PCCPUMDBENTRY, PFNCPUMDBGETENTRYBYINDEX, (uint32_t idxCpuDb));
3112VMMR3DECL(PCCPUMDBENTRY) CPUMR3DbGetEntryByName(const char *pszName);
3113/** Pointer to CPUMR3DbGetEntryByName. */
3114typedef DECLCALLBACKPTR(PCCPUMDBENTRY, PFNCPUMDBGETENTRYBYNAME, (const char *pszName));
3115
3116VMMR3_INT_DECL(void) CPUMR3NemActivateGuestDebugState(PVMCPUCC pVCpu);
3117VMMR3_INT_DECL(void) CPUMR3NemActivateHyperDebugState(PVMCPUCC pVCpu);
3118/** @} */
3119#endif /* IN_RING3 */
3120
3121#ifdef IN_RING0
3122/** @defgroup grp_cpum_r0 The CPUM ring-0 API
3123 * @{
3124 */
3125VMMR0_INT_DECL(int) CPUMR0ModuleInit(void);
3126VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void);
3127VMMR0_INT_DECL(void) CPUMR0InitPerVMData(PGVM pGVM);
3128VMMR0_INT_DECL(int) CPUMR0InitVM(PVMCC pVM);
3129DECLASM(void) CPUMR0RegisterVCpuThread(PVMCPUCC pVCpu);
3130DECLASM(void) CPUMR0TouchHostFpu(void);
3131VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVMCC pVM, PVMCPUCC pVCpu);
3132VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVMCC pVM, PVMCPUCC pVCpu);
3133VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu);
3134VMMR0_INT_DECL(int) CPUMR0SaveHostDebugState(PVMCC pVM, PVMCPUCC pVCpu);
3135VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu, bool fDr6);
3136VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPUCC pVCpu, bool fDr6);
3137
3138VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPUCC pVCpu, bool fDr6);
3139VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPUCC pVCpu, bool fDr6);
3140/** @} */
3141#endif /* IN_RING0 */
3142
3143/** @defgroup grp_cpum_rz The CPUM raw-mode and ring-0 context API
3144 * @{
3145 */
3146VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPUCC pVCpu);
3147VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPUCC pVCpu);
3148VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPUCC pVCpu);
3149VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPUCC pVCpu);
3150VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPUCC pVCpu);
3151/** @} */
3152
3153
3154#endif /* !VBOX_FOR_DTRACE_LIB */
3155/** @} */
3156RT_C_DECLS_END
3157
3158
3159#endif /* !VBOX_INCLUDED_vmm_cpum_x86_amd64_h */
3160
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette