VirtualBox

source: vbox/trunk/include/VBox/vmm/cpum.h @ 79576

Last change on this file was r79576, checked in by vboxsync, 5 years ago:

cpum.h: Nested VMX: bugref:9180 Add CPUMGetGuestVmxVirtApicPage for upcoming change.

/** @file
 * CPUM - CPU Monitor(/ Manager).
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef VBOX_INCLUDED_vmm_cpum_h
#define VBOX_INCLUDED_vmm_cpum_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <iprt/x86.h>
#include <VBox/types.h>
#include <VBox/vmm/cpumctx.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/vmm/hm_vmx.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_cpum      The CPU Monitor / Manager API
 * @ingroup grp_vmm
 * @{
 */

/**
 * CPUID feature to set or clear.
 */
typedef enum CPUMCPUIDFEATURE
{
    CPUMCPUIDFEATURE_INVALID = 0,
    /** The APIC feature bit. (Std+Ext)
     * Note! There is a per-cpu flag for masking this CPUID feature bit when the
     *       APICBASE.ENABLED bit is zero.  So, this feature is only set/cleared
     *       at VM construction time like all the others.  This didn't use to be
     *       that way; this is new with 5.1. */
    CPUMCPUIDFEATURE_APIC,
    /** The SYSENTER/SYSEXIT feature bit. (Std) */
    CPUMCPUIDFEATURE_SEP,
    /** The SYSCALL/SYSRET feature bit (64-bit mode only for Intel CPUs). (Ext) */
    CPUMCPUIDFEATURE_SYSCALL,
    /** The PAE feature bit. (Std+Ext) */
    CPUMCPUIDFEATURE_PAE,
    /** The NX feature bit. (Ext) */
    CPUMCPUIDFEATURE_NX,
    /** The LAHF/SAHF feature bit (64-bit mode only). (Ext) */
    CPUMCPUIDFEATURE_LAHF,
    /** The LONG MODE feature bit. (Ext) */
    CPUMCPUIDFEATURE_LONG_MODE,
    /** The PAT feature bit. (Std+Ext) */
    CPUMCPUIDFEATURE_PAT,
    /** The x2APIC feature bit. (Std) */
    CPUMCPUIDFEATURE_X2APIC,
    /** The RDTSCP feature bit. (Ext) */
    CPUMCPUIDFEATURE_RDTSCP,
    /** The Hypervisor Present bit. (Std) */
    CPUMCPUIDFEATURE_HVP,
    /** The MWAIT extensions bits. (Std) */
    CPUMCPUIDFEATURE_MWAIT_EXTS,
    /** The speculation control feature bits. (Std+Ext) */
    CPUMCPUIDFEATURE_SPEC_CTRL,
    /** 32-bit hackishness. */
    CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
} CPUMCPUIDFEATURE;

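/*
 * Example (editor's addition; a minimal usage sketch, not from the original
 * header): exposing a CPUID feature to the guest at VM construction time with
 * the ring-3 APIs declared later in this file.  The pVM pointer is assumed to
 * be supplied by the caller's VM initialization code.
 *
 * @code
 *     CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 *     Assert(CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX));
 * @endcode
 */
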
/**
 * CPU Vendor.
 */
typedef enum CPUMCPUVENDOR
{
    CPUMCPUVENDOR_INVALID = 0,
    CPUMCPUVENDOR_INTEL,
    CPUMCPUVENDOR_AMD,
    CPUMCPUVENDOR_VIA,
    CPUMCPUVENDOR_CYRIX,
    CPUMCPUVENDOR_SHANGHAI,
    CPUMCPUVENDOR_UNKNOWN,
    /** 32-bit hackishness. */
    CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
} CPUMCPUVENDOR;


/**
 * X86 and AMD64 CPU microarchitectures and processor generations.
 *
 * @remarks The separation here is sometimes a little too finely grained, and
 *          the difference is more like processor generation than
 *          microarchitecture.  This can be useful, so we'll provide functions
 *          for getting at more coarse-grained info.
 */
typedef enum CPUMMICROARCH
{
    kCpumMicroarch_Invalid = 0,

    kCpumMicroarch_Intel_First,

    kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
    kCpumMicroarch_Intel_80186,
    kCpumMicroarch_Intel_80286,
    kCpumMicroarch_Intel_80386,
    kCpumMicroarch_Intel_80486,
    kCpumMicroarch_Intel_P5,

    kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
    kCpumMicroarch_Intel_P6_II,
    kCpumMicroarch_Intel_P6_III,

    kCpumMicroarch_Intel_P6_M_Banias,
    kCpumMicroarch_Intel_P6_M_Dothan,
    kCpumMicroarch_Intel_Core_Yonah,        /**< Core, also known as Enhanced Pentium M. */

    kCpumMicroarch_Intel_Core2_First,
    kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First, /**< 65nm, Merom/Conroe/Kentsfield/Tigerton */
    kCpumMicroarch_Intel_Core2_Penryn,      /**< 45nm, Penryn/Wolfdale/Yorkfield/Harpertown */
    kCpumMicroarch_Intel_Core2_End,

    kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
    kCpumMicroarch_Intel_Core7_Westmere,
    kCpumMicroarch_Intel_Core7_SandyBridge,
    kCpumMicroarch_Intel_Core7_IvyBridge,
    kCpumMicroarch_Intel_Core7_Haswell,
    kCpumMicroarch_Intel_Core7_Broadwell,
    kCpumMicroarch_Intel_Core7_Skylake,
    kCpumMicroarch_Intel_Core7_KabyLake,
    kCpumMicroarch_Intel_Core7_CoffeeLake,
    kCpumMicroarch_Intel_Core7_WhiskeyLake,
    kCpumMicroarch_Intel_Core7_CascadeLake,
    kCpumMicroarch_Intel_Core7_CannonLake,
    kCpumMicroarch_Intel_Core7_IceLake,
    kCpumMicroarch_Intel_Core7_TigerLake,
    kCpumMicroarch_Intel_Core7_End,

    kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
    kCpumMicroarch_Intel_Atom_Lincroft,     /**< Second generation Bonnell (44nm). */
    kCpumMicroarch_Intel_Atom_Saltwell,     /**< 32nm shrink of Bonnell. */
    kCpumMicroarch_Intel_Atom_Silvermont,   /**< 22nm */
    kCpumMicroarch_Intel_Atom_Airmount,     /**< 14nm */
    kCpumMicroarch_Intel_Atom_Goldmont,     /**< 14nm */
    kCpumMicroarch_Intel_Atom_GoldmontPlus, /**< 14nm */
    kCpumMicroarch_Intel_Atom_Unknown,
    kCpumMicroarch_Intel_Atom_End,


    kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsFerry = kCpumMicroarch_Intel_Phi_First,
    kCpumMicroarch_Intel_Phi_KnightsCorner,
    kCpumMicroarch_Intel_Phi_KnightsLanding,
    kCpumMicroarch_Intel_Phi_KnightsHill,
    kCpumMicroarch_Intel_Phi_KnightsMill,
    kCpumMicroarch_Intel_Phi_End,

    kCpumMicroarch_Intel_P6_Core_Atom_End,

    kCpumMicroarch_Intel_NB_First,
    kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
    kCpumMicroarch_Intel_NB_Northwood,      /**< 130nm */
    kCpumMicroarch_Intel_NB_Prescott,       /**< 90nm */
    kCpumMicroarch_Intel_NB_Prescott2M,     /**< 90nm */
    kCpumMicroarch_Intel_NB_CedarMill,      /**< 65nm */
    kCpumMicroarch_Intel_NB_Gallatin,       /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
    kCpumMicroarch_Intel_NB_Unknown,
    kCpumMicroarch_Intel_NB_End,

    kCpumMicroarch_Intel_Unknown,
    kCpumMicroarch_Intel_End,

    kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
    kCpumMicroarch_AMD_Am386,
    kCpumMicroarch_AMD_Am486,
    kCpumMicroarch_AMD_Am486Enh,            /**< Covers Am5x86 as well. */
    kCpumMicroarch_AMD_K5,
    kCpumMicroarch_AMD_K6,

    kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
    kCpumMicroarch_AMD_K7_Spitfire,
    kCpumMicroarch_AMD_K7_Thunderbird,
    kCpumMicroarch_AMD_K7_Morgan,
    kCpumMicroarch_AMD_K7_Thoroughbred,
    kCpumMicroarch_AMD_K7_Barton,
    kCpumMicroarch_AMD_K7_Unknown,
    kCpumMicroarch_AMD_K7_End,

    kCpumMicroarch_AMD_K8_First,
    kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
    kCpumMicroarch_AMD_K8_90nm,             /**< 90nm shrink */
    kCpumMicroarch_AMD_K8_90nm_DualCore,    /**< 90nm with two cores. */
    kCpumMicroarch_AMD_K8_90nm_AMDV,        /**< 90nm with AMD-V (usually) and two cores (usually). */
    kCpumMicroarch_AMD_K8_65nm,             /**< 65nm shrink. */
    kCpumMicroarch_AMD_K8_End,

    kCpumMicroarch_AMD_K10,
    kCpumMicroarch_AMD_K10_Lion,
    kCpumMicroarch_AMD_K10_Llano,
    kCpumMicroarch_AMD_Bobcat,
    kCpumMicroarch_AMD_Jaguar,

    kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
    kCpumMicroarch_AMD_15h_Piledriver,
    kCpumMicroarch_AMD_15h_Steamroller,     /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Excavator,       /**< Yet to be released, might have different family. */
    kCpumMicroarch_AMD_15h_Unknown,
    kCpumMicroarch_AMD_15h_End,

    kCpumMicroarch_AMD_16h_First,
    kCpumMicroarch_AMD_16h_End,

    kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
    kCpumMicroarch_AMD_Zen_End,

    kCpumMicroarch_AMD_Unknown,
    kCpumMicroarch_AMD_End,

    kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
    kCpumMicroarch_Centaur_C2,
    kCpumMicroarch_Centaur_C3,
    kCpumMicroarch_VIA_C3_M2,
    kCpumMicroarch_VIA_C3_C5A,              /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
    kCpumMicroarch_VIA_C3_C5B,              /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
    kCpumMicroarch_VIA_C3_C5C,              /**< 130nm Ezra - C3, Eden ESP. */
    kCpumMicroarch_VIA_C3_C5N,              /**< 130nm Ezra-T - C3. */
    kCpumMicroarch_VIA_C3_C5XL,             /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
    kCpumMicroarch_VIA_C3_C5P,              /**< 130nm Nehemiah+ - C3. */
    kCpumMicroarch_VIA_C7_C5J,              /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
    kCpumMicroarch_VIA_Isaiah,
    kCpumMicroarch_VIA_Unknown,
    kCpumMicroarch_VIA_End,

    kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
    kCpumMicroarch_Cyrix_M1,
    kCpumMicroarch_Cyrix_MediaGX,
    kCpumMicroarch_Cyrix_MediaGXm,
    kCpumMicroarch_Cyrix_M2,
    kCpumMicroarch_Cyrix_Unknown,
    kCpumMicroarch_Cyrix_End,

    kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
    kCpumMicroarch_NEC_V30,
    kCpumMicroarch_NEC_End,

    kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Wudaokou = kCpumMicroarch_Shanghai_First,
    kCpumMicroarch_Shanghai_Unknown,
    kCpumMicroarch_Shanghai_End,

    kCpumMicroarch_Unknown,

    kCpumMicroarch_32BitHack = 0x7fffffff
} CPUMMICROARCH;


/** Predicate macro for catching NetBurst CPUs. */
#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)

/** Predicate macro for catching Core7 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)

/** Predicate macro for catching Core 2 CPUs. */
#define CPUMMICROARCH_IS_INTEL_CORE2(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core2_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core2_End)

/** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
#define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)

/** Predicate macro for catching AMD Family 0Fh CPUs (aka K8). */
#define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)

/** Predicate macro for catching AMD Family 10H CPUs (aka K10). */
#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)

/** Predicate macro for catching AMD Family 11H CPUs (aka Lion). */
#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)

/** Predicate macro for catching AMD Family 12H CPUs (aka Llano). */
#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)

/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat). */
#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)

/** Predicate macro for catching AMD Family 15H CPUs (Bulldozer and its
 * descendants). */
#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)

/** Predicate macro for catching AMD Family 16H CPUs. */
#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
    ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)


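/*
 * Example (editor's addition; illustrative sketch): using the predicate
 * macros above for a coarse-grained decision.  The enmMicroarch value and
 * the branch bodies are hypothetical; only the macros come from this file.
 *
 * @code
 *     CPUMMICROARCH enmMicroarch = kCpumMicroarch_Intel_Core7_Haswell;
 *     if (CPUMMICROARCH_IS_INTEL_CORE7(enmMicroarch))
 *     {
 *         // e.g. assume Nehalem-or-later TSC behavior
 *     }
 *     else if (CPUMMICROARCH_IS_AMD_FAM_15H(enmMicroarch))
 *     {
 *         // e.g. apply a Bulldozer-family specific workaround
 *     }
 * @endcode
 */
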
/**
 * CPUID leaf.
 *
 * @remarks This structure is used by the patch manager and is therefore
 *          more or less set in stone.
 */
typedef struct CPUMCPUIDLEAF
{
    /** The leaf number. */
    uint32_t uLeaf;
    /** The sub-leaf number. */
    uint32_t uSubLeaf;
    /** Sub-leaf mask.  This is 0 when sub-leaves aren't used. */
    uint32_t fSubLeafMask;

    /** The EAX value. */
    uint32_t uEax;
    /** The EBX value. */
    uint32_t uEbx;
    /** The ECX value. */
    uint32_t uEcx;
    /** The EDX value. */
    uint32_t uEdx;

    /** Flags. */
    uint32_t fFlags;
} CPUMCPUIDLEAF;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMCPUIDLEAF, 32);
#endif
/** Pointer to a CPUID leaf. */
typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
/** Pointer to a const CPUID leaf. */
typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;

/** @name CPUMCPUIDLEAF::fFlags
 * @{ */
/** Indicates a working Intel leaf 0xb where the lower 8 ECX bits are not
 * modified and EDX contains the extended APIC ID. */
#define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES    RT_BIT_32(0)
/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID            RT_BIT_32(1)
/** The leaf contains an OSXSAVE bit which needs individual handling on each CPU. */
#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE            RT_BIT_32(2)
/** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
#define CPUMCPUIDLEAF_F_CONTAINS_APIC               RT_BIT_32(3)
/** Mask of the valid flags. */
#define CPUMCPUIDLEAF_F_VALID_MASK                  UINT32_C(0xf)
/** @} */

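/*
 * Example (editor's addition; a minimal sketch of how the fields above fit
 * together, not the actual CPUM lookup code): finding a leaf in an array of
 * CPUMCPUIDLEAF entries.  Sub-leaves are matched after masking the query with
 * fSubLeafMask; a mask of 0 collapses all sub-leaf queries onto sub-leaf 0.
 * paLeaves and cLeaves are assumed caller-supplied.
 *
 * @code
 *     static PCCPUMCPUIDLEAF exampleLookupLeaf(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
 *                                              uint32_t uLeaf, uint32_t uSubLeaf)
 *     {
 *         for (uint32_t i = 0; i < cLeaves; i++)
 *             if (   paLeaves[i].uLeaf == uLeaf
 *                 && paLeaves[i].uSubLeaf == (uSubLeaf & paLeaves[i].fSubLeafMask))
 *                 return &paLeaves[i];
 *         return NULL;
 *     }
 * @endcode
 */
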
/**
 * Method used to deal with unknown CPUID leaves.
 * @remarks Used in patch code.
 */
typedef enum CPUMUNKNOWNCPUID
{
    /** Invalid zero value. */
    CPUMUNKNOWNCPUID_INVALID = 0,
    /** Use given default values (DefCpuId). */
    CPUMUNKNOWNCPUID_DEFAULTS,
    /** Return the last standard leaf.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF,
    /** Return the last standard leaf, with ecx observed.
     * Intel Sandy Bridge has been observed doing this. */
    CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
    /** The register values are passed through unmodified. */
    CPUMUNKNOWNCPUID_PASSTHRU,
    /** End of valid values. */
    CPUMUNKNOWNCPUID_END,
    /** Ensure 32-bit type. */
    CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
} CPUMUNKNOWNCPUID;
/** Pointer to unknown CPUID leaf method. */
typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;

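/*
 * Example (editor's addition; illustrative only): how a CPUID dispatcher
 * might act on the CPUMUNKNOWNCPUID method when a guest queries a leaf that
 * has no entry.  The DefCpuId record mentioned above and the pu* output
 * pointers are assumed; the real handling lives in the CPUM implementation.
 *
 * @code
 *     switch (enmUnknownMethod)
 *     {
 *         case CPUMUNKNOWNCPUID_DEFAULTS:
 *             *puEax = DefCpuId.uEax; *puEbx = DefCpuId.uEbx;  // hypothetical default record
 *             *puEcx = DefCpuId.uEcx; *puEdx = DefCpuId.uEdx;
 *             break;
 *         case CPUMUNKNOWNCPUID_PASSTHRU:
 *             break; // leave the register values as the caller passed them in
 *         default:
 *             break; // the last-std-leaf methods re-run the lookup with the last standard leaf
 *     }
 * @endcode
 */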

/**
 * MSR read functions.
 */
typedef enum CPUMMSRRDFN
{
    /** Invalid zero value. */
    kCpumMsrRdFn_Invalid = 0,
    /** Return the CPUMMSRRANGE::uValue. */
    kCpumMsrRdFn_FixedValue,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be paired with kCpumMsrWrFn_MsrAlias. */
    kCpumMsrRdFn_MsrAlias,
    /** Write-only register; GP(0) on all read attempts. */
    kCpumMsrRdFn_WriteOnly,

    kCpumMsrRdFn_Ia32P5McAddr,
    kCpumMsrRdFn_Ia32P5McType,
    kCpumMsrRdFn_Ia32TimestampCounter,
    kCpumMsrRdFn_Ia32PlatformId,            /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32ApicBase,
    kCpumMsrRdFn_Ia32FeatureControl,
    kCpumMsrRdFn_Ia32BiosSignId,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32SmmMonitorCtl,
    kCpumMsrRdFn_Ia32PmcN,
    kCpumMsrRdFn_Ia32MonitorFilterLineSize,
    kCpumMsrRdFn_Ia32MPerf,
    kCpumMsrRdFn_Ia32APerf,
    kCpumMsrRdFn_Ia32MtrrCap,               /**< Takes real CPU value for reference. */
    kCpumMsrRdFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrRdFn_Ia32MtrrDefType,
    kCpumMsrRdFn_Ia32Pat,
    kCpumMsrRdFn_Ia32SysEnterCs,
    kCpumMsrRdFn_Ia32SysEnterEsp,
    kCpumMsrRdFn_Ia32SysEnterEip,
    kCpumMsrRdFn_Ia32McgCap,
    kCpumMsrRdFn_Ia32McgStatus,
    kCpumMsrRdFn_Ia32McgCtl,
    kCpumMsrRdFn_Ia32DebugCtl,
    kCpumMsrRdFn_Ia32SmrrPhysBase,
    kCpumMsrRdFn_Ia32SmrrPhysMask,
    kCpumMsrRdFn_Ia32PlatformDcaCap,
    kCpumMsrRdFn_Ia32CpuDcaCap,
    kCpumMsrRdFn_Ia32Dca0Cap,
    kCpumMsrRdFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrRdFn_Ia32PerfStatus,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32PerfCtl,               /**< Range value returned. */
    kCpumMsrRdFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32PerfCapabilities,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32FixedCtrCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalStatus,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32PerfGlobalCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrRdFn_Ia32PebsEnable,
    kCpumMsrRdFn_Ia32ClockModulation,       /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermInterrupt,        /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermStatus,           /**< Range value returned. */
    kCpumMsrRdFn_Ia32Therm2Ctl,             /**< Range value returned. */
    kCpumMsrRdFn_Ia32MiscEnable,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrRdFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32DsArea,
    kCpumMsrRdFn_Ia32TscDeadline,
    kCpumMsrRdFn_Ia32X2ApicN,
    kCpumMsrRdFn_Ia32DebugInterface,
    kCpumMsrRdFn_Ia32VmxBasic,              /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxPinbasedCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcbasedCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxExitCtls,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEntryCtls,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxMisc,               /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmcsEnum,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcBasedCtls2,     /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEptVpidCap,         /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTruePinbasedCtls,   /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls,  /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueExitCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueEntryCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmFunc,             /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32SpecCtrl,
    kCpumMsrRdFn_Ia32ArchCapabilities,

    kCpumMsrRdFn_Amd64Efer,
    kCpumMsrRdFn_Amd64SyscallTarget,
    kCpumMsrRdFn_Amd64LongSyscallTarget,
    kCpumMsrRdFn_Amd64CompSyscallTarget,
    kCpumMsrRdFn_Amd64SyscallFlagMask,
    kCpumMsrRdFn_Amd64FsBase,
    kCpumMsrRdFn_Amd64GsBase,
    kCpumMsrRdFn_Amd64KernelGsBase,
    kCpumMsrRdFn_Amd64TscAux,

    kCpumMsrRdFn_IntelEblCrPowerOn,
    kCpumMsrRdFn_IntelI7CoreThreadCount,
    kCpumMsrRdFn_IntelP4EbcHardPowerOn,
    kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
    kCpumMsrRdFn_IntelP4EbcFrequencyId,
    kCpumMsrRdFn_IntelP6FsbFrequency,       /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPlatformInfo,
    kCpumMsrRdFn_IntelFlexRatio,            /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelPkgCStConfigControl,
    kCpumMsrRdFn_IntelPmgIoCaptureBase,
    kCpumMsrRdFn_IntelLastBranchFromToN,
    kCpumMsrRdFn_IntelLastBranchFromN,
    kCpumMsrRdFn_IntelLastBranchToN,
    kCpumMsrRdFn_IntelLastBranchTos,
    kCpumMsrRdFn_IntelBblCrCtl,
    kCpumMsrRdFn_IntelBblCrCtl3,
    kCpumMsrRdFn_IntelI7TemperatureTarget,  /**< Range value returned. */
    kCpumMsrRdFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
    kCpumMsrRdFn_IntelI7MiscPwrMgmt,
    kCpumMsrRdFn_IntelP6CrN,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrRdFn_IntelI7SandyAesNiCtl,
    kCpumMsrRdFn_IntelI7TurboRatioLimit,    /**< Returns range value. */
    kCpumMsrRdFn_IntelI7LbrSelect,
    kCpumMsrRdFn_IntelI7SandyErrorControl,
    kCpumMsrRdFn_IntelI7VirtualLegacyWireCap, /**< Returns range value. */
    kCpumMsrRdFn_IntelI7PowerCtl,
    kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
    kCpumMsrRdFn_IntelI7PebsLdLat,
    kCpumMsrRdFn_IntelI7PkgCnResidencyN,    /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7CoreCnResidencyN,   /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7SandyVrCurrentConfig, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyVrMiscConfig,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPerfStatus,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerInfo,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramEnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerInfo,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0Policy,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PerfStatus,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1PowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1Policy,      /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpNominal, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
    kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
    kCpumMsrRdFn_IntelI7UncCBoxConfig,
    kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
    kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrRdFn_IntelI7SmiCount,
    kCpumMsrRdFn_IntelCore2EmttmCrTablesN,  /**< Range value returned. */
    kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrRdFn_IntelCore1ExtConfig,
    kCpumMsrRdFn_IntelCore1DtsCalControl,
    kCpumMsrRdFn_IntelCore2PeciControl,
    kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,

    kCpumMsrRdFn_P6LastBranchFromIp,
    kCpumMsrRdFn_P6LastBranchToIp,
    kCpumMsrRdFn_P6LastIntFromIp,
    kCpumMsrRdFn_P6LastIntToIp,

    kCpumMsrRdFn_AmdFam15hTscRate,
    kCpumMsrRdFn_AmdFam15hLwpCfg,
    kCpumMsrRdFn_AmdFam15hLwpCbAddr,
    kCpumMsrRdFn_AmdFam10hMc4MiscN,
    kCpumMsrRdFn_AmdK8PerfCtlN,
    kCpumMsrRdFn_AmdK8PerfCtrN,
    kCpumMsrRdFn_AmdK8SysCfg,               /**< Range value returned. */
    kCpumMsrRdFn_AmdK8HwCr,
    kCpumMsrRdFn_AmdK8IorrBaseN,
    kCpumMsrRdFn_AmdK8IorrMaskN,
    kCpumMsrRdFn_AmdK8TopOfMemN,
    kCpumMsrRdFn_AmdK8NbCfg1,
    kCpumMsrRdFn_AmdK8McXcptRedir,
    kCpumMsrRdFn_AmdK8CpuNameN,
    kCpumMsrRdFn_AmdK8HwThermalCtrl,        /**< Range value returned. */
    kCpumMsrRdFn_AmdK8SwThermalCtrl,
    kCpumMsrRdFn_AmdK8FidVidControl,        /**< Range value returned. */
    kCpumMsrRdFn_AmdK8FidVidStatus,         /**< Range value returned. */
    kCpumMsrRdFn_AmdK8McCtlMaskN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrRdFn_AmdK8IntPendingMessage,
    kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrRdFn_AmdFam10hPStateCurLimit,   /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateN,          /**< Returns range value.  This isn't a register index! */
    kCpumMsrRdFn_AmdFam10hCofVidControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCofVidStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrRdFn_AmdK8SmmBase,
    kCpumMsrRdFn_AmdK8SmmAddr,
    kCpumMsrRdFn_AmdK8SmmMask,
    kCpumMsrRdFn_AmdK8VmCr,
    kCpumMsrRdFn_AmdK8IgnNe,
    kCpumMsrRdFn_AmdK8SmmCtl,
    kCpumMsrRdFn_AmdK8VmHSavePa,
    kCpumMsrRdFn_AmdFam10hVmLockKey,
    kCpumMsrRdFn_AmdFam10hSmmLockKey,
    kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
    kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrRdFn_AmdK7MicrocodeCtl,         /**< Returns range value. */
    kCpumMsrRdFn_AmdK7ClusterIdMaybe,       /**< Returns range value. */
    kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrRdFn_AmdK8PatchLevel,           /**< Returns range value. */
    kCpumMsrRdFn_AmdK7DebugStatusMaybe,
    kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
    kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
    kCpumMsrRdFn_AmdK7NodeId,
    kCpumMsrRdFn_AmdK7DrXAddrMaskN,         /**< Takes register index. */
    kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrRdFn_AmdK7LoadStoreCfg,
    kCpumMsrRdFn_AmdK7InstrCacheCfg,
    kCpumMsrRdFn_AmdK7DataCacheCfg,
    kCpumMsrRdFn_AmdK7BusUnitCfg,
    kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
    kCpumMsrRdFn_AmdFam15hFpuCfg,
    kCpumMsrRdFn_AmdFam15hDecoderCfg,
    kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
    kCpumMsrRdFn_AmdFam15hExecUnitCfg,
    kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
    kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrRdFn_AmdFam10hIbsOpRip,
    kCpumMsrRdFn_AmdFam10hIbsOpData,
    kCpumMsrRdFn_AmdFam10hIbsOpData2,
    kCpumMsrRdFn_AmdFam10hIbsOpData3,
    kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsCtl,
    kCpumMsrRdFn_AmdFam14hIbsBrTarget,

    kCpumMsrRdFn_Gim,

    /** End of valid MSR read function indexes. */
    kCpumMsrRdFn_End
} CPUMMSRRDFN;

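/*
 * Example (editor's addition; a sketch under assumptions, not the actual CPUM
 * dispatcher): CPUMMSRRDFN values are indexes into a table of read handlers.
 * The FNEXAMPLERDMSR typedef, the handler names and the table are all
 * hypothetical; only kCpumMsrRdFn_* and VBOXSTRICTRC come from VBox headers.
 *
 * @code
 *     typedef VBOXSTRICTRC FNEXAMPLERDMSR(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue);
 *     static FNEXAMPLERDMSR * const g_apfnExampleRdMsr[] =
 *     {
 *         NULL,                     // kCpumMsrRdFn_Invalid
 *         exampleRdFixedValue,      // kCpumMsrRdFn_FixedValue
 *         exampleRdMsrAlias,        // kCpumMsrRdFn_MsrAlias
 *         exampleRdWriteOnly,       // kCpumMsrRdFn_WriteOnly (raises #GP)
 *         // ... one entry per enumerator, kCpumMsrRdFn_End entries in total
 *     };
 * @endcode
 */
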
/**
 * MSR write functions.
 */
typedef enum CPUMMSRWRFN
{
    /** Invalid zero value. */
    kCpumMsrWrFn_Invalid = 0,
    /** Writes are ignored, though the fWrGpMask is still observed. */
    kCpumMsrWrFn_IgnoreWrite,
    /** Writes cause GP(0) to be raised; the fWrGpMask should be UINT64_MAX. */
    kCpumMsrWrFn_ReadOnly,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uValue.  Must be paired with kCpumMsrRdFn_MsrAlias. */
    kCpumMsrWrFn_MsrAlias,

    kCpumMsrWrFn_Ia32P5McAddr,
    kCpumMsrWrFn_Ia32P5McType,
    kCpumMsrWrFn_Ia32TimestampCounter,
    kCpumMsrWrFn_Ia32ApicBase,
    kCpumMsrWrFn_Ia32FeatureControl,
    kCpumMsrWrFn_Ia32BiosSignId,
    kCpumMsrWrFn_Ia32BiosUpdateTrigger,
    kCpumMsrWrFn_Ia32SmmMonitorCtl,
    kCpumMsrWrFn_Ia32PmcN,
    kCpumMsrWrFn_Ia32MonitorFilterLineSize,
    kCpumMsrWrFn_Ia32MPerf,
    kCpumMsrWrFn_Ia32APerf,
    kCpumMsrWrFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrWrFn_Ia32MtrrDefType,
    kCpumMsrWrFn_Ia32Pat,
    kCpumMsrWrFn_Ia32SysEnterCs,
    kCpumMsrWrFn_Ia32SysEnterEsp,
    kCpumMsrWrFn_Ia32SysEnterEip,
    kCpumMsrWrFn_Ia32McgStatus,
    kCpumMsrWrFn_Ia32McgCtl,
    kCpumMsrWrFn_Ia32DebugCtl,
    kCpumMsrWrFn_Ia32SmrrPhysBase,
    kCpumMsrWrFn_Ia32SmrrPhysMask,
    kCpumMsrWrFn_Ia32PlatformDcaCap,
    kCpumMsrWrFn_Ia32Dca0Cap,
    kCpumMsrWrFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrWrFn_Ia32PerfStatus,
    kCpumMsrWrFn_Ia32PerfCtl,
    kCpumMsrWrFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32PerfCapabilities,
    kCpumMsrWrFn_Ia32FixedCtrCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalStatus,
    kCpumMsrWrFn_Ia32PerfGlobalCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrWrFn_Ia32PebsEnable,
    kCpumMsrWrFn_Ia32ClockModulation,
    kCpumMsrWrFn_Ia32ThermInterrupt,
    kCpumMsrWrFn_Ia32ThermStatus,
    kCpumMsrWrFn_Ia32Therm2Ctl,
    kCpumMsrWrFn_Ia32MiscEnable,
    kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrWrFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32DsArea,
    kCpumMsrWrFn_Ia32TscDeadline,
    kCpumMsrWrFn_Ia32X2ApicN,
    kCpumMsrWrFn_Ia32DebugInterface,
    kCpumMsrWrFn_Ia32SpecCtrl,
    kCpumMsrWrFn_Ia32PredCmd,
    kCpumMsrWrFn_Ia32FlushCmd,

    kCpumMsrWrFn_Amd64Efer,
    kCpumMsrWrFn_Amd64SyscallTarget,
    kCpumMsrWrFn_Amd64LongSyscallTarget,
    kCpumMsrWrFn_Amd64CompSyscallTarget,
    kCpumMsrWrFn_Amd64SyscallFlagMask,
    kCpumMsrWrFn_Amd64FsBase,
    kCpumMsrWrFn_Amd64GsBase,
    kCpumMsrWrFn_Amd64KernelGsBase,
    kCpumMsrWrFn_Amd64TscAux,
    kCpumMsrWrFn_IntelEblCrPowerOn,
    kCpumMsrWrFn_IntelP4EbcHardPowerOn,
    kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
    kCpumMsrWrFn_IntelP4EbcFrequencyId,
    kCpumMsrWrFn_IntelFlexRatio,
    kCpumMsrWrFn_IntelPkgCStConfigControl,
    kCpumMsrWrFn_IntelPmgIoCaptureBase,
    kCpumMsrWrFn_IntelLastBranchFromToN,
    kCpumMsrWrFn_IntelLastBranchFromN,
    kCpumMsrWrFn_IntelLastBranchToN,
    kCpumMsrWrFn_IntelLastBranchTos,
    kCpumMsrWrFn_IntelBblCrCtl,
    kCpumMsrWrFn_IntelBblCrCtl3,
    kCpumMsrWrFn_IntelI7TemperatureTarget,
    kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
    kCpumMsrWrFn_IntelI7MiscPwrMgmt,
    kCpumMsrWrFn_IntelP6CrN,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrWrFn_IntelI7SandyAesNiCtl,
    kCpumMsrWrFn_IntelI7TurboRatioLimit,
    kCpumMsrWrFn_IntelI7LbrSelect,
    kCpumMsrWrFn_IntelI7SandyErrorControl,
    kCpumMsrWrFn_IntelI7PowerCtl,
    kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
    kCpumMsrWrFn_IntelI7PebsLdLat,
    kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
    kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
    kCpumMsrWrFn_IntelI7SandyRaplPowerUnit, /**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
    kCpumMsrWrFn_IntelI7SandyPkgC2Residency, /**< R/O but found writable bits on a Silvermont CPU here. */
    kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
    kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0Policy,
    kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp1Policy,
    kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
    kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
    kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
    kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
    kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
    kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
    kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
    kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
    kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
    kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
    kCpumMsrWrFn_IntelCore1ExtConfig,
    kCpumMsrWrFn_IntelCore1DtsCalControl,
    kCpumMsrWrFn_IntelCore2PeciControl,

    kCpumMsrWrFn_P6LastIntFromIp,
    kCpumMsrWrFn_P6LastIntToIp,

    kCpumMsrWrFn_AmdFam15hTscRate,
    kCpumMsrWrFn_AmdFam15hLwpCfg,
    kCpumMsrWrFn_AmdFam15hLwpCbAddr,
    kCpumMsrWrFn_AmdFam10hMc4MiscN,
    kCpumMsrWrFn_AmdK8PerfCtlN,
    kCpumMsrWrFn_AmdK8PerfCtrN,
    kCpumMsrWrFn_AmdK8SysCfg,
    kCpumMsrWrFn_AmdK8HwCr,
    kCpumMsrWrFn_AmdK8IorrBaseN,
    kCpumMsrWrFn_AmdK8IorrMaskN,
    kCpumMsrWrFn_AmdK8TopOfMemN,
    kCpumMsrWrFn_AmdK8NbCfg1,
    kCpumMsrWrFn_AmdK8McXcptRedir,
    kCpumMsrWrFn_AmdK8CpuNameN,
    kCpumMsrWrFn_AmdK8HwThermalCtrl,
    kCpumMsrWrFn_AmdK8SwThermalCtrl,
    kCpumMsrWrFn_AmdK8FidVidControl,
    kCpumMsrWrFn_AmdK8McCtlMaskN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrWrFn_AmdK8IntPendingMessage,
    kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrWrFn_AmdFam10hPStateControl,
    kCpumMsrWrFn_AmdFam10hPStateStatus,
    kCpumMsrWrFn_AmdFam10hPStateN,
    kCpumMsrWrFn_AmdFam10hCofVidControl,
    kCpumMsrWrFn_AmdFam10hCofVidStatus,
    kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrWrFn_AmdK8SmmBase,
    kCpumMsrWrFn_AmdK8SmmAddr,
    kCpumMsrWrFn_AmdK8SmmMask,
    kCpumMsrWrFn_AmdK8VmCr,
    kCpumMsrWrFn_AmdK8IgnNe,
    kCpumMsrWrFn_AmdK8SmmCtl,
    kCpumMsrWrFn_AmdK8VmHSavePa,
    kCpumMsrWrFn_AmdFam10hVmLockKey,
    kCpumMsrWrFn_AmdFam10hSmmLockKey,
    kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
    kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrWrFn_AmdK7MicrocodeCtl,
    kCpumMsrWrFn_AmdK7ClusterIdMaybe,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrWrFn_AmdK8PatchLoader,
    kCpumMsrWrFn_AmdK7DebugStatusMaybe,
    kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
    kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
    kCpumMsrWrFn_AmdK7NodeId,
    kCpumMsrWrFn_AmdK7DrXAddrMaskN,         /**< Takes register index. */
    kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrWrFn_AmdK7LoadStoreCfg,
    kCpumMsrWrFn_AmdK7InstrCacheCfg,
    kCpumMsrWrFn_AmdK7DataCacheCfg,
    kCpumMsrWrFn_AmdK7BusUnitCfg,
    kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
    kCpumMsrWrFn_AmdFam15hFpuCfg,
    kCpumMsrWrFn_AmdFam15hDecoderCfg,
    kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
    kCpumMsrWrFn_AmdFam15hExecUnitCfg,
    kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
    kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrWrFn_AmdFam10hIbsOpRip,
    kCpumMsrWrFn_AmdFam10hIbsOpData,
    kCpumMsrWrFn_AmdFam10hIbsOpData2,
    kCpumMsrWrFn_AmdFam10hIbsOpData3,
    kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsCtl,
    kCpumMsrWrFn_AmdFam14hIbsBrTarget,

    kCpumMsrWrFn_Gim,

    /** End of valid MSR write function indexes. */
    kCpumMsrWrFn_End
} CPUMMSRWRFN;

/**
 * MSR range.
 */
typedef struct CPUMMSRRANGE
{
    /** The first MSR. [0] */
    uint32_t uFirst;
    /** The last MSR. [4] */
    uint32_t uLast;
    /** The read function (CPUMMSRRDFN). [8] */
    uint16_t enmRdFn;
    /** The write function (CPUMMSRWRFN). [10] */
    uint16_t enmWrFn;
    /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
     * UINT16_MAX if not used by the read and write functions. [12] */
    uint16_t offCpumCpu;
    /** Reserved for future hacks. [14] */
    uint16_t fReserved;
    /** The init/read value. [16]
     * When enmRdFn is kCpumMsrRdFn_FixedValue, this is the value returned on RDMSR.
     * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
     * offset into CPUM. */
    uint64_t uValue;
    /** The bits to ignore when writing. [24] */
    uint64_t fWrIgnMask;
    /** The bits that will cause a GP(0) when writing. [32]
     * This is always checked prior to calling the write function.  Using
     * UINT64_MAX effectively marks the MSR as read-only. */
    uint64_t fWrGpMask;
    /** The register name, if applicable. [40] */
    char szName[56];

#ifdef VBOX_WITH_STATISTICS
    /** The number of reads. */
    STAMCOUNTER cReads;
    /** The number of writes. */
    STAMCOUNTER cWrites;
    /** The number of times ignored bits were written. */
    STAMCOUNTER cIgnoredBits;
    /** The number of GPs generated. */
    STAMCOUNTER cGps;
#endif
} CPUMMSRRANGE;
#ifndef VBOX_FOR_DTRACE_LIB
# ifdef VBOX_WITH_STATISTICS
AssertCompileSize(CPUMMSRRANGE, 128);
# else
AssertCompileSize(CPUMMSRRANGE, 96);
# endif
#endif
/** Pointer to an MSR range. */
typedef CPUMMSRRANGE *PCPUMMSRRANGE;
/** Pointer to a const MSR range. */
typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;

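/*
 * Example (editor's addition; a simplified sketch of the semantics the fields
 * above describe, not the real CPUM code): a write goes through fWrGpMask and
 * fWrIgnMask before the range's write function is invoked.  The dispatch
 * helper is hypothetical; VERR_CPUM_RAISE_GP_0 is a real VBox status code.
 *
 * @code
 *     VBOXSTRICTRC exampleWrMsr(PVMCPU pVCpu, PCCPUMMSRRANGE pRange, uint32_t idMsr, uint64_t uValue)
 *     {
 *         if (uValue & pRange->fWrGpMask)     // reserved bits set -> #GP(0); a mask of
 *             return VERR_CPUM_RAISE_GP_0;    // UINT64_MAX makes the MSR read-only
 *         uValue &= ~pRange->fWrIgnMask;      // silently drop ignored bits
 *         return exampleCallWrFn(pVCpu, (CPUMMSRWRFN)pRange->enmWrFn, idMsr, uValue);
 *     }
 * @endcode
 */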

/**
 * MSRs which are required while exploding features.
 */
typedef struct CPUMMSRS
{
    union
    {
        VMXMSRS vmx;
        SVMMSRS svm;
    } hwvirt;
} CPUMMSRS;
/** Pointer to a CPUMMSRS struct. */
typedef CPUMMSRS *PCPUMMSRS;
/** Pointer to a const CPUMMSRS struct. */
typedef CPUMMSRS const *PCCPUMMSRS;


/**
 * CPU features and quirks.
 * This is mostly exploded CPUID info.
 */
typedef struct CPUMFEATURES
{
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t enmCpuVendor;
    /** The CPU family. */
    uint8_t uFamily;
    /** The CPU model. */
    uint8_t uModel;
    /** The CPU stepping. */
    uint8_t uStepping;
    /** The microarchitecture. */
#ifndef VBOX_FOR_DTRACE_LIB
    CPUMMICROARCH enmMicroarch;
#else
    uint32_t enmMicroarch;
#endif
    /** The maximum physical address width of the CPU. */
    uint8_t cMaxPhysAddrWidth;
    /** The maximum linear address width of the CPU. */
    uint8_t cMaxLinearAddrWidth;
    /** Max size of the extended state (or FPU state if no XSAVE). */
    uint16_t cbMaxExtendedState;

    /** Supports MSRs. */
    uint32_t fMsr : 1;
    /** Supports the page size extension (4/2 MB pages). */
    uint32_t fPse : 1;
    /** Supports 36-bit page size extension (4 MB pages can map memory above
     * 4GB). */
    uint32_t fPse36 : 1;
    /** Supports physical address extension (PAE). */
    uint32_t fPae : 1;
    /** Page attribute table (PAT) support (page level cache control). */
    uint32_t fPat : 1;
    /** Supports the FXSAVE and FXRSTOR instructions. */
    uint32_t fFxSaveRstor : 1;
    /** Supports the XSAVE and XRSTOR instructions. */
    uint32_t fXSaveRstor : 1;
    /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
    uint32_t fOpSysXSaveRstor : 1;
    /** Supports MMX. */
    uint32_t fMmx : 1;
    /** Supports AMD extensions to MMX instructions. */
    uint32_t fAmdMmxExts : 1;
    /** Supports SSE. */
    uint32_t fSse : 1;
    /** Supports SSE2. */
    uint32_t fSse2 : 1;
    /** Supports SSE3. */
    uint32_t fSse3 : 1;
    /** Supports SSSE3. */
    uint32_t fSsse3 : 1;
    /** Supports SSE4.1. */
    uint32_t fSse41 : 1;
    /** Supports SSE4.2. */
    uint32_t fSse42 : 1;
    /** Supports AVX. */
    uint32_t fAvx : 1;
    /** Supports AVX2. */
    uint32_t fAvx2 : 1;
    /** Supports AVX512 foundation. */
    uint32_t fAvx512Foundation : 1;
    /** Supports RDTSC. */
    uint32_t fTsc : 1;
    /** Intel SYSENTER/SYSEXIT support. */
    uint32_t fSysEnter : 1;
    /** First generation APIC. */
    uint32_t fApic : 1;
    /** Second generation APIC. */
    uint32_t fX2Apic : 1;
    /** Hypervisor present. */
    uint32_t fHypervisorPresent : 1;
    /** MWAIT & MONITOR instructions supported. */
    uint32_t fMonitorMWait : 1;
    /** MWAIT extensions present. */
    uint32_t fMWaitExtensions : 1;
    /** Supports CMPXCHG16B in 64-bit mode. */
    uint32_t fMovCmpXchg16b : 1;
    /** Supports CLFLUSH. */
    uint32_t fClFlush : 1;
    /** Supports CLFLUSHOPT. */
    uint32_t fClFlushOpt : 1;
    /** Supports IA32_PRED_CMD.IBPB. */
    uint32_t fIbpb : 1;
    /** Supports IA32_SPEC_CTRL.IBRS. */
    uint32_t fIbrs : 1;
    /** Supports IA32_SPEC_CTRL.STIBP. */
    uint32_t fStibp : 1;
    /** Supports IA32_FLUSH_CMD. */
    uint32_t fFlushCmd : 1;
    /** Supports IA32_ARCH_CAP. */
    uint32_t fArchCap : 1;
    /** Supports MD_CLEAR functionality (VERW, IA32_FLUSH_CMD). */
    uint32_t fMdsClear : 1;
    /** Supports PCID. */
    uint32_t fPcid : 1;
    /** Supports INVPCID. */
    uint32_t fInvpcid : 1;
    /** Supports read/write FSGSBASE instructions. */
    uint32_t fFsGsBase : 1;

    /** Supports AMD 3DNow instructions. */
    uint32_t f3DNow : 1;
    /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
    uint32_t f3DNowPrefetch : 1;

    /** AMD64: Supports long mode. */
    uint32_t fLongMode : 1;
    /** AMD64: SYSCALL/SYSRET support. */
    uint32_t fSysCall : 1;
    /** AMD64: No-execute page table bit. */
    uint32_t fNoExecute : 1;
    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
    uint32_t fLahfSahf : 1;
    /** AMD64: Supports RDTSCP. */
    uint32_t fRdTscP : 1;
    /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
    uint32_t fMovCr8In32Bit : 1;
    /** AMD64: Supports XOP (similar to VEX3/AVX). */
    uint32_t fXop : 1;

    /** Indicates that FPU instruction and data pointers may leak.
     * This generally applies to recent AMD CPUs, where the FPU IP and DP
     * pointers are only saved and restored if an exception is pending. */
    uint32_t fLeakyFxSR : 1;

    /** AMD64: Supports AMD SVM. */
    uint32_t fSvm : 1;

    /** Support for Intel VMX. */
    uint32_t fVmx : 1;

    /** Indicates that speculative execution control CPUID bits and MSRs are
     * exposed.  The details are different for Intel and AMD but both have
     * similar functionality. */
    uint32_t fSpeculationControl : 1;

    /** MSR_IA32_ARCH_CAPABILITIES: RDCL_NO (bit 0).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchRdclNo : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: IBRS_ALL (bit 1).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchIbrsAll : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: RSB Override (bit 2).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchRsbOverride : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: SKIP_L1DFL_VMENTRY (bit 3).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchVmmNeedNotFlushL1d : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: MDS_NO (bit 5).
     * @remarks Only safe to use after CPUM ring-0 init! */
    uint32_t fArchMdsNo : 1;

    /** Alignment padding / reserved for future use. */
    uint32_t fPadding : 8;

    /** SVM: Supports nested paging. */
    uint32_t fSvmNestedPaging : 1;
    /** SVM: Supports LBR (Last Branch Record) virtualization. */
    uint32_t fSvmLbrVirt : 1;
    /** SVM: Supports SVM lock. */
    uint32_t fSvmSvmLock : 1;
    /** SVM: Supports Next RIP save. */
    uint32_t fSvmNextRipSave : 1;
    /** SVM: Supports TSC rate MSR. */
    uint32_t fSvmTscRateMsr : 1;
    /** SVM: Supports VMCB clean bits. */
    uint32_t fSvmVmcbClean : 1;
    /** SVM: Supports Flush-by-ASID. */
    uint32_t fSvmFlusbByAsid : 1;
    /** SVM: Supports decode assists. */
    uint32_t fSvmDecodeAssists : 1;
    /** SVM: Supports Pause filter. */
    uint32_t fSvmPauseFilter : 1;
    /** SVM: Supports Pause filter threshold. */
    uint32_t fSvmPauseFilterThreshold : 1;
    /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
    uint32_t fSvmAvic : 1;
    /** SVM: Supports Virtualized VMSAVE/VMLOAD. */
    uint32_t fSvmVirtVmsaveVmload : 1;
    /** SVM: Supports VGIF (Virtual Global Interrupt Flag). */
    uint32_t fSvmVGif : 1;
    /** SVM: Padding / reserved for future features. */
    uint32_t fSvmPadding0 : 19;
    /** SVM: Maximum supported ASID. */
    uint32_t uSvmMaxAsid;

    /** VMX: Maximum physical address width. */
    uint8_t cVmxMaxPhysAddrWidth;
    /** VMX: Padding / reserved for future. */
    uint8_t abVmxPadding[3];
    /** VMX: Padding / reserved for future. */
    uint32_t fVmxPadding0;

    /** @name VMX basic controls.
     * @{ */
    /** VMX: Supports INS/OUTS VM-exit instruction info. */
    uint32_t fVmxInsOutInfo : 1;
    /** @} */

    /** @name VMX Pin-based controls.
     * @{ */
    /** VMX: Supports external interrupt VM-exit. */
    uint32_t fVmxExtIntExit : 1;
    /** VMX: Supports NMI VM-exit. */
    uint32_t fVmxNmiExit : 1;
    /** VMX: Supports Virtual NMIs. */
    uint32_t fVmxVirtNmi : 1;
    /** VMX: Supports preemption timer. */
    uint32_t fVmxPreemptTimer : 1;
    /** VMX: Supports posted interrupts. */
    uint32_t fVmxPostedInt : 1;
    /** @} */

    /** @name VMX Processor-based controls.
     * @{ */
    /** VMX: Supports Interrupt-window exiting. */
    uint32_t fVmxIntWindowExit : 1;
    /** VMX: Supports TSC offsetting. */
    uint32_t fVmxTscOffsetting : 1;
    /** VMX: Supports HLT exiting. */
    uint32_t fVmxHltExit : 1;
    /** VMX: Supports INVLPG exiting. */
    uint32_t fVmxInvlpgExit : 1;
    /** VMX: Supports MWAIT exiting. */
    uint32_t fVmxMwaitExit : 1;
    /** VMX: Supports RDPMC exiting. */
    uint32_t fVmxRdpmcExit : 1;
    /** VMX: Supports RDTSC exiting. */
    uint32_t fVmxRdtscExit : 1;
    /** VMX: Supports CR3-load exiting. */
    uint32_t fVmxCr3LoadExit : 1;
    /** VMX: Supports CR3-store exiting. */
    uint32_t fVmxCr3StoreExit : 1;
    /** VMX: Supports CR8-load exiting. */
    uint32_t fVmxCr8LoadExit : 1;
    /** VMX: Supports CR8-store exiting. */
    uint32_t fVmxCr8StoreExit : 1;
    /** VMX: Supports TPR shadow. */
    uint32_t fVmxUseTprShadow : 1;
    /** VMX: Supports NMI-window exiting. */
    uint32_t fVmxNmiWindowExit : 1;
    /** VMX: Supports Mov-DRx exiting. */
    uint32_t fVmxMovDRxExit : 1;
    /** VMX: Supports Unconditional I/O exiting. */
    uint32_t fVmxUncondIoExit : 1;
    /** VMX: Supports I/O bitmaps. */
    uint32_t fVmxUseIoBitmaps : 1;
    /** VMX: Supports Monitor Trap Flag. */
    uint32_t fVmxMonitorTrapFlag : 1;
    /** VMX: Supports MSR bitmap. */
    uint32_t fVmxUseMsrBitmaps : 1;
    /** VMX: Supports MONITOR exiting. */
    uint32_t fVmxMonitorExit : 1;
    /** VMX: Supports PAUSE exiting. */
    uint32_t fVmxPauseExit : 1;
    /** VMX: Supports secondary processor-based VM-execution controls. */
    uint32_t fVmxSecondaryExecCtls : 1;
    /** @} */

    /** @name VMX Secondary processor-based controls.
     * @{ */
    /** VMX: Supports virtualize-APIC access. */
    uint32_t fVmxVirtApicAccess : 1;
    /** VMX: Supports EPT (Extended Page Tables). */
    uint32_t fVmxEpt : 1;
    /** VMX: Supports descriptor-table exiting. */
    uint32_t fVmxDescTableExit : 1;
    /** VMX: Supports RDTSCP. */
    uint32_t fVmxRdtscp : 1;
    /** VMX: Supports virtualize-x2APIC mode. */
    uint32_t fVmxVirtX2ApicMode : 1;
    /** VMX: Supports VPID. */
    uint32_t fVmxVpid : 1;
    /** VMX: Supports WBINVD exiting. */
    uint32_t fVmxWbinvdExit : 1;
    /** VMX: Supports Unrestricted guest. */
    uint32_t fVmxUnrestrictedGuest : 1;
    /** VMX: Supports APIC-register virtualization. */
    uint32_t fVmxApicRegVirt : 1;
    /** VMX: Supports virtual-interrupt delivery. */
    uint32_t fVmxVirtIntDelivery : 1;
    /** VMX: Supports Pause-loop exiting. */
    uint32_t fVmxPauseLoopExit : 1;
    /** VMX: Supports RDRAND exiting. */
    uint32_t fVmxRdrandExit : 1;
    /** VMX: Supports INVPCID. */
    uint32_t fVmxInvpcid : 1;
    /** VMX: Supports VM functions. */
    uint32_t fVmxVmFunc : 1;
    /** VMX: Supports VMCS shadowing. */
    uint32_t fVmxVmcsShadowing : 1;
    /** VMX: Supports RDSEED exiting. */
    uint32_t fVmxRdseedExit : 1;
    /** VMX: Supports PML. */
    uint32_t fVmxPml : 1;
    /** VMX: Supports EPT-violations \#VE. */
    uint32_t fVmxEptXcptVe : 1;
    /** VMX: Supports XSAVES/XRSTORS. */
    uint32_t fVmxXsavesXrstors : 1;
    /** VMX: Supports TSC scaling. */
    uint32_t fVmxUseTscScaling : 1;
    /** @} */

    /** @name VMX VM-entry controls.
     * @{ */
    /** VMX: Supports load-debug controls on VM-entry. */
    uint32_t fVmxEntryLoadDebugCtls : 1;
    /** VMX: Supports IA32e mode guest. */
    uint32_t fVmxIa32eModeGuest : 1;
    /** VMX: Supports load guest EFER MSR on VM-entry. */
    uint32_t fVmxEntryLoadEferMsr : 1;
    /** VMX: Supports load guest PAT MSR on VM-entry. */
    uint32_t fVmxEntryLoadPatMsr : 1;
    /** @} */

    /** @name VMX VM-exit controls.
     * @{ */
    /** VMX: Supports save debug controls on VM-exit. */
    uint32_t fVmxExitSaveDebugCtls : 1;
    /** VMX: Supports host-address space size. */
    uint32_t fVmxHostAddrSpaceSize : 1;
    /** VMX: Supports acknowledge external interrupt on VM-exit. */
    uint32_t fVmxExitAckExtInt : 1;
    /** VMX: Supports save guest PAT MSR on VM-exit. */
    uint32_t fVmxExitSavePatMsr : 1;
    /** VMX: Supports load host PAT MSR on VM-exit. */
    uint32_t fVmxExitLoadPatMsr : 1;
    /** VMX: Supports save guest EFER MSR on VM-exit. */
    uint32_t fVmxExitSaveEferMsr : 1;
    /** VMX: Supports load host EFER MSR on VM-exit. */
    uint32_t fVmxExitLoadEferMsr : 1;
    /** VMX: Supports save VMX preemption timer on VM-exit. */
    uint32_t fVmxSavePreemptTimer : 1;
    /** @} */

    /** @name VMX Miscellaneous data.
     * @{ */
    /** VMX: Supports storing EFER.LMA into IA32e-mode guest field on VM-exit. */
    uint32_t fVmxExitSaveEferLma : 1;
    /** VMX: Whether Intel PT (Processor Trace) is supported in VMX mode or not. */
    uint32_t fVmxIntelPt : 1;
    /** VMX: Supports VMWRITE to any valid VMCS field incl. read-only fields,
     * otherwise VMWRITE cannot modify read-only VM-exit information fields. */
    uint32_t fVmxVmwriteAll : 1;
    /** VMX: Supports injection of software interrupts and ICEBP on VM-entry for
     * zero-length instructions. */
    uint32_t fVmxEntryInjectSoftInt : 1;
    /** @} */

    /** VMX: Padding / reserved for future features. */
    uint32_t fVmxPadding1 : 1;
    uint32_t fVmxPadding2;
} CPUMFEATURES;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMFEATURES, 48);
#endif
/** Pointer to a CPU feature structure. */
typedef CPUMFEATURES *PCPUMFEATURES;
/** Pointer to a const CPU feature structure. */
typedef CPUMFEATURES const *PCCPUMFEATURES;

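/*
 * Example (editor's addition; illustrative): code that needs CPU capabilities
 * consults the exploded CPUMFEATURES bits instead of re-executing CPUID.  The
 * pFeatures pointer is assumed to reference the guest (or host) feature
 * structure owned by CPUM.
 *
 * @code
 *     PCCPUMFEATURES pFeatures = ...; // assumed: obtained from CPUM
 *     if (pFeatures->fVmx && pFeatures->fVmxSecondaryExecCtls && pFeatures->fVmxEpt)
 *     {
 *         // nested VMX with EPT available (illustrative check)
 *     }
 * @endcode
 */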

#ifndef VBOX_FOR_DTRACE_LIB

/** @name Guest Register Getters.
 * @{ */
VMMDECL(void) CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR);
VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit);
VMMDECL(RTSEL) CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden);
VMMDECL(RTSEL) CPUMGetGuestLDTR(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
VMMDECL(uint64_t) CPUMGetGuestCR0(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestCR2(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestCR3(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestCR4(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestCR8(PCVMCPU pVCpu);
VMMDECL(int) CPUMGetGuestCRx(PCVMCPU pVCpu, unsigned iReg, uint64_t *pValue);
VMMDECL(uint32_t) CPUMGetGuestEFlags(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEIP(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestRIP(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEAX(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEBX(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestECX(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEDX(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestESI(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEDI(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestESP(PCVMCPU pVCpu);
VMMDECL(uint32_t) CPUMGetGuestEBP(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestCS(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestDS(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestES(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestFS(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestGS(PCVMCPU pVCpu);
VMMDECL(RTSEL) CPUMGetGuestSS(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR0(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR1(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR2(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR3(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR6(PCVMCPU pVCpu);
VMMDECL(uint64_t) CPUMGetGuestDR7(PCVMCPU pVCpu);
VMMDECL(int) CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t iSubLeaf,
                                uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
VMMDECL(uint64_t) CPUMGetGuestEFER(PCVMCPU pVCpu);
VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PCVMCPU pVCpu);
VMM_INT_DECL(uint64_t) CPUMGetGuestIa32SmmMonitorCtl(PCVMCPU pVCpu);
VMMDECL(VBOXSTRICTRC) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue);
VMMDECL(VBOXSTRICTRC) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue);
VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM);
VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM);
/** @} */

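/*
 * Example (editor's addition; usage sketch): reading guest state with the
 * getters above.  MSR access returns a strict status code that must be
 * checked; MSR_IA32_APICBASE comes from iprt/x86.h, pVCpu is assumed
 * caller-supplied.
 *
 * @code
 *     uint64_t uApicBase = 0;
 *     VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_IA32_APICBASE, &uApicBase);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint32_t uEax, uEbx, uEcx, uEdx;
 *         CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx); // std leaf 1
 *     }
 * @endcode
 */
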
/** @name Guest Register Setters.
 * @{ */
VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0);
VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0);
VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1);
VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2);
VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3);
VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7);
VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value);
VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue);
VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
VMMDECL(void) CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu);
VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg);
VMM_INT_DECL(void) CPUMSetGuestTscAux(PVMCPU pVCpu, uint64_t uValue);
VMM_INT_DECL(uint64_t) CPUMGetGuestTscAux(PVMCPU pVCpu);
VMM_INT_DECL(void) CPUMSetGuestSpecCtrl(PVMCPU pVCpu, uint64_t uValue);
VMM_INT_DECL(uint64_t) CPUMGetGuestSpecCtrl(PVMCPU pVCpu);
VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM);
/** @} */

1448
1449/** @name Misc Guest Predicate Functions.
1450 * @{ */
1451VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
1452VMMDECL(bool) CPUMIsGuestNXEnabled(PCVMCPU pVCpu);
1453VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu);
1454VMMDECL(bool) CPUMIsGuestPagingEnabled(PCVMCPU pVCpu);
1455VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu);
1456VMMDECL(bool) CPUMIsGuestInRealMode(PCVMCPU pVCpu);
1457VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu);
1458VMMDECL(bool) CPUMIsGuestInProtectedMode(PCVMCPU pVCpu);
1459VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu);
1460VMMDECL(bool) CPUMIsGuestInLongMode(PCVMCPU pVCpu);
1461VMMDECL(bool) CPUMIsGuestInPAEMode(PCVMCPU pVCpu);
1462VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PCVMCPU pVCpu);
1463/** @} */
1464
1465/** @name Nested Hardware-Virtualization Helpers.
1466 * @{ */
1467VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu);
1468VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu);
1469VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks);
1470VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTicks);
1471
1472/* SVM helpers. */
1473VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1474VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
1475VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx);
1476VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx);
1477VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
1478VMM_INT_DECL(bool) CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1479 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
1480 PSVMIOIOEXITINFO pIoExitInfo);
1481VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
1482
1483/* VMX helpers. */
1484VMM_INT_DECL(bool) CPUMIsGuestVmxVmcsFieldValid(PVM pVM, uint64_t u64VmcsField);
1485VMM_INT_DECL(bool) CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess);
1486VMM_INT_DECL(bool) CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3);
1487VMM_INT_DECL(bool) CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc);
1488VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr);
1489VMM_INT_DECL(bool) CPUMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
1490 uint8_t cbAccess);
1491/** @} */
1492
1493/** @name Externalized State Helpers.
1494 * @{ */
1495/** @def CPUM_ASSERT_NOT_EXTRN
1496 * Macro for asserting that the state specified by @a a_fNotExtrn is present.
1497 *
1498 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1499 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check.
1500 *
1501 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1502 */
1503#define CPUM_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
1504 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fNotExtrn)), \
1505 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
1506
1507/** @def CPUM_IMPORT_EXTRN_RET
1508 * Macro for making sure the state specified by @a a_fExtrnImport is present,
1509 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1510 *
1511 * Will return from the calling function if CPUMImportGuestStateOnDemand() fails.
1512 *
1513 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1514 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1515 * @thread EMT(a_pVCpu)
1516 *
1517 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1518 */
1519#define CPUM_IMPORT_EXTRN_RET(a_pVCpu, a_fExtrnImport) \
1520 do { \
1521 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1522 { /* already present, consider this likely */ } \
1523 else \
1524 { \
1525 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1526 AssertRCReturn(rcCpumImport, rcCpumImport); \
1527 } \
1528 } while (0)
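/* A minimal usage sketch, assuming an EMT caller in a translation unit that
 * defines VMCPU_INCL_CPUM_GST_CTX; the handler name and the particular
 * CPUMCTX_EXTRN_XXX mask are illustrative, not part of this header:
 * @code
 *     static int hypotheticalExitHandler(PVMCPU pVCpu)
 *     {
 *         // Make sure RIP and RFLAGS are imported before reading them.
 *         CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *         uint64_t const uRip = pVCpu->cpum.GstCtx.rip;
 *         // ... act on uRip ...
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */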
1529
1530/** @def CPUM_IMPORT_EXTRN_RCSTRICT
1531 * Macro for making sure the state specified by @a a_fExtrnImport is present,
1532 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
1533 *
1534 * Will update @a a_rcStrict if CPUMImportGuestStateOnDemand() fails.
1535 *
1536 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1537 * @param a_fExtrnImport Mask of CPUMCTX_EXTRN_XXX bits to get.
1538 * @param a_rcStrict Strict status code variable to update on failure.
1539 * @thread EMT(a_pVCpu)
1540 *
1541 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
1542 */
1543#define CPUM_IMPORT_EXTRN_RCSTRICT(a_pVCpu, a_fExtrnImport, a_rcStrict) \
1544 do { \
1545 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1546 { /* already present, consider this likely */ } \
1547 else \
1548 { \
1549 int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1550 AssertStmt(RT_SUCCESS(rcCpumImport) || RT_FAILURE_NP(a_rcStrict), a_rcStrict = rcCpumImport); \
1551 } \
1552 } while (0)
1553
1554VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPU pVCpu, uint64_t fExtrnImport);
1555/** @} */
1556
1557#ifndef IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS
1558
1559/**
1560 * Gets valid CR0 bits for the guest.
1561 *
1562 * @returns Valid CR0 bits.
1563 */
1564DECLINLINE(uint64_t) CPUMGetGuestCR0ValidMask(void)
1565{
1566 return ( X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
1567 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
1568 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG);
1569}
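/* A short sketch of rejecting reserved CR0 bits with the mask above; the
 * surrounding MOV-to-CR0 emulation step and the choice of status code are
 * assumptions for illustration:
 * @code
 *     if (uNewCr0 & ~CPUMGetGuestCR0ValidMask())
 *         return VERR_INVALID_PARAMETER;   // reserved CR0 bit set
 * @endcode
 */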
1570
1571/**
1572 * Tests if the guest is running in real mode or not.
1573 *
1574 * @returns true if in real mode, otherwise false.
1575 * @param pCtx Current CPU context.
1576 */
1577DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCCPUMCTX pCtx)
1578{
1579 return !(pCtx->cr0 & X86_CR0_PE);
1580}
1581
1582/**
1583 * Tests if the guest is running in real or virtual 8086 mode.
1584 *
1585 * @returns @c true if it is, @c false if not.
1586 * @param pCtx Current CPU context.
1587 */
1588DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCCPUMCTX pCtx)
1589{
1590 return !(pCtx->cr0 & X86_CR0_PE)
1591 || pCtx->eflags.Bits.u1VM; /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
1592}
1593
1594/**
1595 * Tests if the guest is running in virtual 8086 mode.
1596 *
1597 * @returns @c true if it is, @c false if not.
1598 * @param pCtx Current CPU context.
1599 */
1600DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCCPUMCTX pCtx)
1601{
1602 return (pCtx->eflags.Bits.u1VM == 1);
1603}
1604
1605/**
1606 * Tests if the guest is running in paged protected mode or not.
1607 *
1608 * @returns true if in paged protected mode, otherwise false.
1609 * @param pCtx Current CPU context.
1610 */
1611DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
1612{
1613 return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1614}
1615
1616/**
1617 * Tests if the guest is running in long mode or not.
1618 *
1619 * @returns true if in long mode, otherwise false.
1620 * @param pCtx Current CPU context.
1621 */
1622DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx)
1623{
1624 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1625}
1626
1627VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
1628
1629/**
1630 * Tests if the guest is executing 64-bit code or not.
1631 *
1632 * @returns true if in 64-bit mode, otherwise false.
1633 * @param pCtx Current CPU context.
1634 */
1635DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
1636{
1637 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
1638 return false;
1639 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
1640 return CPUMIsGuestIn64BitCodeSlow(pCtx);
1641 return pCtx->cs.Attr.n.u1Long;
1642}
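/* For instance, a guest in compatibility mode (EFER.LMA set, CS.L clear)
 * makes this function return false, even though CPUMIsGuestInLongModeEx()
 * returns true for the same context. */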
1643
1644/**
1645 * Tests if the guest has paging enabled or not.
1646 *
1647 * @returns true if paging is enabled, otherwise false.
1648 * @param pCtx Current CPU context.
1649 */
1650DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCCPUMCTX pCtx)
1651{
1652 return !!(pCtx->cr0 & X86_CR0_PG);
1653}
1654
1655/**
1656 * Tests if the guest is running in PAE mode or not.
1657 *
1658 * @returns true if in PAE mode, otherwise false.
1659 * @param pCtx Current CPU context.
1660 */
1661DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCCPUMCTX pCtx)
1662{
1663 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We use EFER.LMA rather
1664 than EFER.LME as it reflects whether the CPU has actually entered paging with EFER.LME set. */
1665 return ( (pCtx->cr4 & X86_CR4_PAE)
1666 && CPUMIsGuestPagingEnabledEx(pCtx)
1667 && !(pCtx->msrEFER & MSR_K6_EFER_LMA));
1668}
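/* Worked example of the test above: CR4.PAE=1 and CR0.PG=1 with EFER.LMA=0
 * is classic 32-bit PAE paging and yields true; the same CR bits with
 * EFER.LMA=1 mean 4-level long-mode paging and yield false. */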
1669
1670/**
1671 * Tests if the guest has AMD SVM enabled or not.
1672 *
1673 * @returns true if SVM is enabled, otherwise false.
1674 * @param pCtx Current CPU context.
1675 */
1676DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
1677{
1678 return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
1679}
1680
1681/**
1682 * Tests if the guest has Intel VT-x enabled or not.
1683 *
1684 * @returns true if VMX is enabled, otherwise false.
1685 * @param pCtx Current CPU context.
1686 */
1687DECLINLINE(bool) CPUMIsGuestVmxEnabled(PCCPUMCTX pCtx)
1688{
1689 return RT_BOOL(pCtx->cr4 & X86_CR4_VMXE);
1690}
1691
1692/**
1693 * Returns the guest's global-interrupt (GIF) flag.
1694 *
1695 * @returns true when global-interrupts are enabled, otherwise false.
1696 * @param pCtx Current CPU context.
1697 */
1698DECLINLINE(bool) CPUMGetGuestGif(PCCPUMCTX pCtx)
1699{
1700 return pCtx->hwvirt.fGif;
1701}
1702
1703/**
1704 * Sets the guest's global-interrupt flag (GIF).
1705 *
1706 * @param pCtx Current CPU context.
1707 * @param fGif The value to set.
1708 */
1709DECLINLINE(void) CPUMSetGuestGif(PCPUMCTX pCtx, bool fGif)
1710{
1711 pCtx->hwvirt.fGif = fGif;
1712}
1713
1714/**
1715 * Checks if we are executing inside an SVM nested hardware-virtualized guest.
1716 *
1717 * @returns @c true if in SVM nested-guest mode, @c false otherwise.
1718 * @param pCtx Current CPU context.
1719 */
1720DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
1721{
1722 /*
1723 * With AMD-V, the VMRUN intercept is a prerequisite to entering SVM guest-mode.
1724 * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
1725 */
1726#ifndef IN_RC
1727 if ( pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM
1728 || !(pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
1729 return false;
1730 return true;
1731#else
1732 NOREF(pCtx);
1733 return false;
1734#endif
1735}
1736
1737/**
1738 * Checks if the guest is in VMX non-root operation.
1739 *
1740 * @returns @c true if in VMX non-root operation, @c false otherwise.
1741 * @param pCtx Current CPU context.
1742 */
1743DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx)
1744{
1745#ifndef IN_RC
1746 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1747 return false;
1748 Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
1749 return pCtx->hwvirt.vmx.fInVmxNonRootMode;
1750#else
1751 NOREF(pCtx);
1752 return false;
1753#endif
1754}
1755
1756/**
1757 * Checks if we are executing inside an SVM or VMX nested hardware-virtualized
1758 * guest.
1759 *
1760 * @returns @c true if in nested-guest mode, @c false otherwise.
1761 * @param pCtx Current CPU context.
1762 */
1763DECLINLINE(bool) CPUMIsGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
1764{
1765 return CPUMIsGuestInVmxNonRootMode(pCtx) || CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
1766}
1767
1768/**
1769 * Checks if the guest is in VMX root operation.
1770 *
1771 * @returns @c true if in VMX root operation, @c false otherwise.
1772 * @param pCtx Current CPU context.
1773 */
1774DECLINLINE(bool) CPUMIsGuestInVmxRootMode(PCCPUMCTX pCtx)
1775{
1776#ifndef IN_RC
1777 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1778 return false;
1779 return pCtx->hwvirt.vmx.fInVmxRootMode;
1780#else
1781 NOREF(pCtx);
1782 return false;
1783#endif
1784}
1785
1786# ifndef IN_RC
1787
1788/**
1789 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
1790 * active.
1791 *
1792 * @returns @c true if the intercept is set, @c false otherwise.
1793 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1794 * @param pCtx Pointer to the context.
1795 * @param fIntercept The SVM control/instruction intercept, see
1796 * SVM_CTRL_INTERCEPT_*.
1797 */
1798DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fIntercept)
1799{
1800 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1801 return false;
1802 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1803 Assert(pVmcb);
1804 uint64_t u64Intercepts;
1805 if (!HMGetGuestSvmCtrlIntercepts(pVCpu, &u64Intercepts))
1806 u64Intercepts = pVmcb->ctrl.u64InterceptCtrl;
1807 return RT_BOOL(u64Intercepts & fIntercept);
1808}
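/* A minimal sketch of consulting a control intercept during instruction
 * emulation, assuming pVCpu/pCtx are in scope and SVM nested-guest mode may
 * be active; the RDTSC choice and the #VMEXIT step are illustrative:
 * @code
 *     if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
 *         && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSC))
 *     {
 *         // ... hand the instruction to the nested-guest #VMEXIT path
 *         //     instead of emulating RDTSC directly ...
 *     }
 * @endcode
 */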
1809
1810/**
1811 * Checks if the nested-guest VMCB has the specified CR read intercept active.
1812 *
1813 * @returns @c true if the intercept is set, @c false otherwise.
1814 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1815 * @param pCtx Pointer to the context.
1816 * @param uCr The CR register number (0 to 15).
1817 */
1818DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1819{
1820 Assert(uCr < 16);
1821 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1822 return false;
1823 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1824 Assert(pVmcb);
1825 uint16_t u16Intercepts;
1826 if (!HMGetGuestSvmReadCRxIntercepts(pVCpu, &u16Intercepts))
1827 u16Intercepts = pVmcb->ctrl.u16InterceptRdCRx;
1828 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
1829}
1830
1831/**
1832 * Checks if the nested-guest VMCB has the specified CR write intercept active.
1833 *
1834 * @returns @c true if the intercept is set, @c false otherwise.
1835 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1836 * @param pCtx Pointer to the context.
1837 * @param uCr The CR register number (0 to 15).
1838 */
1839DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1840{
1841 Assert(uCr < 16);
1842 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1843 return false;
1844 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1845 Assert(pVmcb);
1846 uint16_t u16Intercepts;
1847 if (!HMGetGuestSvmWriteCRxIntercepts(pVCpu, &u16Intercepts))
1848 u16Intercepts = pVmcb->ctrl.u16InterceptWrCRx;
1849 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
1850}
1851
1852/**
1853 * Checks if the nested-guest VMCB has the specified DR read intercept active.
1854 *
1855 * @returns @c true if the intercept is set, @c false otherwise.
1856 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1857 * @param pCtx Pointer to the context.
1858 * @param uDr The DR register number (0 to 15).
1859 */
1860DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1861{
1862 Assert(uDr < 16);
1863 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1864 return false;
1865 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1866 Assert(pVmcb);
1867 uint16_t u16Intercepts;
1868 if (!HMGetGuestSvmReadDRxIntercepts(pVCpu, &u16Intercepts))
1869 u16Intercepts = pVmcb->ctrl.u16InterceptRdDRx;
1870 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
1871}
1872
1873/**
1874 * Checks if the nested-guest VMCB has the specified DR write intercept active.
1875 *
1876 * @returns @c true if the intercept is set, @c false otherwise.
1877 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1878 * @param pCtx Pointer to the context.
1879 * @param uDr The DR register number (0 to 15).
1880 */
1881DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1882{
1883 Assert(uDr < 16);
1884 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1885 return false;
1886 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1887 Assert(pVmcb);
1888 uint16_t u16Intercepts;
1889 if (!HMGetGuestSvmWriteDRxIntercepts(pVCpu, &u16Intercepts))
1890 u16Intercepts = pVmcb->ctrl.u16InterceptWrDRx;
1891 return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
1892}
1893
1894/**
1895 * Checks if the nested-guest VMCB has the specified exception intercept active.
1896 *
1897 * @returns @c true if the intercept is active, @c false otherwise.
1898 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1899 * @param pCtx Pointer to the context.
1900 * @param uVector The exception / interrupt vector.
1901 */
1902DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
1903{
1904 Assert(uVector <= X86_XCPT_LAST);
1905 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1906 return false;
1907 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1908 Assert(pVmcb);
1909 uint32_t u32Intercepts;
1910 if (!HMGetGuestSvmXcptIntercepts(pVCpu, &u32Intercepts))
1911 u32Intercepts = pVmcb->ctrl.u32InterceptXcpt;
1912 return RT_BOOL(u32Intercepts & RT_BIT(uVector));
1913}
1914
1915/**
1916 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
1917 *
1918 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
1919 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1920 * @param pCtx Pointer to the context.
1921 *
1922 * @remarks Should only be called when SVM feature is exposed to the guest.
1923 */
1924DECLINLINE(bool) CPUMIsGuestSvmVirtIntrMasking(PCVMCPU pVCpu, PCCPUMCTX pCtx)
1925{
1926 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1927 return false;
1928 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1929 Assert(pVmcb);
1930 bool fVIntrMasking;
1931 if (!HMGetGuestSvmVirtIntrMasking(pVCpu, &fVIntrMasking))
1932 fVIntrMasking = pVmcb->ctrl.IntCtrl.n.u1VIntrMasking;
1933 return fVIntrMasking;
1934}
1935
1936/**
1937 * Checks if the nested-guest VMCB has nested-paging enabled.
1938 *
1939 * @returns @c true if nested-paging is enabled, @c false otherwise.
1940 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1941 * @param pCtx Pointer to the context.
1942 *
1943 * @remarks Should only be called when SVM feature is exposed to the guest.
1944 */
1945DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
1946{
1947 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1948 return false;
1949 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1950 Assert(pVmcb);
1951 bool fNestedPaging;
1952 if (!HMGetGuestSvmNestedPaging(pVCpu, &fNestedPaging))
1953 fNestedPaging = pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging;
1954 return fNestedPaging;
1955}
1956
1957/**
1958 * Gets the nested-guest VMCB pause-filter count.
1959 *
1960 * @returns The pause-filter count.
1961 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1962 * @param pCtx Pointer to the context.
1963 *
1964 * @remarks Should only be called when SVM feature is exposed to the guest.
1965 */
1966DECLINLINE(uint16_t) CPUMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, PCCPUMCTX pCtx)
1967{
1968 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1969 return 0;
1970 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1971 Assert(pVmcb);
1972 uint16_t u16PauseFilterCount;
1973 if (!HMGetGuestSvmPauseFilterCount(pVCpu, &u16PauseFilterCount))
1974 u16PauseFilterCount = pVmcb->ctrl.u16PauseFilterCount;
1975 return u16PauseFilterCount;
1976}
1977
1978/**
1979 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
1980 *
1981 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1982 * @param pCtx Pointer to the context.
1983 * @param cbInstr The length of the current instruction in bytes.
1984 *
1985 * @remarks Should only be called when SVM feature is exposed to the guest.
1986 */
1987DECLINLINE(void) CPUMGuestSvmUpdateNRip(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbInstr)
1988{
1989 RT_NOREF(pVCpu);
1990 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
1991 PSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1992 Assert(pVmcb);
1993 pVmcb->ctrl.u64NextRIP = pCtx->rip + cbInstr;
1994}
1995
1996/**
1997 * Checks whether any of the given Pin-based VM-execution controls are set when
1998 * executing a nested-guest.
1999 *
2000 * @returns @c true if set, @c false otherwise.
2001 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2002 * @param pCtx Pointer to the context.
2003 * @param uPinCtls The Pin-based VM-execution controls to check.
2004 *
2005 * @remarks This does not check if all given controls are set if more than one
2006 * control is passed in @a uPinCtls.
2007 */
2008DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uPinCtls)
2009{
2010 RT_NOREF(pVCpu);
2011 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2012 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2013 Assert(pVmcs);
2014 return RT_BOOL(pVmcs->u32PinCtls & uPinCtls);
2015}
2016
2017/**
2018 * Checks whether any of the given Processor-based VM-execution controls are set
2019 * when executing a nested-guest.
2020 *
2021 * @returns @c true if set, @c false otherwise.
2022 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2023 * @param pCtx Pointer to the context.
2024 * @param uProcCtls The Processor-based VM-execution controls to check.
2025 *
2026 * @remarks This does not check if all given controls are set if more than one
2027 * control is passed in @a uProcCtls.
2028 */
2029DECLINLINE(bool) CPUMIsGuestVmxProcCtlsSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uProcCtls)
2030{
2031 RT_NOREF(pVCpu);
2032 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2033 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2034 Assert(pVmcs);
2035 return RT_BOOL(pVmcs->u32ProcCtls & uProcCtls);
2036}
2037
2038/**
2039 * Checks whether any of the given Secondary Processor-based VM-execution controls
2040 * are set when executing a nested-guest.
2041 *
2042 * @returns @c true if set, @c false otherwise.
2043 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2044 * @param pCtx Pointer to the context.
2045 * @param uProcCtls2 The Secondary Processor-based VM-execution controls to
2046 * check.
2047 *
2048 * @remarks This does not check if all given controls are set if more than one
2049 * control is passed in @a uProcCtls2.
2050 */
2051DECLINLINE(bool) CPUMIsGuestVmxProcCtls2Set(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uProcCtls2)
2052{
2053 RT_NOREF(pVCpu);
2054 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2055 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2056 Assert(pVmcs);
2057 return RT_BOOL(pVmcs->u32ProcCtls2 & uProcCtls2);
2058}
2059
2060/**
2061 * Checks whether any of the given VM-exit controls are set when executing a
2062 * nested-guest.
2063 *
2064 * @returns @c true if set, @c false otherwise.
2065 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2066 * @param pCtx Pointer to the context.
2067 * @param uExitCtls The VM-exit controls to check.
2068 *
2069 * @remarks This does not check if all given controls are set if more than one
2070 * control is passed in @a uExitCtls.
2071 */
2072DECLINLINE(bool) CPUMIsGuestVmxExitCtlsSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uExitCtls)
2073{
2074 RT_NOREF(pVCpu);
2075 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2076 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2077 Assert(pVmcs);
2078 return RT_BOOL(pVmcs->u32ExitCtls & uExitCtls);
2079}
2080
2081/**
2082 * Checks whether any of the given VM-entry controls are set when executing a
2083 * nested-guest.
2084 *
2085 * @returns @c true if set, @c false otherwise.
2086 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2087 * @param pCtx Pointer to the context.
2088 * @param uEntryCtls The VM-entry controls to check.
2089 *
2090 * @remarks This does not check if all given controls are set if more than one
2091 * control is passed in @a uEntryCtls.
2092 */
2093DECLINLINE(bool) CPUMIsGuestVmxEntryCtlsSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uEntryCtls)
2094{
2095 RT_NOREF(pVCpu);
2096 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2097 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2098 Assert(pVmcs);
2099 return RT_BOOL(pVmcs->u32EntryCtls & uEntryCtls);
2100}
2101
2102/**
2103 * Checks whether the given exception causes a VM-exit.
2104 *
2105 * The exception types include hardware exceptions, software exceptions (#BP, #OF)
2106 * and privileged software exceptions (#DB generated by INT1/ICEBP).
2107 *
2108 * Software interrupts do -not- cause VM-exits and hence must not be used with this
2109 * function.
2110 *
2111 * @returns @c true if the exception causes a VM-exit, @c false otherwise.
2112 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2113 * @param pCtx Pointer to the context.
2114 * @param uVector The exception vector.
2115 * @param uErrCode The error code associated with the exception. Pass 0 if not
2116 * applicable.
2117 */
2118DECLINLINE(bool) CPUMIsGuestVmxXcptInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector, uint32_t uErrCode)
2119{
2120 Assert(uVector <= X86_XCPT_LAST);
2121
2122 RT_NOREF(pVCpu);
2123 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2124 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2125 Assert(pVmcs);
2126
2127 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
2128 if (uVector == X86_XCPT_NMI)
2129 return RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
2130
2131 /* Page-faults are subject to masking using their error code. */
2132 uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
2133 if (uVector == X86_XCPT_PF)
2134 {
2135 uint32_t const fXcptPFMask = pVmcs->u32XcptPFMask;
2136 uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
2137 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
2138 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
2139 }
2140
2141 /* Consult the exception bitmap for all other exceptions. */
2142 if (fXcptBitmap & RT_BIT(uVector))
2143 return true;
2144 return false;
2145}
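/* Worked example of the #PF mask/match rule above, using illustrative VMCS
 * values: with the PF bit set in the exception bitmap, u32XcptPFMask = 1
 * (X86_TRAP_PF_P) and u32XcptPFMatch = 0, a not-present fault (error code
 * bit 0 clear) satisfies the match, keeps the bitmap bit and VM-exits,
 * while a protection fault (bit 0 set) flips the bit and does not. */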
2146
2147/**
2148 * Implements VMSucceed for VMX instruction success.
2149 *
2150 * @param pCtx Pointer to the context.
2151 */
2152DECLINLINE(void) CPUMSetGuestVmxVmSucceed(PCPUMCTX pCtx)
2153{
2154 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2155}
2156
2157/**
2158 * Implements VMFailInvalid for VMX instruction failure.
2159 *
2160 * @param pCtx Pointer to the context.
2161 */
2162DECLINLINE(void) CPUMSetGuestVmxVmFailInvalid(PCPUMCTX pCtx)
2163{
2164 pCtx->eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2165 pCtx->eflags.u32 |= X86_EFL_CF;
2166}
2167
2168/**
2169 * Implements VMFailValid for VMX instruction failure.
2170 *
2171 * @param pCtx Pointer to the context.
2172 * @param enmInsErr The VM instruction error.
2173 */
2174DECLINLINE(void) CPUMSetGuestVmxVmFailValid(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2175{
2176 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2177 pCtx->eflags.u32 |= X86_EFL_ZF;
2178 pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32RoVmInstrError = enmInsErr;
2179}
2180
2181/**
2182 * Implements VMFail for VMX instruction failure.
2183 *
2184 * @param pCtx Pointer to the context.
2185 * @param enmInsErr The VM instruction error.
2186 */
2187DECLINLINE(void) CPUMSetGuestVmxVmFail(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2188{
2189 if (pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
2190 CPUMSetGuestVmxVmFailValid(pCtx, enmInsErr);
2191 else
2192 CPUMSetGuestVmxVmFailInvalid(pCtx);
2193}
2194
2195/**
2196 * Returns the guest-physical address of the APIC-access page when executing a
2197 * nested-guest.
2198 *
2199 * @returns The APIC-access page guest-physical address.
2200 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2201 * @param pCtx Pointer to the context.
2202 */
2203DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2204{
2205 RT_NOREF(pVCpu);
2206 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2207 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2208 Assert(pVmcs);
2209 return pVmcs->u64AddrApicAccess.u;
2210}
2211
2212/**
2213 * Gets the nested-guest CR0 subject to the guest/host mask and the read-shadow.
2214 *
2215 * @returns The nested-guest CR0.
2216 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2217 * @param pCtx Pointer to the context.
2218 * @param fGstHostMask The CR0 guest/host mask to use.
2219 */
2220DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr0(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fGstHostMask)
2221{
2222 /*
2223 * For each CR0 bit owned by the host, the corresponding bit from the
2224 * CR0 read shadow is loaded. For each CR0 bit that is not owned by the host,
2225 * the corresponding bit from the guest CR0 is loaded.
2226 *
2227 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2228 */
2229 RT_NOREF(pVCpu);
2230 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2231 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2232 Assert(pVmcs);
2233 uint64_t const uGstCr0 = pCtx->cr0;
2234 uint64_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2235 return (fReadShadow & fGstHostMask) | (uGstCr0 & ~fGstHostMask);
2236}
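/* Worked example with illustrative values: if the guest/host mask owns only
 * CR0.MP (0x2), guest CR0 is 0x8000003B and the read shadow has MP clear,
 * the function returns (0 & 0x2) | (0x8000003B & ~0x2) = 0x80000039. */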
2237
2238/**
2239 * Gets the nested-guest CR4 subject to the guest/host mask and the read-shadow.
2240 *
2241 * @returns The nested-guest CR4.
2242 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2243 * @param pCtx Pointer to the context.
2244 * @param fGstHostMask The CR4 guest/host mask to use.
2245 */
2246DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr4(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fGstHostMask)
2247{
2248 /*
2249 * For each CR4 bit owned by the host, the corresponding bit from the
2250 * CR4 read shadow is loaded. For each CR4 bit that is not owned by the host,
2251 * the corresponding bit from the guest CR4 is loaded.
2252 *
2253 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2254 */
2255 RT_NOREF(pVCpu);
2256 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2257 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2258 Assert(pVmcs);
2259 uint64_t const uGstCr4 = pCtx->cr4;
2260 uint64_t const fReadShadow = pVmcs->u64Cr4ReadShadow.u;
2261 return (fReadShadow & fGstHostMask) | (uGstCr4 & ~fGstHostMask);
2262}
2263
2264/**
2265 * Checks whether the LMSW access causes a VM-exit or not.
2266 *
2267 * @returns @c true if the LMSW access causes a VM-exit, @c false otherwise.
2268 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2269 * @param pCtx Pointer to the context.
2270 * @param uNewMsw The LMSW source operand (the Machine Status Word).
2271 */
2272DECLINLINE(bool) CPUMIsGuestVmxLmswInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t uNewMsw)
2273{
2274 /*
2275 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
2276 *
2277 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
2278 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2279 */
2280 RT_NOREF(pVCpu);
2281 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2282 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2283 Assert(pVmcs);
2284
2285 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
2286 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2287
2288 /*
2289 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
2290 * CR0.PE case first, before the rest of the bits in the MSW.
2291 *
2292 * If CR0.PE is owned by the host and CR0.PE differs between the
2293 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
2294 */
2295 if ( (fGstHostMask & X86_CR0_PE)
2296 && (uNewMsw & X86_CR0_PE)
2297 && !(fReadShadow & X86_CR0_PE))
2298 return true;
2299
2300 /*
2301 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
2302 * bits differ between the MSW (source operand) and the read-shadow, we must
2303 * cause a VM-exit.
2304 */
2305 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2306 if ((fReadShadow & fGstHostLmswMask) != (uNewMsw & fGstHostLmswMask))
2307 return true;
2308
2309 return false;
2310}
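/* Worked example with illustrative values: if the host owns CR0.TS
 * (fGstHostMask includes X86_CR0_TS), the read shadow has TS clear and the
 * LMSW source operand has TS set, the host-owned MSW bits differ and this
 * returns true (VM-exit); an LMSW that leaves every host-owned MSW bit at
 * its read-shadow value returns false. */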
2311
2312/**
2313 * Checks whether the Mov-to-CR0/CR4 access causes a VM-exit or not.
2314 *
2315 * @returns @c true if the Mov CRX access causes a VM-exit, @c false otherwise.
2316 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2317 * @param pCtx Pointer to the context.
2318 * @param iCrReg The control register number (must be 0 or 4).
2319 * @param uNewCrX The CR0/CR4 value being written.
2320 */
2321DECLINLINE(bool) CPUMIsGuestVmxMovToCr0Cr4InterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t iCrReg, uint64_t uNewCrX)
2322{
2323 /*
2324 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
2325 * corresponding bits differ between the source operand and the read-shadow,
2326 * we must cause a VM-exit.
2327 *
2328 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2329 */
2330 RT_NOREF(pVCpu);
2331 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2332 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
2333 Assert(pVmcs);
2334 Assert(iCrReg == 0 || iCrReg == 4);
2335
2336 uint64_t fGstHostMask;
2337 uint64_t fReadShadow;
2338 if (iCrReg == 0)
2339 {
2340 fGstHostMask = pVmcs->u64Cr0Mask.u;
2341 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2342 }
2343 else
2344 {
2345 fGstHostMask = pVmcs->u64Cr4Mask.u;
2346 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
2347 }
2348
2349 if ((fReadShadow & fGstHostMask) != (uNewCrX & fGstHostMask))
2350 {
2351 Assert(fGstHostMask != 0);
2352 return true;
2353 }
2354
2355 return false;
2356}
2357
2358/**
2359 * Returns whether the guest has an active, current VMCS.
2360 *
2361 * @returns @c true if the guest has an active, current VMCS, @c false otherwise.
2362 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2363 * @param pCtx Pointer to the context.
2364 */
2365DECLINLINE(bool) CPUMIsGuestVmxCurrentVmcsValid(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2366{
2367 RT_NOREF(pVCpu);
2368 RTGCPHYS const GCPhysVmcs = pCtx->hwvirt.vmx.GCPhysVmcs;
2369 return RT_BOOL(GCPhysVmcs != NIL_RTGCPHYS);
2370}
2371
2372/**
2373 * Gets the nested-guest virtual-APIC page.
2374 *
2375 * @returns Pointer to the virtual-APIC page.
2376 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2377 * @param pCtx Pointer to the context.
2378 * @param pHCPhysVirtApicPage Where to store the host-physical address of the
2379 * virtual-APIC page.
2380 */
2381DECLINLINE(void *) CPUMGetGuestVmxVirtApicPage(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTHCPHYS pHCPhysVirtApicPage)
2382{
2383 RT_NOREF(pVCpu);
2384 Assert(pHCPhysVirtApicPage);
2385 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
2386 *pHCPhysVirtApicPage = pCtx->hwvirt.vmx.HCPhysVirtApicPage;
2387 return pCtx->hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
2388}
2389
2390# endif /* !IN_RC */
2391
2392/**
2393 * Checks whether the VMX nested-guest is in a state to receive physical (APIC)
2394 * interrupts.
2395 *
2396 * @returns @c true if it's ready, @c false otherwise.
2397 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2398 * @param pCtx The guest-CPU context.
2399 */
2400DECLINLINE(bool) CPUMIsGuestVmxPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2401{
2402#ifdef IN_RC
2403 RT_NOREF2(pVCpu, pCtx);
2404 AssertReleaseFailedReturn(false);
2405#else
2406 RT_NOREF(pVCpu);
2407 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2408 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2409#endif
2410}
2411
2412/**
2413 * Checks whether the VMX nested-guest is blocking virtual-NMIs.
2414 *
2415 * @returns @c true if it's blocked, @c false otherwise.
2416 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2417 * @param pCtx The guest-CPU context.
2418 */
2419DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2420{
2421#ifdef IN_RC
2422 RT_NOREF2(pVCpu, pCtx);
2423 AssertReleaseFailedReturn(false);
2424#else
2425 /*
2426 * Return the state of virtual-NMI blocking, if we are executing a
2427 * VMX nested-guest with virtual-NMIs enabled.
2428 */
2429 RT_NOREF(pVCpu);
2430 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2431 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI));
2432 return pCtx->hwvirt.vmx.fVirtNmiBlocking;
2433#endif
2434}
2435
2436/**
2437 * Sets or clears VMX nested-guest virtual-NMI blocking.
2438 *
2439 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2440 * @param pCtx The guest-CPU context.
2441 * @param fBlocking Whether virtual-NMI blocking is in effect or not.
2442 */
2443DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCVMCPU pVCpu, PCPUMCTX pCtx, bool fBlocking)
2444{
2445#ifdef IN_RC
2446 RT_NOREF3(pVCpu, pCtx, fBlocking);
2447 AssertReleaseFailedReturnVoid();
2448#else
2449 RT_NOREF(pVCpu);
2450 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2451 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_VIRT_NMI));
2452 pCtx->hwvirt.vmx.fVirtNmiBlocking = fBlocking;
2453#endif
2454}
2455
2456/**
2457 * Checks whether the VMX nested-guest is in a state to receive virtual interrupts
2458 * (those injected with the "virtual-interrupt delivery" feature).
2459 *
2460 * @returns @c true if it's ready, @c false otherwise.
2461 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2462 * @param pCtx The guest-CPU context.
2463 */
2464DECLINLINE(bool) CPUMIsGuestVmxVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
2465{
2466#ifdef IN_RC
2467 RT_NOREF2(pVCpu, pCtx);
2468 AssertReleaseFailedReturn(false);
2469#else
2470 RT_NOREF(pVCpu);
2471 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
2472 return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
2473#endif
2474}
2475
2476#endif /* IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS */
2477
2478/** @} */
2479
2480
2481/** @name Hypervisor Register Getters.
2482 * @{ */
2483VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu);
2484VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu);
2485VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu);
2486VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu);
2487VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu);
2488VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu);
2489#if 0 /* these are not correct. */
2490VMMDECL(uint32_t) CPUMGetHyperCR0(PVMCPU pVCpu);
2491VMMDECL(uint32_t) CPUMGetHyperCR2(PVMCPU pVCpu);
2492VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
2493VMMDECL(uint32_t) CPUMGetHyperCR4(PVMCPU pVCpu);
2494#endif
2495/** This register is only saved on fatal traps. */
2496VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu);
2497VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu);
2498/** This register is only saved on fatal traps. */
2499VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu);
2500/** This register is only saved on fatal traps. */
2501VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu);
2502VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu);
2503VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu);
2504VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu);
2505VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu);
2506VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu);
2507VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu);
2508VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu);
2509VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
2510VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
2511VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu);
2512VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu);
2513VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu);
2514VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu);
2515VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu);
2516VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu);
2517VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu);
2518VMMDECL(void) CPUMGetHyperCtx(PVMCPU pVCpu, PCPUMCTX pCtx);
2519VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
2520/** @} */
2521
2522/** @name Hypervisor Register Setters.
2523 * @{ */
2524VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit);
2525VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR);
2526VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit);
2527VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
2528VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR);
2529VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS);
2530VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS);
2531VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES);
2532VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS);
2533VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS);
2534VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS);
2535VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP);
2536VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl);
2537VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP);
2538VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX);
2539VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
2540VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
2541VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
2542VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
2543VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
2544VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
2545VMMDECL(void) CPUMSetHyperCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
2546VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper);
2547/** @} */
2548
2549VMMDECL(void) CPUMPushHyper(PVMCPU pVCpu, uint32_t u32);
2550VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx);
2551VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu);
2552VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu);
2553VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
2554VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu);
2555VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu);
2556VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu);
2557VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc);
2558VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu);
2559VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl);
2560
2561/** @name Changed flags.
2562 * These flags are used to keep track of which important registers have
2563 * been changed since they were last reset. The only component allowed
2564 * to clear them is REM!
2565 * @{
2566 */
2567#define CPUM_CHANGED_FPU_REM RT_BIT(0)
2568#define CPUM_CHANGED_CR0 RT_BIT(1)
2569#define CPUM_CHANGED_CR4 RT_BIT(2)
2570#define CPUM_CHANGED_GLOBAL_TLB_FLUSH RT_BIT(3)
2571#define CPUM_CHANGED_CR3 RT_BIT(4)
2572#define CPUM_CHANGED_GDTR RT_BIT(5)
2573#define CPUM_CHANGED_IDTR RT_BIT(6)
2574#define CPUM_CHANGED_LDTR RT_BIT(7)
2575#define CPUM_CHANGED_TR RT_BIT(8) /**< Currently unused. */
2576#define CPUM_CHANGED_SYSENTER_MSR RT_BIT(9)
2577#define CPUM_CHANGED_HIDDEN_SEL_REGS RT_BIT(10) /**< Currently unused. */
2578#define CPUM_CHANGED_CPUID RT_BIT(11)
2579#define CPUM_CHANGED_ALL ( CPUM_CHANGED_FPU_REM \
2580 | CPUM_CHANGED_CR0 \
2581 | CPUM_CHANGED_CR4 \
2582 | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
2583 | CPUM_CHANGED_CR3 \
2584 | CPUM_CHANGED_GDTR \
2585 | CPUM_CHANGED_IDTR \
2586 | CPUM_CHANGED_LDTR \
2587 | CPUM_CHANGED_TR \
2588 | CPUM_CHANGED_SYSENTER_MSR \
2589 | CPUM_CHANGED_HIDDEN_SEL_REGS \
2590 | CPUM_CHANGED_CPUID )
2591/** @} */
2592
2593VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
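/* A minimal sketch, assuming a caller that has just rewritten guest CR3 and
 * needs REM to pick the change up; pairing it with a global TLB flush here
 * is illustrative:
 * @code
 *     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3 | CPUM_CHANGED_GLOBAL_TLB_FLUSH);
 * @endcode
 */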
2594VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl);
2595VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels);
2596VMMDECL(bool) CPUMSupportsXSave(PVM pVM);
2597VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM);
2598VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM);
2599VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
2600VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
2601VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
2602VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
2603VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu);
2604VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
2605VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
2606VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu);
2607VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu);
2608VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu);
2609VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu);
2610VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
2611VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM);
2612VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM);
2613VMMDECL(uint64_t) CPUMGetGuestEferMsrValidMask(PVM pVM);
2614VMMDECL(int) CPUMIsGuestEferMsrWriteValid(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
2615 uint64_t *puValidEfer);
2616VMMDECL(void) CPUMSetGuestEferMsrNoChecks(PVMCPU pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
2617VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue);
2618
2619/** Guest CPU interruptibility level, see CPUMGetGuestInterruptibility(). */
2620typedef enum CPUMINTERRUPTIBILITY
2621{
2622 CPUMINTERRUPTIBILITY_INVALID = 0,
2623 CPUMINTERRUPTIBILITY_UNRESTRAINED,
2624 CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
2625 CPUMINTERRUPTIBILITY_INT_DISABLED,
2626 CPUMINTERRUPTIBILITY_INT_INHIBITED,
2627 CPUMINTERRUPTIBILITY_NMI_INHIBIT,
2628 CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
2629 CPUMINTERRUPTIBILITY_END,
2630 CPUMINTERRUPTIBILITY_32BIT_HACK = 0x7fffffff
2631} CPUMINTERRUPTIBILITY;
2632
2633VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
2634VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu);
2635VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock);
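/* A minimal sketch of consuming the interruptibility level, assuming an
 * interrupt-delivery loop; the dispatch actions are hypothetical:
 * @code
 *     switch (CPUMGetGuestInterruptibility(pVCpu))
 *     {
 *         case CPUMINTERRUPTIBILITY_UNRESTRAINED:
 *             // safe to inject a pending external interrupt
 *             break;
 *         case CPUMINTERRUPTIBILITY_INT_INHIBITED:
 *             // interrupt shadow (STI / MOV SS); retry after one instruction
 *             break;
 *         default:
 *             break;   // interrupts disabled or NMI/GIF blocking; keep waiting
 *     }
 * @endcode
 */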
2636
2637/** @name Typical scalable bus frequency values.
2638 * @{ */
2639/** Special internal value indicating that we don't know the frequency.
2640 * @internal */
2641#define CPUM_SBUSFREQ_UNKNOWN UINT64_C(1)
2642#define CPUM_SBUSFREQ_100MHZ UINT64_C(100000000)
2643#define CPUM_SBUSFREQ_133MHZ UINT64_C(133333333)
2644#define CPUM_SBUSFREQ_167MHZ UINT64_C(166666666)
2645#define CPUM_SBUSFREQ_200MHZ UINT64_C(200000000)
2646#define CPUM_SBUSFREQ_267MHZ UINT64_C(266666666)
2647#define CPUM_SBUSFREQ_333MHZ UINT64_C(333333333)
2648#define CPUM_SBUSFREQ_400MHZ UINT64_C(400000000)
2649/** @} */
2650
2651
2652#ifdef IN_RING3
2653/** @defgroup grp_cpum_r3 The CPUM ring-3 API
2654 * @{
2655 */
2656
2657VMMR3DECL(int) CPUMR3Init(PVM pVM);
2658VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
2659VMMR3DECL(void) CPUMR3LogCpuIdAndMsrFeatures(PVM pVM);
2660VMMR3DECL(void) CPUMR3Relocate(PVM pVM);
2661VMMR3DECL(int) CPUMR3Term(PVM pVM);
2662VMMR3DECL(void) CPUMR3Reset(PVM pVM);
2663VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
2664VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM);
2665VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
2666
2667VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
2668VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
2669VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
2670 uint8_t bModel, uint8_t bStepping);
2671VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch);
2672VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
2673VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
2674VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
2675VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
2676VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor);
2677VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void);
2678
2679VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
2680
2681# if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING)
2682/** @name APIs for the CPUID raw-mode patch (legacy).
2683 * @{ */
2684VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM);
2685VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM);
2686VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM);
2687VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM);
2688VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM);
2689VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM);
2690VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM);
2691/** @} */
2692# endif
2693
2694/** @} */
2695#endif /* IN_RING3 */
2696
2697#ifdef IN_RC
2698/** @defgroup grp_cpum_rc The CPUM Raw-mode Context API
2699 * @{
2700 */
2701
2702/**
2703 * Calls a guest trap/interrupt handler directly
2704 *
2705 * Assumes a trap stack frame has already been set up on the guest's stack!
2706 * This function does not return!
2707 *
2708 * @param pRegFrame Original trap/interrupt context
2709 * @param selCS Code selector of handler
2710 * @param pHandler GC virtual address of handler
2711 * @param eflags Callee's EFLAGS
2712 * @param selSS Stack selector for handler
2713 * @param pEsp Stack address for handler
2714 */
2715DECLASM(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTRCPTR pHandler,
2716 uint32_t eflags, uint32_t selSS, RTRCPTR pEsp);
2717
2718/**
2719 * Call guest V86 code directly.
2720 *
2721 * This function does not return!
2722 *
2723 * @param pRegFrame Original trap/interrupt context
2724 */
2725DECLASM(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
2726
2727VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu);
2728VMMDECL(uint32_t) CPUMRCGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
2729#ifdef VBOX_WITH_RAW_RING1
2730VMMDECL(void) CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore);
2731#endif
2732VMMRCDECL(void) CPUMRCProcessForceFlag(PVMCPU pVCpu);
2733
2734/** @} */
2735#endif /* IN_RC */
2736
2737#ifdef IN_RING0
2738/** @defgroup grp_cpum_r0 The CPUM ring-0 API
2739 * @{
2740 */
2741VMMR0_INT_DECL(int) CPUMR0ModuleInit(void);
2742VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void);
2743VMMR0_INT_DECL(int) CPUMR0InitVM(PVM pVM);
2744DECLASM(void) CPUMR0RegisterVCpuThread(PVMCPU pVCpu);
2745DECLASM(void) CPUMR0TouchHostFpu(void);
2746VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu);
2747VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu);
2748VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu);
2749VMMR0_INT_DECL(int) CPUMR0SaveHostDebugState(PVM pVM, PVMCPU pVCpu);
2750VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6);
2751VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6);
2752
2753VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6);
2754VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6);
2755#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
2756VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, uint32_t iHostCpuSet);
2757#endif
2758
2759/** @} */
2760#endif /* IN_RING0 */
2761
2762/** @defgroup grp_cpum_rz The CPUM raw-mode and ring-0 context API
2763 * @{
2764 */
2765VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPU pVCpu);
2766VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPU pVCpu);
2767VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPU pVCpu);
2768VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPU pVCpu);
2769VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPU pVCpu);
2770/** @} */
2771
2772
2773#endif /* !VBOX_FOR_DTRACE_LIB */
2774/** @} */
2775RT_C_DECLS_END
2776
2777
2778#endif /* !VBOX_INCLUDED_vmm_cpum_h */
2779