VirtualBox

source: vbox/trunk/include/VBox/vmm/cpum.h@ 78415

Last change on this file since 78415 was 78371, checked in by vboxsync, 6 years ago

VMM: Move VT-x/AMD-V helpers that are based on CPU specs to CPUM in preparation of upcoming changes. It is better placed in CPUM if say NEM in future needs to implement nested VT-x/AMD-V.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 97.4 KB
Line 
1/** @file
2 * CPUM - CPU Monitor(/ Manager).
3 */
4
5/*
6 * Copyright (C) 2006-2019 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef VBOX_INCLUDED_vmm_cpum_h
27#define VBOX_INCLUDED_vmm_cpum_h
28#ifndef RT_WITHOUT_PRAGMA_ONCE
29# pragma once
30#endif
31
32#include <iprt/x86.h>
33#include <VBox/types.h>
34#include <VBox/vmm/cpumctx.h>
35#include <VBox/vmm/stam.h>
36#include <VBox/vmm/vmapi.h>
37#include <VBox/vmm/hm_svm.h>
38#include <VBox/vmm/hm_vmx.h>
39
40RT_C_DECLS_BEGIN
41
42/** @defgroup grp_cpum The CPU Monitor / Manager API
43 * @ingroup grp_vmm
44 * @{
45 */
46
47/**
 48 * CPUID feature to set or clear.
 49 */
 50typedef enum CPUMCPUIDFEATURE
 51{
 52 CPUMCPUIDFEATURE_INVALID = 0,
 53 /** The APIC feature bit. (Std+Ext)
 54 * Note! There is a per-cpu flag for masking this CPUID feature bit when the
 55 * APICBASE.ENABLED bit is zero. So, this feature is only set/cleared
 56 * at VM construction time like all the others. This didn't use to be
 57 * that way, this is new with 5.1. */
 58 CPUMCPUIDFEATURE_APIC,
 59 /** The sysenter/sysexit feature bit. (Std) */
 60 CPUMCPUIDFEATURE_SEP,
 61 /** The SYSCALL/SYSEXIT feature bit (64 bits mode only for Intel CPUs). (Ext) */
 62 CPUMCPUIDFEATURE_SYSCALL,
 63 /** The PAE feature bit. (Std+Ext) */
 64 CPUMCPUIDFEATURE_PAE,
 65 /** The NX feature bit. (Ext) */
 66 CPUMCPUIDFEATURE_NX,
 67 /** The LAHF/SAHF feature bit (64 bits mode only). (Ext) */
 68 CPUMCPUIDFEATURE_LAHF,
 69 /** The LONG MODE feature bit. (Ext) */
 70 CPUMCPUIDFEATURE_LONG_MODE,
 71 /** The PAT feature bit. (Std+Ext) */
 72 CPUMCPUIDFEATURE_PAT,
 73 /** The x2APIC feature bit. (Std) */
 74 CPUMCPUIDFEATURE_X2APIC,
 75 /** The RDTSCP feature bit. (Ext) */
 76 CPUMCPUIDFEATURE_RDTSCP,
 77 /** The Hypervisor Present bit. (Std) */
 78 CPUMCPUIDFEATURE_HVP,
 79 /** The MWait Extensions bits. (Std) */
 80 CPUMCPUIDFEATURE_MWAIT_EXTS,
 81 /** The speculation control feature bits. (Std+Ext) */
 82 CPUMCPUIDFEATURE_SPEC_CTRL,
 83 /** 32bit hackishness. */
 84 CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
 85} CPUMCPUIDFEATURE;
86
87/**
 88 * CPU Vendor.
 89 */
 90typedef enum CPUMCPUVENDOR
 91{
 92 CPUMCPUVENDOR_INVALID = 0, /**< Invalid zero value. */
 93 CPUMCPUVENDOR_INTEL, /**< Intel. */
 94 CPUMCPUVENDOR_AMD, /**< AMD. */
 95 CPUMCPUVENDOR_VIA, /**< VIA / Centaur (see kCpumMicroarch_VIA_First). */
 96 CPUMCPUVENDOR_CYRIX, /**< Cyrix. */
 97 CPUMCPUVENDOR_SHANGHAI, /**< Shanghai (see kCpumMicroarch_Shanghai_First). */
 98 CPUMCPUVENDOR_UNKNOWN, /**< The vendor could not be determined. */
 99 /** 32bit hackishness. */
 100 CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
 101} CPUMCPUVENDOR;
102
103
104/**
 105 * X86 and AMD64 CPU microarchitectures and processor generations.
 106 *
 107 * @remarks The separation here is sometimes a little bit too finely grained,
 108 * and the differences are more like processor generation than micro
 109 * arch. This can be useful, so we'll provide functions for getting at
 110 * more coarse grained info.
 111 *
 112 * Note! Enumerator order and the _First/_End range markers are relied
 113 * upon by the CPUMMICROARCH_IS_xxx predicate macros below; do not
 114 * reorder or insert values outside the proper range.
 115 */
 112typedef enum CPUMMICROARCH
 113{
 114 kCpumMicroarch_Invalid = 0,
 115
 116 kCpumMicroarch_Intel_First,
 117
 118 kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
 119 kCpumMicroarch_Intel_80186,
 120 kCpumMicroarch_Intel_80286,
 121 kCpumMicroarch_Intel_80386,
 122 kCpumMicroarch_Intel_80486,
 123 kCpumMicroarch_Intel_P5,
 124
 125 kCpumMicroarch_Intel_P6_Core_Atom_First,
 126 kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
 127 kCpumMicroarch_Intel_P6_II,
 128 kCpumMicroarch_Intel_P6_III,
 129
 130 kCpumMicroarch_Intel_P6_M_Banias,
 131 kCpumMicroarch_Intel_P6_M_Dothan,
 132 kCpumMicroarch_Intel_Core_Yonah, /**< Core, also known as Enhanced Pentium M. */
 133
 134 kCpumMicroarch_Intel_Core2_First,
 135 kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First, /**< 65nm, Merom/Conroe/Kentsfield/Tigerton */
 136 kCpumMicroarch_Intel_Core2_Penryn, /**< 45nm, Penryn/Wolfdale/Yorkfield/Harpertown */
 137 kCpumMicroarch_Intel_Core2_End,
 138
 139 kCpumMicroarch_Intel_Core7_First,
 140 kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
 141 kCpumMicroarch_Intel_Core7_Westmere,
 142 kCpumMicroarch_Intel_Core7_SandyBridge,
 143 kCpumMicroarch_Intel_Core7_IvyBridge,
 144 kCpumMicroarch_Intel_Core7_Haswell,
 145 kCpumMicroarch_Intel_Core7_Broadwell,
 146 kCpumMicroarch_Intel_Core7_Skylake,
 147 kCpumMicroarch_Intel_Core7_KabyLake,
 148 kCpumMicroarch_Intel_Core7_CoffeeLake,
 149 kCpumMicroarch_Intel_Core7_CannonLake,
 150 kCpumMicroarch_Intel_Core7_IceLake,
 151 kCpumMicroarch_Intel_Core7_TigerLake,
 152 kCpumMicroarch_Intel_Core7_End,
 153
 154 kCpumMicroarch_Intel_Atom_First,
 155 kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
 156 kCpumMicroarch_Intel_Atom_Lincroft, /**< Second generation Bonnell (44nm). */
 157 kCpumMicroarch_Intel_Atom_Saltwell, /**< 32nm shrink of Bonnell. */
 158 kCpumMicroarch_Intel_Atom_Silvermont, /**< 22nm */
 159 kCpumMicroarch_Intel_Atom_Airmount, /**< 14nm */
 160 kCpumMicroarch_Intel_Atom_Goldmont, /**< 14nm */
 161 kCpumMicroarch_Intel_Atom_GoldmontPlus, /**< 14nm */
 162 kCpumMicroarch_Intel_Atom_Unknown,
 163 kCpumMicroarch_Intel_Atom_End,
 164
 165
 166 kCpumMicroarch_Intel_Phi_First,
 167 kCpumMicroarch_Intel_Phi_KnightsFerry = kCpumMicroarch_Intel_Phi_First,
 168 kCpumMicroarch_Intel_Phi_KnightsCorner,
 169 kCpumMicroarch_Intel_Phi_KnightsLanding,
 170 kCpumMicroarch_Intel_Phi_KnightsHill,
 171 kCpumMicroarch_Intel_Phi_KnightsMill,
 172 kCpumMicroarch_Intel_Phi_End,
 173
 174 kCpumMicroarch_Intel_P6_Core_Atom_End,
 175
 176 kCpumMicroarch_Intel_NB_First,
 177 kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
 178 kCpumMicroarch_Intel_NB_Northwood, /**< 130nm */
 179 kCpumMicroarch_Intel_NB_Prescott, /**< 90nm */
 180 kCpumMicroarch_Intel_NB_Prescott2M, /**< 90nm */
 181 kCpumMicroarch_Intel_NB_CedarMill, /**< 65nm */
 182 kCpumMicroarch_Intel_NB_Gallatin, /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
 183 kCpumMicroarch_Intel_NB_Unknown,
 184 kCpumMicroarch_Intel_NB_End,
 185
 186 kCpumMicroarch_Intel_Unknown,
 187 kCpumMicroarch_Intel_End,
 188
 189 kCpumMicroarch_AMD_First,
 190 kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
 191 kCpumMicroarch_AMD_Am386,
 192 kCpumMicroarch_AMD_Am486,
 193 kCpumMicroarch_AMD_Am486Enh, /**< Covers Am5x86 as well. */
 194 kCpumMicroarch_AMD_K5,
 195 kCpumMicroarch_AMD_K6,
 196
 197 kCpumMicroarch_AMD_K7_First,
 198 kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
 199 kCpumMicroarch_AMD_K7_Spitfire,
 200 kCpumMicroarch_AMD_K7_Thunderbird,
 201 kCpumMicroarch_AMD_K7_Morgan,
 202 kCpumMicroarch_AMD_K7_Thoroughbred,
 203 kCpumMicroarch_AMD_K7_Barton,
 204 kCpumMicroarch_AMD_K7_Unknown,
 205 kCpumMicroarch_AMD_K7_End,
 206
 207 kCpumMicroarch_AMD_K8_First,
 208 kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
 209 kCpumMicroarch_AMD_K8_90nm, /**< 90nm shrink */
 210 kCpumMicroarch_AMD_K8_90nm_DualCore, /**< 90nm with two cores. */
 211 kCpumMicroarch_AMD_K8_90nm_AMDV, /**< 90nm with AMD-V (usually) and two cores (usually). */
 212 kCpumMicroarch_AMD_K8_65nm, /**< 65nm shrink. */
 213 kCpumMicroarch_AMD_K8_End,
 214
 215 kCpumMicroarch_AMD_K10,
 216 kCpumMicroarch_AMD_K10_Lion,
 217 kCpumMicroarch_AMD_K10_Llano,
 218 kCpumMicroarch_AMD_Bobcat,
 219 kCpumMicroarch_AMD_Jaguar,
 220
 221 kCpumMicroarch_AMD_15h_First,
 222 kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
 223 kCpumMicroarch_AMD_15h_Piledriver,
 224 kCpumMicroarch_AMD_15h_Steamroller, /**< Yet to be released, might have different family. */
 225 kCpumMicroarch_AMD_15h_Excavator, /**< Yet to be released, might have different family. */
 226 kCpumMicroarch_AMD_15h_Unknown,
 227 kCpumMicroarch_AMD_15h_End,
 228
 229 kCpumMicroarch_AMD_16h_First,
 230 kCpumMicroarch_AMD_16h_End,
 231
 232 kCpumMicroarch_AMD_Zen_First,
 233 kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
 234 kCpumMicroarch_AMD_Zen_End,
 235
 236 kCpumMicroarch_AMD_Unknown,
 237 kCpumMicroarch_AMD_End,
 238
 239 kCpumMicroarch_VIA_First,
 240 kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
 241 kCpumMicroarch_Centaur_C2,
 242 kCpumMicroarch_Centaur_C3,
 243 kCpumMicroarch_VIA_C3_M2,
 244 kCpumMicroarch_VIA_C3_C5A, /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
 245 kCpumMicroarch_VIA_C3_C5B, /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
 246 kCpumMicroarch_VIA_C3_C5C, /**< 130nm Ezra - C3, Eden ESP. */
 247 kCpumMicroarch_VIA_C3_C5N, /**< 130nm Ezra-T - C3. */
 248 kCpumMicroarch_VIA_C3_C5XL, /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
 249 kCpumMicroarch_VIA_C3_C5P, /**< 130nm Nehemiah+ - C3. */
 250 kCpumMicroarch_VIA_C7_C5J, /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
 251 kCpumMicroarch_VIA_Isaiah,
 252 kCpumMicroarch_VIA_Unknown,
 253 kCpumMicroarch_VIA_End,
 254
 255 kCpumMicroarch_Cyrix_First,
 256 kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
 257 kCpumMicroarch_Cyrix_M1,
 258 kCpumMicroarch_Cyrix_MediaGX,
 259 kCpumMicroarch_Cyrix_MediaGXm,
 260 kCpumMicroarch_Cyrix_M2,
 261 kCpumMicroarch_Cyrix_Unknown,
 262 kCpumMicroarch_Cyrix_End,
 263
 264 kCpumMicroarch_NEC_First,
 265 kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
 266 kCpumMicroarch_NEC_V30,
 267 kCpumMicroarch_NEC_End,
 268
 269 kCpumMicroarch_Shanghai_First,
 270 kCpumMicroarch_Shanghai_Wudaokou = kCpumMicroarch_Shanghai_First,
 271 kCpumMicroarch_Shanghai_Unknown,
 272 kCpumMicroarch_Shanghai_End,
 273
 274 kCpumMicroarch_Unknown,
 275
 276 kCpumMicroarch_32BitHack = 0x7fffffff
 277} CPUMMICROARCH;
278
279
280/** Predicate macro for catching NetBurst CPUs. */
 281#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
 282 ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)
 283
 284/** Predicate macro for catching Core7 CPUs. */
 285#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
 286 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)
 287
 288/** Predicate macro for catching Core 2 CPUs. */
 289#define CPUMMICROARCH_IS_INTEL_CORE2(a_enmMicroarch) \
 290 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core2_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core2_End)
 291
 292/** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
 293#define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
 294 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)
 295
 296/** Predicate macro for catching AMD Family 0Fh CPUs (aka K8). */
 297#define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
 298 ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)
 299
 300/** Predicate macro for catching AMD Family 10H CPUs (aka K10). */
 301#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)
 302
 303/** Predicate macro for catching AMD Family 11H CPUs (aka Lion). */
 304#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)
 305
 306/** Predicate macro for catching AMD Family 12H CPUs (aka Llano). */
 307#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)
 308
 309/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat). */
 310#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)
 311
 312/** Predicate macro for catching AMD Family 15H CPUs (Bulldozer and its
 313 * descendants). */
 314#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
 315 ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)
 316
 317/** Predicate macro for catching AMD Family 16H CPUs. */
 318#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
 319 ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)
320
321
322
323/**
 324 * CPUID leaf.
 325 *
 326 * @remarks This structure is used by the patch manager and is therefore
 327 * more or less set in stone.
 328 */
 329typedef struct CPUMCPUIDLEAF
 330{
 331 /** The leaf number. */
 332 uint32_t uLeaf;
 333 /** The sub-leaf number. */
 334 uint32_t uSubLeaf;
 335 /** Sub-leaf mask. This is 0 when sub-leaves aren't used. */
 336 uint32_t fSubLeafMask;
 337
 338 /** The EAX value. */
 339 uint32_t uEax;
 340 /** The EBX value. */
 341 uint32_t uEbx;
 342 /** The ECX value. */
 343 uint32_t uEcx;
 344 /** The EDX value. */
 345 uint32_t uEdx;
 346
 347 /** Flags (CPUMCPUIDLEAF_F_XXX). */
 348 uint32_t fFlags;
 349} CPUMCPUIDLEAF;
 350#ifndef VBOX_FOR_DTRACE_LIB
 351AssertCompileSize(CPUMCPUIDLEAF, 32);
 352#endif
 353/** Pointer to a CPUID leaf. */
 354typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
 355/** Pointer to a const CPUID leaf. */
 356typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;
357
358/** @name CPUMCPUIDLEAF::fFlags
 359 * @{ */
 360/** Indicates working Intel leaf 0xb where the lower 8 ECX bits are not modified
 361 * and EDX contains the extended APIC ID. */
 362#define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES RT_BIT_32(0)
 363/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
 364#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID RT_BIT_32(1)
 365/** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
 366#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE RT_BIT_32(2)
 367/** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
 368#define CPUMCPUIDLEAF_F_CONTAINS_APIC RT_BIT_32(3)
 369/** Mask of the valid flags. */
 370#define CPUMCPUIDLEAF_F_VALID_MASK UINT32_C(0xf)
 371/** @} */
372
373/**
 374 * Method used to deal with unknown CPUID leaves.
 375 * @remarks Used in patch code.
 376 */
 377typedef enum CPUMUNKNOWNCPUID
 378{
 379 /** Invalid zero value. */
 380 CPUMUNKNOWNCPUID_INVALID = 0,
 381 /** Use given default values (DefCpuId). */
 382 CPUMUNKNOWNCPUID_DEFAULTS,
 383 /** Return the last standard leaf.
 384 * Intel Sandy Bridge has been observed doing this. */
 385 CPUMUNKNOWNCPUID_LAST_STD_LEAF,
 386 /** Return the last standard leaf, with ecx observed.
 387 * Intel Sandy Bridge has been observed doing this. */
 388 CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
 389 /** The register values are passed thru unmodified. */
 390 CPUMUNKNOWNCPUID_PASSTHRU,
 391 /** End of valid values. */
 392 CPUMUNKNOWNCPUID_END,
 393 /** Ensure 32-bit type. */
 394 CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
 395} CPUMUNKNOWNCPUID;
 396/** Pointer to unknown CPUID leaf method. */
 397typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;
398
399
400/**
 401 * MSR read functions.
 402 */
 403typedef enum CPUMMSRRDFN
 404{
 405 /** Invalid zero value. */
 406 kCpumMsrRdFn_Invalid = 0,
 407 /** Return the CPUMMSRRANGE::uValue. */
 408 kCpumMsrRdFn_FixedValue,
 409 /** Alias to the MSR range starting at the MSR given by
 410 * CPUMMSRRANGE::uValue. Must be used in pair with
 411 * kCpumMsrWrFn_MsrAlias. */
 412 kCpumMsrRdFn_MsrAlias,
 413 /** Write-only register, GP all read attempts. */
 414 kCpumMsrRdFn_WriteOnly,
 415
 416 kCpumMsrRdFn_Ia32P5McAddr,
 417 kCpumMsrRdFn_Ia32P5McType,
 418 kCpumMsrRdFn_Ia32TimestampCounter,
 419 kCpumMsrRdFn_Ia32PlatformId, /**< Takes real CPU value for reference. */
 420 kCpumMsrRdFn_Ia32ApicBase,
 421 kCpumMsrRdFn_Ia32FeatureControl,
 422 kCpumMsrRdFn_Ia32BiosSignId, /**< Range value returned. */
 423 kCpumMsrRdFn_Ia32SmmMonitorCtl,
 424 kCpumMsrRdFn_Ia32PmcN,
 425 kCpumMsrRdFn_Ia32MonitorFilterLineSize,
 426 kCpumMsrRdFn_Ia32MPerf,
 427 kCpumMsrRdFn_Ia32APerf,
 428 kCpumMsrRdFn_Ia32MtrrCap, /**< Takes real CPU value for reference. */
 429 kCpumMsrRdFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
 430 kCpumMsrRdFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
 431 kCpumMsrRdFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
 432 kCpumMsrRdFn_Ia32MtrrDefType,
 433 kCpumMsrRdFn_Ia32Pat,
 434 kCpumMsrRdFn_Ia32SysEnterCs,
 435 kCpumMsrRdFn_Ia32SysEnterEsp,
 436 kCpumMsrRdFn_Ia32SysEnterEip,
 437 kCpumMsrRdFn_Ia32McgCap,
 438 kCpumMsrRdFn_Ia32McgStatus,
 439 kCpumMsrRdFn_Ia32McgCtl,
 440 kCpumMsrRdFn_Ia32DebugCtl,
 441 kCpumMsrRdFn_Ia32SmrrPhysBase,
 442 kCpumMsrRdFn_Ia32SmrrPhysMask,
 443 kCpumMsrRdFn_Ia32PlatformDcaCap,
 444 kCpumMsrRdFn_Ia32CpuDcaCap,
 445 kCpumMsrRdFn_Ia32Dca0Cap,
 446 kCpumMsrRdFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
 447 kCpumMsrRdFn_Ia32PerfStatus, /**< Range value returned. */
 448 kCpumMsrRdFn_Ia32PerfCtl, /**< Range value returned. */
 449 kCpumMsrRdFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
 450 kCpumMsrRdFn_Ia32PerfCapabilities, /**< Takes reference value. */
 451 kCpumMsrRdFn_Ia32FixedCtrCtrl,
 452 kCpumMsrRdFn_Ia32PerfGlobalStatus, /**< Takes reference value. */
 453 kCpumMsrRdFn_Ia32PerfGlobalCtrl,
 454 kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
 455 kCpumMsrRdFn_Ia32PebsEnable,
 456 kCpumMsrRdFn_Ia32ClockModulation, /**< Range value returned. */
 457 kCpumMsrRdFn_Ia32ThermInterrupt, /**< Range value returned. */
 458 kCpumMsrRdFn_Ia32ThermStatus, /**< Range value returned. */
 459 kCpumMsrRdFn_Ia32Therm2Ctl, /**< Range value returned. */
 460 kCpumMsrRdFn_Ia32MiscEnable, /**< Range value returned. */
 461 kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
 462 kCpumMsrRdFn_Ia32McNCtl2, /**< Takes register number of start of range. */
 463 kCpumMsrRdFn_Ia32DsArea,
 464 kCpumMsrRdFn_Ia32TscDeadline,
 465 kCpumMsrRdFn_Ia32X2ApicN,
 466 kCpumMsrRdFn_Ia32DebugInterface,
 467 kCpumMsrRdFn_Ia32VmxBasic, /**< Takes real value as reference. */
 468 kCpumMsrRdFn_Ia32VmxPinbasedCtls, /**< Takes real value as reference. */
 469 kCpumMsrRdFn_Ia32VmxProcbasedCtls, /**< Takes real value as reference. */
 470 kCpumMsrRdFn_Ia32VmxExitCtls, /**< Takes real value as reference. */
 471 kCpumMsrRdFn_Ia32VmxEntryCtls, /**< Takes real value as reference. */
 472 kCpumMsrRdFn_Ia32VmxMisc, /**< Takes real value as reference. */
 473 kCpumMsrRdFn_Ia32VmxCr0Fixed0, /**< Takes real value as reference. */
 474 kCpumMsrRdFn_Ia32VmxCr0Fixed1, /**< Takes real value as reference. */
 475 kCpumMsrRdFn_Ia32VmxCr4Fixed0, /**< Takes real value as reference. */
 476 kCpumMsrRdFn_Ia32VmxCr4Fixed1, /**< Takes real value as reference. */
 477 kCpumMsrRdFn_Ia32VmxVmcsEnum, /**< Takes real value as reference. */
 478 kCpumMsrRdFn_Ia32VmxProcBasedCtls2, /**< Takes real value as reference. */
 479 kCpumMsrRdFn_Ia32VmxEptVpidCap, /**< Takes real value as reference. */
 480 kCpumMsrRdFn_Ia32VmxTruePinbasedCtls, /**< Takes real value as reference. */
 481 kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls, /**< Takes real value as reference. */
 482 kCpumMsrRdFn_Ia32VmxTrueExitCtls, /**< Takes real value as reference. */
 483 kCpumMsrRdFn_Ia32VmxTrueEntryCtls, /**< Takes real value as reference. */
 484 kCpumMsrRdFn_Ia32VmxVmFunc, /**< Takes real value as reference. */
 485 kCpumMsrRdFn_Ia32SpecCtrl,
 486 kCpumMsrRdFn_Ia32ArchCapabilities,
 487
 488 kCpumMsrRdFn_Amd64Efer,
 489 kCpumMsrRdFn_Amd64SyscallTarget,
 490 kCpumMsrRdFn_Amd64LongSyscallTarget,
 491 kCpumMsrRdFn_Amd64CompSyscallTarget,
 492 kCpumMsrRdFn_Amd64SyscallFlagMask,
 493 kCpumMsrRdFn_Amd64FsBase,
 494 kCpumMsrRdFn_Amd64GsBase,
 495 kCpumMsrRdFn_Amd64KernelGsBase,
 496 kCpumMsrRdFn_Amd64TscAux,
 497
 498 kCpumMsrRdFn_IntelEblCrPowerOn,
 499 kCpumMsrRdFn_IntelI7CoreThreadCount,
 500 kCpumMsrRdFn_IntelP4EbcHardPowerOn,
 501 kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
 502 kCpumMsrRdFn_IntelP4EbcFrequencyId,
 503 kCpumMsrRdFn_IntelP6FsbFrequency, /**< Takes real value as reference. */
 504 kCpumMsrRdFn_IntelPlatformInfo,
 505 kCpumMsrRdFn_IntelFlexRatio, /**< Takes real value as reference. */
 506 kCpumMsrRdFn_IntelPkgCStConfigControl,
 507 kCpumMsrRdFn_IntelPmgIoCaptureBase,
 508 kCpumMsrRdFn_IntelLastBranchFromToN,
 509 kCpumMsrRdFn_IntelLastBranchFromN,
 510 kCpumMsrRdFn_IntelLastBranchToN,
 511 kCpumMsrRdFn_IntelLastBranchTos,
 512 kCpumMsrRdFn_IntelBblCrCtl,
 513 kCpumMsrRdFn_IntelBblCrCtl3,
 514 kCpumMsrRdFn_IntelI7TemperatureTarget, /**< Range value returned. */
 515 kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
 516 kCpumMsrRdFn_IntelI7MiscPwrMgmt,
 517 kCpumMsrRdFn_IntelP6CrN,
 518 kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
 519 kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
 520 kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
 521 kCpumMsrRdFn_IntelI7SandyAesNiCtl,
 522 kCpumMsrRdFn_IntelI7TurboRatioLimit, /**< Returns range value. */
 523 kCpumMsrRdFn_IntelI7LbrSelect,
 524 kCpumMsrRdFn_IntelI7SandyErrorControl,
 525 kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
 526 kCpumMsrRdFn_IntelI7PowerCtl,
 527 kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
 528 kCpumMsrRdFn_IntelI7PebsLdLat,
 529 kCpumMsrRdFn_IntelI7PkgCnResidencyN, /**< Takes C-state number. */
 530 kCpumMsrRdFn_IntelI7CoreCnResidencyN, /**< Takes C-state number. */
 531 kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
 532 kCpumMsrRdFn_IntelI7SandyVrMiscConfig, /**< Takes real value as reference. */
 533 kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */
 534 kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN, /**< Takes real value as reference. */
 535 kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
 536 kCpumMsrRdFn_IntelI7RaplPkgPowerLimit, /**< Takes real value as reference. */
 537 kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
 538 kCpumMsrRdFn_IntelI7RaplPkgPerfStatus, /**< Takes real value as reference. */
 539 kCpumMsrRdFn_IntelI7RaplPkgPowerInfo, /**< Takes real value as reference. */
 540 kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */
 541 kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
 542 kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */
 543 kCpumMsrRdFn_IntelI7RaplDramPowerInfo, /**< Takes real value as reference. */
 544 kCpumMsrRdFn_IntelI7RaplPp0PowerLimit, /**< Takes real value as reference. */
 545 kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
 546 kCpumMsrRdFn_IntelI7RaplPp0Policy, /**< Takes real value as reference. */
 547 kCpumMsrRdFn_IntelI7RaplPp0PerfStatus, /**< Takes real value as reference. */
 548 kCpumMsrRdFn_IntelI7RaplPp1PowerLimit, /**< Takes real value as reference. */
 549 kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
 550 kCpumMsrRdFn_IntelI7RaplPp1Policy, /**< Takes real value as reference. */
 551 kCpumMsrRdFn_IntelI7IvyConfigTdpNominal, /**< Takes real value as reference. */
 552 kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1, /**< Takes real value as reference. */
 553 kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2, /**< Takes real value as reference. */
 554 kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
 555 kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
 556 kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
 557 kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
 558 kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
 559 kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
 560 kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
 561 kCpumMsrRdFn_IntelI7UncCBoxConfig,
 562 kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
 563 kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
 564 kCpumMsrRdFn_IntelI7SmiCount,
 565 kCpumMsrRdFn_IntelCore2EmttmCrTablesN, /**< Range value returned. */
 566 kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
 567 kCpumMsrRdFn_IntelCore1ExtConfig,
 568 kCpumMsrRdFn_IntelCore1DtsCalControl,
 569 kCpumMsrRdFn_IntelCore2PeciControl,
 570 kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,
 571
 572 kCpumMsrRdFn_P6LastBranchFromIp,
 573 kCpumMsrRdFn_P6LastBranchToIp,
 574 kCpumMsrRdFn_P6LastIntFromIp,
 575 kCpumMsrRdFn_P6LastIntToIp,
 576
 577 kCpumMsrRdFn_AmdFam15hTscRate,
 578 kCpumMsrRdFn_AmdFam15hLwpCfg,
 579 kCpumMsrRdFn_AmdFam15hLwpCbAddr,
 580 kCpumMsrRdFn_AmdFam10hMc4MiscN,
 581 kCpumMsrRdFn_AmdK8PerfCtlN,
 582 kCpumMsrRdFn_AmdK8PerfCtrN,
 583 kCpumMsrRdFn_AmdK8SysCfg, /**< Range value returned. */
 584 kCpumMsrRdFn_AmdK8HwCr,
 585 kCpumMsrRdFn_AmdK8IorrBaseN,
 586 kCpumMsrRdFn_AmdK8IorrMaskN,
 587 kCpumMsrRdFn_AmdK8TopOfMemN,
 588 kCpumMsrRdFn_AmdK8NbCfg1,
 589 kCpumMsrRdFn_AmdK8McXcptRedir,
 590 kCpumMsrRdFn_AmdK8CpuNameN,
 591 kCpumMsrRdFn_AmdK8HwThermalCtrl, /**< Range value returned. */
 592 kCpumMsrRdFn_AmdK8SwThermalCtrl,
 593 kCpumMsrRdFn_AmdK8FidVidControl, /**< Range value returned. */
 594 kCpumMsrRdFn_AmdK8FidVidStatus, /**< Range value returned. */
 595 kCpumMsrRdFn_AmdK8McCtlMaskN,
 596 kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
 597 kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
 598 kCpumMsrRdFn_AmdK8IntPendingMessage,
 599 kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
 600 kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
 601 kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
 602 kCpumMsrRdFn_AmdFam10hPStateCurLimit, /**< Returns range value. */
 603 kCpumMsrRdFn_AmdFam10hPStateControl, /**< Returns range value. */
 604 kCpumMsrRdFn_AmdFam10hPStateStatus, /**< Returns range value. */
 605 kCpumMsrRdFn_AmdFam10hPStateN, /**< Returns range value. This isn't a register index! */
 606 kCpumMsrRdFn_AmdFam10hCofVidControl, /**< Returns range value. */
 607 kCpumMsrRdFn_AmdFam10hCofVidStatus, /**< Returns range value. */
 608 kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
 609 kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
 610 kCpumMsrRdFn_AmdK8SmmBase,
 611 kCpumMsrRdFn_AmdK8SmmAddr,
 612 kCpumMsrRdFn_AmdK8SmmMask,
 613 kCpumMsrRdFn_AmdK8VmCr,
 614 kCpumMsrRdFn_AmdK8IgnNe,
 615 kCpumMsrRdFn_AmdK8SmmCtl,
 616 kCpumMsrRdFn_AmdK8VmHSavePa,
 617 kCpumMsrRdFn_AmdFam10hVmLockKey,
 618 kCpumMsrRdFn_AmdFam10hSmmLockKey,
 619 kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
 620 kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
 621 kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
 622 kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
 623 kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
 624 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
 625 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
 626 kCpumMsrRdFn_AmdK7MicrocodeCtl, /**< Returns range value. */
 627 kCpumMsrRdFn_AmdK7ClusterIdMaybe, /**< Returns range value. */
 628 kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
 629 kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
 630 kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
 631 kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
 632 kCpumMsrRdFn_AmdK8PatchLevel, /**< Returns range value. */
 633 kCpumMsrRdFn_AmdK7DebugStatusMaybe,
 634 kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
 635 kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
 636 kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
 637 kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
 638 kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
 639 kCpumMsrRdFn_AmdK7NodeId,
 640 kCpumMsrRdFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
 641 kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
 642 kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
 643 kCpumMsrRdFn_AmdK7LoadStoreCfg,
 644 kCpumMsrRdFn_AmdK7InstrCacheCfg,
 645 kCpumMsrRdFn_AmdK7DataCacheCfg,
 646 kCpumMsrRdFn_AmdK7BusUnitCfg,
 647 kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
 648 kCpumMsrRdFn_AmdFam15hFpuCfg,
 649 kCpumMsrRdFn_AmdFam15hDecoderCfg,
 650 kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
 651 kCpumMsrRdFn_AmdFam15hCombUnitCfg,
 652 kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
 653 kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
 654 kCpumMsrRdFn_AmdFam15hExecUnitCfg,
 655 kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
 656 kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
 657 kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
 658 kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
 659 kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
 660 kCpumMsrRdFn_AmdFam10hIbsOpRip,
 661 kCpumMsrRdFn_AmdFam10hIbsOpData,
 662 kCpumMsrRdFn_AmdFam10hIbsOpData2,
 663 kCpumMsrRdFn_AmdFam10hIbsOpData3,
 664 kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
 665 kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
 666 kCpumMsrRdFn_AmdFam10hIbsCtl,
 667 kCpumMsrRdFn_AmdFam14hIbsBrTarget,
 668
 669 kCpumMsrRdFn_Gim,
 670
 671 /** End of valid MSR read function indexes. */
 672 kCpumMsrRdFn_End
 673} CPUMMSRRDFN;
674
675/**
676 * MSR write functions.
677 */
678typedef enum CPUMMSRWRFN
679{
680 /** Invalid zero value. */
681 kCpumMsrWrFn_Invalid = 0,
682 /** Writes are ignored, the fWrGpMask is observed though. */
683 kCpumMsrWrFn_IgnoreWrite,
684 /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
685 kCpumMsrWrFn_ReadOnly,
686 /** Alias to the MSR range starting at the MSR given by
687 * CPUMMSRRANGE::uValue. Must be used in pair with
688 * kCpumMsrRdFn_MsrAlias. */
689 kCpumMsrWrFn_MsrAlias,
690
691 kCpumMsrWrFn_Ia32P5McAddr,
692 kCpumMsrWrFn_Ia32P5McType,
693 kCpumMsrWrFn_Ia32TimestampCounter,
694 kCpumMsrWrFn_Ia32ApicBase,
695 kCpumMsrWrFn_Ia32FeatureControl,
696 kCpumMsrWrFn_Ia32BiosSignId,
697 kCpumMsrWrFn_Ia32BiosUpdateTrigger,
698 kCpumMsrWrFn_Ia32SmmMonitorCtl,
699 kCpumMsrWrFn_Ia32PmcN,
700 kCpumMsrWrFn_Ia32MonitorFilterLineSize,
701 kCpumMsrWrFn_Ia32MPerf,
702 kCpumMsrWrFn_Ia32APerf,
703 kCpumMsrWrFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
704 kCpumMsrWrFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
705 kCpumMsrWrFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
706 kCpumMsrWrFn_Ia32MtrrDefType,
707 kCpumMsrWrFn_Ia32Pat,
708 kCpumMsrWrFn_Ia32SysEnterCs,
709 kCpumMsrWrFn_Ia32SysEnterEsp,
710 kCpumMsrWrFn_Ia32SysEnterEip,
711 kCpumMsrWrFn_Ia32McgStatus,
712 kCpumMsrWrFn_Ia32McgCtl,
713 kCpumMsrWrFn_Ia32DebugCtl,
714 kCpumMsrWrFn_Ia32SmrrPhysBase,
715 kCpumMsrWrFn_Ia32SmrrPhysMask,
716 kCpumMsrWrFn_Ia32PlatformDcaCap,
717 kCpumMsrWrFn_Ia32Dca0Cap,
718 kCpumMsrWrFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
719 kCpumMsrWrFn_Ia32PerfStatus,
720 kCpumMsrWrFn_Ia32PerfCtl,
721 kCpumMsrWrFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
722 kCpumMsrWrFn_Ia32PerfCapabilities,
723 kCpumMsrWrFn_Ia32FixedCtrCtrl,
724 kCpumMsrWrFn_Ia32PerfGlobalStatus,
725 kCpumMsrWrFn_Ia32PerfGlobalCtrl,
726 kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
727 kCpumMsrWrFn_Ia32PebsEnable,
728 kCpumMsrWrFn_Ia32ClockModulation,
729 kCpumMsrWrFn_Ia32ThermInterrupt,
730 kCpumMsrWrFn_Ia32ThermStatus,
731 kCpumMsrWrFn_Ia32Therm2Ctl,
732 kCpumMsrWrFn_Ia32MiscEnable,
733 kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
734 kCpumMsrWrFn_Ia32McNCtl2, /**< Takes register number of start of range. */
735 kCpumMsrWrFn_Ia32DsArea,
736 kCpumMsrWrFn_Ia32TscDeadline,
737 kCpumMsrWrFn_Ia32X2ApicN,
738 kCpumMsrWrFn_Ia32DebugInterface,
739 kCpumMsrWrFn_Ia32SpecCtrl,
740 kCpumMsrWrFn_Ia32PredCmd,
741 kCpumMsrWrFn_Ia32FlushCmd,
742
743 kCpumMsrWrFn_Amd64Efer,
744 kCpumMsrWrFn_Amd64SyscallTarget,
745 kCpumMsrWrFn_Amd64LongSyscallTarget,
746 kCpumMsrWrFn_Amd64CompSyscallTarget,
747 kCpumMsrWrFn_Amd64SyscallFlagMask,
748 kCpumMsrWrFn_Amd64FsBase,
749 kCpumMsrWrFn_Amd64GsBase,
750 kCpumMsrWrFn_Amd64KernelGsBase,
751 kCpumMsrWrFn_Amd64TscAux,
752 kCpumMsrWrFn_IntelEblCrPowerOn,
753 kCpumMsrWrFn_IntelP4EbcHardPowerOn,
754 kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
755 kCpumMsrWrFn_IntelP4EbcFrequencyId,
756 kCpumMsrWrFn_IntelFlexRatio,
757 kCpumMsrWrFn_IntelPkgCStConfigControl,
758 kCpumMsrWrFn_IntelPmgIoCaptureBase,
759 kCpumMsrWrFn_IntelLastBranchFromToN,
760 kCpumMsrWrFn_IntelLastBranchFromN,
761 kCpumMsrWrFn_IntelLastBranchToN,
762 kCpumMsrWrFn_IntelLastBranchTos,
763 kCpumMsrWrFn_IntelBblCrCtl,
764 kCpumMsrWrFn_IntelBblCrCtl3,
765 kCpumMsrWrFn_IntelI7TemperatureTarget,
766 kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
767 kCpumMsrWrFn_IntelI7MiscPwrMgmt,
768 kCpumMsrWrFn_IntelP6CrN,
769 kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
770 kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
771 kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
772 kCpumMsrWrFn_IntelI7SandyAesNiCtl,
773 kCpumMsrWrFn_IntelI7TurboRatioLimit,
774 kCpumMsrWrFn_IntelI7LbrSelect,
775 kCpumMsrWrFn_IntelI7SandyErrorControl,
776 kCpumMsrWrFn_IntelI7PowerCtl,
777 kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
778 kCpumMsrWrFn_IntelI7PebsLdLat,
779 kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
780 kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
781 kCpumMsrWrFn_IntelI7SandyRaplPowerUnit, /**< R/O but found writable bits on a Silvermont CPU here. */
782 kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
783 kCpumMsrWrFn_IntelI7SandyPkgC2Residency, /**< R/O but found writable bits on a Silvermont CPU here. */
784 kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
785 kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
786 kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
787 kCpumMsrWrFn_IntelI7RaplPp0Policy,
788 kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
789 kCpumMsrWrFn_IntelI7RaplPp1Policy,
790 kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
791 kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
792 kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
793 kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
794 kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
795 kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
796 kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
797 kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
798 kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
799 kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
800 kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
801 kCpumMsrWrFn_IntelCore1ExtConfig,
802 kCpumMsrWrFn_IntelCore1DtsCalControl,
803 kCpumMsrWrFn_IntelCore2PeciControl,
804
805 kCpumMsrWrFn_P6LastIntFromIp,
806 kCpumMsrWrFn_P6LastIntToIp,
807
808 kCpumMsrWrFn_AmdFam15hTscRate,
809 kCpumMsrWrFn_AmdFam15hLwpCfg,
810 kCpumMsrWrFn_AmdFam15hLwpCbAddr,
811 kCpumMsrWrFn_AmdFam10hMc4MiscN,
812 kCpumMsrWrFn_AmdK8PerfCtlN,
813 kCpumMsrWrFn_AmdK8PerfCtrN,
814 kCpumMsrWrFn_AmdK8SysCfg,
815 kCpumMsrWrFn_AmdK8HwCr,
816 kCpumMsrWrFn_AmdK8IorrBaseN,
817 kCpumMsrWrFn_AmdK8IorrMaskN,
818 kCpumMsrWrFn_AmdK8TopOfMemN,
819 kCpumMsrWrFn_AmdK8NbCfg1,
820 kCpumMsrWrFn_AmdK8McXcptRedir,
821 kCpumMsrWrFn_AmdK8CpuNameN,
822 kCpumMsrWrFn_AmdK8HwThermalCtrl,
823 kCpumMsrWrFn_AmdK8SwThermalCtrl,
824 kCpumMsrWrFn_AmdK8FidVidControl,
825 kCpumMsrWrFn_AmdK8McCtlMaskN,
826 kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
827 kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
828 kCpumMsrWrFn_AmdK8IntPendingMessage,
829 kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
830 kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
831 kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
832 kCpumMsrWrFn_AmdFam10hPStateControl,
833 kCpumMsrWrFn_AmdFam10hPStateStatus,
834 kCpumMsrWrFn_AmdFam10hPStateN,
835 kCpumMsrWrFn_AmdFam10hCofVidControl,
836 kCpumMsrWrFn_AmdFam10hCofVidStatus,
837 kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
838 kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
839 kCpumMsrWrFn_AmdK8SmmBase,
840 kCpumMsrWrFn_AmdK8SmmAddr,
841 kCpumMsrWrFn_AmdK8SmmMask,
842 kCpumMsrWrFn_AmdK8VmCr,
843 kCpumMsrWrFn_AmdK8IgnNe,
844 kCpumMsrWrFn_AmdK8SmmCtl,
845 kCpumMsrWrFn_AmdK8VmHSavePa,
846 kCpumMsrWrFn_AmdFam10hVmLockKey,
847 kCpumMsrWrFn_AmdFam10hSmmLockKey,
848 kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
849 kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
850 kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
851 kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
852 kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
853 kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
854 kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
855 kCpumMsrWrFn_AmdK7MicrocodeCtl,
856 kCpumMsrWrFn_AmdK7ClusterIdMaybe,
857 kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
858 kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
859 kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
860 kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
861 kCpumMsrWrFn_AmdK8PatchLoader,
862 kCpumMsrWrFn_AmdK7DebugStatusMaybe,
863 kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
864 kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
865 kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
866 kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
867 kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
868 kCpumMsrWrFn_AmdK7NodeId,
869 kCpumMsrWrFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
870 kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
871 kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
872 kCpumMsrWrFn_AmdK7LoadStoreCfg,
873 kCpumMsrWrFn_AmdK7InstrCacheCfg,
874 kCpumMsrWrFn_AmdK7DataCacheCfg,
875 kCpumMsrWrFn_AmdK7BusUnitCfg,
876 kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
877 kCpumMsrWrFn_AmdFam15hFpuCfg,
878 kCpumMsrWrFn_AmdFam15hDecoderCfg,
879 kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
880 kCpumMsrWrFn_AmdFam15hCombUnitCfg,
881 kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
882 kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
883 kCpumMsrWrFn_AmdFam15hExecUnitCfg,
884 kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
885 kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
886 kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
887 kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
888 kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
889 kCpumMsrWrFn_AmdFam10hIbsOpRip,
890 kCpumMsrWrFn_AmdFam10hIbsOpData,
891 kCpumMsrWrFn_AmdFam10hIbsOpData2,
892 kCpumMsrWrFn_AmdFam10hIbsOpData3,
893 kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
894 kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
895 kCpumMsrWrFn_AmdFam10hIbsCtl,
896 kCpumMsrWrFn_AmdFam14hIbsBrTarget,
897
898 kCpumMsrWrFn_Gim,
899
900 /** End of valid MSR write function indexes. */
901 kCpumMsrWrFn_End
902} CPUMMSRWRFN;
903
/**
 * MSR range.
 *
 * Describes how a contiguous range of MSRs is handled: which read and write
 * worker functions to dispatch to, the initial/constant value, and which bits
 * are silently ignored or raise \#GP(0) on write.  The byte offsets in the
 * member comments are asserted by the AssertCompileSize checks below.
 */
typedef struct CPUMMSRRANGE
{
    /** The first MSR. [0] */
    uint32_t    uFirst;
    /** The last MSR. [4] */
    uint32_t    uLast;
    /** The read function (CPUMMSRRDFN). [8] */
    uint16_t    enmRdFn;
    /** The write function (CPUMMSRWRFN). [10] */
    uint16_t    enmWrFn;
    /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
     * UINT16_MAX if not used by the read and write functions. [12] */
    uint16_t    offCpumCpu;
    /** Reserved for future hacks. [14] */
    uint16_t    fReserved;
    /** The init/read value. [16]
     * When enmRdFn is kCpumMsrRdFn_INIT_VALUE, this is the value returned on RDMSR.
     * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
     * offset into CPUM. */
    uint64_t    uValue;
    /** The bits to ignore when writing. [24] */
    uint64_t    fWrIgnMask;
    /** The bits that will cause a GP(0) when writing. [32]
     * This is always checked prior to calling the write function.  Using
     * UINT64_MAX effectively marks the MSR as read-only. */
    uint64_t    fWrGpMask;
    /** The register name, if applicable. [40] */
    char        szName[56];

#ifdef VBOX_WITH_STATISTICS
    /** The number of reads. */
    STAMCOUNTER cReads;
    /** The number of writes. */
    STAMCOUNTER cWrites;
    /** The number of times ignored bits were written. */
    STAMCOUNTER cIgnoredBits;
    /** The number of GPs generated. */
    STAMCOUNTER cGps;
#endif
} CPUMMSRRANGE;
#ifndef VBOX_FOR_DTRACE_LIB
# ifdef VBOX_WITH_STATISTICS
AssertCompileSize(CPUMMSRRANGE, 128);
# else
AssertCompileSize(CPUMMSRRANGE, 96);
# endif
#endif
/** Pointer to an MSR range. */
typedef CPUMMSRRANGE *PCPUMMSRRANGE;
/** Pointer to a const MSR range. */
typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
958
959
/**
 * MSRs which are required while exploding CPUID features.
 *
 * Only the hardware-virtualization capability MSRs (VT-x or AMD-V) are
 * needed for that; they share storage since a CPU exposes at most one of
 * the two.
 */
typedef struct CPUMMSRS
{
    union
    {
        VMXMSRS         vmx;
        SVMMSRS         svm;
    } hwvirt;
} CPUMMSRS;
/** Pointer to an CPUMMSRS struct. */
typedef CPUMMSRS *PCPUMMSRS;
/** Pointer to a const CPUMMSRS struct. */
typedef CPUMMSRS const *PCCPUMMSRS;
976
977
/**
 * CPU features and quirks.
 * This is mostly exploded CPUID info.
 */
typedef struct CPUMFEATURES
{
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t         enmCpuVendor;
    /** The CPU family. */
    uint8_t         uFamily;
    /** The CPU model. */
    uint8_t         uModel;
    /** The CPU stepping. */
    uint8_t         uStepping;
    /** The microarchitecture. */
#ifndef VBOX_FOR_DTRACE_LIB
    CPUMMICROARCH   enmMicroarch;
#else
    uint32_t        enmMicroarch;
#endif
    /** The maximum physical address width of the CPU. */
    uint8_t         cMaxPhysAddrWidth;
    /** The maximum linear address width of the CPU. */
    uint8_t         cMaxLinearAddrWidth;
    /** Max size of the extended state (or FPU state if no XSAVE). */
    uint16_t        cbMaxExtendedState;

    /** Supports MSRs. */
    uint32_t        fMsr : 1;
    /** Supports the page size extension (4/2 MB pages). */
    uint32_t        fPse : 1;
    /** Supports 36-bit page size extension (4 MB pages can map memory above
     * 4GB). */
    uint32_t        fPse36 : 1;
    /** Supports physical address extension (PAE). */
    uint32_t        fPae : 1;
    /** Page attribute table (PAT) support (page level cache control). */
    uint32_t        fPat : 1;
    /** Supports the FXSAVE and FXRSTOR instructions. */
    uint32_t        fFxSaveRstor : 1;
    /** Supports the XSAVE and XRSTOR instructions. */
    uint32_t        fXSaveRstor : 1;
    /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
    uint32_t        fOpSysXSaveRstor : 1;
    /** Supports MMX. */
    uint32_t        fMmx : 1;
    /** Supports AMD extensions to MMX instructions. */
    uint32_t        fAmdMmxExts : 1;
    /** Supports SSE. */
    uint32_t        fSse : 1;
    /** Supports SSE2. */
    uint32_t        fSse2 : 1;
    /** Supports SSE3. */
    uint32_t        fSse3 : 1;
    /** Supports SSSE3. */
    uint32_t        fSsse3 : 1;
    /** Supports SSE4.1. */
    uint32_t        fSse41 : 1;
    /** Supports SSE4.2. */
    uint32_t        fSse42 : 1;
    /** Supports AVX. */
    uint32_t        fAvx : 1;
    /** Supports AVX2. */
    uint32_t        fAvx2 : 1;
    /** Supports AVX512 foundation. */
    uint32_t        fAvx512Foundation : 1;
    /** Supports RDTSC. */
    uint32_t        fTsc : 1;
    /** Intel SYSENTER/SYSEXIT support */
    uint32_t        fSysEnter : 1;
    /** First generation APIC. */
    uint32_t        fApic : 1;
    /** Second generation APIC. */
    uint32_t        fX2Apic : 1;
    /** Hypervisor present. */
    uint32_t        fHypervisorPresent : 1;
    /** MWAIT & MONITOR instructions supported. */
    uint32_t        fMonitorMWait : 1;
    /** MWAIT Extensions present. */
    uint32_t        fMWaitExtensions : 1;
    /** Supports CMPXCHG16B in 64-bit mode. */
    uint32_t        fMovCmpXchg16b : 1;
    /** Supports CLFLUSH. */
    uint32_t        fClFlush : 1;
    /** Supports CLFLUSHOPT. */
    uint32_t        fClFlushOpt : 1;
    /** Supports IA32_PRED_CMD.IBPB. */
    uint32_t        fIbpb : 1;
    /** Supports IA32_SPEC_CTRL.IBRS. */
    uint32_t        fIbrs : 1;
    /** Supports IA32_SPEC_CTRL.STIBP. */
    uint32_t        fStibp : 1;
    /** Supports IA32_FLUSH_CMD. */
    uint32_t        fFlushCmd : 1;
    /** Supports IA32_ARCH_CAP. */
    uint32_t        fArchCap : 1;
    /** Supports PCID. */
    uint32_t        fPcid : 1;
    /** Supports INVPCID. */
    uint32_t        fInvpcid : 1;
    /** Supports read/write FSGSBASE instructions. */
    uint32_t        fFsGsBase : 1;

    /** Supports AMD 3DNow instructions. */
    uint32_t        f3DNow : 1;
    /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
    uint32_t        f3DNowPrefetch : 1;

    /** AMD64: Supports long mode. */
    uint32_t        fLongMode : 1;
    /** AMD64: SYSCALL/SYSRET support. */
    uint32_t        fSysCall : 1;
    /** AMD64: No-execute page table bit. */
    uint32_t        fNoExecute : 1;
    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
    uint32_t        fLahfSahf : 1;
    /** AMD64: Supports RDTSCP. */
    uint32_t        fRdTscP : 1;
    /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
    uint32_t        fMovCr8In32Bit : 1;
    /** AMD64: Supports XOP (similar to VEX3/AVX). */
    uint32_t        fXop : 1;

    /** Indicates that FPU instruction and data pointers may leak.
     * This generally applies to recent AMD CPUs, where the FPU IP and DP pointer
     * is only saved and restored if an exception is pending. */
    uint32_t        fLeakyFxSR : 1;

    /** AMD64: Supports AMD SVM. */
    uint32_t        fSvm : 1;

    /** Support for Intel VMX. */
    uint32_t        fVmx : 1;

    /** Indicates that speculative execution control CPUID bits and MSRs are exposed.
     * The details are different for Intel and AMD but both have similar
     * functionality. */
    uint32_t        fSpeculationControl : 1;

    /** MSR_IA32_ARCH_CAPABILITIES: RDCL_NO (bit 0).
     * @remarks Only safe use after CPUM ring-0 init! */
    uint32_t        fArchRdclNo : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: IBRS_ALL (bit 1).
     * @remarks Only safe use after CPUM ring-0 init! */
    uint32_t        fArchIbrsAll : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: RSB Override (bit 2).
     * @remarks Only safe use after CPUM ring-0 init! */
    uint32_t        fArchRsbOverride : 1;
    /** MSR_IA32_ARCH_CAPABILITIES: VMM need not flush L1D on VM entry (bit 3,
     * SKIP_L1DFL_VMENTRY).
     * @remarks Only safe use after CPUM ring-0 init! */
    uint32_t        fArchVmmNeedNotFlushL1d : 1;

    /** Alignment padding / reserved for future use. */
    uint32_t        fPadding : 10;

    /** SVM: Supports Nested-paging. */
    uint32_t        fSvmNestedPaging : 1;
    /** SVM: Support LBR (Last Branch Record) virtualization. */
    uint32_t        fSvmLbrVirt : 1;
    /** SVM: Supports SVM lock. */
    uint32_t        fSvmSvmLock : 1;
    /** SVM: Supports Next RIP save. */
    uint32_t        fSvmNextRipSave : 1;
    /** SVM: Supports TSC rate MSR. */
    uint32_t        fSvmTscRateMsr : 1;
    /** SVM: Supports VMCB clean bits. */
    uint32_t        fSvmVmcbClean : 1;
    /** SVM: Supports Flush-by-ASID.
     * (Sic: the field name misspells 'Flush'; kept for source compatibility.) */
    uint32_t        fSvmFlusbByAsid : 1;
    /** SVM: Supports decode assist. */
    uint32_t        fSvmDecodeAssists : 1;
    /** SVM: Supports Pause filter. */
    uint32_t        fSvmPauseFilter : 1;
    /** SVM: Supports Pause filter threshold. */
    uint32_t        fSvmPauseFilterThreshold : 1;
    /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
    uint32_t        fSvmAvic : 1;
    /** SVM: Supports Virtualized VMSAVE/VMLOAD. */
    uint32_t        fSvmVirtVmsaveVmload : 1;
    /** SVM: Supports VGIF (Virtual Global Interrupt Flag). */
    uint32_t        fSvmVGif : 1;
    /** SVM: Padding / reserved for future features. */
    uint32_t        fSvmPadding0 : 19;
    /** SVM: Maximum supported ASID. */
    uint32_t        uSvmMaxAsid;

    /** VMX: Maximum physical address width. */
    uint8_t         cVmxMaxPhysAddrWidth;
    /** VMX: Padding / reserved for future. */
    uint8_t         abVmxPadding[3];
    /** VMX: Padding / reserved for future. */
    uint32_t        fVmxPadding0;

    /** @name VMX basic controls.
     * @{ */
    /** VMX: Supports INS/OUTS VM-exit instruction info. */
    uint32_t        fVmxInsOutInfo : 1;
    /** @} */

    /** @name VMX Pin-based controls.
     * @{ */
    /** VMX: Supports external interrupt VM-exit. */
    uint32_t        fVmxExtIntExit : 1;
    /** VMX: Supports NMI VM-exit. */
    uint32_t        fVmxNmiExit : 1;
    /** VMX: Supports Virtual NMIs. */
    uint32_t        fVmxVirtNmi : 1;
    /** VMX: Supports preemption timer. */
    uint32_t        fVmxPreemptTimer : 1;
    /** VMX: Supports posted interrupts. */
    uint32_t        fVmxPostedInt : 1;
    /** @} */

    /** @name VMX Processor-based controls.
     * @{ */
    /** VMX: Supports Interrupt-window exiting. */
    uint32_t        fVmxIntWindowExit : 1;
    /** VMX: Supports TSC offsetting. */
    uint32_t        fVmxTscOffsetting : 1;
    /** VMX: Supports HLT exiting. */
    uint32_t        fVmxHltExit : 1;
    /** VMX: Supports INVLPG exiting. */
    uint32_t        fVmxInvlpgExit : 1;
    /** VMX: Supports MWAIT exiting. */
    uint32_t        fVmxMwaitExit : 1;
    /** VMX: Supports RDPMC exiting. */
    uint32_t        fVmxRdpmcExit : 1;
    /** VMX: Supports RDTSC exiting. */
    uint32_t        fVmxRdtscExit : 1;
    /** VMX: Supports CR3-load exiting. */
    uint32_t        fVmxCr3LoadExit : 1;
    /** VMX: Supports CR3-store exiting. */
    uint32_t        fVmxCr3StoreExit : 1;
    /** VMX: Supports CR8-load exiting. */
    uint32_t        fVmxCr8LoadExit : 1;
    /** VMX: Supports CR8-store exiting. */
    uint32_t        fVmxCr8StoreExit : 1;
    /** VMX: Supports TPR shadow. */
    uint32_t        fVmxUseTprShadow : 1;
    /** VMX: Supports NMI-window exiting. */
    uint32_t        fVmxNmiWindowExit : 1;
    /** VMX: Supports Mov-DRx exiting. */
    uint32_t        fVmxMovDRxExit : 1;
    /** VMX: Supports Unconditional I/O exiting. */
    uint32_t        fVmxUncondIoExit : 1;
    /** VMX: Supports I/O bitmaps. */
    uint32_t        fVmxUseIoBitmaps : 1;
    /** VMX: Supports Monitor Trap Flag. */
    uint32_t        fVmxMonitorTrapFlag : 1;
    /** VMX: Supports MSR bitmap. */
    uint32_t        fVmxUseMsrBitmaps : 1;
    /** VMX: Supports MONITOR exiting. */
    uint32_t        fVmxMonitorExit : 1;
    /** VMX: Supports PAUSE exiting. */
    uint32_t        fVmxPauseExit : 1;
    /** VMX: Supports secondary processor-based VM-execution controls. */
    uint32_t        fVmxSecondaryExecCtls : 1;
    /** @} */

    /** @name VMX Secondary processor-based controls.
     * @{ */
    /** VMX: Supports virtualize-APIC access. */
    uint32_t        fVmxVirtApicAccess : 1;
    /** VMX: Supports EPT (Extended Page Tables). */
    uint32_t        fVmxEpt : 1;
    /** VMX: Supports descriptor-table exiting. */
    uint32_t        fVmxDescTableExit : 1;
    /** VMX: Supports RDTSCP. */
    uint32_t        fVmxRdtscp : 1;
    /** VMX: Supports virtualize-x2APIC mode. */
    uint32_t        fVmxVirtX2ApicMode : 1;
    /** VMX: Supports VPID. */
    uint32_t        fVmxVpid : 1;
    /** VMX: Supports WBINVD exiting. */
    uint32_t        fVmxWbinvdExit : 1;
    /** VMX: Supports Unrestricted guest. */
    uint32_t        fVmxUnrestrictedGuest : 1;
    /** VMX: Supports APIC-register virtualization. */
    uint32_t        fVmxApicRegVirt : 1;
    /** VMX: Supports virtual-interrupt delivery. */
    uint32_t        fVmxVirtIntDelivery : 1;
    /** VMX: Supports Pause-loop exiting. */
    uint32_t        fVmxPauseLoopExit : 1;
    /** VMX: Supports RDRAND exiting. */
    uint32_t        fVmxRdrandExit : 1;
    /** VMX: Supports INVPCID. */
    uint32_t        fVmxInvpcid : 1;
    /** VMX: Supports VM functions. */
    uint32_t        fVmxVmFunc : 1;
    /** VMX: Supports VMCS shadowing. */
    uint32_t        fVmxVmcsShadowing : 1;
    /** VMX: Supports RDSEED exiting. */
    uint32_t        fVmxRdseedExit : 1;
    /** VMX: Supports PML. */
    uint32_t        fVmxPml : 1;
    /** VMX: Supports EPT-violations \#VE. */
    uint32_t        fVmxEptXcptVe : 1;
    /** VMX: Supports XSAVES/XRSTORS. */
    uint32_t        fVmxXsavesXrstors : 1;
    /** VMX: Supports TSC scaling. */
    uint32_t        fVmxUseTscScaling : 1;
    /** @} */

    /** @name VMX VM-entry controls.
     * @{ */
    /** VMX: Supports load-debug controls on VM-entry. */
    uint32_t        fVmxEntryLoadDebugCtls : 1;
    /** VMX: Supports IA32e mode guest. */
    uint32_t        fVmxIa32eModeGuest : 1;
    /** VMX: Supports load guest EFER MSR on VM-entry. */
    uint32_t        fVmxEntryLoadEferMsr : 1;
    /** VMX: Supports load guest PAT MSR on VM-entry. */
    uint32_t        fVmxEntryLoadPatMsr : 1;
    /** @} */

    /** @name VMX VM-exit controls.
     * @{ */
    /** VMX: Supports save debug controls on VM-exit. */
    uint32_t        fVmxExitSaveDebugCtls : 1;
    /** VMX: Supports host-address space size. */
    uint32_t        fVmxHostAddrSpaceSize : 1;
    /** VMX: Supports acknowledge external interrupt on VM-exit. */
    uint32_t        fVmxExitAckExtInt : 1;
    /** VMX: Supports save guest PAT MSR on VM-exit. */
    uint32_t        fVmxExitSavePatMsr : 1;
    /** VMX: Supports load host PAT MSR on VM-exit. */
    uint32_t        fVmxExitLoadPatMsr : 1;
    /** VMX: Supports save guest EFER MSR on VM-exit. */
    uint32_t        fVmxExitSaveEferMsr : 1;
    /** VMX: Supports load host EFER MSR on VM-exit. */
    uint32_t        fVmxExitLoadEferMsr : 1;
    /** VMX: Supports save VMX preemption timer on VM-exit. */
    uint32_t        fVmxSavePreemptTimer : 1;
    /** @} */

    /** @name VMX Miscellaneous data.
     * @{ */
    /** VMX: Supports storing EFER.LMA into IA32e-mode guest field on VM-exit. */
    uint32_t        fVmxExitSaveEferLma : 1;
    /** VMX: Whether Intel PT (Processor Trace) is supported in VMX mode or not. */
    uint32_t        fVmxIntelPt : 1;
    /** VMX: Supports VMWRITE to any valid VMCS field incl. read-only fields, otherwise
     * VMWRITE cannot modify read-only VM-exit information fields. */
    uint32_t        fVmxVmwriteAll : 1;
    /** VMX: Supports injection of software interrupts, ICEBP on VM-entry for zero
     * length instructions. */
    uint32_t        fVmxEntryInjectSoftInt : 1;
    /** @} */

    /** VMX: Padding / reserved for future features. */
    uint32_t        fVmxPadding1 : 1;
    uint32_t        fVmxPadding2;
} CPUMFEATURES;
#ifndef VBOX_FOR_DTRACE_LIB
AssertCompileSize(CPUMFEATURES, 48);
#endif
/** Pointer to a CPU feature structure. */
typedef CPUMFEATURES *PCPUMFEATURES;
/** Pointer to a const CPU feature structure. */
typedef CPUMFEATURES const *PCCPUMFEATURES;
1338
1339
1340#ifndef VBOX_FOR_DTRACE_LIB
1341
1342/** @name Guest Register Getters.
1343 * @{ */
1344VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR);
1345VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
1346VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden);
1347VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu);
1348VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
1349VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu);
1350VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu);
1351VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu);
1352VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu);
1353VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu);
1354VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue);
1355VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu);
1356VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu);
1357VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu);
1358VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu);
1359VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu);
1360VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu);
1361VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu);
1362VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu);
1363VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu);
1364VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu);
1365VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu);
1366VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu);
1367VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu);
1368VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu);
1369VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu);
1370VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu);
1371VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu);
1372VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu);
1373VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu);
1374VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu);
1375VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu);
1376VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu);
1377VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu);
1378VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu);
1379VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu);
1380VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
1381VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t iSubLeaf,
1382 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
1383VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu);
1384VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PVMCPU pVCpu);
1385VMM_INT_DECL(uint64_t) CPUMGetGuestIa32SmmMonitorCtl(PVMCPU pVCpu);
1386VMMDECL(VBOXSTRICTRC) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue);
1387VMMDECL(VBOXSTRICTRC) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue);
1388VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM);
1389VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM);
1390/** @} */
1391
1392/** @name Guest Register Setters.
1393 * @{ */
1394VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1395VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1396VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
1397VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
1398VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0);
1399VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
1400VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
1401VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
1402VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0);
1403VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1);
1404VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2);
1405VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3);
1406VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
1407VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7);
1408VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value);
1409VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue);
1410VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
1411VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
1412VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
1413VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
1414VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
1415VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
1416VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
1417VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
1418VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
1419VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
1420VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
1421VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
1422VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
1423VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
1424VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
1425VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
1426VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
1427VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1428VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1429VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1430VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
1431VMMDECL(void) CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
1432VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu);
1433VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg);
1434VMM_INT_DECL(void) CPUMSetGuestTscAux(PVMCPU pVCpu, uint64_t uValue);
1435VMM_INT_DECL(uint64_t) CPUMGetGuestTscAux(PVMCPU pVCpu);
1436VMM_INT_DECL(void) CPUMSetGuestSpecCtrl(PVMCPU pVCpu, uint64_t uValue);
1437VMM_INT_DECL(uint64_t) CPUMGetGuestSpecCtrl(PVMCPU pVCpu);
1438VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM);
1439/** @} */
1440
1441
1442/** @name Misc Guest Predicate Functions.
1443 * @{ */
1444VMMDECL(bool) CPUMIsGuestIn16BitCode(PVMCPU pVCpu);
1445VMMDECL(bool) CPUMIsGuestIn32BitCode(PVMCPU pVCpu);
1446VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
1447VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu);
1448VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu);
1449VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu);
1450VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu);
1451VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu);
1452VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu);
1453VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu);
1454VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu);
1455VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu);
1456VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu);
1457VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu);
1458/** @} */
1459
1460/** @name Nested Hardware-Virtualization Helpers.
1461 * @{ */
1462VMM_INT_DECL(bool) CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu);
1463VMM_INT_DECL(bool) CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu);
1464VMM_INT_DECL(bool) CPUMIsGuestSvmPhysIntrEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
1465VMM_INT_DECL(bool) CPUMIsGuestSvmVirtIntrEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
1466VMM_INT_DECL(uint8_t) CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx);
1467VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx);
1468VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
1469VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks);
1470VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks);
1471VMM_INT_DECL(bool) CPUMIsGuestVmxPhysIntrEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
1472VMM_INT_DECL(bool) CPUMIsGuestVmxVirtIntrEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
1473VMM_INT_DECL(uint32_t) CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr);
1474VMM_INT_DECL(bool) CPUMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
1475 uint8_t cbAccess);
1476VMM_INT_DECL(bool) CPUMIsSvmIoInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1477 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
1478 PSVMIOIOEXITINFO pIoExitInfo);
1479VMM_INT_DECL(int) CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
1480/** @} */
1481
1482/** @name Externalized State Helpers.
1483 * @{ */
/** @def CPUM_ASSERT_NOT_EXTRN
 * Macro for asserting that @a a_fNotExtrn are present.
 *
 * Asserts that none of the state bits in @a a_fNotExtrn are marked as
 * externalized in CPUMCTX::fExtrn, i.e. that the corresponding guest state
 * has already been imported into the context structure.
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
 * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
 *
 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
 */
#define CPUM_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
    AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fNotExtrn)), \
              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
1495
/** @def CPUM_IMPORT_EXTRN_RET
 * Macro for making sure the state specified by @a a_fExtrnImport is present,
 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
 *
 * Will return if CPUMImportGuestStateOnDemand() fails.
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
 * @param   a_fExtrnImport  Mask of CPUMCTX_EXTRN_XXX bits to get.
 * @thread  EMT(a_pVCpu)
 *
 * @note    Both arguments are expanded more than once; avoid expressions with
 *          side effects.
 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
 */
#define CPUM_IMPORT_EXTRN_RET(a_pVCpu, a_fExtrnImport) \
    do { \
        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
        { /* already present, consider this likely */ } \
        else \
        { \
            int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
            AssertRCReturn(rcCpumImport, rcCpumImport); \
        } \
    } while (0)
1518
/** @def CPUM_IMPORT_EXTRN_RCSTRICT
 * Macro for making sure the state specified by @a a_fExtrnImport is present,
 * calling CPUMImportGuestStateOnDemand() to get it if necessary.
 *
 * Will update a_rcStrict if CPUMImportGuestStateOnDemand() fails (unless
 * a_rcStrict already holds a failure status, which takes precedence).
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
 * @param   a_fExtrnImport  Mask of CPUMCTX_EXTRN_XXX bits to get.
 * @param   a_rcStrict      Strict status code variable to update on failure.
 * @thread  EMT(a_pVCpu)
 *
 * @note    The first two arguments are expanded more than once; avoid
 *          expressions with side effects.
 * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
 */
#define CPUM_IMPORT_EXTRN_RCSTRICT(a_pVCpu, a_fExtrnImport, a_rcStrict) \
    do { \
        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
        { /* already present, consider this likely */ } \
        else \
        { \
            int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
            AssertStmt(RT_SUCCESS(rcCpumImport) || RT_FAILURE_NP(a_rcStrict), a_rcStrict = rcCpumImport); \
        } \
    } while (0)
1542
1543VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPU pVCpu, uint64_t fExtrnImport);
1544/** @} */
1545
1546#ifndef IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS
1547
1548/**
1549 * Gets valid CR0 bits for the guest.
1550 *
1551 * @returns Valid CR0 bits.
1552 */
1553DECLINLINE(uint64_t) CPUMGetGuestCR0ValidMask(void)
1554{
1555 return ( X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
1556 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
1557 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG);
1558}
1559
1560/**
1561 * Tests if the guest is running in real mode or not.
1562 *
1563 * @returns true if in real mode, otherwise false.
1564 * @param pCtx Current CPU context.
1565 */
1566DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCCPUMCTX pCtx)
1567{
1568 return !(pCtx->cr0 & X86_CR0_PE);
1569}
1570
1571/**
1572 * Tests if the guest is running in real or virtual 8086 mode.
1573 *
1574 * @returns @c true if it is, @c false if not.
1575 * @param pCtx Current CPU context.
1576 */
1577DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCCPUMCTX pCtx)
1578{
1579 return !(pCtx->cr0 & X86_CR0_PE)
1580 || pCtx->eflags.Bits.u1VM; /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
1581}
1582
1583/**
1584 * Tests if the guest is running in virtual 8086 mode.
1585 *
1586 * @returns @c true if it is, @c false if not.
1587 * @param pCtx Current CPU context.
1588 */
1589DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCCPUMCTX pCtx)
1590{
1591 return (pCtx->eflags.Bits.u1VM == 1);
1592}
1593
1594/**
1595 * Tests if the guest is running in paged protected or not.
1596 *
1597 * @returns true if in paged protected mode, otherwise false.
1598 * @param pCtx Current CPU context.
1599 */
1600DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
1601{
1602 return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1603}
1604
1605/**
1606 * Tests if the guest is running in long mode or not.
1607 *
1608 * @returns true if in long mode, otherwise false.
1609 * @param pCtx Current CPU context.
1610 */
1611DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx)
1612{
1613 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1614}
1615
1616VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
1617
1618/**
1619 * Tests if the guest is running in 64 bits mode or not.
1620 *
1621 * @returns true if in 64 bits protected mode, otherwise false.
1622 * @param pCtx Current CPU context.
1623 */
1624DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
1625{
1626 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
1627 return false;
1628 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
1629 return CPUMIsGuestIn64BitCodeSlow(pCtx);
1630 return pCtx->cs.Attr.n.u1Long;
1631}
1632
1633/**
1634 * Tests if the guest has paging enabled or not.
1635 *
1636 * @returns true if paging is enabled, otherwise false.
1637 * @param pCtx Current CPU context.
1638 */
1639DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCCPUMCTX pCtx)
1640{
1641 return !!(pCtx->cr0 & X86_CR0_PG);
1642}
1643
1644/**
1645 * Tests if the guest is running in PAE mode or not.
1646 *
1647 * @returns true if in PAE mode, otherwise false.
1648 * @param pCtx Current CPU context.
1649 */
1650DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCCPUMCTX pCtx)
1651{
1652 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1653 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1654 return ( (pCtx->cr4 & X86_CR4_PAE)
1655 && CPUMIsGuestPagingEnabledEx(pCtx)
1656 && !(pCtx->msrEFER & MSR_K6_EFER_LMA));
1657}
1658
1659/**
1660 * Tests if the guest has AMD SVM enabled or not.
1661 *
1662 * @returns true if SMV is enabled, otherwise false.
1663 * @param pCtx Current CPU context.
1664 */
1665DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
1666{
1667 return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
1668}
1669
1670/**
1671 * Tests if the guest has Intel VT-x enabled or not.
1672 *
1673 * @returns true if VMX is enabled, otherwise false.
1674 * @param pCtx Current CPU context.
1675 */
1676DECLINLINE(bool) CPUMIsGuestVmxEnabled(PCCPUMCTX pCtx)
1677{
1678 return RT_BOOL(pCtx->cr4 & X86_CR4_VMXE);
1679}
1680
/**
 * Returns the guest's global-interrupt (GIF) flag.
 *
 * @returns true when global-interrupts are enabled, otherwise false.
 * @param   pCtx    Current CPU context.
 */
DECLINLINE(bool) CPUMGetGuestGif(PCCPUMCTX pCtx)
{
    /* Plain accessor of the hwvirt state's cached GIF value. */
    return pCtx->hwvirt.fGif;
}
1691
/**
 * Sets the guest's global-interrupt flag (GIF).
 *
 * @param   pCtx    Current CPU context.
 * @param   fGif    The value to set.
 */
DECLINLINE(void) CPUMSetGuestGif(PCPUMCTX pCtx, bool fGif)
{
    /* Plain mutator; no side effects beyond updating the hwvirt state. */
    pCtx->hwvirt.fGif = fGif;
}
1702
1703/**
1704 * Checks if we are executing inside an SVM nested hardware-virtualized guest.
1705 *
1706 * @returns @c true if in SVM nested-guest mode, @c false otherwise.
1707 * @param pCtx Current CPU context.
1708 */
1709DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
1710{
1711 /*
1712 * With AMD-V, the VMRUN intercept is a pre-requisite to entering SVM guest-mode.
1713 * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
1714 */
1715#ifndef IN_RC
1716 if ( pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM
1717 || !(pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
1718 return false;
1719 return true;
1720#else
1721 NOREF(pCtx);
1722 return false;
1723#endif
1724}
1725
1726/**
1727 * Checks if the guest is in VMX non-root operation.
1728 *
1729 * @returns @c true if in VMX non-root operation, @c false otherwise.
1730 * @param pCtx Current CPU context.
1731 */
1732DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx)
1733{
1734#ifndef IN_RC
1735 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1736 return false;
1737 Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
1738 return pCtx->hwvirt.vmx.fInVmxNonRootMode;
1739#else
1740 NOREF(pCtx);
1741 return false;
1742#endif
1743}
1744
1745/**
1746 * Checks if we are executing inside an SVM or VMX nested hardware-virtualized
1747 * guest.
1748 *
1749 * @returns @c true if in nested-guest mode, @c false otherwise.
1750 * @param pCtx Current CPU context.
1751 */
1752DECLINLINE(bool) CPUMIsGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
1753{
1754 return CPUMIsGuestInVmxNonRootMode(pCtx) || CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
1755}
1756
1757/**
1758 * Checks if the guest is in VMX root operation.
1759 *
1760 * @returns @c true if in VMX root operation, @c false otherwise.
1761 * @param pCtx Current CPU context.
1762 */
1763DECLINLINE(bool) CPUMIsGuestInVmxRootMode(PCCPUMCTX pCtx)
1764{
1765#ifndef IN_RC
1766 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
1767 return false;
1768 return pCtx->hwvirt.vmx.fInVmxRootMode;
1769#else
1770 NOREF(pCtx);
1771 return false;
1772#endif
1773}
1774
1775# ifndef IN_RC
1776
1777/**
1778 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
1779 * active.
1780 *
1781 * @returns @c true if in intercept is set, @c false otherwise.
1782 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1783 * @param pCtx Pointer to the context.
1784 * @param fIntercept The SVM control/instruction intercept, see
1785 * SVM_CTRL_INTERCEPT_*.
1786 */
1787DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept)
1788{
1789 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1790 return false;
1791 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcb);
1792 if (HMHasGuestSvmVmcbCached(pVCpu))
1793 return HMIsGuestSvmCtrlInterceptSet(pVCpu, fIntercept);
1794 return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fIntercept);
1795}
1796
1797/**
1798 * Checks if the nested-guest VMCB has the specified CR read intercept active.
1799 *
1800 * @returns @c true if in intercept is set, @c false otherwise.
1801 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1802 * @param pCtx Pointer to the context.
1803 * @param uCr The CR register number (0 to 15).
1804 */
1805DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1806{
1807 Assert(uCr < 16);
1808 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1809 return false;
1810 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcb);
1811 if (HMHasGuestSvmVmcbCached(pVCpu))
1812 return HMIsGuestSvmReadCRxInterceptSet(pVCpu, uCr);
1813 return RT_BOOL(pVmcb->ctrl.u16InterceptRdCRx & (UINT16_C(1) << uCr));
1814}
1815
1816/**
1817 * Checks if the nested-guest VMCB has the specified CR write intercept active.
1818 *
1819 * @returns @c true if in intercept is set, @c false otherwise.
1820 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1821 * @param pCtx Pointer to the context.
1822 * @param uCr The CR register number (0 to 15).
1823 */
1824DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1825{
1826 Assert(uCr < 16);
1827 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1828 return false;
1829 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcb);
1830 if (HMHasGuestSvmVmcbCached(pVCpu))
1831 return HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr);
1832 return RT_BOOL(pVmcb->ctrl.u16InterceptWrCRx & (UINT16_C(1) << uCr));
1833}
1834
1835/**
1836 * Checks if the nested-guest VMCB has the specified DR read intercept active.
1837 *
1838 * @returns @c true if in intercept is set, @c false otherwise.
1839 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1840 * @param pCtx Pointer to the context.
1841 * @param uDr The DR register number (0 to 15).
1842 */
1843DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1844{
1845 Assert(uDr < 16);
1846 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1847 return false;
1848 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcb);
1849 if (HMHasGuestSvmVmcbCached(pVCpu))
1850 return HMIsGuestSvmReadDRxInterceptSet(pVCpu, uDr);
1851 return RT_BOOL(pVmcb->ctrl.u16InterceptRdDRx & (UINT16_C(1) << uDr));
1852}
1853
1854/**
1855 * Checks if the nested-guest VMCB has the specified DR write intercept active.
1856 *
1857 * @returns @c true if in intercept is set, @c false otherwise.
1858 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1859 * @param pCtx Pointer to the context.
1860 * @param uDr The DR register number (0 to 15).
1861 */
1862DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1863{
1864 Assert(uDr < 16);
1865 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1866 return false;
1867 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcb);
1868 if (HMHasGuestSvmVmcbCached(pVCpu))
1869 return HMIsGuestSvmWriteDRxInterceptSet(pVCpu, uDr);
1870 return RT_BOOL(pVmcb->ctrl.u16InterceptWrDRx & (UINT16_C(1) << uDr));
1871}
1872
1873/**
1874 * Checks if the nested-guest VMCB has the specified exception intercept active.
1875 *
1876 * @returns @c true if in intercept is active, @c false otherwise.
1877 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1878 * @param pCtx Pointer to the context.
1879 * @param uVector The exception / interrupt vector.
1880 */
1881DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
1882{
1883 Assert(uVector < 32);
1884 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1885 return false;
1886 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcb);
1887 if (HMHasGuestSvmVmcbCached(pVCpu))
1888 return HMIsGuestSvmXcptInterceptSet(pVCpu, uVector);
1889 return RT_BOOL(pVmcb->ctrl.u32InterceptXcpt & (UINT32_C(1) << uVector));
1890}
1891
1892/**
1893 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
1894 *
1895 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
1896 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1897 * @param pCtx Pointer to the context.
1898 *
1899 * @remarks Should only be called when SVM feature is exposed to the guest.
1900 */
1901DECLINLINE(bool) CPUMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx)
1902{
1903 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1904 return false;
1905 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcb);
1906 if (HMHasGuestSvmVmcbCached(pVCpu))
1907 return HMIsGuestSvmVirtIntrMasking(pVCpu);
1908 return pVmcb->ctrl.IntCtrl.n.u1VIntrMasking;
1909}
1910
1911/**
1912 * Checks if the nested-guest VMCB has nested-paging enabled.
1913 *
1914 * @returns @c true if nested-paging is enabled, @c false otherwise.
1915 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1916 * @param pCtx Pointer to the context.
1917 *
1918 * @remarks Should only be called when SVM feature is exposed to the guest.
1919 */
1920DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
1921{
1922 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1923 return false;
1924 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcb);
1925 if (HMHasGuestSvmVmcbCached(pVCpu))
1926 return HMIsGuestSvmNestedPagingEnabled(pVCpu);
1927 return pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging;
1928}
1929
1930/**
1931 * Gets the nested-guest VMCB pause-filter count.
1932 *
1933 * @returns The pause-filter count.
1934 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1935 * @param pCtx Pointer to the context.
1936 *
1937 * @remarks Should only be called when SVM feature is exposed to the guest.
1938 */
1939DECLINLINE(uint16_t) CPUMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx)
1940{
1941 if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
1942 return false;
1943 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcb);
1944 if (HMHasGuestSvmVmcbCached(pVCpu))
1945 return HMGetGuestSvmPauseFilterCount(pVCpu);
1946 return pVmcb->ctrl.u16PauseFilterCount;
1947}
1948
1949/**
1950 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
1951 *
1952 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1953 * @param pCtx Pointer to the context.
1954 * @param cbInstr The length of the current instruction in bytes.
1955 *
1956 * @remarks Should only be called when SVM feature is exposed to the guest.
1957 */
1958DECLINLINE(void) CPUMGuestSvmUpdateNRip(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbInstr)
1959{
1960 RT_NOREF(pVCpu);
1961 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
1962 PSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1963 Assert(pVmcb);
1964 pVmcb->ctrl.u64NextRIP = pCtx->rip + cbInstr;
1965}
1966
1967/**
1968 * Checks whether one of the given Pin-based VM-execution controls are set when
1969 * executing a nested-guest.
1970 *
1971 * @returns @c true if set, @c false otherwise.
1972 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1973 * @param pCtx Pointer to the context.
1974 * @param uPinCtls The Pin-based VM-execution controls to check.
1975 *
1976 * @remarks This does not check if all given controls are set if more than one
1977 * control is passed in @a uPinCtl.
1978 */
1979DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uPinCtls)
1980{
1981 RT_NOREF(pVCpu);
1982 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
1983 Assert(pCtx->hwvirt.vmx.fInVmxNonRootMode);
1984 Assert(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
1985 return RT_BOOL(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32PinCtls & uPinCtls);
1986}
1987
1988/**
1989 * Checks whether one of the given Processor-based VM-execution controls are set
1990 * when executing a nested-guest.
1991 *
1992 * @returns @c true if set, @c false otherwise.
1993 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1994 * @param pCtx Pointer to the context.
1995 * @param uProcCtls The Processor-based VM-execution controls to check.
1996 *
1997 * @remarks This does not check if all given controls are set if more than one
1998 * control is passed in @a uProcCtls.
1999 */
2000DECLINLINE(bool) CPUMIsGuestVmxProcCtlsSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uProcCtls)
2001{
2002 RT_NOREF(pVCpu);
2003 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
2004 Assert(pCtx->hwvirt.vmx.fInVmxNonRootMode);
2005 Assert(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
2006 return RT_BOOL(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32ProcCtls & uProcCtls);
2007}
2008
2009/**
2010 * Checks whether one of the given Secondary Processor-based VM-execution controls
2011 * are set when executing a nested-guest.
2012 *
2013 * @returns @c true if set, @c false otherwise.
2014 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2015 * @param pCtx Pointer to the context.
2016 * @param uProcCtls2 The Secondary Processor-based VM-execution controls to
2017 * check.
2018 *
2019 * @remarks This does not check if all given controls are set if more than one
2020 * control is passed in @a uProcCtls2.
2021 */
2022DECLINLINE(bool) CPUMIsGuestVmxProcCtls2Set(PVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uProcCtls2)
2023{
2024 RT_NOREF(pVCpu);
2025 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
2026 Assert(pCtx->hwvirt.vmx.fInVmxNonRootMode);
2027 Assert(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
2028 return RT_BOOL(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32ProcCtls2 & uProcCtls2);
2029}
2030
2031/**
2032 * Checks whether one of the given VM-exit controls are set when executing a
2033 * nested-guest.
2034 *
2035 * @returns @c true if set, @c false otherwise.
2036 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2037 * @param pCtx Pointer to the context.
2038 * @param uExitCtls The VM-exit controls to check.
2039 *
2040 * @remarks This does not check if all given controls are set if more than one
2041 * control is passed in @a uExitCtls.
2042 */
2043DECLINLINE(bool) CPUMIsGuestVmxExitCtlsSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uExitCtls)
2044{
2045 RT_NOREF(pVCpu);
2046 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
2047 Assert(pCtx->hwvirt.vmx.fInVmxNonRootMode);
2048 Assert(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
2049 return RT_BOOL(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32ExitCtls & uExitCtls);
2050}
2051
2052/**
2053 * Checks whether one of the given VM-entry controls are set when executing a
2054 * nested-guest.
2055 *
2056 * @returns @c true if set, @c false otherwise.
2057 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2058 * @param pCtx Pointer to the context.
2059 * @param uEntryCtls The VM-entry controls to check.
2060 *
2061 * @remarks This does not check if all given controls are set if more than one
2062 * control is passed in @a uEntryCtls.
2063 */
2064DECLINLINE(bool) CPUMIsGuestVmxEntryCtlsSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint32_t uEntryCtls)
2065{
2066 RT_NOREF(pVCpu);
2067 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
2068 Assert(pCtx->hwvirt.vmx.fInVmxNonRootMode);
2069 Assert(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
2070 return RT_BOOL(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32EntryCtls & uEntryCtls);
2071}
2072
/**
 * Implements VMSucceed for VMX instruction success.
 *
 * Clears CF, PF, AF, ZF, SF and OF in the guest EFLAGS, signalling success of
 * the VMX instruction.
 *
 * @param   pCtx    Current CPU context.
 */
DECLINLINE(void) CPUMSetGuestVmxVmSucceed(PCPUMCTX pCtx)
{
    pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
}
2082
2083/**
2084 * Implements VMFailInvalid for VMX instruction failure.
2085 *
2086 * @param pVCpu The cross context virtual CPU structure.
2087 */
2088DECLINLINE(void) CPUMSetGuestVmxVmFailInvalid(PCPUMCTX pCtx)
2089{
2090 pCtx->eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2091 pCtx->eflags.u32 |= X86_EFL_CF;
2092}
2093
2094/**
2095 * Implements VMFailValid for VMX instruction failure.
2096 *
2097 * @param pVCpu The cross context virtual CPU structure.
2098 * @param enmInsErr The VM instruction error.
2099 */
2100DECLINLINE(void) CPUMSetGuestVmxVmFailValid(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2101{
2102 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
2103 pCtx->eflags.u32 |= X86_EFL_ZF;
2104 pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32RoVmInstrError = enmInsErr;
2105}
2106
2107/**
2108 * Implements VMFail for VMX instruction failure.
2109 *
2110 * @param pVCpu The cross context virtual CPU structure.
2111 * @param enmInsErr The VM instruction error.
2112 */
2113DECLINLINE(void) CPUMSetGuestVmxVmFail(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
2114{
2115 if (pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
2116 CPUMSetGuestVmxVmFailValid(pCtx, enmInsErr);
2117 else
2118 CPUMSetGuestVmxVmFailInvalid(pCtx);
2119}
2120
/**
 * Returns the guest-physical address of the APIC-access page when executing a
 * nested-guest.
 *
 * @returns The APIC-access page guest-physical address.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 *
 * @remarks Must only be called in VMX non-root mode with a current VMCS
 *          (enforced by the assertions below).
 */
DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddr(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    RT_NOREF(pVCpu); /* pVCpu is currently unused. */
    Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
    Assert(pCtx->hwvirt.vmx.fInVmxNonRootMode);
    Assert(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
    return pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u64AddrApicAccess.u;
}
2137
2138# endif /* !IN_RC */
2139
2140#endif /* IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS */
2141
2142/** @} */
2143
2144
2145/** @name Hypervisor Register Getters.
2146 * @{ */
2147VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu);
2148VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu);
2149VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu);
2150VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu);
2151VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu);
2152VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu);
2153#if 0 /* these are not correct. */
2154VMMDECL(uint32_t) CPUMGetHyperCR0(PVMCPU pVCpu);
2155VMMDECL(uint32_t) CPUMGetHyperCR2(PVMCPU pVCpu);
2156VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
2157VMMDECL(uint32_t) CPUMGetHyperCR4(PVMCPU pVCpu);
2158#endif
2159/** This register is only saved on fatal traps. */
2160VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu);
2161VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu);
2162/** This register is only saved on fatal traps. */
2163VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu);
2164/** This register is only saved on fatal traps. */
2165VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu);
2166VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu);
2167VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu);
2168VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu);
2169VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu);
2170VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu);
2171VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu);
2172VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu);
2173VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
2174VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
2175VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu);
2176VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu);
2177VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu);
2178VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu);
2179VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu);
2180VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu);
2181VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu);
2182VMMDECL(void) CPUMGetHyperCtx(PVMCPU pVCpu, PCPUMCTX pCtx);
2183VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
2184/** @} */
2185
2186/** @name Hypervisor Register Setters.
2187 * @{ */
2188VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit);
2189VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR);
2190VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit);
2191VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
2192VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR);
2193VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS);
2194VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS);
2195VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelDS);
2196VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelDS);
2197VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelDS);
2198VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS);
2199VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP);
2200VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl);
2201VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP);
2202VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX);
2203VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
2204VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
2205VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
2206VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
2207VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
2208VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
2209VMMDECL(void) CPUMSetHyperCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
2210VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper);
2211/** @} */
2212
2213VMMDECL(void) CPUMPushHyper(PVMCPU pVCpu, uint32_t u32);
2214VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx);
2215VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu);
2216VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu);
2217VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
2218VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu);
2219VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu);
2220VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu);
2221VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc);
2222VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu);
2223VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl);
2224
/** @name Changed flags.
 * These flags are used to keep track of which important registers have
 * been changed since they were last reset.  The only component allowed
 * to clear them is REM!
 * @{
 */
#define CPUM_CHANGED_FPU_REM            RT_BIT(0)
#define CPUM_CHANGED_CR0                RT_BIT(1)
#define CPUM_CHANGED_CR4                RT_BIT(2)
#define CPUM_CHANGED_GLOBAL_TLB_FLUSH   RT_BIT(3)
#define CPUM_CHANGED_CR3                RT_BIT(4)
#define CPUM_CHANGED_GDTR               RT_BIT(5)
#define CPUM_CHANGED_IDTR               RT_BIT(6)
#define CPUM_CHANGED_LDTR               RT_BIT(7)
#define CPUM_CHANGED_TR                 RT_BIT(8)  /**< Currently unused. */
#define CPUM_CHANGED_SYSENTER_MSR       RT_BIT(9)
#define CPUM_CHANGED_HIDDEN_SEL_REGS    RT_BIT(10) /**< Currently unused. */
#define CPUM_CHANGED_CPUID              RT_BIT(11)
/** Mask of all the change flags above. */
#define CPUM_CHANGED_ALL ( CPUM_CHANGED_FPU_REM \
                         | CPUM_CHANGED_CR0 \
                         | CPUM_CHANGED_CR4 \
                         | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
                         | CPUM_CHANGED_CR3 \
                         | CPUM_CHANGED_GDTR \
                         | CPUM_CHANGED_IDTR \
                         | CPUM_CHANGED_LDTR \
                         | CPUM_CHANGED_TR \
                         | CPUM_CHANGED_SYSENTER_MSR \
                         | CPUM_CHANGED_HIDDEN_SEL_REGS \
                         | CPUM_CHANGED_CPUID )
/** @} */
2256
2257VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
2258VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl);
2259VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels);
2260VMMDECL(bool) CPUMSupportsXSave(PVM pVM);
2261VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM);
2262VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM);
2263VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
2264VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
2265VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
2266VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
2267VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu);
2268VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
2269VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
2270VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu);
2271VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu);
2272VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu);
2273VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu);
2274VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
2275VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM);
2276VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM);
2277VMMDECL(uint64_t) CPUMGetGuestEferMsrValidMask(PVM pVM);
2278VMMDECL(int) CPUMIsGuestEferMsrWriteValid(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
2279 uint64_t *puValidEfer);
2280VMMDECL(void) CPUMSetGuestEferMsrNoChecks(PVMCPU pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
2281VMMDECL(bool) CPUMIsPatMsrValid(uint64_t uValue);
2282
/** Guest CPU interruptibility level, see CPUMGetGuestInterruptibility(). */
typedef enum CPUMINTERRUPTIBILITY
{
    CPUMINTERRUPTIBILITY_INVALID = 0,           /**< Customary invalid zero value. */
    CPUMINTERRUPTIBILITY_UNRESTRAINED,          /**< No restrictions on interrupt delivery. */
    CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,     /**< Virtual interrupts are disabled. */
    CPUMINTERRUPTIBILITY_INT_DISABLED,          /**< Interrupts are disabled. */
    CPUMINTERRUPTIBILITY_INT_INHIBITED,         /**< Interrupt delivery is inhibited (presumably MOV SS / STI shadow - see getter impl). */
    CPUMINTERRUPTIBILITY_NMI_INHIBIT,           /**< NMI delivery is inhibited. */
    CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,        /**< All interrupt delivery is inhibited. */
    CPUMINTERRUPTIBILITY_END,                   /**< End of valid values (exclusive). */
    CPUMINTERRUPTIBILITY_32BIT_HACK = 0x7fffffff /**< Forces the compiler to use a 32-bit type. */
} CPUMINTERRUPTIBILITY;
2296
2297VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
2298VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PVMCPU pVCpu);
2299
2300
2301/** @name Typical scalable bus frequency values.
2302 * @{ */
2303/** Special internal value indicating that we don't know the frequency.
2304 * @internal */
2305#define CPUM_SBUSFREQ_UNKNOWN UINT64_C(1)
2306#define CPUM_SBUSFREQ_100MHZ UINT64_C(100000000)
2307#define CPUM_SBUSFREQ_133MHZ UINT64_C(133333333)
2308#define CPUM_SBUSFREQ_167MHZ UINT64_C(166666666)
2309#define CPUM_SBUSFREQ_200MHZ UINT64_C(200000000)
2310#define CPUM_SBUSFREQ_267MHZ UINT64_C(266666666)
2311#define CPUM_SBUSFREQ_333MHZ UINT64_C(333333333)
2312#define CPUM_SBUSFREQ_400MHZ UINT64_C(400000000)
2313/** @} */
2314
2315
2316#ifdef IN_RING3
2317/** @defgroup grp_cpum_r3 The CPUM ring-3 API
2318 * @{
2319 */
2320
2321VMMR3DECL(int) CPUMR3Init(PVM pVM);
2322VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
2323VMMR3DECL(void) CPUMR3LogCpuIdAndMsrFeatures(PVM pVM);
2324VMMR3DECL(void) CPUMR3Relocate(PVM pVM);
2325VMMR3DECL(int) CPUMR3Term(PVM pVM);
2326VMMR3DECL(void) CPUMR3Reset(PVM pVM);
2327VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
2328VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM);
2329VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
2330
2331VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
2332VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
2333VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
2334 uint8_t bModel, uint8_t bStepping);
2335VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch);
2336VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
2337VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
2338VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
2339VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
2340VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor);
2341VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void);
2342
2343VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
2344
2345# if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING)
2346/** @name APIs for the CPUID raw-mode patch (legacy).
2347 * @{ */
2348VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM);
2349VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM);
2350VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM);
2351VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM);
2352VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM);
2353VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM);
2354VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM);
2355/** @} */
2356# endif
2357
2358/** @} */
2359#endif /* IN_RING3 */
2360
#ifdef IN_RC
/** @defgroup grp_cpum_rc The CPUM Raw-mode Context API
 * @{
 */

/**
 * Calls a guest trap/interrupt handler directly.
 *
 * Assumes a trap stack frame has already been set up on the guest's stack!
 * This function does not return!
 *
 * @param   pRegFrame   Original trap/interrupt context.
 * @param   selCS       Code selector of handler.
 * @param   pHandler    GC virtual address of handler.
 * @param   eflags      Callee's EFLAGS.
 * @param   selSS       Stack selector for handler.
 * @param   pEsp        Stack address for handler.
 */
DECLASM(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTRCPTR pHandler,
                                         uint32_t eflags, uint32_t selSS, RTRCPTR pEsp);

/**
 * Call guest V86 code directly.
 *
 * This function does not return!
 *
 * @param   pRegFrame   Original trap/interrupt context.
 */
DECLASM(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);

/* NOTE(review): the following raw-mode helpers are undocumented here; their
   contracts (return codes, when they may be called) are only visible at the
   definitions -- verify there before use. */
VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu);
VMMDECL(uint32_t) CPUMRCGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
#ifdef VBOX_WITH_RAW_RING1
/* Only available when raw-mode ring-1 execution support is compiled in. */
VMMDECL(void) CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore);
#endif
VMMRCDECL(void) CPUMRCProcessForceFlag(PVMCPU pVCpu);

/** @} */
#endif /* IN_RC */
2400
#ifdef IN_RING0
/** @defgroup grp_cpum_r0 The CPUM ring-0 API
 * @{
 */
/* Module and per-VM initialization/termination. */
VMMR0_INT_DECL(int)  CPUMR0ModuleInit(void);
VMMR0_INT_DECL(int)  CPUMR0ModuleTerm(void);
VMMR0_INT_DECL(int)  CPUMR0InitVM(PVM pVM);
/* Assembly-implemented (DECLASM) helpers. */
DECLASM(void)        CPUMR0RegisterVCpuThread(PVMCPU pVCpu);
DECLASM(void)        CPUMR0TouchHostFpu(void);
/* Guest/host FPU state switching.  Trap 07 is the x86 #NM (device-not-
   available) exception, used here -- presumably -- for lazy guest FPU
   loading; confirm against the handler definition. */
VMMR0_INT_DECL(int)  CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu);
VMMR0_INT_DECL(int)  CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu);
VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu);
/* Debug register (DRx) state save/load.  NOTE(review): the exact meaning of
   the fDr6 flag is not documented here -- check the definitions. */
VMMR0_INT_DECL(int)  CPUMR0SaveHostDebugState(PVM pVM, PVMCPU pVCpu);
VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6);
VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6);

VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6);
VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6);
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
/* Only built when local-APIC NMI disabling support is enabled. */
VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, uint32_t iHostCpuSet);
#endif

/** @} */
#endif /* IN_RING0 */
2425
/** @defgroup grp_cpum_rz The CPUM raw-mode and ring-0 context API
 * @{
 */
/* Guest FPU/SSE/AVX state helpers shared between ring-0 and raw-mode
   context ("RZ").  Judging by the names, the Actualize* variants presumably
   make sure the relevant guest register state is current in the CPUM
   context before it is read or changed -- confirm in the definitions. */
VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPU pVCpu);
VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPU pVCpu);
VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPU pVCpu);
VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPU pVCpu);
VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPU pVCpu);
/** @} */
2435
2436
2437#endif /* !VBOX_FOR_DTRACE_LIB */
2438/** @} */
2439RT_C_DECLS_END
2440
2441
2442#endif /* !VBOX_INCLUDED_vmm_cpum_h */
2443
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette