VirtualBox

source: vbox/trunk/include/VBox/vmm/cpum.h@ 70641

Last change on this file since 70641 was 70612, checked in by vboxsync, 7 years ago

VMM: Expose PCID, INVPCID, FSGSBASE features to guests. Implemented the relevant instructions in IEM.

1/** @file
2 * CPUM - CPU Monitor(/ Manager).
3 */
4
5/*
6 * Copyright (C) 2006-2017 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_cpum_h
27#define ___VBox_vmm_cpum_h
28
29#include <iprt/x86.h>
30#include <VBox/types.h>
31#include <VBox/vmm/cpumctx.h>
32#include <VBox/vmm/stam.h>
33#include <VBox/vmm/vmapi.h>
34
35RT_C_DECLS_BEGIN
36
37/** @defgroup grp_cpum The CPU Monitor / Manager API
38 * @ingroup grp_vmm
39 * @{
40 */
41
42/**
43 * CPUID feature to set or clear.
44 */
45typedef enum CPUMCPUIDFEATURE
46{
47 CPUMCPUIDFEATURE_INVALID = 0,
48 /** The APIC feature bit. (Std+Ext)
49 * Note! There is a per-cpu flag for masking this CPUID feature bit when the
50 * APICBASE.ENABLED bit is zero. So, this feature is only set/cleared
51 * at VM construction time like all the others. This was not the case
52 * before 5.1. */
53 CPUMCPUIDFEATURE_APIC,
54 /** The sysenter/sysexit feature bit. (Std) */
55 CPUMCPUIDFEATURE_SEP,
56 /** The SYSCALL/SYSRET feature bit (64-bit mode only for Intel CPUs). (Ext) */
57 CPUMCPUIDFEATURE_SYSCALL,
58 /** The PAE feature bit. (Std+Ext) */
59 CPUMCPUIDFEATURE_PAE,
60 /** The NX feature bit. (Ext) */
61 CPUMCPUIDFEATURE_NX,
62 /** The LAHF/SAHF feature bit (64-bit mode only). (Ext) */
63 CPUMCPUIDFEATURE_LAHF,
64 /** The LONG MODE feature bit. (Ext) */
65 CPUMCPUIDFEATURE_LONG_MODE,
66 /** The PAT feature bit. (Std+Ext) */
67 CPUMCPUIDFEATURE_PAT,
68 /** The x2APIC feature bit. (Std) */
69 CPUMCPUIDFEATURE_X2APIC,
70 /** The RDTSCP feature bit. (Ext) */
71 CPUMCPUIDFEATURE_RDTSCP,
72 /** The Hypervisor Present bit. (Std) */
73 CPUMCPUIDFEATURE_HVP,
74 /** The MWait Extensions bits (Std) */
75 CPUMCPUIDFEATURE_MWAIT_EXTS,
76 /** 32bit hackishness. */
77 CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
78} CPUMCPUIDFEATURE;
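/* Example (editor's illustrative sketch, not part of the original header): these
 * values are consumed by the CPUMR3*GuestCpuIdFeature APIs declared further down
 * in this file, e.g. to expose the NX bit to the guest at VM construction time:
 *
 *     if (!CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX))
 *         CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
 */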
79
80/**
81 * CPU Vendor.
82 */
83typedef enum CPUMCPUVENDOR
84{
85 CPUMCPUVENDOR_INVALID = 0,
86 CPUMCPUVENDOR_INTEL,
87 CPUMCPUVENDOR_AMD,
88 CPUMCPUVENDOR_VIA,
89 CPUMCPUVENDOR_CYRIX,
90 CPUMCPUVENDOR_UNKNOWN,
91 /** 32bit hackishness. */
92 CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
93} CPUMCPUVENDOR;
94
95
96/**
97 * X86 and AMD64 CPU microarchitectures and processor generations.
98 *
99 * @remarks The separation here is sometimes a little too finely grained,
100 * and the differences are more like processor generations than micro-
101 * architectures. This can be useful, so we'll provide functions for
102 * getting at more coarse-grained info.
103 */
104typedef enum CPUMMICROARCH
105{
106 kCpumMicroarch_Invalid = 0,
107
108 kCpumMicroarch_Intel_First,
109
110 kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
111 kCpumMicroarch_Intel_80186,
112 kCpumMicroarch_Intel_80286,
113 kCpumMicroarch_Intel_80386,
114 kCpumMicroarch_Intel_80486,
115 kCpumMicroarch_Intel_P5,
116
117 kCpumMicroarch_Intel_P6_Core_Atom_First,
118 kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
119 kCpumMicroarch_Intel_P6_II,
120 kCpumMicroarch_Intel_P6_III,
121
122 kCpumMicroarch_Intel_P6_M_Banias,
123 kCpumMicroarch_Intel_P6_M_Dothan,
124 kCpumMicroarch_Intel_Core_Yonah, /**< Core, also known as Enhanced Pentium M. */
125
126 kCpumMicroarch_Intel_Core2_First,
127 kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First, /**< 65nm, Merom/Conroe/Kentsfield/Tigerton */
128 kCpumMicroarch_Intel_Core2_Penryn, /**< 45nm, Penryn/Wolfdale/Yorkfield/Harpertown */
129 kCpumMicroarch_Intel_Core2_End,
130
131 kCpumMicroarch_Intel_Core7_First,
132 kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
133 kCpumMicroarch_Intel_Core7_Westmere,
134 kCpumMicroarch_Intel_Core7_SandyBridge,
135 kCpumMicroarch_Intel_Core7_IvyBridge,
136 kCpumMicroarch_Intel_Core7_Haswell,
137 kCpumMicroarch_Intel_Core7_Broadwell,
138 kCpumMicroarch_Intel_Core7_Skylake,
139 kCpumMicroarch_Intel_Core7_KabyLake,
140 kCpumMicroarch_Intel_Core7_CoffeeLake,
141 kCpumMicroarch_Intel_Core7_CannonLake,
142 kCpumMicroarch_Intel_Core7_IceLake,
143 kCpumMicroarch_Intel_Core7_TigerLake,
144 kCpumMicroarch_Intel_Core7_End,
145
146 kCpumMicroarch_Intel_Atom_First,
147 kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
148 kCpumMicroarch_Intel_Atom_Lincroft, /**< Second generation bonnell (44nm). */
149 kCpumMicroarch_Intel_Atom_Saltwell, /**< 32nm shrink of Bonnell. */
150 kCpumMicroarch_Intel_Atom_Silvermont, /**< 22nm */
151 kCpumMicroarch_Intel_Atom_Airmount, /**< 14nm */
152 kCpumMicroarch_Intel_Atom_Goldmont, /**< 14nm */
153 kCpumMicroarch_Intel_Atom_GoldmontPlus, /**< 14nm */
154 kCpumMicroarch_Intel_Atom_Unknown,
155 kCpumMicroarch_Intel_Atom_End,
156
157
158 kCpumMicroarch_Intel_Phi_First,
159 kCpumMicroarch_Intel_Phi_KnightsFerry = kCpumMicroarch_Intel_Phi_First,
160 kCpumMicroarch_Intel_Phi_KnightsCorner,
161 kCpumMicroarch_Intel_Phi_KnightsLanding,
162 kCpumMicroarch_Intel_Phi_KnightsHill,
163 kCpumMicroarch_Intel_Phi_KnightsMill,
164 kCpumMicroarch_Intel_Phi_End,
165
166 kCpumMicroarch_Intel_P6_Core_Atom_End,
167
168 kCpumMicroarch_Intel_NB_First,
169 kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
170 kCpumMicroarch_Intel_NB_Northwood, /**< 130nm */
171 kCpumMicroarch_Intel_NB_Prescott, /**< 90nm */
172 kCpumMicroarch_Intel_NB_Prescott2M, /**< 90nm */
173 kCpumMicroarch_Intel_NB_CedarMill, /**< 65nm */
174 kCpumMicroarch_Intel_NB_Gallatin, /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
175 kCpumMicroarch_Intel_NB_Unknown,
176 kCpumMicroarch_Intel_NB_End,
177
178 kCpumMicroarch_Intel_Unknown,
179 kCpumMicroarch_Intel_End,
180
181 kCpumMicroarch_AMD_First,
182 kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
183 kCpumMicroarch_AMD_Am386,
184 kCpumMicroarch_AMD_Am486,
185 kCpumMicroarch_AMD_Am486Enh, /**< Covers Am5x86 as well. */
186 kCpumMicroarch_AMD_K5,
187 kCpumMicroarch_AMD_K6,
188
189 kCpumMicroarch_AMD_K7_First,
190 kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
191 kCpumMicroarch_AMD_K7_Spitfire,
192 kCpumMicroarch_AMD_K7_Thunderbird,
193 kCpumMicroarch_AMD_K7_Morgan,
194 kCpumMicroarch_AMD_K7_Thoroughbred,
195 kCpumMicroarch_AMD_K7_Barton,
196 kCpumMicroarch_AMD_K7_Unknown,
197 kCpumMicroarch_AMD_K7_End,
198
199 kCpumMicroarch_AMD_K8_First,
200 kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
201 kCpumMicroarch_AMD_K8_90nm, /**< 90nm shrink */
202 kCpumMicroarch_AMD_K8_90nm_DualCore, /**< 90nm with two cores. */
203 kCpumMicroarch_AMD_K8_90nm_AMDV, /**< 90nm with AMD-V (usually) and two cores (usually). */
204 kCpumMicroarch_AMD_K8_65nm, /**< 65nm shrink. */
205 kCpumMicroarch_AMD_K8_End,
206
207 kCpumMicroarch_AMD_K10,
208 kCpumMicroarch_AMD_K10_Lion,
209 kCpumMicroarch_AMD_K10_Llano,
210 kCpumMicroarch_AMD_Bobcat,
211 kCpumMicroarch_AMD_Jaguar,
212
213 kCpumMicroarch_AMD_15h_First,
214 kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
215 kCpumMicroarch_AMD_15h_Piledriver,
216 kCpumMicroarch_AMD_15h_Steamroller, /**< Yet to be released, might have different family. */
217 kCpumMicroarch_AMD_15h_Excavator, /**< Yet to be released, might have different family. */
218 kCpumMicroarch_AMD_15h_Unknown,
219 kCpumMicroarch_AMD_15h_End,
220
221 kCpumMicroarch_AMD_16h_First,
222 kCpumMicroarch_AMD_16h_End,
223
224 kCpumMicroarch_AMD_Zen_First,
225 kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
226 kCpumMicroarch_AMD_Zen_End,
227
228 kCpumMicroarch_AMD_Unknown,
229 kCpumMicroarch_AMD_End,
230
231 kCpumMicroarch_VIA_First,
232 kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
233 kCpumMicroarch_Centaur_C2,
234 kCpumMicroarch_Centaur_C3,
235 kCpumMicroarch_VIA_C3_M2,
236 kCpumMicroarch_VIA_C3_C5A, /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
237 kCpumMicroarch_VIA_C3_C5B, /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
238 kCpumMicroarch_VIA_C3_C5C, /**< 130nm Ezra - C3, Eden ESP. */
239 kCpumMicroarch_VIA_C3_C5N, /**< 130nm Ezra-T - C3. */
240 kCpumMicroarch_VIA_C3_C5XL, /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
241 kCpumMicroarch_VIA_C3_C5P, /**< 130nm Nehemiah+ - C3. */
242 kCpumMicroarch_VIA_C7_C5J, /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
243 kCpumMicroarch_VIA_Isaiah,
244 kCpumMicroarch_VIA_Unknown,
245 kCpumMicroarch_VIA_End,
246
247 kCpumMicroarch_Cyrix_First,
248 kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
249 kCpumMicroarch_Cyrix_M1,
250 kCpumMicroarch_Cyrix_MediaGX,
251 kCpumMicroarch_Cyrix_MediaGXm,
252 kCpumMicroarch_Cyrix_M2,
253 kCpumMicroarch_Cyrix_Unknown,
254 kCpumMicroarch_Cyrix_End,
255
256 kCpumMicroarch_NEC_First,
257 kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
258 kCpumMicroarch_NEC_V30,
259 kCpumMicroarch_NEC_End,
260
261 kCpumMicroarch_Unknown,
262
263 kCpumMicroarch_32BitHack = 0x7fffffff
264} CPUMMICROARCH;
265
266
267/** Predicate macro for catching NetBurst CPUs. */
268#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
269 ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)
270
271/** Predicate macro for catching Core7 CPUs. */
272#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
273 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)
274
275/** Predicate macro for catching Core 2 CPUs. */
276#define CPUMMICROARCH_IS_INTEL_CORE2(a_enmMicroarch) \
277 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core2_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core2_End)
278
279/** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
280#define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
281 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)
282
283/** Predicate macro for catching AMD Family 0Fh CPUs (aka K8). */
284#define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
285 ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)
286
287/** Predicate macro for catching AMD Family 10H CPUs (aka K10). */
288#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)
289
290/** Predicate macro for catching AMD Family 11H CPUs (aka Lion). */
291#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)
292
293/** Predicate macro for catching AMD Family 12H CPUs (aka Llano). */
294#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)
295
296/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat). */
297#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)
298
299/** Predicate macro for catching AMD Family 15H CPUs (Bulldozer and its
300 * descendants). */
301#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
302 ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)
303
304/** Predicate macro for catching AMD Family 16H CPUs. */
305#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
306 ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)
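/* Example (editor's illustrative sketch, not part of the original header): callers
 * typically feed a CPUMMICROARCH value to these predicates when special-casing a
 * CPU family, e.g.:
 *
 *     bool const fNetBurst   = CPUMMICROARCH_IS_INTEL_NETBURST(enmMicroarch);
 *     bool const fBulldozers = CPUMMICROARCH_IS_AMD_FAM_15H(enmMicroarch);
 */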
307
308
309
310/**
311 * CPUID leaf.
312 *
313 * @remarks This structure is used by the patch manager and is therefore
314 * more or less set in stone.
315 */
316typedef struct CPUMCPUIDLEAF
317{
318 /** The leaf number. */
319 uint32_t uLeaf;
320 /** The sub-leaf number. */
321 uint32_t uSubLeaf;
322 /** Sub-leaf mask. This is 0 when sub-leaves aren't used. */
323 uint32_t fSubLeafMask;
324
325 /** The EAX value. */
326 uint32_t uEax;
327 /** The EBX value. */
328 uint32_t uEbx;
329 /** The ECX value. */
330 uint32_t uEcx;
331 /** The EDX value. */
332 uint32_t uEdx;
333
334 /** Flags. */
335 uint32_t fFlags;
336} CPUMCPUIDLEAF;
337#ifndef VBOX_FOR_DTRACE_LIB
338AssertCompileSize(CPUMCPUIDLEAF, 32);
339#endif
340/** Pointer to a CPUID leaf. */
341typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
342/** Pointer to a const CPUID leaf. */
343typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;
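/* Example (editor's illustrative sketch, not part of the original header): one
 * plausible linear lookup over a leaf array, applying fSubLeafMask to the queried
 * sub-leaf before comparing (the exact matching rule is an assumption here):
 *
 *     static PCCPUMCPUIDLEAF cpumExampleLookupLeaf(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
 *                                                  uint32_t uLeaf, uint32_t uSubLeaf)
 *     {
 *         for (uint32_t i = 0; i < cLeaves; i++)
 *             if (   paLeaves[i].uLeaf    == uLeaf
 *                 && paLeaves[i].uSubLeaf == (uSubLeaf & paLeaves[i].fSubLeafMask))
 *                 return &paLeaves[i];
 *         return NULL;
 *     }
 */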
344
345/** @name CPUMCPUIDLEAF::fFlags
346 * @{ */
347/** Indicates a working Intel leaf 0xb where the lower 8 ECX bits are not modified
348 * and EDX contains the extended APIC ID. */
349#define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES RT_BIT_32(0)
350/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
351#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID RT_BIT_32(1)
352/** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
353#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE RT_BIT_32(2)
354/** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
355#define CPUMCPUIDLEAF_F_CONTAINS_APIC RT_BIT_32(3)
356/** Mask of the valid flags. */
357#define CPUMCPUIDLEAF_F_VALID_MASK UINT32_C(0xf)
358/** @} */
359
360/**
361 * Method used to deal with unknown CPUID leaves.
362 * @remarks Used in patch code.
363 */
364typedef enum CPUMUNKNOWNCPUID
365{
366 /** Invalid zero value. */
367 CPUMUNKNOWNCPUID_INVALID = 0,
368 /** Use given default values (DefCpuId). */
369 CPUMUNKNOWNCPUID_DEFAULTS,
370 /** Return the last standard leaf.
371 * Intel Sandy Bridge has been observed doing this. */
372 CPUMUNKNOWNCPUID_LAST_STD_LEAF,
373 /** Return the last standard leaf, with ecx observed.
374 * Intel Sandy Bridge has been observed doing this. */
375 CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
376 /** The register values are passed through unmodified. */
377 CPUMUNKNOWNCPUID_PASSTHRU,
378 /** End of valid value. */
379 CPUMUNKNOWNCPUID_END,
380 /** Ensure 32-bit type. */
381 CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
382} CPUMUNKNOWNCPUID;
383/** Pointer to unknown CPUID leaf method. */
384typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;
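/* Example (editor's illustrative sketch, not part of the original header): a CPUID
 * dispatcher could apply the configured policy roughly as below; 'DefLeaf' is a
 * hypothetical CPUMCPUIDLEAF holding the DefCpuId defaults mentioned above:
 *
 *     switch (enmUnknownMethod)
 *     {
 *         case CPUMUNKNOWNCPUID_DEFAULTS:
 *             *puEax = DefLeaf.uEax; *puEbx = DefLeaf.uEbx;
 *             *puEcx = DefLeaf.uEcx; *puEdx = DefLeaf.uEdx;
 *             break;
 *         case CPUMUNKNOWNCPUID_PASSTHRU:
 *         default:
 *             break; // register values are left as the caller passed them in
 *     }
 */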
385
386
387/**
388 * MSR read functions.
389 */
390typedef enum CPUMMSRRDFN
391{
392 /** Invalid zero value. */
393 kCpumMsrRdFn_Invalid = 0,
394 /** Return the CPUMMSRRANGE::uValue. */
395 kCpumMsrRdFn_FixedValue,
396 /** Alias to the MSR range starting at the MSR given by
397 * CPUMMSRRANGE::uValue. Must be used in pair with
398 * kCpumMsrWrFn_MsrAlias. */
399 kCpumMsrRdFn_MsrAlias,
400 /** Write only register, GP all read attempts. */
401 kCpumMsrRdFn_WriteOnly,
402
403 kCpumMsrRdFn_Ia32P5McAddr,
404 kCpumMsrRdFn_Ia32P5McType,
405 kCpumMsrRdFn_Ia32TimestampCounter,
406 kCpumMsrRdFn_Ia32PlatformId, /**< Takes real CPU value for reference. */
407 kCpumMsrRdFn_Ia32ApicBase,
408 kCpumMsrRdFn_Ia32FeatureControl,
409 kCpumMsrRdFn_Ia32BiosSignId, /**< Range value returned. */
410 kCpumMsrRdFn_Ia32SmmMonitorCtl,
411 kCpumMsrRdFn_Ia32PmcN,
412 kCpumMsrRdFn_Ia32MonitorFilterLineSize,
413 kCpumMsrRdFn_Ia32MPerf,
414 kCpumMsrRdFn_Ia32APerf,
415 kCpumMsrRdFn_Ia32MtrrCap, /**< Takes real CPU value for reference. */
416 kCpumMsrRdFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
417 kCpumMsrRdFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
418 kCpumMsrRdFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
419 kCpumMsrRdFn_Ia32MtrrDefType,
420 kCpumMsrRdFn_Ia32Pat,
421 kCpumMsrRdFn_Ia32SysEnterCs,
422 kCpumMsrRdFn_Ia32SysEnterEsp,
423 kCpumMsrRdFn_Ia32SysEnterEip,
424 kCpumMsrRdFn_Ia32McgCap,
425 kCpumMsrRdFn_Ia32McgStatus,
426 kCpumMsrRdFn_Ia32McgCtl,
427 kCpumMsrRdFn_Ia32DebugCtl,
428 kCpumMsrRdFn_Ia32SmrrPhysBase,
429 kCpumMsrRdFn_Ia32SmrrPhysMask,
430 kCpumMsrRdFn_Ia32PlatformDcaCap,
431 kCpumMsrRdFn_Ia32CpuDcaCap,
432 kCpumMsrRdFn_Ia32Dca0Cap,
433 kCpumMsrRdFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
434 kCpumMsrRdFn_Ia32PerfStatus, /**< Range value returned. */
435 kCpumMsrRdFn_Ia32PerfCtl, /**< Range value returned. */
436 kCpumMsrRdFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
437 kCpumMsrRdFn_Ia32PerfCapabilities, /**< Takes reference value. */
438 kCpumMsrRdFn_Ia32FixedCtrCtrl,
439 kCpumMsrRdFn_Ia32PerfGlobalStatus, /**< Takes reference value. */
440 kCpumMsrRdFn_Ia32PerfGlobalCtrl,
441 kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
442 kCpumMsrRdFn_Ia32PebsEnable,
443 kCpumMsrRdFn_Ia32ClockModulation, /**< Range value returned. */
444 kCpumMsrRdFn_Ia32ThermInterrupt, /**< Range value returned. */
445 kCpumMsrRdFn_Ia32ThermStatus, /**< Range value returned. */
446 kCpumMsrRdFn_Ia32Therm2Ctl, /**< Range value returned. */
447 kCpumMsrRdFn_Ia32MiscEnable, /**< Range value returned. */
448 kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
449 kCpumMsrRdFn_Ia32McNCtl2, /**< Takes register number of start of range. */
450 kCpumMsrRdFn_Ia32DsArea,
451 kCpumMsrRdFn_Ia32TscDeadline,
452 kCpumMsrRdFn_Ia32X2ApicN,
453 kCpumMsrRdFn_Ia32DebugInterface,
454 kCpumMsrRdFn_Ia32VmxBase, /**< Takes real value as reference. */
455 kCpumMsrRdFn_Ia32VmxPinbasedCtls, /**< Takes real value as reference. */
456 kCpumMsrRdFn_Ia32VmxProcbasedCtls, /**< Takes real value as reference. */
457 kCpumMsrRdFn_Ia32VmxExitCtls, /**< Takes real value as reference. */
458 kCpumMsrRdFn_Ia32VmxEntryCtls, /**< Takes real value as reference. */
459 kCpumMsrRdFn_Ia32VmxMisc, /**< Takes real value as reference. */
460 kCpumMsrRdFn_Ia32VmxCr0Fixed0, /**< Takes real value as reference. */
461 kCpumMsrRdFn_Ia32VmxCr0Fixed1, /**< Takes real value as reference. */
462 kCpumMsrRdFn_Ia32VmxCr4Fixed0, /**< Takes real value as reference. */
463 kCpumMsrRdFn_Ia32VmxCr4Fixed1, /**< Takes real value as reference. */
464 kCpumMsrRdFn_Ia32VmxVmcsEnum, /**< Takes real value as reference. */
465 kCpumMsrRdFn_Ia32VmxProcBasedCtls2, /**< Takes real value as reference. */
466 kCpumMsrRdFn_Ia32VmxEptVpidCap, /**< Takes real value as reference. */
467 kCpumMsrRdFn_Ia32VmxTruePinbasedCtls, /**< Takes real value as reference. */
468 kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls, /**< Takes real value as reference. */
469 kCpumMsrRdFn_Ia32VmxTrueExitCtls, /**< Takes real value as reference. */
470 kCpumMsrRdFn_Ia32VmxTrueEntryCtls, /**< Takes real value as reference. */
471 kCpumMsrRdFn_Ia32VmxVmFunc, /**< Takes real value as reference. */
472
473 kCpumMsrRdFn_Amd64Efer,
474 kCpumMsrRdFn_Amd64SyscallTarget,
475 kCpumMsrRdFn_Amd64LongSyscallTarget,
476 kCpumMsrRdFn_Amd64CompSyscallTarget,
477 kCpumMsrRdFn_Amd64SyscallFlagMask,
478 kCpumMsrRdFn_Amd64FsBase,
479 kCpumMsrRdFn_Amd64GsBase,
480 kCpumMsrRdFn_Amd64KernelGsBase,
481 kCpumMsrRdFn_Amd64TscAux,
482
483 kCpumMsrRdFn_IntelEblCrPowerOn,
484 kCpumMsrRdFn_IntelI7CoreThreadCount,
485 kCpumMsrRdFn_IntelP4EbcHardPowerOn,
486 kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
487 kCpumMsrRdFn_IntelP4EbcFrequencyId,
488 kCpumMsrRdFn_IntelP6FsbFrequency, /**< Takes real value as reference. */
489 kCpumMsrRdFn_IntelPlatformInfo,
490 kCpumMsrRdFn_IntelFlexRatio, /**< Takes real value as reference. */
491 kCpumMsrRdFn_IntelPkgCStConfigControl,
492 kCpumMsrRdFn_IntelPmgIoCaptureBase,
493 kCpumMsrRdFn_IntelLastBranchFromToN,
494 kCpumMsrRdFn_IntelLastBranchFromN,
495 kCpumMsrRdFn_IntelLastBranchToN,
496 kCpumMsrRdFn_IntelLastBranchTos,
497 kCpumMsrRdFn_IntelBblCrCtl,
498 kCpumMsrRdFn_IntelBblCrCtl3,
499 kCpumMsrRdFn_IntelI7TemperatureTarget, /**< Range value returned. */
500 kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
501 kCpumMsrRdFn_IntelI7MiscPwrMgmt,
502 kCpumMsrRdFn_IntelP6CrN,
503 kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
504 kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
505 kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
506 kCpumMsrRdFn_IntelI7SandyAesNiCtl,
507 kCpumMsrRdFn_IntelI7TurboRatioLimit, /**< Returns range value. */
508 kCpumMsrRdFn_IntelI7LbrSelect,
509 kCpumMsrRdFn_IntelI7SandyErrorControl,
510 kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
511 kCpumMsrRdFn_IntelI7PowerCtl,
512 kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
513 kCpumMsrRdFn_IntelI7PebsLdLat,
514 kCpumMsrRdFn_IntelI7PkgCnResidencyN, /**< Takes C-state number. */
515 kCpumMsrRdFn_IntelI7CoreCnResidencyN, /**< Takes C-state number. */
516 kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
517 kCpumMsrRdFn_IntelI7SandyVrMiscConfig, /**< Takes real value as reference. */
518 kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */
519 kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN, /**< Takes real value as reference. */
520 kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
521 kCpumMsrRdFn_IntelI7RaplPkgPowerLimit, /**< Takes real value as reference. */
522 kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
523 kCpumMsrRdFn_IntelI7RaplPkgPerfStatus, /**< Takes real value as reference. */
524 kCpumMsrRdFn_IntelI7RaplPkgPowerInfo, /**< Takes real value as reference. */
525 kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */
526 kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
527 kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */
528 kCpumMsrRdFn_IntelI7RaplDramPowerInfo, /**< Takes real value as reference. */
529 kCpumMsrRdFn_IntelI7RaplPp0PowerLimit, /**< Takes real value as reference. */
530 kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
531 kCpumMsrRdFn_IntelI7RaplPp0Policy, /**< Takes real value as reference. */
532 kCpumMsrRdFn_IntelI7RaplPp0PerfStatus, /**< Takes real value as reference. */
533 kCpumMsrRdFn_IntelI7RaplPp1PowerLimit, /**< Takes real value as reference. */
534 kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
535 kCpumMsrRdFn_IntelI7RaplPp1Policy, /**< Takes real value as reference. */
536 kCpumMsrRdFn_IntelI7IvyConfigTdpNominal, /**< Takes real value as reference. */
537 kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1, /**< Takes real value as reference. */
538 kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2, /**< Takes real value as reference. */
539 kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
540 kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
541 kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
542 kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
543 kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
544 kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
545 kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
546 kCpumMsrRdFn_IntelI7UncCBoxConfig,
547 kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
548 kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
549 kCpumMsrRdFn_IntelI7SmiCount,
550 kCpumMsrRdFn_IntelCore2EmttmCrTablesN, /**< Range value returned. */
551 kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
552 kCpumMsrRdFn_IntelCore1ExtConfig,
553 kCpumMsrRdFn_IntelCore1DtsCalControl,
554 kCpumMsrRdFn_IntelCore2PeciControl,
555 kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,
556
557 kCpumMsrRdFn_P6LastBranchFromIp,
558 kCpumMsrRdFn_P6LastBranchToIp,
559 kCpumMsrRdFn_P6LastIntFromIp,
560 kCpumMsrRdFn_P6LastIntToIp,
561
562 kCpumMsrRdFn_AmdFam15hTscRate,
563 kCpumMsrRdFn_AmdFam15hLwpCfg,
564 kCpumMsrRdFn_AmdFam15hLwpCbAddr,
565 kCpumMsrRdFn_AmdFam10hMc4MiscN,
566 kCpumMsrRdFn_AmdK8PerfCtlN,
567 kCpumMsrRdFn_AmdK8PerfCtrN,
568 kCpumMsrRdFn_AmdK8SysCfg, /**< Range value returned. */
569 kCpumMsrRdFn_AmdK8HwCr,
570 kCpumMsrRdFn_AmdK8IorrBaseN,
571 kCpumMsrRdFn_AmdK8IorrMaskN,
572 kCpumMsrRdFn_AmdK8TopOfMemN,
573 kCpumMsrRdFn_AmdK8NbCfg1,
574 kCpumMsrRdFn_AmdK8McXcptRedir,
575 kCpumMsrRdFn_AmdK8CpuNameN,
576 kCpumMsrRdFn_AmdK8HwThermalCtrl, /**< Range value returned. */
577 kCpumMsrRdFn_AmdK8SwThermalCtrl,
578 kCpumMsrRdFn_AmdK8FidVidControl, /**< Range value returned. */
579 kCpumMsrRdFn_AmdK8FidVidStatus, /**< Range value returned. */
580 kCpumMsrRdFn_AmdK8McCtlMaskN,
581 kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
582 kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
583 kCpumMsrRdFn_AmdK8IntPendingMessage,
584 kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
585 kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
586 kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
587 kCpumMsrRdFn_AmdFam10hPStateCurLimit, /**< Returns range value. */
588 kCpumMsrRdFn_AmdFam10hPStateControl, /**< Returns range value. */
589 kCpumMsrRdFn_AmdFam10hPStateStatus, /**< Returns range value. */
590 kCpumMsrRdFn_AmdFam10hPStateN, /**< Returns range value. This isn't a register index! */
591 kCpumMsrRdFn_AmdFam10hCofVidControl, /**< Returns range value. */
592 kCpumMsrRdFn_AmdFam10hCofVidStatus, /**< Returns range value. */
593 kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
594 kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
595 kCpumMsrRdFn_AmdK8SmmBase,
596 kCpumMsrRdFn_AmdK8SmmAddr,
597 kCpumMsrRdFn_AmdK8SmmMask,
598 kCpumMsrRdFn_AmdK8VmCr,
599 kCpumMsrRdFn_AmdK8IgnNe,
600 kCpumMsrRdFn_AmdK8SmmCtl,
601 kCpumMsrRdFn_AmdK8VmHSavePa,
602 kCpumMsrRdFn_AmdFam10hVmLockKey,
603 kCpumMsrRdFn_AmdFam10hSmmLockKey,
604 kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
605 kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
606 kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
607 kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
608 kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
609 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
610 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
611 kCpumMsrRdFn_AmdK7MicrocodeCtl, /**< Returns range value. */
612 kCpumMsrRdFn_AmdK7ClusterIdMaybe, /**< Returns range value. */
613 kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
614 kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
615 kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
616 kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
617 kCpumMsrRdFn_AmdK8PatchLevel, /**< Returns range value. */
618 kCpumMsrRdFn_AmdK7DebugStatusMaybe,
619 kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
620 kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
621 kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
622 kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
623 kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
624 kCpumMsrRdFn_AmdK7NodeId,
625 kCpumMsrRdFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
626 kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
627 kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
628 kCpumMsrRdFn_AmdK7LoadStoreCfg,
629 kCpumMsrRdFn_AmdK7InstrCacheCfg,
630 kCpumMsrRdFn_AmdK7DataCacheCfg,
631 kCpumMsrRdFn_AmdK7BusUnitCfg,
632 kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
633 kCpumMsrRdFn_AmdFam15hFpuCfg,
634 kCpumMsrRdFn_AmdFam15hDecoderCfg,
635 kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
636 kCpumMsrRdFn_AmdFam15hCombUnitCfg,
637 kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
638 kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
639 kCpumMsrRdFn_AmdFam15hExecUnitCfg,
640 kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
641 kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
642 kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
643 kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
644 kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
645 kCpumMsrRdFn_AmdFam10hIbsOpRip,
646 kCpumMsrRdFn_AmdFam10hIbsOpData,
647 kCpumMsrRdFn_AmdFam10hIbsOpData2,
648 kCpumMsrRdFn_AmdFam10hIbsOpData3,
649 kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
650 kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
651 kCpumMsrRdFn_AmdFam10hIbsCtl,
652 kCpumMsrRdFn_AmdFam14hIbsBrTarget,
653
654 kCpumMsrRdFn_Gim,
655
656 /** End of valid MSR read function indexes. */
657 kCpumMsrRdFn_End
658} CPUMMSRRDFN;
659
660/**
661 * MSR write functions.
662 */
663typedef enum CPUMMSRWRFN
664{
665 /** Invalid zero value. */
666 kCpumMsrWrFn_Invalid = 0,
667 /** Writes are ignored, but the fWrGpMask is still observed. */
668 kCpumMsrWrFn_IgnoreWrite,
669 /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
670 kCpumMsrWrFn_ReadOnly,
671 /** Alias to the MSR range starting at the MSR given by
672 * CPUMMSRRANGE::uValue. Must be used in pair with
673 * kCpumMsrRdFn_MsrAlias. */
674 kCpumMsrWrFn_MsrAlias,
675
676 kCpumMsrWrFn_Ia32P5McAddr,
677 kCpumMsrWrFn_Ia32P5McType,
678 kCpumMsrWrFn_Ia32TimestampCounter,
679 kCpumMsrWrFn_Ia32ApicBase,
680 kCpumMsrWrFn_Ia32FeatureControl,
681 kCpumMsrWrFn_Ia32BiosSignId,
682 kCpumMsrWrFn_Ia32BiosUpdateTrigger,
683 kCpumMsrWrFn_Ia32SmmMonitorCtl,
684 kCpumMsrWrFn_Ia32PmcN,
685 kCpumMsrWrFn_Ia32MonitorFilterLineSize,
686 kCpumMsrWrFn_Ia32MPerf,
687 kCpumMsrWrFn_Ia32APerf,
688 kCpumMsrWrFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
689 kCpumMsrWrFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
690 kCpumMsrWrFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
691 kCpumMsrWrFn_Ia32MtrrDefType,
692 kCpumMsrWrFn_Ia32Pat,
693 kCpumMsrWrFn_Ia32SysEnterCs,
694 kCpumMsrWrFn_Ia32SysEnterEsp,
695 kCpumMsrWrFn_Ia32SysEnterEip,
696 kCpumMsrWrFn_Ia32McgStatus,
697 kCpumMsrWrFn_Ia32McgCtl,
698 kCpumMsrWrFn_Ia32DebugCtl,
699 kCpumMsrWrFn_Ia32SmrrPhysBase,
700 kCpumMsrWrFn_Ia32SmrrPhysMask,
701 kCpumMsrWrFn_Ia32PlatformDcaCap,
702 kCpumMsrWrFn_Ia32Dca0Cap,
703 kCpumMsrWrFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
704 kCpumMsrWrFn_Ia32PerfStatus,
705 kCpumMsrWrFn_Ia32PerfCtl,
706 kCpumMsrWrFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
707 kCpumMsrWrFn_Ia32PerfCapabilities,
708 kCpumMsrWrFn_Ia32FixedCtrCtrl,
709 kCpumMsrWrFn_Ia32PerfGlobalStatus,
710 kCpumMsrWrFn_Ia32PerfGlobalCtrl,
711 kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
712 kCpumMsrWrFn_Ia32PebsEnable,
713 kCpumMsrWrFn_Ia32ClockModulation,
714 kCpumMsrWrFn_Ia32ThermInterrupt,
715 kCpumMsrWrFn_Ia32ThermStatus,
716 kCpumMsrWrFn_Ia32Therm2Ctl,
717 kCpumMsrWrFn_Ia32MiscEnable,
718 kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
719 kCpumMsrWrFn_Ia32McNCtl2, /**< Takes register number of start of range. */
720 kCpumMsrWrFn_Ia32DsArea,
721 kCpumMsrWrFn_Ia32TscDeadline,
722 kCpumMsrWrFn_Ia32X2ApicN,
723 kCpumMsrWrFn_Ia32DebugInterface,
724
725 kCpumMsrWrFn_Amd64Efer,
726 kCpumMsrWrFn_Amd64SyscallTarget,
727 kCpumMsrWrFn_Amd64LongSyscallTarget,
728 kCpumMsrWrFn_Amd64CompSyscallTarget,
729 kCpumMsrWrFn_Amd64SyscallFlagMask,
730 kCpumMsrWrFn_Amd64FsBase,
731 kCpumMsrWrFn_Amd64GsBase,
732 kCpumMsrWrFn_Amd64KernelGsBase,
733 kCpumMsrWrFn_Amd64TscAux,
734 kCpumMsrWrFn_IntelEblCrPowerOn,
735 kCpumMsrWrFn_IntelP4EbcHardPowerOn,
736 kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
737 kCpumMsrWrFn_IntelP4EbcFrequencyId,
738 kCpumMsrWrFn_IntelFlexRatio,
739 kCpumMsrWrFn_IntelPkgCStConfigControl,
740 kCpumMsrWrFn_IntelPmgIoCaptureBase,
741 kCpumMsrWrFn_IntelLastBranchFromToN,
742 kCpumMsrWrFn_IntelLastBranchFromN,
743 kCpumMsrWrFn_IntelLastBranchToN,
744 kCpumMsrWrFn_IntelLastBranchTos,
745 kCpumMsrWrFn_IntelBblCrCtl,
746 kCpumMsrWrFn_IntelBblCrCtl3,
747 kCpumMsrWrFn_IntelI7TemperatureTarget,
748 kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
749 kCpumMsrWrFn_IntelI7MiscPwrMgmt,
750 kCpumMsrWrFn_IntelP6CrN,
751 kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
752 kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
753 kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
754 kCpumMsrWrFn_IntelI7SandyAesNiCtl,
755 kCpumMsrWrFn_IntelI7TurboRatioLimit,
756 kCpumMsrWrFn_IntelI7LbrSelect,
757 kCpumMsrWrFn_IntelI7SandyErrorControl,
758 kCpumMsrWrFn_IntelI7PowerCtl,
759 kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
760 kCpumMsrWrFn_IntelI7PebsLdLat,
761 kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
762 kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
763 kCpumMsrWrFn_IntelI7SandyRaplPowerUnit, /**< R/O but found writable bits on a Silvermont CPU here. */
764 kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
765 kCpumMsrWrFn_IntelI7SandyPkgC2Residency, /**< R/O but found writable bits on a Silvermont CPU here. */
766 kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
767 kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
768 kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
769 kCpumMsrWrFn_IntelI7RaplPp0Policy,
770 kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
771 kCpumMsrWrFn_IntelI7RaplPp1Policy,
772 kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
773 kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
774 kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
775 kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
776 kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
777 kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
778 kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
779 kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
780 kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
781 kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
782 kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
783 kCpumMsrWrFn_IntelCore1ExtConfig,
784 kCpumMsrWrFn_IntelCore1DtsCalControl,
785 kCpumMsrWrFn_IntelCore2PeciControl,
786
787 kCpumMsrWrFn_P6LastIntFromIp,
788 kCpumMsrWrFn_P6LastIntToIp,
789
790 kCpumMsrWrFn_AmdFam15hTscRate,
791 kCpumMsrWrFn_AmdFam15hLwpCfg,
792 kCpumMsrWrFn_AmdFam15hLwpCbAddr,
793 kCpumMsrWrFn_AmdFam10hMc4MiscN,
794 kCpumMsrWrFn_AmdK8PerfCtlN,
795 kCpumMsrWrFn_AmdK8PerfCtrN,
796 kCpumMsrWrFn_AmdK8SysCfg,
797 kCpumMsrWrFn_AmdK8HwCr,
798 kCpumMsrWrFn_AmdK8IorrBaseN,
799 kCpumMsrWrFn_AmdK8IorrMaskN,
800 kCpumMsrWrFn_AmdK8TopOfMemN,
801 kCpumMsrWrFn_AmdK8NbCfg1,
802 kCpumMsrWrFn_AmdK8McXcptRedir,
803 kCpumMsrWrFn_AmdK8CpuNameN,
804 kCpumMsrWrFn_AmdK8HwThermalCtrl,
805 kCpumMsrWrFn_AmdK8SwThermalCtrl,
806 kCpumMsrWrFn_AmdK8FidVidControl,
807 kCpumMsrWrFn_AmdK8McCtlMaskN,
808 kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
809 kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
810 kCpumMsrWrFn_AmdK8IntPendingMessage,
811 kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
812 kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
813 kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
814 kCpumMsrWrFn_AmdFam10hPStateControl,
815 kCpumMsrWrFn_AmdFam10hPStateStatus,
816 kCpumMsrWrFn_AmdFam10hPStateN,
817 kCpumMsrWrFn_AmdFam10hCofVidControl,
818 kCpumMsrWrFn_AmdFam10hCofVidStatus,
819 kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
820 kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
821 kCpumMsrWrFn_AmdK8SmmBase,
822 kCpumMsrWrFn_AmdK8SmmAddr,
823 kCpumMsrWrFn_AmdK8SmmMask,
824 kCpumMsrWrFn_AmdK8VmCr,
825 kCpumMsrWrFn_AmdK8IgnNe,
826 kCpumMsrWrFn_AmdK8SmmCtl,
827 kCpumMsrWrFn_AmdK8VmHSavePa,
828 kCpumMsrWrFn_AmdFam10hVmLockKey,
829 kCpumMsrWrFn_AmdFam10hSmmLockKey,
830 kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
831 kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
832 kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
833 kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
834 kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
835 kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
836 kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
837 kCpumMsrWrFn_AmdK7MicrocodeCtl,
838 kCpumMsrWrFn_AmdK7ClusterIdMaybe,
839 kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
840 kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
841 kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
842 kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
843 kCpumMsrWrFn_AmdK8PatchLoader,
844 kCpumMsrWrFn_AmdK7DebugStatusMaybe,
845 kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
846 kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
847 kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
848 kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
849 kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
850 kCpumMsrWrFn_AmdK7NodeId,
851 kCpumMsrWrFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
852 kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
853 kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
854 kCpumMsrWrFn_AmdK7LoadStoreCfg,
855 kCpumMsrWrFn_AmdK7InstrCacheCfg,
856 kCpumMsrWrFn_AmdK7DataCacheCfg,
857 kCpumMsrWrFn_AmdK7BusUnitCfg,
858 kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
859 kCpumMsrWrFn_AmdFam15hFpuCfg,
860 kCpumMsrWrFn_AmdFam15hDecoderCfg,
861 kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
862 kCpumMsrWrFn_AmdFam15hCombUnitCfg,
863 kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
864 kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
865 kCpumMsrWrFn_AmdFam15hExecUnitCfg,
866 kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
867 kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
868 kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
869 kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
870 kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
871 kCpumMsrWrFn_AmdFam10hIbsOpRip,
872 kCpumMsrWrFn_AmdFam10hIbsOpData,
873 kCpumMsrWrFn_AmdFam10hIbsOpData2,
874 kCpumMsrWrFn_AmdFam10hIbsOpData3,
875 kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
876 kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
877 kCpumMsrWrFn_AmdFam10hIbsCtl,
878 kCpumMsrWrFn_AmdFam14hIbsBrTarget,
879
880 kCpumMsrWrFn_Gim,
881
882 /** End of valid MSR write function indexes. */
883 kCpumMsrWrFn_End
884} CPUMMSRWRFN;
885
886/**
887 * MSR range.
888 */
889typedef struct CPUMMSRRANGE
890{
891 /** The first MSR. [0] */
892 uint32_t uFirst;
893 /** The last MSR. [4] */
894 uint32_t uLast;
895 /** The read function (CPUMMSRRDFN). [8] */
896 uint16_t enmRdFn;
897 /** The write function (CPUMMSRWRFN). [10] */
898 uint16_t enmWrFn;
899 /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
900 * UINT16_MAX if not used by the read and write functions. [12] */
901 uint16_t offCpumCpu;
902 /** Reserved for future hacks. [14] */
903 uint16_t fReserved;
904 /** The init/read value. [16]
905 * When enmRdFn is kCpumMsrRdFn_FixedValue, this is the value returned on RDMSR.
906 * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
907 * offset into CPUM. */
908 uint64_t uValue;
909 /** The bits to ignore when writing. [24] */
910 uint64_t fWrIgnMask;
911 /** The bits that will cause a GP(0) when writing. [32]
912 * This is always checked prior to calling the write function. Using
913 * UINT64_MAX effectively marks the MSR as read-only. */
914 uint64_t fWrGpMask;
915 /** The register name, if applicable. [40] */
916 char szName[56];
917
918#ifdef VBOX_WITH_STATISTICS
919 /** The number of reads. */
920 STAMCOUNTER cReads;
921 /** The number of writes. */
922 STAMCOUNTER cWrites;
923 /** The number of times ignored bits were written. */
924 STAMCOUNTER cIgnoredBits;
925 /** The number of GPs generated. */
926 STAMCOUNTER cGps;
927#endif
928} CPUMMSRRANGE;
929#ifndef VBOX_FOR_DTRACE_LIB
930# ifdef VBOX_WITH_STATISTICS
931AssertCompileSize(CPUMMSRRANGE, 128);
932# else
933AssertCompileSize(CPUMMSRRANGE, 96);
934# endif
935#endif
936/** Pointer to an MSR range. */
937typedef CPUMMSRRANGE *PCPUMMSRRANGE;
938/** Pointer to a const MSR range. */
939typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
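/* Example (editor's illustrative sketch, not part of the original header): a
 * hypothetical read-only MSR backed by a fixed value could be described like this
 * (plain designated initializers; the CPUM table macros are not shown in this file):
 *
 *     static CPUMMSRRANGE const s_ExampleRange =
 *     {
 *         .uFirst     = 0x00000123,      // hypothetical MSR index
 *         .uLast      = 0x00000123,
 *         .enmRdFn    = kCpumMsrRdFn_FixedValue,
 *         .enmWrFn    = kCpumMsrWrFn_ReadOnly,
 *         .offCpumCpu = UINT16_MAX,      // no CPUMCPU backing field
 *         .fReserved  = 0,
 *         .uValue     = 0,               // value returned on RDMSR
 *         .fWrIgnMask = 0,
 *         .fWrGpMask  = UINT64_MAX,      // any write attempt raises #GP(0)
 *         .szName     = "EXAMPLE_MSR",
 *     };
 */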
940
941
942/**
943 * CPU features and quirks.
944 * This is mostly exploded CPUID info.
945 */
946typedef struct CPUMFEATURES
947{
948 /** The CPU vendor (CPUMCPUVENDOR). */
949 uint8_t enmCpuVendor;
950 /** The CPU family. */
951 uint8_t uFamily;
952 /** The CPU model. */
953 uint8_t uModel;
954 /** The CPU stepping. */
955 uint8_t uStepping;
956 /** The microarchitecture. */
957#ifndef VBOX_FOR_DTRACE_LIB
958 CPUMMICROARCH enmMicroarch;
959#else
960 uint32_t enmMicroarch;
961#endif
962 /** The maximum physical address width of the CPU. */
963 uint8_t cMaxPhysAddrWidth;
964 /** Alignment padding. */
965 uint8_t abPadding[1];
966 /** Max size of the extended state (or FPU state if no XSAVE). */
967 uint16_t cbMaxExtendedState;
968
969 /** Supports MSRs. */
970 uint32_t fMsr : 1;
971 /** Supports the page size extension (4/2 MB pages). */
972 uint32_t fPse : 1;
973 /** Supports 36-bit page size extension (4 MB pages can map memory above
974 * 4GB). */
975 uint32_t fPse36 : 1;
976 /** Supports physical address extension (PAE). */
977 uint32_t fPae : 1;
978 /** Page attribute table (PAT) support (page level cache control). */
979 uint32_t fPat : 1;
980 /** Supports the FXSAVE and FXRSTOR instructions. */
981 uint32_t fFxSaveRstor : 1;
982 /** Supports the XSAVE and XRSTOR instructions. */
983 uint32_t fXSaveRstor : 1;
984 /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
985 uint32_t fOpSysXSaveRstor : 1;
986 /** Supports MMX. */
987 uint32_t fMmx : 1;
988 /** Supports AMD extensions to MMX instructions. */
989 uint32_t fAmdMmxExts : 1;
990 /** Supports SSE. */
991 uint32_t fSse : 1;
992 /** Supports SSE2. */
993 uint32_t fSse2 : 1;
994 /** Supports SSE3. */
995 uint32_t fSse3 : 1;
996 /** Supports SSSE3. */
997 uint32_t fSsse3 : 1;
998 /** Supports SSE4.1. */
999 uint32_t fSse41 : 1;
1000 /** Supports SSE4.2. */
1001 uint32_t fSse42 : 1;
1002 /** Supports AVX. */
1003 uint32_t fAvx : 1;
1004 /** Supports AVX2. */
1005 uint32_t fAvx2 : 1;
1006 /** Supports AVX512 foundation. */
1007 uint32_t fAvx512Foundation : 1;
1008 /** Supports RDTSC. */
1009 uint32_t fTsc : 1;
1010 /** Intel SYSENTER/SYSEXIT support */
1011 uint32_t fSysEnter : 1;
1012 /** First generation APIC. */
1013 uint32_t fApic : 1;
1014 /** Second generation APIC. */
1015 uint32_t fX2Apic : 1;
1016 /** Hypervisor present. */
1017 uint32_t fHypervisorPresent : 1;
1018 /** MWAIT & MONITOR instructions supported. */
1019 uint32_t fMonitorMWait : 1;
1020 /** MWAIT Extensions present. */
1021 uint32_t fMWaitExtensions : 1;
1022 /** Supports CMPXCHG16B in 64-bit mode. */
1023 uint32_t fMovCmpXchg16b : 1;
1024 /** Supports CLFLUSH. */
1025 uint32_t fClFlush : 1;
1026 /** Supports CLFLUSHOPT. */
1027 uint32_t fClFlushOpt : 1;
1028 /** Supports IA32_PRED_CMD.IBPB. */
1029 uint32_t fIbpb : 1;
1030 /** Supports IA32_SPEC_CTRL.IBRS. */
1031 uint32_t fIbrs : 1;
1032 /** Supports IA32_SPEC_CTRL.STIBP. */
1033 uint32_t fStibp : 1;
1034 /** Supports IA32_ARCH_CAP. */
1035 uint32_t fArchCap : 1;
1036 /** Supports PCID. */
1037 uint32_t fPcid : 1;
1038 /** Supports INVPCID. */
1039 uint32_t fInvpcid : 1;
1040 /** Supports read/write FSGSBASE instructions. */
1041 uint32_t fFsGsBase : 1;
1042
1043 /** Supports AMD 3DNow instructions. */
1044 uint32_t f3DNow : 1;
1045 /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
1046 uint32_t f3DNowPrefetch : 1;
1047
1048 /** AMD64: Supports long mode. */
1049 uint32_t fLongMode : 1;
1050 /** AMD64: SYSCALL/SYSRET support. */
1051 uint32_t fSysCall : 1;
1052 /** AMD64: No-execute page table bit. */
1053 uint32_t fNoExecute : 1;
1054 /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
1055 uint32_t fLahfSahf : 1;
1056 /** AMD64: Supports RDTSCP. */
1057 uint32_t fRdTscP : 1;
1058 /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
1059 uint32_t fMovCr8In32Bit : 1;
1060 /** AMD64: Supports XOP (similar to VEX3/AVX). */
1061 uint32_t fXop : 1;
1062
1063 /** Indicates that FPU instruction and data pointers may leak.
1064 * This generally applies to recent AMD CPUs, where the FPU IP and DP pointers
1065 * are only saved and restored if an exception is pending. */
1066 uint32_t fLeakyFxSR : 1;
1067
1068 /** AMD64: Supports AMD SVM. */
1069 uint32_t fSvm : 1;
1070
1071 /** Support for Intel VMX. */
1072 uint32_t fVmx : 1;
1073
1074 /** Alignment padding / reserved for future use. */
1075 uint32_t fPadding : 16;
1076
1077 /** SVM: Supports Nested-paging. */
1078 uint32_t fSvmNestedPaging : 1;
1079 /** SVM: Support LBR (Last Branch Record) virtualization. */
1080 uint32_t fSvmLbrVirt : 1;
1081 /** SVM: Supports SVM lock. */
1082 uint32_t fSvmSvmLock : 1;
1083 /** SVM: Supports Next RIP save. */
1084 uint32_t fSvmNextRipSave : 1;
1085 /** SVM: Supports TSC rate MSR. */
1086 uint32_t fSvmTscRateMsr : 1;
1087 /** SVM: Supports VMCB clean bits. */
1088 uint32_t fSvmVmcbClean : 1;
1089 /** SVM: Supports Flush-by-ASID. */
1090 uint32_t fSvmFlusbByAsid : 1;
1091 /** SVM: Supports decode assist. */
1092 uint32_t fSvmDecodeAssists : 1;
1093 /** SVM: Supports Pause filter. */
1094 uint32_t fSvmPauseFilter : 1;
1095 /** SVM: Supports Pause filter threshold. */
1096 uint32_t fSvmPauseFilterThreshold : 1;
1097 /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
1098 uint32_t fSvmAvic : 1;
1099 /** SVM: Supports Virtualized VMSAVE/VMLOAD. */
1100 uint32_t fSvmVirtVmsaveVmload : 1;
1101 /** SVM: Supports VGIF (Virtual Global Interrupt Flag). */
1102 uint32_t fSvmVGif : 1;
1103 /** SVM: Padding / reserved for future features. */
1104 uint32_t fSvmPadding0 : 19;
1105 /** SVM: Maximum supported ASID. */
1106 uint32_t uSvmMaxAsid;
1107
1108 /** @todo VMX features. */
1109 uint32_t auPadding[1];
1110} CPUMFEATURES;
1111#ifndef VBOX_FOR_DTRACE_LIB
1112AssertCompileSize(CPUMFEATURES, 32);
1113#endif
1114/** Pointer to a CPU feature structure. */
1115typedef CPUMFEATURES *PCPUMFEATURES;
1116/** Pointer to a const CPU feature structure. */
1117typedef CPUMFEATURES const *PCCPUMFEATURES;
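/* Example (editor's illustrative sketch, not part of the original header): code
 * holding a PCCPUMFEATURES pointer normally just tests the relevant bits, e.g.:
 *
 *     uint32_t cbXStateArea = 0;
 *     if (pFeatures->fXSaveRstor && pFeatures->fAvx)
 *         cbXStateArea = pFeatures->cbMaxExtendedState;   // full XSAVE area size
 */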
1118
1119
1120#ifndef VBOX_FOR_DTRACE_LIB
1121
1122/** @name Guest Register Getters.
1123 * @{ */
1124VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR);
1125VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
1126VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden);
1127VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu);
1128VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
1129VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu);
1130VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu);
1131VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu);
1132VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu);
1133VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu);
1134VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue);
1135VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu);
1136VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu);
1137VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu);
1138VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu);
1139VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu);
1140VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu);
1141VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu);
1142VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu);
1143VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu);
1144VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu);
1145VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu);
1146VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu);
1147VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu);
1148VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu);
1149VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu);
1150VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu);
1151VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu);
1152VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu);
1153VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu);
1154VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu);
1155VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu);
1156VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu);
1157VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu);
1158VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu);
1159VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu);
1160VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
1161VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t iSubLeaf,
1162 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
1163VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu);
1164VMMDECL(VBOXSTRICTRC) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue);
1165VMMDECL(VBOXSTRICTRC) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue);
1166VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM);
1167VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM);
1168/** @} */
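/* Example (editor's illustrative sketch, not part of the original header): querying
 * guest CPUID leaf 1 and the EFER MSR via the getters above might look like this
 * (MSR_K6_EFER comes from iprt/x86.h, included at the top of this file):
 *
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
 *
 *     uint64_t uEfer = 0;
 *     VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K6_EFER, &uEfer);
 */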
1169
1170/** @name Guest Register Setters.
1171 * @{ */
1172VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1173VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1174VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
1175VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
1176VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0);
1177VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
1178VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
1179VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
1180VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0);
1181VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1);
1182VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2);
1183VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3);
1184VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
1185VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7);
1186VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value);
1187VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue);
1188VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
1189VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
1190VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
1191VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
1192VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
1193VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
1194VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
1195VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
1196VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
1197VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
1198VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
1199VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
1200VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
1201VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
1202VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
1203VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
1204VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
1205VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1206VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1207VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1208VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
1209VMMDECL(void) CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
1210VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu);
1211VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg);
1212VMMR0_INT_DECL(void) CPUMR0SetGuestTscAux(PVMCPU pVCpu, uint64_t uValue);
1213VMMR0_INT_DECL(uint64_t) CPUMR0GetGuestTscAux(PVMCPU pVCpu);
1214/** @} */
1215
1216
1217/** @name Misc Guest Predicate Functions.
1218 * @{ */
1219VMMDECL(bool) CPUMIsGuestIn16BitCode(PVMCPU pVCpu);
1220VMMDECL(bool) CPUMIsGuestIn32BitCode(PVMCPU pVCpu);
1221VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
1222VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu);
1223VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu);
1224VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu);
1225VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu);
1226VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu);
1227VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu);
1228VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu);
1229VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu);
1230VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu);
1231VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu);
1232VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu);
1233/** @} */
1234
1235/** @name Nested Hardware-Virtualization Helpers.
1236 * @{ */
1237VMM_INT_DECL(bool) CPUMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
1238VMM_INT_DECL(bool) CPUMCanSvmNstGstTakeVirtIntr(PCCPUMCTX pCtx);
1239VMM_INT_DECL(uint8_t) CPUMGetSvmNstGstInterrupt(PCCPUMCTX pCtx);
1240VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx);
1241VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
1242VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks);
1243/** @} */
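/* Example (editor's illustrative sketch, not part of the original header): a caller
 * that has already computed a guest TSC value ('uGuestTsc' is hypothetical here)
 * could fold in the nested-guest offset like so:
 *
 *     uint64_t const uNstGstTsc = CPUMApplyNestedGuestTscOffset(pVCpu, uGuestTsc);
 */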
1244
1245#ifndef IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS
1246
1247/**
1248 * Tests if the guest is running in real mode or not.
1249 *
1250 * @returns true if in real mode, otherwise false.
1251 * @param pCtx Current CPU context.
1252 */
1253DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCPUMCTX pCtx)
1254{
1255 return !(pCtx->cr0 & X86_CR0_PE);
1256}
1257
1258/**
1259 * Tests if the guest is running in real or virtual 8086 mode.
1260 *
1261 * @returns @c true if it is, @c false if not.
1262 * @param pCtx Current CPU context.
1263 */
1264DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCPUMCTX pCtx)
1265{
1266 return !(pCtx->cr0 & X86_CR0_PE)
1267 || pCtx->eflags.Bits.u1VM; /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
1268}
1269
1270/**
1271 * Tests if the guest is running in virtual 8086 mode.
1272 *
1273 * @returns @c true if it is, @c false if not.
1274 * @param pCtx Current CPU context.
1275 */
1276DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCPUMCTX pCtx)
1277{
1278 return (pCtx->eflags.Bits.u1VM == 1);
1279}
1280
1281/**
1282 * Tests if the guest is running in paged protected mode or not.
1283 *
1284 * @returns true if in paged protected mode, otherwise false.
1285 * @param pCtx Current CPU context.
1286 */
1287DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
1288{
1289 return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1290}
1291
1292/**
1293 * Tests if the guest is running in long mode or not.
1294 *
1295 * @returns true if in long mode, otherwise false.
1296 * @param pCtx Current CPU context.
1297 */
1298DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCPUMCTX pCtx)
1299{
1300 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1301}
1302
1303VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
1304
1305/**
1306 * Tests if the guest is running in 64-bit mode or not.
1307 *
1308 * @returns true if in 64-bit protected mode, otherwise false.
1309 * @param pCtx Current CPU context.
1310 */
1311DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
1312{
1313 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
1314 return false;
1315 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
1316 return CPUMIsGuestIn64BitCodeSlow(pCtx);
1317 return pCtx->cs.Attr.n.u1Long;
1318}
1319
1320/**
1321 * Tests if the guest has paging enabled or not.
1322 *
1323 * @returns true if paging is enabled, otherwise false.
1324 * @param pCtx Current CPU context.
1325 */
1326DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCPUMCTX pCtx)
1327{
1328 return !!(pCtx->cr0 & X86_CR0_PG);
1329}
1330
1331/**
1332 * Tests if the guest is running in PAE mode or not.
1333 *
1334 * @returns true if in PAE mode, otherwise false.
1335 * @param pCtx Current CPU context.
1336 */
1337DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCPUMCTX pCtx)
1338{
1339 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1340 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1341 return ( (pCtx->cr4 & X86_CR4_PAE)
1342 && CPUMIsGuestPagingEnabledEx(pCtx)
1343 && !(pCtx->msrEFER & MSR_K6_EFER_LMA));
1344}
1345
1346/**
1347 * Tests if the guest has AMD SVM enabled or not.
1348 *
1349 * @returns true if SVM is enabled, otherwise false.
1350 * @param pCtx Current CPU context.
1351 */
1352DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
1353{
1354 return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
1355}
1356
1357#ifndef IN_RC
1358/**
1359 * Checks if the guest VMCB has the specified ctrl/instruction intercept active.
1360 *
1361 * @returns @c true if the intercept is set, @c false otherwise.
1362 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1363 * @param pCtx Pointer to the context.
1364 * @param fIntercept The SVM control/instruction intercept, see
1365 * SVM_CTRL_INTERCEPT_*.
1366 */
1367DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept)
1368{
1369 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1370 if (!pVmcb)
1371 return false;
1372 if (!pCtx->hwvirt.svm.fHMCachedVmcb)
1373 return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fIntercept);
1374 return HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, fIntercept);
1375}
1376
1377/**
1378 * Checks if the guest VMCB has the specified CR read intercept active.
1379 *
1380 * @returns @c true if the intercept is set, @c false otherwise.
1381 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1382 * @param pCtx Pointer to the context.
1383 * @param uCr The CR register number (0 to 15).
1384 */
1385DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1386{
1387 Assert(uCr < 16);
1388 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1389 if (!pVmcb)
1390 return false;
1391 if (!pCtx->hwvirt.svm.fHMCachedVmcb)
1392 return RT_BOOL(pVmcb->ctrl.u16InterceptRdCRx & (UINT16_C(1) << uCr));
1393 return HMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr);
1394}
1395
1396/**
1397 * Checks if the guest VMCB has the specified CR write intercept active.
1398 *
1399 * @returns @c true if the intercept is set, @c false otherwise.
1400 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1401 * @param pCtx Pointer to the context.
1402 * @param uCr The CR register number (0 to 15).
1403 */
1404DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
1405{
1406 Assert(uCr < 16);
1407 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1408 if (!pVmcb)
1409 return false;
1410 if (!pCtx->hwvirt.svm.fHMCachedVmcb)
1411 return RT_BOOL(pVmcb->ctrl.u16InterceptWrCRx & (UINT16_C(1) << uCr));
1412 return HMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr);
1413}
1414
1415/**
1416 * Checks if the guest VMCB has the specified DR read intercept active.
1417 *
1418 * @returns @c true if the intercept is set, @c false otherwise.
1419 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1420 * @param pCtx Pointer to the context.
1421 * @param uDr The DR register number (0 to 15).
1422 */
1423DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1424{
1425 Assert(uDr < 16);
1426 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1427 if (!pVmcb)
1428 return false;
1429 if (!pCtx->hwvirt.svm.fHMCachedVmcb)
1430 return RT_BOOL(pVmcb->ctrl.u16InterceptRdDRx & (UINT16_C(1) << uDr));
1431 return HMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr);
1432}
1433
1434/**
1435 * Checks if the guest VMCB has the specified DR write intercept active.
1436 *
1437 * @returns @c true if the intercept is set, @c false otherwise.
1438 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1439 * @param pCtx Pointer to the context.
1440 * @param uDr The DR register number (0 to 15).
1441 */
1442DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
1443{
1444 Assert(uDr < 16);
1445 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1446 if (!pVmcb)
1447 return false;
1448 if (!pCtx->hwvirt.svm.fHMCachedVmcb)
1449 return RT_BOOL(pVmcb->ctrl.u16InterceptWrDRx & (UINT16_C(1) << uDr));
1450 return HMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr);
1451}
1452
1453/**
1454 * Checks if the guest VMCB has the specified exception intercept active.
1455 *
1456 * @returns @c true if the intercept is active, @c false otherwise.
1457 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1458 * @param pCtx Pointer to the context.
1459 * @param uVector The exception / interrupt vector.
1460 */
1461DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
1462{
1463 Assert(uVector < 32);
1464 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1465 if (!pVmcb)
1466 return false;
1467 if (!pCtx->hwvirt.svm.fHMCachedVmcb)
1468 return RT_BOOL(pVmcb->ctrl.u32InterceptXcpt & (UINT32_C(1) << uVector));
1469 return HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector);
1470}
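
/*
 * Illustrative sketch: checks an emulator might perform for a nested guest
 * before completing a MOV to CR4 and before raising a general protection
 * fault, using the CR-write and exception intercept helpers above.  How the
 * resulting #VMEXIT would be delivered is intentionally left out.
 *
 *     // a) Is a guest MOV to CR4 intercepted by the L1 hypervisor?
 *     bool const fInterceptCr4Wr = CPUMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, 4);
 *
 *     // b) Must a #GP raised while emulating be reflected to L1 as a #VMEXIT?
 *     bool const fInterceptGp    = CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_GP);
 */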
1471#endif /* !IN_RC */
1472
1473/**
1474 * Checks if we are executing inside an SVM nested hardware-virtualized guest.
1475 *
1476 * @returns @c true if in SVM nested-guest mode, @c false otherwise.
1477 * @param pCtx Pointer to the context.
1478 */
1479DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
1480{
1481 /*
1482 * With AMD-V, the VMRUN intercept is a prerequisite to entering SVM guest-mode.
1483 * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
1484 */
1485#ifndef IN_RC
1486 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1487 return pVmcb && (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN);
1488#else
1489 NOREF(pCtx);
1490 return false;
1491#endif
1492}
1493
1494/**
1495 * Checks if we are executing inside a VMX nested hardware-virtualized guest.
1496 *
1497 * @returns @c true if in VMX nested-guest mode, @c false otherwise.
1498 * @param pCtx Pointer to the context.
1499 */
1500DECLINLINE(bool) CPUMIsGuestInVmxNestedHwVirtMode(PCCPUMCTX pCtx)
1501{
1502 /** @todo Intel. */
1503 NOREF(pCtx);
1504 return false;
1505}
1506
1507/**
1508 * Checks if we are executing inside a nested hardware-virtualized guest.
1509 *
1510 * @returns @c true if in SVM/VMX nested-guest mode, @c false otherwise.
1511 * @param pCtx Pointer to the context.
1512 */
1513DECLINLINE(bool) CPUMIsGuestInNestedHwVirtMode(PCCPUMCTX pCtx)
1514{
1515 return CPUMIsGuestInSvmNestedHwVirtMode(pCtx) || CPUMIsGuestInVmxNestedHwVirtMode(pCtx);
1516}
1517#endif /* IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS */
1518
1519/** @} */
1520
1521
1522/** @name Hypervisor Register Getters.
1523 * @{ */
1524VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu);
1525VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu);
1526VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu);
1527VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu);
1528VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu);
1529VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu);
1530#if 0 /* these are not correct. */
1531VMMDECL(uint32_t) CPUMGetHyperCR0(PVMCPU pVCpu);
1532VMMDECL(uint32_t) CPUMGetHyperCR2(PVMCPU pVCpu);
1533VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
1534VMMDECL(uint32_t) CPUMGetHyperCR4(PVMCPU pVCpu);
1535#endif
1536/** This register is only saved on fatal traps. */
1537VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu);
1538VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu);
1539/** This register is only saved on fatal traps. */
1540VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu);
1541/** This register is only saved on fatal traps. */
1542VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu);
1543VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu);
1544VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu);
1545VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu);
1546VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu);
1547VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu);
1548VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu);
1549VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu);
1550VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
1551VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
1552VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu);
1553VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu);
1554VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu);
1555VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu);
1556VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu);
1557VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu);
1558VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu);
1559VMMDECL(void) CPUMGetHyperCtx(PVMCPU pVCpu, PCPUMCTX pCtx);
1560VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
1561/** @} */
1562
1563/** @name Hypervisor Register Setters.
1564 * @{ */
1565VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit);
1566VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR);
1567VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit);
1568VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
1569VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR);
1570VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS);
1571VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS);
1572VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES);
1573VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS);
1574VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS);
1575VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS);
1576VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP);
1577VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl);
1578VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP);
1579VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX);
1580VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
1581VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
1582VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
1583VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
1584VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
1585VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
1586VMMDECL(void) CPUMSetHyperCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
1587VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper);
1588/** @} */
1589
1590VMMDECL(void) CPUMPushHyper(PVMCPU pVCpu, uint32_t u32);
1591VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx);
1592VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu);
1593VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu);
1594VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
1595VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu);
1596VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu);
1597VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc);
1598VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu);
1599VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl);
1600
1601/** @name Changed flags.
1602 * These flags are used to keep track of which important registers have
1603 * been changed since they were last reset. The only component allowed
1604 * to clear them is REM!
1605 * @{
1606 */
1607#define CPUM_CHANGED_FPU_REM RT_BIT(0)
1608#define CPUM_CHANGED_CR0 RT_BIT(1)
1609#define CPUM_CHANGED_CR4 RT_BIT(2)
1610#define CPUM_CHANGED_GLOBAL_TLB_FLUSH RT_BIT(3)
1611#define CPUM_CHANGED_CR3 RT_BIT(4)
1612#define CPUM_CHANGED_GDTR RT_BIT(5)
1613#define CPUM_CHANGED_IDTR RT_BIT(6)
1614#define CPUM_CHANGED_LDTR RT_BIT(7)
1615#define CPUM_CHANGED_TR RT_BIT(8) /**< Currently unused. */
1616#define CPUM_CHANGED_SYSENTER_MSR RT_BIT(9)
1617#define CPUM_CHANGED_HIDDEN_SEL_REGS RT_BIT(10) /**< Currently unused. */
1618#define CPUM_CHANGED_CPUID RT_BIT(11)
1619#define CPUM_CHANGED_ALL ( CPUM_CHANGED_FPU_REM \
1620 | CPUM_CHANGED_CR0 \
1621 | CPUM_CHANGED_CR4 \
1622 | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
1623 | CPUM_CHANGED_CR3 \
1624 | CPUM_CHANGED_GDTR \
1625 | CPUM_CHANGED_IDTR \
1626 | CPUM_CHANGED_LDTR \
1627 | CPUM_CHANGED_TR \
1628 | CPUM_CHANGED_SYSENTER_MSR \
1629 | CPUM_CHANGED_HIDDEN_SEL_REGS \
1630 | CPUM_CHANGED_CPUID )
1631/** @} */
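
/*
 * Illustrative sketch: after emulating an instruction that changes CR3, the
 * emulator would record the fact so that REM can resynchronize later; flags
 * can be OR'ed together when several registers changed.  Exactly which flags a
 * given caller sets depends on what it modified.
 *
 *     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
 *     // or, after a mode switch touching CR0 and CR4:
 *     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0 | CPUM_CHANGED_CR4);
 */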
1632
1633VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
1634VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl);
1635VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels);
1636VMMDECL(bool) CPUMSupportsXSave(PVM pVM);
1637VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM);
1638VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM);
1639VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
1640VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
1641VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
1642VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
1643VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu);
1644VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
1645VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
1646VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu);
1647VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu);
1648VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu);
1649VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu);
1650VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
1651VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM);
1652VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM);
1653VMMDECL(int) CPUMQueryValidatedGuestEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
1654 uint64_t *puValidEfer);
1655VMMDECL(void) CPUMSetGuestMsrEferNoCheck(PVMCPU pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
1656
1657
1658/** @name Typical scalable bus frequency values.
1659 * @{ */
1660/** Special internal value indicating that we don't know the frequency.
1661 * @internal */
1662#define CPUM_SBUSFREQ_UNKNOWN UINT64_C(1)
1663#define CPUM_SBUSFREQ_100MHZ UINT64_C(100000000)
1664#define CPUM_SBUSFREQ_133MHZ UINT64_C(133333333)
1665#define CPUM_SBUSFREQ_167MHZ UINT64_C(166666666)
1666#define CPUM_SBUSFREQ_200MHZ UINT64_C(200000000)
1667#define CPUM_SBUSFREQ_267MHZ UINT64_C(266666666)
1668#define CPUM_SBUSFREQ_333MHZ UINT64_C(333333333)
1669#define CPUM_SBUSFREQ_400MHZ UINT64_C(400000000)
1670/** @} */
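
/*
 * Illustrative sketch: CPUM_SBUSFREQ_UNKNOWN is a 1 Hz sentinel, so callers of
 * CPUMGetGuestScalableBusFrequency() should treat it as "no information"
 * rather than divide by it.  uTscHz below is a hypothetical caller-supplied
 * value.
 *
 *     uint64_t const uFsbHz = CPUMGetGuestScalableBusFrequency(pVM);
 *     if (uFsbHz != CPUM_SBUSFREQ_UNKNOWN)
 *     {
 *         uint64_t const uBusRatio = uTscHz / uFsbHz;
 *         // ... report the bus/core ratio to the guest ...
 *     }
 */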
1671
1672
1673#ifdef IN_RING3
1674/** @defgroup grp_cpum_r3 The CPUM ring-3 API
1675 * @{
1676 */
1677
1678VMMR3DECL(int) CPUMR3Init(PVM pVM);
1679VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
1680VMMR3DECL(void) CPUMR3LogCpuIds(PVM pVM);
1681VMMR3DECL(void) CPUMR3Relocate(PVM pVM);
1682VMMR3DECL(int) CPUMR3Term(PVM pVM);
1683VMMR3DECL(void) CPUMR3Reset(PVM pVM);
1684VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
1685VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM);
1686VMMR3DECL(void) CPUMR3SetHWVirtEx(PVM pVM, bool fHWVirtExEnabled);
1687VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
1688
1689VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
1690VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
1691VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
1692 uint8_t bModel, uint8_t bStepping);
1693VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch);
1694VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
1695VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
1696VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
1697VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
1698VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor);
1699VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void);
1700
1701VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
1702
1703# if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING)
1704/** @name APIs for the CPUID raw-mode patch (legacy).
1705 * @{ */
1706VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM);
1707VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM);
1708VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM);
1709VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM);
1710VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM);
1711VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM);
1712VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM);
1713/** @} */
1714# endif
1715
1716/** @} */
1717#endif /* IN_RING3 */
1718
1719#ifdef IN_RC
1720/** @defgroup grp_cpum_rc The CPUM Raw-mode Context API
1721 * @{
1722 */
1723
1724/**
1725 * Calls a guest trap/interrupt handler directly.
1726 *
1727 * Assumes a trap stack frame has already been set up on the guest's stack!
1728 * This function does not return!
1729 *
1730 * @param pRegFrame Original trap/interrupt context
1731 * @param selCS Code selector of handler
1732 * @param pHandler GC virtual address of handler
1733 * @param eflags Callee's EFLAGS
1734 * @param selSS Stack selector for handler
1735 * @param pEsp Stack address for handler
1736 */
1737DECLASM(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTRCPTR pHandler,
1738 uint32_t eflags, uint32_t selSS, RTRCPTR pEsp);
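
/*
 * Illustrative sketch (heavily simplified): before dispatching, the caller is
 * assumed to have pushed the IRET frame (EFLAGS, CS, EIP and, where
 * applicable, an error code) onto the guest stack that the last parameter
 * points at.  SelGuestCS, GCPtrHandler, SelGuestSS and GCPtrGuestEsp are
 * hypothetical locals resolved from the guest IDT/TSS beforehand.
 *
 *     uint32_t fEfl = pRegFrame->eflags.u32;
 *     fEfl &= ~(X86_EFL_TF | X86_EFL_IF);   // e.g. for an interrupt gate
 *     CPUMGCCallGuestTrapHandler(pRegFrame, SelGuestCS, GCPtrHandler, fEfl,
 *                                SelGuestSS, GCPtrGuestEsp);
 *     // not reached
 */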
1739
1740/**
1741 * Call guest V86 code directly.
1742 *
1743 * This function does not return!
1744 *
1745 * @param pRegFrame Original trap/interrupt context
1746 */
1747DECLASM(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
1748
1749VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu);
1750VMMDECL(uint32_t) CPUMRCGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
1751#ifdef VBOX_WITH_RAW_RING1
1752VMMDECL(void) CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore);
1753#endif
1754VMMRCDECL(void) CPUMRCProcessForceFlag(PVMCPU pVCpu);
1755
1756/** @} */
1757#endif /* IN_RC */
1758
1759#ifdef IN_RING0
1760/** @defgroup grp_cpum_r0 The CPUM ring-0 API
1761 * @{
1762 */
1763VMMR0_INT_DECL(int) CPUMR0ModuleInit(void);
1764VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void);
1765VMMR0_INT_DECL(int) CPUMR0InitVM(PVM pVM);
1766DECLASM(void) CPUMR0RegisterVCpuThread(PVMCPU pVCpu);
1767DECLASM(void) CPUMR0TouchHostFpu(void);
1768VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu);
1769VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu);
1770VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu);
1771VMMR0_INT_DECL(int) CPUMR0SaveHostDebugState(PVM pVM, PVMCPU pVCpu);
1772VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6);
1773VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6);
1774
1775VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6);
1776VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6);
1777#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1778VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, uint32_t iHostCpuSet);
1779#endif
1780
1781/** @} */
1782#endif /* IN_RING0 */
1783
1784/** @defgroup grp_cpum_rz The CPUM raw-mode and ring-0 context API
1785 * @{
1786 */
1787VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPU pVCpu);
1788VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPU pVCpu);
1789VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPU pVCpu);
1790VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPU pVCpu);
1791VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPU pVCpu);
1792/** @} */
1793
1794
1795#endif /* !VBOX_FOR_DTRACE_LIB */
1796/** @} */
1797RT_C_DECLS_END
1798
1799
1800#endif
1801