VirtualBox

source: vbox/trunk/include/VBox/vmm/cpum.h@68485

Last change on this file since 68485 was 68226, checked in by vboxsync, 7 years ago

VMM: Nested Hw.virt: SVM R0 bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 69.9 KB
1/** @file
2 * CPUM - CPU Monitor(/ Manager).
3 */
4
5/*
6 * Copyright (C) 2006-2016 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_cpum_h
27#define ___VBox_vmm_cpum_h
28
29#include <iprt/x86.h>
30#include <VBox/types.h>
31#include <VBox/vmm/cpumctx.h>
32#include <VBox/vmm/stam.h>
33#include <VBox/vmm/vmapi.h>
34
35RT_C_DECLS_BEGIN
36
37/** @defgroup grp_cpum The CPU Monitor / Manager API
38 * @ingroup grp_vmm
39 * @{
40 */
41
42/**
43 * CPUID feature to set or clear.
44 */
45typedef enum CPUMCPUIDFEATURE
46{
47 CPUMCPUIDFEATURE_INVALID = 0,
48 /** The APIC feature bit. (Std+Ext)
49 * Note! There is a per-cpu flag for masking this CPUID feature bit when the
50 * APICBASE.ENABLED bit is zero. So, this feature is only set/cleared
 51 * at VM construction time like all the others. This didn't use to be
 52 * that way; this is new with 5.1. */
53 CPUMCPUIDFEATURE_APIC,
54 /** The sysenter/sysexit feature bit. (Std) */
55 CPUMCPUIDFEATURE_SEP,
 56 /** The SYSCALL/SYSRET feature bit (64 bits mode only for Intel CPUs). (Ext) */
57 CPUMCPUIDFEATURE_SYSCALL,
58 /** The PAE feature bit. (Std+Ext) */
59 CPUMCPUIDFEATURE_PAE,
60 /** The NX feature bit. (Ext) */
61 CPUMCPUIDFEATURE_NX,
62 /** The LAHF/SAHF feature bit (64 bits mode only). (Ext) */
63 CPUMCPUIDFEATURE_LAHF,
64 /** The LONG MODE feature bit. (Ext) */
65 CPUMCPUIDFEATURE_LONG_MODE,
66 /** The PAT feature bit. (Std+Ext) */
67 CPUMCPUIDFEATURE_PAT,
68 /** The x2APIC feature bit. (Std) */
69 CPUMCPUIDFEATURE_X2APIC,
70 /** The RDTSCP feature bit. (Ext) */
71 CPUMCPUIDFEATURE_RDTSCP,
72 /** The Hypervisor Present bit. (Std) */
73 CPUMCPUIDFEATURE_HVP,
74 /** The MWait Extensions bits (Std) */
75 CPUMCPUIDFEATURE_MWAIT_EXTS,
76 /** 32bit hackishness. */
77 CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
78} CPUMCPUIDFEATURE;
79
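/*
 * Illustrative sketch (hypothetical usage, not actual VirtualBox code): the
 * values above are meant for the CPUMR3SetGuestCpuIdFeature,
 * CPUMR3ClearGuestCpuIdFeature and CPUMR3GetGuestCpuIdFeature APIs declared
 * further down in this file.  Assuming a valid PVM handle at VM construction
 * time, a minimal use could look like this:
 *
 * @code
 *     CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);      // expose NX
 *     CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);  // expose RDTSCP
 *     if (!CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
 *         CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE); // make sure PAE is exposed
 * @endcode
 */
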
80/**
81 * CPU Vendor.
82 */
83typedef enum CPUMCPUVENDOR
84{
85 CPUMCPUVENDOR_INVALID = 0,
86 CPUMCPUVENDOR_INTEL,
87 CPUMCPUVENDOR_AMD,
88 CPUMCPUVENDOR_VIA,
89 CPUMCPUVENDOR_CYRIX,
90 CPUMCPUVENDOR_UNKNOWN,
91 /** 32bit hackishness. */
92 CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
93} CPUMCPUVENDOR;
94
95
96/**
 97 * X86 and AMD64 CPU microarchitectures and processor generations.
98 *
99 * @remarks The separation here is sometimes a little bit too finely grained,
 100 * and the difference is more like processor generation than
 101 * microarchitecture. This can be useful, so we'll provide functions for
 102 * getting at more coarse-grained info.
103 */
104typedef enum CPUMMICROARCH
105{
106 kCpumMicroarch_Invalid = 0,
107
108 kCpumMicroarch_Intel_First,
109
110 kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
111 kCpumMicroarch_Intel_80186,
112 kCpumMicroarch_Intel_80286,
113 kCpumMicroarch_Intel_80386,
114 kCpumMicroarch_Intel_80486,
115 kCpumMicroarch_Intel_P5,
116
117 kCpumMicroarch_Intel_P6_Core_Atom_First,
118 kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
119 kCpumMicroarch_Intel_P6_II,
120 kCpumMicroarch_Intel_P6_III,
121
122 kCpumMicroarch_Intel_P6_M_Banias,
123 kCpumMicroarch_Intel_P6_M_Dothan,
124 kCpumMicroarch_Intel_Core_Yonah, /**< Core, also known as Enhanced Pentium M. */
125
126 kCpumMicroarch_Intel_Core2_First,
127 kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First,
128 kCpumMicroarch_Intel_Core2_Penryn,
129
130 kCpumMicroarch_Intel_Core7_First,
131 kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
132 kCpumMicroarch_Intel_Core7_Westmere,
133 kCpumMicroarch_Intel_Core7_SandyBridge,
134 kCpumMicroarch_Intel_Core7_IvyBridge,
135 kCpumMicroarch_Intel_Core7_Haswell,
136 kCpumMicroarch_Intel_Core7_Broadwell,
137 kCpumMicroarch_Intel_Core7_Skylake,
138 kCpumMicroarch_Intel_Core7_Cannonlake,
139 kCpumMicroarch_Intel_Core7_End,
140
141 kCpumMicroarch_Intel_Atom_First,
142 kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
143 kCpumMicroarch_Intel_Atom_Lincroft, /**< Second generation bonnell (44nm). */
144 kCpumMicroarch_Intel_Atom_Saltwell, /**< 32nm shrink of Bonnell. */
145 kCpumMicroarch_Intel_Atom_Silvermont, /**< 22nm */
146 kCpumMicroarch_Intel_Atom_Airmount, /**< 14nm */
147 kCpumMicroarch_Intel_Atom_Goldmont, /**< 14nm */
148 kCpumMicroarch_Intel_Atom_Unknown,
149 kCpumMicroarch_Intel_Atom_End,
150
151 kCpumMicroarch_Intel_P6_Core_Atom_End,
152
153 kCpumMicroarch_Intel_NB_First,
154 kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
155 kCpumMicroarch_Intel_NB_Northwood, /**< 130nm */
156 kCpumMicroarch_Intel_NB_Prescott, /**< 90nm */
157 kCpumMicroarch_Intel_NB_Prescott2M, /**< 90nm */
158 kCpumMicroarch_Intel_NB_CedarMill, /**< 65nm */
159 kCpumMicroarch_Intel_NB_Gallatin, /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
160 kCpumMicroarch_Intel_NB_Unknown,
161 kCpumMicroarch_Intel_NB_End,
162
163 kCpumMicroarch_Intel_Unknown,
164 kCpumMicroarch_Intel_End,
165
166 kCpumMicroarch_AMD_First,
167 kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
168 kCpumMicroarch_AMD_Am386,
169 kCpumMicroarch_AMD_Am486,
170 kCpumMicroarch_AMD_Am486Enh, /**< Covers Am5x86 as well. */
171 kCpumMicroarch_AMD_K5,
172 kCpumMicroarch_AMD_K6,
173
174 kCpumMicroarch_AMD_K7_First,
175 kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
176 kCpumMicroarch_AMD_K7_Spitfire,
177 kCpumMicroarch_AMD_K7_Thunderbird,
178 kCpumMicroarch_AMD_K7_Morgan,
179 kCpumMicroarch_AMD_K7_Thoroughbred,
180 kCpumMicroarch_AMD_K7_Barton,
181 kCpumMicroarch_AMD_K7_Unknown,
182 kCpumMicroarch_AMD_K7_End,
183
184 kCpumMicroarch_AMD_K8_First,
185 kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
186 kCpumMicroarch_AMD_K8_90nm, /**< 90nm shrink */
187 kCpumMicroarch_AMD_K8_90nm_DualCore, /**< 90nm with two cores. */
188 kCpumMicroarch_AMD_K8_90nm_AMDV, /**< 90nm with AMD-V (usually) and two cores (usually). */
189 kCpumMicroarch_AMD_K8_65nm, /**< 65nm shrink. */
190 kCpumMicroarch_AMD_K8_End,
191
192 kCpumMicroarch_AMD_K10,
193 kCpumMicroarch_AMD_K10_Lion,
194 kCpumMicroarch_AMD_K10_Llano,
195 kCpumMicroarch_AMD_Bobcat,
196 kCpumMicroarch_AMD_Jaguar,
197
198 kCpumMicroarch_AMD_15h_First,
199 kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
200 kCpumMicroarch_AMD_15h_Piledriver,
201 kCpumMicroarch_AMD_15h_Steamroller, /**< Yet to be released, might have different family. */
202 kCpumMicroarch_AMD_15h_Excavator, /**< Yet to be released, might have different family. */
203 kCpumMicroarch_AMD_15h_Unknown,
204 kCpumMicroarch_AMD_15h_End,
205
206 kCpumMicroarch_AMD_16h_First,
207 kCpumMicroarch_AMD_16h_End,
208
209 kCpumMicroarch_AMD_Zen_First,
210 kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
211 kCpumMicroarch_AMD_Zen_End,
212
213 kCpumMicroarch_AMD_Unknown,
214 kCpumMicroarch_AMD_End,
215
216 kCpumMicroarch_VIA_First,
217 kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
218 kCpumMicroarch_Centaur_C2,
219 kCpumMicroarch_Centaur_C3,
220 kCpumMicroarch_VIA_C3_M2,
221 kCpumMicroarch_VIA_C3_C5A, /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
222 kCpumMicroarch_VIA_C3_C5B, /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
223 kCpumMicroarch_VIA_C3_C5C, /**< 130nm Ezra - C3, Eden ESP. */
224 kCpumMicroarch_VIA_C3_C5N, /**< 130nm Ezra-T - C3. */
225 kCpumMicroarch_VIA_C3_C5XL, /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
226 kCpumMicroarch_VIA_C3_C5P, /**< 130nm Nehemiah+ - C3. */
227 kCpumMicroarch_VIA_C7_C5J, /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
228 kCpumMicroarch_VIA_Isaiah,
229 kCpumMicroarch_VIA_Unknown,
230 kCpumMicroarch_VIA_End,
231
232 kCpumMicroarch_Cyrix_First,
233 kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
234 kCpumMicroarch_Cyrix_M1,
235 kCpumMicroarch_Cyrix_MediaGX,
236 kCpumMicroarch_Cyrix_MediaGXm,
237 kCpumMicroarch_Cyrix_M2,
238 kCpumMicroarch_Cyrix_Unknown,
239 kCpumMicroarch_Cyrix_End,
240
241 kCpumMicroarch_NEC_First,
242 kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
243 kCpumMicroarch_NEC_V30,
244 kCpumMicroarch_NEC_End,
245
246 kCpumMicroarch_Unknown,
247
248 kCpumMicroarch_32BitHack = 0x7fffffff
249} CPUMMICROARCH;
250
251
252/** Predicate macro for catching netburst CPUs. */
253#define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
254 ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)
255
256/** Predicate macro for catching Core7 CPUs. */
257#define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
258 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)
259
260/** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
261#define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
262 ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)
263
264/** Predicate macro for catching AMD Family 0Fh CPUs (aka K8). */
265#define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
266 ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)
267
268/** Predicate macro for catching AMD Family 10H CPUs (aka K10). */
269#define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)
270
271/** Predicate macro for catching AMD Family 11H CPUs (aka Lion). */
272#define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)
273
274/** Predicate macro for catching AMD Family 12H CPUs (aka Llano). */
275#define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)
276
277/** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat). */
278#define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)
279
280/** Predicate macro for catching AMD Family 15H CPUs (Bulldozer and its
281 * descendants). */
282#define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
283 ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)
284
285/** Predicate macro for catching AMD Family 16H CPUs. */
286#define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
287 ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)
288
289
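/*
 * Illustrative sketch (hypothetical usage, not actual VirtualBox code): the
 * predicates above are plain range/equality checks on CPUMMICROARCH values,
 * for example:
 *
 * @code
 *     CPUMMICROARCH const enmMicroarch = kCpumMicroarch_Intel_Core7_Haswell;
 *     bool const fIsCore7    = CPUMMICROARCH_IS_INTEL_CORE7(enmMicroarch);    // true
 *     bool const fIsNetburst = CPUMMICROARCH_IS_INTEL_NETBURST(enmMicroarch); // false
 *     bool const fIsAmdK8    = CPUMMICROARCH_IS_AMD_FAM_0FH(enmMicroarch);    // false
 * @endcode
 */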
290
291/**
292 * CPUID leaf.
293 *
294 * @remarks This structure is used by the patch manager and is therefore
295 * more or less set in stone.
296 */
297typedef struct CPUMCPUIDLEAF
298{
299 /** The leaf number. */
300 uint32_t uLeaf;
301 /** The sub-leaf number. */
302 uint32_t uSubLeaf;
303 /** Sub-leaf mask. This is 0 when sub-leaves aren't used. */
304 uint32_t fSubLeafMask;
305
306 /** The EAX value. */
307 uint32_t uEax;
308 /** The EBX value. */
309 uint32_t uEbx;
310 /** The ECX value. */
311 uint32_t uEcx;
312 /** The EDX value. */
313 uint32_t uEdx;
314
315 /** Flags. */
316 uint32_t fFlags;
317} CPUMCPUIDLEAF;
318#ifndef VBOX_FOR_DTRACE_LIB
319AssertCompileSize(CPUMCPUIDLEAF, 32);
320#endif
321/** Pointer to a CPUID leaf. */
322typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
323/** Pointer to a const CPUID leaf. */
324typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;
325
326/** @name CPUMCPUIDLEAF::fFlags
327 * @{ */
328/** Indicates a working Intel leaf 0xb where the lower 8 ECX bits are not
329 * modified and EDX contains the extended APIC ID. */
330#define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES RT_BIT_32(0)
331/** The leaf contains an APIC ID that needs changing to that of the current CPU. */
332#define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID RT_BIT_32(1)
334/** The leaf contains an OSXSAVE bit which needs individual handling on each CPU. */
334#define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE RT_BIT_32(2)
335/** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
336#define CPUMCPUIDLEAF_F_CONTAINS_APIC RT_BIT_32(3)
337/** Mask of the valid flags. */
338#define CPUMCPUIDLEAF_F_VALID_MASK UINT32_C(0xf)
339/** @} */
340
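/*
 * Illustrative sketch (hypothetical register values, not actual VirtualBox
 * code): a CPUMCPUIDLEAF entry for standard leaf 1 might be filled in roughly
 * like this.
 *
 * @code
 *     CPUMCPUIDLEAF Leaf;
 *     Leaf.uLeaf        = UINT32_C(0x00000001);
 *     Leaf.uSubLeaf     = 0;
 *     Leaf.fSubLeafMask = 0;                      // leaf 1 has no sub-leaves
 *     Leaf.uEax         = UINT32_C(0x000306c3);   // made-up family/model/stepping
 *     Leaf.uEbx         = 0;
 *     Leaf.uEcx         = 0;
 *     Leaf.uEdx         = 0;
 *     Leaf.fFlags       = CPUMCPUIDLEAF_F_CONTAINS_APIC_ID   // EBX[31:24] holds the APIC ID
 *                       | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE   // ECX carries the OSXSAVE bit
 *                       | CPUMCPUIDLEAF_F_CONTAINS_APIC;     // EDX APIC bit follows APICBASE.EN
 * @endcode
 */
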
341/**
342 * Method used to deal with unknown CPUID leaves.
343 * @remarks Used in patch code.
344 */
345typedef enum CPUMUNKNOWNCPUID
346{
347 /** Invalid zero value. */
348 CPUMUNKNOWNCPUID_INVALID = 0,
349 /** Use given default values (DefCpuId). */
350 CPUMUNKNOWNCPUID_DEFAULTS,
351 /** Return the last standard leaf.
352 * Intel Sandy Bridge has been observed doing this. */
353 CPUMUNKNOWNCPUID_LAST_STD_LEAF,
354 /** Return the last standard leaf, with ecx observed.
355 * Intel Sandy Bridge has been observed doing this. */
356 CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
 357 /** The register values are passed through unmodified. */
 358 CPUMUNKNOWNCPUID_PASSTHRU,
 359 /** End of valid values. */
360 CPUMUNKNOWNCPUID_END,
361 /** Ensure 32-bit type. */
362 CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
363} CPUMUNKNOWNCPUID;
364/** Pointer to unknown CPUID leaf method. */
365typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;
366
367
368/**
369 * MSR read functions.
370 */
371typedef enum CPUMMSRRDFN
372{
373 /** Invalid zero value. */
374 kCpumMsrRdFn_Invalid = 0,
375 /** Return the CPUMMSRRANGE::uValue. */
376 kCpumMsrRdFn_FixedValue,
377 /** Alias to the MSR range starting at the MSR given by
378 * CPUMMSRRANGE::uValue. Must be used in pair with
379 * kCpumMsrWrFn_MsrAlias. */
380 kCpumMsrRdFn_MsrAlias,
 381 /** Write-only register; GP(0) all read attempts. */
382 kCpumMsrRdFn_WriteOnly,
383
384 kCpumMsrRdFn_Ia32P5McAddr,
385 kCpumMsrRdFn_Ia32P5McType,
386 kCpumMsrRdFn_Ia32TimestampCounter,
387 kCpumMsrRdFn_Ia32PlatformId, /**< Takes real CPU value for reference. */
388 kCpumMsrRdFn_Ia32ApicBase,
389 kCpumMsrRdFn_Ia32FeatureControl,
390 kCpumMsrRdFn_Ia32BiosSignId, /**< Range value returned. */
391 kCpumMsrRdFn_Ia32SmmMonitorCtl,
392 kCpumMsrRdFn_Ia32PmcN,
393 kCpumMsrRdFn_Ia32MonitorFilterLineSize,
394 kCpumMsrRdFn_Ia32MPerf,
395 kCpumMsrRdFn_Ia32APerf,
396 kCpumMsrRdFn_Ia32MtrrCap, /**< Takes real CPU value for reference. */
397 kCpumMsrRdFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
398 kCpumMsrRdFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
399 kCpumMsrRdFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
400 kCpumMsrRdFn_Ia32MtrrDefType,
401 kCpumMsrRdFn_Ia32Pat,
402 kCpumMsrRdFn_Ia32SysEnterCs,
403 kCpumMsrRdFn_Ia32SysEnterEsp,
404 kCpumMsrRdFn_Ia32SysEnterEip,
405 kCpumMsrRdFn_Ia32McgCap,
406 kCpumMsrRdFn_Ia32McgStatus,
407 kCpumMsrRdFn_Ia32McgCtl,
408 kCpumMsrRdFn_Ia32DebugCtl,
409 kCpumMsrRdFn_Ia32SmrrPhysBase,
410 kCpumMsrRdFn_Ia32SmrrPhysMask,
411 kCpumMsrRdFn_Ia32PlatformDcaCap,
412 kCpumMsrRdFn_Ia32CpuDcaCap,
413 kCpumMsrRdFn_Ia32Dca0Cap,
414 kCpumMsrRdFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
415 kCpumMsrRdFn_Ia32PerfStatus, /**< Range value returned. */
416 kCpumMsrRdFn_Ia32PerfCtl, /**< Range value returned. */
417 kCpumMsrRdFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
418 kCpumMsrRdFn_Ia32PerfCapabilities, /**< Takes reference value. */
419 kCpumMsrRdFn_Ia32FixedCtrCtrl,
420 kCpumMsrRdFn_Ia32PerfGlobalStatus, /**< Takes reference value. */
421 kCpumMsrRdFn_Ia32PerfGlobalCtrl,
422 kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
423 kCpumMsrRdFn_Ia32PebsEnable,
424 kCpumMsrRdFn_Ia32ClockModulation, /**< Range value returned. */
425 kCpumMsrRdFn_Ia32ThermInterrupt, /**< Range value returned. */
426 kCpumMsrRdFn_Ia32ThermStatus, /**< Range value returned. */
427 kCpumMsrRdFn_Ia32Therm2Ctl, /**< Range value returned. */
428 kCpumMsrRdFn_Ia32MiscEnable, /**< Range value returned. */
429 kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
430 kCpumMsrRdFn_Ia32McNCtl2, /**< Takes register number of start of range. */
431 kCpumMsrRdFn_Ia32DsArea,
432 kCpumMsrRdFn_Ia32TscDeadline,
433 kCpumMsrRdFn_Ia32X2ApicN,
434 kCpumMsrRdFn_Ia32DebugInterface,
435 kCpumMsrRdFn_Ia32VmxBase, /**< Takes real value as reference. */
436 kCpumMsrRdFn_Ia32VmxPinbasedCtls, /**< Takes real value as reference. */
437 kCpumMsrRdFn_Ia32VmxProcbasedCtls, /**< Takes real value as reference. */
438 kCpumMsrRdFn_Ia32VmxExitCtls, /**< Takes real value as reference. */
439 kCpumMsrRdFn_Ia32VmxEntryCtls, /**< Takes real value as reference. */
440 kCpumMsrRdFn_Ia32VmxMisc, /**< Takes real value as reference. */
441 kCpumMsrRdFn_Ia32VmxCr0Fixed0, /**< Takes real value as reference. */
442 kCpumMsrRdFn_Ia32VmxCr0Fixed1, /**< Takes real value as reference. */
443 kCpumMsrRdFn_Ia32VmxCr4Fixed0, /**< Takes real value as reference. */
444 kCpumMsrRdFn_Ia32VmxCr4Fixed1, /**< Takes real value as reference. */
445 kCpumMsrRdFn_Ia32VmxVmcsEnum, /**< Takes real value as reference. */
446 kCpumMsrRdFn_Ia32VmxProcBasedCtls2, /**< Takes real value as reference. */
447 kCpumMsrRdFn_Ia32VmxEptVpidCap, /**< Takes real value as reference. */
448 kCpumMsrRdFn_Ia32VmxTruePinbasedCtls, /**< Takes real value as reference. */
449 kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls, /**< Takes real value as reference. */
450 kCpumMsrRdFn_Ia32VmxTrueExitCtls, /**< Takes real value as reference. */
451 kCpumMsrRdFn_Ia32VmxTrueEntryCtls, /**< Takes real value as reference. */
452 kCpumMsrRdFn_Ia32VmxVmFunc, /**< Takes real value as reference. */
453
454 kCpumMsrRdFn_Amd64Efer,
455 kCpumMsrRdFn_Amd64SyscallTarget,
456 kCpumMsrRdFn_Amd64LongSyscallTarget,
457 kCpumMsrRdFn_Amd64CompSyscallTarget,
458 kCpumMsrRdFn_Amd64SyscallFlagMask,
459 kCpumMsrRdFn_Amd64FsBase,
460 kCpumMsrRdFn_Amd64GsBase,
461 kCpumMsrRdFn_Amd64KernelGsBase,
462 kCpumMsrRdFn_Amd64TscAux,
463
464 kCpumMsrRdFn_IntelEblCrPowerOn,
465 kCpumMsrRdFn_IntelI7CoreThreadCount,
466 kCpumMsrRdFn_IntelP4EbcHardPowerOn,
467 kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
468 kCpumMsrRdFn_IntelP4EbcFrequencyId,
469 kCpumMsrRdFn_IntelP6FsbFrequency, /**< Takes real value as reference. */
470 kCpumMsrRdFn_IntelPlatformInfo,
471 kCpumMsrRdFn_IntelFlexRatio, /**< Takes real value as reference. */
472 kCpumMsrRdFn_IntelPkgCStConfigControl,
473 kCpumMsrRdFn_IntelPmgIoCaptureBase,
474 kCpumMsrRdFn_IntelLastBranchFromToN,
475 kCpumMsrRdFn_IntelLastBranchFromN,
476 kCpumMsrRdFn_IntelLastBranchToN,
477 kCpumMsrRdFn_IntelLastBranchTos,
478 kCpumMsrRdFn_IntelBblCrCtl,
479 kCpumMsrRdFn_IntelBblCrCtl3,
480 kCpumMsrRdFn_IntelI7TemperatureTarget, /**< Range value returned. */
481 kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
482 kCpumMsrRdFn_IntelI7MiscPwrMgmt,
483 kCpumMsrRdFn_IntelP6CrN,
484 kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
485 kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
486 kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
487 kCpumMsrRdFn_IntelI7SandyAesNiCtl,
488 kCpumMsrRdFn_IntelI7TurboRatioLimit, /**< Returns range value. */
489 kCpumMsrRdFn_IntelI7LbrSelect,
490 kCpumMsrRdFn_IntelI7SandyErrorControl,
491 kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
492 kCpumMsrRdFn_IntelI7PowerCtl,
493 kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
494 kCpumMsrRdFn_IntelI7PebsLdLat,
495 kCpumMsrRdFn_IntelI7PkgCnResidencyN, /**< Takes C-state number. */
496 kCpumMsrRdFn_IntelI7CoreCnResidencyN, /**< Takes C-state number. */
497 kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
498 kCpumMsrRdFn_IntelI7SandyVrMiscConfig, /**< Takes real value as reference. */
499 kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */
500 kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN, /**< Takes real value as reference. */
501 kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
502 kCpumMsrRdFn_IntelI7RaplPkgPowerLimit, /**< Takes real value as reference. */
503 kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
504 kCpumMsrRdFn_IntelI7RaplPkgPerfStatus, /**< Takes real value as reference. */
505 kCpumMsrRdFn_IntelI7RaplPkgPowerInfo, /**< Takes real value as reference. */
506 kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */
507 kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
508 kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */
509 kCpumMsrRdFn_IntelI7RaplDramPowerInfo, /**< Takes real value as reference. */
510 kCpumMsrRdFn_IntelI7RaplPp0PowerLimit, /**< Takes real value as reference. */
511 kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
512 kCpumMsrRdFn_IntelI7RaplPp0Policy, /**< Takes real value as reference. */
513 kCpumMsrRdFn_IntelI7RaplPp0PerfStatus, /**< Takes real value as reference. */
514 kCpumMsrRdFn_IntelI7RaplPp1PowerLimit, /**< Takes real value as reference. */
515 kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
516 kCpumMsrRdFn_IntelI7RaplPp1Policy, /**< Takes real value as reference. */
517 kCpumMsrRdFn_IntelI7IvyConfigTdpNominal, /**< Takes real value as reference. */
518 kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1, /**< Takes real value as reference. */
519 kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2, /**< Takes real value as reference. */
520 kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
521 kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
522 kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
523 kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
524 kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
525 kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
526 kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
527 kCpumMsrRdFn_IntelI7UncCBoxConfig,
528 kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
529 kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
530 kCpumMsrRdFn_IntelI7SmiCount,
531 kCpumMsrRdFn_IntelCore2EmttmCrTablesN, /**< Range value returned. */
532 kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
533 kCpumMsrRdFn_IntelCore1ExtConfig,
534 kCpumMsrRdFn_IntelCore1DtsCalControl,
535 kCpumMsrRdFn_IntelCore2PeciControl,
536 kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,
537
538 kCpumMsrRdFn_P6LastBranchFromIp,
539 kCpumMsrRdFn_P6LastBranchToIp,
540 kCpumMsrRdFn_P6LastIntFromIp,
541 kCpumMsrRdFn_P6LastIntToIp,
542
543 kCpumMsrRdFn_AmdFam15hTscRate,
544 kCpumMsrRdFn_AmdFam15hLwpCfg,
545 kCpumMsrRdFn_AmdFam15hLwpCbAddr,
546 kCpumMsrRdFn_AmdFam10hMc4MiscN,
547 kCpumMsrRdFn_AmdK8PerfCtlN,
548 kCpumMsrRdFn_AmdK8PerfCtrN,
549 kCpumMsrRdFn_AmdK8SysCfg, /**< Range value returned. */
550 kCpumMsrRdFn_AmdK8HwCr,
551 kCpumMsrRdFn_AmdK8IorrBaseN,
552 kCpumMsrRdFn_AmdK8IorrMaskN,
553 kCpumMsrRdFn_AmdK8TopOfMemN,
554 kCpumMsrRdFn_AmdK8NbCfg1,
555 kCpumMsrRdFn_AmdK8McXcptRedir,
556 kCpumMsrRdFn_AmdK8CpuNameN,
557 kCpumMsrRdFn_AmdK8HwThermalCtrl, /**< Range value returned. */
558 kCpumMsrRdFn_AmdK8SwThermalCtrl,
559 kCpumMsrRdFn_AmdK8FidVidControl, /**< Range value returned. */
560 kCpumMsrRdFn_AmdK8FidVidStatus, /**< Range value returned. */
561 kCpumMsrRdFn_AmdK8McCtlMaskN,
562 kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
563 kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
564 kCpumMsrRdFn_AmdK8IntPendingMessage,
565 kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
566 kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
567 kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
568 kCpumMsrRdFn_AmdFam10hPStateCurLimit, /**< Returns range value. */
569 kCpumMsrRdFn_AmdFam10hPStateControl, /**< Returns range value. */
570 kCpumMsrRdFn_AmdFam10hPStateStatus, /**< Returns range value. */
 571 kCpumMsrRdFn_AmdFam10hPStateN, /**< Returns range value. This isn't a register index! */
572 kCpumMsrRdFn_AmdFam10hCofVidControl, /**< Returns range value. */
573 kCpumMsrRdFn_AmdFam10hCofVidStatus, /**< Returns range value. */
574 kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
575 kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
576 kCpumMsrRdFn_AmdK8SmmBase,
577 kCpumMsrRdFn_AmdK8SmmAddr,
578 kCpumMsrRdFn_AmdK8SmmMask,
579 kCpumMsrRdFn_AmdK8VmCr,
580 kCpumMsrRdFn_AmdK8IgnNe,
581 kCpumMsrRdFn_AmdK8SmmCtl,
582 kCpumMsrRdFn_AmdK8VmHSavePa,
583 kCpumMsrRdFn_AmdFam10hVmLockKey,
584 kCpumMsrRdFn_AmdFam10hSmmLockKey,
585 kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
586 kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
587 kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
588 kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
589 kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
590 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
591 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
592 kCpumMsrRdFn_AmdK7MicrocodeCtl, /**< Returns range value. */
593 kCpumMsrRdFn_AmdK7ClusterIdMaybe, /**< Returns range value. */
594 kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
595 kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
596 kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
597 kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
598 kCpumMsrRdFn_AmdK8PatchLevel, /**< Returns range value. */
599 kCpumMsrRdFn_AmdK7DebugStatusMaybe,
600 kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
601 kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
602 kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
603 kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
604 kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
605 kCpumMsrRdFn_AmdK7NodeId,
606 kCpumMsrRdFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
607 kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
608 kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
609 kCpumMsrRdFn_AmdK7LoadStoreCfg,
610 kCpumMsrRdFn_AmdK7InstrCacheCfg,
611 kCpumMsrRdFn_AmdK7DataCacheCfg,
612 kCpumMsrRdFn_AmdK7BusUnitCfg,
613 kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
614 kCpumMsrRdFn_AmdFam15hFpuCfg,
615 kCpumMsrRdFn_AmdFam15hDecoderCfg,
616 kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
617 kCpumMsrRdFn_AmdFam15hCombUnitCfg,
618 kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
619 kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
620 kCpumMsrRdFn_AmdFam15hExecUnitCfg,
621 kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
622 kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
623 kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
624 kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
625 kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
626 kCpumMsrRdFn_AmdFam10hIbsOpRip,
627 kCpumMsrRdFn_AmdFam10hIbsOpData,
628 kCpumMsrRdFn_AmdFam10hIbsOpData2,
629 kCpumMsrRdFn_AmdFam10hIbsOpData3,
630 kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
631 kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
632 kCpumMsrRdFn_AmdFam10hIbsCtl,
633 kCpumMsrRdFn_AmdFam14hIbsBrTarget,
634
635 kCpumMsrRdFn_Gim,
636
637 /** End of valid MSR read function indexes. */
638 kCpumMsrRdFn_End
639} CPUMMSRRDFN;
640
641/**
642 * MSR write functions.
643 */
644typedef enum CPUMMSRWRFN
645{
646 /** Invalid zero value. */
647 kCpumMsrWrFn_Invalid = 0,
648 /** Writes are ignored, the fWrGpMask is observed though. */
649 kCpumMsrWrFn_IgnoreWrite,
650 /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
651 kCpumMsrWrFn_ReadOnly,
652 /** Alias to the MSR range starting at the MSR given by
653 * CPUMMSRRANGE::uValue. Must be used in pair with
654 * kCpumMsrRdFn_MsrAlias. */
655 kCpumMsrWrFn_MsrAlias,
656
657 kCpumMsrWrFn_Ia32P5McAddr,
658 kCpumMsrWrFn_Ia32P5McType,
659 kCpumMsrWrFn_Ia32TimestampCounter,
660 kCpumMsrWrFn_Ia32ApicBase,
661 kCpumMsrWrFn_Ia32FeatureControl,
662 kCpumMsrWrFn_Ia32BiosSignId,
663 kCpumMsrWrFn_Ia32BiosUpdateTrigger,
664 kCpumMsrWrFn_Ia32SmmMonitorCtl,
665 kCpumMsrWrFn_Ia32PmcN,
666 kCpumMsrWrFn_Ia32MonitorFilterLineSize,
667 kCpumMsrWrFn_Ia32MPerf,
668 kCpumMsrWrFn_Ia32APerf,
669 kCpumMsrWrFn_Ia32MtrrPhysBaseN, /**< Takes register number. */
670 kCpumMsrWrFn_Ia32MtrrPhysMaskN, /**< Takes register number. */
671 kCpumMsrWrFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */
672 kCpumMsrWrFn_Ia32MtrrDefType,
673 kCpumMsrWrFn_Ia32Pat,
674 kCpumMsrWrFn_Ia32SysEnterCs,
675 kCpumMsrWrFn_Ia32SysEnterEsp,
676 kCpumMsrWrFn_Ia32SysEnterEip,
677 kCpumMsrWrFn_Ia32McgStatus,
678 kCpumMsrWrFn_Ia32McgCtl,
679 kCpumMsrWrFn_Ia32DebugCtl,
680 kCpumMsrWrFn_Ia32SmrrPhysBase,
681 kCpumMsrWrFn_Ia32SmrrPhysMask,
682 kCpumMsrWrFn_Ia32PlatformDcaCap,
683 kCpumMsrWrFn_Ia32Dca0Cap,
684 kCpumMsrWrFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */
685 kCpumMsrWrFn_Ia32PerfStatus,
686 kCpumMsrWrFn_Ia32PerfCtl,
687 kCpumMsrWrFn_Ia32FixedCtrN, /**< Takes register number of start of range. */
688 kCpumMsrWrFn_Ia32PerfCapabilities,
689 kCpumMsrWrFn_Ia32FixedCtrCtrl,
690 kCpumMsrWrFn_Ia32PerfGlobalStatus,
691 kCpumMsrWrFn_Ia32PerfGlobalCtrl,
692 kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
693 kCpumMsrWrFn_Ia32PebsEnable,
694 kCpumMsrWrFn_Ia32ClockModulation,
695 kCpumMsrWrFn_Ia32ThermInterrupt,
696 kCpumMsrWrFn_Ia32ThermStatus,
697 kCpumMsrWrFn_Ia32Therm2Ctl,
698 kCpumMsrWrFn_Ia32MiscEnable,
699 kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */
700 kCpumMsrWrFn_Ia32McNCtl2, /**< Takes register number of start of range. */
701 kCpumMsrWrFn_Ia32DsArea,
702 kCpumMsrWrFn_Ia32TscDeadline,
703 kCpumMsrWrFn_Ia32X2ApicN,
704 kCpumMsrWrFn_Ia32DebugInterface,
705
706 kCpumMsrWrFn_Amd64Efer,
707 kCpumMsrWrFn_Amd64SyscallTarget,
708 kCpumMsrWrFn_Amd64LongSyscallTarget,
709 kCpumMsrWrFn_Amd64CompSyscallTarget,
710 kCpumMsrWrFn_Amd64SyscallFlagMask,
711 kCpumMsrWrFn_Amd64FsBase,
712 kCpumMsrWrFn_Amd64GsBase,
713 kCpumMsrWrFn_Amd64KernelGsBase,
714 kCpumMsrWrFn_Amd64TscAux,
715 kCpumMsrWrFn_IntelEblCrPowerOn,
716 kCpumMsrWrFn_IntelP4EbcHardPowerOn,
717 kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
718 kCpumMsrWrFn_IntelP4EbcFrequencyId,
719 kCpumMsrWrFn_IntelFlexRatio,
720 kCpumMsrWrFn_IntelPkgCStConfigControl,
721 kCpumMsrWrFn_IntelPmgIoCaptureBase,
722 kCpumMsrWrFn_IntelLastBranchFromToN,
723 kCpumMsrWrFn_IntelLastBranchFromN,
724 kCpumMsrWrFn_IntelLastBranchToN,
725 kCpumMsrWrFn_IntelLastBranchTos,
726 kCpumMsrWrFn_IntelBblCrCtl,
727 kCpumMsrWrFn_IntelBblCrCtl3,
728 kCpumMsrWrFn_IntelI7TemperatureTarget,
729 kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
730 kCpumMsrWrFn_IntelI7MiscPwrMgmt,
731 kCpumMsrWrFn_IntelP6CrN,
732 kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
733 kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
734 kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
735 kCpumMsrWrFn_IntelI7SandyAesNiCtl,
736 kCpumMsrWrFn_IntelI7TurboRatioLimit,
737 kCpumMsrWrFn_IntelI7LbrSelect,
738 kCpumMsrWrFn_IntelI7SandyErrorControl,
739 kCpumMsrWrFn_IntelI7PowerCtl,
740 kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
741 kCpumMsrWrFn_IntelI7PebsLdLat,
742 kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
743 kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
744 kCpumMsrWrFn_IntelI7SandyRaplPowerUnit, /**< R/O but found writable bits on a Silvermont CPU here. */
745 kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
746 kCpumMsrWrFn_IntelI7SandyPkgC2Residency, /**< R/O but found writable bits on a Silvermont CPU here. */
747 kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
748 kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
749 kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
750 kCpumMsrWrFn_IntelI7RaplPp0Policy,
751 kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
752 kCpumMsrWrFn_IntelI7RaplPp1Policy,
753 kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
754 kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
755 kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
756 kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
757 kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
758 kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
759 kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
760 kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
761 kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
762 kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
763 kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
764 kCpumMsrWrFn_IntelCore1ExtConfig,
765 kCpumMsrWrFn_IntelCore1DtsCalControl,
766 kCpumMsrWrFn_IntelCore2PeciControl,
767
768 kCpumMsrWrFn_P6LastIntFromIp,
769 kCpumMsrWrFn_P6LastIntToIp,
770
771 kCpumMsrWrFn_AmdFam15hTscRate,
772 kCpumMsrWrFn_AmdFam15hLwpCfg,
773 kCpumMsrWrFn_AmdFam15hLwpCbAddr,
774 kCpumMsrWrFn_AmdFam10hMc4MiscN,
775 kCpumMsrWrFn_AmdK8PerfCtlN,
776 kCpumMsrWrFn_AmdK8PerfCtrN,
777 kCpumMsrWrFn_AmdK8SysCfg,
778 kCpumMsrWrFn_AmdK8HwCr,
779 kCpumMsrWrFn_AmdK8IorrBaseN,
780 kCpumMsrWrFn_AmdK8IorrMaskN,
781 kCpumMsrWrFn_AmdK8TopOfMemN,
782 kCpumMsrWrFn_AmdK8NbCfg1,
783 kCpumMsrWrFn_AmdK8McXcptRedir,
784 kCpumMsrWrFn_AmdK8CpuNameN,
785 kCpumMsrWrFn_AmdK8HwThermalCtrl,
786 kCpumMsrWrFn_AmdK8SwThermalCtrl,
787 kCpumMsrWrFn_AmdK8FidVidControl,
788 kCpumMsrWrFn_AmdK8McCtlMaskN,
789 kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
790 kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
791 kCpumMsrWrFn_AmdK8IntPendingMessage,
792 kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
793 kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
794 kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
795 kCpumMsrWrFn_AmdFam10hPStateControl,
796 kCpumMsrWrFn_AmdFam10hPStateStatus,
797 kCpumMsrWrFn_AmdFam10hPStateN,
798 kCpumMsrWrFn_AmdFam10hCofVidControl,
799 kCpumMsrWrFn_AmdFam10hCofVidStatus,
800 kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
801 kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
802 kCpumMsrWrFn_AmdK8SmmBase,
803 kCpumMsrWrFn_AmdK8SmmAddr,
804 kCpumMsrWrFn_AmdK8SmmMask,
805 kCpumMsrWrFn_AmdK8VmCr,
806 kCpumMsrWrFn_AmdK8IgnNe,
807 kCpumMsrWrFn_AmdK8SmmCtl,
808 kCpumMsrWrFn_AmdK8VmHSavePa,
809 kCpumMsrWrFn_AmdFam10hVmLockKey,
810 kCpumMsrWrFn_AmdFam10hSmmLockKey,
811 kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
812 kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
813 kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
814 kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
815 kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
816 kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
817 kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
818 kCpumMsrWrFn_AmdK7MicrocodeCtl,
819 kCpumMsrWrFn_AmdK7ClusterIdMaybe,
820 kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
821 kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
822 kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
823 kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
824 kCpumMsrWrFn_AmdK8PatchLoader,
825 kCpumMsrWrFn_AmdK7DebugStatusMaybe,
826 kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
827 kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
828 kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
829 kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
830 kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
831 kCpumMsrWrFn_AmdK7NodeId,
832 kCpumMsrWrFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
833 kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
834 kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
835 kCpumMsrWrFn_AmdK7LoadStoreCfg,
836 kCpumMsrWrFn_AmdK7InstrCacheCfg,
837 kCpumMsrWrFn_AmdK7DataCacheCfg,
838 kCpumMsrWrFn_AmdK7BusUnitCfg,
839 kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
840 kCpumMsrWrFn_AmdFam15hFpuCfg,
841 kCpumMsrWrFn_AmdFam15hDecoderCfg,
842 kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
843 kCpumMsrWrFn_AmdFam15hCombUnitCfg,
844 kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
845 kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
846 kCpumMsrWrFn_AmdFam15hExecUnitCfg,
847 kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
848 kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
849 kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
850 kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
851 kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
852 kCpumMsrWrFn_AmdFam10hIbsOpRip,
853 kCpumMsrWrFn_AmdFam10hIbsOpData,
854 kCpumMsrWrFn_AmdFam10hIbsOpData2,
855 kCpumMsrWrFn_AmdFam10hIbsOpData3,
856 kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
857 kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
858 kCpumMsrWrFn_AmdFam10hIbsCtl,
859 kCpumMsrWrFn_AmdFam14hIbsBrTarget,
860
861 kCpumMsrWrFn_Gim,
862
863 /** End of valid MSR write function indexes. */
864 kCpumMsrWrFn_End
865} CPUMMSRWRFN;
866
867/**
868 * MSR range.
869 */
870typedef struct CPUMMSRRANGE
871{
872 /** The first MSR. [0] */
873 uint32_t uFirst;
874 /** The last MSR. [4] */
875 uint32_t uLast;
876 /** The read function (CPUMMSRRDFN). [8] */
877 uint16_t enmRdFn;
878 /** The write function (CPUMMSRWRFN). [10] */
879 uint16_t enmWrFn;
880 /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
881 * UINT16_MAX if not used by the read and write functions. [12] */
882 uint16_t offCpumCpu;
883 /** Reserved for future hacks. [14] */
884 uint16_t fReserved;
885 /** The init/read value. [16]
886 * When enmRdFn is kCpumMsrRdFn_INIT_VALUE, this is the value returned on RDMSR.
887 * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
888 * offset into CPUM. */
889 uint64_t uValue;
890 /** The bits to ignore when writing. [24] */
891 uint64_t fWrIgnMask;
892 /** The bits that will cause a GP(0) when writing. [32]
893 * This is always checked prior to calling the write function. Using
894 * UINT64_MAX effectively marks the MSR as read-only. */
895 uint64_t fWrGpMask;
896 /** The register name, if applicable. [40] */
897 char szName[56];
898
899#ifdef VBOX_WITH_STATISTICS
900 /** The number of reads. */
901 STAMCOUNTER cReads;
902 /** The number of writes. */
903 STAMCOUNTER cWrites;
904 /** The number of times ignored bits were written. */
905 STAMCOUNTER cIgnoredBits;
906 /** The number of GPs generated. */
907 STAMCOUNTER cGps;
908#endif
909} CPUMMSRRANGE;
910#ifndef VBOX_FOR_DTRACE_LIB
911# ifdef VBOX_WITH_STATISTICS
912AssertCompileSize(CPUMMSRRANGE, 128);
913# else
914AssertCompileSize(CPUMMSRRANGE, 96);
915# endif
916#endif
917/** Pointer to an MSR range. */
918typedef CPUMMSRRANGE *PCPUMMSRRANGE;
919/** Pointer to a const MSR range. */
920typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
921
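/*
 * Illustrative sketch (hypothetical, not actual VirtualBox code): an MSR range
 * couples an MSR interval with a read worker (CPUMMSRRDFN) and a write worker
 * (CPUMMSRWRFN).  A single fixed-value, write-ignoring MSR could be described
 * roughly like this (szName handling omitted for brevity):
 *
 * @code
 *     CPUMMSRRANGE Range;
 *     Range.uFirst     = UINT32_C(0x0000008b);    // assumed example MSR number
 *     Range.uLast      = UINT32_C(0x0000008b);    // single-MSR range
 *     Range.enmRdFn    = kCpumMsrRdFn_FixedValue;
 *     Range.enmWrFn    = kCpumMsrWrFn_IgnoreWrite;
 *     Range.offCpumCpu = UINT16_MAX;              // no CPUMCPU backing field
 *     Range.fReserved  = 0;
 *     Range.uValue     = 0;                       // the value returned on RDMSR
 *     Range.fWrIgnMask = UINT64_MAX;              // ignore every written bit
 *     Range.fWrGpMask  = 0;                       // never raise GP(0) on writes
 * @endcode
 */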
922
923/**
924 * CPU features and quirks.
925 * This is mostly exploded CPUID info.
926 */
927typedef struct CPUMFEATURES
928{
929 /** The CPU vendor (CPUMCPUVENDOR). */
930 uint8_t enmCpuVendor;
931 /** The CPU family. */
932 uint8_t uFamily;
933 /** The CPU model. */
934 uint8_t uModel;
935 /** The CPU stepping. */
936 uint8_t uStepping;
937 /** The microarchitecture. */
938#ifndef VBOX_FOR_DTRACE_LIB
939 CPUMMICROARCH enmMicroarch;
940#else
941 uint32_t enmMicroarch;
942#endif
 943 /** The maximum physical address width of the CPU. */
944 uint8_t cMaxPhysAddrWidth;
945 /** Alignment padding. */
946 uint8_t abPadding[1];
947 /** Max size of the extended state (or FPU state if no XSAVE). */
948 uint16_t cbMaxExtendedState;
949
950 /** Supports MSRs. */
951 uint32_t fMsr : 1;
952 /** Supports the page size extension (4/2 MB pages). */
953 uint32_t fPse : 1;
954 /** Supports 36-bit page size extension (4 MB pages can map memory above
955 * 4GB). */
956 uint32_t fPse36 : 1;
957 /** Supports physical address extension (PAE). */
958 uint32_t fPae : 1;
959 /** Page attribute table (PAT) support (page level cache control). */
960 uint32_t fPat : 1;
961 /** Supports the FXSAVE and FXRSTOR instructions. */
962 uint32_t fFxSaveRstor : 1;
963 /** Supports the XSAVE and XRSTOR instructions. */
964 uint32_t fXSaveRstor : 1;
965 /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
966 uint32_t fOpSysXSaveRstor : 1;
967 /** Supports MMX. */
968 uint32_t fMmx : 1;
969 /** Supports AMD extensions to MMX instructions. */
970 uint32_t fAmdMmxExts : 1;
971 /** Supports SSE. */
972 uint32_t fSse : 1;
973 /** Supports SSE2. */
974 uint32_t fSse2 : 1;
975 /** Supports SSE3. */
976 uint32_t fSse3 : 1;
977 /** Supports SSSE3. */
978 uint32_t fSsse3 : 1;
979 /** Supports SSE4.1. */
980 uint32_t fSse41 : 1;
981 /** Supports SSE4.2. */
982 uint32_t fSse42 : 1;
983 /** Supports AVX. */
984 uint32_t fAvx : 1;
985 /** Supports AVX2. */
986 uint32_t fAvx2 : 1;
987 /** Supports AVX512 foundation. */
988 uint32_t fAvx512Foundation : 1;
989 /** Supports RDTSC. */
990 uint32_t fTsc : 1;
991 /** Intel SYSENTER/SYSEXIT support */
992 uint32_t fSysEnter : 1;
993 /** First generation APIC. */
994 uint32_t fApic : 1;
995 /** Second generation APIC. */
996 uint32_t fX2Apic : 1;
997 /** Hypervisor present. */
998 uint32_t fHypervisorPresent : 1;
999 /** MWAIT & MONITOR instructions supported. */
1000 uint32_t fMonitorMWait : 1;
1001 /** MWAIT Extensions present. */
1002 uint32_t fMWaitExtensions : 1;
1003 /** Supports CMPXCHG16B in 64-bit mode. */
1004 uint32_t fMovCmpXchg16b : 1;
1005 /** Supports CLFLUSH. */
1006 uint32_t fClFlush : 1;
1007 /** Supports CLFLUSHOPT. */
1008 uint32_t fClFlushOpt : 1;
1009
1010 /** Supports AMD 3DNow instructions. */
1011 uint32_t f3DNow : 1;
1012 /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
1013 uint32_t f3DNowPrefetch : 1;
1014
1015 /** AMD64: Supports long mode. */
1016 uint32_t fLongMode : 1;
1017 /** AMD64: SYSCALL/SYSRET support. */
1018 uint32_t fSysCall : 1;
1019 /** AMD64: No-execute page table bit. */
1020 uint32_t fNoExecute : 1;
1021 /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
1022 uint32_t fLahfSahf : 1;
1023 /** AMD64: Supports RDTSCP. */
1024 uint32_t fRdTscP : 1;
1025 /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
1026 uint32_t fMovCr8In32Bit : 1;
1027 /** AMD64: Supports XOP (similar to VEX3/AVX). */
1028 uint32_t fXop : 1;
1029
1030 /** Indicates that FPU instruction and data pointers may leak.
 1031 * This generally applies to recent AMD CPUs, where the FPU IP and DP pointers
 1032 * are only saved and restored if an exception is pending. */
1033 uint32_t fLeakyFxSR : 1;
1034
1035 /** AMD64: Supports AMD SVM. */
1036 uint32_t fSvm : 1;
1037
1038 /** Support for Intel VMX. */
1039 uint32_t fVmx : 1;
1040
1041 /** Alignment padding / reserved for future use. */
1042 uint32_t fPadding : 23;
1043
1044 /** SVM: Supports Nested-paging. */
1045 uint32_t fSvmNestedPaging : 1;
 1046 /** SVM: Supports LBR (Last Branch Record) virtualization. */
1047 uint32_t fSvmLbrVirt : 1;
1048 /** SVM: Supports SVM lock. */
1049 uint32_t fSvmSvmLock : 1;
1050 /** SVM: Supports Next RIP save. */
1051 uint32_t fSvmNextRipSave : 1;
1052 /** SVM: Supports TSC rate MSR. */
1053 uint32_t fSvmTscRateMsr : 1;
1054 /** SVM: Supports VMCB clean bits. */
1055 uint32_t fSvmVmcbClean : 1;
1056 /** SVM: Supports Flush-by-ASID. */
1057 uint32_t fSvmFlusbByAsid : 1;
1058 /** SVM: Supports decode assist. */
1059 uint32_t fSvmDecodeAssist : 1;
1060 /** SVM: Supports Pause filter. */
1061 uint32_t fSvmPauseFilter : 1;
1062 /** SVM: Supports Pause filter threshold. */
1063 uint32_t fSvmPauseFilterThreshold : 1;
1064 /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
1065 uint32_t fSvmAvic : 1;
1066 /** SVM: Padding / reserved for future features. */
1067 uint32_t fSvmPadding0 : 21;
1068 /** SVM: Maximum supported ASID. */
1069 uint32_t uSvmMaxAsid;
1070
1071 /** @todo VMX features. */
1072 uint32_t auPadding[1];
1073} CPUMFEATURES;
1074#ifndef VBOX_FOR_DTRACE_LIB
1075AssertCompileSize(CPUMFEATURES, 32);
1076#endif
1077/** Pointer to a CPU feature structure. */
1078typedef CPUMFEATURES *PCPUMFEATURES;
1079/** Pointer to a const CPU feature structure. */
1080typedef CPUMFEATURES const *PCCPUMFEATURES;
1081
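/*
 * Illustrative sketch (hypothetical usage, not actual VirtualBox code):
 * consumers of CPUMFEATURES typically just test the exploded bit-fields,
 * e.g. given a PCCPUMFEATURES pFeatures:
 *
 * @code
 *     bool const fCanUseAvx = pFeatures->fXSaveRstor
 *                          && pFeatures->fOpSysXSaveRstor
 *                          && pFeatures->fAvx;
 *     bool const fHasAmdV   = pFeatures->fSvm;    // uSvmMaxAsid then gives the max ASID
 * @endcode
 */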
1082
1083#ifndef VBOX_FOR_DTRACE_LIB
1084
1085/** @name Guest Register Getters.
1086 * @{ */
1087VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR);
1088VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
1089VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden);
1090VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu);
1091VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
1092VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu);
1093VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu);
1094VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu);
1095VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu);
1096VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu);
1097VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue);
1098VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu);
1099VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu);
1100VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu);
1101VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu);
1102VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu);
1103VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu);
1104VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu);
1105VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu);
1106VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu);
1107VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu);
1108VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu);
1109VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu);
1110VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu);
1111VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu);
1112VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu);
1113VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu);
1114VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu);
1115VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu);
1116VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu);
1117VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu);
1118VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu);
1119VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu);
1120VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu);
1121VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu);
1122VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu);
1123VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
1124VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t iLeaf, uint32_t iSubLeaf,
1125 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
1126VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu);
1127VMMDECL(VBOXSTRICTRC) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue);
1128VMMDECL(VBOXSTRICTRC) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue);
1129VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM);
1130VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM);
1131/** @} */
1132
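/*
 * Illustrative sketch (hypothetical usage, not actual VirtualBox code):
 * querying guest state through the getters above, assuming a valid PVMCPU:
 *
 * @code
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);  // leaf 1, sub-leaf 0
 *
 *     uint64_t const uEfer  = CPUMGetGuestEFER(pVCpu);
 *     uint64_t       uValue = 0;
 *     VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, UINT32_C(0xc0000080), &uValue); // EFER MSR
 * @endcode
 */
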
1133/** @name Guest Register Setters.
1134 * @{ */
1135VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1136VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
1137VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
1138VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
1139VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0);
1140VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
1141VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
1142VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
1143VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0);
1144VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1);
1145VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2);
1146VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3);
1147VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
1148VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7);
1149VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value);
1150VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue);
1151VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
1152VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
1153VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
1154VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
1155VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
1156VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
1157VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
1158VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
1159VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
1160VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
1161VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
1162VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
1163VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
1164VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
1165VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
1166VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
1167VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
1168VMMR3_INT_DECL(void) CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1169VMMR3_INT_DECL(void) CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1170VMMR3_INT_DECL(bool) CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
1171VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
1172VMMDECL(void) CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
1173VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu);
1174VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg);
1175VMMR0_INT_DECL(void) CPUMR0SetGuestTscAux(PVMCPU pVCpu, uint64_t uValue);
1176VMMR0_INT_DECL(uint64_t) CPUMR0GetGuestTscAux(PVMCPU pVCpu);
1177/** @} */
1178
1179
1180/** @name Misc Guest Predicate Functions.
1181 * @{ */
1182VMMDECL(bool) CPUMIsGuestIn16BitCode(PVMCPU pVCpu);
1183VMMDECL(bool) CPUMIsGuestIn32BitCode(PVMCPU pVCpu);
1184VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
1185VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu);
1186VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu);
1187VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu);
1188VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu);
1189VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu);
1190VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu);
1191VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu);
1192VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu);
1193VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu);
1194VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu);
1195VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu);
1196/** @} */
1197
1198/** @name Nested Hardware-Virtualization Helpers.
1199 * @{ */
1200VMM_INT_DECL(bool) CPUMCanSvmNstGstTakePhysIntr(PCCPUMCTX pCtx);
1201VMM_INT_DECL(bool) CPUMCanSvmNstGstTakeVirtIntr(PCCPUMCTX pCtx);
1202VMM_INT_DECL(uint8_t) CPUMGetSvmNstGstInterrupt(PCCPUMCTX pCtx);
1203VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PCPUMCTX pCtx);
1204VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
1205/** @} */
1206
1207#ifndef VBOX_WITHOUT_UNNAMED_UNIONS
1208
1209/**
1210 * Tests if the guest is running in real mode or not.
1211 *
1212 * @returns true if in real mode, otherwise false.
1213 * @param pCtx Current CPU context.
1214 */
1215DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCPUMCTX pCtx)
1216{
1217 return !(pCtx->cr0 & X86_CR0_PE);
1218}
1219
1220/**
1221 * Tests if the guest is running in real or virtual 8086 mode.
1222 *
1223 * @returns @c true if it is, @c false if not.
1224 * @param pCtx Current CPU context.
1225 */
1226DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCPUMCTX pCtx)
1227{
1228 return !(pCtx->cr0 & X86_CR0_PE)
1229 || pCtx->eflags.Bits.u1VM; /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
1230}
1231
1232/**
1233 * Tests if the guest is running in virtual 8086 mode.
1234 *
1235 * @returns @c true if it is, @c false if not.
1236 * @param pCtx Current CPU context.
1237 */
1238DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCPUMCTX pCtx)
1239{
1240 return (pCtx->eflags.Bits.u1VM == 1);
1241}
1242
1243/**
 1244 * Tests if the guest is running in paged protected mode or not.
1245 *
1246 * @returns true if in paged protected mode, otherwise false.
1247 * @param pCtx Current CPU context.
1248 */
1249DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
1250{
1251 return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1252}
1253
1254/**
1255 * Tests if the guest is running in long mode or not.
1256 *
1257 * @returns true if in long mode, otherwise false.
1258 * @param pCtx Current CPU context.
1259 */
1260DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCPUMCTX pCtx)
1261{
1262 return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1263}
1264
1265VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
1266
1267/**
 1268 * Tests if the guest is running in 64-bit mode or not.
 1269 *
 1270 * @returns true if in 64-bit protected mode, otherwise false.
1271 * @param pCtx Current CPU context.
1272 */
1273DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
1274{
1275 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
1276 return false;
1277 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
1278 return CPUMIsGuestIn64BitCodeSlow(pCtx);
1279 return pCtx->cs.Attr.n.u1Long;
1280}
1281
1282/**
1283 * Tests if the guest has paging enabled or not.
1284 *
1285 * @returns true if paging is enabled, otherwise false.
1286 * @param pCtx Current CPU context.
1287 */
1288DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCPUMCTX pCtx)
1289{
1290 return !!(pCtx->cr0 & X86_CR0_PG);
1291}
1292
1293/**
1294 * Tests if the guest is running in PAE mode or not.
1295 *
1296 * @returns true if in PAE mode, otherwise false.
1297 * @param pCtx Current CPU context.
1298 */
1299DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCPUMCTX pCtx)
1300{
1301 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1302 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1303 return ( (pCtx->cr4 & X86_CR4_PAE)
1304 && CPUMIsGuestPagingEnabledEx(pCtx)
1305 && !(pCtx->msrEFER & MSR_K6_EFER_LMA));
1306}
1307
1308/**
 1309 * Tests if the guest has AMD SVM enabled or not.
 1310 *
 1311 * @returns true if SVM is enabled, otherwise false.
1312 * @param pCtx Current CPU context.
1313 */
1314DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
1315{
1316 return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
1317}
1318
1319#ifndef IN_RC
1320/**
1321 * Checks if the guest VMCB has the specified ctrl/instruction intercept active.
1322 *
 1323 * @returns @c true if the intercept is set, @c false otherwise.
1324 * @param pCtx Pointer to the context.
1325 * @param fIntercept The SVM control/instruction intercept,
1326 * see SVM_CTRL_INTERCEPT_*.
1327 */
1328DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCCPUMCTX pCtx, uint64_t fIntercept)
1329{
1330 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1331 return pVmcb && (pVmcb->ctrl.u64InterceptCtrl & fIntercept);
1332}
1333
1334/**
1335 * Checks if the guest VMCB has the specified CR read intercept
1336 * active.
1337 *
 1338 * @returns @c true if the intercept is set, @c false otherwise.
1339 * @param pCtx Pointer to the context.
1340 * @param uCr The CR register number (0 to 15).
1341 */
1342DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCCPUMCTX pCtx, uint8_t uCr)
1343{
1344 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1345 return pVmcb && (pVmcb->ctrl.u16InterceptRdCRx & (1 << uCr));
1346}
1347
1348/**
1349 * Checks if the guest VMCB has the specified CR write intercept
1350 * active.
1351 *
 1352 * @returns @c true if the intercept is set, @c false otherwise.
1353 * @param pCtx Pointer to the context.
1354 * @param uCr The CR register number (0 to 15).
1355 */
1356DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCCPUMCTX pCtx, uint8_t uCr)
1357{
1358 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1359 return pVmcb && (pVmcb->ctrl.u16InterceptWrCRx & (1 << uCr));
1360}
1361
1362/**
1363 * Checks if the guest VMCB has the specified DR read intercept
1364 * active.
1365 *
 1366 * @returns @c true if the intercept is set, @c false otherwise.
1367 * @param pCtx Pointer to the context.
1368 * @param uDr The DR register number (0 to 15).
1369 */
1370DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCCPUMCTX pCtx, uint8_t uDr)
1371{
1372 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1373 return pVmcb && (pVmcb->ctrl.u16InterceptRdDRx & (1 << uDr));
1374}
1375
1376/**
1377 * Checks if the guest VMCB has the specified DR write intercept
1378 * active.
1379 *
 1380 * @returns @c true if the intercept is set, @c false otherwise.
1381 * @param pCtx Pointer to the context.
1382 * @param uDr The DR register number (0 to 15).
1383 */
1384DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCCPUMCTX pCtx, uint8_t uDr)
1385{
1386 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1387 return pVmcb && (pVmcb->ctrl.u16InterceptWrDRx & (1 << uDr));
1388}
1389
1390/**
1391 * Checks if the guest VMCB has the specified exception
1392 * intercept active.
1393 *
 1394 * @returns true if the intercept is active, false otherwise.
1395 * @param pCtx Pointer to the context.
1396 * @param uVector The exception / interrupt vector.
1397 */
1398DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCCPUMCTX pCtx, uint8_t uVector)
1399{
1400 Assert(uVector < 32);
1401 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1402 return pVmcb && (pVmcb->ctrl.u32InterceptXcpt & (UINT32_C(1) << uVector));
1403}
1404#endif /* !IN_RC */
1405
1406/**
1407 * Checks if we are executing inside an SVM nested hardware-virtualized guest.
1408 *
1409 * @returns true if in SVM nested-guest mode, false otherwise.
1410 * @param pCtx Pointer to the context.
1411 */
1412DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
1413{
1414 /*
 1415 * With AMD-V, the VMRUN intercept is a prerequisite to entering SVM guest-mode.
1416 * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
1417 */
1418#ifndef IN_RC
1419 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
1420 return pVmcb && (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN);
1421#else
1422 RT_NOREF(pCtx);
1423 return false;
1424#endif
1425}
1426
1427/**
1428 * Checks if we are executing inside a VMX nested hardware-virtualized guest.
1429 *
1430 * @returns true if in VMX nested-guest mode, false otherwise.
1431 * @param pCtx Pointer to the context.
1432 */
1433DECLINLINE(bool) CPUMIsGuestInVmxNestedHwVirtMode(PCCPUMCTX pCtx)
1434{
1435 /** @todo Intel. */
1436 RT_NOREF(pCtx);
1437 return false;
1438}
1439
1440/**
1441 * Checks if we are executing inside a nested hardware-virtualized guest.
1442 *
1443 * @returns true if in SVM/VMX nested-guest mode, false otherwise.
1444 * @param pCtx Pointer to the context.
1445 */
1446DECLINLINE(bool) CPUMIsGuestInNestedHwVirtMode(PCCPUMCTX pCtx)
1447{
1448 return CPUMIsGuestInSvmNestedHwVirtMode(pCtx) || CPUMIsGuestInVmxNestedHwVirtMode(pCtx);
1449}
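/*
 * Example: a minimal sketch of gating nested-guest specific work on the mode
 * predicates above.  The SVM_CTRL_INTERCEPT_INTR constant is assumed to come
 * from the SVM header and the reflection decision itself is hypothetical.
 *
 *      PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
 *      if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
 *          && CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_INTR))
 *      {
 *          // Physical interrupts would have to be reflected to the outer (L1)
 *          // guest as an INTR #VMEXIT rather than delivered to the nested guest.
 *      }
 */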
1450#endif /* VBOX_WITHOUT_UNNAMED_UNIONS */
1451
1452/** @} */
1453
1454
1455/** @name Hypervisor Register Getters.
1456 * @{ */
1457VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu);
1458VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu);
1459VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu);
1460VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu);
1461VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu);
1462VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu);
1463#if 0 /* these are not correct. */
1464VMMDECL(uint32_t) CPUMGetHyperCR0(PVMCPU pVCpu);
1465VMMDECL(uint32_t) CPUMGetHyperCR2(PVMCPU pVCpu);
1466VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
1467VMMDECL(uint32_t) CPUMGetHyperCR4(PVMCPU pVCpu);
1468#endif
1469/** This register is only saved on fatal traps. */
1470VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu);
1471VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu);
1472/** This register is only saved on fatal traps. */
1473VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu);
1474/** This register is only saved on fatal traps. */
1475VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu);
1476VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu);
1477VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu);
1478VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu);
1479VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu);
1480VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu);
1481VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu);
1482VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu);
1483VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
1484VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit);
1485VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu);
1486VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu);
1487VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu);
1488VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu);
1489VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu);
1490VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu);
1491VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu);
1492VMMDECL(void) CPUMGetHyperCtx(PVMCPU pVCpu, PCPUMCTX pCtx);
1493VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu);
1494/** @} */
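/*
 * Example: a minimal sketch of dumping a few hypervisor registers while
 * diagnosing a fatal trap; note that EAX/ECX/EDX are only saved on fatal
 * traps (see the remarks above).
 *
 *      LogRel(("Hyper state: cs:eip=%04x:%08x ss:esp=%04x:%08x eflags=%08x\n",
 *              CPUMGetHyperCS(pVCpu), CPUMGetHyperEIP(pVCpu),
 *              CPUMGetHyperSS(pVCpu), CPUMGetHyperESP(pVCpu),
 *              CPUMGetHyperEFlags(pVCpu)));
 */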
1495
1496/** @name Hypervisor Register Setters.
1497 * @{ */
1498VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit);
1499VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR);
1500VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit);
1501VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
1502VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR);
1503VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS);
1504VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS);
1505VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES);
1506VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS);
1507VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS);
1508VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS);
1509VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP);
1510VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl);
1511VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP);
1512VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX);
1513VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
1514VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
1515VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
1516VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
1517VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
1518VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
1519VMMDECL(void) CPUMSetHyperCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
1520VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper);
1521/** @} */
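/*
 * Example: a minimal sketch of priming the hypervisor context before resuming
 * raw-mode execution at a specific entry point; GCPtrEntry, GCPtrStackTop and
 * the argument values are hypothetical.
 *
 *      CPUMSetHyperState(pVCpu, GCPtrEntry, GCPtrStackTop, uArg0, uArg1);
 *      CPUMSetHyperEFlags(pVCpu, X86_EFL_1);     // only the reserved bit set
 *      CPUMPushHyper(pVCpu, uArg2);              // extra argument on the hyper stack
 */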
1522
1523VMMDECL(void) CPUMPushHyper(PVMCPU pVCpu, uint32_t u32);
1524VMMDECL(int) CPUMQueryHyperCtxPtr(PVMCPU pVCpu, PCPUMCTX *ppCtx);
1525VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu);
1526VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu);
1527VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
1528VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu);
1529VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu);
1530VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc);
1531VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu);
1532VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl);
1533
1534/** @name Changed flags.
1535 * These flags are used to keep track of which important registers have
1536 * been changed since they were last reset. The only component allowed to
1537 * clear them is REM!
1538 * @{
1539 */
1540#define CPUM_CHANGED_FPU_REM RT_BIT(0)
1541#define CPUM_CHANGED_CR0 RT_BIT(1)
1542#define CPUM_CHANGED_CR4 RT_BIT(2)
1543#define CPUM_CHANGED_GLOBAL_TLB_FLUSH RT_BIT(3)
1544#define CPUM_CHANGED_CR3 RT_BIT(4)
1545#define CPUM_CHANGED_GDTR RT_BIT(5)
1546#define CPUM_CHANGED_IDTR RT_BIT(6)
1547#define CPUM_CHANGED_LDTR RT_BIT(7)
1548#define CPUM_CHANGED_TR RT_BIT(8) /**< Currently unused. */
1549#define CPUM_CHANGED_SYSENTER_MSR RT_BIT(9)
1550#define CPUM_CHANGED_HIDDEN_SEL_REGS RT_BIT(10) /**< Currently unused. */
1551#define CPUM_CHANGED_CPUID RT_BIT(11)
1552#define CPUM_CHANGED_ALL ( CPUM_CHANGED_FPU_REM \
1553 | CPUM_CHANGED_CR0 \
1554 | CPUM_CHANGED_CR4 \
1555 | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
1556 | CPUM_CHANGED_CR3 \
1557 | CPUM_CHANGED_GDTR \
1558 | CPUM_CHANGED_IDTR \
1559 | CPUM_CHANGED_LDTR \
1560 | CPUM_CHANGED_TR \
1561 | CPUM_CHANGED_SYSENTER_MSR \
1562 | CPUM_CHANGED_HIDDEN_SEL_REGS \
1563 | CPUM_CHANGED_CPUID )
1564/** @} */
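/*
 * Example: a minimal sketch of flagging changed state for REM after emulating
 * a MOV to CR3 that also toggled paging-related CR0 bits; the combination of
 * flags used here is illustrative.
 *
 *      CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0
 *                               | CPUM_CHANGED_CR3
 *                               | CPUM_CHANGED_GLOBAL_TLB_FLUSH);
 */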
1565
1566VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
1567VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl);
1568VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels);
1569VMMDECL(bool) CPUMSupportsXSave(PVM pVM);
1570VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM);
1571VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM);
1572VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
1573VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
1574VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
1575VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
1576VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu);
1577VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
1578VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
1579VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu);
1580VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu);
1581VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu);
1582VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu);
1583VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
1584VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM);
1585VMMDECL(uint64_t) CPUMGetGuestScalableBusFrequency(PVM pVM);
1586VMMDECL(int) CPUMQueryValidatedGuestEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
1587 uint64_t *puValidEfer);
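/*
 * Example: a minimal sketch of validating an EFER write from a WRMSR emulation
 * path before committing it; the #GP(0) handling shown is hypothetical.
 *
 *      uint64_t uValidEfer;
 *      int rc = CPUMQueryValidatedGuestEfer(pVM, pCtx->cr0, pCtx->msrEFER,
 *                                           uNewEfer, &uValidEfer);
 *      if (RT_FAILURE(rc))
 *          return rc;                        // caller raises #GP(0)
 *      pCtx->msrEFER = uValidEfer;
 */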
1588
1589/** @name Typical scalable bus frequency values.
1590 * @{ */
1591/** Special internal value indicating that we don't know the frequency.
1592 * @internal */
1593#define CPUM_SBUSFREQ_UNKNOWN UINT64_C(1)
1594#define CPUM_SBUSFREQ_100MHZ UINT64_C(100000000)
1595#define CPUM_SBUSFREQ_133MHZ UINT64_C(133333333)
1596#define CPUM_SBUSFREQ_167MHZ UINT64_C(166666666)
1597#define CPUM_SBUSFREQ_200MHZ UINT64_C(200000000)
1598#define CPUM_SBUSFREQ_267MHZ UINT64_C(266666666)
1599#define CPUM_SBUSFREQ_333MHZ UINT64_C(333333333)
1600#define CPUM_SBUSFREQ_400MHZ UINT64_C(400000000)
1601/** @} */
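/*
 * Example: a minimal sketch of consuming the scalable bus frequency; the
 * fallback value chosen here is hypothetical.
 *
 *      uint64_t uFsbHz = CPUMGetGuestScalableBusFrequency(pVM);
 *      if (uFsbHz == CPUM_SBUSFREQ_UNKNOWN)
 *          uFsbHz = CPUM_SBUSFREQ_100MHZ;    // hypothetical fallback
 */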
1602
1603
1604#ifdef IN_RING3
1605/** @defgroup grp_cpum_r3 The CPUM ring-3 API
1606 * @{
1607 */
1608
1609VMMR3DECL(int) CPUMR3Init(PVM pVM);
1610VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
1611VMMR3DECL(void) CPUMR3LogCpuIds(PVM pVM);
1612VMMR3DECL(void) CPUMR3Relocate(PVM pVM);
1613VMMR3DECL(int) CPUMR3Term(PVM pVM);
1614VMMR3DECL(void) CPUMR3Reset(PVM pVM);
1615VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
1616VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM);
1617VMMR3DECL(void) CPUMR3SetHWVirtEx(PVM pVM, bool fHWVirtExEnabled);
1618VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
1619
1620VMMR3DECL(int) CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
1621VMMR3DECL(int) CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
1622VMMR3DECL(CPUMMICROARCH) CPUMR3CpuIdDetermineMicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
1623 uint8_t bModel, uint8_t bStepping);
1624VMMR3DECL(const char *) CPUMR3MicroarchName(CPUMMICROARCH enmMicroarch);
1625VMMR3DECL(int) CPUMR3CpuIdCollectLeaves(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
1626VMMR3DECL(int) CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
1627VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
1628VMMR3DECL(CPUMCPUVENDOR) CPUMR3CpuIdDetectVendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
1629VMMR3DECL(const char *) CPUMR3CpuVendorName(CPUMCPUVENDOR enmVendor);
1630VMMR3DECL(uint32_t) CPUMR3DeterminHostMxCsrMask(void);
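/*
 * Example: a minimal ring-3 sketch of identifying a CPU with the helpers
 * above; the ASMCpuId/ASMGetCpuFamily/ASMGetCpuModel/ASMGetCpuStepping
 * inlines are assumed to come from iprt/asm-amd64-x86.h.
 *
 *      uint32_t uEAX, uEBX, uECX, uEDX;
 *      ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
 *      CPUMCPUVENDOR enmVendor = CPUMR3CpuIdDetectVendorEx(uEAX, uEBX, uECX, uEDX);
 *      ASMCpuId(1, &uEAX, &uEBX, &uECX, &uEDX);
 *      CPUMMICROARCH enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(enmVendor,
 *                                       ASMGetCpuFamily(uEAX),
 *                                       ASMGetCpuModel(uEAX, enmVendor == CPUMCPUVENDOR_INTEL),
 *                                       ASMGetCpuStepping(uEAX));
 *      LogRel(("CPU: %s %s\n", CPUMR3CpuVendorName(enmVendor), CPUMR3MicroarchName(enmMicroarch)));
 */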
1631
1632VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
1633
1634# if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING)
1635/** @name APIs for the CPUID raw-mode patch (legacy).
1636 * @{ */
1637VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM);
1638VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM);
1639VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM);
1640VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM);
1641VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM);
1642VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM);
1643VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM);
1644/** @} */
1645# endif
1646
1647/** @} */
1648#endif /* IN_RING3 */
1649
1650#ifdef IN_RC
1651/** @defgroup grp_cpum_rc The CPUM Raw-mode Context API
1652 * @{
1653 */
1654
1655/**
1656 * Calls a guest trap/interrupt handler directly.
1657 *
1658 * Assumes a trap stack frame has already been set up on the guest's stack!
1659 * This function does not return!
1660 *
1661 * @param pRegFrame Original trap/interrupt context
1662 * @param selCS Code selector of handler
1663 * @param pHandler GC virtual address of handler
1664 * @param eflags Callee's EFLAGS
1665 * @param selSS Stack selector for handler
1666 * @param pEsp Stack address for handler
1667 */
1668DECLASM(void) CPUMGCCallGuestTrapHandler(PCPUMCTXCORE pRegFrame, uint32_t selCS, RTRCPTR pHandler,
1669 uint32_t eflags, uint32_t selSS, RTRCPTR pEsp);
1670
1671/**
1672 * Call guest V86 code directly.
1673 *
1674 * This function does not return!
1675 *
1676 * @param pRegFrame Original trap/interrupt context
1677 */
1678DECLASM(void) CPUMGCCallV86Code(PCPUMCTXCORE pRegFrame);
1679
1680VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu);
1681VMMDECL(uint32_t) CPUMRCGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
1682#ifdef VBOX_WITH_RAW_RING1
1683VMMDECL(void) CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore);
1684#endif
1685VMMRCDECL(void) CPUMRCProcessForceFlag(PVMCPU pVCpu);
1686
1687/** @} */
1688#endif /* IN_RC */
1689
1690#ifdef IN_RING0
1691/** @defgroup grp_cpum_r0 The CPUM ring-0 API
1692 * @{
1693 */
1694VMMR0_INT_DECL(int) CPUMR0ModuleInit(void);
1695VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void);
1696VMMR0_INT_DECL(int) CPUMR0InitVM(PVM pVM);
1697DECLASM(void) CPUMR0RegisterVCpuThread(PVMCPU pVCpu);
1698DECLASM(void) CPUMR0TouchHostFpu(void);
1699VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu);
1700VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu);
1701VMMR0_INT_DECL(bool) CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu);
1702VMMR0_INT_DECL(int) CPUMR0SaveHostDebugState(PVM pVM, PVMCPU pVCpu);
1703VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6);
1704VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6);
1705
1706VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6);
1707VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6);
1708#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1709VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, uint32_t iHostCpuSet);
1710#endif
1711
1712/** @} */
1713#endif /* IN_RING0 */
1714
1715/** @defgroup grp_cpum_rz The CPUM raw-mode and ring-0 context API
1716 * @{
1717 */
1718VMMRZ_INT_DECL(void) CPUMRZFpuStatePrepareHostCpuForUse(PVMCPU pVCpu);
1719VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForRead(PVMCPU pVCpu);
1720VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeForChange(PVMCPU pVCpu);
1721VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeSseForRead(PVMCPU pVCpu);
1722VMMRZ_INT_DECL(void) CPUMRZFpuStateActualizeAvxForRead(PVMCPU pVCpu);
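/*
 * Example: a minimal ring-0/raw-mode sketch of the call pattern; the guest
 * state image must be actualized before it is inspected, and a different
 * helper is required before modifying it.
 *
 *      CPUMRZFpuStateActualizeSseForRead(pVCpu);    // call before reading XMM/MXCSR
 *      CPUMRZFpuStateActualizeForChange(pVCpu);     // call before modifying FPU/SSE state
 */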
1723/** @} */
1724
1725
1726#endif /* !VBOX_FOR_DTRACE_LIB */
1727/** @} */
1728RT_C_DECLS_END
1729
1730
1731#endif
1732