VirtualBox

source: vbox/trunk/include/VBox/vmm/cpumctx.h@ 75681

Last change on this file since 75681 was 75671, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 Implement NMI-unblocking due to IRET for VM-exits. Implemented restoring blocking of NMI when VM-entry fails while checking/loading guest-state. Fixed loading blocking by NMI during VM-entry.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 54.7 KB
1/** @file
2 * CPUM - CPU Monitor(/ Manager), Context Structures.
3 */
4
5/*
6 * Copyright (C) 2006-2017 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_cpumctx_h
27#define ___VBox_vmm_cpumctx_h
28
29#ifndef VBOX_FOR_DTRACE_LIB
30# include <iprt/x86.h>
31# include <VBox/types.h>
32# include <VBox/vmm/hm_svm.h>
33# include <VBox/vmm/hm_vmx.h>
34#else
35# pragma D depends_on library x86.d
36#endif
37
38
39RT_C_DECLS_BEGIN
40
41/** @defgroup grp_cpum_ctx The CPUM Context Structures
42 * @ingroup grp_cpum
43 * @{
44 */
45
46/**
47 * Selector hidden registers.
48 */
49typedef struct CPUMSELREG
50{
51 /** The selector register. */
52 RTSEL Sel;
53 /** Padding, don't use. */
54 RTSEL PaddingSel;
 55 /** The selector whose info resides in u64Base, u32Limit and Attr, provided
56 * that CPUMSELREG_FLAGS_VALID is set. */
57 RTSEL ValidSel;
58 /** Flags, see CPUMSELREG_FLAGS_XXX. */
59 uint16_t fFlags;
60
61 /** Base register.
62 *
63 * Long mode remarks:
64 * - Unused in long mode for CS, DS, ES, SS
65 * - 32 bits for FS & GS; FS(GS)_BASE msr used for the base address
66 * - 64 bits for TR & LDTR
67 */
68 uint64_t u64Base;
69 /** Limit (expanded). */
70 uint32_t u32Limit;
71 /** Flags.
72 * This is the high 32-bit word of the descriptor entry.
73 * Only the flags, dpl and type are used. */
74 X86DESCATTR Attr;
75} CPUMSELREG;
76#ifndef VBOX_FOR_DTRACE_LIB
77AssertCompileSize(CPUMSELREG, 24);
78#endif
79
80/** @name CPUMSELREG_FLAGS_XXX - CPUMSELREG::fFlags values.
81 * @{ */
82#define CPUMSELREG_FLAGS_VALID UINT16_C(0x0001)
83#define CPUMSELREG_FLAGS_STALE UINT16_C(0x0002)
84#define CPUMSELREG_FLAGS_VALID_MASK UINT16_C(0x0003)
85/** @} */
86
87/** Checks if the hidden parts of the selector register are valid. */
88#ifdef VBOX_WITH_RAW_MODE_NOT_R0
89# define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSelReg) \
90 ( ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \
91 && ( (a_pSelReg)->ValidSel == (a_pSelReg)->Sel \
92 || ( (a_pVCpu) /*!= NULL*/ \
93 && (a_pSelReg)->ValidSel == ((a_pSelReg)->Sel & X86_SEL_MASK_OFF_RPL) \
94 && ((a_pSelReg)->Sel & X86_SEL_RPL) == 1 \
95 && ((a_pSelReg)->ValidSel & X86_SEL_RPL) == 0 \
96 && CPUMIsGuestInRawMode(a_pVCpu) \
97 ) \
98 ) \
99 )
100#else
101# define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSelReg) \
102 ( ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \
103 && (a_pSelReg)->ValidSel == (a_pSelReg)->Sel )
104#endif
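/* Illustrative sketch (not part of the original header): callers are expected to
 * check the hidden parts with the macro above before trusting the cached
 * u64Base/u32Limit/Attr of a selector register.  pVCpu is assumed to be supplied
 * by the caller (it is only dereferenced in raw-mode builds). */
#if 0
static uint64_t ExampleSelRegBase(PVMCPU pVCpu, const CPUMSELREG *pSelReg)
{
    if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSelReg))
        return pSelReg->u64Base;
    return 0; /* Hidden parts are stale/invalid; the caller must refresh them first. */
}
#endif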
105
106/** Old type used for the hidden register part.
107 * @deprecated */
108typedef CPUMSELREG CPUMSELREGHID;
109
110/**
111 * The sysenter register set.
112 */
113typedef struct CPUMSYSENTER
114{
115 /** Ring 0 cs.
116 * This value + 8 is the Ring 0 ss.
117 * This value + 16 is the Ring 3 cs.
118 * This value + 24 is the Ring 3 ss.
119 */
120 uint64_t cs;
121 /** Ring 0 eip. */
122 uint64_t eip;
123 /** Ring 0 esp. */
124 uint64_t esp;
125} CPUMSYSENTER;
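/* Illustrative sketch (not part of the original header): deriving the remaining
 * flat selectors from SYSENTER_CS using the +8/+16/+24 layout documented above.
 * The helper name and out-parameters are made up for the example. */
#if 0
static void ExampleSysenterSelectors(const CPUMSYSENTER *pSysEnter,
                                     RTSEL *pSelR0Ss, RTSEL *pSelR3Cs, RTSEL *pSelR3Ss)
{
    RTSEL const SelR0Cs = (RTSEL)pSysEnter->cs;
    *pSelR0Ss = SelR0Cs + 8;    /* ring-0 stack segment */
    *pSelR3Cs = SelR0Cs + 16;   /* ring-3 code segment  */
    *pSelR3Ss = SelR0Cs + 24;   /* ring-3 stack segment */
}
#endif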
126
127/** @def CPUM_UNION_NM
 128 * For compilers (like DTrace) that do not grok nameless unions, we have a
129 * little hack to make them palatable.
130 */
131/** @def CPUM_STRUCT_NM
 132 * For compilers (like DTrace) that do not grok nameless structs (it is
133 * non-standard C++), we have a little hack to make them palatable.
134 */
135#ifdef VBOX_FOR_DTRACE_LIB
136# define CPUM_UNION_NM(a_Nm) a_Nm
137# define CPUM_STRUCT_NM(a_Nm) a_Nm
138#elif defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS)
139# define CPUM_UNION_NM(a_Nm) a_Nm
140# define CPUM_STRUCT_NM(a_Nm) a_Nm
141#else
142# define CPUM_UNION_NM(a_Nm)
143# define CPUM_STRUCT_NM(a_Nm)
144#endif
145/** @def CPUM_UNION_STRUCT_NM
146 * Combines CPUM_UNION_NM and CPUM_STRUCT_NM to avoid hitting the right side of
147 * the screen in the compile time assertions.
148 */
149#define CPUM_UNION_STRUCT_NM(a_UnionNm, a_StructNm) CPUM_UNION_NM(a_UnionNm .) CPUM_STRUCT_NM(a_StructNm)
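/* Illustrative sketch (not part of the original header): the same CPUMCTX member
 * spelled so it compiles both with and without nameless-union support.  With
 * nameless unions the CPUM_*_NM macros expand to nothing and the expression
 * reduces to pCtx->rax; otherwise it becomes pCtx->g.qw.rax.  pCtx is assumed to
 * be a valid PCPUMCTX from the caller. */
#if 0
    uint64_t uGuestRax = pCtx->CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax;
#endif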
150
151/** A general register (union). */
152typedef union CPUMCTXGREG
153{
154 /** Natural unsigned integer view. */
155 uint64_t u;
156 /** 64-bit view. */
157 uint64_t u64;
158 /** 32-bit view. */
159 uint32_t u32;
160 /** 16-bit view. */
161 uint16_t u16;
162 /** 8-bit view. */
163 uint8_t u8;
164 /** 8-bit low/high view. */
165 RT_GCC_EXTENSION struct
166 {
167 /** Low byte (al, cl, dl, bl, ++). */
168 uint8_t bLo;
169 /** High byte in the first word - ah, ch, dh, bh. */
170 uint8_t bHi;
171 } CPUM_STRUCT_NM(s);
172} CPUMCTXGREG;
173#ifndef VBOX_FOR_DTRACE_LIB
174AssertCompileSize(CPUMCTXGREG, 8);
175AssertCompileMemberOffset(CPUMCTXGREG, CPUM_STRUCT_NM(s.) bLo, 0);
176AssertCompileMemberOffset(CPUMCTXGREG, CPUM_STRUCT_NM(s.) bHi, 1);
177#endif
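/* Illustrative sketch (not part of the original header): writing AL/AH through
 * the byte view of CPUMCTXGREG and reading the result back as a qword.  On the
 * little-endian x86 targets this header is for, the value reads back as 0x1234. */
#if 0
static uint64_t ExampleGRegByteViews(void)
{
    CPUMCTXGREG Reg;
    Reg.u64 = 0;
    Reg.CPUM_STRUCT_NM(s.) bLo = 0x34;  /* al */
    Reg.CPUM_STRUCT_NM(s.) bHi = 0x12;  /* ah */
    return Reg.u;
}
#endif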
178
179
180
181/**
182 * CPU context core.
183 *
184 * @todo Eliminate this structure!
185 * @deprecated We don't push any context cores any more in TRPM.
186 */
187#pragma pack(1)
188typedef struct CPUMCTXCORE
189{
190 /** @name General Register.
191 * @note These follow the encoding order (X86_GREG_XXX) and can be accessed as
192 * an array starting a rax.
193 * @{ */
194 union
195 {
196 uint8_t al;
197 uint16_t ax;
198 uint32_t eax;
199 uint64_t rax;
200 } CPUM_UNION_NM(rax);
201 union
202 {
203 uint8_t cl;
204 uint16_t cx;
205 uint32_t ecx;
206 uint64_t rcx;
207 } CPUM_UNION_NM(rcx);
208 union
209 {
210 uint8_t dl;
211 uint16_t dx;
212 uint32_t edx;
213 uint64_t rdx;
214 } CPUM_UNION_NM(rdx);
215 union
216 {
217 uint8_t bl;
218 uint16_t bx;
219 uint32_t ebx;
220 uint64_t rbx;
221 } CPUM_UNION_NM(rbx);
222 union
223 {
224 uint16_t sp;
225 uint32_t esp;
226 uint64_t rsp;
227 } CPUM_UNION_NM(rsp);
228 union
229 {
230 uint16_t bp;
231 uint32_t ebp;
232 uint64_t rbp;
233 } CPUM_UNION_NM(rbp);
234 union
235 {
236 uint8_t sil;
237 uint16_t si;
238 uint32_t esi;
239 uint64_t rsi;
240 } CPUM_UNION_NM(rsi);
241 union
242 {
243 uint8_t dil;
244 uint16_t di;
245 uint32_t edi;
246 uint64_t rdi;
247 } CPUM_UNION_NM(rdi);
248 uint64_t r8;
249 uint64_t r9;
250 uint64_t r10;
251 uint64_t r11;
252 uint64_t r12;
253 uint64_t r13;
254 uint64_t r14;
255 uint64_t r15;
256 /** @} */
257
258 /** @name Segment registers.
259 * @note These follow the encoding order (X86_SREG_XXX) and can be accessed as
 260 * an array starting at es.
261 * @{ */
262 CPUMSELREG es;
263 CPUMSELREG cs;
264 CPUMSELREG ss;
265 CPUMSELREG ds;
266 CPUMSELREG fs;
267 CPUMSELREG gs;
268 /** @} */
269
270 /** The program counter. */
271 union
272 {
273 uint16_t ip;
274 uint32_t eip;
275 uint64_t rip;
276 } CPUM_UNION_NM(rip);
277
278 /** The flags register. */
279 union
280 {
281 X86EFLAGS eflags;
282 X86RFLAGS rflags;
283 } CPUM_UNION_NM(rflags);
284
285} CPUMCTXCORE;
286#pragma pack()
287
288
289/**
290 * SVM Host-state area (Nested Hw.virt - VirtualBox's layout).
291 *
292 * @warning Exercise caution while modifying the layout of this struct. It's
293 * part of VM saved states.
294 */
295#pragma pack(1)
296typedef struct SVMHOSTSTATE
297{
298 uint64_t uEferMsr;
299 uint64_t uCr0;
300 uint64_t uCr4;
301 uint64_t uCr3;
302 uint64_t uRip;
303 uint64_t uRsp;
304 uint64_t uRax;
305 X86RFLAGS rflags;
306 CPUMSELREG es;
307 CPUMSELREG cs;
308 CPUMSELREG ss;
309 CPUMSELREG ds;
310 VBOXGDTR gdtr;
311 VBOXIDTR idtr;
312 uint8_t abPadding[4];
313} SVMHOSTSTATE;
314#pragma pack()
315/** Pointer to the SVMHOSTSTATE structure. */
316typedef SVMHOSTSTATE *PSVMHOSTSTATE;
317/** Pointer to a const SVMHOSTSTATE structure. */
318typedef const SVMHOSTSTATE *PCSVMHOSTSTATE;
319#ifndef VBOX_FOR_DTRACE_LIB
320AssertCompileSizeAlignment(SVMHOSTSTATE, 8);
321AssertCompileSize(SVMHOSTSTATE, 184);
322#endif
323
324
325/**
326 * CPU hardware virtualization types.
327 */
328typedef enum
329{
330 CPUMHWVIRT_NONE = 0,
331 CPUMHWVIRT_VMX,
332 CPUMHWVIRT_SVM,
333 CPUMHWVIRT_32BIT_HACK = 0x7fffffff
334} CPUMHWVIRT;
335#ifndef VBOX_FOR_DTRACE_LIB
336AssertCompileSize(CPUMHWVIRT, 4);
337#endif
338
339
340/**
341 * CPU context.
342 */
343#pragma pack(1) /* for VBOXIDTR / VBOXGDTR. */
344typedef struct CPUMCTX
345{
346 /** CPUMCTXCORE Part.
347 * @{ */
348
349 /** General purpose registers. */
350 union /* no tag! */
351 {
352 /** The general purpose register array view, indexed by X86_GREG_XXX. */
353 CPUMCTXGREG aGRegs[16];
354
355 /** 64-bit general purpose register view. */
356 RT_GCC_EXTENSION struct /* no tag! */
357 {
358 uint64_t rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15;
359 } CPUM_STRUCT_NM(qw);
360 /** 64-bit general purpose register view. */
361 RT_GCC_EXTENSION struct /* no tag! */
362 {
363 uint64_t r0, r1, r2, r3, r4, r5, r6, r7;
364 } CPUM_STRUCT_NM(qw2);
365 /** 32-bit general purpose register view. */
366 RT_GCC_EXTENSION struct /* no tag! */
367 {
368 uint32_t eax, u32Pad00, ecx, u32Pad01, edx, u32Pad02, ebx, u32Pad03,
369 esp, u32Pad04, ebp, u32Pad05, esi, u32Pad06, edi, u32Pad07,
370 r8d, u32Pad08, r9d, u32Pad09, r10d, u32Pad10, r11d, u32Pad11,
371 r12d, u32Pad12, r13d, u32Pad13, r14d, u32Pad14, r15d, u32Pad15;
372 } CPUM_STRUCT_NM(dw);
373 /** 16-bit general purpose register view. */
374 RT_GCC_EXTENSION struct /* no tag! */
375 {
376 uint16_t ax, au16Pad00[3], cx, au16Pad01[3], dx, au16Pad02[3], bx, au16Pad03[3],
377 sp, au16Pad04[3], bp, au16Pad05[3], si, au16Pad06[3], di, au16Pad07[3],
378 r8w, au16Pad08[3], r9w, au16Pad09[3], r10w, au16Pad10[3], r11w, au16Pad11[3],
379 r12w, au16Pad12[3], r13w, au16Pad13[3], r14w, au16Pad14[3], r15w, au16Pad15[3];
380 } CPUM_STRUCT_NM(w);
381 RT_GCC_EXTENSION struct /* no tag! */
382 {
383 uint8_t al, ah, abPad00[6], cl, ch, abPad01[6], dl, dh, abPad02[6], bl, bh, abPad03[6],
384 spl, abPad04[7], bpl, abPad05[7], sil, abPad06[7], dil, abPad07[7],
385 r8l, abPad08[7], r9l, abPad09[7], r10l, abPad10[7], r11l, abPad11[7],
386 r12l, abPad12[7], r13l, abPad13[7], r14l, abPad14[7], r15l, abPad15[7];
387 } CPUM_STRUCT_NM(b);
388 } CPUM_UNION_NM(g);
389
390 /** Segment registers. */
391 union /* no tag! */
392 {
393 /** The segment register array view, indexed by X86_SREG_XXX. */
394 CPUMSELREG aSRegs[6];
395 /** The named segment register view. */
396 RT_GCC_EXTENSION struct /* no tag! */
397 {
398 CPUMSELREG es, cs, ss, ds, fs, gs;
399 } CPUM_STRUCT_NM(n);
400 } CPUM_UNION_NM(s);
401
402 /** The program counter. */
403 union
404 {
405 uint16_t ip;
406 uint32_t eip;
407 uint64_t rip;
408 } CPUM_UNION_NM(rip);
409
410 /** The flags register. */
411 union
412 {
413 X86EFLAGS eflags;
414 X86RFLAGS rflags;
415 } CPUM_UNION_NM(rflags);
416
417 /** @} */ /*(CPUMCTXCORE)*/
418
419
420 /** @name Control registers.
421 * @{ */
422 uint64_t cr0;
423 uint64_t cr2;
424 uint64_t cr3;
425 uint64_t cr4;
426 /** @} */
427
428 /** Debug registers.
429 * @remarks DR4 and DR5 should not be used since they are aliases for
430 * DR6 and DR7 respectively on both AMD and Intel CPUs.
 431 * @remarks DR8-15 are currently not supported by AMD or Intel, so we do
 432 * not support them either.
433 */
434 uint64_t dr[8];
435
436 /** Padding before the structure so the 64-bit member is correctly aligned.
437 * @todo fix this structure! */
438 uint16_t gdtrPadding[3];
439 /** Global Descriptor Table register. */
440 VBOXGDTR gdtr;
441
442 /** Padding before the structure so the 64-bit member is correctly aligned.
443 * @todo fix this structure! */
444 uint16_t idtrPadding[3];
445 /** Interrupt Descriptor Table register. */
446 VBOXIDTR idtr;
447
 448 /** The local descriptor table register.
449 * Only the guest context uses all the members. */
450 CPUMSELREG ldtr;
451 /** The task register.
452 * Only the guest context uses all the members. */
453 CPUMSELREG tr;
454
455 /** The sysenter msr registers.
456 * This member is not used by the hypervisor context. */
457 CPUMSYSENTER SysEnter;
458
459 /** @name System MSRs.
460 * @{ */
461 uint64_t msrEFER;
462 uint64_t msrSTAR; /**< Legacy syscall eip, cs & ss. */
463 uint64_t msrPAT; /**< Page attribute table. */
 464 uint64_t msrLSTAR; /**< 64-bit mode syscall rip. */
465 uint64_t msrCSTAR; /**< Compatibility mode syscall rip. */
466 uint64_t msrSFMASK; /**< syscall flag mask. */
467 uint64_t msrKERNELGSBASE; /**< swapgs exchange value. */
468 uint64_t uMsrPadding0; /**< no longer used (used to hold a copy of APIC base MSR). */
469 /** @} */
470
471 /** The XCR0..XCR1 registers. */
472 uint64_t aXcr[2];
473 /** The mask to pass to XSAVE/XRSTOR in EDX:EAX. If zero we use
474 * FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
475 uint64_t fXStateMask;
476
477 /** Pointer to the FPU/SSE/AVX/XXXX state ring-0 mapping. */
478 R0PTRTYPE(PX86XSAVEAREA) pXStateR0;
479#if HC_ARCH_BITS == 32
480 uint32_t uXStateR0Padding;
481#endif
482 /** Pointer to the FPU/SSE/AVX/XXXX state ring-3 mapping. */
483 R3PTRTYPE(PX86XSAVEAREA) pXStateR3;
484#if HC_ARCH_BITS == 32
485 uint32_t uXStateR3Padding;
486#endif
487 /** Pointer to the FPU/SSE/AVX/XXXX state raw-mode mapping. */
488 RCPTRTYPE(PX86XSAVEAREA) pXStateRC;
489 /** State component offsets into pXState, UINT16_MAX if not present. */
490 uint16_t aoffXState[64];
491
492 /** 0x2d4 - World switcher flags, CPUMCTX_WSF_XXX. */
493 uint32_t fWorldSwitcher;
494 /** 0x2d8 - Externalized state tracker, CPUMCTX_EXTRN_XXX.
495 * Currently only used internally in NEM/win. */
496 uint64_t fExtrn;
497
498 /** 0x2e0 - Hardware virtualization state. */
499 struct
500 {
501 union /* no tag! */
502 {
503 struct
504 {
505 /** 0x2e0 - MSR holding physical address of the Guest's Host-state. */
506 uint64_t uMsrHSavePa;
507 /** 0x2e8 - Guest physical address of the nested-guest VMCB. */
508 RTGCPHYS GCPhysVmcb;
509 /** 0x2f0 - Cache of the nested-guest VMCB - R0 ptr. */
510 R0PTRTYPE(PSVMVMCB) pVmcbR0;
511#if HC_ARCH_BITS == 32
512 uint32_t uVmcbR0Padding;
513#endif
514 /** 0x2f8 - Cache of the nested-guest VMCB - R3 ptr. */
515 R3PTRTYPE(PSVMVMCB) pVmcbR3;
516#if HC_ARCH_BITS == 32
517 uint32_t uVmcbR3Padding;
518#endif
519 /** 0x300 - Guest's host-state save area. */
520 SVMHOSTSTATE HostState;
521 /** 0x3b8 - Guest TSC time-stamp of when the previous PAUSE instr. was executed. */
522 uint64_t uPrevPauseTick;
523 /** 0x3c0 - Pause filter count. */
524 uint16_t cPauseFilter;
525 /** 0x3c2 - Pause filter threshold. */
526 uint16_t cPauseFilterThreshold;
527 /** 0x3c4 - Whether the injected event is subject to event intercepts. */
528 bool fInterceptEvents;
529 /** 0x3c5 - Padding. */
530 bool afPadding[3];
531 /** 0x3c8 - MSR permission bitmap - R0 ptr. */
532 R0PTRTYPE(void *) pvMsrBitmapR0;
533#if HC_ARCH_BITS == 32
534 uint32_t uvMsrBitmapR0Padding;
535#endif
536 /** 0x3d0 - MSR permission bitmap - R3 ptr. */
537 R3PTRTYPE(void *) pvMsrBitmapR3;
538#if HC_ARCH_BITS == 32
539 uint32_t uvMsrBitmapR3Padding;
540#endif
541 /** 0x3d8 - IO permission bitmap - R0 ptr. */
542 R0PTRTYPE(void *) pvIoBitmapR0;
543#if HC_ARCH_BITS == 32
544 uint32_t uIoBitmapR0Padding;
545#endif
546 /** 0x3e0 - IO permission bitmap - R3 ptr. */
547 R3PTRTYPE(void *) pvIoBitmapR3;
548#if HC_ARCH_BITS == 32
549 uint32_t uIoBitmapR3Padding;
550#endif
551 /** 0x3e8 - Host physical address of the nested-guest VMCB. */
552 RTHCPHYS HCPhysVmcb;
553 } svm;
554
555 struct
556 {
 557 /** 0x2e0 - Guest physical address of the VMXON region. */
558 RTGCPHYS GCPhysVmxon;
559 /** 0x2e8 - Guest physical address of the current VMCS pointer. */
560 RTGCPHYS GCPhysVmcs;
561 /** 0x2f0 - Guest physical address of the shadow VMCS pointer. */
562 RTGCPHYS GCPhysShadowVmcs;
563 /** 0x2f8 - Last emulated VMX instruction/VM-exit diagnostic. */
564 VMXVDIAG enmDiag;
565 /** 0x2fc - VMX abort reason. */
566 VMXABORT enmAbort;
567 /** 0x300 - VMX abort auxiliary information field. */
568 uint32_t uAbortAux;
569 /** 0x304 - Whether the guest is in VMX root mode. */
570 bool fInVmxRootMode;
571 /** 0x305 - Whether the guest is in VMX non-root mode. */
572 bool fInVmxNonRootMode;
573 /** 0x306 - Whether the injected events are subjected to event intercepts. */
574 bool fInterceptEvents;
575 /** 0x307 - Whether blocking of NMI (or virtual-NMIs) was in effect in VMX non-root
576 * mode before execution of IRET. */
577 bool fNmiUnblockingIret;
578 /** 0x308 - Cache of the nested-guest current VMCS - R0 ptr. */
579 R0PTRTYPE(PVMXVVMCS) pVmcsR0;
580#if HC_ARCH_BITS == 32
581 uint32_t uVmcsR0Padding;
582#endif
 583 /** 0x310 - Cache of the nested-guest current VMCS - R3 ptr. */
584 R3PTRTYPE(PVMXVVMCS) pVmcsR3;
585#if HC_ARCH_BITS == 32
586 uint32_t uVmcsR3Padding;
587#endif
 588 /** 0x318 - Cache of the nested-guest shadow VMCS - R0 ptr. */
589 R0PTRTYPE(PVMXVVMCS) pShadowVmcsR0;
590#if HC_ARCH_BITS == 32
591 uint32_t uShadowVmcsR0Padding;
592#endif
593 /** 0x320 - Cache of the nested-guest shadow VMCS - R3 ptr. */
594 R3PTRTYPE(PVMXVVMCS) pShadowVmcsR3;
595#if HC_ARCH_BITS == 32
596 uint32_t uShadowVmcsR3Padding;
597#endif
598 /** 0x328 - Cache of the nested-guest Virtual-APIC page - R0 ptr. */
599 R0PTRTYPE(void *) pvVirtApicPageR0;
600#if HC_ARCH_BITS == 32
601 uint32_t uVirtApicPageR0Padding;
602#endif
603 /** 0x330 - Cache of the nested-guest Virtual-APIC page - R3 ptr. */
604 R3PTRTYPE(void *) pvVirtApicPageR3;
605#if HC_ARCH_BITS == 32
606 uint32_t uVirtApicPageR3Padding;
607#endif
608 /** 0x338 - Cache of the nested-guest VMREAD-bitmap - R0 ptr. */
609 R0PTRTYPE(void *) pvVmreadBitmapR0;
610#if HC_ARCH_BITS == 32
611 uint32_t uVmreadBitmapR0Padding;
612#endif
613 /** 0x340 - Cache of the nested-guest VMREAD-bitmap - R3 ptr. */
614 R3PTRTYPE(void *) pvVmreadBitmapR3;
615#if HC_ARCH_BITS == 32
616 uint32_t uVmreadBitmapR3Padding;
617#endif
618 /** 0x348 - Cache of the nested-guest VMWRITE-bitmap - R0 ptr. */
619 R0PTRTYPE(void *) pvVmwriteBitmapR0;
620#if HC_ARCH_BITS == 32
621 uint32_t uVmwriteBitmapR0Padding;
622#endif
623 /** 0x350 - Cache of the nested-guest VMWRITE-bitmap - R3 ptr. */
624 R3PTRTYPE(void *) pvVmwriteBitmapR3;
625#if HC_ARCH_BITS == 32
626 uint32_t uVmwriteBitmapR3Padding;
627#endif
628 /** 0x358 - The MSR auto-load/store area - R0 ptr. */
629 R0PTRTYPE(PVMXAUTOMSR) pAutoMsrAreaR0;
630#if HC_ARCH_BITS == 32
631 uint32_t uAutoMsrAreaR0;
632#endif
633 /** 0x360 - The MSR auto-load/store area - R3 ptr. */
634 R3PTRTYPE(PVMXAUTOMSR) pAutoMsrAreaR3;
635#if HC_ARCH_BITS == 32
636 uint32_t uAutoMsrAreaR3;
637#endif
638 /** 0x368 - The MSR bitmap - R0 ptr. */
639 R0PTRTYPE(void *) pvMsrBitmapR0;
640#if HC_ARCH_BITS == 32
641 uint32_t uMsrBitmapR0;
642#endif
643 /** 0x370 - The MSR bitmap - R3 ptr. */
644 R3PTRTYPE(void *) pvMsrBitmapR3;
645#if HC_ARCH_BITS == 32
646 uint32_t uMsrBitmapR3;
647#endif
648 /** 0x378 - The I/O bitmap - R0 ptr. */
649 R0PTRTYPE(void *) pvIoBitmapR0;
650#if HC_ARCH_BITS == 32
651 uint32_t uIoBitmapR0;
652#endif
653 /** 0x380 - The I/O bitmap - R3 ptr. */
654 R3PTRTYPE(void *) pvIoBitmapR3;
655#if HC_ARCH_BITS == 32
656 uint32_t uIoBitmapR3;
657#endif
 658 /** 0x388 - Guest TSC timestamp of the PAUSE instruction considered to be the
 659 * first in a loop. */
660 uint64_t uFirstPauseLoopTick;
661 /** 0x390 - Guest TSC timestamp of the previous PAUSE instruction. */
662 uint64_t uPrevPauseTick;
663 /** 0x398 - Guest TSC timestamp of VM-entry (used for VMX-preemption timer). */
664 uint64_t uVmentryTick;
665 /** 0x3a0 - Virtual-APIC write offset (until trap-like VM-exit). */
666 uint16_t offVirtApicWrite;
667 /** 0x3a2 - Padding. */
668 uint8_t abPadding[0x3f0 - 0x3a2];
669 } vmx;
670 } CPUM_UNION_NM(s);
671
672 /** 0x3f0 - Hardware virtualization type currently in use. */
673 CPUMHWVIRT enmHwvirt;
674 /** 0x3f4 - Global interrupt flag - AMD only (always true on Intel). */
675 bool fGif;
676 bool afPadding1[3];
677 /** 0x3f8 - A subset of guest force flags that are saved while running the
678 * nested-guest. */
679#ifdef VMCPU_WITH_64_BIT_FFS
680 uint64_t fLocalForcedActions;
681#else
682 uint32_t fLocalForcedActions;
683 uint32_t fPadding;
684#endif
685 } hwvirt;
686 /** @} */
687} CPUMCTX;
688#pragma pack()
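/* Illustrative sketch (not part of the original header): thanks to the anonymous
 * unions above, a general purpose register can be reached either through the
 * X86_GREG_XXX indexed array or by name; in nameless-union builds both lines
 * read the same guest RCX storage (see the offset asserts below).  pCtx is
 * assumed to be a valid PCPUMCTX. */
#if 0
    uint64_t uRcxByIndex = pCtx->CPUM_UNION_NM(g.) aGRegs[X86_GREG_xCX].u64;
    uint64_t uRcxByName  = pCtx->rcx;
#endif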
689
690#ifndef VBOX_FOR_DTRACE_LIB
691AssertCompileSizeAlignment(CPUMCTX, 64);
692AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax, 0);
693AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx, 8);
694AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx, 16);
695AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx, 24);
696AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp, 32);
697AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp, 40);
698AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi, 48);
699AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi, 56);
700AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r8, 64);
701AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r9, 72);
702AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10, 80);
703AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11, 88);
704AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12, 96);
705AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 104);
706AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 112);
707AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 120);
708AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 128);
709AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 152);
710AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 176);
711AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 200);
712AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 224);
713AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 248);
714AssertCompileMemberOffset(CPUMCTX, rip, 272);
715AssertCompileMemberOffset(CPUMCTX, rflags, 280);
716AssertCompileMemberOffset(CPUMCTX, cr0, 288);
717AssertCompileMemberOffset(CPUMCTX, cr2, 296);
718AssertCompileMemberOffset(CPUMCTX, cr3, 304);
719AssertCompileMemberOffset(CPUMCTX, cr4, 312);
720AssertCompileMemberOffset(CPUMCTX, dr, 320);
721AssertCompileMemberOffset(CPUMCTX, gdtr, 384+6);
722AssertCompileMemberOffset(CPUMCTX, idtr, 400+6);
723AssertCompileMemberOffset(CPUMCTX, ldtr, 416);
724AssertCompileMemberOffset(CPUMCTX, tr, 440);
725AssertCompileMemberOffset(CPUMCTX, SysEnter, 464);
726AssertCompileMemberOffset(CPUMCTX, msrEFER, 488);
727AssertCompileMemberOffset(CPUMCTX, msrSTAR, 496);
728AssertCompileMemberOffset(CPUMCTX, msrPAT, 504);
729AssertCompileMemberOffset(CPUMCTX, msrLSTAR, 512);
730AssertCompileMemberOffset(CPUMCTX, msrCSTAR, 520);
731AssertCompileMemberOffset(CPUMCTX, msrSFMASK, 528);
732AssertCompileMemberOffset(CPUMCTX, msrKERNELGSBASE, 536);
733AssertCompileMemberOffset(CPUMCTX, aXcr, 552);
734AssertCompileMemberOffset(CPUMCTX, fXStateMask, 568);
735AssertCompileMemberOffset(CPUMCTX, pXStateR0, 576);
736AssertCompileMemberOffset(CPUMCTX, pXStateR3, 584);
737AssertCompileMemberOffset(CPUMCTX, pXStateRC, 592);
738AssertCompileMemberOffset(CPUMCTX, aoffXState, 596);
739AssertCompileMemberOffset(CPUMCTX, hwvirt, 0x2e0);
740AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa, 0x2e0);
741AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pVmcbR0, 0x2f0);
742AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pVmcbR3, 0x2f8);
743AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HostState, 0x300);
744AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.cPauseFilter, 0x3c0);
745AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0, 0x3c8);
746AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR3, 0x3e0);
747AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HCPhysVmcb, 0x3e8);
748AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pVmcbR0, 8);
749AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0, 8);
750AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR0, 8);
751AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.GCPhysVmxon, 0x2e0);
752AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.GCPhysVmcs, 0x2e8);
753AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.GCPhysShadowVmcs, 0x2f0);
754AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.enmDiag, 0x2f8);
755AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.enmAbort, 0x2fc);
756AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uAbortAux, 0x300);
757AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxRootMode, 0x304);
758AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxNonRootMode, 0x305);
759AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInterceptEvents, 0x306);
760AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fNmiUnblockingIret, 0x307);
761AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0, 0x308);
762AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR3, 0x310);
763AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0, 0x318);
764AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR3, 0x320);
765AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR0, 0x328);
766AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR3, 0x330);
767AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0, 0x338);
768AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR3, 0x340);
769AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR0, 0x348);
770AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR3, 0x350);
771AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR0, 0x358);
772AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR3, 0x360);
773AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvMsrBitmapR0, 0x368);
774AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvMsrBitmapR3, 0x370);
775AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvIoBitmapR0, 0x378);
776AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvIoBitmapR3, 0x380);
777AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uFirstPauseLoopTick, 0x388);
778AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uPrevPauseTick, 0x390);
779AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uVmentryTick, 0x398);
780AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.offVirtApicWrite, 0x3a0);
781AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0, 8);
782AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0, 8);
783AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVirtApicPageR0, 8);
784AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0, 8);
785AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR0, 8);
786AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pAutoMsrAreaR0, 8);
787AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvMsrBitmapR0, 8);
788AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvIoBitmapR0, 8);
789AssertCompileMemberOffset(CPUMCTX, hwvirt.enmHwvirt, 0x3f0);
790AssertCompileMemberOffset(CPUMCTX, hwvirt.fGif, 0x3f4);
791AssertCompileMemberOffset(CPUMCTX, hwvirt.fLocalForcedActions, 0x3f8);
792AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
793AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r0);
794AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r1);
795AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r2);
796AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r3);
797AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r4);
798AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r5);
799AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r6);
800AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r7);
801AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) eax);
802AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) ecx);
803AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) edx);
804AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) ebx);
805AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) esp);
806AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) ebp);
807AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) esi);
808AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) edi);
809AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r8d);
810AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r9d);
811AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r10d);
812AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r11d);
813AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r12d);
814AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r13d);
815AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r14d);
816AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r15d);
817AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) ax);
818AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) cx);
819AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) dx);
820AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) bx);
821AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) sp);
822AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) bp);
823AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) si);
824AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) di);
825AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r8w);
826AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r9w);
827AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r10w);
828AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r11w);
829AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r12w);
830AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r13w);
831AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r14w);
832AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r15w);
833AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) al);
834AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) cl);
835AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) dl);
836AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) bl);
837AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) spl);
838AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) bpl);
839AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) sil);
840AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) dil);
841AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r8l);
842AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r9l);
843AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r10l);
844AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r11l);
845AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r12l);
846AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r13l);
847AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r14l);
848AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r15l);
849AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, CPUMCTX, CPUM_UNION_NM(s.) aSRegs);
850# ifndef _MSC_VER
851AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xAX]);
852AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xCX]);
853AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xDX]);
854AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xBX]);
855AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xSP]);
856AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xBP]);
857AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xSI]);
858AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xDI]);
859AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x8]);
860AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x9]);
861AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x10]);
862AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x11]);
863AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x12]);
864AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x13]);
865AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x14]);
866AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x15]);
867AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) es, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_ES]);
868AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) cs, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_CS]);
869AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) ss, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_SS]);
870AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) ds, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_DS]);
871AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) fs, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_FS]);
872AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) gs, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_GS]);
873# endif
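/* Illustrative sketch (not part of the original header): the svm and vmx members
 * above share a union, so CPUMCTX::hwvirt.enmHwvirt has to be checked before
 * touching either branch of the nested hardware-virtualization state (member
 * paths shown for nameless-union builds). */
#if 0
    bool     fInVmxNonRoot = false;
    uint64_t uHSavePa      = 0;
    if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX)
        fInVmxNonRoot = pCtx->hwvirt.vmx.fInVmxNonRootMode;
    else if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM)
        uHSavePa = pCtx->hwvirt.svm.uMsrHSavePa;
#endif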
874
875/**
876 * Calculates the pointer to the given extended state component.
877 *
878 * @returns Pointer of type @a a_PtrType
879 * @param a_pCtx Pointer to the context.
880 * @param a_iCompBit The extended state component bit number. This bit
881 * must be set in CPUMCTX::fXStateMask.
882 * @param a_PtrType The pointer type of the extended state component.
883 *
884 */
885#if defined(VBOX_STRICT) && defined(RT_COMPILER_SUPPORTS_LAMBDA)
886# define CPUMCTX_XSAVE_C_PTR(a_pCtx, a_iCompBit, a_PtrType) \
887 ([](PCCPUMCTX a_pLambdaCtx) -> a_PtrType \
888 { \
889 AssertCompile((a_iCompBit) < 64U); \
890 AssertMsg(a_pLambdaCtx->fXStateMask & RT_BIT_64(a_iCompBit), (#a_iCompBit "\n")); \
891 AssertMsg(a_pLambdaCtx->aoffXState[(a_iCompBit)] != UINT16_MAX, (#a_iCompBit "\n")); \
892 return (a_PtrType)((uint8_t *)a_pLambdaCtx->CTX_SUFF(pXState) + a_pLambdaCtx->aoffXState[(a_iCompBit)]); \
893 }(a_pCtx))
894#elif defined(VBOX_STRICT) && defined(__GNUC__)
895# define CPUMCTX_XSAVE_C_PTR(a_pCtx, a_iCompBit, a_PtrType) \
896 __extension__ (\
897 { \
898 AssertCompile((a_iCompBit) < 64U); \
899 AssertMsg((a_pCtx)->fXStateMask & RT_BIT_64(a_iCompBit), (#a_iCompBit "\n")); \
900 AssertMsg((a_pCtx)->aoffXState[(a_iCompBit)] != UINT16_MAX, (#a_iCompBit "\n")); \
901 (a_PtrType)((uint8_t *)(a_pCtx)->CTX_SUFF(pXState) + (a_pCtx)->aoffXState[(a_iCompBit)]); \
902 })
903#else
904# define CPUMCTX_XSAVE_C_PTR(a_pCtx, a_iCompBit, a_PtrType) \
905 ((a_PtrType)((uint8_t *)(a_pCtx)->CTX_SUFF(pXState) + (a_pCtx)->aoffXState[(a_iCompBit)]))
906#endif
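/* Illustrative sketch (not part of the original header): fetching the YMM high
 * halves from the extended state area via the macro above.  It assumes the
 * caller has already checked that XSAVE_C_YMM is set in CPUMCTX::fXStateMask
 * (the strict variants assert this). */
#if 0
    PX86XSAVEYMMHI pYmmHi  = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
    RTUINT128U     uYmm0Hi = pYmmHi->aYmmHi[0];
#endif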
907
908/**
909 * Gets the CPUMCTXCORE part of a CPUMCTX.
910 */
911# define CPUMCTX2CORE(pCtx) ((PCPUMCTXCORE)(void *)&(pCtx)->rax)
912
913/**
914 * Gets the CPUMCTX part from a CPUMCTXCORE.
915 */
916# define CPUMCTX_FROM_CORE(a_pCtxCore) RT_FROM_MEMBER(a_pCtxCore, CPUMCTX, rax)
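/* Illustrative sketch (not part of the original header): round-tripping between
 * the full context and its (deprecated) core part with the two macros above. */
#if 0
    PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(pCtx);
    PCPUMCTX     pCtxBack = CPUMCTX_FROM_CORE(pCtxCore);
    Assert(pCtxBack == pCtx);
#endif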
917
918/**
919 * Gets the first selector register of a CPUMCTX.
920 *
921 * Use this with X86_SREG_COUNT to loop thru the selector registers.
922 */
923# define CPUMCTX_FIRST_SREG(a_pCtx) (&(a_pCtx)->es)
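/* Illustrative sketch (not part of the original header): walking all six segment
 * registers starting at ES, as the comment above suggests. */
#if 0
    CPUMSELREG const *pSReg    = CPUMCTX_FIRST_SREG(pCtx);
    uint64_t          cbLimits = 0;
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++, pSReg++)
        cbLimits += pSReg->u32Limit;
#endif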
924
925#endif /* !VBOX_FOR_DTRACE_LIB */
926
927
928/** @name CPUMCTX_WSF_XXX
929 * @{ */
930/** Touch IA32_PRED_CMD.IBPB on VM exit. */
931#define CPUMCTX_WSF_IBPB_EXIT RT_BIT_32(0)
932/** Touch IA32_PRED_CMD.IBPB on VM entry. */
933#define CPUMCTX_WSF_IBPB_ENTRY RT_BIT_32(1)
934/** @} */
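/* Illustrative sketch (not part of the original header): how the world switcher
 * flags in CPUMCTX::fWorldSwitcher are meant to be consumed, assuming the
 * IA32_PRED_CMD constants from iprt/x86.h. */
#if 0
    if (pCtx->fWorldSwitcher & CPUMCTX_WSF_IBPB_EXIT)
        ASMWrMsr(MSR_IA32_PRED_CMD, MSR_IA32_PRED_CMD_F_IBPB);
#endif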
935
936/** @name CPUMCTX_EXTRN_XXX
 937 * Used for parts of the CPUM state that are externalized and need fetching
 938 * before use (see the illustrative sketch after this group).
939 *
940 * @{ */
941/** External state keeper: Invalid. */
942#define CPUMCTX_EXTRN_KEEPER_INVALID UINT64_C(0x0000000000000000)
943/** External state keeper: HM. */
944#define CPUMCTX_EXTRN_KEEPER_HM UINT64_C(0x0000000000000001)
945/** External state keeper: NEM. */
946#define CPUMCTX_EXTRN_KEEPER_NEM UINT64_C(0x0000000000000002)
947/** External state keeper: REM. */
948#define CPUMCTX_EXTRN_KEEPER_REM UINT64_C(0x0000000000000003)
949/** External state keeper mask. */
950#define CPUMCTX_EXTRN_KEEPER_MASK UINT64_C(0x0000000000000003)
951
952/** The RIP register value is kept externally. */
953#define CPUMCTX_EXTRN_RIP UINT64_C(0x0000000000000004)
954/** The RFLAGS register values are kept externally. */
955#define CPUMCTX_EXTRN_RFLAGS UINT64_C(0x0000000000000008)
956
957/** The RAX register value is kept externally. */
958#define CPUMCTX_EXTRN_RAX UINT64_C(0x0000000000000010)
959/** The RCX register value is kept externally. */
960#define CPUMCTX_EXTRN_RCX UINT64_C(0x0000000000000020)
961/** The RDX register value is kept externally. */
962#define CPUMCTX_EXTRN_RDX UINT64_C(0x0000000000000040)
963/** The RBX register value is kept externally. */
964#define CPUMCTX_EXTRN_RBX UINT64_C(0x0000000000000080)
965/** The RSP register value is kept externally. */
966#define CPUMCTX_EXTRN_RSP UINT64_C(0x0000000000000100)
967/** The RBP register value is kept externally. */
968#define CPUMCTX_EXTRN_RBP UINT64_C(0x0000000000000200)
969/** The RSI register value is kept externally. */
970#define CPUMCTX_EXTRN_RSI UINT64_C(0x0000000000000400)
971/** The RDI register value is kept externally. */
972#define CPUMCTX_EXTRN_RDI UINT64_C(0x0000000000000800)
973/** The R8 thru R15 register values are kept externally. */
974#define CPUMCTX_EXTRN_R8_R15 UINT64_C(0x0000000000001000)
975/** General purpose registers mask. */
976#define CPUMCTX_EXTRN_GPRS_MASK UINT64_C(0x0000000000001ff0)
977
978/** The ES register values are kept externally. */
979#define CPUMCTX_EXTRN_ES UINT64_C(0x0000000000002000)
980/** The CS register values are kept externally. */
981#define CPUMCTX_EXTRN_CS UINT64_C(0x0000000000004000)
982/** The SS register values are kept externally. */
983#define CPUMCTX_EXTRN_SS UINT64_C(0x0000000000008000)
984/** The DS register values are kept externally. */
985#define CPUMCTX_EXTRN_DS UINT64_C(0x0000000000010000)
986/** The FS register values are kept externally. */
987#define CPUMCTX_EXTRN_FS UINT64_C(0x0000000000020000)
988/** The GS register values are kept externally. */
989#define CPUMCTX_EXTRN_GS UINT64_C(0x0000000000040000)
990/** Segment registers (includes CS). */
991#define CPUMCTX_EXTRN_SREG_MASK UINT64_C(0x000000000007e000)
 992/** Converts an X86_SREG_XXX index to a CPUMCTX_EXTRN_xS mask. */
993#define CPUMCTX_EXTRN_SREG_FROM_IDX(a_SRegIdx) RT_BIT_64((a_SRegIdx) + 13)
994#ifndef VBOX_FOR_DTRACE_LIB
995AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_ES) == CPUMCTX_EXTRN_ES);
996AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_CS) == CPUMCTX_EXTRN_CS);
997AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_DS) == CPUMCTX_EXTRN_DS);
998AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_FS) == CPUMCTX_EXTRN_FS);
999AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_GS) == CPUMCTX_EXTRN_GS);
1000#endif
1001
1002/** The GDTR register values are kept externally. */
1003#define CPUMCTX_EXTRN_GDTR UINT64_C(0x0000000000080000)
1004/** The IDTR register values are kept externally. */
1005#define CPUMCTX_EXTRN_IDTR UINT64_C(0x0000000000100000)
1006/** The LDTR register values are kept externally. */
1007#define CPUMCTX_EXTRN_LDTR UINT64_C(0x0000000000200000)
1008/** The TR register values are kept externally. */
1009#define CPUMCTX_EXTRN_TR UINT64_C(0x0000000000400000)
1010/** Table register mask. */
1011#define CPUMCTX_EXTRN_TABLE_MASK UINT64_C(0x0000000000780000)
1012
1013/** The CR0 register value is kept externally. */
1014#define CPUMCTX_EXTRN_CR0 UINT64_C(0x0000000000800000)
1015/** The CR2 register value is kept externally. */
1016#define CPUMCTX_EXTRN_CR2 UINT64_C(0x0000000001000000)
1017/** The CR3 register value is kept externally. */
1018#define CPUMCTX_EXTRN_CR3 UINT64_C(0x0000000002000000)
1019/** The CR4 register value is kept externally. */
1020#define CPUMCTX_EXTRN_CR4 UINT64_C(0x0000000004000000)
1021/** Control register mask. */
1022#define CPUMCTX_EXTRN_CR_MASK UINT64_C(0x0000000007800000)
1023/** The TPR/CR8 register value is kept externally. */
1024#define CPUMCTX_EXTRN_APIC_TPR UINT64_C(0x0000000008000000)
1025/** The EFER register value is kept externally. */
1026#define CPUMCTX_EXTRN_EFER UINT64_C(0x0000000010000000)
1027
1028/** The DR0, DR1, DR2 and DR3 register values are kept externally. */
1029#define CPUMCTX_EXTRN_DR0_DR3 UINT64_C(0x0000000020000000)
1030/** The DR6 register value is kept externally. */
1031#define CPUMCTX_EXTRN_DR6 UINT64_C(0x0000000040000000)
1032/** The DR7 register value is kept externally. */
1033#define CPUMCTX_EXTRN_DR7 UINT64_C(0x0000000080000000)
1034/** Debug register mask. */
1035#define CPUMCTX_EXTRN_DR_MASK UINT64_C(0x00000000e0000000)
1036
1037/** The XSAVE_C_X87 state is kept externally. */
1038#define CPUMCTX_EXTRN_X87 UINT64_C(0x0000000100000000)
1039/** The XSAVE_C_SSE, XSAVE_C_YMM, XSAVE_C_ZMM_HI256, XSAVE_C_ZMM_16HI and
1040 * XSAVE_C_OPMASK state is kept externally. */
1041#define CPUMCTX_EXTRN_SSE_AVX UINT64_C(0x0000000200000000)
1042/** The state of XSAVE components not covered by CPUMCTX_EXTRN_X87 and
 1043 * CPUMCTX_EXTRN_SSE_AVX is kept externally. */
1044#define CPUMCTX_EXTRN_OTHER_XSAVE UINT64_C(0x0000000400000000)
 1045/** The XCR0 and XCR1 register values are kept externally. */
1046#define CPUMCTX_EXTRN_XCRx UINT64_C(0x0000000800000000)
1047
1048
1049/** The KERNEL GS BASE MSR value is kept externally. */
1050#define CPUMCTX_EXTRN_KERNEL_GS_BASE UINT64_C(0x0000001000000000)
1051/** The STAR, LSTAR, CSTAR and SFMASK MSR values are kept externally. */
1052#define CPUMCTX_EXTRN_SYSCALL_MSRS UINT64_C(0x0000002000000000)
1053/** The SYSENTER_CS, SYSENTER_EIP and SYSENTER_ESP MSR values are kept externally. */
1054#define CPUMCTX_EXTRN_SYSENTER_MSRS UINT64_C(0x0000004000000000)
1055/** The TSC_AUX MSR is kept externally. */
1056#define CPUMCTX_EXTRN_TSC_AUX UINT64_C(0x0000008000000000)
1057/** All other stateful MSRs not covered by CPUMCTX_EXTRN_EFER,
1058 * CPUMCTX_EXTRN_KERNEL_GS_BASE, CPUMCTX_EXTRN_SYSCALL_MSRS,
1059 * CPUMCTX_EXTRN_SYSENTER_MSRS, and CPUMCTX_EXTRN_TSC_AUX. */
1060#define CPUMCTX_EXTRN_OTHER_MSRS UINT64_C(0x0000010000000000)
1061
1062/** Mask of all the MSRs. */
1063#define CPUMCTX_EXTRN_ALL_MSRS ( CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS \
1064 | CPUMCTX_EXTRN_SYSENTER_MSRS | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)
1065
1066/** Hardware-virtualization (SVM or VMX) state is kept externally. */
1067#define CPUMCTX_EXTRN_HWVIRT UINT64_C(0x0000020000000000)
1068
1069/** Mask of bits the keepers can use for state tracking. */
1070#define CPUMCTX_EXTRN_KEEPER_STATE_MASK UINT64_C(0xffff000000000000)
1071
 1072/** NEM/Win: Event injection (also known as interruption) pending state. */
1073#define CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT UINT64_C(0x0001000000000000)
1074/** NEM/Win: Inhibit maskable interrupts (VMCPU_FF_INHIBIT_INTERRUPTS). */
1075#define CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT UINT64_C(0x0002000000000000)
1076/** NEM/Win: Inhibit non-maskable interrupts (VMCPU_FF_BLOCK_NMIS). */
1077#define CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI UINT64_C(0x0004000000000000)
1078/** NEM/Win: Mask. */
1079#define CPUMCTX_EXTRN_NEM_WIN_MASK UINT64_C(0x0007000000000000)
1080
1081/** HM/SVM: Inhibit maskable interrupts (VMCPU_FF_INHIBIT_INTERRUPTS). */
1082#define CPUMCTX_EXTRN_HM_SVM_INT_SHADOW UINT64_C(0x0001000000000000)
1083/** HM/SVM: Nested-guest interrupt pending (VMCPU_FF_INTERRUPT_NESTED_GUEST). */
1084#define CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ UINT64_C(0x0002000000000000)
1085/** HM/SVM: Mask. */
1086#define CPUMCTX_EXTRN_HM_SVM_MASK UINT64_C(0x0003000000000000)
1087
1088/** HM/VMX: Guest-interruptibility state (VMCPU_FF_INHIBIT_INTERRUPTS,
1089 * VMCPU_FF_BLOCK_NMIS). */
1090#define CPUMCTX_EXTRN_HM_VMX_INT_STATE UINT64_C(0x0001000000000000)
1091/** HM/VMX: Mask. */
1092#define CPUMCTX_EXTRN_HM_VMX_MASK UINT64_C(0x0001000000000000)
1093
1094/** All CPUM state bits, not including keeper specific ones. */
1095#define CPUMCTX_EXTRN_ALL UINT64_C(0x000003fffffffffc)
1096/** All CPUM state bits, including keeper specific ones. */
1097#define CPUMCTX_EXTRN_ABSOLUTELY_ALL UINT64_C(0xfffffffffffffffc)
1098/** @} */
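/* Illustrative sketch (not part of the original header): the usual pattern for
 * consuming CPUMCTX::fExtrn.  A set bit means the keeper (HM, NEM or REM) still
 * owns that piece of state, so it has to be imported before the copy in CPUMCTX
 * is read; CPUMImportGuestStateOnDemand is assumed to be the importer here. */
#if 0
    int rc = VINF_SUCCESS;
    if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
        rc = CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_RIP);
    /* On success pCtx->rip now holds the up-to-date value. */
#endif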
1099
1100
1101/**
1102 * Additional guest MSRs (i.e. not part of the CPU context structure).
1103 *
 1104 * @remarks Never change the order here because of the saved state! The size
1105 * can in theory be changed, but keep older VBox versions in mind.
1106 */
1107typedef union CPUMCTXMSRS
1108{
1109 struct
1110 {
1111 uint64_t TscAux; /**< MSR_K8_TSC_AUX */
1112 uint64_t MiscEnable; /**< MSR_IA32_MISC_ENABLE */
1113 uint64_t MtrrDefType; /**< IA32_MTRR_DEF_TYPE */
 1114 uint64_t MtrrFix64K_00000; /**< IA32_MTRR_FIX64K_00000 */
1115 uint64_t MtrrFix16K_80000; /**< IA32_MTRR_FIX16K_80000 */
1116 uint64_t MtrrFix16K_A0000; /**< IA32_MTRR_FIX16K_A0000 */
1117 uint64_t MtrrFix4K_C0000; /**< IA32_MTRR_FIX4K_C0000 */
1118 uint64_t MtrrFix4K_C8000; /**< IA32_MTRR_FIX4K_C8000 */
1119 uint64_t MtrrFix4K_D0000; /**< IA32_MTRR_FIX4K_D0000 */
1120 uint64_t MtrrFix4K_D8000; /**< IA32_MTRR_FIX4K_D8000 */
1121 uint64_t MtrrFix4K_E0000; /**< IA32_MTRR_FIX4K_E0000 */
1122 uint64_t MtrrFix4K_E8000; /**< IA32_MTRR_FIX4K_E8000 */
1123 uint64_t MtrrFix4K_F0000; /**< IA32_MTRR_FIX4K_F0000 */
1124 uint64_t MtrrFix4K_F8000; /**< IA32_MTRR_FIX4K_F8000 */
1125 uint64_t PkgCStateCfgCtrl; /**< MSR_PKG_CST_CONFIG_CONTROL */
1126 uint64_t SpecCtrl; /**< IA32_SPEC_CTRL */
1127 uint64_t ArchCaps; /**< IA32_ARCH_CAPABILITIES */
1128 } msr;
1129 uint64_t au64[64];
1130} CPUMCTXMSRS;
1131/** Pointer to the guest MSR state. */
1132typedef CPUMCTXMSRS *PCPUMCTXMSRS;
1133/** Pointer to the const guest MSR state. */
1134typedef const CPUMCTXMSRS *PCCPUMCTXMSRS;
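/* Illustrative sketch (not part of the original header): the union lets the
 * saved-state code treat the additional MSRs as a flat 64-entry array while
 * named access is used elsewhere; both lines below refer to the same slot.
 * pMsrs is assumed to be a valid PCPUMCTXMSRS. */
#if 0
    uint64_t uTscAuxNamed = pMsrs->msr.TscAux;
    uint64_t uTscAuxRaw   = pMsrs->au64[0];  /* TscAux is the first entry. */
#endif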
1135
1136/**
1137 * The register set returned by a CPUID operation.
1138 */
1139typedef struct CPUMCPUID
1140{
1141 uint32_t uEax;
1142 uint32_t uEbx;
1143 uint32_t uEcx;
1144 uint32_t uEdx;
1145} CPUMCPUID;
1146/** Pointer to a CPUID leaf. */
1147typedef CPUMCPUID *PCPUMCPUID;
1148/** Pointer to a const CPUID leaf. */
1149typedef const CPUMCPUID *PCCPUMCPUID;
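/* Illustrative sketch (not part of the original header): filling a CPUMCPUID
 * leaf straight from the host CPU with IPRT's ASMCpuId (iprt/asm-amd64-x86.h,
 * x86/amd64 hosts only); guest leaves are normally obtained through the CPUM
 * API instead. */
#if 0
    CPUMCPUID HostLeaf1;
    ASMCpuId(1, &HostLeaf1.uEax, &HostLeaf1.uEbx, &HostLeaf1.uEcx, &HostLeaf1.uEdx);
#endif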
1150
1151/** @} */
1152
1153RT_C_DECLS_END
1154
1155#endif
1156