VirtualBox

source: vbox/trunk/include/VBox/vmm/cpumctx.h@78632

Last change on this file since 78632 was 78632, checked in by vboxsync, 6 years ago

Forward ported 130474,130475,130477,130479. bugref:9453

1/** @file
2 * CPUM - CPU Monitor(/ Manager), Context Structures.
3 */
4
5/*
6 * Copyright (C) 2006-2019 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef VBOX_INCLUDED_vmm_cpumctx_h
27#define VBOX_INCLUDED_vmm_cpumctx_h
28#ifndef RT_WITHOUT_PRAGMA_ONCE
29# pragma once
30#endif
31
32#ifndef VBOX_FOR_DTRACE_LIB
33# include <iprt/x86.h>
34# include <VBox/types.h>
35# include <VBox/vmm/hm_svm.h>
36# include <VBox/vmm/hm_vmx.h>
37#else
38# pragma D depends_on library x86.d
39#endif
40
41
42RT_C_DECLS_BEGIN
43
44/** @defgroup grp_cpum_ctx The CPUM Context Structures
45 * @ingroup grp_cpum
46 * @{
47 */
48
49/**
50 * Selector hidden registers.
51 */
52typedef struct CPUMSELREG
53{
54 /** The selector register. */
55 RTSEL Sel;
56 /** Padding, don't use. */
57 RTSEL PaddingSel;
58 /** The selector whose info resides in u64Base, u32Limit and Attr, provided
59 * that CPUMSELREG_FLAGS_VALID is set. */
60 RTSEL ValidSel;
61 /** Flags, see CPUMSELREG_FLAGS_XXX. */
62 uint16_t fFlags;
63
64 /** Base register.
65 *
66 * Long mode remarks:
67 * - Unused in long mode for CS, DS, ES, SS
68 * - 32 bits for FS & GS; FS(GS)_BASE msr used for the base address
69 * - 64 bits for TR & LDTR
70 */
71 uint64_t u64Base;
72 /** Limit (expanded). */
73 uint32_t u32Limit;
74 /** Flags.
75 * This is the high 32-bit word of the descriptor entry.
76 * Only the flags, dpl and type are used. */
77 X86DESCATTR Attr;
78} CPUMSELREG;
79#ifndef VBOX_FOR_DTRACE_LIB
80AssertCompileSize(CPUMSELREG, 24);
81#endif
82
83/** @name CPUMSELREG_FLAGS_XXX - CPUMSELREG::fFlags values.
84 * @{ */
85#define CPUMSELREG_FLAGS_VALID UINT16_C(0x0001)
86#define CPUMSELREG_FLAGS_STALE UINT16_C(0x0002)
87#define CPUMSELREG_FLAGS_VALID_MASK UINT16_C(0x0003)
88/** @} */
89
90/** Checks if the hidden parts of the selector register are valid. */
91#ifdef VBOX_WITH_RAW_MODE_NOT_R0
92# define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSelReg) \
93 ( ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \
94 && ( (a_pSelReg)->ValidSel == (a_pSelReg)->Sel \
95 || ( (a_pVCpu) /*!= NULL*/ \
96 && (a_pSelReg)->ValidSel == ((a_pSelReg)->Sel & X86_SEL_MASK_OFF_RPL) \
97 && ((a_pSelReg)->Sel & X86_SEL_RPL) == 1 \
98 && ((a_pSelReg)->ValidSel & X86_SEL_RPL) == 0 \
99 && CPUMIsGuestInRawMode(a_pVCpu) \
100 ) \
101 ) \
102 )
103#else
104# define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSelReg) \
105 ( ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \
106 && (a_pSelReg)->ValidSel == (a_pSelReg)->Sel )
107#endif
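/*
 * Editorial usage sketch (not part of the original header): guarding access to
 * the cached hidden fields of a guest segment register.  The pVCpu and pCtx
 * variables are assumed to be supplied by the caller.
 *
 *     if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
 *         GCPtrCode = pCtx->cs.u64Base + pCtx->rip;    // cached base/limit/attr are usable
 *     else
 *         ...                                          // reload the hidden parts from the descriptor tables first
 */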
108
109/** Old type used for the hidden register part.
110 * @deprecated */
111typedef CPUMSELREG CPUMSELREGHID;
112
113/**
114 * The sysenter register set.
115 */
116typedef struct CPUMSYSENTER
117{
118 /** Ring 0 cs.
119 * This value + 8 is the Ring 0 ss.
120 * This value + 16 is the Ring 3 cs.
121 * This value + 24 is the Ring 3 ss.
122 */
123 uint64_t cs;
124 /** Ring 0 eip. */
125 uint64_t eip;
126 /** Ring 0 esp. */
127 uint64_t esp;
128} CPUMSYSENTER;
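/*
 * Editorial note (not part of the original header): the fixed +8/+16/+24 layout
 * documented above is how SYSENTER/SYSEXIT derive the remaining selectors from
 * IA32_SYSENTER_CS, e.g. (pCtx is assumed to be a CPUMCTX pointer):
 *
 *     uint16_t uRing0Cs = (uint16_t)pCtx->SysEnter.cs;
 *     uint16_t uRing0Ss = uRing0Cs + 8;
 *     uint16_t uRing3Cs = uRing0Cs + 16;
 *     uint16_t uRing3Ss = uRing0Cs + 24;
 */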
129
130/** @def CPUM_UNION_NM
131 * For compilers (like DTrace) that do not grok nameless unions, we have a
132 * little hack to make them palatable.
133 */
134/** @def CPUM_STRUCT_NM
135 * For compilers (like DTrace) that do not grok nameless structs (they are
136 * non-standard C++), we have a little hack to make them palatable.
137 */
138#ifdef VBOX_FOR_DTRACE_LIB
139# define CPUM_UNION_NM(a_Nm) a_Nm
140# define CPUM_STRUCT_NM(a_Nm) a_Nm
141#elif defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS)
142# define CPUM_UNION_NM(a_Nm) a_Nm
143# define CPUM_STRUCT_NM(a_Nm) a_Nm
144#else
145# define CPUM_UNION_NM(a_Nm)
146# define CPUM_STRUCT_NM(a_Nm)
147#endif
148/** @def CPUM_UNION_STRUCT_NM
149 * Combines CPUM_UNION_NM and CPUM_STRUCT_NM to avoid hitting the right side of
150 * the screen in the compile time assertions.
151 */
152#define CPUM_UNION_STRUCT_NM(a_UnionNm, a_StructNm) CPUM_UNION_NM(a_UnionNm .) CPUM_STRUCT_NM(a_StructNm)
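/*
 * Editorial usage sketch (not part of the original header): in the default build
 * the unions/structs below are nameless and members are reached directly, while
 * DTrace and IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS builds must spell out the
 * names these macros insert (pCtx is assumed to be a CPUMCTX pointer):
 *
 *     pCtx->rax      = 0;     // nameless union/struct build
 *     pCtx->g.qw.rax = 0;     // named build, same storage
 */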
153
154/** A general register (union). */
155typedef union CPUMCTXGREG
156{
157 /** Natural unsigned integer view. */
158 uint64_t u;
159 /** 64-bit view. */
160 uint64_t u64;
161 /** 32-bit view. */
162 uint32_t u32;
163 /** 16-bit view. */
164 uint16_t u16;
165 /** 8-bit view. */
166 uint8_t u8;
167 /** 8-bit low/high view. */
168 RT_GCC_EXTENSION struct
169 {
170 /** Low byte (al, cl, dl, bl, ++). */
171 uint8_t bLo;
172 /** High byte in the first word - ah, ch, dh, bh. */
173 uint8_t bHi;
174 } CPUM_STRUCT_NM(s);
175} CPUMCTXGREG;
176#ifndef VBOX_FOR_DTRACE_LIB
177AssertCompileSize(CPUMCTXGREG, 8);
178AssertCompileMemberOffset(CPUMCTXGREG, CPUM_STRUCT_NM(s.) bLo, 0);
179AssertCompileMemberOffset(CPUMCTXGREG, CPUM_STRUCT_NM(s.) bHi, 1);
180#endif
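/*
 * Editorial usage sketch (not part of the original header): all CPUMCTXGREG views
 * alias the same 8 bytes, so a write through one view is visible through the
 * others (uGReg is an illustrative local; x86 is little endian):
 *
 *     CPUMCTXGREG uGReg;
 *     uGReg.u64 = UINT64_C(0x1122334455667788);
 *     uint32_t u32Low = uGReg.u32;   // 0x55667788
 *     uint8_t  bLow   = uGReg.bLo;   // 0x88 (al-style access; uGReg.s.bLo in named-struct builds)
 *     uint8_t  bHigh  = uGReg.bHi;   // 0x77 (ah-style access)
 */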
181
182
183
184/**
185 * CPU context core.
186 *
187 * @todo Eliminate this structure!
188 * @deprecated We don't push any context cores any more in TRPM.
189 */
190#pragma pack(1)
191typedef struct CPUMCTXCORE
192{
193 /** @name General registers.
194 * @note These follow the encoding order (X86_GREG_XXX) and can be accessed as
195 * an array starting at rax.
196 * @{ */
197 union
198 {
199 uint8_t al;
200 uint16_t ax;
201 uint32_t eax;
202 uint64_t rax;
203 } CPUM_UNION_NM(rax);
204 union
205 {
206 uint8_t cl;
207 uint16_t cx;
208 uint32_t ecx;
209 uint64_t rcx;
210 } CPUM_UNION_NM(rcx);
211 union
212 {
213 uint8_t dl;
214 uint16_t dx;
215 uint32_t edx;
216 uint64_t rdx;
217 } CPUM_UNION_NM(rdx);
218 union
219 {
220 uint8_t bl;
221 uint16_t bx;
222 uint32_t ebx;
223 uint64_t rbx;
224 } CPUM_UNION_NM(rbx);
225 union
226 {
227 uint16_t sp;
228 uint32_t esp;
229 uint64_t rsp;
230 } CPUM_UNION_NM(rsp);
231 union
232 {
233 uint16_t bp;
234 uint32_t ebp;
235 uint64_t rbp;
236 } CPUM_UNION_NM(rbp);
237 union
238 {
239 uint8_t sil;
240 uint16_t si;
241 uint32_t esi;
242 uint64_t rsi;
243 } CPUM_UNION_NM(rsi);
244 union
245 {
246 uint8_t dil;
247 uint16_t di;
248 uint32_t edi;
249 uint64_t rdi;
250 } CPUM_UNION_NM(rdi);
251 uint64_t r8;
252 uint64_t r9;
253 uint64_t r10;
254 uint64_t r11;
255 uint64_t r12;
256 uint64_t r13;
257 uint64_t r14;
258 uint64_t r15;
259 /** @} */
260
261 /** @name Segment registers.
262 * @note These follow the encoding order (X86_SREG_XXX) and can be accessed as
263 * an array starting at es.
264 * @{ */
265 CPUMSELREG es;
266 CPUMSELREG cs;
267 CPUMSELREG ss;
268 CPUMSELREG ds;
269 CPUMSELREG fs;
270 CPUMSELREG gs;
271 /** @} */
272
273 /** The program counter. */
274 union
275 {
276 uint16_t ip;
277 uint32_t eip;
278 uint64_t rip;
279 } CPUM_UNION_NM(rip);
280
281 /** The flags register. */
282 union
283 {
284 X86EFLAGS eflags;
285 X86RFLAGS rflags;
286 } CPUM_UNION_NM(rflags);
287
288} CPUMCTXCORE;
289#pragma pack()
290
291
292/**
293 * SVM Host-state area (Nested Hw.virt - VirtualBox's layout).
294 *
295 * @warning Exercise caution while modifying the layout of this struct. It's
296 * part of VM saved states.
297 */
298#pragma pack(1)
299typedef struct SVMHOSTSTATE
300{
301 uint64_t uEferMsr;
302 uint64_t uCr0;
303 uint64_t uCr4;
304 uint64_t uCr3;
305 uint64_t uRip;
306 uint64_t uRsp;
307 uint64_t uRax;
308 X86RFLAGS rflags;
309 CPUMSELREG es;
310 CPUMSELREG cs;
311 CPUMSELREG ss;
312 CPUMSELREG ds;
313 VBOXGDTR gdtr;
314 VBOXIDTR idtr;
315 uint8_t abPadding[4];
316} SVMHOSTSTATE;
317#pragma pack()
318/** Pointer to the SVMHOSTSTATE structure. */
319typedef SVMHOSTSTATE *PSVMHOSTSTATE;
320/** Pointer to a const SVMHOSTSTATE structure. */
321typedef const SVMHOSTSTATE *PCSVMHOSTSTATE;
322#ifndef VBOX_FOR_DTRACE_LIB
323AssertCompileSizeAlignment(SVMHOSTSTATE, 8);
324AssertCompileSize(SVMHOSTSTATE, 184);
325#endif
326
327
328/**
329 * CPU hardware virtualization types.
330 */
331typedef enum
332{
333 CPUMHWVIRT_NONE = 0,
334 CPUMHWVIRT_VMX,
335 CPUMHWVIRT_SVM,
336 CPUMHWVIRT_32BIT_HACK = 0x7fffffff
337} CPUMHWVIRT;
338#ifndef VBOX_FOR_DTRACE_LIB
339AssertCompileSize(CPUMHWVIRT, 4);
340#endif
341
342
343/**
344 * CPU context.
345 */
346#pragma pack(1) /* for VBOXIDTR / VBOXGDTR. */
347typedef struct CPUMCTX
348{
349 /** CPUMCTXCORE Part.
350 * @{ */
351
352 /** General purpose registers. */
353 union /* no tag! */
354 {
355 /** The general purpose register array view, indexed by X86_GREG_XXX. */
356 CPUMCTXGREG aGRegs[16];
357
358 /** 64-bit general purpose register view. */
359 RT_GCC_EXTENSION struct /* no tag! */
360 {
361 uint64_t rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15;
362 } CPUM_STRUCT_NM(qw);
363 /** 64-bit general purpose register view, alternative r0..r7 (encoding order) naming. */
364 RT_GCC_EXTENSION struct /* no tag! */
365 {
366 uint64_t r0, r1, r2, r3, r4, r5, r6, r7;
367 } CPUM_STRUCT_NM(qw2);
368 /** 32-bit general purpose register view. */
369 RT_GCC_EXTENSION struct /* no tag! */
370 {
371 uint32_t eax, u32Pad00, ecx, u32Pad01, edx, u32Pad02, ebx, u32Pad03,
372 esp, u32Pad04, ebp, u32Pad05, esi, u32Pad06, edi, u32Pad07,
373 r8d, u32Pad08, r9d, u32Pad09, r10d, u32Pad10, r11d, u32Pad11,
374 r12d, u32Pad12, r13d, u32Pad13, r14d, u32Pad14, r15d, u32Pad15;
375 } CPUM_STRUCT_NM(dw);
376 /** 16-bit general purpose register view. */
377 RT_GCC_EXTENSION struct /* no tag! */
378 {
379 uint16_t ax, au16Pad00[3], cx, au16Pad01[3], dx, au16Pad02[3], bx, au16Pad03[3],
380 sp, au16Pad04[3], bp, au16Pad05[3], si, au16Pad06[3], di, au16Pad07[3],
381 r8w, au16Pad08[3], r9w, au16Pad09[3], r10w, au16Pad10[3], r11w, au16Pad11[3],
382 r12w, au16Pad12[3], r13w, au16Pad13[3], r14w, au16Pad14[3], r15w, au16Pad15[3];
383 } CPUM_STRUCT_NM(w);
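/** 8-bit general purpose register view. */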
384 RT_GCC_EXTENSION struct /* no tag! */
385 {
386 uint8_t al, ah, abPad00[6], cl, ch, abPad01[6], dl, dh, abPad02[6], bl, bh, abPad03[6],
387 spl, abPad04[7], bpl, abPad05[7], sil, abPad06[7], dil, abPad07[7],
388 r8l, abPad08[7], r9l, abPad09[7], r10l, abPad10[7], r11l, abPad11[7],
389 r12l, abPad12[7], r13l, abPad13[7], r14l, abPad14[7], r15l, abPad15[7];
390 } CPUM_STRUCT_NM(b);
391 } CPUM_UNION_NM(g);
392
393 /** Segment registers. */
394 union /* no tag! */
395 {
396 /** The segment register array view, indexed by X86_SREG_XXX. */
397 CPUMSELREG aSRegs[6];
398 /** The named segment register view. */
399 RT_GCC_EXTENSION struct /* no tag! */
400 {
401 CPUMSELREG es, cs, ss, ds, fs, gs;
402 } CPUM_STRUCT_NM(n);
403 } CPUM_UNION_NM(s);
404
405 /** The program counter. */
406 union
407 {
408 uint16_t ip;
409 uint32_t eip;
410 uint64_t rip;
411 } CPUM_UNION_NM(rip);
412
413 /** The flags register. */
414 union
415 {
416 X86EFLAGS eflags;
417 X86RFLAGS rflags;
418 } CPUM_UNION_NM(rflags);
419
420 /** @} */ /*(CPUMCTXCORE)*/
421
422
423 /** @name Control registers.
424 * @{ */
425 uint64_t cr0;
426 uint64_t cr2;
427 uint64_t cr3;
428 uint64_t cr4;
429 /** @} */
430
431 /** Debug registers.
432 * @remarks DR4 and DR5 should not be used since they are aliases for
433 * DR6 and DR7 respectively on both AMD and Intel CPUs.
434 * @remarks DR8-15 are currently not supported by AMD or Intel, so
435 * neither do we.
436 */
437 uint64_t dr[8];
438
439 /** Padding before the structure so the 64-bit member is correctly aligned.
440 * @todo fix this structure! */
441 uint16_t gdtrPadding[3];
442 /** Global Descriptor Table register. */
443 VBOXGDTR gdtr;
444
445 /** Padding before the structure so the 64-bit member is correctly aligned.
446 * @todo fix this structure! */
447 uint16_t idtrPadding[3];
448 /** Interrupt Descriptor Table register. */
449 VBOXIDTR idtr;
450
451 /** The local descriptor table register (LDTR).
452 * Only the guest context uses all the members. */
453 CPUMSELREG ldtr;
454 /** The task register.
455 * Only the guest context uses all the members. */
456 CPUMSELREG tr;
457
458 /** The sysenter msr registers.
459 * This member is not used by the hypervisor context. */
460 CPUMSYSENTER SysEnter;
461
462 /** @name System MSRs.
463 * @{ */
464 uint64_t msrEFER;
465 uint64_t msrSTAR; /**< Legacy syscall eip, cs & ss. */
466 uint64_t msrPAT; /**< Page attribute table. */
467 uint64_t msrLSTAR; /**< 64-bit mode syscall rip. */
468 uint64_t msrCSTAR; /**< Compatibility mode syscall rip. */
469 uint64_t msrSFMASK; /**< syscall flag mask. */
470 uint64_t msrKERNELGSBASE; /**< swapgs exchange value. */
471 uint64_t uMsrPadding0; /**< no longer used (used to hold a copy of APIC base MSR). */
472 /** @} */
473
474 /** The XCR0..XCR1 registers. */
475 uint64_t aXcr[2];
476 /** The mask to pass to XSAVE/XRSTOR in EDX:EAX. If zero we use
477 * FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
478 uint64_t fXStateMask;
479
480 /** Pointer to the FPU/SSE/AVX/XXXX state ring-0 mapping. */
481 R0PTRTYPE(PX86XSAVEAREA) pXStateR0;
482#if HC_ARCH_BITS == 32
483 uint32_t uXStateR0Padding;
484#endif
485 /** Pointer to the FPU/SSE/AVX/XXXX state ring-3 mapping. */
486 R3PTRTYPE(PX86XSAVEAREA) pXStateR3;
487#if HC_ARCH_BITS == 32
488 uint32_t uXStateR3Padding;
489#endif
490 /** Pointer to the FPU/SSE/AVX/XXXX state raw-mode mapping. */
491 RCPTRTYPE(PX86XSAVEAREA) pXStateRC;
492 /** State component offsets into pXState, UINT16_MAX if not present. */
493 uint16_t aoffXState[64];
494
495 /** 0x2d4 - World switcher flags, CPUMCTX_WSF_XXX. */
496 uint32_t fWorldSwitcher;
497 /** 0x2d8 - Externalized state tracker, CPUMCTX_EXTRN_XXX.
498 * Currently only used internally in NEM/win. */
499 uint64_t fExtrn;
500
501 /** 0x2e0 - Hardware virtualization state. */
502 struct
503 {
504 union /* no tag! */
505 {
506 struct
507 {
508 /** 0x2e0 - MSR holding physical address of the Guest's Host-state. */
509 uint64_t uMsrHSavePa;
510 /** 0x2e8 - Guest physical address of the nested-guest VMCB. */
511 RTGCPHYS GCPhysVmcb;
512 /** 0x2f0 - Cache of the nested-guest VMCB - R0 ptr. */
513 R0PTRTYPE(PSVMVMCB) pVmcbR0;
514#if HC_ARCH_BITS == 32
515 uint32_t uVmcbR0Padding;
516#endif
517 /** 0x2f8 - Cache of the nested-guest VMCB - R3 ptr. */
518 R3PTRTYPE(PSVMVMCB) pVmcbR3;
519#if HC_ARCH_BITS == 32
520 uint32_t uVmcbR3Padding;
521#endif
522 /** 0x300 - Guest's host-state save area. */
523 SVMHOSTSTATE HostState;
524 /** 0x3b8 - Guest TSC time-stamp of when the previous PAUSE instr. was executed. */
525 uint64_t uPrevPauseTick;
526 /** 0x3c0 - Pause filter count. */
527 uint16_t cPauseFilter;
528 /** 0x3c2 - Pause filter threshold. */
529 uint16_t cPauseFilterThreshold;
530 /** 0x3c4 - Whether the injected event is subject to event intercepts. */
531 bool fInterceptEvents;
532 /** 0x3c5 - Padding. */
533 bool afPadding[3];
534 /** 0x3c8 - MSR permission bitmap - R0 ptr. */
535 R0PTRTYPE(void *) pvMsrBitmapR0;
536#if HC_ARCH_BITS == 32
537 uint32_t uvMsrBitmapR0Padding;
538#endif
539 /** 0x3d0 - MSR permission bitmap - R3 ptr. */
540 R3PTRTYPE(void *) pvMsrBitmapR3;
541#if HC_ARCH_BITS == 32
542 uint32_t uvMsrBitmapR3Padding;
543#endif
544 /** 0x3d8 - IO permission bitmap - R0 ptr. */
545 R0PTRTYPE(void *) pvIoBitmapR0;
546#if HC_ARCH_BITS == 32
547 uint32_t uIoBitmapR0Padding;
548#endif
549 /** 0x3e0 - IO permission bitmap - R3 ptr. */
550 R3PTRTYPE(void *) pvIoBitmapR3;
551#if HC_ARCH_BITS == 32
552 uint32_t uIoBitmapR3Padding;
553#endif
554 /** 0x3e8 - Host physical address of the nested-guest VMCB. */
555 RTHCPHYS HCPhysVmcb;
556 /** 0x3f0 - Padding. */
557 uint64_t au64Padding0[33];
558 } svm;
559
560 struct
561 {
562 /** 0x2e0 - Guest physical address of the VMXON region. */
563 RTGCPHYS GCPhysVmxon;
564 /** 0x2e8 - Guest physical address of the current VMCS pointer. */
565 RTGCPHYS GCPhysVmcs;
566 /** 0x2f0 - Guest physical address of the shadow VMCS pointer. */
567 RTGCPHYS GCPhysShadowVmcs;
568 /** 0x2f8 - Last emulated VMX instruction/VM-exit diagnostic. */
569 VMXVDIAG enmDiag;
570 /** 0x2fc - VMX abort reason. */
571 VMXABORT enmAbort;
572 /** 0x300 - VMX abort auxiliary information field. */
573 uint32_t uAbortAux;
574 /** 0x304 - Whether the guest is in VMX root mode. */
575 bool fInVmxRootMode;
576 /** 0x305 - Whether the guest is in VMX non-root mode. */
577 bool fInVmxNonRootMode;
578 /** 0x306 - Whether the injected events are subject to event intercepts. */
579 bool fInterceptEvents;
580 /** 0x307 - Whether blocking of NMI (or virtual-NMIs) was in effect in VMX non-root
581 * mode before execution of IRET. */
582 bool fNmiUnblockingIret;
583 /** 0x308 - The current VMCS - R0 ptr. */
584 R0PTRTYPE(PVMXVVMCS) pVmcsR0;
585#if HC_ARCH_BITS == 32
586 uint32_t uVmcsR0Padding;
587#endif
588 /** 0x310 - The current VMCS - R3 ptr. */
589 R3PTRTYPE(PVMXVVMCS) pVmcsR3;
590#if HC_ARCH_BITS == 32
591 uint32_t uVmcsR3Padding;
592#endif
593 /** 0x318 - The shadow VMCS - R0 ptr. */
594 R0PTRTYPE(PVMXVVMCS) pShadowVmcsR0;
595#if HC_ARCH_BITS == 32
596 uint32_t uShadowVmcsR0Padding;
597#endif
598 /** 0x320 - The shadow VMCS - R3 ptr. */
599 R3PTRTYPE(PVMXVVMCS) pShadowVmcsR3;
600#if HC_ARCH_BITS == 32
601 uint32_t uShadowVmcsR3Padding;
602#endif
603 /** 0x328 - Reserved - R0 ptr. */
604 R0PTRTYPE(void *) pvRsvdR0;
605#if HC_ARCH_BITS == 32
606 uint32_t uRsvdR0Padding0;
607#endif
608 /** 0x330 - Reserved - R3 ptr. */
609 R3PTRTYPE(void *) pvRsvdR3;
610#if HC_ARCH_BITS == 32
611 uint32_t uRsvdR3Padding0;
612#endif
613 /** 0x338 - The VMREAD bitmap - R0 ptr. */
614 R0PTRTYPE(void *) pvVmreadBitmapR0;
615#if HC_ARCH_BITS == 32
616 uint32_t uVmreadBitmapR0Padding;
617#endif
618 /** 0x340 - The VMREAD bitmap - R3 ptr. */
619 R3PTRTYPE(void *) pvVmreadBitmapR3;
620#if HC_ARCH_BITS == 32
621 uint32_t uVmreadBitmapR3Padding;
622#endif
623 /** 0x348 - The VMWRITE bitmap - R0 ptr. */
624 R0PTRTYPE(void *) pvVmwriteBitmapR0;
625#if HC_ARCH_BITS == 32
626 uint32_t uVmwriteBitmapR0Padding;
627#endif
628 /** 0x350 - The VMWRITE bitmap - R3 ptr. */
629 R3PTRTYPE(void *) pvVmwriteBitmapR3;
630#if HC_ARCH_BITS == 32
631 uint32_t uVmwriteBitmapR3Padding;
632#endif
633 /** 0x358 - The VM-entry MSR-load area - R0 ptr. */
634 R0PTRTYPE(PVMXAUTOMSR) pEntryMsrLoadAreaR0;
635#if HC_ARCH_BITS == 32
636 uint32_t uEntryMsrLoadAreaR0;
637#endif
638 /** 0x360 - The VM-entry MSR-load area - R3 ptr. */
639 R3PTRTYPE(PVMXAUTOMSR) pEntryMsrLoadAreaR3;
640#if HC_ARCH_BITS == 32
641 uint32_t uEntryMsrLoadAreaR3;
642#endif
643 /** 0x368 - The VM-exit MSR-store area - R0 ptr. */
644 R0PTRTYPE(PVMXAUTOMSR) pExitMsrStoreAreaR0;
645#if HC_ARCH_BITS == 32
646 uint32_t uExitMsrStoreAreaR0;
647#endif
648 /** 0x370 - The VM-exit MSR-store area - R3 ptr. */
649 R3PTRTYPE(PVMXAUTOMSR) pExitMsrStoreAreaR3;
650#if HC_ARCH_BITS == 32
651 uint32_t uExitMsrStoreAreaR3;
652#endif
653 /** 0x378 - The VM-exit MSR-load area - R0 ptr. */
654 R0PTRTYPE(PVMXAUTOMSR) pExitMsrLoadAreaR0;
655#if HC_ARCH_BITS == 32
656 uint32_t uExitMsrLoadAreaR0;
657#endif
658 /** 0x380 - The VM-exit MSR-load area - R3 ptr. */
659 R3PTRTYPE(PVMXAUTOMSR) pExitMsrLoadAreaR3;
660#if HC_ARCH_BITS == 32
661 uint32_t uExitMsrLoadAreaR3;
662#endif
663 /** 0x388 - MSR bitmap - R0 ptr. */
664 R0PTRTYPE(void *) pvMsrBitmapR0;
665#if HC_ARCH_BITS == 32
666 uint32_t uMsrBitmapR0;
667#endif
668 /** 0x390 - The MSR bitmap - R3 ptr. */
669 R3PTRTYPE(void *) pvMsrBitmapR3;
670#if HC_ARCH_BITS == 32
671 uint32_t uMsrBitmapR3;
672#endif
673 /** 0x398 - The I/O bitmap - R0 ptr. */
674 R0PTRTYPE(void *) pvIoBitmapR0;
675#if HC_ARCH_BITS == 32
676 uint32_t uIoBitmapR0;
677#endif
678 /** 0x3a0 - The I/O bitmap - R3 ptr. */
679 R3PTRTYPE(void *) pvIoBitmapR3;
680#if HC_ARCH_BITS == 32
681 uint32_t uIoBitmapR3;
682#endif
683 /** 0x3a8 - Guest TSC timestamp of the first PAUSE instruction that is considered to
684 * be the first in a loop. */
685 uint64_t uFirstPauseLoopTick;
686 /** 0x3b0 - Guest TSC timestamp of the previous PAUSE instruction. */
687 uint64_t uPrevPauseTick;
688 /** 0x3b8 - Guest TSC timestamp of VM-entry (used for VMX-preemption timer). */
689 uint64_t uEntryTick;
690 /** 0x3c0 - Virtual-APIC write offset (until trap-like VM-exit). */
691 uint16_t offVirtApicWrite;
692 /** 0x3c2 - Whether virtual-NMI blocking is in effect. */
693 bool fVirtNmiBlocking;
694 /** 0x3c3 - Padding. */
695 uint8_t abPadding0[5];
696 /** 0x3c8 - Guest VMX MSRs. */
697 VMXMSRS Msrs;
698 /** 0x4a8 - Host physical address of the VMCS. */
699 RTHCPHYS HCPhysVmcs;
700 /** 0x4b0 - Host physical address of the shadow VMCS. */
701 RTHCPHYS HCPhysShadowVmcs;
702 /** 0x4b8 - Host physical address of the virtual-APIC page. */
703 RTHCPHYS HCPhysRsvd0;
704 /** 0x4c0 - Host physical address of the VMREAD bitmap. */
705 RTHCPHYS HCPhysVmreadBitmap;
706 /** 0x4c8 - Host physical address of the VMWRITE bitmap. */
707 RTHCPHYS HCPhysVmwriteBitmap;
708 /** 0x4d0 - Host physical address of the VM-entry MSR-load area. */
709 RTHCPHYS HCPhysEntryMsrLoadArea;
710 /** 0x4d8 - Host physical address of the VM-exit MSR-store area. */
711 RTHCPHYS HCPhysExitMsrStoreArea;
712 /** 0x4e0 - Host physical address of the VM-exit MSR-load area. */
713 RTHCPHYS HCPhysExitMsrLoadArea;
714 /** 0x4e8 - Host physical address of the MSR bitmap. */
715 RTHCPHYS HCPhysMsrBitmap;
716 /** 0x4f0 - Host physical address of the I/O bitmap. */
717 RTHCPHYS HCPhysIoBitmap;
718 } vmx;
719 } CPUM_UNION_NM(s);
720
721 /** 0x4f8 - Hardware virtualization type currently in use. */
722 CPUMHWVIRT enmHwvirt;
723 /** 0x4fc - Global interrupt flag - AMD only (always true on Intel). */
724 bool fGif;
725 bool afPadding1[3];
726 /** 0x500 - A subset of guest force flags that are saved while running the
727 * nested-guest. */
728#ifdef VMCPU_WITH_64_BIT_FFS
729 uint64_t fLocalForcedActions;
730#else
731 uint32_t fLocalForcedActions;
732 uint32_t fPadding;
733#endif
734 /** 0x508 - Pad to 64 byte boundary. */
735 uint8_t abPadding0[56];
736 } hwvirt;
737 /** @} */
738} CPUMCTX;
739#pragma pack()
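/*
 * Editorial usage sketch (not part of the original header): the named 64-bit view
 * and the aGRegs array overlay the same storage (see the compile-time asserts
 * below), so registers can be picked by name or by X86_GREG_XXX index.  pCtx and
 * iReg are assumed to come from the caller.
 *
 *     pCtx->rsp                      -= 8;        // named access
 *     pCtx->aGRegs[X86_GREG_xSP].u64 -= 8;        // equivalent indexed access
 *     uint64_t uVal = pCtx->aGRegs[iReg].u64;     // iReg in [0, 15]
 */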
740
741#ifndef VBOX_FOR_DTRACE_LIB
742AssertCompileSizeAlignment(CPUMCTX, 64);
743AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax, 0);
744AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx, 8);
745AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx, 16);
746AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx, 24);
747AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp, 32);
748AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp, 40);
749AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi, 48);
750AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi, 56);
751AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r8, 64);
752AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r9, 72);
753AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10, 80);
754AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11, 88);
755AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12, 96);
756AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 104);
757AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 112);
758AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 120);
759AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 128);
760AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 152);
761AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 176);
762AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 200);
763AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 224);
764AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 248);
765AssertCompileMemberOffset(CPUMCTX, rip, 272);
766AssertCompileMemberOffset(CPUMCTX, rflags, 280);
767AssertCompileMemberOffset(CPUMCTX, cr0, 288);
768AssertCompileMemberOffset(CPUMCTX, cr2, 296);
769AssertCompileMemberOffset(CPUMCTX, cr3, 304);
770AssertCompileMemberOffset(CPUMCTX, cr4, 312);
771AssertCompileMemberOffset(CPUMCTX, dr, 320);
772AssertCompileMemberOffset(CPUMCTX, gdtr, 384+6);
773AssertCompileMemberOffset(CPUMCTX, idtr, 400+6);
774AssertCompileMemberOffset(CPUMCTX, ldtr, 416);
775AssertCompileMemberOffset(CPUMCTX, tr, 440);
776AssertCompileMemberOffset(CPUMCTX, SysEnter, 464);
777AssertCompileMemberOffset(CPUMCTX, msrEFER, 488);
778AssertCompileMemberOffset(CPUMCTX, msrSTAR, 496);
779AssertCompileMemberOffset(CPUMCTX, msrPAT, 504);
780AssertCompileMemberOffset(CPUMCTX, msrLSTAR, 512);
781AssertCompileMemberOffset(CPUMCTX, msrCSTAR, 520);
782AssertCompileMemberOffset(CPUMCTX, msrSFMASK, 528);
783AssertCompileMemberOffset(CPUMCTX, msrKERNELGSBASE, 536);
784AssertCompileMemberOffset(CPUMCTX, aXcr, 552);
785AssertCompileMemberOffset(CPUMCTX, fXStateMask, 568);
786AssertCompileMemberOffset(CPUMCTX, pXStateR0, 576);
787AssertCompileMemberOffset(CPUMCTX, pXStateR3, 584);
788AssertCompileMemberOffset(CPUMCTX, pXStateRC, 592);
789AssertCompileMemberOffset(CPUMCTX, aoffXState, 596);
790AssertCompileMemberOffset(CPUMCTX, hwvirt, 0x2e0);
791AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa, 0x2e0);
792AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pVmcbR0, 0x2f0);
793AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pVmcbR3, 0x2f8);
794AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HostState, 0x300);
795AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.cPauseFilter, 0x3c0);
796AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0, 0x3c8);
797AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR3, 0x3e0);
798AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HCPhysVmcb, 0x3e8);
799AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pVmcbR0, 8);
800AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0, 8);
801AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR0, 8);
802AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.GCPhysVmxon, 0x2e0);
803AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.GCPhysVmcs, 0x2e8);
804AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.GCPhysShadowVmcs, 0x2f0);
805AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.enmDiag, 0x2f8);
806AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.enmAbort, 0x2fc);
807AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uAbortAux, 0x300);
808AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxRootMode, 0x304);
809AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInVmxNonRootMode, 0x305);
810AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fInterceptEvents, 0x306);
811AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fNmiUnblockingIret, 0x307);
812AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0, 0x308);
813AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR3, 0x310);
814AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0, 0x318);
815AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR3, 0x320);
816AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0, 0x338);
817AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR3, 0x340);
818AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR0, 0x348);
819AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR3, 0x350);
820AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pEntryMsrLoadAreaR0, 0x358);
821AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pEntryMsrLoadAreaR3, 0x360);
822AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pExitMsrStoreAreaR0, 0x368);
823AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pExitMsrStoreAreaR3, 0x370);
824AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pExitMsrLoadAreaR0, 0x378);
825AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pExitMsrLoadAreaR3, 0x380);
826AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvMsrBitmapR0, 0x388);
827AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvMsrBitmapR3, 0x390);
828AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvIoBitmapR0, 0x398);
829AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvIoBitmapR3, 0x3a0);
830AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uFirstPauseLoopTick, 0x3a8);
831AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uPrevPauseTick, 0x3b0);
832AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uEntryTick, 0x3b8);
833AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.offVirtApicWrite, 0x3c0);
834AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fVirtNmiBlocking, 0x3c2);
835AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.Msrs, 0x3c8);
836AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.HCPhysVmcs, 0x4a8);
837AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.HCPhysShadowVmcs, 0x4b0);
838AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.HCPhysVmreadBitmap, 0x4c0);
839AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.HCPhysVmwriteBitmap, 0x4c8);
840AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.HCPhysEntryMsrLoadArea, 0x4d0);
841AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.HCPhysExitMsrStoreArea, 0x4d8);
842AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.HCPhysExitMsrLoadArea, 0x4e0);
843AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.HCPhysMsrBitmap, 0x4e8);
844AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.HCPhysIoBitmap, 0x4f0);
845AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0, 8);
846AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0, 8);
847AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0, 8);
848AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmwriteBitmapR0, 8);
849AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pEntryMsrLoadAreaR0, 8);
850AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pExitMsrStoreAreaR0, 8);
851AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pExitMsrLoadAreaR0, 8);
852AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvMsrBitmapR0, 8);
853AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvIoBitmapR0, 8);
854AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.Msrs, 8);
855AssertCompileMemberOffset(CPUMCTX, hwvirt.enmHwvirt, 0x4f8);
856AssertCompileMemberOffset(CPUMCTX, hwvirt.fGif, 0x4fc);
857AssertCompileMemberOffset(CPUMCTX, hwvirt.fLocalForcedActions, 0x500);
858AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
859AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r0);
860AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r1);
861AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r2);
862AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r3);
863AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r4);
864AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r5);
865AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r6);
866AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r7);
867AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) eax);
868AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) ecx);
869AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) edx);
870AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) ebx);
871AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) esp);
872AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) ebp);
873AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) esi);
874AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) edi);
875AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r8d);
876AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r9d);
877AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r10d);
878AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r11d);
879AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r12d);
880AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r13d);
881AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r14d);
882AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r15d);
883AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) ax);
884AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) cx);
885AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) dx);
886AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) bx);
887AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) sp);
888AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) bp);
889AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) si);
890AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) di);
891AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r8w);
892AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r9w);
893AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r10w);
894AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r11w);
895AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r12w);
896AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r13w);
897AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r14w);
898AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r15w);
899AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) al);
900AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) cl);
901AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) dl);
902AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) bl);
903AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) spl);
904AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) bpl);
905AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) sil);
906AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) dil);
907AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r8l);
908AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r9l);
909AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r10l);
910AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r11l);
911AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r12l);
912AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r13l);
913AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r14l);
914AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r15l);
915AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, CPUMCTX, CPUM_UNION_NM(s.) aSRegs);
916# ifndef _MSC_VER
917AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xAX]);
918AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xCX]);
919AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xDX]);
920AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xBX]);
921AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xSP]);
922AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xBP]);
923AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xSI]);
924AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xDI]);
925AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x8]);
926AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x9]);
927AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x10]);
928AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x11]);
929AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x12]);
930AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x13]);
931AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x14]);
932AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x15]);
933AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) es, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_ES]);
934AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) cs, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_CS]);
935AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) ss, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_SS]);
936AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) ds, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_DS]);
937AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) fs, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_FS]);
938AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) gs, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_GS]);
939# endif
940
941/**
942 * Calculates the pointer to the given extended state component.
943 *
944 * @returns Pointer of type @a a_PtrType
945 * @param a_pCtx Pointer to the context.
946 * @param a_iCompBit The extended state component bit number. This bit
947 * must be set in CPUMCTX::fXStateMask.
948 * @param a_PtrType The pointer type of the extended state component.
949 *
950 */
951#if defined(VBOX_STRICT) && defined(RT_COMPILER_SUPPORTS_LAMBDA)
952# define CPUMCTX_XSAVE_C_PTR(a_pCtx, a_iCompBit, a_PtrType) \
953 ([](PCCPUMCTX a_pLambdaCtx) -> a_PtrType \
954 { \
955 AssertCompile((a_iCompBit) < 64U); \
956 AssertMsg(a_pLambdaCtx->fXStateMask & RT_BIT_64(a_iCompBit), (#a_iCompBit "\n")); \
957 AssertMsg(a_pLambdaCtx->aoffXState[(a_iCompBit)] != UINT16_MAX, (#a_iCompBit "\n")); \
958 return (a_PtrType)((uint8_t *)a_pLambdaCtx->CTX_SUFF(pXState) + a_pLambdaCtx->aoffXState[(a_iCompBit)]); \
959 }(a_pCtx))
960#elif defined(VBOX_STRICT) && defined(__GNUC__)
961# define CPUMCTX_XSAVE_C_PTR(a_pCtx, a_iCompBit, a_PtrType) \
962 __extension__ (\
963 { \
964 AssertCompile((a_iCompBit) < 64U); \
965 AssertMsg((a_pCtx)->fXStateMask & RT_BIT_64(a_iCompBit), (#a_iCompBit "\n")); \
966 AssertMsg((a_pCtx)->aoffXState[(a_iCompBit)] != UINT16_MAX, (#a_iCompBit "\n")); \
967 (a_PtrType)((uint8_t *)(a_pCtx)->CTX_SUFF(pXState) + (a_pCtx)->aoffXState[(a_iCompBit)]); \
968 })
969#else
970# define CPUMCTX_XSAVE_C_PTR(a_pCtx, a_iCompBit, a_PtrType) \
971 ((a_PtrType)((uint8_t *)(a_pCtx)->CTX_SUFF(pXState) + (a_pCtx)->aoffXState[(a_iCompBit)]))
972#endif
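/*
 * Editorial usage sketch (not part of the original header): fetching a pointer to
 * one extended state component.  Which of the R0/R3/RC mappings is used depends
 * on the compilation context via CTX_SUFF().  The literal component bit 2
 * (AVX/YMM_Hi128) and the uint8_t pointer type are illustrative only; the caller
 * must have verified that the bit is set in CPUMCTX::fXStateMask.
 *
 *     uint8_t *pbYmmHi = CPUMCTX_XSAVE_C_PTR(pCtx, 2, uint8_t *);
 */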
973
974/**
975 * Gets the CPUMCTXCORE part of a CPUMCTX.
976 */
977# define CPUMCTX2CORE(pCtx) ((PCPUMCTXCORE)(void *)&(pCtx)->rax)
978
979/**
980 * Gets the CPUMCTX part from a CPUMCTXCORE.
981 */
982# define CPUMCTX_FROM_CORE(a_pCtxCore) RT_FROM_MEMBER(a_pCtxCore, CPUMCTX, rax)
983
984/**
985 * Gets the first selector register of a CPUMCTX.
986 *
987 * Use this with X86_SREG_COUNT to loop thru the selector registers.
988 */
989# define CPUMCTX_FIRST_SREG(a_pCtx) (&(a_pCtx)->es)
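/*
 * Editorial usage sketch (not part of the original header): iterating over the six
 * selector registers as the comment above suggests.  pVCpu and pCtx are assumed
 * to be supplied by the caller.
 *
 *     CPUMSELREG *pSReg = CPUMCTX_FIRST_SREG(pCtx);
 *     for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++, pSReg++)
 *         if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
 *             ...;                                // refresh this selector's hidden parts
 */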
990
991#endif /* !VBOX_FOR_DTRACE_LIB */
992
993
994/** @name CPUMCTX_WSF_XXX
995 * @{ */
996/** Touch IA32_PRED_CMD.IBPB on VM exit. */
997#define CPUMCTX_WSF_IBPB_EXIT RT_BIT_32(0)
998/** Touch IA32_PRED_CMD.IBPB on VM entry. */
999#define CPUMCTX_WSF_IBPB_ENTRY RT_BIT_32(1)
1000/** Touch IA32_FLUSH_CMD.L1D on VM entry. */
1001#define CPUMCTX_WSF_L1D_ENTRY RT_BIT_32(2)
1002/** Flush MDS buffers on VM entry. */
1003#define CPUMCTX_WSF_MDS_ENTRY RT_BIT_32(3)
1004/** @} */
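/*
 * Editorial usage sketch (not part of the original header): the world switcher
 * flags are plain bits in CPUMCTX::fWorldSwitcher (pCtx assumed from the caller):
 *
 *     if (pCtx->fWorldSwitcher & CPUMCTX_WSF_IBPB_ENTRY)
 *         ...;                                    // issue IBPB before entering the guest
 */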
1005
1006/** @name CPUMCTX_EXTRN_XXX
1007 * Used for parts of the CPUM state that are externalized and need fetching
1008 * before use.
1009 *
1010 * @{ */
1011/** External state keeper: Invalid. */
1012#define CPUMCTX_EXTRN_KEEPER_INVALID UINT64_C(0x0000000000000000)
1013/** External state keeper: HM. */
1014#define CPUMCTX_EXTRN_KEEPER_HM UINT64_C(0x0000000000000001)
1015/** External state keeper: NEM. */
1016#define CPUMCTX_EXTRN_KEEPER_NEM UINT64_C(0x0000000000000002)
1017/** External state keeper: REM. */
1018#define CPUMCTX_EXTRN_KEEPER_REM UINT64_C(0x0000000000000003)
1019/** External state keeper mask. */
1020#define CPUMCTX_EXTRN_KEEPER_MASK UINT64_C(0x0000000000000003)
1021
1022/** The RIP register value is kept externally. */
1023#define CPUMCTX_EXTRN_RIP UINT64_C(0x0000000000000004)
1024/** The RFLAGS register values are kept externally. */
1025#define CPUMCTX_EXTRN_RFLAGS UINT64_C(0x0000000000000008)
1026
1027/** The RAX register value is kept externally. */
1028#define CPUMCTX_EXTRN_RAX UINT64_C(0x0000000000000010)
1029/** The RCX register value is kept externally. */
1030#define CPUMCTX_EXTRN_RCX UINT64_C(0x0000000000000020)
1031/** The RDX register value is kept externally. */
1032#define CPUMCTX_EXTRN_RDX UINT64_C(0x0000000000000040)
1033/** The RBX register value is kept externally. */
1034#define CPUMCTX_EXTRN_RBX UINT64_C(0x0000000000000080)
1035/** The RSP register value is kept externally. */
1036#define CPUMCTX_EXTRN_RSP UINT64_C(0x0000000000000100)
1037/** The RBP register value is kept externally. */
1038#define CPUMCTX_EXTRN_RBP UINT64_C(0x0000000000000200)
1039/** The RSI register value is kept externally. */
1040#define CPUMCTX_EXTRN_RSI UINT64_C(0x0000000000000400)
1041/** The RDI register value is kept externally. */
1042#define CPUMCTX_EXTRN_RDI UINT64_C(0x0000000000000800)
1043/** The R8 thru R15 register values are kept externally. */
1044#define CPUMCTX_EXTRN_R8_R15 UINT64_C(0x0000000000001000)
1045/** General purpose registers mask. */
1046#define CPUMCTX_EXTRN_GPRS_MASK UINT64_C(0x0000000000001ff0)
1047
1048/** The ES register values are kept externally. */
1049#define CPUMCTX_EXTRN_ES UINT64_C(0x0000000000002000)
1050/** The CS register values are kept externally. */
1051#define CPUMCTX_EXTRN_CS UINT64_C(0x0000000000004000)
1052/** The SS register values are kept externally. */
1053#define CPUMCTX_EXTRN_SS UINT64_C(0x0000000000008000)
1054/** The DS register values are kept externally. */
1055#define CPUMCTX_EXTRN_DS UINT64_C(0x0000000000010000)
1056/** The FS register values are kept externally. */
1057#define CPUMCTX_EXTRN_FS UINT64_C(0x0000000000020000)
1058/** The GS register values are kept externally. */
1059#define CPUMCTX_EXTRN_GS UINT64_C(0x0000000000040000)
1060/** Segment registers (includes CS). */
1061#define CPUMCTX_EXTRN_SREG_MASK UINT64_C(0x000000000007e000)
1062/** Converts an X86_SREG_XXX index to a CPUMCTX_EXTRN_xS mask. */
1063#define CPUMCTX_EXTRN_SREG_FROM_IDX(a_SRegIdx) RT_BIT_64((a_SRegIdx) + 13)
1064#ifndef VBOX_FOR_DTRACE_LIB
1065AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_ES) == CPUMCTX_EXTRN_ES);
1066AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_CS) == CPUMCTX_EXTRN_CS);
1067AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_DS) == CPUMCTX_EXTRN_DS);
1068AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_FS) == CPUMCTX_EXTRN_FS);
1069AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_GS) == CPUMCTX_EXTRN_GS);
1070#endif
1071
1072/** The GDTR register values are kept externally. */
1073#define CPUMCTX_EXTRN_GDTR UINT64_C(0x0000000000080000)
1074/** The IDTR register values are kept externally. */
1075#define CPUMCTX_EXTRN_IDTR UINT64_C(0x0000000000100000)
1076/** The LDTR register values are kept externally. */
1077#define CPUMCTX_EXTRN_LDTR UINT64_C(0x0000000000200000)
1078/** The TR register values are kept externally. */
1079#define CPUMCTX_EXTRN_TR UINT64_C(0x0000000000400000)
1080/** Table register mask. */
1081#define CPUMCTX_EXTRN_TABLE_MASK UINT64_C(0x0000000000780000)
1082
1083/** The CR0 register value is kept externally. */
1084#define CPUMCTX_EXTRN_CR0 UINT64_C(0x0000000000800000)
1085/** The CR2 register value is kept externally. */
1086#define CPUMCTX_EXTRN_CR2 UINT64_C(0x0000000001000000)
1087/** The CR3 register value is kept externally. */
1088#define CPUMCTX_EXTRN_CR3 UINT64_C(0x0000000002000000)
1089/** The CR4 register value is kept externally. */
1090#define CPUMCTX_EXTRN_CR4 UINT64_C(0x0000000004000000)
1091/** Control register mask. */
1092#define CPUMCTX_EXTRN_CR_MASK UINT64_C(0x0000000007800000)
1093/** The TPR/CR8 register value is kept externally. */
1094#define CPUMCTX_EXTRN_APIC_TPR UINT64_C(0x0000000008000000)
1095/** The EFER register value is kept externally. */
1096#define CPUMCTX_EXTRN_EFER UINT64_C(0x0000000010000000)
1097
1098/** The DR0, DR1, DR2 and DR3 register values are kept externally. */
1099#define CPUMCTX_EXTRN_DR0_DR3 UINT64_C(0x0000000020000000)
1100/** The DR6 register value is kept externally. */
1101#define CPUMCTX_EXTRN_DR6 UINT64_C(0x0000000040000000)
1102/** The DR7 register value is kept externally. */
1103#define CPUMCTX_EXTRN_DR7 UINT64_C(0x0000000080000000)
1104/** Debug register mask. */
1105#define CPUMCTX_EXTRN_DR_MASK UINT64_C(0x00000000e0000000)
1106
1107/** The XSAVE_C_X87 state is kept externally. */
1108#define CPUMCTX_EXTRN_X87 UINT64_C(0x0000000100000000)
1109/** The XSAVE_C_SSE, XSAVE_C_YMM, XSAVE_C_ZMM_HI256, XSAVE_C_ZMM_16HI and
1110 * XSAVE_C_OPMASK state is kept externally. */
1111#define CPUMCTX_EXTRN_SSE_AVX UINT64_C(0x0000000200000000)
1112/** The state of XSAVE components not covered by CPUMCTX_EXTRN_X87 and
1113 * CPUMCTX_EXTRN_SSE_AVX is kept externally. */
1114#define CPUMCTX_EXTRN_OTHER_XSAVE UINT64_C(0x0000000400000000)
1115/** The XCR0 and XCR1 register values are kept externally. */
1116#define CPUMCTX_EXTRN_XCRx UINT64_C(0x0000000800000000)
1117
1118
1119/** The KERNEL GS BASE MSR value is kept externally. */
1120#define CPUMCTX_EXTRN_KERNEL_GS_BASE UINT64_C(0x0000001000000000)
1121/** The STAR, LSTAR, CSTAR and SFMASK MSR values are kept externally. */
1122#define CPUMCTX_EXTRN_SYSCALL_MSRS UINT64_C(0x0000002000000000)
1123/** The SYSENTER_CS, SYSENTER_EIP and SYSENTER_ESP MSR values are kept externally. */
1124#define CPUMCTX_EXTRN_SYSENTER_MSRS UINT64_C(0x0000004000000000)
1125/** The TSC_AUX MSR is kept externally. */
1126#define CPUMCTX_EXTRN_TSC_AUX UINT64_C(0x0000008000000000)
1127/** All other stateful MSRs not covered by CPUMCTX_EXTRN_EFER,
1128 * CPUMCTX_EXTRN_KERNEL_GS_BASE, CPUMCTX_EXTRN_SYSCALL_MSRS,
1129 * CPUMCTX_EXTRN_SYSENTER_MSRS, and CPUMCTX_EXTRN_TSC_AUX. */
1130#define CPUMCTX_EXTRN_OTHER_MSRS UINT64_C(0x0000010000000000)
1131
1132/** Mask of all the MSRs. */
1133#define CPUMCTX_EXTRN_ALL_MSRS ( CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS \
1134 | CPUMCTX_EXTRN_SYSENTER_MSRS | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)
1135
1136/** Hardware-virtualization (SVM or VMX) state is kept externally. */
1137#define CPUMCTX_EXTRN_HWVIRT UINT64_C(0x0000020000000000)
1138
1139/** Mask of bits the keepers can use for state tracking. */
1140#define CPUMCTX_EXTRN_KEEPER_STATE_MASK UINT64_C(0xffff000000000000)
1141
1142/** NEM/Win: Event injection (also known as interruption) pending state. */
1143#define CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT UINT64_C(0x0001000000000000)
1144/** NEM/Win: Inhibit maskable interrupts (VMCPU_FF_INHIBIT_INTERRUPTS). */
1145#define CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT UINT64_C(0x0002000000000000)
1146/** NEM/Win: Inhibit non-maskable interrupts (VMCPU_FF_BLOCK_NMIS). */
1147#define CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI UINT64_C(0x0004000000000000)
1148/** NEM/Win: Mask. */
1149#define CPUMCTX_EXTRN_NEM_WIN_MASK UINT64_C(0x0007000000000000)
1150
1151/** HM/SVM: Inhibit maskable interrupts (VMCPU_FF_INHIBIT_INTERRUPTS). */
1152#define CPUMCTX_EXTRN_HM_SVM_INT_SHADOW UINT64_C(0x0001000000000000)
1153/** HM/SVM: Nested-guest interrupt pending (VMCPU_FF_INTERRUPT_NESTED_GUEST). */
1154#define CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ UINT64_C(0x0002000000000000)
1155/** HM/SVM: Mask. */
1156#define CPUMCTX_EXTRN_HM_SVM_MASK UINT64_C(0x0003000000000000)
1157
1158/** HM/VMX: Guest-interruptibility state (VMCPU_FF_INHIBIT_INTERRUPTS,
1159 * VMCPU_FF_BLOCK_NMIS). */
1160#define CPUMCTX_EXTRN_HM_VMX_INT_STATE UINT64_C(0x0001000000000000)
1161/** HM/VMX: Mask. */
1162#define CPUMCTX_EXTRN_HM_VMX_MASK UINT64_C(0x0001000000000000)
1163
1164/** All CPUM state bits, not including keeper specific ones. */
1165#define CPUMCTX_EXTRN_ALL UINT64_C(0x000003fffffffffc)
1166/** All CPUM state bits, including keeper specific ones. */
1167#define CPUMCTX_EXTRN_ABSOLUTELY_ALL UINT64_C(0xfffffffffffffffc)
1168/** @} */
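/*
 * Editorial usage sketch (not part of the original header): before reading a
 * register, code must make sure its CPUMCTX_EXTRN_XXX bit is clear in
 * CPUMCTX::fExtrn, otherwise the value is still held by the state keeper (HM,
 * NEM, ...).  CPUMImportGuestStateOnDemand() below is named only to illustrate
 * the flow and is an assumption, not something declared in this header.
 *
 *     if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
 *         CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_RIP);  // hypothetical fetch helper
 *     uint64_t uGuestRip = pCtx->rip;                              // now safe to read
 */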
1169
1170
1171/**
1172 * Additional guest MSRs (i.e. not part of the CPU context structure).
1173 *
1174 * @remarks Never change the order here because of the saved state! The size
1175 * can in theory be changed, but keep older VBox versions in mind.
1176 */
1177typedef union CPUMCTXMSRS
1178{
1179 struct
1180 {
1181 uint64_t TscAux; /**< MSR_K8_TSC_AUX */
1182 uint64_t MiscEnable; /**< MSR_IA32_MISC_ENABLE */
1183 uint64_t MtrrDefType; /**< IA32_MTRR_DEF_TYPE */
1184 uint64_t MtrrFix64K_00000; /**< IA32_MTRR_FIX64K_00000 */
1185 uint64_t MtrrFix16K_80000; /**< IA32_MTRR_FIX16K_80000 */
1186 uint64_t MtrrFix16K_A0000; /**< IA32_MTRR_FIX16K_A0000 */
1187 uint64_t MtrrFix4K_C0000; /**< IA32_MTRR_FIX4K_C0000 */
1188 uint64_t MtrrFix4K_C8000; /**< IA32_MTRR_FIX4K_C8000 */
1189 uint64_t MtrrFix4K_D0000; /**< IA32_MTRR_FIX4K_D0000 */
1190 uint64_t MtrrFix4K_D8000; /**< IA32_MTRR_FIX4K_D8000 */
1191 uint64_t MtrrFix4K_E0000; /**< IA32_MTRR_FIX4K_E0000 */
1192 uint64_t MtrrFix4K_E8000; /**< IA32_MTRR_FIX4K_E8000 */
1193 uint64_t MtrrFix4K_F0000; /**< IA32_MTRR_FIX4K_F0000 */
1194 uint64_t MtrrFix4K_F8000; /**< IA32_MTRR_FIX4K_F8000 */
1195 uint64_t PkgCStateCfgCtrl; /**< MSR_PKG_CST_CONFIG_CONTROL */
1196 uint64_t SpecCtrl; /**< IA32_SPEC_CTRL */
1197 uint64_t ArchCaps; /**< IA32_ARCH_CAPABILITIES */
1198 } msr;
1199 uint64_t au64[64];
1200} CPUMCTXMSRS;
1201/** Pointer to the guest MSR state. */
1202typedef CPUMCTXMSRS *PCPUMCTXMSRS;
1203/** Pointer to the const guest MSR state. */
1204typedef const CPUMCTXMSRS *PCCPUMCTXMSRS;
1205
1206/**
1207 * The register set returned by a CPUID operation.
1208 */
1209typedef struct CPUMCPUID
1210{
1211 uint32_t uEax;
1212 uint32_t uEbx;
1213 uint32_t uEcx;
1214 uint32_t uEdx;
1215} CPUMCPUID;
1216/** Pointer to a CPUID leaf. */
1217typedef CPUMCPUID *PCPUMCPUID;
1218/** Pointer to a const CPUID leaf. */
1219typedef const CPUMCPUID *PCCPUMCPUID;
1220
1221/** @} */
1222
1223RT_C_DECLS_END
1224
1225#endif /* !VBOX_INCLUDED_vmm_cpumctx_h */
1226