VirtualBox

source: vbox/trunk/include/VBox/vmm/cpumctx.h@ 97262

Last change on this file since 97262 was 97232, checked in by vboxsync, 2 years ago

VMM/CPUM: Define our own X86EFLAGS/X86RFLAGS structures so we can use reserved bits for internal state. [build fix]

1/** @file
2 * CPUM - CPU Monitor(/ Manager), Context Structures.
3 */
4
5/*
6 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
7 *
8 * This file is part of VirtualBox base platform packages, as
9 * available from https://www.virtualbox.org.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, in version 3 of the
14 * License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <https://www.gnu.org/licenses>.
23 *
24 * The contents of this file may alternatively be used under the terms
25 * of the Common Development and Distribution License Version 1.0
26 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
27 * in the VirtualBox distribution, in which case the provisions of the
28 * CDDL are applicable instead of those of the GPL.
29 *
30 * You may elect to license modified versions of this file under the
31 * terms and conditions of either the GPL or the CDDL or both.
32 *
33 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
34 */
35
36#ifndef VBOX_INCLUDED_vmm_cpumctx_h
37#define VBOX_INCLUDED_vmm_cpumctx_h
38#ifndef RT_WITHOUT_PRAGMA_ONCE
39# pragma once
40#endif
41
42#ifndef VBOX_FOR_DTRACE_LIB
43# include <iprt/x86.h>
44# include <VBox/types.h>
45# include <VBox/vmm/hm_svm.h>
46# include <VBox/vmm/hm_vmx.h>
47#else
48# pragma D depends_on library x86.d
49#endif
50
51
52RT_C_DECLS_BEGIN
53
54/** @defgroup grp_cpum_ctx The CPUM Context Structures
55 * @ingroup grp_cpum
56 * @{
57 */
58
59/**
60 * Selector hidden registers.
61 */
62typedef struct CPUMSELREG
63{
64 /** The selector register. */
65 RTSEL Sel;
66 /** Padding, don't use. */
67 RTSEL PaddingSel;
68 /** The selector whose info resides in u64Base, u32Limit and Attr, provided
69 * that CPUMSELREG_FLAGS_VALID is set. */
70 RTSEL ValidSel;
71 /** Flags, see CPUMSELREG_FLAGS_XXX. */
72 uint16_t fFlags;
73
74 /** Base register.
75 *
76 * Long mode remarks:
77 * - Unused in long mode for CS, DS, ES, SS
78 * - 32 bits for FS & GS; FS(GS)_BASE msr used for the base address
79 * - 64 bits for TR & LDTR
80 */
81 uint64_t u64Base;
82 /** Limit (expanded). */
83 uint32_t u32Limit;
84 /** Flags.
85 * This is the high 32-bit word of the descriptor entry.
86 * Only the flags, dpl and type are used. */
87 X86DESCATTR Attr;
88} CPUMSELREG;
89#ifndef VBOX_FOR_DTRACE_LIB
90AssertCompileSize(CPUMSELREG, 24);
91#endif
92
93/** @name CPUMSELREG_FLAGS_XXX - CPUMSELREG::fFlags values.
94 * @{ */
95#define CPUMSELREG_FLAGS_VALID UINT16_C(0x0001)
96#define CPUMSELREG_FLAGS_STALE UINT16_C(0x0002)
97#define CPUMSELREG_FLAGS_VALID_MASK UINT16_C(0x0003)
98/** @} */
99
100/** Checks if the hidden parts of the selector register are valid. */
101#define CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSelReg) \
102 ( ((a_pSelReg)->fFlags & CPUMSELREG_FLAGS_VALID) \
103 && (a_pSelReg)->ValidSel == (a_pSelReg)->Sel )
104
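/* Illustrative sketch, not part of the original header: typical use of the
 * hidden-parts validity check before trusting the cached base/limit/attributes.
 * The function name and the zero fallback are made up for the example. */
#if 0 /* example only */
static uint64_t cpumExampleGetSRegBase(PVMCPU pVCpu, CPUMSELREG const *pSelReg)
{
    /* Only trust u64Base/u32Limit/Attr when the hidden parts match Sel. */
    if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSelReg))
        return pSelReg->u64Base;
    return 0; /* real code would reload the hidden parts instead */
}
#endif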
105/** Old type used for the hidden register part.
106 * @deprecated */
107typedef CPUMSELREG CPUMSELREGHID;
108
109/**
110 * The sysenter register set.
111 */
112typedef struct CPUMSYSENTER
113{
114 /** Ring 0 cs.
115 * This value + 8 is the Ring 0 ss.
116 * This value + 16 is the Ring 3 cs.
117 * This value + 24 is the Ring 3 ss.
118 */
119 uint64_t cs;
120 /** Ring 0 eip. */
121 uint64_t eip;
122 /** Ring 0 esp. */
123 uint64_t esp;
124} CPUMSYSENTER;
125
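/* Illustrative sketch, not part of the original header: deriving the other
 * flat selectors from the single SYSENTER CS value per the layout described
 * above.  The function and variable names are made up for the example. */
#if 0 /* example only */
static void cpumExampleSysenterSelectors(CPUMSYSENTER const *pSysEnter)
{
    uint16_t const uRing0Cs = (uint16_t)(pSysEnter->cs);       /* ring-0 code  */
    uint16_t const uRing0Ss = (uint16_t)(pSysEnter->cs +  8);  /* ring-0 stack */
    uint16_t const uRing3Cs = (uint16_t)(pSysEnter->cs + 16);  /* ring-3 code (SYSEXIT)  */
    uint16_t const uRing3Ss = (uint16_t)(pSysEnter->cs + 24);  /* ring-3 stack (SYSEXIT) */
    NOREF(uRing0Cs); NOREF(uRing0Ss); NOREF(uRing3Cs); NOREF(uRing3Ss);
}
#endif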
126/** @def CPUM_UNION_NM
127 * For compilers (like DTrace) that do not grok nameless unions, we have a
128 * little hack to make them palatable.
129 */
130/** @def CPUM_STRUCT_NM
131 * For compilers (like DTrace) that do not grok nameless structs (a
132 * non-standard C++ feature), we have a little hack to make them palatable.
133 */
134#ifdef VBOX_FOR_DTRACE_LIB
135# define CPUM_UNION_NM(a_Nm) a_Nm
136# define CPUM_STRUCT_NM(a_Nm) a_Nm
137#elif defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS)
138# define CPUM_UNION_NM(a_Nm) a_Nm
139# define CPUM_STRUCT_NM(a_Nm) a_Nm
140#else
141# define CPUM_UNION_NM(a_Nm)
142# define CPUM_STRUCT_NM(a_Nm)
143#endif
144/** @def CPUM_UNION_STRUCT_NM
145 * Combines CPUM_UNION_NM and CPUM_STRUCT_NM to avoid hitting the right side of
146 * the screen in the compile time assertions.
147 */
148#define CPUM_UNION_STRUCT_NM(a_UnionNm, a_StructNm) CPUM_UNION_NM(a_UnionNm .) CPUM_STRUCT_NM(a_StructNm)
149
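/* Illustrative note, not part of the original header: with nameless unions
 * and structs supported, CPUM_UNION_NM()/CPUM_STRUCT_NM() expand to nothing
 * and a CPUMCTX member is reached directly; for DTrace and similar consumers
 * the names are kept.  E.g. the same RAX storage is spelled:
 *
 *      pCtx->rax           in a nameless-union build
 *      pCtx->g.qw.rax      in a named-union (DTrace) build
 *
 * CPUM_UNION_STRUCT_NM(g,qw.) rax covers both spellings in the compile-time
 * assertions further down. */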
150/** A general register (union). */
151typedef union CPUMCTXGREG
152{
153 /** Natural unsigned integer view. */
154 uint64_t u;
155 /** 64-bit view. */
156 uint64_t u64;
157 /** 32-bit view. */
158 uint32_t u32;
159 /** 16-bit view. */
160 uint16_t u16;
161 /** 8-bit view. */
162 uint8_t u8;
163 /** 8-bit low/high view. */
164 RT_GCC_EXTENSION struct
165 {
166 /** Low byte (al, cl, dl, bl, ++). */
167 uint8_t bLo;
168 /** High byte in the first word - ah, ch, dh, bh. */
169 uint8_t bHi;
170 } CPUM_STRUCT_NM(s);
171} CPUMCTXGREG;
172#ifndef VBOX_FOR_DTRACE_LIB
173AssertCompileSize(CPUMCTXGREG, 8);
174AssertCompileMemberOffset(CPUMCTXGREG, CPUM_STRUCT_NM(s.) bLo, 0);
175AssertCompileMemberOffset(CPUMCTXGREG, CPUM_STRUCT_NM(s.) bHi, 1);
176#endif
177
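/* Illustrative sketch, not part of the original header: the same 64-bit
 * storage read through different CPUMCTXGREG views; the function and variable
 * names are made up for the example. */
#if 0 /* example only */
static void cpumExampleGRegViews(void)
{
    CPUMCTXGREG GReg;
    GReg.u64 = UINT64_C(0x1122334455667788);
    uint32_t const u32 = GReg.u32;                      /* 0x55667788 (little endian) */
    uint8_t  const bAl = GReg.CPUM_STRUCT_NM(s.) bLo;   /* 0x88 - the 'al' style byte */
    uint8_t  const bAh = GReg.CPUM_STRUCT_NM(s.) bHi;   /* 0x77 - the 'ah' style byte */
    NOREF(u32); NOREF(bAl); NOREF(bAh);
}
#endif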
178
179
180/**
181 * SVM Host-state area (Nested Hw.virt - VirtualBox's layout).
182 *
183 * @warning Exercise caution while modifying the layout of this struct. It's
184 * part of VM saved states.
185 */
186#pragma pack(1)
187typedef struct SVMHOSTSTATE
188{
189 uint64_t uEferMsr;
190 uint64_t uCr0;
191 uint64_t uCr4;
192 uint64_t uCr3;
193 uint64_t uRip;
194 uint64_t uRsp;
195 uint64_t uRax;
196 X86RFLAGS rflags;
197 CPUMSELREG es;
198 CPUMSELREG cs;
199 CPUMSELREG ss;
200 CPUMSELREG ds;
201 VBOXGDTR gdtr;
202 VBOXIDTR idtr;
203 uint8_t abPadding[4];
204} SVMHOSTSTATE;
205#pragma pack()
206/** Pointer to the SVMHOSTSTATE structure. */
207typedef SVMHOSTSTATE *PSVMHOSTSTATE;
208/** Pointer to a const SVMHOSTSTATE structure. */
209typedef const SVMHOSTSTATE *PCSVMHOSTSTATE;
210#ifndef VBOX_FOR_DTRACE_LIB
211AssertCompileSizeAlignment(SVMHOSTSTATE, 8);
212AssertCompileSize(SVMHOSTSTATE, 184);
213#endif
214
215
216/**
217 * CPU hardware virtualization types.
218 */
219typedef enum
220{
221 CPUMHWVIRT_NONE = 0,
222 CPUMHWVIRT_VMX,
223 CPUMHWVIRT_SVM,
224 CPUMHWVIRT_32BIT_HACK = 0x7fffffff
225} CPUMHWVIRT;
226#ifndef VBOX_FOR_DTRACE_LIB
227AssertCompileSize(CPUMHWVIRT, 4);
228#endif
229
230/** Number of EFLAGS bits we put aside for the hardware EFLAGS; the bits
231 * above this are used for storing internal state not visible to the guest.
232 *
233 * The initial plan was to use 24 or 22 here and keep bits that need clearing
234 * on instruction boundaries in the top of the first 32 bits, allowing us to use
235 * an AND with a 32-bit immediate for clearing both RF and the interrupt shadow
236 * bits. However, when using anything less than 32, there is a significant code
237 * size increase: VMMR0.ro is 2475709 bytes with 32 bits, 2482069 bytes with 24
238 * bits, and 2482261 bytes with 22 bits.
239 *
240 * So, for now we're best off setting this to 32.
241 */
242#define CPUMX86EFLAGS_HW_BITS 32
243/** Mask for the hardware EFLAGS bits, 64-bit version. */
244#define CPUMX86EFLAGS_HW_MASK_64 (RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - UINT64_C(1))
245/** Mask for the hardware EFLAGS bits, 32-bit version. */
246#if CPUMX86EFLAGS_HW_BITS == 32
247# define CPUMX86EFLAGS_HW_MASK_32 UINT32_MAX
248#elif CPUMX86EFLAGS_HW_BITS < 32 && CPUMX86EFLAGS_HW_BITS >= 22
249# define CPUMX86EFLAGS_HW_MASK_32 (RT_BIT_32(CPUMX86EFLAGS_HW_BITS) - UINT32_C(1))
250#else
251# error "Misconfigured CPUMX86EFLAGS_HW_BITS value!"
252#endif
253
254/** Mask of internal flags kept with EFLAGS, 64-bit version. */
255#define CPUMX86EFLAGS_INT_MASK_64 UINT64_C(0x0000000000000000)
256/** Mask of internal flags kept with EFLAGS, 32-bit version. */
257#define CPUMX86EFLAGS_INT_MASK_32 UINT64_C(0x0000000000000000)
258
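/* Illustrative sketch, not part of the original header: splitting a combined
 * flags value (CPUMX86RFLAGS::uBoth below) into the guest-visible hardware
 * part and the VBox-internal part using the masks above (the internal mask is
 * currently all zero).  The function name is made up for the example. */
#if 0 /* example only */
static uint64_t cpumExampleHwEflags(uint64_t uBoth)
{
    uint64_t const fInternal = uBoth & ~CPUMX86EFLAGS_HW_MASK_64;  /* VBox-internal bits, if any */
    uint64_t const fHardware = uBoth &  CPUMX86EFLAGS_HW_MASK_64;  /* what the guest sees */
    NOREF(fInternal);
    return fHardware;
}
#endif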
259
260/**
261 * CPUM EFLAGS.
262 *
263 * This differs from X86EFLAGS in that we could use bits 31:22 for internal
264 * purposes, see CPUMX86EFLAGS_HW_BITS.
265 */
266typedef union CPUMX86EFLAGS
267{
268 /** The full unsigned view, both hardware and VBox bits. */
269 uint32_t uBoth;
270 /** The plain unsigned view of the hardware bits. */
271#if CPUMX86EFLAGS_HW_BITS == 32
272 uint32_t u;
273#else
274 uint32_t u : CPUMX86EFLAGS_HW_BITS;
275#endif
276#ifndef VBOX_FOR_DTRACE_LIB
277 /** The bitfield view. */
278 X86EFLAGSBITS Bits;
279#endif
280} CPUMX86EFLAGS;
281/** Pointer to CPUM EFLAGS. */
282typedef CPUMX86EFLAGS *PCPUMX86EFLAGS;
283/** Pointer to const CPUM EFLAGS. */
284typedef const CPUMX86EFLAGS *PCCPUMX86EFLAGS;
285
286/**
287 * CPUM RFLAGS.
288 *
289 * This differs from X86RFLAGS in that we could be using bits 63:22 for
290 * internal purposes, see CPUMX86EFLAGS_HW_BITS.
291 */
292typedef union CPUMX86RFLAGS
293{
294 /** The full unsigned view, both hardware and VBox bits. */
295 uint64_t uBoth;
296 /** The plain unsigned view of the hardware bits. */
297#if CPUMX86EFLAGS_HW_BITS == 32
298 uint32_t u;
299#else
300 uint32_t u : CPUMX86EFLAGS_HW_BITS;
301#endif
302#ifndef VBOX_FOR_DTRACE_LIB
303 /** The bitfield view. */
304 X86EFLAGSBITS Bits;
305#endif
306} CPUMX86RFLAGS;
307/** Pointer to CPUM RFLAGS. */
308typedef CPUMX86RFLAGS *PCPUMX86RFLAGS;
309/** Pointer to const CPUM RFLAGS. */
310typedef const CPUMX86RFLAGS *PCCPUMX86RFLAGS;
311
312
313/**
314 * CPU context.
315 */
316#pragma pack(1) /* for VBOXIDTR / VBOXGDTR. */
317typedef struct CPUMCTX
318{
319 /** General purpose registers. */
320 union /* no tag! */
321 {
322 /** The general purpose register array view, indexed by X86_GREG_XXX. */
323 CPUMCTXGREG aGRegs[16];
324
325 /** 64-bit general purpose register view. */
326 RT_GCC_EXTENSION struct /* no tag! */
327 {
328 uint64_t rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15;
329 } CPUM_STRUCT_NM(qw);
330 /** 64-bit general purpose register view. */
331 RT_GCC_EXTENSION struct /* no tag! */
332 {
333 uint64_t r0, r1, r2, r3, r4, r5, r6, r7;
334 } CPUM_STRUCT_NM(qw2);
335 /** 32-bit general purpose register view. */
336 RT_GCC_EXTENSION struct /* no tag! */
337 {
338 uint32_t eax, u32Pad00, ecx, u32Pad01, edx, u32Pad02, ebx, u32Pad03,
339 esp, u32Pad04, ebp, u32Pad05, esi, u32Pad06, edi, u32Pad07,
340 r8d, u32Pad08, r9d, u32Pad09, r10d, u32Pad10, r11d, u32Pad11,
341 r12d, u32Pad12, r13d, u32Pad13, r14d, u32Pad14, r15d, u32Pad15;
342 } CPUM_STRUCT_NM(dw);
343 /** 16-bit general purpose register view. */
344 RT_GCC_EXTENSION struct /* no tag! */
345 {
346 uint16_t ax, au16Pad00[3], cx, au16Pad01[3], dx, au16Pad02[3], bx, au16Pad03[3],
347 sp, au16Pad04[3], bp, au16Pad05[3], si, au16Pad06[3], di, au16Pad07[3],
348 r8w, au16Pad08[3], r9w, au16Pad09[3], r10w, au16Pad10[3], r11w, au16Pad11[3],
349 r12w, au16Pad12[3], r13w, au16Pad13[3], r14w, au16Pad14[3], r15w, au16Pad15[3];
350 } CPUM_STRUCT_NM(w);
351 RT_GCC_EXTENSION struct /* no tag! */
352 {
353 uint8_t al, ah, abPad00[6], cl, ch, abPad01[6], dl, dh, abPad02[6], bl, bh, abPad03[6],
354 spl, abPad04[7], bpl, abPad05[7], sil, abPad06[7], dil, abPad07[7],
355 r8l, abPad08[7], r9l, abPad09[7], r10l, abPad10[7], r11l, abPad11[7],
356 r12l, abPad12[7], r13l, abPad13[7], r14l, abPad14[7], r15l, abPad15[7];
357 } CPUM_STRUCT_NM(b);
358 } CPUM_UNION_NM(g);
359
360 /** Segment registers. */
361 union /* no tag! */
362 {
363 /** The segment register array view, indexed by X86_SREG_XXX. */
364 CPUMSELREG aSRegs[6];
365 /** The named segment register view. */
366 RT_GCC_EXTENSION struct /* no tag! */
367 {
368 CPUMSELREG es, cs, ss, ds, fs, gs;
369 } CPUM_STRUCT_NM(n);
370 } CPUM_UNION_NM(s);
371
372 /** The local descriptor table register (LDTR).
373 * Only the guest context uses all the members. */
374 CPUMSELREG ldtr;
375 /** The task register.
376 * Only the guest context uses all the members. */
377 CPUMSELREG tr;
378
379 /** The program counter. */
380 union
381 {
382 uint16_t ip;
383 uint32_t eip;
384 uint64_t rip;
385 } CPUM_UNION_NM(rip);
386
387 /** The flags register. */
388 union
389 {
390 CPUMX86EFLAGS eflags;
391 CPUMX86RFLAGS rflags;
392 } CPUM_UNION_NM(rflags);
393
394 /** Interrupt & exception inhibiting (CPUMCTX_INHIBIT_XXX). */
395 uint8_t fInhibit;
396 uint8_t abPadding[7];
397 /** The RIP value fInhibit is/was valid for. */
398 uint64_t uRipInhibitInt;
399
400 /** @name Control registers.
401 * @{ */
402 uint64_t cr0;
403 uint64_t cr2;
404 uint64_t cr3;
405 uint64_t cr4;
406 /** @} */
407
408 /** Debug registers.
409 * @remarks DR4 and DR5 should not be used since they are aliases for
410 * DR6 and DR7 respectively on both AMD and Intel CPUs.
411 * @remarks DR8-15 are currently not supported by AMD or Intel, so
412 * we do not support them either.
413 */
414 uint64_t dr[8];
415
416 /** Padding before the structure so the 64-bit member is correctly aligned.
417 * @todo fix this structure! */
418 uint16_t gdtrPadding[3];
419 /** Global Descriptor Table register. */
420 VBOXGDTR gdtr;
421
422 /** Padding before the structure so the 64-bit member is correctly aligned.
423 * @todo fix this structure! */
424 uint16_t idtrPadding[3];
425 /** Interrupt Descriptor Table register. */
426 VBOXIDTR idtr;
427
428 /** The sysenter msr registers.
429 * This member is not used by the hypervisor context. */
430 CPUMSYSENTER SysEnter;
431
432 /** @name System MSRs.
433 * @{ */
434 uint64_t msrEFER; /**< @todo move EFER up to the crX registers for better cacheline mojo */
435 uint64_t msrSTAR; /**< Legacy syscall eip, cs & ss. */
436 uint64_t msrPAT; /**< Page attribute table. */
437 uint64_t msrLSTAR; /**< 64 bits mode syscall rip. */
438 uint64_t msrCSTAR; /**< Compatibility mode syscall rip. */
439 uint64_t msrSFMASK; /**< syscall flag mask. */
440 uint64_t msrKERNELGSBASE; /**< swapgs exchange value. */
441 /** @} */
442
443 /** 0x230 - Externalized state tracker, CPUMCTX_EXTRN_XXX.
444 * @todo Move up after uRipInhibitInt after fInhibit moves into RFLAGS.
445 * That will put this in the same cacheline as RIP, RFLAGS and CR0
446 * which are typically always imported and exported again during a
447 * VM exit. */
448 uint64_t fExtrn;
449
450 uint64_t u64Unused;
451
452 /** 0x240 - PAE PDPTEs. */
453 X86PDPE aPaePdpes[4];
454
455 /** 0x260 - The XCR0..XCR1 registers. */
456 uint64_t aXcr[2];
457 /** 0x270 - The mask to pass to XSAVE/XRSTOR in EDX:EAX. If zero we use
458 * FXSAVE/FXRSTOR (since bit 0 will always be set, we only need to test it). */
459 uint64_t fXStateMask;
460 /** 0x278 - Mirror of CPUMCPU::fUseFlags[CPUM_USED_FPU_GUEST]. */
461 bool fUsedFpuGuest;
462 uint8_t afUnused[7];
463
464 /* ---- Start of members not zeroed at reset. ---- */
465
466 /** 0x280 - State component offsets into pXState, UINT16_MAX if not present.
467 * @note Everything before this member will be memset to zero during reset. */
468 uint16_t aoffXState[64];
469 /** 0x300 - The extended state (FPU/SSE/AVX/AVX-2/XXXX).
470 * Aligned on a 256 byte boundary (min req is currently 64 bytes). */
471 union /* no tag */
472 {
473 X86XSAVEAREA XState;
474 /** Byte view for simple indexing and space allocation. */
475 uint8_t abXState[0x4000 - 0x300];
476 } CPUM_UNION_NM(u);
477
478 /** 0x4000 - Hardware virtualization state.
479 * @note This is page aligned, so a full-page member comes first in the
480 * substructures. */
481 struct
482 {
483 union /* no tag! */
484 {
485 struct
486 {
487 /** 0x4000 - Cache of the nested-guest VMCB. */
488 SVMVMCB Vmcb;
489 /** 0x5000 - The MSRPM (MSR Permission bitmap).
490 *
491 * This need not be backed by physically contiguous pages because we use the one
492 * HMPHYSCPU while executing the nested-guest using hardware-assisted SVM.
493 * This one is just used for caching the bitmap from guest physical memory.
494 *
495 * @todo r=bird: This is not used directly by AMD-V hardware, so it doesn't
496 * really need to even be page aligned.
497 *
498 * Also, couldn't we just access the guest page directly when we need to,
499 * or do we have to use a cached copy of it? */
500 uint8_t abMsrBitmap[SVM_MSRPM_PAGES * X86_PAGE_SIZE];
501 /** 0x7000 - The IOPM (IO Permission bitmap).
502 *
503 * This need not be backed by physically contiguous pages because we re-use the
504 * ring-0 allocated IOPM while executing the nested-guest using hardware-assisted
505 * SVM, since it is identical (we trap all IO accesses).
506 *
507 * This one is just used for caching the IOPM from guest physical memory in
508 * case the guest hypervisor allows direct access to some IO ports.
509 *
510 * @todo r=bird: This is not used directly by AMD-V hardware, so it doesn't
511 * really need to even be page aligned.
512 *
513 * Also, couldn't we just access the guest page directly when we need to,
514 * or do we have to use a cached copy of it? */
515 uint8_t abIoBitmap[SVM_IOPM_PAGES * X86_PAGE_SIZE];
516
517 /** 0xa000 - MSR holding physical address of the Guest's Host-state. */
518 uint64_t uMsrHSavePa;
519 /** 0xa008 - Guest physical address of the nested-guest VMCB. */
520 RTGCPHYS GCPhysVmcb;
521 /** 0xa010 - Guest's host-state save area. */
522 SVMHOSTSTATE HostState;
523 /** 0xa0c8 - Guest TSC time-stamp of when the previous PAUSE instr. was
524 * executed. */
525 uint64_t uPrevPauseTick;
526 /** 0xa0d0 - Pause filter count. */
527 uint16_t cPauseFilter;
528 /** 0xa0d2 - Pause filter threshold. */
529 uint16_t cPauseFilterThreshold;
530 /** 0xa0d4 - Whether the injected event is subject to event intercepts. */
531 bool fInterceptEvents;
532 /** 0xa0d5 - Padding. */
533 bool afPadding[3];
534 } svm;
535
536 struct
537 {
538 /** 0x4000 - The current VMCS. */
539 VMXVVMCS Vmcs;
540 /** 0x5000 - The shadow VMCS. */
541 VMXVVMCS ShadowVmcs;
542 /** 0x6000 - The VMREAD bitmap.
543 * @todo r=bird: Do we really need to keep copies for these? Couldn't we just
544 * access the guest memory directly as needed? */
545 uint8_t abVmreadBitmap[VMX_V_VMREAD_VMWRITE_BITMAP_SIZE];
546 /** 0x7000 - The VMWRITE bitmap.
547 * @todo r=bird: Do we really need to keep copies for these? Couldn't we just
548 * access the guest memory directly as needed? */
549 uint8_t abVmwriteBitmap[VMX_V_VMREAD_VMWRITE_BITMAP_SIZE];
550 /** 0x8000 - The VM-entry MSR-load area. */
551 VMXAUTOMSR aEntryMsrLoadArea[VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR)];
552 /** 0xa000 - The VM-exit MSR-store area. */
553 VMXAUTOMSR aExitMsrStoreArea[VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR)];
554 /** 0xc000 - The VM-exit MSR-load area. */
555 VMXAUTOMSR aExitMsrLoadArea[VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR)];
556 /** 0xe000 - The MSR permission bitmap.
557 * @todo r=bird: Do we really need to keep copies for these? Couldn't we just
558 * access the guest memory directly as needed? */
559 uint8_t abMsrBitmap[VMX_V_MSR_BITMAP_SIZE];
560 /** 0xf000 - The I/O permission bitmap.
561 * @todo r=bird: Do we really need to keep copies for these? Couldn't we just
562 * access the guest memory directly as needed? */
563 uint8_t abIoBitmap[VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE];
564
565 /** 0x11000 - Guest physical address of the VMXON region. */
566 RTGCPHYS GCPhysVmxon;
567 /** 0x11008 - Guest physical address of the current VMCS pointer. */
568 RTGCPHYS GCPhysVmcs;
569 /** 0x11010 - Guest physical address of the shadow VMCS pointer. */
570 RTGCPHYS GCPhysShadowVmcs;
571 /** 0x11018 - Last emulated VMX instruction/VM-exit diagnostic. */
572 VMXVDIAG enmDiag;
573 /** 0x1101c - VMX abort reason. */
574 VMXABORT enmAbort;
575 /** 0x11020 - Last emulated VMX instruction/VM-exit diagnostic auxiliary info.
576 * (mainly used for info. that's not part of the VMCS). */
577 uint64_t uDiagAux;
578 /** 0x11028 - VMX abort auxiliary info. */
579 uint32_t uAbortAux;
580 /** 0x1102c - Whether the guest is in VMX root mode. */
581 bool fInVmxRootMode;
582 /** 0x1102d - Whether the guest is in VMX non-root mode. */
583 bool fInVmxNonRootMode;
584 /** 0x1102e - Whether the injected events are subject to event intercepts. */
585 bool fInterceptEvents;
586 /** 0x1102f - Whether blocking of NMI (or virtual-NMIs) was in effect in VMX
587 * non-root mode before execution of IRET. */
588 bool fNmiUnblockingIret;
589 /** 0x11030 - Guest TSC timestamp of the first PAUSE instruction that is
590 * considered to be the first in a loop. */
591 uint64_t uFirstPauseLoopTick;
592 /** 0x11038 - Guest TSC timestamp of the previous PAUSE instruction. */
593 uint64_t uPrevPauseTick;
594 /** 0x11040 - Guest TSC timestamp of VM-entry (used for VMX-preemption
595 * timer). */
596 uint64_t uEntryTick;
597 /** 0x11048 - Virtual-APIC write offset (until trap-like VM-exit). */
598 uint16_t offVirtApicWrite;
599 /** 0x1104a - Whether virtual-NMI blocking is in effect. */
600 bool fVirtNmiBlocking;
601 /** 0x1104b - Padding. */
602 uint8_t abPadding0[5];
603 /** 0x11050 - Guest VMX MSRs. */
604 VMXMSRS Msrs;
605 } vmx;
606 } CPUM_UNION_NM(s);
607
608 /** 0x11130 - Hardware virtualization type currently in use. */
609 CPUMHWVIRT enmHwvirt;
610 /** 0x11134 - Global interrupt flag - AMD only (always true on Intel). */
611 bool fGif;
612 /** 0x11135 - Padding. */
613 bool afPadding0[3];
614 /** 0x11138 - A subset of guest inhibit flags (CPUMCTX_INHIBIT_XXX) that are
615 * saved while running the nested-guest. */
616 uint32_t fSavedInhibit;
617 /** 0x1113c - Pad to 64 byte boundary. */
618 uint8_t abPadding1[4];
619 } hwvirt;
620} CPUMCTX;
621#pragma pack()
622
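/* Illustrative sketch, not part of the original header: the general purpose
 * register union lets the same storage be addressed either by X86_GREG_XXX
 * index or by name, which the assertions below verify stay in sync.  The
 * function name is made up for the example. */
#if 0 /* example only */
static uint64_t cpumExampleReadGReg(PCCPUMCTX pCtx, unsigned iGReg)
{
    /* pCtx->aGRegs[X86_GREG_xAX].u64 aliases pCtx->rax, and so on. */
    return iGReg < RT_ELEMENTS(pCtx->aGRegs) ? pCtx->aGRegs[iGReg].u64 : 0;
}
#endif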
623#ifndef VBOX_FOR_DTRACE_LIB
624AssertCompileSizeAlignment(CPUMCTX, 64);
625AssertCompileSizeAlignment(CPUMCTX, 32);
626AssertCompileSizeAlignment(CPUMCTX, 16);
627AssertCompileSizeAlignment(CPUMCTX, 8);
628AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax, 0x0000);
629AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx, 0x0008);
630AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx, 0x0010);
631AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx, 0x0018);
632AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp, 0x0020);
633AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp, 0x0028);
634AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi, 0x0030);
635AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi, 0x0038);
636AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r8, 0x0040);
637AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r9, 0x0048);
638AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10, 0x0050);
639AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11, 0x0058);
640AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12, 0x0060);
641AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 0x0068);
642AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 0x0070);
643AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 0x0078);
644AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 0x0080);
645AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 0x0098);
646AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 0x00b0);
647AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 0x00c8);
648AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 0x00e0);
649AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 0x00f8);
650AssertCompileMemberOffset(CPUMCTX, ldtr, 0x0110);
651AssertCompileMemberOffset(CPUMCTX, tr, 0x0128);
652AssertCompileMemberOffset(CPUMCTX, rip, 0x0140);
653AssertCompileMemberOffset(CPUMCTX, rflags, 0x0148);
654AssertCompileMemberOffset(CPUMCTX, fInhibit, 0x0150);
655AssertCompileMemberOffset(CPUMCTX, uRipInhibitInt, 0x0158);
656AssertCompileMemberOffset(CPUMCTX, cr0, 0x0160);
657AssertCompileMemberOffset(CPUMCTX, cr2, 0x0168);
658AssertCompileMemberOffset(CPUMCTX, cr3, 0x0170);
659AssertCompileMemberOffset(CPUMCTX, cr4, 0x0178);
660AssertCompileMemberOffset(CPUMCTX, dr, 0x0180);
661AssertCompileMemberOffset(CPUMCTX, gdtr, 0x01c0+6);
662AssertCompileMemberOffset(CPUMCTX, idtr, 0x01d0+6);
663AssertCompileMemberOffset(CPUMCTX, SysEnter, 0x01e0);
664AssertCompileMemberOffset(CPUMCTX, msrEFER, 0x01f8);
665AssertCompileMemberOffset(CPUMCTX, msrSTAR, 0x0200);
666AssertCompileMemberOffset(CPUMCTX, msrPAT, 0x0208);
667AssertCompileMemberOffset(CPUMCTX, msrLSTAR, 0x0210);
668AssertCompileMemberOffset(CPUMCTX, msrCSTAR, 0x0218);
669AssertCompileMemberOffset(CPUMCTX, msrSFMASK, 0x0220);
670AssertCompileMemberOffset(CPUMCTX, msrKERNELGSBASE, 0x0228);
671AssertCompileMemberOffset(CPUMCTX, aPaePdpes, 0x0240);
672AssertCompileMemberOffset(CPUMCTX, aXcr, 0x0260);
673AssertCompileMemberOffset(CPUMCTX, fXStateMask, 0x0270);
674AssertCompileMemberOffset(CPUMCTX, fUsedFpuGuest, 0x0278);
675AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x0300);
676AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) abXState, 0x0300);
677AssertCompileMemberAlignment(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x0100);
678/* Only do spot checks for hwvirt */
679AssertCompileMemberAlignment(CPUMCTX, hwvirt, 0x1000);
680AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.Vmcb, X86_PAGE_SIZE);
681AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abMsrBitmap, X86_PAGE_SIZE);
682AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abIoBitmap, X86_PAGE_SIZE);
683AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.Vmcs, X86_PAGE_SIZE);
684AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.ShadowVmcs, X86_PAGE_SIZE);
685AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.abVmreadBitmap, X86_PAGE_SIZE);
686AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.abVmwriteBitmap, X86_PAGE_SIZE);
687AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.aEntryMsrLoadArea, X86_PAGE_SIZE);
688AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.aExitMsrStoreArea, X86_PAGE_SIZE);
689AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.aExitMsrLoadArea, X86_PAGE_SIZE);
690AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.abMsrBitmap, X86_PAGE_SIZE);
691AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.abIoBitmap, X86_PAGE_SIZE);
692AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.Msrs, 8);
693AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abIoBitmap, 0x7000);
694AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fInterceptEvents, 0xa0d4);
695AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.abIoBitmap, 0xf000);
696AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.fVirtNmiBlocking, 0x1104a);
697AssertCompileMemberOffset(CPUMCTX, hwvirt.enmHwvirt, 0x11130);
698AssertCompileMemberOffset(CPUMCTX, hwvirt.fGif, 0x11134);
699AssertCompileMemberOffset(CPUMCTX, hwvirt.fSavedInhibit, 0x11138);
700AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
701AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r0);
702AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r1);
703AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r2);
704AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r3);
705AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r4);
706AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r5);
707AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r6);
708AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r7);
709AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) eax);
710AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) ecx);
711AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) edx);
712AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) ebx);
713AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) esp);
714AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) ebp);
715AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) esi);
716AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) edi);
717AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r8d);
718AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r9d);
719AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r10d);
720AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r11d);
721AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r12d);
722AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r13d);
723AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r14d);
724AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_STRUCT_NM(g,dw.) r15d);
725AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) ax);
726AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) cx);
727AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) dx);
728AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) bx);
729AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) sp);
730AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) bp);
731AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) si);
732AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) di);
733AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r8w);
734AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r9w);
735AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r10w);
736AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r11w);
737AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r12w);
738AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r13w);
739AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r14w);
740AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_STRUCT_NM(g,w.) r15w);
741AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) al);
742AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) cl);
743AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) dl);
744AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) bl);
745AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) spl);
746AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) bpl);
747AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) sil);
748AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) dil);
749AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r8l);
750AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r9l);
751AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r10l);
752AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r11l);
753AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r12l);
754AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r13l);
755AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r14l);
756AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_STRUCT_NM(g,b.) r15l);
757AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, CPUMCTX, CPUM_UNION_NM(s.) aSRegs);
758# ifndef _MSC_VER
759AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xAX]);
760AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rcx, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xCX]);
761AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdx, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xDX]);
762AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbx, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xBX]);
763AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsp, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xSP]);
764AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rbp, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xBP]);
765AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rsi, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xSI]);
766AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rdi, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_xDI]);
767AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r8, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x8]);
768AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r9, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x9]);
769AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r10, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x10]);
770AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r11, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x11]);
771AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r12, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x12]);
772AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r13, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x13]);
773AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r14, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x14]);
774AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) r15, CPUMCTX, CPUM_UNION_NM(g.) aGRegs[X86_GREG_x15]);
775AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) es, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_ES]);
776AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) cs, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_CS]);
777AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) ss, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_SS]);
778AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) ds, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_DS]);
779AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) fs, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_FS]);
780AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(s,n.) gs, CPUMCTX, CPUM_UNION_NM(s.) aSRegs[X86_SREG_GS]);
781# endif
782
783
784/**
785 * Calculates the pointer to the given extended state component.
786 *
787 * @returns Pointer of type @a a_PtrType
788 * @param a_pCtx Pointer to the context.
789 * @param a_iCompBit The extended state component bit number. This bit
790 * must be set in CPUMCTX::fXStateMask.
791 * @param a_PtrType The pointer type of the extended state component.
792 *
793 */
794#if defined(VBOX_STRICT) && defined(RT_COMPILER_SUPPORTS_LAMBDA)
795# define CPUMCTX_XSAVE_C_PTR(a_pCtx, a_iCompBit, a_PtrType) \
796 ([](PCCPUMCTX a_pLambdaCtx) -> a_PtrType \
797 { \
798 AssertCompile((a_iCompBit) < 64U); \
799 AssertMsg(a_pLambdaCtx->fXStateMask & RT_BIT_64(a_iCompBit), (#a_iCompBit "\n")); \
800 AssertMsg(a_pLambdaCtx->aoffXState[(a_iCompBit)] != UINT16_MAX, (#a_iCompBit "\n")); \
801 return (a_PtrType)(&a_pLambdaCtx->abXState[a_pLambdaCtx->aoffXState[(a_iCompBit)]]); \
802 }(a_pCtx))
803#elif defined(VBOX_STRICT) && defined(__GNUC__)
804# define CPUMCTX_XSAVE_C_PTR(a_pCtx, a_iCompBit, a_PtrType) \
805 __extension__ (\
806 { \
807 AssertCompile((a_iCompBit) < 64U); \
808 AssertMsg((a_pCtx)->fXStateMask & RT_BIT_64(a_iCompBit), (#a_iCompBit "\n")); \
809 AssertMsg((a_pCtx)->aoffXState[(a_iCompBit)] != UINT16_MAX, (#a_iCompBit "\n")); \
810 (a_PtrType)(&(a_pCtx)->abXState[(a_pCtx)->aoffXState[(a_iCompBit)]]); \
811 })
812#else
813# define CPUMCTX_XSAVE_C_PTR(a_pCtx, a_iCompBit, a_PtrType) \
814 ((a_PtrType)(&(a_pCtx)->abXState[(a_pCtx)->aoffXState[(a_iCompBit)]]))
815#endif
816
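/* Illustrative sketch, not part of the original header: looking up an XSAVE
 * component via CPUMCTX_XSAVE_C_PTR.  XSAVE_C_YMM_BIT and PX86XSAVEYMMHI are
 * assumed here to be the iprt/x86.h names for the AVX high-half component;
 * the function name is made up for the example. */
#if 0 /* example only */
static void cpumExampleGetYmmHi(PCPUMCTX pCtx)
{
    /* Only valid when the YMM component is enabled in fXStateMask. */
    PX86XSAVEYMMHI pYmmHi = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
    NOREF(pYmmHi);
}
#endif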
817/**
818 * Gets the first selector register of a CPUMCTX.
819 *
820 * Use this with X86_SREG_COUNT to loop through the selector registers.
821 */
822# define CPUMCTX_FIRST_SREG(a_pCtx) (&(a_pCtx)->es)
823
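/* Illustrative sketch, not part of the original header: iterating all six
 * selector registers using CPUMCTX_FIRST_SREG and X86_SREG_COUNT.  The
 * function name and the limit summing are made up for the example. */
#if 0 /* example only */
static uint32_t cpumExampleSumSRegLimits(PCPUMCTX pCtx)
{
    uint32_t    cbTotal = 0;
    CPUMSELREG *pSReg   = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++, pSReg++)
        cbTotal += pSReg->u32Limit;
    return cbTotal;
}
#endif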
824#endif /* !VBOX_FOR_DTRACE_LIB */
825
826
827/** @name CPUMCTX_EXTRN_XXX
828 * Used for parts of the CPUM state that are externalized and need fetching
829 * before use.
830 *
831 * @{ */
832/** External state keeper: Invalid. */
833#define CPUMCTX_EXTRN_KEEPER_INVALID UINT64_C(0x0000000000000000)
834/** External state keeper: HM. */
835#define CPUMCTX_EXTRN_KEEPER_HM UINT64_C(0x0000000000000001)
836/** External state keeper: NEM. */
837#define CPUMCTX_EXTRN_KEEPER_NEM UINT64_C(0x0000000000000002)
838/** External state keeper: REM. */
839#define CPUMCTX_EXTRN_KEEPER_REM UINT64_C(0x0000000000000003)
840/** External state keeper mask. */
841#define CPUMCTX_EXTRN_KEEPER_MASK UINT64_C(0x0000000000000003)
842
843/** The RIP register value is kept externally. */
844#define CPUMCTX_EXTRN_RIP UINT64_C(0x0000000000000004)
845/** The RFLAGS register values are kept externally. */
846#define CPUMCTX_EXTRN_RFLAGS UINT64_C(0x0000000000000008)
847
848/** The RAX register value is kept externally. */
849#define CPUMCTX_EXTRN_RAX UINT64_C(0x0000000000000010)
850/** The RCX register value is kept externally. */
851#define CPUMCTX_EXTRN_RCX UINT64_C(0x0000000000000020)
852/** The RDX register value is kept externally. */
853#define CPUMCTX_EXTRN_RDX UINT64_C(0x0000000000000040)
854/** The RBX register value is kept externally. */
855#define CPUMCTX_EXTRN_RBX UINT64_C(0x0000000000000080)
856/** The RSP register value is kept externally. */
857#define CPUMCTX_EXTRN_RSP UINT64_C(0x0000000000000100)
858/** The RBP register value is kept externally. */
859#define CPUMCTX_EXTRN_RBP UINT64_C(0x0000000000000200)
860/** The RSI register value is kept externally. */
861#define CPUMCTX_EXTRN_RSI UINT64_C(0x0000000000000400)
862/** The RDI register value is kept externally. */
863#define CPUMCTX_EXTRN_RDI UINT64_C(0x0000000000000800)
864/** The R8 thru R15 register values are kept externally. */
865#define CPUMCTX_EXTRN_R8_R15 UINT64_C(0x0000000000001000)
866/** General purpose registers mask. */
867#define CPUMCTX_EXTRN_GPRS_MASK UINT64_C(0x0000000000001ff0)
868
869/** The ES register values are kept externally. */
870#define CPUMCTX_EXTRN_ES UINT64_C(0x0000000000002000)
871/** The CS register values are kept externally. */
872#define CPUMCTX_EXTRN_CS UINT64_C(0x0000000000004000)
873/** The SS register values are kept externally. */
874#define CPUMCTX_EXTRN_SS UINT64_C(0x0000000000008000)
875/** The DS register values are kept externally. */
876#define CPUMCTX_EXTRN_DS UINT64_C(0x0000000000010000)
877/** The FS register values are kept externally. */
878#define CPUMCTX_EXTRN_FS UINT64_C(0x0000000000020000)
879/** The GS register values are kept externally. */
880#define CPUMCTX_EXTRN_GS UINT64_C(0x0000000000040000)
881/** Segment registers (includes CS). */
882#define CPUMCTX_EXTRN_SREG_MASK UINT64_C(0x000000000007e000)
883/** Converts an X86_SREG_XXX index to a CPUMCTX_EXTRN_xS mask. */
884#define CPUMCTX_EXTRN_SREG_FROM_IDX(a_SRegIdx) RT_BIT_64((a_SRegIdx) + 13)
885#ifndef VBOX_FOR_DTRACE_LIB
886AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_ES) == CPUMCTX_EXTRN_ES);
887AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_CS) == CPUMCTX_EXTRN_CS);
888AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_DS) == CPUMCTX_EXTRN_DS);
889AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_FS) == CPUMCTX_EXTRN_FS);
890AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_GS) == CPUMCTX_EXTRN_GS);
891#endif
892
893/** The GDTR register values are kept externally. */
894#define CPUMCTX_EXTRN_GDTR UINT64_C(0x0000000000080000)
895/** The IDTR register values are kept externally. */
896#define CPUMCTX_EXTRN_IDTR UINT64_C(0x0000000000100000)
897/** The LDTR register values are kept externally. */
898#define CPUMCTX_EXTRN_LDTR UINT64_C(0x0000000000200000)
899/** The TR register values are kept externally. */
900#define CPUMCTX_EXTRN_TR UINT64_C(0x0000000000400000)
901/** Table register mask. */
902#define CPUMCTX_EXTRN_TABLE_MASK UINT64_C(0x0000000000780000)
903
904/** The CR0 register value is kept externally. */
905#define CPUMCTX_EXTRN_CR0 UINT64_C(0x0000000000800000)
906/** The CR2 register value is kept externally. */
907#define CPUMCTX_EXTRN_CR2 UINT64_C(0x0000000001000000)
908/** The CR3 register value is kept externally. */
909#define CPUMCTX_EXTRN_CR3 UINT64_C(0x0000000002000000)
910/** The CR4 register value is kept externally. */
911#define CPUMCTX_EXTRN_CR4 UINT64_C(0x0000000004000000)
912/** Control register mask. */
913#define CPUMCTX_EXTRN_CR_MASK UINT64_C(0x0000000007800000)
914/** The TPR/CR8 register value is kept externally. */
915#define CPUMCTX_EXTRN_APIC_TPR UINT64_C(0x0000000008000000)
916/** The EFER register value is kept externally. */
917#define CPUMCTX_EXTRN_EFER UINT64_C(0x0000000010000000)
918
919/** The DR0, DR1, DR2 and DR3 register values are kept externally. */
920#define CPUMCTX_EXTRN_DR0_DR3 UINT64_C(0x0000000020000000)
921/** The DR6 register value is kept externally. */
922#define CPUMCTX_EXTRN_DR6 UINT64_C(0x0000000040000000)
923/** The DR7 register value is kept externally. */
924#define CPUMCTX_EXTRN_DR7 UINT64_C(0x0000000080000000)
925/** Debug register mask. */
926#define CPUMCTX_EXTRN_DR_MASK UINT64_C(0x00000000e0000000)
927
928/** The XSAVE_C_X87 state is kept externally. */
929#define CPUMCTX_EXTRN_X87 UINT64_C(0x0000000100000000)
930/** The XSAVE_C_SSE, XSAVE_C_YMM, XSAVE_C_ZMM_HI256, XSAVE_C_ZMM_16HI and
931 * XSAVE_C_OPMASK state is kept externally. */
932#define CPUMCTX_EXTRN_SSE_AVX UINT64_C(0x0000000200000000)
933/** The state of XSAVE components not covered by CPUMCTX_EXTRN_X87 and
934 * CPUMCTX_EXTRN_SSE_AVX is kept externally. */
935#define CPUMCTX_EXTRN_OTHER_XSAVE UINT64_C(0x0000000400000000)
936/** The XCR0 and XCR1 register values are kept externally. */
937#define CPUMCTX_EXTRN_XCRx UINT64_C(0x0000000800000000)
938
939
940/** The KERNEL GS BASE MSR value is kept externally. */
941#define CPUMCTX_EXTRN_KERNEL_GS_BASE UINT64_C(0x0000001000000000)
942/** The STAR, LSTAR, CSTAR and SFMASK MSR values are kept externally. */
943#define CPUMCTX_EXTRN_SYSCALL_MSRS UINT64_C(0x0000002000000000)
944/** The SYSENTER_CS, SYSENTER_EIP and SYSENTER_ESP MSR values are kept externally. */
945#define CPUMCTX_EXTRN_SYSENTER_MSRS UINT64_C(0x0000004000000000)
946/** The TSC_AUX MSR is kept externally. */
947#define CPUMCTX_EXTRN_TSC_AUX UINT64_C(0x0000008000000000)
948/** All other stateful MSRs not covered by CPUMCTX_EXTRN_EFER,
949 * CPUMCTX_EXTRN_KERNEL_GS_BASE, CPUMCTX_EXTRN_SYSCALL_MSRS,
950 * CPUMCTX_EXTRN_SYSENTER_MSRS, and CPUMCTX_EXTRN_TSC_AUX. */
951#define CPUMCTX_EXTRN_OTHER_MSRS UINT64_C(0x0000010000000000)
952
953/** Mask of all the MSRs. */
954#define CPUMCTX_EXTRN_ALL_MSRS ( CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS \
955 | CPUMCTX_EXTRN_SYSENTER_MSRS | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)
956
957/** Hardware-virtualization (SVM or VMX) state is kept externally. */
958#define CPUMCTX_EXTRN_HWVIRT UINT64_C(0x0000020000000000)
959
960/** Inhibit maskable interrupts (VMCPU_FF_INHIBIT_INTERRUPTS) */
961#define CPUMCTX_EXTRN_INHIBIT_INT UINT64_C(0x0000040000000000)
962/** Inhibit non-maskable interrupts (VMCPU_FF_BLOCK_NMIS). */
963#define CPUMCTX_EXTRN_INHIBIT_NMI UINT64_C(0x0000080000000000)
964
965/** Mask of bits the keepers can use for state tracking. */
966#define CPUMCTX_EXTRN_KEEPER_STATE_MASK UINT64_C(0xffff000000000000)
967
968/** NEM/Win: Event injection (also known as interruption) pending state. */
969#define CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT UINT64_C(0x0001000000000000)
970/** NEM/Win: Mask. */
971#define CPUMCTX_EXTRN_NEM_WIN_MASK UINT64_C(0x0001000000000000)
972
973/** HM/SVM: Nested-guest interrupt pending (VMCPU_FF_INTERRUPT_NESTED_GUEST). */
974#define CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ UINT64_C(0x0001000000000000)
975/** HM/SVM: Mask. */
976#define CPUMCTX_EXTRN_HM_SVM_MASK UINT64_C(0x0001000000000000)
977
978/** All CPUM state bits, not including keeper specific ones. */
979#define CPUMCTX_EXTRN_ALL UINT64_C(0x00000ffffffffffc)
980/** All CPUM state bits, including keeper specific ones. */
981#define CPUMCTX_EXTRN_ABSOLUTELY_ALL UINT64_C(0xfffffffffffffffc)
982/** @} */
983
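/* Illustrative sketch, not part of the original header: the usual pattern for
 * consuming fExtrn - make sure a piece of state has been imported from its
 * keeper (HM, NEM, ...) before reading it.  cpumExampleImportGuestState is a
 * hypothetical stand-in for the real on-demand import path. */
#if 0 /* example only */
static uint64_t cpumExampleGetRip(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
        cpumExampleImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP); /* hypothetical helper */
    return pCtx->rip;
}
#endif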
984
985/** @name CPUMCTX_INHIBIT_XXX - Interrupt inhibiting flags.
986 * @{ */
987/** Interrupt shadow following MOV SS or POP SS.
988 *
989 * When this in effect, both maskable and non-maskable interrupts are blocked
990 * from delivery for one instruction. Same for certain debug exceptions too,
991 * unlike the STI variant.
992 *
993 * It is implementation specific whether a sequence of two or more of these
994 * instructions will have any effect on the instruction following the last one
995 * of them. */
996#define CPUMCTX_INHIBIT_SHADOW_SS UINT8_C(0x01)
997/** Interrupt shadow following STI.
998 * Same as CPUMCTX_INHIBIT_SHADOW_SS but without blocking any debug exceptions. */
999#define CPUMCTX_INHIBIT_SHADOW_STI UINT8_C(0x02)
1000/** Mask combining STI and SS shadowing. */
1001#define CPUMCTX_INHIBIT_SHADOW (CPUMCTX_INHIBIT_SHADOW_SS | CPUMCTX_INHIBIT_SHADOW_STI)
1002
1003/** Interrupts blocked by NMI delivery. This condition is cleared by IRET.
1004 *
1005 * Section "6.7 NONMASKABLE INTERRUPT (NMI)" in Intel SDM Vol 3A states that
1006 * "The processor also invokes certain hardware conditions to ensure that no
1007 * other interrupts, including NMI interrupts, are received until the NMI
1008 * handler has completed executing." This flag indicates that these
1009 * conditions are currently active. */
1010#define CPUMCTX_INHIBIT_NMI UINT8_C(0x04)
1011/** @} */
1012
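/* Illustrative sketch, not part of the original header: testing the inhibit
 * flags before delivering an external interrupt.  This only checks the flag
 * bits; real code would additionally compare uRipInhibitInt against the
 * current RIP to see whether an interrupt shadow is still in effect.  The
 * function name is made up for the example. */
#if 0 /* example only */
static bool cpumExampleCanInjectExtInt(PCCPUMCTX pCtx)
{
    /* MOV SS/POP SS and STI shadows block maskable interrupts, and so does
       an NMI whose handler has not yet executed IRET. */
    return !(pCtx->fInhibit & (CPUMCTX_INHIBIT_SHADOW | CPUMCTX_INHIBIT_NMI));
}
#endif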
1013
1014/**
1015 * Additional guest MSRs (i.e. not part of the CPU context structure).
1016 *
1017 * @remarks Never change the order here because of the saved state! The size
1018 * can in theory be changed, but keep older VBox versions in mind.
1019 */
1020typedef union CPUMCTXMSRS
1021{
1022 struct
1023 {
1024 uint64_t TscAux; /**< MSR_K8_TSC_AUX */
1025 uint64_t MiscEnable; /**< MSR_IA32_MISC_ENABLE */
1026 uint64_t MtrrDefType; /**< IA32_MTRR_DEF_TYPE */
1027 uint64_t MtrrFix64K_00000; /**< IA32_MTRR_FIX64K_00000 */
1028 uint64_t MtrrFix16K_80000; /**< IA32_MTRR_FIX16K_80000 */
1029 uint64_t MtrrFix16K_A0000; /**< IA32_MTRR_FIX16K_A0000 */
1030 uint64_t MtrrFix4K_C0000; /**< IA32_MTRR_FIX4K_C0000 */
1031 uint64_t MtrrFix4K_C8000; /**< IA32_MTRR_FIX4K_C8000 */
1032 uint64_t MtrrFix4K_D0000; /**< IA32_MTRR_FIX4K_D0000 */
1033 uint64_t MtrrFix4K_D8000; /**< IA32_MTRR_FIX4K_D8000 */
1034 uint64_t MtrrFix4K_E0000; /**< IA32_MTRR_FIX4K_E0000 */
1035 uint64_t MtrrFix4K_E8000; /**< IA32_MTRR_FIX4K_E8000 */
1036 uint64_t MtrrFix4K_F0000; /**< IA32_MTRR_FIX4K_F0000 */
1037 uint64_t MtrrFix4K_F8000; /**< IA32_MTRR_FIX4K_F8000 */
1038 uint64_t PkgCStateCfgCtrl; /**< MSR_PKG_CST_CONFIG_CONTROL */
1039 uint64_t SpecCtrl; /**< IA32_SPEC_CTRL */
1040 uint64_t ArchCaps; /**< IA32_ARCH_CAPABILITIES */
1041 } msr;
1042 uint64_t au64[64];
1043} CPUMCTXMSRS;
1044/** Pointer to the guest MSR state. */
1045typedef CPUMCTXMSRS *PCPUMCTXMSRS;
1046/** Pointer to the const guest MSR state. */
1047typedef const CPUMCTXMSRS *PCCPUMCTXMSRS;
1048
1049/** @} */
1050
1051RT_C_DECLS_END
1052
1053#endif /* !VBOX_INCLUDED_vmm_cpumctx_h */
1054