VirtualBox

source: vbox/trunk/include/VBox/vmm/hm_vmx.h@47100

Last change on this file since 47100 was 46942, checked in by vboxsync, 12 years ago

VMM: Fix LDTR restoration to be done dynamically. This is required for 64-bit Darwin hosts.

1/** @file
2 * HM - VMX Structures and Definitions. (VMM)
3 */
4
5/*
6 * Copyright (C) 2006-2013 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_vmx_h
27#define ___VBox_vmm_vmx_h
28
29#include <VBox/types.h>
30#include <VBox/err.h>
31#include <iprt/x86.h>
32#include <iprt/assert.h>
33
34/** @defgroup grp_vmx vmx Types and Definitions
35 * @ingroup grp_hm
36 * @{
37 */
38
39/** @name Host-state restoration flags.
40 * @{
41 */
42/* If you change these values, don't forget to update the assembly defines as well! */
43#define VMX_RESTORE_HOST_SEL_DS RT_BIT(0)
44#define VMX_RESTORE_HOST_SEL_ES RT_BIT(1)
45#define VMX_RESTORE_HOST_SEL_FS RT_BIT(2)
46#define VMX_RESTORE_HOST_SEL_GS RT_BIT(3)
47#define VMX_RESTORE_HOST_SEL_TR RT_BIT(4)
48#define VMX_RESTORE_HOST_GDTR RT_BIT(5)
49#define VMX_RESTORE_HOST_IDTR RT_BIT(6)
50/** @} */
51
52/**
53 * Host-state restoration structure.
54 * This holds host-state fields that require manual restoration. The layout is
55 * critical as it's used from assembly code.
56 */
57#pragma pack(1)
58typedef struct VMXRESTOREHOST
59{
60 RTSEL uHostSelDS; /* 0x00 */
61 RTSEL uHostSelES; /* 0x02 */
62 RTSEL uHostSelFS; /* 0x04 */
63 RTSEL uHostSelGS; /* 0x06 */
64 RTSEL uHostSelTR; /* 0x08 */
65 uint16_t u16Padding; /* 0x0a */
66 uint64_t uHostFSBase; /* 0x0c */
67 uint64_t uHostGSBase; /* 0x14 */
68 X86XDTR64 HostGdtr; /* 0x1c */
69 X86XDTR64 HostIdtr; /* 0x26 */
70} VMXRESTOREHOST;
71#pragma pack()
72/** Pointer to VMXRESTOREHOST. */
73typedef VMXRESTOREHOST *PVMXRESTOREHOST;
74AssertCompileSize(X86XDTR64, 10);
75AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelDS, 0);
76AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelES, 2);
77AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelFS, 4);
78AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelGS, 6);
79AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelTR, 8);
80AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 12);
81AssertCompileMemberOffset(VMXRESTOREHOST, uHostGSBase, 20);
82AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtr, 28);
83AssertCompileMemberOffset(VMXRESTOREHOST, HostIdtr, 38);
84AssertCompileSize(VMXRESTOREHOST, 48);
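/*
 * Illustrative usage sketch (editorial addition, not part of the original header):
 * capturing two host selectors into VMXRESTOREHOST and building the matching
 * VMX_RESTORE_HOST_* flag mask for VMXRestoreHostState(), which is declared
 * further down in this file.  ASMGetDS()/ASMGetES() are assumed to be the IPRT
 * selector readers from iprt/asm-amd64-x86.h; the rest is hypothetical glue.
 *
 *     uint32_t       fRestoreFlags = 0;
 *     VMXRESTOREHOST RestoreHost;
 *     RT_ZERO(RestoreHost);
 *
 *     RTSEL uSelDS = ASMGetDS();
 *     if (uSelDS)
 *     {
 *         RestoreHost.uHostSelDS = uSelDS;
 *         fRestoreFlags |= VMX_RESTORE_HOST_SEL_DS;
 *     }
 *     RTSEL uSelES = ASMGetES();
 *     if (uSelES)
 *     {
 *         RestoreHost.uHostSelES = uSelES;
 *         fRestoreFlags |= VMX_RESTORE_HOST_SEL_ES;
 *     }
 *     // After the world switch:  VMXRestoreHostState(fRestoreFlags, &RestoreHost);
 */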
85
86/** @name VMX VMCS-Read cache indices.
87 * @{
88 */
89#ifndef VBOX_WITH_OLD_VTX_CODE
90# define VMX_VMCS_GUEST_ES_BASE_CACHE_IDX 0
91# define VMX_VMCS_GUEST_CS_BASE_CACHE_IDX 1
92# define VMX_VMCS_GUEST_SS_BASE_CACHE_IDX 2
93# define VMX_VMCS_GUEST_DS_BASE_CACHE_IDX 3
94# define VMX_VMCS_GUEST_FS_BASE_CACHE_IDX 4
95# define VMX_VMCS_GUEST_GS_BASE_CACHE_IDX 5
96# define VMX_VMCS_GUEST_LDTR_BASE_CACHE_IDX 6
97# define VMX_VMCS_GUEST_TR_BASE_CACHE_IDX 7
98# define VMX_VMCS_GUEST_GDTR_BASE_CACHE_IDX 8
99# define VMX_VMCS_GUEST_IDTR_BASE_CACHE_IDX 9
100# define VMX_VMCS_GUEST_RSP_CACHE_IDX 10
101# define VMX_VMCS_GUEST_RIP_CACHE_IDX 11
102# define VMX_VMCS_GUEST_SYSENTER_ESP_CACHE_IDX 12
103# define VMX_VMCS_GUEST_SYSENTER_EIP_CACHE_IDX 13
104# define VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX 14
105# define VMX_VMCS_MAX_CACHE_IDX (VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX + 1)
106# define VMX_VMCS_GUEST_CR3_CACHE_IDX 15
107# define VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX (VMX_VMCS_GUEST_CR3_CACHE_IDX + 1)
108#else /* VBOX_WITH_OLD_VTX_CODE */
109# define VMX_VMCS_GUEST_RIP_CACHE_IDX 0
110# define VMX_VMCS_GUEST_RSP_CACHE_IDX 1
111# define VMX_VMCS_GUEST_RFLAGS_CACHE_IDX 2
112# define VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE_CACHE_IDX 3
113# define VMX_VMCS_CTRL_CR0_READ_SHADOW_CACHE_IDX 4
114# define VMX_VMCS_GUEST_CR0_CACHE_IDX 5
115# define VMX_VMCS_CTRL_CR4_READ_SHADOW_CACHE_IDX 6
116# define VMX_VMCS_GUEST_CR4_CACHE_IDX 7
117# define VMX_VMCS_GUEST_DR7_CACHE_IDX 8
118# define VMX_VMCS32_GUEST_SYSENTER_CS_CACHE_IDX 9
119# define VMX_VMCS_GUEST_SYSENTER_EIP_CACHE_IDX 10
120# define VMX_VMCS_GUEST_SYSENTER_ESP_CACHE_IDX 11
121# define VMX_VMCS32_GUEST_GDTR_LIMIT_CACHE_IDX 12
122# define VMX_VMCS_GUEST_GDTR_BASE_CACHE_IDX 13
123# define VMX_VMCS32_GUEST_IDTR_LIMIT_CACHE_IDX 14
124# define VMX_VMCS_GUEST_IDTR_BASE_CACHE_IDX 15
125# define VMX_VMCS16_GUEST_FIELD_CS_CACHE_IDX 16
126# define VMX_VMCS32_GUEST_CS_LIMIT_CACHE_IDX 17
127# define VMX_VMCS_GUEST_CS_BASE_CACHE_IDX 18
128# define VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS_CACHE_IDX 19
129# define VMX_VMCS16_GUEST_FIELD_DS_CACHE_IDX 20
130# define VMX_VMCS32_GUEST_DS_LIMIT_CACHE_IDX 21
131# define VMX_VMCS_GUEST_DS_BASE_CACHE_IDX 22
132# define VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS_CACHE_IDX 23
133# define VMX_VMCS16_GUEST_FIELD_ES_CACHE_IDX 24
134# define VMX_VMCS32_GUEST_ES_LIMIT_CACHE_IDX 25
135# define VMX_VMCS_GUEST_ES_BASE_CACHE_IDX 26
136# define VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS_CACHE_IDX 27
137# define VMX_VMCS16_GUEST_FIELD_FS_CACHE_IDX 28
138# define VMX_VMCS32_GUEST_FS_LIMIT_CACHE_IDX 29
139# define VMX_VMCS_GUEST_FS_BASE_CACHE_IDX 30
140# define VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS_CACHE_IDX 31
141# define VMX_VMCS16_GUEST_FIELD_GS_CACHE_IDX 32
142# define VMX_VMCS32_GUEST_GS_LIMIT_CACHE_IDX 33
143# define VMX_VMCS_GUEST_GS_BASE_CACHE_IDX 34
144# define VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS_CACHE_IDX 35
145# define VMX_VMCS16_GUEST_FIELD_SS_CACHE_IDX 36
146# define VMX_VMCS32_GUEST_SS_LIMIT_CACHE_IDX 37
147# define VMX_VMCS_GUEST_SS_BASE_CACHE_IDX 38
148# define VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS_CACHE_IDX 39
149# define VMX_VMCS16_GUEST_FIELD_TR_CACHE_IDX 40
150# define VMX_VMCS32_GUEST_TR_LIMIT_CACHE_IDX 41
151# define VMX_VMCS_GUEST_TR_BASE_CACHE_IDX 42
152# define VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS_CACHE_IDX 43
153# define VMX_VMCS16_GUEST_FIELD_LDTR_CACHE_IDX 44
154# define VMX_VMCS32_GUEST_LDTR_LIMIT_CACHE_IDX 45
155# define VMX_VMCS_GUEST_LDTR_BASE_CACHE_IDX 46
156# define VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS_CACHE_IDX 47
157# define VMX_VMCS32_RO_EXIT_REASON_CACHE_IDX 48
158# define VMX_VMCS32_RO_VM_INSTR_ERROR_CACHE_IDX 49
159# define VMX_VMCS32_RO_EXIT_INSTR_LENGTH_CACHE_IDX 50
160# define VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE_CACHE_IDX 51
161# define VMX_VMCS32_RO_EXIT_INSTR_INFO_CACHE_IDX 52
162# define VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO_CACHE_IDX 53
163# define VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX 54
164# define VMX_VMCS32_RO_IDT_INFO_CACHE_IDX 55
165# define VMX_VMCS32_RO_IDT_ERROR_CODE_CACHE_IDX 56
166# define VMX_VMCS_MAX_CACHE_IDX (VMX_VMCS32_RO_IDT_ERROR_CODE_CACHE_IDX + 1)
167# define VMX_VMCS_GUEST_CR3_CACHE_IDX 57
168# define VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL_CACHE_IDX 58
169# define VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX (VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL_CACHE_IDX + 1)
170#endif /* VBOX_WITH_OLD_VTX_CODE */
171/** @} */
172
173/** @name VMX EPT paging structures
174 * @{
175 */
176
177/**
178 * Number of page table entries in the EPT. (PDPTE/PDE/PTE)
179 */
180#define EPT_PG_ENTRIES X86_PG_PAE_ENTRIES
181
182/**
183 * EPT PML4 Entry. Bit view.
184 * @todo uint64_t isn't safe for bitfields (gcc pedantic warnings, and IIRC,
185 * this did cause trouble with one compiler/version).
186 */
187#pragma pack(1)
188typedef struct EPTPML4EBITS
189{
190 /** Present bit. */
191 uint64_t u1Present : 1;
192 /** Writable bit. */
193 uint64_t u1Write : 1;
194 /** Executable bit. */
195 uint64_t u1Execute : 1;
196 /** Reserved (must be 0). */
197 uint64_t u5Reserved : 5;
198 /** Available for software. */
199 uint64_t u4Available : 4;
200 /** Physical address of the next level (PDPT). Restricted by maximum physical address width of the cpu. */
201 uint64_t u40PhysAddr : 40;
202 /** Available for software. */
203 uint64_t u12Available : 12;
204} EPTPML4EBITS;
205#pragma pack()
206AssertCompileSize(EPTPML4EBITS, 8);
207
208/** Bits 12-51 - EPT - Physical page number of the next level. */
209#define EPT_PML4E_PG_MASK X86_PML4E_PG_MASK
210/** The page shift to get the PML4 index. */
211#define EPT_PML4_SHIFT X86_PML4_SHIFT
212/** The PML4 index mask (apply to a shifted page address). */
213#define EPT_PML4_MASK X86_PML4_MASK
214
215/**
216 * EPT PML4E.
217 */
218#pragma pack(1)
219typedef union EPTPML4E
220{
221 /** Normal view. */
222 EPTPML4EBITS n;
223 /** Unsigned integer view. */
224 X86PGPAEUINT u;
225 /** 64 bit unsigned integer view. */
226 uint64_t au64[1];
227 /** 32 bit unsigned integer view. */
228 uint32_t au32[2];
229} EPTPML4E;
230#pragma pack()
231/** Pointer to a PML4 table entry. */
232typedef EPTPML4E *PEPTPML4E;
233/** Pointer to a const PML4 table entry. */
234typedef const EPTPML4E *PCEPTPML4E;
235AssertCompileSize(EPTPML4E, 8);
236
237/**
238 * EPT PML4 Table.
239 */
240#pragma pack(1)
241typedef struct EPTPML4
242{
243 EPTPML4E a[EPT_PG_ENTRIES];
244} EPTPML4;
245#pragma pack()
246/** Pointer to an EPT PML4 Table. */
247typedef EPTPML4 *PEPTPML4;
248/** Pointer to a const EPT PML4 Table. */
249typedef const EPTPML4 *PCEPTPML4;
250
251/**
252 * EPT Page Directory Pointer Entry. Bit view.
253 */
254#pragma pack(1)
255typedef struct EPTPDPTEBITS
256{
257 /** Present bit. */
258 uint64_t u1Present : 1;
259 /** Writable bit. */
260 uint64_t u1Write : 1;
261 /** Executable bit. */
262 uint64_t u1Execute : 1;
263 /** Reserved (must be 0). */
264 uint64_t u5Reserved : 5;
265 /** Available for software. */
266 uint64_t u4Available : 4;
267 /** Physical address of the next level (PD). Restricted by maximum physical address width of the cpu. */
268 uint64_t u40PhysAddr : 40;
269 /** Available for software. */
270 uint64_t u12Available : 12;
271} EPTPDPTEBITS;
272#pragma pack()
273AssertCompileSize(EPTPDPTEBITS, 8);
274
275/** Bits 12-51 - EPT - Physical page number of the next level. */
276#define EPT_PDPTE_PG_MASK X86_PDPE_PG_MASK
277/** The page shift to get the PDPT index. */
278#define EPT_PDPT_SHIFT X86_PDPT_SHIFT
279/** The PDPT index mask (apply to a shifted page address). */
280#define EPT_PDPT_MASK X86_PDPT_MASK_AMD64
281
282/**
283 * EPT Page Directory Pointer.
284 */
285#pragma pack(1)
286typedef union EPTPDPTE
287{
288 /** Normal view. */
289 EPTPDPTEBITS n;
290 /** Unsigned integer view. */
291 X86PGPAEUINT u;
292 /** 64 bit unsigned integer view. */
293 uint64_t au64[1];
294 /** 32 bit unsigned integer view. */
295 uint32_t au32[2];
296} EPTPDPTE;
297#pragma pack()
298/** Pointer to an EPT Page Directory Pointer Entry. */
299typedef EPTPDPTE *PEPTPDPTE;
300/** Pointer to a const EPT Page Directory Pointer Entry. */
301typedef const EPTPDPTE *PCEPTPDPTE;
302AssertCompileSize(EPTPDPTE, 8);
303
304/**
305 * EPT Page Directory Pointer Table.
306 */
307#pragma pack(1)
308typedef struct EPTPDPT
309{
310 EPTPDPTE a[EPT_PG_ENTRIES];
311} EPTPDPT;
312#pragma pack()
313/** Pointer to an EPT Page Directory Pointer Table. */
314typedef EPTPDPT *PEPTPDPT;
315/** Pointer to a const EPT Page Directory Pointer Table. */
316typedef const EPTPDPT *PCEPTPDPT;
317
318
319/**
320 * EPT Page Directory Table Entry. Bit view.
321 */
322#pragma pack(1)
323typedef struct EPTPDEBITS
324{
325 /** Present bit. */
326 uint64_t u1Present : 1;
327 /** Writable bit. */
328 uint64_t u1Write : 1;
329 /** Executable bit. */
330 uint64_t u1Execute : 1;
331 /** Reserved (must be 0). */
332 uint64_t u4Reserved : 4;
333 /** Big page (must be 0 here). */
334 uint64_t u1Size : 1;
335 /** Available for software. */
336 uint64_t u4Available : 4;
337 /** Physical address of page table. Restricted by maximum physical address width of the cpu. */
338 uint64_t u40PhysAddr : 40;
339 /** Available for software. */
340 uint64_t u12Available : 12;
341} EPTPDEBITS;
342#pragma pack()
343AssertCompileSize(EPTPDEBITS, 8);
344
345/** Bits 12-51 - EPT - Physical page number of the next level. */
346#define EPT_PDE_PG_MASK X86_PDE_PAE_PG_MASK
347/** The page shift to get the PD index. */
348#define EPT_PD_SHIFT X86_PD_PAE_SHIFT
349/** The PD index mask (apply to a shifted page address). */
350#define EPT_PD_MASK X86_PD_PAE_MASK
351
352/**
353 * EPT 2MB Page Directory Table Entry. Bit view.
354 */
355#pragma pack(1)
356typedef struct EPTPDE2MBITS
357{
358 /** Present bit. */
359 uint64_t u1Present : 1;
360 /** Writable bit. */
361 uint64_t u1Write : 1;
362 /** Executable bit. */
363 uint64_t u1Execute : 1;
364 /** EPT Table Memory Type. MBZ for non-leaf nodes. */
365 uint64_t u3EMT : 3;
366 /** Ignore PAT memory type */
367 uint64_t u1IgnorePAT : 1;
368 /** Big page (must be 1 here). */
369 uint64_t u1Size : 1;
370 /** Available for software. */
371 uint64_t u4Available : 4;
372 /** Reserved (must be 0). */
373 uint64_t u9Reserved : 9;
374 /** Physical address of the 2MB page. Restricted by maximum physical address width of the cpu. */
375 uint64_t u31PhysAddr : 31;
376 /** Available for software. */
377 uint64_t u12Available : 12;
378} EPTPDE2MBITS;
379#pragma pack()
380AssertCompileSize(EPTPDE2MBITS, 8);
381
382/** Bits 21-51 - EPT - Physical page number of the 2MB page. */
383#define EPT_PDE2M_PG_MASK X86_PDE2M_PAE_PG_MASK
384
385/**
386 * EPT Page Directory Table Entry.
387 */
388#pragma pack(1)
389typedef union EPTPDE
390{
391 /** Normal view. */
392 EPTPDEBITS n;
393 /** 2MB view (big). */
394 EPTPDE2MBITS b;
395 /** Unsigned integer view. */
396 X86PGPAEUINT u;
397 /** 64 bit unsigned integer view. */
398 uint64_t au64[1];
399 /** 32 bit unsigned integer view. */
400 uint32_t au32[2];
401} EPTPDE;
402#pragma pack()
403/** Pointer to an EPT Page Directory Table Entry. */
404typedef EPTPDE *PEPTPDE;
405/** Pointer to a const EPT Page Directory Table Entry. */
406typedef const EPTPDE *PCEPTPDE;
407AssertCompileSize(EPTPDE, 8);
408
409/**
410 * EPT Page Directory Table.
411 */
412#pragma pack(1)
413typedef struct EPTPD
414{
415 EPTPDE a[EPT_PG_ENTRIES];
416} EPTPD;
417#pragma pack()
418/** Pointer to an EPT Page Directory Table. */
419typedef EPTPD *PEPTPD;
420/** Pointer to a const EPT Page Directory Table. */
421typedef const EPTPD *PCEPTPD;
422
423
424/**
425 * EPT Page Table Entry. Bit view.
426 */
427#pragma pack(1)
428typedef struct EPTPTEBITS
429{
430 /** 0 - Present bit.
431 * @remark This is a convenience "misnomer". The bit actually indicates
432 * read access and the CPU will consider an entry with any of the
433 * first three bits set as present. Since all our valid entries
434 * will have this bit set, it can be used as a present indicator
435 * and allow some code sharing. */
436 uint64_t u1Present : 1;
437 /** 1 - Writable bit. */
438 uint64_t u1Write : 1;
439 /** 2 - Executable bit. */
440 uint64_t u1Execute : 1;
441 /** 5:3 - EPT Memory Type. MBZ for non-leaf nodes. */
442 uint64_t u3EMT : 3;
443 /** 6 - Ignore PAT memory type */
444 uint64_t u1IgnorePAT : 1;
445 /** 11:7 - Available for software. */
446 uint64_t u5Available : 5;
447 /** 51:12 - Physical address of page. Restricted by maximum physical
448 * address width of the cpu. */
449 uint64_t u40PhysAddr : 40;
450 /** 63:52 - Available for software. */
451 uint64_t u12Available : 12;
452} EPTPTEBITS;
453#pragma pack()
454AssertCompileSize(EPTPTEBITS, 8);
455
456/** Bits 12-51 - EPT - Physical page number of the page. */
457#define EPT_PTE_PG_MASK X86_PTE_PAE_PG_MASK
458/** The page shift to get the EPT PTE index. */
459#define EPT_PT_SHIFT X86_PT_PAE_SHIFT
460/** The EPT PT index mask (apply to a shifted page address). */
461#define EPT_PT_MASK X86_PT_PAE_MASK
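/*
 * Illustrative sketch (editorial addition): decomposing a guest-physical
 * address into the four EPT table indices using the shift/mask macros above.
 * The variable names are hypothetical; only the macros and table types come
 * from this header.
 *
 *     RTGCPHYS GCPhys = ...; // guest-physical address to walk
 *     unsigned iPml4  = (GCPhys >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
 *     unsigned iPdpt  = (GCPhys >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
 *     unsigned iPd    = (GCPhys >> EPT_PD_SHIFT)   & EPT_PD_MASK;
 *     unsigned iPt    = (GCPhys >> EPT_PT_SHIFT)   & EPT_PT_MASK;
 *     // Index the tables as pPml4->a[iPml4], pPdpt->a[iPdpt], pPd->a[iPd], pPt->a[iPt].
 */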
462
463/**
464 * EPT Page Table Entry.
465 */
466#pragma pack(1)
467typedef union EPTPTE
468{
469 /** Normal view. */
470 EPTPTEBITS n;
471 /** Unsigned integer view. */
472 X86PGPAEUINT u;
473 /** 64 bit unsigned integer view. */
474 uint64_t au64[1];
475 /** 32 bit unsigned integer view. */
476 uint32_t au32[2];
477} EPTPTE;
478#pragma pack()
479/** Pointer to an EPT Page Table Entry. */
480typedef EPTPTE *PEPTPTE;
481/** Pointer to a const EPT Page Table Entry. */
482typedef const EPTPTE *PCEPTPTE;
483AssertCompileSize(EPTPTE, 8);
484
485/**
486 * EPT Page Table.
487 */
488#pragma pack(1)
489typedef struct EPTPT
490{
491 EPTPTE a[EPT_PG_ENTRIES];
492} EPTPT;
493#pragma pack()
494/** Pointer to an extended page table. */
495typedef EPTPT *PEPTPT;
496/** Pointer to a const extended table. */
497typedef const EPTPT *PCEPTPT;
498
499/**
500 * VPID flush types.
501 */
502typedef enum
503{
504 /** Invalidate a specific page. */
505 VMX_FLUSH_VPID_INDIV_ADDR = 0,
506 /** Invalidate one context (specific VPID). */
507 VMX_FLUSH_VPID_SINGLE_CONTEXT = 1,
508 /** Invalidate all contexts (all VPIDs). */
509 VMX_FLUSH_VPID_ALL_CONTEXTS = 2,
510 /** Invalidate a single VPID context retaining global mappings. */
511 VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS = 3,
512 /** Unsupported by VirtualBox. */
513 VMX_FLUSH_VPID_NOT_SUPPORTED = 0xbad,
514 /** Unsupported by CPU. */
515 VMX_FLUSH_VPID_NONE = 0xb00,
516 /** 32bit hackishness. */
517 VMX_FLUSH_VPID_32BIT_HACK = 0x7fffffff
518} VMX_FLUSH_VPID;
519
520/**
521 * EPT flush types.
522 */
523typedef enum
524{
525 /** Invalidate one context (specific EPT). */
526 VMX_FLUSH_EPT_SINGLE_CONTEXT = 1,
527 /** Invalidate all contexts (all EPTs). */
528 VMX_FLUSH_EPT_ALL_CONTEXTS = 2,
529 /** Unsupported by VirtualBox. */
530 VMX_FLUSH_EPT_NOT_SUPPORTED = 0xbad,
531 /** Unsupported by CPU. */
532 VMX_FLUSH_EPT_NONE = 0xb00,
533 /** 32bit hackishness. */
534 VMX_FLUSH_EPT_32BIT_HACK = 0x7fffffff
535} VMX_FLUSH_EPT;
536/** @} */
537
538/** @name MSR autoload/store elements
539 * @{
540 */
541#pragma pack(1)
542typedef struct
543{
544 uint32_t u32IndexMSR;
545 uint32_t u32Reserved;
546 uint64_t u64Value;
547} VMXMSR;
548#pragma pack()
549/** Pointer to an MSR load/store element. */
550typedef VMXMSR *PVMXMSR;
551/** Pointer to a const MSR load/store element. */
552typedef const VMXMSR *PCVMXMSR;
553
554/** @} */
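/*
 * Illustrative sketch (editorial addition): filling one MSR autoload/store
 * element.  MSR_K6_EFER is assumed to be IPRT's name for the EFER MSR index
 * (iprt/x86.h); the surrounding array and value are hypothetical.
 *
 *     PVMXMSR pMsr      = &paGuestMsrs[iMsr];   // element in a load/store area
 *     pMsr->u32IndexMSR = MSR_K6_EFER;
 *     pMsr->u32Reserved = 0;
 *     pMsr->u64Value    = uGuestEfer;
 */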
555
556
557/** @name VMX-capability qword
558 * @{
559 */
560#pragma pack(1)
561typedef union
562{
563 struct
564 {
565 /** Bits set here -must- be set in the corresponding VM-execution controls. */
566 uint32_t disallowed0;
567 /** Bits cleared here -must- be cleared in the corresponding VM-execution
568 * controls. */
569 uint32_t allowed1;
570 } n;
571 uint64_t u;
572} VMX_CAPABILITY;
573#pragma pack()
574/** @} */
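/*
 * Illustrative sketch (editorial addition): clamping a requested set of
 * processor-based VM-execution controls against a capability MSR read into a
 * VMX_CAPABILITY union.  Bits set in disallowed0 must end up 1, bits clear in
 * allowed1 must end up 0.  The MSR index constant and variable names are
 * assumptions.
 *
 *     VMX_CAPABILITY VmxProcCaps;
 *     VmxProcCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);   // MSR name assumed (iprt/x86.h)
 *
 *     uint32_t u32ProcCtls = fDesiredProcCtls;
 *     u32ProcCtls |= VmxProcCaps.n.disallowed0;    // force the must-be-one bits
 *     u32ProcCtls &= VmxProcCaps.n.allowed1;       // clear the must-be-zero bits
 */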
575
576/** @name VMX EFLAGS reserved bits.
577 * @{
578 */
579/** And-mask for setting reserved bits to zero */
580#define VMX_EFLAGS_RESERVED_0 (~0xffc08028)
581/** Or-mask for setting reserved bits to 1 */
582#define VMX_EFLAGS_RESERVED_1 0x00000002
583/** @} */
584
585/** @name VMX Basic Exit Reasons.
586 * @{
587 */
588/** -1 Invalid exit code */
589#define VMX_EXIT_INVALID -1
590/** 0 Exception or non-maskable interrupt (NMI). */
591#define VMX_EXIT_XCPT_NMI 0
592/** 1 External interrupt. */
593#define VMX_EXIT_EXT_INT 1
594/** 2 Triple fault. */
595#define VMX_EXIT_TRIPLE_FAULT 2
596/** 3 INIT signal. */
597#define VMX_EXIT_INIT_SIGNAL 3
598/** 4 Start-up IPI (SIPI). */
599#define VMX_EXIT_SIPI 4
600/** 5 I/O system-management interrupt (SMI). */
601#define VMX_EXIT_IO_SMI 5
602/** 6 Other SMI. */
603#define VMX_EXIT_SMI 6
604/** 7 Interrupt window exiting. */
605#define VMX_EXIT_INT_WINDOW 7
606/** 8 NMI window exiting. */
607#define VMX_EXIT_NMI_WINDOW 8
608/** 9 Task switch. */
609#define VMX_EXIT_TASK_SWITCH 9
610/** 10 Guest software attempted to execute CPUID. */
611#define VMX_EXIT_CPUID 10
612/** 11 Guest software attempted to execute GETSEC. */
613#define VMX_EXIT_GETSEC 11
614/** 12 Guest software attempted to execute HLT. */
615#define VMX_EXIT_HLT 12
616/** 13 Guest software attempted to execute INVD. */
617#define VMX_EXIT_INVD 13
618/** 14 Guest software attempted to execute INVLPG. */
619#define VMX_EXIT_INVLPG 14
620/** 15 Guest software attempted to execute RDPMC. */
621#define VMX_EXIT_RDPMC 15
622/** 16 Guest software attempted to execute RDTSC. */
623#define VMX_EXIT_RDTSC 16
624/** 17 Guest software attempted to execute RSM in SMM. */
625#define VMX_EXIT_RSM 17
626/** 18 Guest software executed VMCALL. */
627#define VMX_EXIT_VMCALL 18
628/** 19 Guest software executed VMCLEAR. */
629#define VMX_EXIT_VMCLEAR 19
630/** 20 Guest software executed VMLAUNCH. */
631#define VMX_EXIT_VMLAUNCH 20
632/** 21 Guest software executed VMPTRLD. */
633#define VMX_EXIT_VMPTRLD 21
634/** 22 Guest software executed VMPTRST. */
635#define VMX_EXIT_VMPTRST 22
636/** 23 Guest software executed VMREAD. */
637#define VMX_EXIT_VMREAD 23
638/** 24 Guest software executed VMRESUME. */
639#define VMX_EXIT_VMRESUME 24
640/** 25 Guest software executed VMWRITE. */
641#define VMX_EXIT_VMWRITE 25
642/** 26 Guest software executed VMXOFF. */
643#define VMX_EXIT_VMXOFF 26
644/** 27 Guest software executed VMXON. */
645#define VMX_EXIT_VMXON 27
646/** 28 Control-register accesses. */
647#define VMX_EXIT_MOV_CRX 28
648/** 29 Debug-register accesses. */
649#define VMX_EXIT_MOV_DRX 29
650/** 30 I/O instruction. */
651#define VMX_EXIT_IO_INSTR 30
652/** 31 RDMSR. Guest software attempted to execute RDMSR. */
653#define VMX_EXIT_RDMSR 31
654/** 32 WRMSR. Guest software attempted to execute WRMSR. */
655#define VMX_EXIT_WRMSR 32
656/** 33 VM-entry failure due to invalid guest state. */
657#define VMX_EXIT_ERR_INVALID_GUEST_STATE 33
658/** 34 VM-entry failure due to MSR loading. */
659#define VMX_EXIT_ERR_MSR_LOAD 34
660/** 36 Guest software executed MWAIT. */
661#define VMX_EXIT_MWAIT 36
662/** 37 VM exit due to monitor trap flag. */
663#define VMX_EXIT_MTF 37
664/** 39 Guest software attempted to execute MONITOR. */
665#define VMX_EXIT_MONITOR 39
666/** 40 Guest software attempted to execute PAUSE. */
667#define VMX_EXIT_PAUSE 40
668/** 41 VM-entry failure due to machine-check. */
669#define VMX_EXIT_ERR_MACHINE_CHECK 41
670/** 43 TPR below threshold. Guest software executed MOV to CR8. */
671#define VMX_EXIT_TPR_BELOW_THRESHOLD 43
672/** 44 APIC access. Guest software attempted to access memory at a physical address on the APIC-access page. */
673#define VMX_EXIT_APIC_ACCESS 44
674/** 46 Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT. */
675#define VMX_EXIT_XDTR_ACCESS 46
676/** 47 Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR. */
677#define VMX_EXIT_TR_ACCESS 47
678/** 48 EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures. */
679#define VMX_EXIT_EPT_VIOLATION 48
680/** 49 EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry. */
681#define VMX_EXIT_EPT_MISCONFIG 49
682/** 50 INVEPT. Guest software attempted to execute INVEPT. */
683#define VMX_EXIT_INVEPT 50
684/** 51 RDTSCP. Guest software attempted to execute RDTSCP. */
685#define VMX_EXIT_RDTSCP 51
686/** 52 VMX-preemption timer expired. The preemption timer counted down to zero. */
687#define VMX_EXIT_PREEMPT_TIMER 52
688/** 53 INVVPID. Guest software attempted to execute INVVPID. */
689#define VMX_EXIT_INVVPID 53
690/** 54 WBINVD. Guest software attempted to execute WBINVD. */
691#define VMX_EXIT_WBINVD 54
692/** 55 XSETBV. Guest software attempted to execute XSETBV. */
693#define VMX_EXIT_XSETBV 55
694/** 57 RDRAND. Guest software attempted to execute RDRAND. */
695#define VMX_EXIT_RDRAND 57
696/** 58 INVPCID. Guest software attempted to execute INVPCID. */
697#define VMX_EXIT_INVPCID 58
698/** 59 VMFUNC. Guest software attempted to execute VMFUNC. */
699#define VMX_EXIT_VMFUNC 59
700/** The maximum exit value (inclusive). */
701#define VMX_EXIT_MAX (VMX_EXIT_VMFUNC)
702/** @} */
703
704
705/** @name VM Instruction Errors
706 * @{
707 */
708/** 1 VMCALL executed in VMX root operation. */
709#define VMX_ERROR_VMCALL 1
710/** 2 VMCLEAR with invalid physical address. */
711#define VMX_ERROR_VMCLEAR_INVALID_PHYS_ADDR 2
712/** 3 VMCLEAR with VMXON pointer. */
713#define VMX_ERROR_VMCLEAR_INVALID_VMXON_PTR 3
714/** 4 VMLAUNCH with non-clear VMCS. */
715#define VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS 4
716/** 5 VMRESUME with non-launched VMCS. */
717#define VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS 5
718/** 6 VMRESUME with a corrupted VMCS (indicates corruption of the current VMCS). */
719#define VMX_ERROR_VMRESUME_CORRUPTED_VMCS 6
720/** 7 VM entry with invalid control field(s). */
721#define VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS 7
722/** 8 VM entry with invalid host-state field(s). */
723#define VMX_ERROR_VMENTRY_INVALID_HOST_STATE 8
724/** 9 VMPTRLD with invalid physical address. */
725#define VMX_ERROR_VMPTRLD_INVALID_PHYS_ADDR 9
726/** 10 VMPTRLD with VMXON pointer. */
727#define VMX_ERROR_VMPTRLD_VMXON_PTR 10
728/** 11 VMPTRLD with incorrect VMCS revision identifier. */
729#define VMX_ERROR_VMPTRLD_WRONG_VMCS_REVISION 11
730/** 12 VMREAD/VMWRITE from/to unsupported VMCS component. */
731#define VMX_ERROR_VMREAD_INVALID_COMPONENT 12
732#define VMX_ERROR_VMWRITE_INVALID_COMPONENT VMX_ERROR_VMREAD_INVALID_COMPONENT
733/** 13 VMWRITE to read-only VMCS component. */
734#define VMX_ERROR_VMWRITE_READONLY_COMPONENT 13
735/** 15 VMXON executed in VMX root operation. */
736#define VMX_ERROR_VMXON_IN_VMX_ROOT_OP 15
737/** 16 VM entry with invalid executive-VMCS pointer. */
738#define VMX_ERROR_VMENTRY_INVALID_VMCS_EXEC_PTR 16
739/** 17 VM entry with non-launched executive VMCS. */
740#define VMX_ERROR_VMENTRY_NON_LAUNCHED_EXEC_VMCS 17
741/** 18 VM entry with executive-VMCS pointer not VMXON pointer. */
742#define VMX_ERROR_VMENTRY_EXEC_VMCS_PTR 18
743/** 19 VMCALL with non-clear VMCS. */
744#define VMX_ERROR_VMCALL_NON_CLEAR_VMCS 19
745/** 20 VMCALL with invalid VM-exit control fields. */
746#define VMX_ERROR_VMCALL_INVALID_VMEXIT_FIELDS 20
747/** 22 VMCALL with incorrect MSEG revision identifier. */
748#define VMX_ERROR_VMCALL_INVALID_MSEG_REVISION 22
749/** 23 VMXOFF under dual-monitor treatment of SMIs and SMM. */
750#define VMX_ERROR_VMXOFF_DUAL_MONITOR 23
751/** 24 VMCALL with invalid SMM-monitor features. */
752#define VMX_ERROR_VMCALL_INVALID_SMM_MONITOR 24
753/** 25 VM entry with invalid VM-execution control fields in executive VMCS. */
754#define VMX_ERROR_VMENTRY_INVALID_VM_EXEC_CTRL 25
755/** 26 VM entry with events blocked by MOV SS. */
756#define VMX_ERROR_VMENTRY_MOV_SS 26
757/** 28 Invalid operand to INVEPT/INVVPID. */
758#define VMX_ERROR_INVEPTVPID_INVALID_OPERAND 28
759
760/** @} */
761
762
763/** @name VMX MSRs - Basic VMX information.
764 * @{
765 */
766/** VMCS revision identifier used by the processor. */
767#define MSR_IA32_VMX_BASIC_INFO_VMCS_ID(a) (a & 0x7FFFFFFF)
768/** Size of the VMCS. */
769#define MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(a) (((a) >> 32) & 0xFFF)
770/** Width of physical address used for the VMCS.
771 * 0 -> limited to the available amount of physical ram
772 * 1 -> within the first 4 GB
773 */
774#define MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(a) (((a) >> 48) & 1)
775/** Whether the processor supports the dual-monitor treatment of system-management interrupts and system-management code. (always 1) */
776#define MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(a) (((a) >> 49) & 1)
777/** Memory type that must be used for the VMCS. */
778#define MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(a) (((a) >> 50) & 0xF)
779/** @} */
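/*
 * Illustrative sketch (editorial addition): pulling the individual fields out
 * of the basic VMX information MSR with the accessors above.  The MSR index
 * constant MSR_IA32_VMX_BASIC_INFO is assumed to come from iprt/x86.h.
 *
 *     uint64_t u64Basic  = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
 *     uint32_t u32VmcsId = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(u64Basic);
 *     uint32_t cbVmcs    = MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(u64Basic);
 *     uint32_t uMemType  = MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(u64Basic);
 */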
780
781
782/** @name VMX MSRs - Misc VMX info.
783 * @{
784 */
785/** Relationship between the preemption timer and the TSC; the timer counts down every time bit X of the TSC changes. */
786#define MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(a) ((a) & 0x1f)
787/** Whether VM-exit stores EFER.LMA into the "IA32e mode guest" field. */
788#define MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(a) (((a) >> 5) & 1)
789/** Activity states supported by the implementation. */
790#define MSR_IA32_VMX_MISC_ACTIVITY_STATES(a) (((a) >> 6) & 0x7)
791/** Number of CR3 target values supported by the processor. (0-256) */
792#define MSR_IA32_VMX_MISC_CR3_TARGET(a) (((a) >> 16) & 0x1FF)
793/** Maximum number of MSRs in the VMCS. (N+1)*512. */
794#define MSR_IA32_VMX_MISC_MAX_MSR(a) (((((a) >> 25) & 0x7) + 1) * 512)
795/** Whether RDMSR can be used to read IA32_SMBASE_MSR in SMM. */
796#define MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(a) (((a) >> 15) & 1)
797/** Whether bit 2 of IA32_SMM_MONITOR_CTL can be set to 1. */
798#define MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(a) (((a) >> 28) & 1)
799/** Whether VMWRITE can be used to write VM-exit information fields. */
800#define MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(a) (((a) >> 29) & 1)
801/** MSEG revision identifier used by the processor. */
802#define MSR_IA32_VMX_MISC_MSEG_ID(a) ((a) >> 32)
803/** @} */
804
805
806/** @name VMX MSRs - VMCS enumeration field info
807 * @{
808 */
809/** Highest field index. */
810#define MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(a) (((a) >> 1) & 0x1FF)
811/** @} */
812
813
814/** @name MSR_IA32_VMX_EPT_VPID_CAPS; EPT capabilities MSR
815 * @{
816 */
817#define MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY RT_BIT_64(0)
818#define MSR_IA32_VMX_EPT_VPID_CAP_RWX_W_ONLY RT_BIT_64(1)
819#define MSR_IA32_VMX_EPT_VPID_CAP_RWX_WX_ONLY RT_BIT_64(2)
820#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_21_BITS RT_BIT_64(3)
821#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_30_BITS RT_BIT_64(4)
822#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_39_BITS RT_BIT_64(5)
823#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_48_BITS RT_BIT_64(6)
824#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_57_BITS RT_BIT_64(7)
825#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC RT_BIT_64(8)
826#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_WC RT_BIT_64(9)
827#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_WT RT_BIT_64(12)
828#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_WP RT_BIT_64(13)
829#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB RT_BIT_64(14)
830#define MSR_IA32_VMX_EPT_VPID_CAP_SP_21_BITS RT_BIT_64(16)
831#define MSR_IA32_VMX_EPT_VPID_CAP_SP_30_BITS RT_BIT_64(17)
832#define MSR_IA32_VMX_EPT_VPID_CAP_SP_39_BITS RT_BIT_64(18)
833#define MSR_IA32_VMX_EPT_VPID_CAP_SP_48_BITS RT_BIT_64(19)
834#define MSR_IA32_VMX_EPT_VPID_CAP_INVEPT RT_BIT_64(20)
835#define MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT RT_BIT_64(25)
836#define MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS RT_BIT_64(26)
837#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID RT_BIT_64(32)
838#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR RT_BIT_64(40)
839#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT RT_BIT_64(41)
840#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS RT_BIT_64(42)
841#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS RT_BIT_64(43)
842
843/** @} */
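/*
 * Illustrative sketch (editorial addition): picking an EPT flush type from the
 * capability bits above, falling back to the "unsupported" marker from the
 * VMX_FLUSH_EPT enumeration earlier in this file.  The MSR index constant
 * MSR_IA32_VMX_EPT_VPID_CAP is an assumed name.
 *
 *     uint64_t      u64Caps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
 *     VMX_FLUSH_EPT enmFlushEpt;
 *     if (u64Caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
 *         enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
 *     else if (u64Caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
 *         enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
 *     else
 *         enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
 */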
844
845/** @name Extended Page Table Pointer (EPTP)
846 * @{
847 */
848/** Uncacheable EPT paging structure memory type. */
849#define VMX_EPT_MEMTYPE_UC 0
850/** Write-back EPT paging structure memory type. */
851#define VMX_EPT_MEMTYPE_WB 6
852/** Shift value to get the EPT page walk length (bits 5-3) */
853#define VMX_EPT_PAGE_WALK_LENGTH_SHIFT 3
854/** Mask value to get the EPT page walk length (bits 5-3) */
855#define VMX_EPT_PAGE_WALK_LENGTH_MASK 7
856/** Default EPT page-walk length (1 less than the actual EPT page-walk
857 * length) */
858#define VMX_EPT_PAGE_WALK_LENGTH_DEFAULT 3
859/** @} */
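/*
 * Illustrative sketch (editorial addition): composing an EPTP value for
 * VMX_VMCS64_CTRL_EPTP_FULL from a page-aligned host-physical address of the
 * EPT PML4 table, the write-back memory type and the default page-walk length.
 * HCPhysPml4 is a hypothetical variable.
 *
 *     uint64_t u64Eptp = HCPhysPml4;                       // page aligned
 *     u64Eptp |= (uint64_t)VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT;
 *     u64Eptp |= VMX_EPT_MEMTYPE_WB;
 */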
860
861
862/** @name VMCS field encoding - 16 bits guest fields
863 * @{
864 */
865#define VMX_VMCS16_GUEST_FIELD_VPID 0x0
866#define VMX_VMCS16_GUEST_FIELD_ES 0x800
867#define VMX_VMCS16_GUEST_FIELD_CS 0x802
868#define VMX_VMCS16_GUEST_FIELD_SS 0x804
869#define VMX_VMCS16_GUEST_FIELD_DS 0x806
870#define VMX_VMCS16_GUEST_FIELD_FS 0x808
871#define VMX_VMCS16_GUEST_FIELD_GS 0x80A
872#define VMX_VMCS16_GUEST_FIELD_LDTR 0x80C
873#define VMX_VMCS16_GUEST_FIELD_TR 0x80E
874/** @} */
875
876/** @name VMCS field encoding - 16 bits host fields
877 * @{
878 */
879#define VMX_VMCS16_HOST_FIELD_ES 0xC00
880#define VMX_VMCS16_HOST_FIELD_CS 0xC02
881#define VMX_VMCS16_HOST_FIELD_SS 0xC04
882#define VMX_VMCS16_HOST_FIELD_DS 0xC06
883#define VMX_VMCS16_HOST_FIELD_FS 0xC08
884#define VMX_VMCS16_HOST_FIELD_GS 0xC0A
885#define VMX_VMCS16_HOST_FIELD_TR 0xC0C
886/** @} */
887
888/** @name VMCS field encoding - 64 bits host fields
889 * @{
890 */
891#define VMX_VMCS64_HOST_FIELD_PAT_FULL 0x2C00
892#define VMX_VMCS64_HOST_FIELD_PAT_HIGH 0x2C01
893#define VMX_VMCS64_HOST_FIELD_EFER_FULL 0x2C02
894#define VMX_VMCS64_HOST_FIELD_EFER_HIGH 0x2C03
895#define VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL 0x2C04 /**< MSR IA32_PERF_GLOBAL_CTRL */
896#define VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH 0x2C05 /**< MSR IA32_PERF_GLOBAL_CTRL */
897/** @} */
898
899
900/** @name VMCS field encoding - 64 Bits control fields
901 * @{
902 */
903#define VMX_VMCS64_CTRL_IO_BITMAP_A_FULL 0x2000
904#define VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH 0x2001
905#define VMX_VMCS64_CTRL_IO_BITMAP_B_FULL 0x2002
906#define VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH 0x2003
907
908/* Optional */
909#define VMX_VMCS64_CTRL_MSR_BITMAP_FULL 0x2004
910#define VMX_VMCS64_CTRL_MSR_BITMAP_HIGH 0x2005
911
912#define VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL 0x2006
913#define VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH 0x2007
914#define VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL 0x2008
915#define VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH 0x2009
916
917#define VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL 0x200A
918#define VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH 0x200B
919
920#define VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL 0x200C
921#define VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH 0x200D
922
923#define VMX_VMCS64_CTRL_TSC_OFFSET_FULL 0x2010
924#define VMX_VMCS64_CTRL_TSC_OFFSET_HIGH 0x2011
925
926/** Optional (VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW) */
927#define VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL 0x2012
928#define VMX_VMCS64_CTRL_VAPIC_PAGEADDR_HIGH 0x2013
929
930/** Optional (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) */
931#define VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL 0x2014
932#define VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH 0x2015
933
934/** Optional (VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC) */
935#define VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL 0x2018
936#define VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH 0x2019
937
938/** Extended page table pointer. */
939#define VMX_VMCS64_CTRL_EPTP_FULL 0x201a
940#define VMX_VMCS64_CTRL_EPTP_HIGH 0x201b
941
942/** Extended page table pointer lists. */
943#define VMX_VMCS64_CTRL_EPTP_LIST_FULL 0x2024
944#define VMX_VMCS64_CTRL_EPTP_LIST_HIGH 0x2025
945
946/** VM-exit guest physical address. */
947#define VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL 0x2400
948#define VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_HIGH 0x2401
949/** @} */
950
951
952/** @name VMCS field encoding - 64 Bits guest fields
953 * @{
954 */
955#define VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL 0x2800
956#define VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH 0x2801
957#define VMX_VMCS64_GUEST_DEBUGCTL_FULL 0x2802 /**< MSR IA32_DEBUGCTL */
958#define VMX_VMCS64_GUEST_DEBUGCTL_HIGH 0x2803 /**< MSR IA32_DEBUGCTL */
959#define VMX_VMCS64_GUEST_PAT_FULL 0x2804
960#define VMX_VMCS64_GUEST_PAT_HIGH 0x2805
961#define VMX_VMCS64_GUEST_EFER_FULL 0x2806
962#define VMX_VMCS64_GUEST_EFER_HIGH 0x2807
963#define VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL 0x2808 /**< MSR IA32_PERF_GLOBAL_CTRL */
964#define VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH 0x2809 /**< MSR IA32_PERF_GLOBAL_CTRL */
965#define VMX_VMCS64_GUEST_PDPTE0_FULL 0x280A
966#define VMX_VMCS64_GUEST_PDPTE0_HIGH 0x280B
967#define VMX_VMCS64_GUEST_PDPTE1_FULL 0x280C
968#define VMX_VMCS64_GUEST_PDPTE1_HIGH 0x280D
969#define VMX_VMCS64_GUEST_PDPTE2_FULL 0x280E
970#define VMX_VMCS64_GUEST_PDPTE2_HIGH 0x280F
971#define VMX_VMCS64_GUEST_PDPTE3_FULL 0x2810
972#define VMX_VMCS64_GUEST_PDPTE3_HIGH 0x2811
973/** @} */
974
975
976/** @name VMCS field encoding - 32 Bits control fields
977 * @{
978 */
979#define VMX_VMCS32_CTRL_PIN_EXEC 0x4000
980#define VMX_VMCS32_CTRL_PROC_EXEC 0x4002
981#define VMX_VMCS32_CTRL_EXCEPTION_BITMAP 0x4004
982#define VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK 0x4006
983#define VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH 0x4008
984#define VMX_VMCS32_CTRL_CR3_TARGET_COUNT 0x400A
985#define VMX_VMCS32_CTRL_EXIT 0x400C
986#define VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT 0x400E
987#define VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT 0x4010
988#define VMX_VMCS32_CTRL_ENTRY 0x4012
989#define VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT 0x4014
990#define VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO 0x4016
991#define VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE 0x4018
992#define VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH 0x401A
993#define VMX_VMCS32_CTRL_TPR_THRESHOLD 0x401C
994#define VMX_VMCS32_CTRL_PROC_EXEC2 0x401E
995/** @} */
996
997
998/** @name VMX_VMCS_CTRL_PIN_EXEC
999 * @{
1000 */
1001/** External interrupts cause VM exits if set; otherwise dispatched through the guest's IDT. */
1002#define VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT RT_BIT(0)
1003/** Non-maskable interrupts cause VM exits if set; otherwise dispatched through the guest's IDT. */
1004#define VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT RT_BIT(3)
1005/** Virtual NMIs. */
1006#define VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI RT_BIT(5)
1007/** Activate VMX preemption timer. */
1008#define VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER RT_BIT(6)
1009/* All other bits are reserved and must be set according to MSR IA32_VMX_PINBASED_CTLS. */
1010/** @} */
1011
1012/** @name VMX_VMCS_CTRL_PROC_EXEC
1013 * @{
1014 */
1015/** VM Exit as soon as RFLAGS.IF=1 and no blocking is active. */
1016#define VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT RT_BIT(2)
1017/** Use timestamp counter offset. */
1018#define VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING RT_BIT(3)
1019/** VM Exit when executing the HLT instruction. */
1020#define VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT RT_BIT(7)
1021/** VM Exit when executing the INVLPG instruction. */
1022#define VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT RT_BIT(9)
1023/** VM Exit when executing the MWAIT instruction. */
1024#define VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT RT_BIT(10)
1025/** VM Exit when executing the RDPMC instruction. */
1026#define VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT RT_BIT(11)
1027/** VM Exit when executing the RDTSC/RDTSCP instruction. */
1028#define VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT RT_BIT(12)
1029/** VM Exit when executing the MOV to CR3 instruction. (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
1030#define VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT RT_BIT(15)
1031/** VM Exit when executing the MOV from CR3 instruction. (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
1032#define VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT RT_BIT(16)
1033/** VM Exit on CR8 loads. */
1034#define VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT RT_BIT(19)
1035/** VM Exit on CR8 stores. */
1036#define VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT RT_BIT(20)
1037/** Use TPR shadow. */
1038#define VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW RT_BIT(21)
1039/** VM Exit when virtual nmi blocking is disabled. */
1040#define VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT RT_BIT(22)
1041/** VM Exit when executing a MOV DRx instruction. */
1042#define VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT RT_BIT(23)
1043/** VM Exit when executing IO instructions. */
1044#define VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT RT_BIT(24)
1045/** Use IO bitmaps. */
1046#define VMX_VMCS_CTRL_PROC_EXEC_USE_IO_BITMAPS RT_BIT(25)
1047/** Monitor trap flag. */
1048#define VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG RT_BIT(27)
1049/** Use MSR bitmaps. */
1050#define VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS RT_BIT(28)
1051/** VM Exit when executing the MONITOR instruction. */
1052#define VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT RT_BIT(29)
1053/** VM Exit when executing the PAUSE instruction. */
1054#define VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT RT_BIT(30)
1055/** Determines whether the secondary processor based VM-execution controls are used. */
1056#define VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL RT_BIT(31)
1057/** @} */
1058
1059/** @name VMX_VMCS_CTRL_PROC_EXEC2
1060 * @{
1061 */
1062/** Virtualize APIC access. */
1063#define VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC RT_BIT(0)
1064/** EPT supported/enabled. */
1065#define VMX_VMCS_CTRL_PROC_EXEC2_EPT RT_BIT(1)
1066/** Descriptor table instructions cause VM-exits. */
1067#define VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT RT_BIT(2)
1068/** RDTSCP supported/enabled. */
1069#define VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP RT_BIT(3)
1070/** Virtualize x2APIC mode. */
1071#define VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC RT_BIT(4)
1072/** VPID supported/enabled. */
1073#define VMX_VMCS_CTRL_PROC_EXEC2_VPID RT_BIT(5)
1074/** VM Exit when executing the WBINVD instruction. */
1075#define VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT RT_BIT(6)
1076/** Unrestricted guest execution. */
1077#define VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST RT_BIT(7)
1078/** A specified number of PAUSE loops causes a VM-exit. */
1079#define VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT RT_BIT(10)
1080/** VM Exit when executing RDRAND instructions. */
1081#define VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT RT_BIT(11)
1082/** Enables INVPCID instructions. */
1083#define VMX_VMCS_CTRL_PROC_EXEC2_INVPCID RT_BIT(12)
1084/** Enables VMFUNC instructions. */
1085#define VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC RT_BIT(13)
1086/** @} */
1087
1088
1089/** @name VMX_VMCS_CTRL_ENTRY
1090 * @{
1091 */
1092/** Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
1093#define VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG RT_BIT(2)
1094/** 64-bit guest mode. Must be 0 for CPUs that don't support AMD64. */
1095#define VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST RT_BIT(9)
1096/** In SMM mode after VM-entry. */
1097#define VMX_VMCS_CTRL_ENTRY_ENTRY_SMM RT_BIT(10)
1098/** Disable dual-monitor treatment of SMIs and SMM; must be zero for VM-entry outside of SMM. */
1099#define VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON RT_BIT(11)
1100/** This control determines whether the guest IA32_PERF_GLOBAL_CTRL MSR is loaded on VM entry. */
1101#define VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR RT_BIT(13)
1102/** This control determines whether the guest IA32_PAT MSR is loaded on VM entry. */
1103#define VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR RT_BIT(14)
1104/** This control determines whether the guest IA32_EFER MSR is loaded on VM entry. */
1105#define VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR RT_BIT(15)
1106/** @} */
1107
1108
1109/** @name VMX_VMCS_CTRL_EXIT
1110 * @{
1111 */
1112/** Save guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
1113#define VMX_VMCS_CTRL_EXIT_SAVE_DEBUG RT_BIT(2)
1114/** Return to long mode after a VM-exit. */
1115#define VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE RT_BIT(9)
1116/** This control determines whether the IA32_PERF_GLOBAL_CTRL MSR is loaded on VM exit. */
1117#define VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR RT_BIT(12)
1118/** Acknowledge external interrupts with the irq controller if one caused a VM-exit. */
1119#define VMX_VMCS_CTRL_EXIT_ACK_EXT_INT RT_BIT(15)
1120/** This control determines whether the guest IA32_PAT MSR is saved on VM exit. */
1121#define VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR RT_BIT(18)
1122/** This control determines whether the host IA32_PAT MSR is loaded on VM exit. */
1123#define VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR RT_BIT(19)
1124/** This control determines whether the guest IA32_EFER MSR is saved on VM exit. */
1125#define VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR RT_BIT(20)
1126/** This control determines whether the host IA32_EFER MSR is loaded on VM exit. */
1127#define VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR RT_BIT(21)
1128/** This control determines whether the value of the VMX preemption timer is
1129 * saved on every VM exit. */
1130#define VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER RT_BIT(22)
1131/** @} */
1132
1133
1134/** @name VMX_VMCS_CTRL_VMFUNC
1135 * @{
1136 */
1137/** EPTP-switching function changes the value of the EPTP to one chosen from the EPTP list. */
1138#define VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING RT_BIT_64(0)
1139/** @} */
1140
1141
1142/** @name VMCS field encoding - 32 Bits read-only fields
1143 * @{
1144 */
1145#define VMX_VMCS32_RO_VM_INSTR_ERROR 0x4400
1146#define VMX_VMCS32_RO_EXIT_REASON 0x4402
1147#define VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO 0x4404
1148#define VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE 0x4406
1149#define VMX_VMCS32_RO_IDT_INFO 0x4408
1150#define VMX_VMCS32_RO_IDT_ERROR_CODE 0x440A
1151#define VMX_VMCS32_RO_EXIT_INSTR_LENGTH 0x440C
1152#define VMX_VMCS32_RO_EXIT_INSTR_INFO 0x440E
1153/** @} */
1154
1155/** @name VMX_VMCS32_RO_EXIT_REASON
1156 * @{
1157 */
1158#define VMX_EXIT_REASON_BASIC(a) (a & 0xffff)
1159/** @} */
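/*
 * Illustrative sketch (editorial addition): reading the exit reason field and
 * reducing it to one of the VMX_EXIT_* codes defined earlier in this file.
 * VMXReadVmcs32() stands in for whatever VMREAD wrapper the caller uses.
 *
 *     uint32_t u32ExitReason;
 *     VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &u32ExitReason);
 *     switch (VMX_EXIT_REASON_BASIC(u32ExitReason))
 *     {
 *         case VMX_EXIT_CPUID:         // handle the CPUID intercept
 *             break;
 *         case VMX_EXIT_EPT_VIOLATION: // handle the nested-paging fault
 *             break;
 *         // ...
 *     }
 */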
1160
1161/** @name VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO
1162 * @{
1163 */
1164#define VMX_ENTRY_INTERRUPTION_INFO_VALID(a) (a & RT_BIT(31))
1165/** @} */
1166
1167
1168/** @name VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO
1169 * @{
1170 */
1171#define VMX_EXIT_INTERRUPTION_INFO_VECTOR(a) (a & 0xff)
1172#define VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT 8
1173#define VMX_EXIT_INTERRUPTION_INFO_TYPE(a) ((a >> VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT) & 7)
1174#define VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID RT_BIT(11)
1175#define VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(a) (a & VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID)
1176#define VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(a) (a & RT_BIT(12))
1177#ifdef VBOX_WITH_OLD_VTX_CODE
1178# define VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT 31
1179#endif
1180#define VMX_EXIT_INTERRUPTION_INFO_VALID RT_BIT(31)
1181#define VMX_EXIT_INTERRUPTION_INFO_IS_VALID(a) (a & RT_BIT(31))
1182/** Construct an irq event injection value from the exit interruption info value (same except that bit 12 is reserved). */
1183#define VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(a) (a & ~RT_BIT(12))
1184/** @} */
1185
1186/** @name VMX_VMCS_RO_EXIT_INTERRUPTION_INFO_TYPE
1187 * @{
1188 */
1189#define VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT 0
1190#define VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI 2
1191#define VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT 3
1192#define VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT 4 /**< int xx */
1193#define VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT 5 /**< Why are we getting this one?? */
1194#define VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT 6
1195/** @} */
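/*
 * Illustrative sketch (editorial addition): decoding an exit interruption-
 * information value with the accessors above.  u32IntInfo is a hypothetical
 * value already read from VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO.
 *
 *     if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo))
 *     {
 *         uint8_t  uVector    = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
 *         uint32_t uType      = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
 *         bool     fErrorCode = RT_BOOL(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo));
 *         if (uType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
 *         {
 *             // Bit 12 is reserved in the entry field, so strip it before re-injecting.
 *             uint32_t u32EntryInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(u32IntInfo);
 *         }
 *     }
 */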
1196
1197/** @name VMX_VMCS32_RO_IDT_VECTORING_INFO
1198 * @{
1199 */
1200#define VMX_IDT_VECTORING_INFO_VECTOR(a) (a & 0xff)
1201#define VMX_IDT_VECTORING_INFO_TYPE_SHIFT 8
1202#define VMX_IDT_VECTORING_INFO_TYPE(a) ((a >> VMX_IDT_VECTORING_INFO_TYPE_SHIFT) & 7)
1203#define VMX_IDT_VECTORING_INFO_ERROR_CODE_VALID RT_BIT(11)
1204#define VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(a) (a & VMX_IDT_VECTORING_INFO_ERROR_CODE_VALID)
1205#define VMX_IDT_VECTORING_INFO_VALID(a) (a & RT_BIT(31))
1206#define VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(a) (a & ~RT_BIT(12))
1207/** @} */
1208
1209/** @name VMX_VMCS_RO_IDT_VECTORING_INFO_TYPE
1210 * @{
1211 */
1212#define VMX_IDT_VECTORING_INFO_TYPE_EXT_INT 0
1213#define VMX_IDT_VECTORING_INFO_TYPE_NMI 2
1214#define VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT 3
1215#define VMX_IDT_VECTORING_INFO_TYPE_SW_INT 4
1216#define VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT 5
1217#define VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT 6
1218/** @} */
1219
1220
1221/** @name VMCS field encoding - 32 Bits guest state fields
1222 * @{
1223 */
1224#define VMX_VMCS32_GUEST_ES_LIMIT 0x4800
1225#define VMX_VMCS32_GUEST_CS_LIMIT 0x4802
1226#define VMX_VMCS32_GUEST_SS_LIMIT 0x4804
1227#define VMX_VMCS32_GUEST_DS_LIMIT 0x4806
1228#define VMX_VMCS32_GUEST_FS_LIMIT 0x4808
1229#define VMX_VMCS32_GUEST_GS_LIMIT 0x480A
1230#define VMX_VMCS32_GUEST_LDTR_LIMIT 0x480C
1231#define VMX_VMCS32_GUEST_TR_LIMIT 0x480E
1232#define VMX_VMCS32_GUEST_GDTR_LIMIT 0x4810
1233#define VMX_VMCS32_GUEST_IDTR_LIMIT 0x4812
1234#define VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS 0x4814
1235#define VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS 0x4816
1236#define VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS 0x4818
1237#define VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS 0x481A
1238#define VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS 0x481C
1239#define VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS 0x481E
1240#define VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS 0x4820
1241#define VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS 0x4822
1242#define VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE 0x4824
1243#define VMX_VMCS32_GUEST_ACTIVITY_STATE 0x4826
1244#define VMX_VMCS32_GUEST_SYSENTER_CS 0x482A /**< MSR IA32_SYSENTER_CS */
1245#define VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE 0x482E
1246/** @} */
1247
1248
1249/** @name VMX_VMCS_GUEST_ACTIVITY_STATE
1250 * @{
1251 */
1252/** The logical processor is active. */
1253#define VMX_VMCS_GUEST_ACTIVITY_ACTIVE 0x0
1254/** The logical processor is inactive because it executed a HLT instruction. */
1255#define VMX_VMCS_GUEST_ACTIVITY_HLT 0x1
1256/** The logical processor is inactive, because of a triple fault or other
1257 * serious error. */
1258#define VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN 0x2
1259/** The logical processor is inactive because it's waiting for a startup IPI (SIPI). */
1260#define VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT 0x3
1261/** @} */
1262
1263
1264/** @name VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE
1265 * @{
1266 */
1267#define VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI RT_BIT(0)
1268#define VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS RT_BIT(1)
1269#define VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI RT_BIT(2)
1270#define VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI RT_BIT(3)
1271/** @} */
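/*
 * Illustrative sketch (editorial addition): testing the guest interruptibility
 * state before injecting an external interrupt.  u32IntrState is a
 * hypothetical value read from VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE.
 *
 *     bool fBlocked = RT_BOOL(u32IntrState & (  VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
 *                                             | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
 *     if (!fBlocked)
 *     {
 *         // Safe to inject the pending external interrupt here.
 *     }
 */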
1272
1273
1274/** @name VMCS field encoding - 32 Bits host state fields
1275 * @{
1276 */
1277#define VMX_VMCS32_HOST_SYSENTER_CS 0x4C00
1278/** @} */
1279
1280/** @name Natural width control fields
1281 * @{
1282 */
1283#define VMX_VMCS_CTRL_CR0_MASK 0x6000
1284#define VMX_VMCS_CTRL_CR4_MASK 0x6002
1285#define VMX_VMCS_CTRL_CR0_READ_SHADOW 0x6004
1286#define VMX_VMCS_CTRL_CR4_READ_SHADOW 0x6006
1287#define VMX_VMCS_CTRL_CR3_TARGET_VAL0 0x6008
1288#define VMX_VMCS_CTRL_CR3_TARGET_VAL1 0x600A
1289#define VMX_VMCS_CTRL_CR3_TARGET_VAL2 0x600C
1290#define VMX_VMCS_CTRL_CR3_TARGET_VAL31 0x600E
1291/** @} */
1292
1293
1294/** @name Natural width read-only data fields
1295 * @{
1296 */
1297#define VMX_VMCS_RO_EXIT_QUALIFICATION 0x6400
1298#define VMX_VMCS_RO_IO_RCX 0x6402
1299#define VMX_VMCS_RO_IO_RSX 0x6404
1300#define VMX_VMCS_RO_IO_RDI 0x6406
1301#define VMX_VMCS_RO_IO_RIP 0x6408
1302#define VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR 0x640A
1303/** @} */
1304
1305
1306/** @name VMX_VMCS_RO_EXIT_QUALIFICATION
1307 * @{
1308 */
1309/** 0-2: Debug register number */
1310#define VMX_EXIT_QUALIFICATION_DRX_REGISTER(a) (a & 7)
1311/** 3: Reserved; cleared to 0. */
1312#define VMX_EXIT_QUALIFICATION_DRX_RES1(a) ((a >> 3) & 1)
1313/** 4: Direction of move (0 = write, 1 = read) */
1314#define VMX_EXIT_QUALIFICATION_DRX_DIRECTION(a) ((a >> 4) & 1)
1315/** 5-7: Reserved; cleared to 0. */
1316#define VMX_EXIT_QUALIFICATION_DRX_RES2(a) ((a >> 5) & 7)
1317/** 8-11: General purpose register number. */
1318#define VMX_EXIT_QUALIFICATION_DRX_GENREG(a) ((a >> 8) & 0xF)
1319/** Rest: reserved. */
1320/** @} */
1321
1322/** @name VMX_EXIT_QUALIFICATION_DRX_DIRECTION values
1323 * @{
1324 */
1325#define VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE 0
1326#define VMX_EXIT_QUALIFICATION_DRX_DIRECTION_READ 1
1327/** @} */
1328
1329
1330
1331/** @name CRx accesses
1332 * @{
1333 */
1334/** 0-3: Control register number (0 for CLTS & LMSW) */
1335#define VMX_EXIT_QUALIFICATION_CRX_REGISTER(a) (a & 0xF)
1336/** 4-5: Access type. */
1337#define VMX_EXIT_QUALIFICATION_CRX_ACCESS(a) ((a >> 4) & 3)
1338/** 6: LMSW operand type */
1339#define VMX_EXIT_QUALIFICATION_CRX_LMSW_OP(a) ((a >> 6) & 1)
1340/** 7: Reserved; cleared to 0. */
1341#define VMX_EXIT_QUALIFICATION_CRX_RES1(a) ((a >> 7) & 1)
1342/** 8-11: General purpose register number (0 for CLTS & LMSW). */
1343#define VMX_EXIT_QUALIFICATION_CRX_GENREG(a) ((a >> 8) & 0xF)
1344/** 12-15: Reserved; cleared to 0. */
1345#define VMX_EXIT_QUALIFICATION_CRX_RES2(a) ((a >> 12) & 0xF)
1346/** 16-31: LMSW source data (else 0). */
1347#define VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(a) ((a >> 16) & 0xFFFF)
1348/** Rest: reserved. */
1349/** @} */
1350
1351/** @name VMX_EXIT_QUALIFICATION_CRX_ACCESS
1352 * @{
1353 */
1354#define VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE 0
1355#define VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ 1
1356#define VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS 2
1357#define VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW 3
1358/** @} */
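/*
 * Illustrative sketch (editorial addition): decoding a MOV CRx exit
 * qualification with the accessors above.  uExitQual is a hypothetical value
 * read from VMX_VMCS_RO_EXIT_QUALIFICATION.
 *
 *     unsigned iCrReg  = VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQual);
 *     unsigned uAccess = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQual);
 *     unsigned iGReg   = VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQual);
 *     if (uAccess == VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE)
 *     {
 *         // MOV CRx,reg: load control register iCrReg from general register iGReg.
 *     }
 *     else if (uAccess == VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW)
 *     {
 *         uint16_t uLmswSrc = VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQual);
 *     }
 */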
1359
1360/** @name VMX_EXIT_QUALIFICATION_TASK_SWITCH
1361 * @{
1362 */
1363#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SELECTOR(a) (a & 0xffff)
1364#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(a) ((a >> 30) & 0x3)
1365/** Task switch caused by a call instruction. */
1366#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_CALL 0
1367/** Task switch caused by an iret instruction. */
1368#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IRET 1
1369/** Task switch caused by a jmp instruction. */
1370#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_JMP 2
1371/** Task switch caused by an interrupt gate. */
1372#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT 3
1373/** @} */
1374
1375
1376/** @name VMX_EXIT_EPT_VIOLATION
1377 * @{
1378 */
1379/** Set if the violation was caused by a data read. */
1380#define VMX_EXIT_QUALIFICATION_EPT_DATA_READ RT_BIT(0)
1381/** Set if the violation was caused by a data write. */
1382#define VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE RT_BIT(1)
1383/** Set if the violation was caused by an instruction fetch. */
1384#define VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH RT_BIT(2)
1385/** AND of the present bit of all EPT structures. */
1386#define VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT RT_BIT(3)
1387/** AND of the write bit of all EPT structures. */
1388#define VMX_EXIT_QUALIFICATION_EPT_ENTRY_WRITE RT_BIT(4)
1389/** AND of the execute bit of all EPT structures. */
1390#define VMX_EXIT_QUALIFICATION_EPT_ENTRY_EXECUTE RT_BIT(5)
1391/** Set if the guest linear address field contains the faulting address. */
1392#define VMX_EXIT_QUALIFICATION_EPT_GUEST_ADDR_VALID RT_BIT(7)
1393/** If bit 7 is one: (reserved otherwise)
1394 * 1 - violation due to physical address access.
1395 * 0 - violation caused by page walk or access/dirty bit updates
1396 */
1397#define VMX_EXIT_QUALIFICATION_EPT_TRANSLATED_ACCESS RT_BIT(8)
1398/** @} */
1399
1400
1401/** @name VMX_EXIT_PORT_IO
1402 * @{
1403 */
1404/** 0-2: IO operation width. */
1405#define VMX_EXIT_QUALIFICATION_IO_WIDTH(a) (a & 7)
1406/** 3: IO operation direction. */
1407#define VMX_EXIT_QUALIFICATION_IO_DIRECTION(a) ((a >> 3) & 1)
1408/** 4: String IO operation. */
1409#define VMX_EXIT_QUALIFICATION_IO_STRING(a) ((a >> 4) & 1)
1410/** 5: Repeated IO operation. */
1411#define VMX_EXIT_QUALIFICATION_IO_REP(a) ((a >> 5) & 1)
1412/** 6: Operand encoding. */
1413#define VMX_EXIT_QUALIFICATION_IO_ENCODING(a) ((a >> 6) & 1)
1414/** 16-31: IO Port (0-0xffff). */
1415#define VMX_EXIT_QUALIFICATION_IO_PORT(a) ((a >> 16) & 0xffff)
1416/* Rest reserved. */
1417/** @} */
1418
1419/** @name VMX_EXIT_QUALIFICATION_IO_DIRECTION
1420 * @{
1421 */
1422#define VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT 0
1423#define VMX_EXIT_QUALIFICATION_IO_DIRECTION_IN 1
1424/** @} */
1425
1426
1427/** @name VMX_EXIT_QUALIFICATION_IO_ENCODING
1428 * @{
1429 */
1430#define VMX_EXIT_QUALIFICATION_IO_ENCODING_DX 0
1431#define VMX_EXIT_QUALIFICATION_IO_ENCODING_IMM 1
1432/** @} */
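/*
 * Illustrative sketch (editorial addition): decoding an I/O instruction exit
 * qualification with the accessors above.  uExitQual is a hypothetical value
 * read from VMX_VMCS_RO_EXIT_QUALIFICATION.
 *
 *     unsigned cbAccess = VMX_EXIT_QUALIFICATION_IO_WIDTH(uExitQual) + 1;   // 0, 1, 3 -> 1, 2, 4 bytes
 *     bool     fIn      = VMX_EXIT_QUALIFICATION_IO_DIRECTION(uExitQual) == VMX_EXIT_QUALIFICATION_IO_DIRECTION_IN;
 *     bool     fString  = RT_BOOL(VMX_EXIT_QUALIFICATION_IO_STRING(uExitQual));
 *     uint16_t uPort    = VMX_EXIT_QUALIFICATION_IO_PORT(uExitQual);
 */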
1433
1434/** @name VMX_EXIT_APIC_ACCESS
1435 * @{
1436 */
1437/** 0-11: If the APIC-access VM exit is due to a linear access, the offset of access within the APIC page. */
1438#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(a) ((a) & 0xfff)
1439/** 12-15: Access type. */
1440#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(a) (((a) >> 12) & 0xf)
1441/* Rest reserved. */
1442/** @} */
1443
1444
1445/** @name VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE; access types
1446 * @{
1447 */
1448/** Linear read access. */
1449#define VMX_APIC_ACCESS_TYPE_LINEAR_READ 0
1450/** Linear write access. */
1451#define VMX_APIC_ACCESS_TYPE_LINEAR_WRITE 1
1452/** Linear instruction fetch access. */
1453#define VMX_APIC_ACCESS_TYPE_LINEAR_INSTR_FETCH 2
1454/** Linear read/write access during event delivery. */
1455#define VMX_APIC_ACCESS_TYPE_LINEAR_EVENT_DELIVERY 3
1456/** Physical read/write access during event delivery. */
1457#define VMX_APIC_ACCESS_TYPE_PHYSICAL_EVENT_DELIVERY 10
1458/** Physical access for an instruction fetch or during instruction execution. */
1459#define VMX_APIC_ACCESS_TYPE_PHYSICAL_INSTR 15
1460/** @} */
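
/* Usage sketch (illustrative only; the function name is hypothetical): decoding an
 * APIC-access exit qualification with the macros and access-type values above. */
#if 0
DECLINLINE(bool) vmxExampleIsApicLinearDataAccess(uint64_t uExitQual, uint32_t *poffAccess)
{
    uint32_t const uAccessType = (uint32_t)VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(uExitQual);
    *poffAccess = (uint32_t)VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(uExitQual);
    return uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ
        || uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_WRITE;
}
#endif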
1461
1462/** @} */
1463
1464/** @name VMCS field encoding - Natural width guest state fields
1465 * @{
1466 */
1467#define VMX_VMCS_GUEST_CR0 0x6800
1468#define VMX_VMCS_GUEST_CR3 0x6802
1469#define VMX_VMCS_GUEST_CR4 0x6804
1470#define VMX_VMCS_GUEST_ES_BASE 0x6806
1471#define VMX_VMCS_GUEST_CS_BASE 0x6808
1472#define VMX_VMCS_GUEST_SS_BASE 0x680A
1473#define VMX_VMCS_GUEST_DS_BASE 0x680C
1474#define VMX_VMCS_GUEST_FS_BASE 0x680E
1475#define VMX_VMCS_GUEST_GS_BASE 0x6810
1476#define VMX_VMCS_GUEST_LDTR_BASE 0x6812
1477#define VMX_VMCS_GUEST_TR_BASE 0x6814
1478#define VMX_VMCS_GUEST_GDTR_BASE 0x6816
1479#define VMX_VMCS_GUEST_IDTR_BASE 0x6818
1480#define VMX_VMCS_GUEST_DR7 0x681A
1481#define VMX_VMCS_GUEST_RSP 0x681C
1482#define VMX_VMCS_GUEST_RIP 0x681E
1483#define VMX_VMCS_GUEST_RFLAGS 0x6820
1484#define VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS 0x6822
1485#define VMX_VMCS_GUEST_SYSENTER_ESP 0x6824 /**< MSR IA32_SYSENTER_ESP */
1486#define VMX_VMCS_GUEST_SYSENTER_EIP 0x6826 /**< MSR IA32_SYSENTER_EIP */
1487/** @} */
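
/* Usage sketch (illustrative only; the function name is hypothetical): reading a couple
 * of the natural-width guest-state fields above with VMXReadVmcs64(), which is declared
 * further down in this header; a ring-0 context with the relevant VMCS current is assumed. */
#if 0
DECLINLINE(int) vmxExampleReadGuestRipAndCr3(uint64_t *pu64Rip, uint64_t *pu64Cr3)
{
    int rc  = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, pu64Rip);
    rc     |= VMXReadVmcs64(VMX_VMCS_GUEST_CR3, pu64Cr3);
    AssertRC(rc);
    return rc;
}
#endif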
1488
1489
1490/** @name VMX_VMCS_GUEST_DEBUG_EXCEPTIONS
1491 * @{
1492 */
1493/** Hardware breakpoint 0 was met. */
1494#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_B0 RT_BIT(0)
1495/** Hardware breakpoint 1 was met. */
1496#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_B1 RT_BIT(1)
1497/** Hardware breakpoint 2 was met. */
1498#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_B2 RT_BIT(2)
1499/** Hardware breakpoint 3 was met. */
1500#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_B3 RT_BIT(3)
1501/** At least one data or IO breakpoint was hit. */
1502#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BREAKPOINT_ENABLED RT_BIT(12)
1503/** A debug exception would have been triggered by single-step execution mode. */
1504#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS RT_BIT(14)
1505/** Bits 4-11, 13 and 15-63 are reserved. */
1506
1507/** @} */
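
/* Usage sketch (illustrative only; the function name is hypothetical): testing a
 * pending-debug-exceptions value for a pending single-step event using the bits above. */
#if 0
DECLINLINE(bool) vmxExampleIsSingleStepPending(uint64_t u64PendingDbgXcpts)
{
    return RT_BOOL(u64PendingDbgXcpts & VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
}
#endif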
1508
1509/** @name VMCS field encoding - Natural width host state fields
1510 * @{
1511 */
1512#define VMX_VMCS_HOST_CR0 0x6C00
1513#define VMX_VMCS_HOST_CR3 0x6C02
1514#define VMX_VMCS_HOST_CR4 0x6C04
1515#define VMX_VMCS_HOST_FS_BASE 0x6C06
1516#define VMX_VMCS_HOST_GS_BASE 0x6C08
1517#define VMX_VMCS_HOST_TR_BASE 0x6C0A
1518#define VMX_VMCS_HOST_GDTR_BASE 0x6C0C
1519#define VMX_VMCS_HOST_IDTR_BASE 0x6C0E
1520#define VMX_VMCS_HOST_SYSENTER_ESP 0x6C10
1521#define VMX_VMCS_HOST_SYSENTER_EIP 0x6C12
1522#define VMX_VMCS_HOST_RSP 0x6C14
1523#define VMX_VMCS_HOST_RIP 0x6C16
1524/** @} */
1525
1526/** @} */
1527
1528
1529#if RT_INLINE_ASM_GNU_STYLE
1530# define __STR(x) #x
1531# define STR(x) __STR(x)
1532#endif
1533
1534
1535/** @defgroup grp_vmx_asm vmx assembly helpers
1536 * @ingroup grp_vmx
1537 * @{
1538 */
1539
1540/**
1541 * Restores some host-state fields whose restoration need not be done on every VM-exit.
1542 *
1543 * @returns VBox status code.
1544 * @param fRestoreHostFlags Flags indicating which host registers need to be
1545 * restored.
1546 * @param pRestoreHost Pointer to the host-restore structure.
1547 */
1548DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);
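
/* Usage sketch (illustrative only; the function and variable names are hypothetical):
 * invoking VMXRestoreHostState() with the restore flags accumulated while the host state
 * was being saved; pRestoreHost is assumed to have been filled in before the VM-entry. */
#if 0
DECLINLINE(void) vmxExampleRestoreHost(uint32_t fRestoreFlags, PVMXRESTOREHOST pRestoreHost)
{
    if (fRestoreFlags) /* e.g. VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS */
    {
        int rc = VMXRestoreHostState(fRestoreFlags, pRestoreHost);
        AssertRC(rc);
    }
}
#endif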
1549
1550
1551/**
1552 * Executes VMXON
1553 *
1554 * @returns VBox status code
1555 * @param pVMXOn Physical address of VMXON structure
1556 */
1557#if RT_INLINE_ASM_EXTERNAL || HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1558DECLASM(int) VMXEnable(RTHCPHYS pVMXOn);
1559#else
1560DECLINLINE(int) VMXEnable(RTHCPHYS pVMXOn)
1561{
1562 int rc = VINF_SUCCESS;
1563# if RT_INLINE_ASM_GNU_STYLE
1564 __asm__ __volatile__ (
1565 "push %3 \n\t"
1566 "push %2 \n\t"
1567 ".byte 0xF3, 0x0F, 0xC7, 0x34, 0x24 # VMXON [esp] \n\t"
1568 "ja 2f \n\t"
1569 "je 1f \n\t"
1570 "movl $"STR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
1571 "jmp 2f \n\t"
1572 "1: \n\t"
1573 "movl $"STR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
1574 "2: \n\t"
1575 "add $8, %%esp \n\t"
1576 :"=rm"(rc)
1577 :"0"(VINF_SUCCESS),
1578 "ir"((uint32_t)pVMXOn), /* don't allow direct memory reference here, */
1579 "ir"((uint32_t)(pVMXOn >> 32)) /* this would not work with -fomit-frame-pointer */
1580 :"memory"
1581 );
1582# else
1583 __asm
1584 {
1585 push dword ptr [pVMXOn+4]
1586 push dword ptr [pVMXOn]
1587 _emit 0xF3
1588 _emit 0x0F
1589 _emit 0xC7
1590 _emit 0x34
1591 _emit 0x24 /* VMXON [esp] */
1592 jnc vmxon_good
1593 mov dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
1594 jmp the_end
1595
1596vmxon_good:
1597 jnz the_end
1598 mov dword ptr [rc], VERR_VMX_VMXON_FAILED
1599the_end:
1600 add esp, 8
1601 }
1602# endif
1603 return rc;
1604}
1605#endif
1606
1607
1608/**
1609 * Executes VMXOFF
1610 */
1611#if RT_INLINE_ASM_EXTERNAL || HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1612DECLASM(void) VMXDisable(void);
1613#else
1614DECLINLINE(void) VMXDisable(void)
1615{
1616# if RT_INLINE_ASM_GNU_STYLE
1617 __asm__ __volatile__ (
1618 ".byte 0x0F, 0x01, 0xC4 # VMXOFF \n\t"
1619 );
1620# else
1621 __asm
1622 {
1623 _emit 0x0F
1624 _emit 0x01
1625 _emit 0xC4 /* VMXOFF */
1626 }
1627# endif
1628}
1629#endif
1630
1631
1632/**
1633 * Executes VMCLEAR
1634 *
1635 * @returns VBox status code
1636 * @param pVMCS Physical address of VM control structure
1637 */
1638#if RT_INLINE_ASM_EXTERNAL || HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1639DECLASM(int) VMXClearVMCS(RTHCPHYS pVMCS);
1640#else
1641DECLINLINE(int) VMXClearVMCS(RTHCPHYS pVMCS)
1642{
1643 int rc = VINF_SUCCESS;
1644# if RT_INLINE_ASM_GNU_STYLE
1645 __asm__ __volatile__ (
1646 "push %3 \n\t"
1647 "push %2 \n\t"
1648 ".byte 0x66, 0x0F, 0xC7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
1649 "jnc 1f \n\t"
1650 "movl $"STR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1651 "1: \n\t"
1652 "add $8, %%esp \n\t"
1653 :"=rm"(rc)
1654 :"0"(VINF_SUCCESS),
1655 "ir"((uint32_t)pVMCS), /* don't allow direct memory reference here, */
1656 "ir"((uint32_t)(pVMCS >> 32)) /* this would not work with -fomit-frame-pointer */
1657 :"memory"
1658 );
1659# else
1660 __asm
1661 {
1662 push dword ptr [pVMCS+4]
1663 push dword ptr [pVMCS]
1664 _emit 0x66
1665 _emit 0x0F
1666 _emit 0xC7
1667 _emit 0x34
1668 _emit 0x24 /* VMCLEAR [esp] */
1669 jnc success
1670 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
1671success:
1672 add esp, 8
1673 }
1674# endif
1675 return rc;
1676}
1677#endif
1678
1679
1680/**
1681 * Executes VMPTRLD
1682 *
1683 * @returns VBox status code
1684 * @param pVMCS Physical address of VMCS structure
1685 */
1686#if RT_INLINE_ASM_EXTERNAL || HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1687DECLASM(int) VMXActivateVMCS(RTHCPHYS pVMCS);
1688#else
1689DECLINLINE(int) VMXActivateVMCS(RTHCPHYS pVMCS)
1690{
1691 int rc = VINF_SUCCESS;
1692# if RT_INLINE_ASM_GNU_STYLE
1693 __asm__ __volatile__ (
1694 "push %3 \n\t"
1695 "push %2 \n\t"
1696 ".byte 0x0F, 0xC7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
1697 "jnc 1f \n\t"
1698 "movl $"STR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1699 "1: \n\t"
1700 "add $8, %%esp \n\t"
1701 :"=rm"(rc)
1702 :"0"(VINF_SUCCESS),
1703 "ir"((uint32_t)pVMCS), /* don't allow direct memory reference here, */
1704 "ir"((uint32_t)(pVMCS >> 32)) /* this will not work with -fomit-frame-pointer */
1705 );
1706# else
1707 __asm
1708 {
1709 push dword ptr [pVMCS+4]
1710 push dword ptr [pVMCS]
1711 _emit 0x0F
1712 _emit 0xC7
1713 _emit 0x34
1714 _emit 0x24 /* VMPTRLD [esp] */
1715 jnc success
1716 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
1717
1718success:
1719 add esp, 8
1720 }
1721# endif
1722 return rc;
1723}
1724#endif
1725
1726/**
1727 * Executes VMPTRST
1728 *
1729 * @returns VBox status code
1730 * @param pVMCS Address that will receive the current pointer
1731 */
1732DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
1733
1734/**
1735 * Executes VMWRITE
1736 *
1737 * @returns VBox status code
1738 * @param idxField VMCS index
1739 * @param u32Val 32-bit value
1740 */
1741#if RT_INLINE_ASM_EXTERNAL || HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1742DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Val);
1743#else
1744DECLINLINE(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Val)
1745{
1746 int rc = VINF_SUCCESS;
1747# if RT_INLINE_ASM_GNU_STYLE
1748 __asm__ __volatile__ (
1749 ".byte 0x0F, 0x79, 0xC2 # VMWRITE eax, edx \n\t"
1750 "ja 2f \n\t"
1751 "je 1f \n\t"
1752 "movl $"STR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1753 "jmp 2f \n\t"
1754 "1: \n\t"
1755 "movl $"STR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
1756 "2: \n\t"
1757 :"=rm"(rc)
1758 :"0"(VINF_SUCCESS),
1759 "a"(idxField),
1760 "d"(u32Val)
1761 );
1762# else
1763 __asm
1764 {
1765 push dword ptr [u32Val]
1766 mov eax, [idxField]
1767 _emit 0x0F
1768 _emit 0x79
1769 _emit 0x04
1770 _emit 0x24 /* VMWRITE eax, [esp] */
1771 jnc valid_vmcs
1772 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
1773 jmp the_end
1774
1775valid_vmcs:
1776 jnz the_end
1777 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
1778the_end:
1779 add esp, 4
1780 }
1781# endif
1782 return rc;
1783}
1784#endif
1785
1786/**
1787 * Executes VMWRITE
1788 *
1789 * @returns VBox status code
1790 * @param idxField VMCS index
1791 * @param u64Val 16, 32 or 64-bit value
1792 */
1793#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1794DECLASM(int) VMXWriteVmcs64(uint32_t idxField, uint64_t u64Val);
1795#else
1796VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val);
1797
1798#define VMXWriteVmcs64(idxField, u64Val) VMXWriteVmcs64Ex(pVCpu, idxField, u64Val)
1799#endif
1800
1801#ifdef VBOX_WITH_OLD_VTX_CODE
1802# if HC_ARCH_BITS == 64
1803# define VMXWriteVmcs VMXWriteVmcs64
1804# else
1805# define VMXWriteVmcs VMXWriteVmcs32
1806# endif
1807#else /* !VBOX_WITH_OLD_VTX_CODE */
1808# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1809# define VMXWriteVmcsHstN(idxField, uVal) HMVMX_IS_64BIT_HOST_MODE() ? \
1810 VMXWriteVmcs64(idxField, uVal) \
1811 : VMXWriteVmcs32(idxField, uVal)
1812# define VMXWriteVmcsGstN(idxField, u64Val) (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests) ? \
1813 VMXWriteVmcs64(idxField, u64Val) \
1814 : VMXWriteVmcs32(idxField, u64Val)
1815# elif HC_ARCH_BITS == 32
1816# define VMXWriteVmcsHstN VMXWriteVmcs32
1817# define VMXWriteVmcsGstN(idxField, u64Val) VMXWriteVmcs64Ex(pVCpu, idxField, u64Val)
1818# else /* HC_ARCH_BITS == 64 */
1819# define VMXWriteVmcsHstN VMXWriteVmcs64
1820# define VMXWriteVmcsGstN VMXWriteVmcs64
1821# endif
1822#endif /* !VBOX_WITH_OLD_VTX_CODE */
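
/* Usage sketch (illustrative only; the function name is hypothetical): writing natural-width
 * host and guest fields through the width-dispatching macros above. A variable named pVCpu
 * must be in scope because VMXWriteVmcsGstN() expands to VMXWriteVmcs64Ex(pVCpu, ...) on
 * 32-bit hosts. */
#if 0
DECLINLINE(int) vmxExampleWriteNaturalWidthFields(PVMCPU pVCpu, uint64_t uHostCr3, uint64_t u64GuestRip)
{
    int rc  = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uHostCr3);
    rc     |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, u64GuestRip);
    NOREF(pVCpu); /* only referenced by the 32-bit expansion of VMXWriteVmcsGstN() */
    return rc;
}
#endif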
1823
1824
1825/**
1826 * Invalidate a page using invept
1827 * @returns VBox status code
1828 * @param enmFlush Type of flush
1829 * @param pDescriptor Descriptor
1830 */
1831DECLASM(int) VMXR0InvEPT(VMX_FLUSH_EPT enmFlush, uint64_t *pDescriptor);
1832
1833/**
1834 * Invalidate a page using invvpid
1835 * @returns VBox status code
1836 * @param enmFlush Type of flush
1837 * @param pDescriptor Descriptor
1838 */
1839DECLASM(int) VMXR0InvVPID(VMX_FLUSH_VPID enmFlush, uint64_t *pDescriptor);
1840
1841/**
1842 * Executes VMREAD
1843 *
1844 * @returns VBox status code
1845 * @param idxField VMCS index
1846 * @param pData Ptr to store VM field value
1847 */
1848#if RT_INLINE_ASM_EXTERNAL || HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1849DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pData);
1850#else
1851DECLINLINE(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pData)
1852{
1853 int rc = VINF_SUCCESS;
1854# if RT_INLINE_ASM_GNU_STYLE
1855 __asm__ __volatile__ (
1856 "movl $"STR(VINF_SUCCESS)", %0 \n\t"
1857 ".byte 0x0F, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1858 "ja 2f \n\t"
1859 "je 1f \n\t"
1860 "movl $"STR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1861 "jmp 2f \n\t"
1862 "1: \n\t"
1863 "movl $"STR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
1864 "2: \n\t"
1865 :"=&r"(rc),
1866 "=d"(*pData)
1867 :"a"(idxField),
1868 "d"(0)
1869 );
1870# else
1871 __asm
1872 {
1873 sub esp, 4
1874 mov dword ptr [esp], 0
1875 mov eax, [idxField]
1876 _emit 0x0F
1877 _emit 0x78
1878 _emit 0x04
1879 _emit 0x24 /* VMREAD eax, [esp] */
1880 mov edx, pData
1881 pop dword ptr [edx]
1882 jnc valid_vmcs
1883 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
1884 jmp the_end
1885
1886valid_vmcs:
1887 jnz the_end
1888 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
1889the_end:
1890 }
1891# endif
1892 return rc;
1893}
1894#endif
1895
1896/**
1897 * Executes VMREAD
1898 *
1899 * @returns VBox status code
1900 * @param idxField VMCS index
1901 * @param pData Ptr to store VM field value
1902 */
1903#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1904DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
1905#else
1906DECLINLINE(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData)
1907{
1908 int rc;
1909
1910 uint32_t val_hi, val;
1911 rc = VMXReadVmcs32(idxField, &val);
1912 rc |= VMXReadVmcs32(idxField + 1, &val_hi);
1913 AssertRC(rc);
1914 *pData = RT_MAKE_U64(val, val_hi);
1915 return rc;
1916}
1917#endif
1918
1919#ifdef VBOX_WITH_OLD_VTX_CODE
1920# if HC_ARCH_BITS == 64
1921# define VMXReadVmcsField VMXReadVmcs64
1922# else
1923# define VMXReadVmcsField VMXReadVmcs32
1924# endif
1925#endif
1926
1927/**
1928 * Gets the last instruction error value from the current VMCS
1929 *
1930 * @returns error value
1931 */
1932DECLINLINE(uint32_t) VMXGetLastError(void)
1933{
1934#if HC_ARCH_BITS == 64
1935 uint64_t uLastError = 0;
1936 int rc = VMXReadVmcs64(VMX_VMCS32_RO_VM_INSTR_ERROR, &uLastError);
1937 AssertRC(rc);
1938 return (uint32_t)uLastError;
1939
1940#else /* 32-bit host: */
1941 uint32_t uLastError = 0;
1942 int rc = VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &uLastError);
1943 AssertRC(rc);
1944 return uLastError;
1945#endif
1946}
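
/* Usage sketch (illustrative only; the function name is hypothetical): fetching the VMCS
 * instruction error after a VMX operation reported failure, e.g. for diagnostics. */
#if 0
DECLINLINE(uint32_t) vmxExampleDiagnoseFailure(int rc)
{
    uint32_t uInstrError = 0;
    if (RT_FAILURE(rc))
        uInstrError = VMXGetLastError();
    return uInstrError;
}
#endif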
1947
1948#ifdef IN_RING0
1949VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
1950VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys);
1951#endif /* IN_RING0 */
1952
1953/** @} */
1954
1955#endif
1956