
source: vbox/trunk/include/VBox/vmm/hm_svm.h@ 75853

Last change on this file since 75853 was 75611, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 Move the VMX APIC-access guest-physical page registration into IEM and got rid of the CPUM all context code that does not quite fit because we still have to declare the prototypes in the HM headers anyway, so just keep it in HM all context code for now.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 46.9 KB
1/** @file
2 * HM - SVM (AMD-V) Structures and Definitions. (VMM)
3 */
4
5/*
6 * Copyright (C) 2006-2017 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_svm_h
27#define ___VBox_vmm_svm_h
28
29#include <VBox/types.h>
30#include <VBox/err.h>
31#include <iprt/assert.h>
32#include <iprt/asm.h>
33
34#ifdef RT_OS_SOLARIS
35# undef ES
36# undef CS
37# undef DS
38# undef SS
39# undef FS
40# undef GS
41#endif
42
43/** @defgroup grp_hm_svm SVM (AMD-V) Types and Definitions
44 * @ingroup grp_hm
45 * @{
46 */
47
48/** @name SVM generic / convenient defines.
49 * @{
50 */
51/** Number of pages required for the VMCB. */
52#define SVM_VMCB_PAGES 1
53/** Number of pages required for the MSR permission bitmap. */
54#define SVM_MSRPM_PAGES 2
55/** Number of pages required for the IO permission bitmap. */
56#define SVM_IOPM_PAGES 3
57/** @} */
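/*
 * A minimal sketch of the allocation sizes these page counts imply, assuming the
 * usual 4K x86 page; the SVM_EXAMPLE_* names are hypothetical, not VirtualBox API.
 */
#define SVM_EXAMPLE_PAGE_SIZE  4096
#define SVM_EXAMPLE_VMCB_SIZE  (SVM_VMCB_PAGES  * SVM_EXAMPLE_PAGE_SIZE)  /*  4 KB VMCB */
#define SVM_EXAMPLE_MSRPM_SIZE (SVM_MSRPM_PAGES * SVM_EXAMPLE_PAGE_SIZE)  /*  8 KB MSR permission bitmap */
#define SVM_EXAMPLE_IOPM_SIZE  (SVM_IOPM_PAGES  * SVM_EXAMPLE_PAGE_SIZE)  /* 12 KB IO permission bitmap */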
58
59/*
60 * Ugly!
61 * When compiling the recompiler, its own svm.h defines clash with
62 * the following defines. Avoid just the duplicates here as we still
63 * require other definitions and structures in this header.
64 */
65#ifndef IN_REM_R3
66/** @name SVM_EXIT_XXX - SVM Basic Exit Reasons.
67 * @{
68 */
69/** Invalid guest state in VMCB. */
70# define SVM_EXIT_INVALID (uint64_t)(-1)
71/** Read from CR0-CR15. */
72# define SVM_EXIT_READ_CR0 0x0
73# define SVM_EXIT_READ_CR1 0x1
74# define SVM_EXIT_READ_CR2 0x2
75# define SVM_EXIT_READ_CR3 0x3
76# define SVM_EXIT_READ_CR4 0x4
77# define SVM_EXIT_READ_CR5 0x5
78# define SVM_EXIT_READ_CR6 0x6
79# define SVM_EXIT_READ_CR7 0x7
80# define SVM_EXIT_READ_CR8 0x8
81# define SVM_EXIT_READ_CR9 0x9
82# define SVM_EXIT_READ_CR10 0xa
83# define SVM_EXIT_READ_CR11 0xb
84# define SVM_EXIT_READ_CR12 0xc
85# define SVM_EXIT_READ_CR13 0xd
86# define SVM_EXIT_READ_CR14 0xe
87# define SVM_EXIT_READ_CR15 0xf
88/** Writes to CR0-CR15. */
89# define SVM_EXIT_WRITE_CR0 0x10
90# define SVM_EXIT_WRITE_CR1 0x11
91# define SVM_EXIT_WRITE_CR2 0x12
92# define SVM_EXIT_WRITE_CR3 0x13
93# define SVM_EXIT_WRITE_CR4 0x14
94# define SVM_EXIT_WRITE_CR5 0x15
95# define SVM_EXIT_WRITE_CR6 0x16
96# define SVM_EXIT_WRITE_CR7 0x17
97# define SVM_EXIT_WRITE_CR8 0x18
98# define SVM_EXIT_WRITE_CR9 0x19
99# define SVM_EXIT_WRITE_CR10 0x1a
100# define SVM_EXIT_WRITE_CR11 0x1b
101# define SVM_EXIT_WRITE_CR12 0x1c
102# define SVM_EXIT_WRITE_CR13 0x1d
103# define SVM_EXIT_WRITE_CR14 0x1e
104# define SVM_EXIT_WRITE_CR15 0x1f
105/** Read from DR0-DR15. */
106# define SVM_EXIT_READ_DR0 0x20
107# define SVM_EXIT_READ_DR1 0x21
108# define SVM_EXIT_READ_DR2 0x22
109# define SVM_EXIT_READ_DR3 0x23
110# define SVM_EXIT_READ_DR4 0x24
111# define SVM_EXIT_READ_DR5 0x25
112# define SVM_EXIT_READ_DR6 0x26
113# define SVM_EXIT_READ_DR7 0x27
114# define SVM_EXIT_READ_DR8 0x28
115# define SVM_EXIT_READ_DR9 0x29
116# define SVM_EXIT_READ_DR10 0x2a
117# define SVM_EXIT_READ_DR11 0x2b
118# define SVM_EXIT_READ_DR12 0x2c
119# define SVM_EXIT_READ_DR13 0x2d
120# define SVM_EXIT_READ_DR14 0x2e
121# define SVM_EXIT_READ_DR15 0x2f
122/** Writes to DR0-DR15. */
123# define SVM_EXIT_WRITE_DR0 0x30
124# define SVM_EXIT_WRITE_DR1 0x31
125# define SVM_EXIT_WRITE_DR2 0x32
126# define SVM_EXIT_WRITE_DR3 0x33
127# define SVM_EXIT_WRITE_DR4 0x34
128# define SVM_EXIT_WRITE_DR5 0x35
129# define SVM_EXIT_WRITE_DR6 0x36
130# define SVM_EXIT_WRITE_DR7 0x37
131# define SVM_EXIT_WRITE_DR8 0x38
132# define SVM_EXIT_WRITE_DR9 0x39
133# define SVM_EXIT_WRITE_DR10 0x3a
134# define SVM_EXIT_WRITE_DR11 0x3b
135# define SVM_EXIT_WRITE_DR12 0x3c
136# define SVM_EXIT_WRITE_DR13 0x3d
137# define SVM_EXIT_WRITE_DR14 0x3e
138# define SVM_EXIT_WRITE_DR15 0x3f
139/* Exceptions 0-31. */
140# define SVM_EXIT_XCPT_0 0x40
141# define SVM_EXIT_XCPT_1 0x41
142# define SVM_EXIT_XCPT_2 0x42
143# define SVM_EXIT_XCPT_3 0x43
144# define SVM_EXIT_XCPT_4 0x44
145# define SVM_EXIT_XCPT_5 0x45
146# define SVM_EXIT_XCPT_6 0x46
147# define SVM_EXIT_XCPT_7 0x47
148# define SVM_EXIT_XCPT_8 0x48
149# define SVM_EXIT_XCPT_9 0x49
150# define SVM_EXIT_XCPT_10 0x4a
151# define SVM_EXIT_XCPT_11 0x4b
152# define SVM_EXIT_XCPT_12 0x4c
153# define SVM_EXIT_XCPT_13 0x4d
154# define SVM_EXIT_XCPT_14 0x4e
155# define SVM_EXIT_XCPT_15 0x4f
156# define SVM_EXIT_XCPT_16 0x50
157# define SVM_EXIT_XCPT_17 0x51
158# define SVM_EXIT_XCPT_18 0x52
159# define SVM_EXIT_XCPT_19 0x53
160# define SVM_EXIT_XCPT_20 0x54
161# define SVM_EXIT_XCPT_21 0x55
162# define SVM_EXIT_XCPT_22 0x56
163# define SVM_EXIT_XCPT_23 0x57
164# define SVM_EXIT_XCPT_24 0x58
165# define SVM_EXIT_XCPT_25 0x59
166# define SVM_EXIT_XCPT_26 0x5a
167# define SVM_EXIT_XCPT_27 0x5b
168# define SVM_EXIT_XCPT_28 0x5c
169# define SVM_EXIT_XCPT_29 0x5d
170# define SVM_EXIT_XCPT_30 0x5e
171# define SVM_EXIT_XCPT_31 0x5f
172/* Exceptions (more readable mnemonics). */
173# define SVM_EXIT_XCPT_DE SVM_EXIT_XCPT_0
174# define SVM_EXIT_XCPT_DB SVM_EXIT_XCPT_1
175# define SVM_EXIT_XCPT_NMI SVM_EXIT_XCPT_2
176# define SVM_EXIT_XCPT_BP SVM_EXIT_XCPT_3
177# define SVM_EXIT_XCPT_OF SVM_EXIT_XCPT_4
178# define SVM_EXIT_XCPT_BR SVM_EXIT_XCPT_5
179# define SVM_EXIT_XCPT_UD SVM_EXIT_XCPT_6
180# define SVM_EXIT_XCPT_NM SVM_EXIT_XCPT_7
181# define SVM_EXIT_XCPT_DF SVM_EXIT_XCPT_8
182# define SVM_EXIT_XCPT_CO_SEG_OVERRUN SVM_EXIT_XCPT_9
183# define SVM_EXIT_XCPT_TS SVM_EXIT_XCPT_10
184# define SVM_EXIT_XCPT_NP SVM_EXIT_XCPT_11
185# define SVM_EXIT_XCPT_SS SVM_EXIT_XCPT_12
186# define SVM_EXIT_XCPT_GP SVM_EXIT_XCPT_13
187# define SVM_EXIT_XCPT_PF SVM_EXIT_XCPT_14
188# define SVM_EXIT_XCPT_MF SVM_EXIT_XCPT_16
189# define SVM_EXIT_XCPT_AC SVM_EXIT_XCPT_17
190# define SVM_EXIT_XCPT_MC SVM_EXIT_XCPT_18
191# define SVM_EXIT_XCPT_XF SVM_EXIT_XCPT_19
192# define SVM_EXIT_XCPT_VE SVM_EXIT_XCPT_20
193# define SVM_EXIT_XCPT_SX SVM_EXIT_XCPT_30
194/** Physical maskable interrupt. */
195# define SVM_EXIT_INTR 0x60
196/** Non-maskable interrupt. */
197# define SVM_EXIT_NMI 0x61
198/** System Management interrupt. */
199# define SVM_EXIT_SMI 0x62
200/** Physical INIT signal. */
201# define SVM_EXIT_INIT 0x63
202/** Virtual interrupt. */
203# define SVM_EXIT_VINTR 0x64
204/** Write to CR0 that changed any bits other than CR0.TS or CR0.MP. */
205# define SVM_EXIT_CR0_SEL_WRITE 0x65
206/** IDTR read. */
207# define SVM_EXIT_IDTR_READ 0x66
208/** GDTR read. */
209# define SVM_EXIT_GDTR_READ 0x67
210/** LDTR read. */
211# define SVM_EXIT_LDTR_READ 0x68
212/** TR read. */
213# define SVM_EXIT_TR_READ 0x69
214/** IDTR write. */
215# define SVM_EXIT_IDTR_WRITE 0x6a
216/** GDTR write. */
217# define SVM_EXIT_GDTR_WRITE 0x6b
218/** LDTR write. */
219# define SVM_EXIT_LDTR_WRITE 0x6c
220/** TR write. */
221# define SVM_EXIT_TR_WRITE 0x6d
222/** RDTSC instruction. */
223# define SVM_EXIT_RDTSC 0x6e
224/** RDPMC instruction. */
225# define SVM_EXIT_RDPMC 0x6f
226/** PUSHF instruction. */
227# define SVM_EXIT_PUSHF 0x70
228/** POPF instruction. */
229# define SVM_EXIT_POPF 0x71
230/** CPUID instruction. */
231# define SVM_EXIT_CPUID 0x72
232/** RSM instruction. */
233# define SVM_EXIT_RSM 0x73
234/** IRET instruction. */
235# define SVM_EXIT_IRET 0x74
236/** Software interrupt (INTn instructions). */
237# define SVM_EXIT_SWINT 0x75
238/** INVD instruction. */
239# define SVM_EXIT_INVD 0x76
240/** PAUSE instruction. */
241# define SVM_EXIT_PAUSE 0x77
242/** HLT instruction. */
243# define SVM_EXIT_HLT 0x78
244/** INVLPG instructions. */
245# define SVM_EXIT_INVLPG 0x79
246/** INVLPGA instruction. */
247# define SVM_EXIT_INVLPGA 0x7a
248/** IN or OUT accessing protected port (the EXITINFO1 field provides more information). */
249# define SVM_EXIT_IOIO 0x7b
250/** RDMSR or WRMSR access to protected MSR. */
251# define SVM_EXIT_MSR 0x7c
252/** Task switch. */
253# define SVM_EXIT_TASK_SWITCH 0x7d
254/** FP legacy handling enabled, and processor is frozen in an x87/mmx instruction waiting for an interrupt. */
255# define SVM_EXIT_FERR_FREEZE 0x7e
256/** Shutdown. */
257# define SVM_EXIT_SHUTDOWN 0x7f
258/** VMRUN instruction. */
259# define SVM_EXIT_VMRUN 0x80
260/** VMMCALL instruction. */
261# define SVM_EXIT_VMMCALL 0x81
262/** VMLOAD instruction. */
263# define SVM_EXIT_VMLOAD 0x82
264/** VMSAVE instruction. */
265# define SVM_EXIT_VMSAVE 0x83
266/** STGI instruction. */
267# define SVM_EXIT_STGI 0x84
268/** CLGI instruction. */
269# define SVM_EXIT_CLGI 0x85
270/** SKINIT instruction. */
271# define SVM_EXIT_SKINIT 0x86
272/** RDTSCP instruction. */
273# define SVM_EXIT_RDTSCP 0x87
274/** ICEBP instruction. */
275# define SVM_EXIT_ICEBP 0x88
276/** WBINVD instruction. */
277# define SVM_EXIT_WBINVD 0x89
278/** MONITOR instruction. */
279# define SVM_EXIT_MONITOR 0x8a
280/** MWAIT instruction. */
281# define SVM_EXIT_MWAIT 0x8b
282/** MWAIT instruction, when armed. */
283# define SVM_EXIT_MWAIT_ARMED 0x8c
284/** XSETBV instruction. */
285# define SVM_EXIT_XSETBV 0x8d
286/** Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault). */
287# define SVM_EXIT_NPF 0x400
288/** AVIC: Virtual IPI delivery not completed. */
289# define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
290/** AVIC: Attempted access by guest to a vAPIC register not handled by AVIC
291 * hardware. */
292# define SVM_EXIT_AVIC_NOACCEL 0x402
293/** The maximum possible exit value. */
294# define SVM_EXIT_MAX (SVM_EXIT_AVIC_NOACCEL)
295/** @} */
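/*
 * A minimal sketch mapping a few of the basic exit reasons (as read from
 * SVMVMCBCTRL::u64ExitCode after a #VMEXIT) to descriptions; svmExampleExitName
 * is hypothetical, not VirtualBox API.
 */
DECLINLINE(const char *) svmExampleExitName(uint64_t uExitCode)
{
    switch (uExitCode)
    {
        case SVM_EXIT_CPUID:   return "CPUID intercept";
        case SVM_EXIT_IOIO:    return "IN/OUT intercept (EXITINFO1 holds an SVMIOIOEXITINFO value)";
        case SVM_EXIT_MSR:     return "RDMSR/WRMSR intercept (EXITINFO1: 0 = read, 1 = write)";
        case SVM_EXIT_NPF:     return "Nested page fault (EXITINFO2 holds the guest-physical address)";
        case SVM_EXIT_INVALID: return "Invalid guest state in the VMCB";
        default:               return "Other exit reason";
    }
}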
296#endif /* !IN_REM_R3 */
297
298
299/** @name SVMVMCB.u64ExitInfo2 for task switches
300 * @{
301 */
302/** Set to 1 if the task switch was caused by an IRET; else cleared to 0. */
303#define SVM_EXIT2_TASK_SWITCH_IRET RT_BIT_64(36)
304/** Set to 1 if the task switch was caused by a far jump; else cleared to 0. */
305#define SVM_EXIT2_TASK_SWITCH_JUMP RT_BIT_64(38)
306/** Set to 1 if the task switch has an error code; else cleared to 0. */
307#define SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE RT_BIT_64(44)
308/** The value of EFLAGS.RF that would be saved in the outgoing TSS if the task switch were not intercepted. */
309#define SVM_EXIT2_TASK_SWITCH_EFLAGS_RF RT_BIT_64(48)
310/** @} */
311
312/** @name SVMVMCB.u64ExitInfo1 for MSR accesses
313 * @{
314 */
315/** The access was a read MSR. */
316#define SVM_EXIT1_MSR_READ 0x0
317/** The access was a write MSR. */
318#define SVM_EXIT1_MSR_WRITE 0x1
319/** @} */
320
321/** @name SVMVMCB.u64ExitInfo1 for Mov CRx accesses.
322 * @{
323 */
324/** The mask of whether the access was via a Mov CRx instruction. */
325#define SVM_EXIT1_MOV_CRX_MASK RT_BIT_64(63)
326/** The mask for the GPR number of the Mov CRx instruction. */
327#define SVM_EXIT1_MOV_CRX_GPR_NUMBER 0xf
328/** @} */
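/*
 * A minimal sketch of decoding EXITINFO1 for a CRx access when the CPU provides
 * decode assist information; svmExampleIsMovCRx is hypothetical, not VirtualBox API.
 */
DECLINLINE(bool) svmExampleIsMovCRx(uint64_t uExitInfo1, uint8_t *piGpr)
{
    if (uExitInfo1 & SVM_EXIT1_MOV_CRX_MASK) /* Set when a MOV CRx instruction caused the exit. */
    {
        *piGpr = (uint8_t)(uExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER); /* GPR operand in the low 4 bits. */
        return true;
    }
    return false; /* LMSW/CLTS access or no decode assist information. */
}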
329
330/** @name SVMVMCB.u64ExitInfo1 for Mov DRx accesses.
331 * @{
332 */
333/** The mask for the GPR number of the Mov DRx instruction. */
334#define SVM_EXIT1_MOV_DRX_GPR_NUMBER 0xf
335/** @} */
336
337/** @name SVMVMCB.ctrl.u64InterceptCtrl
338 * @{
339 */
340/** Intercept INTR (physical maskable interrupt). */
341#define SVM_CTRL_INTERCEPT_INTR RT_BIT_64(0)
342/** Intercept NMI. */
343#define SVM_CTRL_INTERCEPT_NMI RT_BIT_64(1)
344/** Intercept SMI. */
345#define SVM_CTRL_INTERCEPT_SMI RT_BIT_64(2)
346/** Intercept INIT. */
347#define SVM_CTRL_INTERCEPT_INIT RT_BIT_64(3)
348/** Intercept VINTR (virtual maskable interrupt). */
349#define SVM_CTRL_INTERCEPT_VINTR RT_BIT_64(4)
350/** Intercept CR0 writes that change bits other than CR0.TS or CR0.MP */
351#define SVM_CTRL_INTERCEPT_CR0_SEL_WRITE RT_BIT_64(5)
352/** Intercept reads of IDTR. */
353#define SVM_CTRL_INTERCEPT_IDTR_READS RT_BIT_64(6)
354/** Intercept reads of GDTR. */
355#define SVM_CTRL_INTERCEPT_GDTR_READS RT_BIT_64(7)
356/** Intercept reads of LDTR. */
357#define SVM_CTRL_INTERCEPT_LDTR_READS RT_BIT_64(8)
358/** Intercept reads of TR. */
359#define SVM_CTRL_INTERCEPT_TR_READS RT_BIT_64(9)
360/** Intercept writes of IDTR. */
361#define SVM_CTRL_INTERCEPT_IDTR_WRITES RT_BIT_64(10)
362/** Intercept writes of GDTR. */
363#define SVM_CTRL_INTERCEPT_GDTR_WRITES RT_BIT_64(11)
364/** Intercept writes of LDTR. */
365#define SVM_CTRL_INTERCEPT_LDTR_WRITES RT_BIT_64(12)
366/** Intercept writes of TR. */
367#define SVM_CTRL_INTERCEPT_TR_WRITES RT_BIT_64(13)
368/** Intercept RDTSC instruction. */
369#define SVM_CTRL_INTERCEPT_RDTSC RT_BIT_64(14)
370/** Intercept RDPMC instruction. */
371#define SVM_CTRL_INTERCEPT_RDPMC RT_BIT_64(15)
372/** Intercept PUSHF instruction. */
373#define SVM_CTRL_INTERCEPT_PUSHF RT_BIT_64(16)
374/** Intercept POPF instruction. */
375#define SVM_CTRL_INTERCEPT_POPF RT_BIT_64(17)
376/** Intercept CPUID instruction. */
377#define SVM_CTRL_INTERCEPT_CPUID RT_BIT_64(18)
378/** Intercept RSM instruction. */
379#define SVM_CTRL_INTERCEPT_RSM RT_BIT_64(19)
380/** Intercept IRET instruction. */
381#define SVM_CTRL_INTERCEPT_IRET RT_BIT_64(20)
382/** Intercept INTn instruction. */
383#define SVM_CTRL_INTERCEPT_INTN RT_BIT_64(21)
384/** Intercept INVD instruction. */
385#define SVM_CTRL_INTERCEPT_INVD RT_BIT_64(22)
386/** Intercept PAUSE instruction. */
387#define SVM_CTRL_INTERCEPT_PAUSE RT_BIT_64(23)
388/** Intercept HLT instruction. */
389#define SVM_CTRL_INTERCEPT_HLT RT_BIT_64(24)
390/** Intercept INVLPG instruction. */
391#define SVM_CTRL_INTERCEPT_INVLPG RT_BIT_64(25)
392/** Intercept INVLPGA instruction. */
393#define SVM_CTRL_INTERCEPT_INVLPGA RT_BIT_64(26)
394/** IOIO_PROT Intercept IN/OUT accesses to selected ports. */
395#define SVM_CTRL_INTERCEPT_IOIO_PROT RT_BIT_64(27)
396/** MSR_PROT Intercept RDMSR or WRMSR accesses to selected MSRs. */
397#define SVM_CTRL_INTERCEPT_MSR_PROT RT_BIT_64(28)
398/** Intercept task switches. */
399#define SVM_CTRL_INTERCEPT_TASK_SWITCH RT_BIT_64(29)
400/** FERR_FREEZE: intercept processor "freezing" during legacy FERR handling. */
401#define SVM_CTRL_INTERCEPT_FERR_FREEZE RT_BIT_64(30)
402/** Intercept shutdown events. */
403#define SVM_CTRL_INTERCEPT_SHUTDOWN RT_BIT_64(31)
404/** Intercept VMRUN instruction. */
405#define SVM_CTRL_INTERCEPT_VMRUN RT_BIT_64(32 + 0)
406/** Intercept VMMCALL instruction. */
407#define SVM_CTRL_INTERCEPT_VMMCALL RT_BIT_64(32 + 1)
408/** Intercept VMLOAD instruction. */
409#define SVM_CTRL_INTERCEPT_VMLOAD RT_BIT_64(32 + 2)
410/** Intercept VMSAVE instruction. */
411#define SVM_CTRL_INTERCEPT_VMSAVE RT_BIT_64(32 + 3)
412/** Intercept STGI instruction. */
413#define SVM_CTRL_INTERCEPT_STGI RT_BIT_64(32 + 4)
414/** Intercept CLGI instruction. */
415#define SVM_CTRL_INTERCEPT_CLGI RT_BIT_64(32 + 5)
416/** Intercept SKINIT instruction. */
417#define SVM_CTRL_INTERCEPT_SKINIT RT_BIT_64(32 + 6)
418/** Intercept RDTSCP instruction. */
419#define SVM_CTRL_INTERCEPT_RDTSCP RT_BIT_64(32 + 7)
420/** Intercept ICEBP instruction. */
421#define SVM_CTRL_INTERCEPT_ICEBP RT_BIT_64(32 + 8)
422/** Intercept WBINVD instruction. */
423#define SVM_CTRL_INTERCEPT_WBINVD RT_BIT_64(32 + 9)
424/** Intercept MONITOR instruction. */
425#define SVM_CTRL_INTERCEPT_MONITOR RT_BIT_64(32 + 10)
426/** Intercept MWAIT instruction unconditionally. */
427#define SVM_CTRL_INTERCEPT_MWAIT RT_BIT_64(32 + 11)
428/** Intercept MWAIT instruction when armed. */
429#define SVM_CTRL_INTERCEPT_MWAIT_ARMED RT_BIT_64(32 + 12)
430/** Intercept XSETBV instruction. */
431#define SVM_CTRL_INTERCEPT_XSETBV RT_BIT_64(32 + 13)
432/* Bit 14 - Reserved, SBZ. */
433/** Intercept EFER writes after guest instruction finishes. */
434#define SVM_CTRL_INTERCEPT_EFER_WRITES_TRAP RT_BIT_64(32 + 15)
435/** Intercept CR0 writes after guest instruction finishes. */
436#define SVM_CTRL_INTERCEPT_CR0_WRITES_TRAP RT_BIT_64(32 + 16)
437/** Intercept CR1 writes after guest instruction finishes. */
438#define SVM_CTRL_INTERCEPT_CR1_WRITES_TRAP RT_BIT_64(32 + 17)
439/** Intercept CR2 writes after guest instruction finishes. */
440#define SVM_CTRL_INTERCEPT_CR2_WRITES_TRAP RT_BIT_64(32 + 18)
441/** Intercept CR3 writes after guest instruction finishes. */
442#define SVM_CTRL_INTERCEPT_CR3_WRITES_TRAP RT_BIT_64(32 + 19)
443/** Intercept CR4 writes after guest instruction finishes. */
444#define SVM_CTRL_INTERCEPT_CR4_WRITES_TRAP RT_BIT_64(32 + 20)
445/** Intercept CR5 writes after guest instruction finishes. */
446#define SVM_CTRL_INTERCEPT_CR5_WRITES_TRAP RT_BIT_64(32 + 21)
447/** Intercept CR6 writes after guest instruction finishes. */
448#define SVM_CTRL_INTERCEPT_CR6_WRITES_TRAP RT_BIT_64(32 + 22)
449/** Intercept CR7 writes after guest instruction finishes. */
450#define SVM_CTRL_INTERCEPT_CR7_WRITES_TRAP RT_BIT_64(32 + 23)
451/** Intercept CR8 writes after guest instruction finishes. */
452#define SVM_CTRL_INTERCEPT_CR8_WRITES_TRAP RT_BIT_64(32 + 24)
453/** Intercept CR9 writes after guest instruction finishes. */
454#define SVM_CTRL_INTERCEPT_CR9_WRITES_TRAP RT_BIT_64(32 + 25)
455/** Intercept CR10 writes after guest instruction finishes. */
456#define SVM_CTRL_INTERCEPT_CR10_WRITES_TRAP RT_BIT_64(32 + 26)
457/** Intercept CR11 writes after guest instruction finishes. */
458#define SVM_CTRL_INTERCEPT_CR11_WRITES_TRAP RT_BIT_64(32 + 27)
459/** Intercept CR12 writes after guest instruction finishes. */
460#define SVM_CTRL_INTERCEPT_CR12_WRITES_TRAP RT_BIT_64(32 + 28)
461/** Intercept CR13 writes after guest instruction finishes. */
462#define SVM_CTRL_INTERCEPT_CR13_WRITES_TRAP RT_BIT_64(32 + 29)
463/** Intercept CR14 writes after guest instruction finishes. */
464#define SVM_CTRL_INTERCEPT_CR14_WRITES_TRAP RT_BIT_64(32 + 30)
465/** Intercept CR15 writes after guest instruction finishes. */
466#define SVM_CTRL_INTERCEPT_CR15_WRITES_TRAP RT_BIT_64(32 + 31)
467/** @} */
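/*
 * A minimal sketch of a baseline intercept mask a VMM might place in
 * SVMVMCBCTRL::u64InterceptCtrl; svmExampleBaselineCtrlIntercepts is hypothetical,
 * not VirtualBox API.  The VMRUN intercept must always be set per the AMD APM.
 */
DECLINLINE(uint64_t) svmExampleBaselineCtrlIntercepts(void)
{
    return SVM_CTRL_INTERCEPT_INTR      /* physical interrupts cause #VMEXIT */
         | SVM_CTRL_INTERCEPT_NMI
         | SVM_CTRL_INTERCEPT_CPUID
         | SVM_CTRL_INTERCEPT_HLT
         | SVM_CTRL_INTERCEPT_IOIO_PROT /* honour the IO permission bitmap */
         | SVM_CTRL_INTERCEPT_MSR_PROT  /* honour the MSR permission bitmap */
         | SVM_CTRL_INTERCEPT_SHUTDOWN
         | SVM_CTRL_INTERCEPT_VMRUN;    /* mandatory */
}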
468
469/** @name SVMINTCTRL.u3Type
470 * @{
471 */
472/** External or virtual interrupt. */
473#define SVM_EVENT_EXTERNAL_IRQ 0
474/** Non-maskable interrupt. */
475#define SVM_EVENT_NMI 2
476/** Exception; fault or trap. */
477#define SVM_EVENT_EXCEPTION 3
478/** Software interrupt. */
479#define SVM_EVENT_SOFTWARE_INT 4
480/** @} */
481
482/** @name SVMVMCB.ctrl.TLBCtrl.n.u8TLBFlush
483 * @{
484 */
485/** Flush nothing. */
486#define SVM_TLB_FLUSH_NOTHING 0
487/** Flush entire TLB (host+guest entries) */
488#define SVM_TLB_FLUSH_ENTIRE 1
489/** Flush this guest's TLB entries (by ASID) */
490#define SVM_TLB_FLUSH_SINGLE_CONTEXT 3
491/** Flush this guest's non-global TLB entries (by ASID) */
492#define SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS 7
493/** @} */
494
495/**
496 * SVM selector/segment register type.
497 */
498typedef struct
499{
500 uint16_t u16Sel;
501 uint16_t u16Attr;
502 uint32_t u32Limit;
503 uint64_t u64Base; /**< Only lower 32 bits are implemented for CS, DS, ES & SS. */
504} SVMSELREG;
505AssertCompileSize(SVMSELREG, 16);
506/** Pointer to the SVMSELREG struct. */
507typedef SVMSELREG *PSVMSELREG;
508/** Pointer to a const SVMSELREG struct. */
509typedef const SVMSELREG *PCSVMSELREG;
510
511/**
512 * SVM GDTR/IDTR type.
513 */
514typedef struct
515{
516 uint16_t u16Reserved0;
517 uint16_t u16Reserved1;
518 uint32_t u32Limit; /**< Only lower 16 bits are implemented. */
519 uint64_t u64Base;
520} SVMXDTR;
521AssertCompileSize(SVMXDTR, 16);
522typedef SVMXDTR SVMIDTR;
523typedef SVMXDTR SVMGDTR;
524/** Pointer to the SVMXDTR struct. */
525typedef SVMXDTR *PSVMXDTR;
526/** Pointer to a const SVMXDTR struct. */
527typedef const SVMXDTR *PCSVMXDTR;
528
529/**
530 * SVM Event injection structure (EVENTINJ and EXITINTINFO).
531 */
532typedef union
533{
534 struct
535 {
536 uint32_t u8Vector : 8;
537 uint32_t u3Type : 3;
538 uint32_t u1ErrorCodeValid : 1;
539 uint32_t u19Reserved : 19;
540 uint32_t u1Valid : 1;
541 uint32_t u32ErrorCode : 32;
542 } n;
543 uint64_t u;
544} SVMEVENT;
545/** Pointer to the SVMEVENT union. */
546typedef SVMEVENT *PSVMEVENT;
547/** Pointer to a const SVMEVENT union. */
548typedef const SVMEVENT *PCSVMEVENT;
549
550/** Gets the event type given an SVMEVENT parameter. */
551#define SVM_EVENT_GET_TYPE(a_SvmEvent) (((a_SvmEvent) >> 8) & 7)
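/*
 * A minimal sketch of building an EVENTINJ value for injecting a #GP(0) exception
 * and reading its type back; svmExampleBuildXcptGpEvent is hypothetical, not
 * VirtualBox API.
 */
DECLINLINE(uint64_t) svmExampleBuildXcptGpEvent(void)
{
    SVMEVENT Event;
    Event.u                  = 0;
    Event.n.u8Vector         = 13;                  /* #GP */
    Event.n.u3Type           = SVM_EVENT_EXCEPTION;
    Event.n.u1ErrorCodeValid = 1;
    Event.n.u32ErrorCode     = 0;
    Event.n.u1Valid          = 1;
    Assert(SVM_EVENT_GET_TYPE(Event.u) == SVM_EVENT_EXCEPTION);
    return Event.u;                                 /* value for SVMVMCBCTRL::EventInject.u */
}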
552
553/**
554 * SVM Interrupt control structure (Virtual Interrupt Control).
555 */
556typedef union
557{
558 struct
559 {
560 uint32_t u8VTPR : 8; /* V_TPR */
561 uint32_t u1VIrqPending : 1; /* V_IRQ */
562 uint32_t u1VGif : 1; /* VGIF */
563 uint32_t u6Reserved : 6;
564 uint32_t u4VIntrPrio : 4; /* V_INTR_PRIO */
565 uint32_t u1IgnoreTPR : 1; /* V_IGN_TPR */
566 uint32_t u3Reserved : 3;
567 uint32_t u1VIntrMasking : 1; /* V_INTR_MASKING */
568 uint32_t u1VGifEnable : 1; /* VGIF enable */
569 uint32_t u5Reserved : 5;
570 uint32_t u1AvicEnable : 1; /* AVIC enable */
571 uint32_t u8VIntrVector : 8; /* V_INTR_VECTOR */
572 uint32_t u24Reserved : 24;
573 } n;
574 uint64_t u;
575} SVMINTCTRL;
576/** Pointer to an SVMINTCTRL structure. */
577typedef SVMINTCTRL *PSVMINTCTRL;
578/** Pointer to a const SVMINTCTRL structure. */
579typedef const SVMINTCTRL *PCSVMINTCTRL;
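/*
 * A minimal sketch of requesting delivery of a virtual interrupt through the VMCB
 * interrupt control field; svmExampleSetVirtIntr is hypothetical, not VirtualBox API.
 */
DECLINLINE(void) svmExampleSetVirtIntr(PSVMINTCTRL pIntCtrl, uint8_t uVector)
{
    pIntCtrl->n.u1VIrqPending = 1;            /* V_IRQ: a virtual interrupt is pending */
    pIntCtrl->n.u8VIntrVector = uVector;      /* V_INTR_VECTOR: vector to deliver */
    pIntCtrl->n.u4VIntrPrio   = uVector >> 4; /* V_INTR_PRIO: the vector's priority class */
    pIntCtrl->n.u1IgnoreTPR   = 0;            /* honour V_TPR when deciding delivery */
}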
580
581/**
582 * SVM TLB control structure.
583 */
584typedef union
585{
586 struct
587 {
588 uint32_t u32ASID : 32;
589 uint32_t u8TLBFlush : 8;
590 uint32_t u24Reserved : 24;
591 } n;
592 uint64_t u;
593} SVMTLBCTRL;
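/*
 * A minimal sketch of programming the TLB control field to flush this guest's TLB
 * entries by ASID on the next VMRUN; svmExampleFlushByAsid is hypothetical, not
 * VirtualBox API.
 */
DECLINLINE(void) svmExampleFlushByAsid(SVMTLBCTRL *pTlbCtrl, uint32_t uAsid)
{
    pTlbCtrl->n.u32ASID    = uAsid;                        /* the guest's ASID */
    pTlbCtrl->n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT; /* flush-by-ASID */
}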
594
595/**
596 * SVM IOIO exit info. structure (EXITINFO1 for IOIO intercepts).
597 */
598typedef union
599{
600 struct
601 {
602 uint32_t u1Type : 1; /**< Bit 0: 0 = out, 1 = in */
603 uint32_t u1Reserved : 1; /**< Bit 1: Reserved */
604 uint32_t u1Str : 1; /**< Bit 2: String I/O (1) or not (0). */
605 uint32_t u1Rep : 1; /**< Bit 3: Repeat prefixed string I/O. */
606 uint32_t u1Op8 : 1; /**< Bit 4: 8-bit operand. */
607 uint32_t u1Op16 : 1; /**< Bit 5: 16-bit operand. */
608 uint32_t u1Op32 : 1; /**< Bit 6: 32-bit operand. */
609 uint32_t u1Addr16 : 1; /**< Bit 7: 16-bit address size. */
610 uint32_t u1Addr32 : 1; /**< Bit 8: 32-bit address size. */
611 uint32_t u1Addr64 : 1; /**< Bit 9: 64-bit address size. */
612 uint32_t u3Seg : 3; /**< Bits 12:10: Effective segment number. Added w/ decode assist in APM v3.17. */
613 uint32_t u3Reserved : 3;
614 uint32_t u16Port : 16; /**< Bits 31:16: Port number. */
615 } n;
616 uint32_t u;
617} SVMIOIOEXITINFO;
618/** Pointer to an SVM IOIO exit info. structure. */
619typedef SVMIOIOEXITINFO *PSVMIOIOEXITINFO;
620/** Pointer to a const SVM IOIO exit info. structure. */
621typedef const SVMIOIOEXITINFO *PCSVMIOIOEXITINFO;
622
623/** 8-bit IO transfer. */
624#define SVM_IOIO_8_BIT_OP RT_BIT_32(4)
625/** 16-bit IO transfer. */
626#define SVM_IOIO_16_BIT_OP RT_BIT_32(5)
627/** 32-bit IO transfer. */
628#define SVM_IOIO_32_BIT_OP RT_BIT_32(6)
629/** Number of bits to shift right to get the operand sizes. */
630#define SVM_IOIO_OP_SIZE_SHIFT 4
631/** Mask of all possible IO transfer sizes. */
632#define SVM_IOIO_OP_SIZE_MASK (SVM_IOIO_8_BIT_OP | SVM_IOIO_16_BIT_OP | SVM_IOIO_32_BIT_OP)
633/** 16-bit address for the IO buffer. */
634#define SVM_IOIO_16_BIT_ADDR RT_BIT_32(7)
635/** 32-bit address for the IO buffer. */
636#define SVM_IOIO_32_BIT_ADDR RT_BIT_32(8)
637/** 64-bit address for the IO buffer. */
638#define SVM_IOIO_64_BIT_ADDR RT_BIT_32(9)
639/** Number of bits to shift right to get the address sizes. */
640#define SVM_IOIO_ADDR_SIZE_SHIFT 7
641/** Mask of all the IO address sizes. */
642#define SVM_IOIO_ADDR_SIZE_MASK (SVM_IOIO_16_BIT_ADDR | SVM_IOIO_32_BIT_ADDR | SVM_IOIO_64_BIT_ADDR)
643/** Number of bits to shift right to get the IO port number. */
644#define SVM_IOIO_PORT_SHIFT 16
645/** IO write. */
646#define SVM_IOIO_WRITE 0
647/** IO read. */
648#define SVM_IOIO_READ 1
649/**
650 * SVM IOIO transfer type.
651 */
652typedef enum
653{
654 SVMIOIOTYPE_OUT = SVM_IOIO_WRITE,
655 SVMIOIOTYPE_IN = SVM_IOIO_READ
656} SVMIOIOTYPE;
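/*
 * A minimal sketch of decoding EXITINFO1 of an SVM_EXIT_IOIO intercept using the
 * structure and masks above; svmExampleDecodeIoio is hypothetical, not VirtualBox API.
 */
DECLINLINE(void) svmExampleDecodeIoio(uint32_t uExitInfo1, uint16_t *puPort, SVMIOIOTYPE *penmType, uint8_t *pcbOp)
{
    SVMIOIOEXITINFO IoExitInfo;
    IoExitInfo.u = uExitInfo1;
    *puPort   = IoExitInfo.n.u16Port;                                   /* bits 31:16 */
    *penmType = IoExitInfo.n.u1Type ? SVMIOIOTYPE_IN : SVMIOIOTYPE_OUT; /* bit 0: 0 = OUT, 1 = IN */
    *pcbOp    = (uint8_t)((uExitInfo1 & SVM_IOIO_OP_SIZE_MASK) >> SVM_IOIO_OP_SIZE_SHIFT); /* 1, 2 or 4 bytes */
}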
657
658/**
659 * SVM AVIC.
660 */
661typedef union
662{
663 struct
664 {
665 uint64_t u12Reserved0 : 12;
666 uint64_t u40Addr : 40;
667 uint64_t u12Reserved1 : 12;
668 } n;
669 uint64_t u;
670} SVMAVIC;
671AssertCompileSize(SVMAVIC, 8);
672
673/**
674 * SVM AVIC PHYSICAL_TABLE pointer.
675 */
676typedef union
677{
678 struct
679 {
680 uint64_t u8LastGuestCoreId : 8;
681 uint64_t u4Reserved : 4;
682 uint64_t u40Addr : 40;
683 uint64_t u12Reserved : 12;
684 } n;
685 uint64_t u;
686} SVMAVICPHYS;
687AssertCompileSize(SVMAVICPHYS, 8);
688
689/**
690 * SVM Nested Paging struct.
691 */
692typedef union
693{
694 struct
695 {
696 uint32_t u1NestedPaging : 1;
697 uint32_t u1Sev : 1;
698 uint32_t u1SevEs : 1;
699 uint32_t u29Reserved : 29;
700 } n;
701 uint64_t u;
702} SVMNP;
703AssertCompileSize(SVMNP, 8);
704
705/**
706 * SVM Interrupt shadow struct.
707 */
708typedef union
709{
710 struct
711 {
712 uint32_t u1IntShadow : 1;
713 uint32_t u1GuestIntMask : 1;
714 uint32_t u30Reserved : 30;
715 } n;
716 uint64_t u;
717} SVMINTSHADOW;
718AssertCompileSize(SVMINTSHADOW, 8);
719
720/**
721 * SVM LBR virtualization struct.
722 */
723typedef union
724{
725 struct
726 {
727 uint32_t u1LbrVirt : 1;
728 uint32_t u1VirtVmsaveVmload : 1;
729 uint32_t u30Reserved : 30;
730 } n;
731 uint64_t u;
732} SVMLBRVIRT;
733AssertCompileSize(SVMLBRVIRT, 8);
734
735/** Maximum number of bytes in the Guest-instruction bytes field. */
736#define SVM_CTRL_GUEST_INSTR_BYTES_MAX 15
737
738/**
739 * SVM VMCB control area.
740 */
741#pragma pack(1)
742typedef struct
743{
744 /** Offset 0x00 - Intercept reads of CR0-CR15. */
745 uint16_t u16InterceptRdCRx;
746 /** Offset 0x02 - Intercept writes to CR0-CR15. */
747 uint16_t u16InterceptWrCRx;
748 /** Offset 0x04 - Intercept reads of DR0-DR15. */
749 uint16_t u16InterceptRdDRx;
750 /** Offset 0x06 - Intercept writes to DR0-DR15. */
751 uint16_t u16InterceptWrDRx;
752 /** Offset 0x08 - Intercept exception vectors 0-31. */
753 uint32_t u32InterceptXcpt;
754 /** Offset 0x0c - Intercept control. */
755 uint64_t u64InterceptCtrl;
756 /** Offset 0x14-0x3f - Reserved. */
757 uint8_t u8Reserved0[0x3c - 0x14];
758 /** Offset 0x3c - PAUSE filter threshold. */
759 uint16_t u16PauseFilterThreshold;
760 /** Offset 0x3e - PAUSE intercept filter count. */
761 uint16_t u16PauseFilterCount;
762 /** Offset 0x40 - Physical address of IOPM. */
763 uint64_t u64IOPMPhysAddr;
764 /** Offset 0x48 - Physical address of MSRPM. */
765 uint64_t u64MSRPMPhysAddr;
766 /** Offset 0x50 - TSC Offset. */
767 uint64_t u64TSCOffset;
768 /** Offset 0x58 - TLB control field. */
769 SVMTLBCTRL TLBCtrl;
770 /** Offset 0x60 - Interrupt control field. */
771 SVMINTCTRL IntCtrl;
772 /** Offset 0x68 - Interrupt shadow. */
773 SVMINTSHADOW IntShadow;
774 /** Offset 0x70 - Exit code. */
775 uint64_t u64ExitCode;
776 /** Offset 0x78 - Exit info 1. */
777 uint64_t u64ExitInfo1;
778 /** Offset 0x80 - Exit info 2. */
779 uint64_t u64ExitInfo2;
780 /** Offset 0x88 - Exit Interrupt info. */
781 SVMEVENT ExitIntInfo;
782 /** Offset 0x90 - Nested Paging control. */
783 SVMNP NestedPagingCtrl;
784 /** Offset 0x98 - AVIC APIC BAR. */
785 SVMAVIC AvicBar;
786 /** Offset 0xa0-0xa7 - Reserved. */
787 uint8_t u8Reserved1[0xa8 - 0xa0];
788 /** Offset 0xa8 - Event injection. */
789 SVMEVENT EventInject;
790 /** Offset 0xb0 - Host CR3 for nested paging. */
791 uint64_t u64NestedPagingCR3;
792 /** Offset 0xb8 - LBR Virtualization. */
793 SVMLBRVIRT LbrVirt;
794 /** Offset 0xc0 - VMCB Clean Bits. */
795 uint32_t u32VmcbCleanBits;
796 uint32_t u32Reserved0;
797 /** Offset 0xc8 - Next sequential instruction pointer. */
798 uint64_t u64NextRIP;
799 /** Offset 0xd0 - Number of bytes fetched. */
800 uint8_t cbInstrFetched;
801 /** Offset 0xd1 - Guest instruction bytes. */
802 uint8_t abInstr[SVM_CTRL_GUEST_INSTR_BYTES_MAX];
803 /** Offset 0xe0 - AVIC APIC_BACKING_PAGE pointer. */
804 SVMAVIC AvicBackingPagePtr;
805 /** Offset 0xe8-0xef - Reserved. */
806 uint8_t u8Reserved2[0xf0 - 0xe8];
807 /** Offset 0xf0 - AVIC LOGICAL_TABLE pointer. */
808 SVMAVIC AvicLogicalTablePtr;
809 /** Offset 0xf8 - AVIC PHYSICAL_TABLE pointer. */
810 SVMAVICPHYS AvicPhysicalTablePtr;
811} SVMVMCBCTRL;
812#pragma pack()
813/** Pointer to the SVMVMCBCTRL structure. */
814typedef SVMVMCBCTRL *PSVMVMCBCTRL;
815/** Pointer to a const SVMVMCBCTRL structure. */
816typedef const SVMVMCBCTRL *PCSVMVMCBCTRL;
817AssertCompileSize(SVMVMCBCTRL, 0x100);
818AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptRdCRx, 0x00);
819AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptWrCRx, 0x02);
820AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptRdDRx, 0x04);
821AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptWrDRx, 0x06);
822AssertCompileMemberOffset(SVMVMCBCTRL, u32InterceptXcpt, 0x08);
823AssertCompileMemberOffset(SVMVMCBCTRL, u64InterceptCtrl, 0x0c);
824AssertCompileMemberOffset(SVMVMCBCTRL, u8Reserved0, 0x14);
825AssertCompileMemberOffset(SVMVMCBCTRL, u16PauseFilterThreshold, 0x3c);
826AssertCompileMemberOffset(SVMVMCBCTRL, u16PauseFilterCount, 0x3e);
827AssertCompileMemberOffset(SVMVMCBCTRL, u64IOPMPhysAddr, 0x40);
828AssertCompileMemberOffset(SVMVMCBCTRL, u64MSRPMPhysAddr, 0x48);
829AssertCompileMemberOffset(SVMVMCBCTRL, u64TSCOffset, 0x50);
830AssertCompileMemberOffset(SVMVMCBCTRL, TLBCtrl, 0x58);
831AssertCompileMemberOffset(SVMVMCBCTRL, IntCtrl, 0x60);
832AssertCompileMemberOffset(SVMVMCBCTRL, IntShadow, 0x68);
833AssertCompileMemberOffset(SVMVMCBCTRL, u64ExitCode, 0x70);
834AssertCompileMemberOffset(SVMVMCBCTRL, u64ExitInfo1, 0x78);
835AssertCompileMemberOffset(SVMVMCBCTRL, u64ExitInfo2, 0x80);
836AssertCompileMemberOffset(SVMVMCBCTRL, ExitIntInfo, 0x88);
837AssertCompileMemberOffset(SVMVMCBCTRL, NestedPagingCtrl, 0x90);
838AssertCompileMemberOffset(SVMVMCBCTRL, AvicBar, 0x98);
839AssertCompileMemberOffset(SVMVMCBCTRL, u8Reserved1, 0xa0);
840AssertCompileMemberOffset(SVMVMCBCTRL, EventInject, 0xa8);
841AssertCompileMemberOffset(SVMVMCBCTRL, u64NestedPagingCR3, 0xb0);
842AssertCompileMemberOffset(SVMVMCBCTRL, LbrVirt, 0xb8);
843AssertCompileMemberOffset(SVMVMCBCTRL, u32VmcbCleanBits, 0xc0);
844AssertCompileMemberOffset(SVMVMCBCTRL, u64NextRIP, 0xc8);
845AssertCompileMemberOffset(SVMVMCBCTRL, cbInstrFetched, 0xd0);
846AssertCompileMemberOffset(SVMVMCBCTRL, abInstr, 0xd1);
847AssertCompileMemberOffset(SVMVMCBCTRL, AvicBackingPagePtr, 0xe0);
848AssertCompileMemberOffset(SVMVMCBCTRL, u8Reserved2, 0xe8);
849AssertCompileMemberOffset(SVMVMCBCTRL, AvicLogicalTablePtr, 0xf0);
850AssertCompileMemberOffset(SVMVMCBCTRL, AvicPhysicalTablePtr, 0xf8);
851AssertCompileMemberSize(SVMVMCBCTRL, abInstr, 0x0f);
852
853/**
854 * SVM VMCB state save area.
855 */
856#pragma pack(1)
857typedef struct
858{
859 /** Offset 0x400 - Guest ES register + hidden parts. */
860 SVMSELREG ES;
861 /** Offset 0x410 - Guest CS register + hidden parts. */
862 SVMSELREG CS;
863 /** Offset 0x420 - Guest SS register + hidden parts. */
864 SVMSELREG SS;
865 /** Offset 0x430 - Guest DS register + hidden parts. */
866 SVMSELREG DS;
867 /** Offset 0x440 - Guest FS register + hidden parts. */
868 SVMSELREG FS;
869 /** Offset 0x450 - Guest GS register + hidden parts. */
870 SVMSELREG GS;
871 /** Offset 0x460 - Guest GDTR register. */
872 SVMGDTR GDTR;
873 /** Offset 0x470 - Guest LDTR register + hidden parts. */
874 SVMSELREG LDTR;
875 /** Offset 0x480 - Guest IDTR register. */
876 SVMIDTR IDTR;
877 /** Offset 0x490 - Guest TR register + hidden parts. */
878 SVMSELREG TR;
879 /** Offset 0x4A0-0x4CA - Reserved. */
880 uint8_t u8Reserved0[0x4cb - 0x4a0];
881 /** Offset 0x4CB - CPL. */
882 uint8_t u8CPL;
883 /** Offset 0x4CC-0x4CF - Reserved. */
884 uint8_t u8Reserved1[0x4d0 - 0x4cc];
885 /** Offset 0x4D0 - EFER. */
886 uint64_t u64EFER;
887 /** Offset 0x4D8-0x547 - Reserved. */
888 uint8_t u8Reserved2[0x548 - 0x4d8];
889 /** Offset 0x548 - CR4. */
890 uint64_t u64CR4;
891 /** Offset 0x550 - CR3. */
892 uint64_t u64CR3;
893 /** Offset 0x558 - CR0. */
894 uint64_t u64CR0;
895 /** Offset 0x560 - DR7. */
896 uint64_t u64DR7;
897 /** Offset 0x568 - DR6. */
898 uint64_t u64DR6;
899 /** Offset 0x570 - RFLAGS. */
900 uint64_t u64RFlags;
901 /** Offset 0x578 - RIP. */
902 uint64_t u64RIP;
903 /** Offset 0x580-0x5D7 - Reserved. */
904 uint8_t u8Reserved3[0x5d8 - 0x580];
905 /** Offset 0x5D8 - RSP. */
906 uint64_t u64RSP;
907 /** Offset 0x5E0-0x5F7 - Reserved. */
908 uint8_t u8Reserved4[0x5f8 - 0x5e0];
909 /** Offset 0x5F8 - RAX. */
910 uint64_t u64RAX;
911 /** Offset 0x600 - STAR. */
912 uint64_t u64STAR;
913 /** Offset 0x608 - LSTAR. */
914 uint64_t u64LSTAR;
915 /** Offset 0x610 - CSTAR. */
916 uint64_t u64CSTAR;
917 /** Offset 0x618 - SFMASK. */
918 uint64_t u64SFMASK;
919 /** Offset 0x620 - KernelGSBase. */
920 uint64_t u64KernelGSBase;
921 /** Offset 0x628 - SYSENTER_CS. */
922 uint64_t u64SysEnterCS;
923 /** Offset 0x630 - SYSENTER_ESP. */
924 uint64_t u64SysEnterESP;
925 /** Offset 0x638 - SYSENTER_EIP. */
926 uint64_t u64SysEnterEIP;
927 /** Offset 0x640 - CR2. */
928 uint64_t u64CR2;
929 /** Offset 0x648-0x667 - Reserved. */
930 uint8_t u8Reserved5[0x668 - 0x648];
931 /** Offset 0x668 - PAT (Page Attribute Table) MSR. */
932 uint64_t u64PAT;
933 /** Offset 0x670 - DBGCTL. */
934 uint64_t u64DBGCTL;
935 /** Offset 0x678 - BR_FROM. */
936 uint64_t u64BR_FROM;
937 /** Offset 0x680 - BR_TO. */
938 uint64_t u64BR_TO;
939 /** Offset 0x688 - LASTEXCPFROM. */
940 uint64_t u64LASTEXCPFROM;
941 /** Offset 0x690 - LASTEXCPTO. */
942 uint64_t u64LASTEXCPTO;
943} SVMVMCBSTATESAVE;
944#pragma pack()
945/** Pointer to the SVMVMCBSTATESAVE structure. */
946typedef SVMVMCBSTATESAVE *PSVMVMCBSTATESAVE;
947/** Pointer to a const SVMVMCBSTATESAVE structure. */
948typedef const SVMVMCBSTATESAVE *PCSVMVMCBSTATESAVE;
949AssertCompileSize(SVMVMCBSTATESAVE, 0x298);
950AssertCompileMemberOffset(SVMVMCBSTATESAVE, ES, 0x400 - 0x400);
951AssertCompileMemberOffset(SVMVMCBSTATESAVE, CS, 0x410 - 0x400);
952AssertCompileMemberOffset(SVMVMCBSTATESAVE, SS, 0x420 - 0x400);
953AssertCompileMemberOffset(SVMVMCBSTATESAVE, DS, 0x430 - 0x400);
954AssertCompileMemberOffset(SVMVMCBSTATESAVE, FS, 0x440 - 0x400);
955AssertCompileMemberOffset(SVMVMCBSTATESAVE, GS, 0x450 - 0x400);
956AssertCompileMemberOffset(SVMVMCBSTATESAVE, GDTR, 0x460 - 0x400);
957AssertCompileMemberOffset(SVMVMCBSTATESAVE, LDTR, 0x470 - 0x400);
958AssertCompileMemberOffset(SVMVMCBSTATESAVE, IDTR, 0x480 - 0x400);
959AssertCompileMemberOffset(SVMVMCBSTATESAVE, TR, 0x490 - 0x400);
960AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved0, 0x4a0 - 0x400);
961AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8CPL, 0x4cb - 0x400);
962AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved1, 0x4cc - 0x400);
963AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64EFER, 0x4d0 - 0x400);
964AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved2, 0x4d8 - 0x400);
965AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR4, 0x548 - 0x400);
966AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR3, 0x550 - 0x400);
967AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR0, 0x558 - 0x400);
968AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64DR7, 0x560 - 0x400);
969AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64DR6, 0x568 - 0x400);
970AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64RFlags, 0x570 - 0x400);
971AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64RIP, 0x578 - 0x400);
972AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved3, 0x580 - 0x400);
973AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64RSP, 0x5d8 - 0x400);
974AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved4, 0x5e0 - 0x400);
975AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64RAX, 0x5f8 - 0x400);
976AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64STAR, 0x600 - 0x400);
977AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64LSTAR, 0x608 - 0x400);
978AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CSTAR, 0x610 - 0x400);
979AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64SFMASK, 0x618 - 0x400);
980AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64KernelGSBase, 0x620 - 0x400);
981AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64SysEnterCS, 0x628 - 0x400);
982AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64SysEnterESP, 0x630 - 0x400);
983AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64SysEnterEIP, 0x638 - 0x400);
984AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR2, 0x640 - 0x400);
985AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved5, 0x648 - 0x400);
986AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64PAT, 0x668 - 0x400);
987AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64DBGCTL, 0x670 - 0x400);
988AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64BR_FROM, 0x678 - 0x400);
989AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64BR_TO, 0x680 - 0x400);
990AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64LASTEXCPFROM, 0x688 - 0x400);
991AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64LASTEXCPTO, 0x690 - 0x400);
992
993/**
994 * SVM VM Control Block. (VMCB)
995 */
996#pragma pack(1)
997typedef struct SVMVMCB
998{
999 /** Offset 0x00 - Control area. */
1000 SVMVMCBCTRL ctrl;
1001 /** Offset 0x100-0x3FF - Reserved. */
1002 uint8_t u8Reserved0[0x400 - 0x100];
1003 /** Offset 0x400 - State save area. */
1004 SVMVMCBSTATESAVE guest;
1005 /** Offset 0x698-0xFFF- Reserved. */
1006 uint8_t u8Reserved1[0x1000 - 0x698];
1007} SVMVMCB;
1008#pragma pack()
1009/** Pointer to the SVMVMCB structure. */
1010typedef SVMVMCB *PSVMVMCB;
1011/** Pointer to a const SVMVMCB structure. */
1012typedef const SVMVMCB *PCSVMVMCB;
1013AssertCompileMemberOffset(SVMVMCB, ctrl, 0x00);
1014AssertCompileMemberOffset(SVMVMCB, u8Reserved0, 0x100);
1015AssertCompileMemberOffset(SVMVMCB, guest, 0x400);
1016AssertCompileMemberOffset(SVMVMCB, u8Reserved1, 0x698);
1017AssertCompileSize(SVMVMCB, 0x1000);
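/*
 * A minimal sketch of the control and state-save fields a VMM typically fills in a
 * zeroed, page-aligned VMCB before the first VMRUN; svmExampleInitVmcb and its
 * HCPhys parameters are hypothetical, not VirtualBox API.
 */
DECLINLINE(void) svmExampleInitVmcb(PSVMVMCB pVmcb, uint64_t HCPhysIopm, uint64_t HCPhysMsrpm, uint32_t uAsid)
{
    pVmcb->ctrl.u64InterceptCtrl  = SVM_CTRL_INTERCEPT_INTR | SVM_CTRL_INTERCEPT_VMRUN; /* VMRUN intercept is mandatory */
    pVmcb->ctrl.u64IOPMPhysAddr   = HCPhysIopm;   /* physical address of the 12 KB IO permission bitmap */
    pVmcb->ctrl.u64MSRPMPhysAddr  = HCPhysMsrpm;  /* physical address of the 8 KB MSR permission bitmap */
    pVmcb->ctrl.TLBCtrl.n.u32ASID = uAsid;        /* a non-zero guest ASID */
    pVmcb->guest.u64RFlags        = 0x2;          /* RFLAGS bit 1 is always set */
    pVmcb->guest.u64EFER          = 0x1000;       /* EFER.SVME must be set in the guest EFER */
}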
1018
1019/**
1020 * SVM nested-guest VMCB cache.
1021 *
1022 * Contains VMCB fields from the nested-guest VMCB before they're modified by
1023 * SVM R0 code for hardware-assisted SVM execution of a nested-guest.
1024 *
1025 * A VMCB field needs to be cached when it needs to be modified for execution using
1026 * hardware-assisted SVM and any of the following are true:
1027 * - If the original field needs to be inspected during execution of the
1028 * nested-guest or \#VMEXIT processing.
1029 * - If the field is written back to memory on \#VMEXIT by the physical CPU.
1030 *
1031 * A VMCB field needs to be restored only when the field is written back to
1032 * memory on \#VMEXIT by the physical CPU and thus would be visible to the
1033 * guest.
1034 *
1035 * @remarks Please update hmR3InfoSvmNstGstVmcbCache() when changes are made to
1036 * this structure.
1037 */
1038#pragma pack(1)
1039typedef struct SVMNESTEDVMCBCACHE
1040{
1041 /** Cache of CRX read intercepts. */
1042 uint16_t u16InterceptRdCRx;
1043 /** Cache of CRX write intercepts. */
1044 uint16_t u16InterceptWrCRx;
1045 /** Cache of DRX read intercepts. */
1046 uint16_t u16InterceptRdDRx;
1047 /** Cache of DRX write intercepts. */
1048 uint16_t u16InterceptWrDRx;
1049
1050 /** Cache of the pause-filter threshold. */
1051 uint16_t u16PauseFilterThreshold;
1052 /** Cache of the pause-filter count. */
1053 uint16_t u16PauseFilterCount;
1054
1055 /** Cache of exception intercepts. */
1056 uint32_t u32InterceptXcpt;
1057 /** Cache of control intercepts. */
1058 uint64_t u64InterceptCtrl;
1059
1060 /** Cache of the TSC offset. */
1061 uint64_t u64TSCOffset;
1062
1063 /** Cache of V_INTR_MASKING bit. */
1064 bool fVIntrMasking;
1065 /** Cache of the nested-paging bit. */
1066 bool fNestedPaging;
1067 /** Cache of the LBR virtualization bit. */
1068 bool fLbrVirt;
1069 /** Whether the VMCB is cached by HM. */
1070 bool fCacheValid;
1071 /** Alignment. */
1072 bool afPadding0[4];
1073} SVMNESTEDVMCBCACHE;
1074#pragma pack()
1075/** Pointer to the SVMNESTEDVMCBCACHE structure. */
1076typedef SVMNESTEDVMCBCACHE *PSVMNESTEDVMCBCACHE;
1077/** Pointer to a const SVMNESTEDVMCBCACHE structure. */
1078typedef const SVMNESTEDVMCBCACHE *PCSVMNESTEDVMCBCACHE;
1079AssertCompileSizeAlignment(SVMNESTEDVMCBCACHE, 8);
1080
1081/**
1082 * Segment attribute conversion between CPU and AMD-V VMCB format.
1083 *
1084 * The CPU format of the segment attribute is described in X86DESCATTRBITS
1085 * which is 16-bits (i.e. includes 4 bits of the segment limit).
1086 *
1087 * In the AMD-V VMCB format the segment attribute is a compact 12 bits (strictly
1088 * only the attribute bits and nothing else); the upper 4 bits are unused.
1089 */
1090#define HMSVM_CPU_2_VMCB_SEG_ATTR(a) ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
1091#define HMSVM_VMCB_2_CPU_SEG_ATTR(a) ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )
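/*
 * The conversion keeps the low attribute byte and relocates the G/D/L/AVL nibble
 * (CPU format bits 15:12, VMCB format bits 11:8); the limit bits 19:16 of the CPU
 * format are dropped.  A minimal round-trip sketch for a flat 32-bit code segment;
 * svmExampleSegAttrRoundTrip is hypothetical, not VirtualBox API.
 */
DECLINLINE(void) svmExampleSegAttrRoundTrip(void)
{
    uint16_t const uCpuAttr  = 0xc09b;                              /* P=1, DPL=0, code, G=1, D=1 */
    uint16_t const uVmcbAttr = HMSVM_CPU_2_VMCB_SEG_ATTR(uCpuAttr); /* 0x0c9b */
    Assert(HMSVM_VMCB_2_CPU_SEG_ATTR(uVmcbAttr) == uCpuAttr);       /* lossless when the limit nibble is zero */
}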
1092
1093/** @def HMSVM_SEG_REG_COPY_TO_VMCB
1094 * Copies the specified segment register to a VMCB from a virtual CPU context.
1095 *
1096 * @param a_pCtx The virtual-CPU context.
1097 * @param a_pVmcbStateSave Pointer to the VMCB state-save area.
1098 * @param a_REG The segment register in the VMCB state-save
1099 * struct (ES/CS/SS/DS).
1100 * @param a_reg The segment register in the virtual CPU struct
1101 * (es/cs/ss/ds).
1102 */
1103#define HMSVM_SEG_REG_COPY_TO_VMCB(a_pCtx, a_pVmcbStateSave, a_REG, a_reg) \
1104 do \
1105 { \
1106 Assert((a_pCtx)->a_reg.fFlags & CPUMSELREG_FLAGS_VALID); \
1107 Assert((a_pCtx)->a_reg.ValidSel == (a_pCtx)->a_reg.Sel); \
1108 (a_pVmcbStateSave)->a_REG.u16Sel = (a_pCtx)->a_reg.Sel; \
1109 (a_pVmcbStateSave)->a_REG.u32Limit = (a_pCtx)->a_reg.u32Limit; \
1110 (a_pVmcbStateSave)->a_REG.u64Base = (a_pCtx)->a_reg.u64Base; \
1111 (a_pVmcbStateSave)->a_REG.u16Attr = HMSVM_CPU_2_VMCB_SEG_ATTR((a_pCtx)->a_reg.Attr.u); \
1112 } while (0)
1113
1114/** @def HMSVM_SEG_REG_COPY_FROM_VMCB
1115 * Copies the specified segment register from the VMCB to a virtual CPU
1116 * context.
1117 *
1118 * @param a_pCtx The virtual-CPU context.
1119 * @param a_pVmcbStateSave Pointer to the VMCB state-save area.
1120 * @param a_REG The segment register in the VMCB state-save
1121 * struct (ES/CS/SS/DS).
1122 * @param a_reg The segment register in the virtual CPU struct
1123 * (es/cs/ss/ds).
1124 */
1125#define HMSVM_SEG_REG_COPY_FROM_VMCB(a_pCtx, a_pVmcbStateSave, a_REG, a_reg) \
1126 do \
1127 { \
1128 (a_pCtx)->a_reg.Sel = (a_pVmcbStateSave)->a_REG.u16Sel; \
1129 (a_pCtx)->a_reg.ValidSel = (a_pVmcbStateSave)->a_REG.u16Sel; \
1130 (a_pCtx)->a_reg.fFlags = CPUMSELREG_FLAGS_VALID; \
1131 (a_pCtx)->a_reg.u32Limit = (a_pVmcbStateSave)->a_REG.u32Limit; \
1132 (a_pCtx)->a_reg.u64Base = (a_pVmcbStateSave)->a_REG.u64Base; \
1133 (a_pCtx)->a_reg.Attr.u = HMSVM_VMCB_2_CPU_SEG_ATTR((a_pVmcbStateSave)->a_REG.u16Attr); \
1134 } while (0)
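/*
 * A minimal usage sketch; pCtx (a guest CPUMCTX pointer, see cpum.h) and pVmcb are
 * hypothetical:
 *
 *     HMSVM_SEG_REG_COPY_TO_VMCB(pCtx,   &pVmcb->guest, CS, cs);   guest context -> VMCB state-save area
 *     HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, CS, cs);   VMCB state-save area -> guest context
 */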
1135
1136
1137/** @defgroup grp_hm_svm_c SVM C Helpers
1138 *
1139 * These are functions that strictly implement SVM functionality in accordance
1140 * with the SVM spec. and are thus fit for use by IEM/REM/HM.
1141 *
1142 * These are not HM all-context API functions, those are to be placed in hm.h.
1143 * @{
1144 */
1145VMM_INT_DECL(int) HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
1146VMM_INT_DECL(bool) HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
1147 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
1148 PSVMIOIOEXITINFO pIoExitInfo);
1149/** @} */
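/*
 * A minimal sketch of the MSR permission bitmap layout the AMD APM specifies (three
 * 2 KB regions covering the architectural MSR ranges, two bits per MSR: the even
 * bit intercepts reads, the odd bit writes).  This only illustrates the layout that
 * HMSvmGetMsrpmOffsetAndBit deals with; svmExampleMsrpmOffsetAndBit is hypothetical,
 * not VirtualBox API.
 */
DECLINLINE(int) svmExampleMsrpmOffsetAndBit(uint32_t idMsr, uint32_t *poffMsrpm, uint8_t *piBit)
{
    uint32_t offRegion;
    if (idMsr <= UINT32_C(0x00001fff))
        offRegion = 0x0000;                 /* MSRs 0x00000000..0x00001fff */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        offRegion = 0x0800;                 /* MSRs 0xc0000000..0xc0001fff */
    else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
        offRegion = 0x1000;                 /* MSRs 0xc0010000..0xc0011fff */
    else
        return VERR_OUT_OF_RANGE;           /* not covered by the MSRPM */
    uint32_t const iBit = (idMsr & UINT32_C(0x1fff)) * 2;  /* read bit; the write bit is iBit + 1 */
    *poffMsrpm = offRegion + iBit / 8;
    *piBit     = (uint8_t)(iBit % 8);
    return VINF_SUCCESS;
}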
1150
1151
1152/** @defgroup grp_hm_svm_hwexec SVM Hardware-assisted execution Helpers
1153 *
1154 * These functions are only here because the inline functions in cpum.h call them.
1155 * Don't add any more functions here unless there is no other option.
1156 * @{
1157 */
1158VMM_INT_DECL(bool) HMHasGuestSvmVmcbCached(PVMCPU pVCpu);
1159VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, uint64_t fIntercept);
1160VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr);
1161VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr);
1162VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr);
1163VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr);
1164VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, uint8_t uVector);
1165VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu);
1166VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu);
1167VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu);
1168/** @} */
1169
1170
1171/** @} */
1172
1173#endif /* !___VBox_vmm_svm_h */
1174