VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp@79178

Last change on this file since 79178 was 78707, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 Renamed HM_CHANGED_[VMX|SVM]_GUEST_XCPT_INTERCEPTS.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 35.4 KB
/* $Id: HMAll.cpp 78707 2019-05-24 04:57:21Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <iprt/errcore.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#define EXIT_REASON(a_Def, a_Val, a_Str) #a_Def " - " #a_Val " - " a_Str
#define EXIT_REASON_NIL() NULL
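
/* Illustration of the macro above: EXIT_REASON(VMX_EXIT_HLT, 12, "HLT instruction.")
   stringizes its first two arguments and concatenates the pieces into the single
   literal "VMX_EXIT_HLT - 12 - HLT instruction.". */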

/** Exit reason descriptions for VT-x, used to describe statistics and exit
 *  history. */
static const char * const g_apszVmxExitReasons[MAX_EXITREASON_STAT] =
{
    EXIT_REASON(VMX_EXIT_XCPT_OR_NMI, 0, "Exception or non-maskable interrupt (NMI)."),
    EXIT_REASON(VMX_EXIT_EXT_INT, 1, "External interrupt."),
    EXIT_REASON(VMX_EXIT_TRIPLE_FAULT, 2, "Triple fault."),
    EXIT_REASON(VMX_EXIT_INIT_SIGNAL, 3, "INIT signal."),
    EXIT_REASON(VMX_EXIT_SIPI, 4, "Start-up IPI (SIPI)."),
    EXIT_REASON(VMX_EXIT_IO_SMI_IRQ, 5, "I/O system-management interrupt (SMI)."),
    EXIT_REASON(VMX_EXIT_SMI_IRQ, 6, "Other SMI."),
    EXIT_REASON(VMX_EXIT_INT_WINDOW, 7, "Interrupt window."),
    EXIT_REASON(VMX_EXIT_NMI_WINDOW, 8, "NMI window."),
    EXIT_REASON(VMX_EXIT_TASK_SWITCH, 9, "Task switch."),
    EXIT_REASON(VMX_EXIT_CPUID, 10, "CPUID instruction."),
    EXIT_REASON(VMX_EXIT_GETSEC, 11, "GETSEC instruction."),
    EXIT_REASON(VMX_EXIT_HLT, 12, "HLT instruction."),
    EXIT_REASON(VMX_EXIT_INVD, 13, "INVD instruction."),
    EXIT_REASON(VMX_EXIT_INVLPG, 14, "INVLPG instruction."),
    EXIT_REASON(VMX_EXIT_RDPMC, 15, "RDPMC instruction."),
    EXIT_REASON(VMX_EXIT_RDTSC, 16, "RDTSC instruction."),
    EXIT_REASON(VMX_EXIT_RSM, 17, "RSM instruction in SMM."),
    EXIT_REASON(VMX_EXIT_VMCALL, 18, "VMCALL instruction."),
    EXIT_REASON(VMX_EXIT_VMCLEAR, 19, "VMCLEAR instruction."),
    EXIT_REASON(VMX_EXIT_VMLAUNCH, 20, "VMLAUNCH instruction."),
    EXIT_REASON(VMX_EXIT_VMPTRLD, 21, "VMPTRLD instruction."),
    EXIT_REASON(VMX_EXIT_VMPTRST, 22, "VMPTRST instruction."),
    EXIT_REASON(VMX_EXIT_VMREAD, 23, "VMREAD instruction."),
    EXIT_REASON(VMX_EXIT_VMRESUME, 24, "VMRESUME instruction."),
    EXIT_REASON(VMX_EXIT_VMWRITE, 25, "VMWRITE instruction."),
    EXIT_REASON(VMX_EXIT_VMXOFF, 26, "VMXOFF instruction."),
    EXIT_REASON(VMX_EXIT_VMXON, 27, "VMXON instruction."),
    EXIT_REASON(VMX_EXIT_MOV_CRX, 28, "Control-register accesses."),
    EXIT_REASON(VMX_EXIT_MOV_DRX, 29, "Debug-register accesses."),
    EXIT_REASON(VMX_EXIT_PORT_IO, 30, "I/O instruction."),
    EXIT_REASON(VMX_EXIT_RDMSR, 31, "RDMSR instruction."),
    EXIT_REASON(VMX_EXIT_WRMSR, 32, "WRMSR instruction."),
    EXIT_REASON(VMX_EXIT_ERR_INVALID_GUEST_STATE, 33, "VM-entry failure due to invalid guest state."),
    EXIT_REASON(VMX_EXIT_ERR_MSR_LOAD, 34, "VM-entry failure due to MSR loading."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_MWAIT, 36, "MWAIT instruction."),
    EXIT_REASON(VMX_EXIT_MTF, 37, "Monitor Trap Flag."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_MONITOR, 39, "MONITOR instruction."),
    EXIT_REASON(VMX_EXIT_PAUSE, 40, "PAUSE instruction."),
    EXIT_REASON(VMX_EXIT_ERR_MACHINE_CHECK, 41, "VM-entry failure due to machine-check."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_TPR_BELOW_THRESHOLD, 43, "TPR below threshold (MOV to CR8)."),
    EXIT_REASON(VMX_EXIT_APIC_ACCESS, 44, "APIC access."),
    EXIT_REASON(VMX_EXIT_VIRTUALIZED_EOI, 45, "Virtualized EOI."),
    EXIT_REASON(VMX_EXIT_GDTR_IDTR_ACCESS, 46, "GDTR/IDTR access using LGDT/SGDT/LIDT/SIDT."),
    EXIT_REASON(VMX_EXIT_LDTR_TR_ACCESS, 47, "LDTR/TR access using LLDT/SLDT/LTR/STR."),
    EXIT_REASON(VMX_EXIT_EPT_VIOLATION, 48, "EPT violation."),
    EXIT_REASON(VMX_EXIT_EPT_MISCONFIG, 49, "EPT misconfiguration."),
    EXIT_REASON(VMX_EXIT_INVEPT, 50, "INVEPT instruction."),
    EXIT_REASON(VMX_EXIT_RDTSCP, 51, "RDTSCP instruction."),
    EXIT_REASON(VMX_EXIT_PREEMPT_TIMER, 52, "VMX-preemption timer expired."),
    EXIT_REASON(VMX_EXIT_INVVPID, 53, "INVVPID instruction."),
    EXIT_REASON(VMX_EXIT_WBINVD, 54, "WBINVD instruction."),
    EXIT_REASON(VMX_EXIT_XSETBV, 55, "XSETBV instruction."),
    EXIT_REASON(VMX_EXIT_APIC_WRITE, 56, "APIC write completed to virtual-APIC page."),
    EXIT_REASON(VMX_EXIT_RDRAND, 57, "RDRAND instruction."),
    EXIT_REASON(VMX_EXIT_INVPCID, 58, "INVPCID instruction."),
    EXIT_REASON(VMX_EXIT_VMFUNC, 59, "VMFUNC instruction."),
    EXIT_REASON(VMX_EXIT_ENCLS, 60, "ENCLS instruction."),
    EXIT_REASON(VMX_EXIT_RDSEED, 61, "RDSEED instruction."),
    EXIT_REASON(VMX_EXIT_PML_FULL, 62, "Page-modification log full."),
    EXIT_REASON(VMX_EXIT_XSAVES, 63, "XSAVES instruction."),
    EXIT_REASON(VMX_EXIT_XRSTORS, 64, "XRSTORS instruction."),
    EXIT_REASON_NIL(),
    EXIT_REASON(VMX_EXIT_SPP_EVENT, 66, "SPP-related event."),
    EXIT_REASON(VMX_EXIT_UMWAIT, 67, "UMWAIT instruction."),
    EXIT_REASON(VMX_EXIT_TPAUSE, 68, "TPAUSE instruction.")
};
/** Array index of the last valid VT-x exit reason. */
#define MAX_EXITREASON_VTX 68

/** A partial list of \#EXIT reason descriptions for AMD-V, used to describe
 *  statistics and exit history.
 *
 * @note AMD-V has annoyingly large gaps (e.g. \#NPF VMEXIT comes at 1024), so
 *       this array doesn't contain the entire set of exit reasons; the rest
 *       are handled via hmSvmGetSpecialExitReasonDesc(). */
static const char * const g_apszSvmExitReasons[MAX_EXITREASON_STAT] =
{
    EXIT_REASON(SVM_EXIT_READ_CR0, 0, "Read CR0."),
    EXIT_REASON(SVM_EXIT_READ_CR1, 1, "Read CR1."),
    EXIT_REASON(SVM_EXIT_READ_CR2, 2, "Read CR2."),
    EXIT_REASON(SVM_EXIT_READ_CR3, 3, "Read CR3."),
    EXIT_REASON(SVM_EXIT_READ_CR4, 4, "Read CR4."),
    EXIT_REASON(SVM_EXIT_READ_CR5, 5, "Read CR5."),
    EXIT_REASON(SVM_EXIT_READ_CR6, 6, "Read CR6."),
    EXIT_REASON(SVM_EXIT_READ_CR7, 7, "Read CR7."),
    EXIT_REASON(SVM_EXIT_READ_CR8, 8, "Read CR8."),
    EXIT_REASON(SVM_EXIT_READ_CR9, 9, "Read CR9."),
    EXIT_REASON(SVM_EXIT_READ_CR10, 10, "Read CR10."),
    EXIT_REASON(SVM_EXIT_READ_CR11, 11, "Read CR11."),
    EXIT_REASON(SVM_EXIT_READ_CR12, 12, "Read CR12."),
    EXIT_REASON(SVM_EXIT_READ_CR13, 13, "Read CR13."),
    EXIT_REASON(SVM_EXIT_READ_CR14, 14, "Read CR14."),
    EXIT_REASON(SVM_EXIT_READ_CR15, 15, "Read CR15."),
    EXIT_REASON(SVM_EXIT_WRITE_CR0, 16, "Write CR0."),
    EXIT_REASON(SVM_EXIT_WRITE_CR1, 17, "Write CR1."),
    EXIT_REASON(SVM_EXIT_WRITE_CR2, 18, "Write CR2."),
    EXIT_REASON(SVM_EXIT_WRITE_CR3, 19, "Write CR3."),
    EXIT_REASON(SVM_EXIT_WRITE_CR4, 20, "Write CR4."),
    EXIT_REASON(SVM_EXIT_WRITE_CR5, 21, "Write CR5."),
    EXIT_REASON(SVM_EXIT_WRITE_CR6, 22, "Write CR6."),
    EXIT_REASON(SVM_EXIT_WRITE_CR7, 23, "Write CR7."),
    EXIT_REASON(SVM_EXIT_WRITE_CR8, 24, "Write CR8."),
    EXIT_REASON(SVM_EXIT_WRITE_CR9, 25, "Write CR9."),
    EXIT_REASON(SVM_EXIT_WRITE_CR10, 26, "Write CR10."),
    EXIT_REASON(SVM_EXIT_WRITE_CR11, 27, "Write CR11."),
    EXIT_REASON(SVM_EXIT_WRITE_CR12, 28, "Write CR12."),
    EXIT_REASON(SVM_EXIT_WRITE_CR13, 29, "Write CR13."),
    EXIT_REASON(SVM_EXIT_WRITE_CR14, 30, "Write CR14."),
    EXIT_REASON(SVM_EXIT_WRITE_CR15, 31, "Write CR15."),
    EXIT_REASON(SVM_EXIT_READ_DR0, 32, "Read DR0."),
    EXIT_REASON(SVM_EXIT_READ_DR1, 33, "Read DR1."),
    EXIT_REASON(SVM_EXIT_READ_DR2, 34, "Read DR2."),
    EXIT_REASON(SVM_EXIT_READ_DR3, 35, "Read DR3."),
    EXIT_REASON(SVM_EXIT_READ_DR4, 36, "Read DR4."),
    EXIT_REASON(SVM_EXIT_READ_DR5, 37, "Read DR5."),
    EXIT_REASON(SVM_EXIT_READ_DR6, 38, "Read DR6."),
    EXIT_REASON(SVM_EXIT_READ_DR7, 39, "Read DR7."),
    EXIT_REASON(SVM_EXIT_READ_DR8, 40, "Read DR8."),
    EXIT_REASON(SVM_EXIT_READ_DR9, 41, "Read DR9."),
    EXIT_REASON(SVM_EXIT_READ_DR10, 42, "Read DR10."),
    EXIT_REASON(SVM_EXIT_READ_DR11, 43, "Read DR11."),
    EXIT_REASON(SVM_EXIT_READ_DR12, 44, "Read DR12."),
    EXIT_REASON(SVM_EXIT_READ_DR13, 45, "Read DR13."),
    EXIT_REASON(SVM_EXIT_READ_DR14, 46, "Read DR14."),
    EXIT_REASON(SVM_EXIT_READ_DR15, 47, "Read DR15."),
    EXIT_REASON(SVM_EXIT_WRITE_DR0, 48, "Write DR0."),
    EXIT_REASON(SVM_EXIT_WRITE_DR1, 49, "Write DR1."),
    EXIT_REASON(SVM_EXIT_WRITE_DR2, 50, "Write DR2."),
    EXIT_REASON(SVM_EXIT_WRITE_DR3, 51, "Write DR3."),
    EXIT_REASON(SVM_EXIT_WRITE_DR4, 52, "Write DR4."),
    EXIT_REASON(SVM_EXIT_WRITE_DR5, 53, "Write DR5."),
    EXIT_REASON(SVM_EXIT_WRITE_DR6, 54, "Write DR6."),
    EXIT_REASON(SVM_EXIT_WRITE_DR7, 55, "Write DR7."),
    EXIT_REASON(SVM_EXIT_WRITE_DR8, 56, "Write DR8."),
    EXIT_REASON(SVM_EXIT_WRITE_DR9, 57, "Write DR9."),
    EXIT_REASON(SVM_EXIT_WRITE_DR10, 58, "Write DR10."),
    EXIT_REASON(SVM_EXIT_WRITE_DR11, 59, "Write DR11."),
    EXIT_REASON(SVM_EXIT_WRITE_DR12, 60, "Write DR12."),
    EXIT_REASON(SVM_EXIT_WRITE_DR13, 61, "Write DR13."),
    EXIT_REASON(SVM_EXIT_WRITE_DR14, 62, "Write DR14."),
    EXIT_REASON(SVM_EXIT_WRITE_DR15, 63, "Write DR15."),
    EXIT_REASON(SVM_EXIT_XCPT_0, 64, "Exception 0 (#DE)."),
    EXIT_REASON(SVM_EXIT_XCPT_1, 65, "Exception 1 (#DB)."),
    EXIT_REASON(SVM_EXIT_XCPT_2, 66, "Exception 2 (#NMI)."),
    EXIT_REASON(SVM_EXIT_XCPT_3, 67, "Exception 3 (#BP)."),
    EXIT_REASON(SVM_EXIT_XCPT_4, 68, "Exception 4 (#OF)."),
    EXIT_REASON(SVM_EXIT_XCPT_5, 69, "Exception 5 (#BR)."),
    EXIT_REASON(SVM_EXIT_XCPT_6, 70, "Exception 6 (#UD)."),
    EXIT_REASON(SVM_EXIT_XCPT_7, 71, "Exception 7 (#NM)."),
    EXIT_REASON(SVM_EXIT_XCPT_8, 72, "Exception 8 (#DF)."),
    EXIT_REASON(SVM_EXIT_XCPT_9, 73, "Exception 9 (#CO_SEG_OVERRUN)."),
    EXIT_REASON(SVM_EXIT_XCPT_10, 74, "Exception 10 (#TS)."),
    EXIT_REASON(SVM_EXIT_XCPT_11, 75, "Exception 11 (#NP)."),
    EXIT_REASON(SVM_EXIT_XCPT_12, 76, "Exception 12 (#SS)."),
    EXIT_REASON(SVM_EXIT_XCPT_13, 77, "Exception 13 (#GP)."),
    EXIT_REASON(SVM_EXIT_XCPT_14, 78, "Exception 14 (#PF)."),
    EXIT_REASON(SVM_EXIT_XCPT_15, 79, "Exception 15 (0x0f)."),
    EXIT_REASON(SVM_EXIT_XCPT_16, 80, "Exception 16 (#MF)."),
    EXIT_REASON(SVM_EXIT_XCPT_17, 81, "Exception 17 (#AC)."),
    EXIT_REASON(SVM_EXIT_XCPT_18, 82, "Exception 18 (#MC)."),
    EXIT_REASON(SVM_EXIT_XCPT_19, 83, "Exception 19 (#XF)."),
    EXIT_REASON(SVM_EXIT_XCPT_20, 84, "Exception 20 (#VE)."),
    EXIT_REASON(SVM_EXIT_XCPT_21, 85, "Exception 21 (0x15)."),
    EXIT_REASON(SVM_EXIT_XCPT_22, 86, "Exception 22 (0x16)."),
    EXIT_REASON(SVM_EXIT_XCPT_23, 87, "Exception 23 (0x17)."),
    EXIT_REASON(SVM_EXIT_XCPT_24, 88, "Exception 24 (0x18)."),
    EXIT_REASON(SVM_EXIT_XCPT_25, 89, "Exception 25 (0x19)."),
    EXIT_REASON(SVM_EXIT_XCPT_26, 90, "Exception 26 (0x1a)."),
    EXIT_REASON(SVM_EXIT_XCPT_27, 91, "Exception 27 (0x1b)."),
    EXIT_REASON(SVM_EXIT_XCPT_28, 92, "Exception 28 (0x1c)."),
    EXIT_REASON(SVM_EXIT_XCPT_29, 93, "Exception 29 (0x1d)."),
    EXIT_REASON(SVM_EXIT_XCPT_30, 94, "Exception 30 (#SX)."),
    EXIT_REASON(SVM_EXIT_XCPT_31, 95, "Exception 31 (0x1F)."),
    EXIT_REASON(SVM_EXIT_INTR, 96, "Physical maskable interrupt (host)."),
    EXIT_REASON(SVM_EXIT_NMI, 97, "Physical non-maskable interrupt (host)."),
    EXIT_REASON(SVM_EXIT_SMI, 98, "System management interrupt (host)."),
    EXIT_REASON(SVM_EXIT_INIT, 99, "Physical INIT signal (host)."),
    EXIT_REASON(SVM_EXIT_VINTR, 100, "Virtual interrupt-window exit."),
    EXIT_REASON(SVM_EXIT_CR0_SEL_WRITE, 101, "Selective CR0 Write (to bits other than CR0.TS and CR0.MP)."),
    EXIT_REASON(SVM_EXIT_IDTR_READ, 102, "Read IDTR."),
    EXIT_REASON(SVM_EXIT_GDTR_READ, 103, "Read GDTR."),
    EXIT_REASON(SVM_EXIT_LDTR_READ, 104, "Read LDTR."),
    EXIT_REASON(SVM_EXIT_TR_READ, 105, "Read TR."),
    EXIT_REASON(SVM_EXIT_IDTR_WRITE, 106, "Write IDTR."),
    EXIT_REASON(SVM_EXIT_GDTR_WRITE, 107, "Write GDTR."),
    EXIT_REASON(SVM_EXIT_LDTR_WRITE, 108, "Write LDTR."),
    EXIT_REASON(SVM_EXIT_TR_WRITE, 109, "Write TR."),
    EXIT_REASON(SVM_EXIT_RDTSC, 110, "RDTSC instruction."),
    EXIT_REASON(SVM_EXIT_RDPMC, 111, "RDPMC instruction."),
    EXIT_REASON(SVM_EXIT_PUSHF, 112, "PUSHF instruction."),
    EXIT_REASON(SVM_EXIT_POPF, 113, "POPF instruction."),
    EXIT_REASON(SVM_EXIT_CPUID, 114, "CPUID instruction."),
    EXIT_REASON(SVM_EXIT_RSM, 115, "RSM instruction."),
    EXIT_REASON(SVM_EXIT_IRET, 116, "IRET instruction."),
    EXIT_REASON(SVM_EXIT_SWINT, 117, "Software interrupt (INTn instructions)."),
    EXIT_REASON(SVM_EXIT_INVD, 118, "INVD instruction."),
    EXIT_REASON(SVM_EXIT_PAUSE, 119, "PAUSE instruction."),
    EXIT_REASON(SVM_EXIT_HLT, 120, "HLT instruction."),
    EXIT_REASON(SVM_EXIT_INVLPG, 121, "INVLPG instruction."),
    EXIT_REASON(SVM_EXIT_INVLPGA, 122, "INVLPGA instruction."),
    EXIT_REASON(SVM_EXIT_IOIO, 123, "IN/OUT/INS/OUTS instruction."),
    EXIT_REASON(SVM_EXIT_MSR, 124, "RDMSR or WRMSR access to protected MSR."),
    EXIT_REASON(SVM_EXIT_TASK_SWITCH, 125, "Task switch."),
    EXIT_REASON(SVM_EXIT_FERR_FREEZE, 126, "FERR Freeze; CPU frozen in an x87/mmx instruction waiting for interrupt."),
    EXIT_REASON(SVM_EXIT_SHUTDOWN, 127, "Shutdown."),
    EXIT_REASON(SVM_EXIT_VMRUN, 128, "VMRUN instruction."),
    EXIT_REASON(SVM_EXIT_VMMCALL, 129, "VMMCALL instruction."),
    EXIT_REASON(SVM_EXIT_VMLOAD, 130, "VMLOAD instruction."),
    EXIT_REASON(SVM_EXIT_VMSAVE, 131, "VMSAVE instruction."),
    EXIT_REASON(SVM_EXIT_STGI, 132, "STGI instruction."),
    EXIT_REASON(SVM_EXIT_CLGI, 133, "CLGI instruction."),
    EXIT_REASON(SVM_EXIT_SKINIT, 134, "SKINIT instruction."),
    EXIT_REASON(SVM_EXIT_RDTSCP, 135, "RDTSCP instruction."),
    EXIT_REASON(SVM_EXIT_ICEBP, 136, "ICEBP instruction."),
    EXIT_REASON(SVM_EXIT_WBINVD, 137, "WBINVD instruction."),
    EXIT_REASON(SVM_EXIT_MONITOR, 138, "MONITOR instruction."),
    EXIT_REASON(SVM_EXIT_MWAIT, 139, "MWAIT instruction."),
    EXIT_REASON(SVM_EXIT_MWAIT_ARMED, 140, "MWAIT instruction when armed."),
    EXIT_REASON(SVM_EXIT_XSETBV, 141, "XSETBV instruction."),
};
/** Array index of the last valid AMD-V exit reason. */
#define MAX_EXITREASON_AMDV 141

/** Special exit reasons not covered in the array above. */
#define SVM_EXIT_REASON_NPF                  EXIT_REASON(SVM_EXIT_NPF,                 1024, "Nested Page Fault.")
#define SVM_EXIT_REASON_AVIC_INCOMPLETE_IPI  EXIT_REASON(SVM_EXIT_AVIC_INCOMPLETE_IPI, 1025, "AVIC - Incomplete IPI delivery.")
#define SVM_EXIT_REASON_AVIC_NOACCEL         EXIT_REASON(SVM_EXIT_AVIC_NOACCEL,        1026, "AVIC - Unhandled register.")

/**
 * Gets the SVM exit reason description if it's one of the reasons not present
 * in the @c g_apszSvmExitReasons array.
 *
 * @returns The exit reason description or NULL if unknown.
 * @param   uExit   The exit reason.
 */
DECLINLINE(const char *) hmSvmGetSpecialExitReasonDesc(uint16_t uExit)
{
    switch (uExit)
    {
        case SVM_EXIT_NPF:                 return SVM_EXIT_REASON_NPF;
        case SVM_EXIT_AVIC_INCOMPLETE_IPI: return SVM_EXIT_REASON_AVIC_INCOMPLETE_IPI;
        case SVM_EXIT_AVIC_NOACCEL:        return SVM_EXIT_REASON_AVIC_NOACCEL;
    }
    return EXIT_REASON_NIL();
}
#undef EXIT_REASON_NIL
#undef EXIT_REASON
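
/* Illustrative lookup: hmSvmGetSpecialExitReasonDesc(SVM_EXIT_NPF) returns
   "SVM_EXIT_NPF - 1024 - Nested Page Fault." (built from the EXIT_REASON
   defines above), while any exit code covered by neither the array nor the
   switch yields NULL. */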


/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval true if used.
 * @retval false if software virtualization (raw-mode) is used.
 *
 * @param   pVM     The cross context VM structure.
 * @sa      HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
    return pVM->fHMEnabled;
}

/**
 * Checks if the guest is in a suitable state for hardware-assisted execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and is not necessarily the same as
 *          pVCpu->cpum.GstCtx.
 */
VMMDECL(bool) HMCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));

#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
        || CPUMIsGuestInVmxNonRootMode(pCtx))
    {
        LogFunc(("In nested-guest mode - returning false"));
        return false;
    }
#endif

    /* AMD-V supports real & protected mode with or without paging. */
    if (pVM->hm.s.svm.fEnabled)
    {
        pVCpu->hm.s.fActive = true;
        return true;
    }

    bool rc = HMCanExecuteVmxGuest(pVCpu, pCtx);
    LogFlowFunc(("returning %RTbool\n", rc));
    return rc;
}

/**
 * Queues a guest page for invalidation.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCVirt  Page to invalidate.
 *
 * @remarks The individual page is not tracked; a full TLB flush is forced
 *          before the next VM-entry instead.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
}

/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCVirt  Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    return HMR0InvalidatePage(pVCpu, GCVirt);
#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}

#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}


/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and wait afterwards. Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack... So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}

#endif /* IN_RING0 */
#ifndef IN_RC

/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMFlushTlb(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTlb\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}


/**
 * Pokes an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              EMT to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}

/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCVirt  Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt)
{
    /*
     * The VT-x/AMD-V code flushes the TLB each time a VCPU migrates to a different
     * host CPU, see hmR0VmxFlushTaggedTlbBoth() and hmR0SvmFlushTaggedTlb().
     *
     * This is the reason why we do not care about thread preemption here and just
     * execute HMInvalidatePage() assuming it might be the 'right' CPU.
     */
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCVirt);
        else
        {
            hmQueueInvlPage(pVCpu, GCVirt);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}

/**
 * Flushes the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(int) HMFlushTlbOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTlb(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}

/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  Page to invalidate.
 *
 * @remarks Assumes the current instruction references this physical page
 *          through a virtual address!
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

    /*
     * AMD-V: Doesn't support invalidation with guest physical addresses.
     *
     * VT-x: Doesn't support invalidation with guest physical addresses either.
     *       The INVVPID instruction takes only a linear address, while INVEPT
     *       flushes only by EPT context, not by individual addresses.
     *
     * We update the force flag and flush before the next VM-entry, see @bugref{6568}.
     */
    RT_NOREF(GCPhys);
    /** @todo Remove this or figure out a way to update the Phys STAT counter. */
    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */
    return HMFlushTlbOnAllVCpus(pVM);
}

/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}

/**
 * Checks if both nested paging and unrestricted guest execution are enabled.
 *
 * The almost complete guest execution in hardware is only applicable to VT-x.
 *
 * @returns true if we have both enabled, otherwise false.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.fNestedPaging
        && (   pVM->hm.s.vmx.fUnrestrictedGuest
            || pVM->hm.s.svm.fSupported);
}

/**
 * Checks if this VM is using HM and is long-mode capable.
 *
 * Use VMR3IsLongModeAllowed() instead of this, when possible.
 *
 * @returns true if long mode is allowed, false otherwise.
 * @param   pVM     The cross context VM structure.
 * @sa      VMR3IsLongModeAllowed, NEMHCIsLongModeAllowed
 */
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
}

/**
 * Checks if MSR bitmaps are active. It is assumed that when they are
 * available they will be used as well.
 *
 * @returns true if MSR bitmaps are available, false otherwise.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(bool) HMIsMsrBitmapActive(PVM pVM)
{
    if (HMIsEnabled(pVM))
    {
        if (pVM->hm.s.svm.fSupported)
            return true;

        if (   pVM->hm.s.vmx.fSupported
            && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS))
            return true;
    }
    return false;
}

/**
 * Checks if AMD-V is active.
 *
 * @returns true if AMD-V is active.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM)
{
    return pVM->hm.s.svm.fSupported && HMIsEnabled(pVM);
}


/**
 * Checks if VT-x is active.
 *
 * @returns true if VT-x is active.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM)
{
    return pVM->hm.s.vmx.fSupported && HMIsEnabled(pVM);
}

#endif /* !IN_RC */

/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}

/**
 * Returns the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}

/**
 * Sets or clears the single instruction flag.
 *
 * When set, HM will try its best to return to ring-3 after executing a single
 * instruction. This can be used for debugging. See also
 * EMR3HmSingleInstruction.
 *
 * @returns The old flag state.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   fEnable The new flag state.
 */
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->hm.s.fSingleInstruction;
    pVCpu->hm.s.fSingleInstruction = fEnable;
    pVCpu->hm.s.fUseDebugLoop = fEnable || pVM->hm.s.fUseDebugLoop;
    return fOld;
}

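/* Illustrative save/restore pattern for a hypothetical debugger caller
   (EMR3HmSingleInstruction is the real consumer of this flag):
       bool const fOld = HMSetSingleInstruction(pVM, pVCpu, true);
       ... have the EMT execute exactly one instruction ...
       HMSetSingleInstruction(pVM, pVCpu, fOld);
 */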

#ifndef IN_RC
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 *
 * This is called by PGM.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   enmShadowMode   New shadow paging mode.
 * @param   enmGuestMode    New guest paging mode.
 */
VMM_INT_DECL(void) HMHCChangedPagingMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
{
# ifdef IN_RING3
    /* Ignore page mode changes during state loading. */
    if (VMR3GetState(pVM) == VMSTATE_LOADING)
        return;
# endif

    pVCpu->hm.s.enmShadowMode = enmShadowMode;

    /*
     * If the guest left protected mode VMX execution, we'll have to be
     * extra careful if/when the guest switches back to protected mode.
     */
    if (enmGuestMode == PGMMODE_REAL)
    {
        PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
        pVmcsInfo->fWasInRealMode = true;
    }

# ifdef IN_RING0
    /*
     * We need to tickle SVM and VT-x state updates.
     *
     * Note! We could probably reduce this depending on what exactly changed.
     */
    if (VM_IS_HM_ENABLED(pVM))
    {
        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER); /* No recursion! */
        uint64_t fChanged = HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3 | HM_CHANGED_GUEST_CR4 | HM_CHANGED_GUEST_EFER_MSR;
        if (pVM->hm.s.svm.fSupported)
            fChanged |= HM_CHANGED_SVM_XCPT_INTERCEPTS;
        else
            fChanged |= HM_CHANGED_VMX_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_EXIT_CTLS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fChanged);
    }
# endif

    Log4(("HMHCChangedPagingMode: Guest paging mode '%s', shadow paging mode '%s'\n", PGMGetModeName(enmGuestMode),
          PGMGetModeName(enmShadowMode)));
}
#endif /* !IN_RC */

/**
 * Gets VMX MSRs from the provided hardware-virtualization MSRs struct.
 *
 * This abstraction exists to insulate the support driver from including VMX
 * structures from HM headers.
 *
 * @param   pHwvirtMsrs     The hardware-virtualization MSRs.
 * @param   pVmxMsrs        Where to store the VMX MSRs.
 */
VMM_INT_DECL(void) HMGetVmxMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pHwvirtMsrs, PVMXMSRS pVmxMsrs)
{
    AssertReturnVoid(pHwvirtMsrs);
    AssertReturnVoid(pVmxMsrs);
    pVmxMsrs->u64FeatCtrl     = pHwvirtMsrs->u.vmx.u64FeatCtrl;
    pVmxMsrs->u64Basic        = pHwvirtMsrs->u.vmx.u64Basic;
    pVmxMsrs->PinCtls.u       = pHwvirtMsrs->u.vmx.u64PinCtls;
    pVmxMsrs->ProcCtls.u      = pHwvirtMsrs->u.vmx.u64ProcCtls;
    pVmxMsrs->ProcCtls2.u     = pHwvirtMsrs->u.vmx.u64ProcCtls2;
    pVmxMsrs->ExitCtls.u      = pHwvirtMsrs->u.vmx.u64ExitCtls;
    pVmxMsrs->EntryCtls.u     = pHwvirtMsrs->u.vmx.u64EntryCtls;
    pVmxMsrs->TruePinCtls.u   = pHwvirtMsrs->u.vmx.u64TruePinCtls;
    pVmxMsrs->TrueProcCtls.u  = pHwvirtMsrs->u.vmx.u64TrueProcCtls;
    pVmxMsrs->TrueEntryCtls.u = pHwvirtMsrs->u.vmx.u64TrueEntryCtls;
    pVmxMsrs->TrueExitCtls.u  = pHwvirtMsrs->u.vmx.u64TrueExitCtls;
    pVmxMsrs->u64Misc         = pHwvirtMsrs->u.vmx.u64Misc;
    pVmxMsrs->u64Cr0Fixed0    = pHwvirtMsrs->u.vmx.u64Cr0Fixed0;
    pVmxMsrs->u64Cr0Fixed1    = pHwvirtMsrs->u.vmx.u64Cr0Fixed1;
    pVmxMsrs->u64Cr4Fixed0    = pHwvirtMsrs->u.vmx.u64Cr4Fixed0;
    pVmxMsrs->u64Cr4Fixed1    = pHwvirtMsrs->u.vmx.u64Cr4Fixed1;
    pVmxMsrs->u64VmcsEnum     = pHwvirtMsrs->u.vmx.u64VmcsEnum;
    pVmxMsrs->u64VmFunc       = pHwvirtMsrs->u.vmx.u64VmFunc;
    pVmxMsrs->u64EptVpidCaps  = pHwvirtMsrs->u.vmx.u64EptVpidCaps;
}

/**
 * Gets SVM MSRs from the provided hardware-virtualization MSRs struct.
 *
 * This abstraction exists to insulate the support driver from including SVM
 * structures from HM headers.
 *
 * @param   pHwvirtMsrs     The hardware-virtualization MSRs.
 * @param   pSvmMsrs        Where to store the SVM MSRs.
 */
VMM_INT_DECL(void) HMGetSvmMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pHwvirtMsrs, PSVMMSRS pSvmMsrs)
{
    AssertReturnVoid(pHwvirtMsrs);
    AssertReturnVoid(pSvmMsrs);
    pSvmMsrs->u64MsrHwcr = pHwvirtMsrs->u.svm.u64MsrHwcr;
}

/**
 * Gets the name of a VT-x exit code.
 *
 * @returns Pointer to a read-only string if @a uExit is known, otherwise NULL.
 * @param   uExit   The VT-x exit to name.
 */
VMM_INT_DECL(const char *) HMGetVmxExitName(uint32_t uExit)
{
    if (uExit <= MAX_EXITREASON_VTX)
    {
        Assert(uExit < RT_ELEMENTS(g_apszVmxExitReasons));
        return g_apszVmxExitReasons[uExit];
    }
    return NULL;
}


/**
 * Gets the name of an AMD-V exit code.
 *
 * @returns Pointer to a read-only string if @a uExit is known, otherwise NULL.
 * @param   uExit   The AMD-V exit to name.
 */
VMM_INT_DECL(const char *) HMGetSvmExitName(uint32_t uExit)
{
    if (uExit <= MAX_EXITREASON_AMDV)
    {
        Assert(uExit < RT_ELEMENTS(g_apszSvmExitReasons));
        return g_apszSvmExitReasons[uExit];
    }
    return hmSvmGetSpecialExitReasonDesc(uExit);
}
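
/* Illustrative use of the exit-name getters when logging exit history; the
   call sites here are hypothetical:
       Log(("VM-exit: %s\n", HMGetVmxExitName(uExitReason)));   (VT-x)
       Log(("#VMEXIT: %s\n", HMGetSvmExitName(uExitCode)));     (AMD-V)
   Both may return NULL for unknown codes, which callers should handle. */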