/* $Id: HMSVMR0.cpp 73047 2018-07-11 02:42:55Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - Host Context Ring-0.
 */

/*
 * Copyright (C) 2013-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include <iprt/asm-amd64-x86.h>
#include <iprt/thread.h>

#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/apic.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include "HMSVMR0.h"
#include "dtrace/VBoxVMM.h"

#ifdef DEBUG_ramshankar
# define HMSVM_SYNC_FULL_GUEST_STATE
# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
# define HMSVM_ALWAYS_TRAP_PF
# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef VBOX_WITH_STATISTICS
# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
        if ((u64ExitCode) == SVM_EXIT_NPF) \
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
        else \
            STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
    } while (0)

# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
#  define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
        if ((u64ExitCode) == SVM_EXIT_NPF) \
            STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitReasonNpf); \
        else \
            STAM_COUNTER_INC(&pVCpu->hm.s.paStatNestedExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
    } while (0)
# endif
#else
# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode)           do { } while (0)
# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
#  define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode)   do { } while (0)
# endif
#endif /* !VBOX_WITH_STATISTICS */

/** If we decide to use a function table approach this can be useful to
 *  switch to a "static DECLCALLBACK(int)". */
#define HMSVM_EXIT_DECL                 static int

/**
 * Subset of the guest-CPU state that is kept by SVM R0 code while executing the
 * guest using hardware-assisted SVM.
 *
 * This excludes state like TSC AUX, GPRs (other than RSP, RAX) which are
 * always swapped and restored across the world-switch and also registers like
 * EFER, PAT MSR etc. which cannot be modified by the guest without causing a
 * \#VMEXIT.
 */
#define HMSVM_CPUMCTX_EXTRN_ALL         (  CPUMCTX_EXTRN_RIP            \
                                         | CPUMCTX_EXTRN_RFLAGS         \
                                         | CPUMCTX_EXTRN_RAX            \
                                         | CPUMCTX_EXTRN_RSP            \
                                         | CPUMCTX_EXTRN_SREG_MASK      \
                                         | CPUMCTX_EXTRN_CR0            \
                                         | CPUMCTX_EXTRN_CR2            \
                                         | CPUMCTX_EXTRN_CR3            \
                                         | CPUMCTX_EXTRN_TABLE_MASK     \
                                         | CPUMCTX_EXTRN_DR6            \
                                         | CPUMCTX_EXTRN_DR7            \
                                         | CPUMCTX_EXTRN_KERNEL_GS_BASE \
                                         | CPUMCTX_EXTRN_SYSCALL_MSRS   \
                                         | CPUMCTX_EXTRN_SYSENTER_MSRS  \
                                         | CPUMCTX_EXTRN_HWVIRT         \
                                         | CPUMCTX_EXTRN_HM_SVM_MASK)

/**
 * Subset of the guest-CPU state that is shared between the guest and host.
 */
#define HMSVM_CPUMCTX_SHARED_STATE      CPUMCTX_EXTRN_DR_MASK

/** Macro for importing guest state from the VMCB back into CPUMCTX. */
#define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_fWhat) \
    do { \
        if ((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fWhat)) \
            hmR0SvmImportGuestState((a_pVCpu), (a_fWhat)); \
    } while (0)

/** Assert that the required state bits are fetched. */
#define HMSVM_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz)  AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
                                                              ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
                                                              (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))

/** Assert that preemption is disabled or covered by thread-context hooks. */
#define HMSVM_ASSERT_PREEMPT_SAFE(a_pVCpu)          Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
                                                           || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));

/** Assert that we haven't migrated CPUs when thread-context hooks are not
 *  used. */
#define HMSVM_ASSERT_CPU_SAFE(a_pVCpu)              AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
                                                              || (a_pVCpu)->hm.s.idEnteredCpu == RTMpCpuId(), \
                                                              ("Illegal migration! Entered on CPU %u Current %u\n", \
                                                              (a_pVCpu)->hm.s.idEnteredCpu, RTMpCpuId()));

/** Assert that we're not executing a nested-guest. */
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)   Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
#else
# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)   do { NOREF((a_pCtx)); } while (0)
#endif

/** Assert that we're executing a nested-guest. */
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)       Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
#else
# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)       do { NOREF((a_pCtx)); } while (0)
#endif

/** Macro for checking and returning from the calling function when a
 *  \#VMEXIT intercept may have been caused during delivery of another
 *  event in the guest. */
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
    do \
    { \
        int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
        if (RT_LIKELY(rc == VINF_SUCCESS))        { /* continue #VMEXIT handling */ } \
        else if (     rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS;            } \
        else if (     rc == VINF_EM_RESET \
                 &&   CPUMIsGuestSvmCtrlInterceptSet((a_pVCpu), &(a_pVCpu)->cpum.GstCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \
        { \
            HMSVM_CPUMCTX_IMPORT_STATE((a_pVCpu), IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); \
            return VBOXSTRICTRC_TODO(IEMExecSvmVmexit((a_pVCpu), SVM_EXIT_SHUTDOWN, 0, 0)); \
        } \
        else \
            return rc; \
    } while (0)
#else
# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
    do \
    { \
        int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
        if (RT_LIKELY(rc == VINF_SUCCESS))        { /* continue #VMEXIT handling */ } \
        else if (     rc == VINF_HM_DOUBLE_FAULT) { return VINF_SUCCESS;            } \
        else \
            return rc; \
    } while (0)
#endif
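/* Note: in the nested-guest variant above, a triple fault (VINF_EM_RESET) is
   reflected to the outer hypervisor as an SVM_EXIT_SHUTDOWN \#VMEXIT when it
   intercepts shutdown, instead of resetting the VM. */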

/** Macro which updates interrupt shadow for the current RIP. */
#define HMSVM_UPDATE_INTR_SHADOW(a_pVCpu) \
    do { \
        /* Update interrupt shadow. */ \
        if (   VMCPU_FF_IS_PENDING((a_pVCpu), VMCPU_FF_INHIBIT_INTERRUPTS) \
            && (a_pVCpu)->cpum.GstCtx.rip != EMGetInhibitInterruptsPC((a_pVCpu))) \
            VMCPU_FF_CLEAR((a_pVCpu), VMCPU_FF_INHIBIT_INTERRUPTS); \
    } while (0)
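/* Note: the interrupt shadow (VMCPU_FF_INHIBIT_INTERRUPTS) models the
   one-instruction inhibit window after STI and MOV SS; once RIP has moved past
   the shadowed instruction the force-flag is stale and is cleared above. */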

/** Macro for upgrading @a a_rc to VINF_EM_DBG_STEPPED after emulating an
 *  instruction that exited. */
#define HMSVM_CHECK_SINGLE_STEP(a_pVCpu, a_rc) \
    do { \
        if ((a_pVCpu)->hm.s.fSingleInstruction && (a_rc) == VINF_SUCCESS) \
            (a_rc) = VINF_EM_DBG_STEPPED; \
    } while (0)

/** Validate segment descriptor granularity bit. */
#ifdef VBOX_STRICT
# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) \
    AssertMsg(   !(a_pCtx)->reg.Attr.n.u1Present \
              || (   (a_pCtx)->reg.Attr.n.u1Granularity \
                  ?  ((a_pCtx)->reg.u32Limit & 0xfff) == 0xfff \
                  :  (a_pCtx)->reg.u32Limit <= UINT32_C(0xfffff)), \
              ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", (a_pCtx)->reg.u32Limit, \
              (a_pCtx)->reg.Attr.u, (a_pCtx)->reg.u64Base))
#else
# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg)  do { } while (0)
#endif

/**
 * Exception bitmap mask for all contributory exceptions.
 *
 * Page fault is deliberately excluded here as it's conditional as to whether
 * it's contributory or benign. Page faults are handled separately.
 */
#define HMSVM_CONTRIBUTORY_XCPT_MASK    (  RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
                                         | RT_BIT(X86_XCPT_DE))
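/* Contributory exceptions matter for double-fault determination: raising one
   while delivering another contributory exception yields a \#DF (see
   hmR0SvmCheckExitDueToEventDelivery). */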

/**
 * Mandatory/unconditional guest control intercepts.
 *
 * SMIs can and do happen in normal operation. We need not intercept them
 * while executing the guest (or nested-guest).
 */
#define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS           (  SVM_CTRL_INTERCEPT_INTR          \
                                                         | SVM_CTRL_INTERCEPT_NMI           \
                                                         | SVM_CTRL_INTERCEPT_INIT          \
                                                         | SVM_CTRL_INTERCEPT_RDPMC         \
                                                         | SVM_CTRL_INTERCEPT_CPUID         \
                                                         | SVM_CTRL_INTERCEPT_RSM           \
                                                         | SVM_CTRL_INTERCEPT_HLT           \
                                                         | SVM_CTRL_INTERCEPT_IOIO_PROT     \
                                                         | SVM_CTRL_INTERCEPT_MSR_PROT      \
                                                         | SVM_CTRL_INTERCEPT_INVLPGA       \
                                                         | SVM_CTRL_INTERCEPT_SHUTDOWN      \
                                                         | SVM_CTRL_INTERCEPT_FERR_FREEZE   \
                                                         | SVM_CTRL_INTERCEPT_VMRUN         \
                                                         | SVM_CTRL_INTERCEPT_SKINIT        \
                                                         | SVM_CTRL_INTERCEPT_WBINVD        \
                                                         | SVM_CTRL_INTERCEPT_MONITOR       \
                                                         | SVM_CTRL_INTERCEPT_MWAIT         \
                                                         | SVM_CTRL_INTERCEPT_CR0_SEL_WRITE \
                                                         | SVM_CTRL_INTERCEPT_XSETBV)
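/* Note: SVM_CTRL_INTERCEPT_VMMCALL is not part of this mask; it is ORed in
   separately when the VMCB is set up in SVMR0SetupVM(). */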

/** @name VMCB Clean Bits.
 *
 * These flags are used for VMCB-state caching. A set VMCB Clean bit indicates
 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
 * memory.
 *
 * @{ */
/** All intercept vectors, TSC offset, PAUSE filter counter. */
#define HMSVM_VMCB_CLEAN_INTERCEPTS             RT_BIT(0)
/** I/O permission bitmap, MSR permission bitmap. */
#define HMSVM_VMCB_CLEAN_IOPM_MSRPM             RT_BIT(1)
/** ASID. */
#define HMSVM_VMCB_CLEAN_ASID                   RT_BIT(2)
/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
    V_INTR_VECTOR. */
#define HMSVM_VMCB_CLEAN_INT_CTRL               RT_BIT(3)
/** Nested Paging: Nested CR3 (nCR3), PAT. */
#define HMSVM_VMCB_CLEAN_NP                     RT_BIT(4)
/** Control registers (CR0, CR3, CR4, EFER). */
#define HMSVM_VMCB_CLEAN_CRX_EFER               RT_BIT(5)
/** Debug registers (DR6, DR7). */
#define HMSVM_VMCB_CLEAN_DRX                    RT_BIT(6)
/** GDT, IDT limit and base. */
#define HMSVM_VMCB_CLEAN_DT                     RT_BIT(7)
/** Segment register: CS, SS, DS, ES limit and base. */
#define HMSVM_VMCB_CLEAN_SEG                    RT_BIT(8)
/** CR2. */
#define HMSVM_VMCB_CLEAN_CR2                    RT_BIT(9)
/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to). */
#define HMSVM_VMCB_CLEAN_LBR                    RT_BIT(10)
/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
    PHYSICAL_TABLE and AVIC LOGICAL_TABLE pointers). */
#define HMSVM_VMCB_CLEAN_AVIC                   RT_BIT(11)
/** Mask of all valid VMCB Clean bits. */
#define HMSVM_VMCB_CLEAN_ALL                    (  HMSVM_VMCB_CLEAN_INTERCEPTS \
                                                 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
                                                 | HMSVM_VMCB_CLEAN_ASID       \
                                                 | HMSVM_VMCB_CLEAN_INT_CTRL   \
                                                 | HMSVM_VMCB_CLEAN_NP         \
                                                 | HMSVM_VMCB_CLEAN_CRX_EFER   \
                                                 | HMSVM_VMCB_CLEAN_DRX        \
                                                 | HMSVM_VMCB_CLEAN_DT         \
                                                 | HMSVM_VMCB_CLEAN_SEG        \
                                                 | HMSVM_VMCB_CLEAN_CR2        \
                                                 | HMSVM_VMCB_CLEAN_LBR        \
                                                 | HMSVM_VMCB_CLEAN_AVIC)
/** @} */
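/* Illustrative usage: after modifying state covered by a clean bit, clear that
   bit so the CPU reloads the state from the VMCB, e.g.:
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; */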

/** @name SVM transient.
 *
 * A state structure for holding miscellaneous information across AMD-V
 * VMRUN/\#VMEXIT operation, restored after the transition.
 *
 * @{ */
typedef struct SVMTRANSIENT
{
    /** The host's rflags/eflags. */
    RTCCUINTREG     fEFlags;
#if HC_ARCH_BITS == 32
    uint32_t        u32Alignment0;
#endif

    /** The \#VMEXIT exit code (the EXITCODE field in the VMCB). */
    uint64_t        u64ExitCode;
    /** The guest's TPR value used for TPR shadowing. */
    uint8_t         u8GuestTpr;
    /** Alignment. */
    uint8_t         abAlignment0[7];

    /** Pointer to the currently executing VMCB. */
    PSVMVMCB        pVmcb;
    /** Whether we are currently executing a nested-guest. */
    bool            fIsNestedGuest;

    /** Whether the guest debug state was active at the time of \#VMEXIT. */
    bool            fWasGuestDebugStateActive;
    /** Whether the hyper debug state was active at the time of \#VMEXIT. */
    bool            fWasHyperDebugStateActive;
    /** Whether the TSC offset mode needs to be updated. */
    bool            fUpdateTscOffsetting;
    /** Whether the TSC_AUX MSR needs restoring on \#VMEXIT. */
    bool            fRestoreTscAuxMsr;
    /** Whether the \#VMEXIT was caused by a page-fault during delivery of a
     *  contributory exception or a page-fault. */
    bool            fVectoringDoublePF;
    /** Whether the \#VMEXIT was caused by a page-fault during delivery of an
     *  external interrupt or NMI. */
    bool            fVectoringPF;
} SVMTRANSIENT, *PSVMTRANSIENT;
AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
AssertCompileMemberAlignment(SVMTRANSIENT, pVmcb,       sizeof(uint64_t));
/** @} */

/**
 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
 */
typedef enum SVMMSREXITREAD
{
    /** Reading this MSR causes a \#VMEXIT. */
    SVMMSREXIT_INTERCEPT_READ = 0xb,
    /** Reading this MSR does not cause a \#VMEXIT. */
    SVMMSREXIT_PASSTHRU_READ
} SVMMSREXITREAD;

/**
 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
 */
typedef enum SVMMSREXITWRITE
{
    /** Writing to this MSR causes a \#VMEXIT. */
    SVMMSREXIT_INTERCEPT_WRITE = 0xd,
    /** Writing to this MSR does not cause a \#VMEXIT. */
    SVMMSREXIT_PASSTHRU_WRITE
} SVMMSREXITWRITE;

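/* These enums feed hmR0SvmSetMsrPermission() below; e.g. to let the guest read
   an MSR directly while still trapping writes, pass SVMMSREXIT_PASSTHRU_READ
   together with SVMMSREXIT_INTERCEPT_WRITE. */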
/**
 * SVM \#VMEXIT handler.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM-transient structure.
 */
typedef int FNSVMEXITHANDLER(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient);


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
static void hmR0SvmLeave(PVMCPU pVCpu, bool fImportState);


/** @name \#VMEXIT handlers.
 * @{
 */
static FNSVMEXITHANDLER hmR0SvmExitIntr;
static FNSVMEXITHANDLER hmR0SvmExitWbinvd;
static FNSVMEXITHANDLER hmR0SvmExitInvd;
static FNSVMEXITHANDLER hmR0SvmExitCpuid;
static FNSVMEXITHANDLER hmR0SvmExitRdtsc;
static FNSVMEXITHANDLER hmR0SvmExitRdtscp;
static FNSVMEXITHANDLER hmR0SvmExitRdpmc;
static FNSVMEXITHANDLER hmR0SvmExitInvlpg;
static FNSVMEXITHANDLER hmR0SvmExitHlt;
static FNSVMEXITHANDLER hmR0SvmExitMonitor;
static FNSVMEXITHANDLER hmR0SvmExitMwait;
static FNSVMEXITHANDLER hmR0SvmExitShutdown;
static FNSVMEXITHANDLER hmR0SvmExitUnexpected;
static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
static FNSVMEXITHANDLER hmR0SvmExitMsr;
static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
static FNSVMEXITHANDLER hmR0SvmExitWriteDRx;
static FNSVMEXITHANDLER hmR0SvmExitXsetbv;
static FNSVMEXITHANDLER hmR0SvmExitIOInstr;
static FNSVMEXITHANDLER hmR0SvmExitNestedPF;
static FNSVMEXITHANDLER hmR0SvmExitVIntr;
static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
static FNSVMEXITHANDLER hmR0SvmExitPause;
static FNSVMEXITHANDLER hmR0SvmExitFerrFreeze;
static FNSVMEXITHANDLER hmR0SvmExitIret;
static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
static FNSVMEXITHANDLER hmR0SvmExitXcptUD;
static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
static FNSVMEXITHANDLER hmR0SvmExitXcptDB;
static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
static FNSVMEXITHANDLER hmR0SvmExitXcptGeneric;
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
static FNSVMEXITHANDLER hmR0SvmExitClgi;
static FNSVMEXITHANDLER hmR0SvmExitStgi;
static FNSVMEXITHANDLER hmR0SvmExitVmload;
static FNSVMEXITHANDLER hmR0SvmExitVmsave;
static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
static FNSVMEXITHANDLER hmR0SvmExitVmrun;
static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;
#endif
/** @} */

static int hmR0SvmHandleExit(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient);
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Ring-0 memory object for the IO bitmap. */
static RTR0MEMOBJ           g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
/** Physical address of the IO bitmap. */
static RTHCPHYS             g_HCPhysIOBitmap;
/** Pointer to the IO bitmap. */
static R0PTRTYPE(void *)    g_pvIOBitmap;

#ifdef VBOX_STRICT
# define HMSVM_LOG_RBP_RSP      RT_BIT_32(0)
# define HMSVM_LOG_CR_REGS      RT_BIT_32(1)
# define HMSVM_LOG_CS           RT_BIT_32(2)
# define HMSVM_LOG_SS           RT_BIT_32(3)
# define HMSVM_LOG_FS           RT_BIT_32(4)
# define HMSVM_LOG_GS           RT_BIT_32(5)
# define HMSVM_LOG_LBR          RT_BIT_32(6)
# define HMSVM_LOG_ALL          (  HMSVM_LOG_RBP_RSP \
                                 | HMSVM_LOG_CR_REGS \
                                 | HMSVM_LOG_CS      \
                                 | HMSVM_LOG_SS      \
                                 | HMSVM_LOG_FS      \
                                 | HMSVM_LOG_GS      \
                                 | HMSVM_LOG_LBR)

/**
 * Dumps virtual CPU state and additional info to the logger for diagnostics.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcb       Pointer to the VM control block.
 * @param   pszPrefix   Log prefix.
 * @param   fFlags      Log flags, see HMSVM_LOG_XXX.
 * @param   uVerbose    The verbosity level, currently unused.
 */
static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, const char *pszPrefix, uint32_t fFlags, uint8_t uVerbose)
{
    RT_NOREF2(pVCpu, uVerbose);
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;

    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u));

    if (fFlags & HMSVM_LOG_RBP_RSP)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RBP);
        Log4(("%s: rsp=%#RX64 rbp=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp));
    }

    if (fFlags & HMSVM_LOG_CR_REGS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4);
        Log4(("%s: cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cr0, pCtx->cr3, pCtx->cr4));
    }

    if (fFlags & HMSVM_LOG_CS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
        Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base,
              pCtx->cs.u32Limit, pCtx->cs.Attr.u));
    }
    if (fFlags & HMSVM_LOG_SS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
        Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base,
              pCtx->ss.u32Limit, pCtx->ss.Attr.u));
    }
    if (fFlags & HMSVM_LOG_FS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
        Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base,
              pCtx->fs.u32Limit, pCtx->fs.Attr.u));
    }
    if (fFlags & HMSVM_LOG_GS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
        Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base,
              pCtx->gs.u32Limit, pCtx->gs.Attr.u));
    }

    PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
    if (fFlags & HMSVM_LOG_LBR)
    {
        Log4(("%s: br_from=%#RX64 br_to=%#RX64 lastxcpt_from=%#RX64 lastxcpt_to=%#RX64\n", pszPrefix, pVmcbGuest->u64BR_FROM,
              pVmcbGuest->u64BR_TO, pVmcbGuest->u64LASTEXCPFROM, pVmcbGuest->u64LASTEXCPTO));
    }
    NOREF(pVmcbGuest); NOREF(pCtx);
}
#endif /* VBOX_STRICT */


/**
 * Sets up and activates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pHostCpu        Pointer to the CPU info struct.
 * @param   pVM             The cross context VM structure. Can be
 *                          NULL after a resume!
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 * @param   fEnabledByHost  Whether the host OS has already initialized AMD-V.
 * @param   pvArg           Unused on AMD-V.
 */
VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
                              void *pvArg)
{
    Assert(!fEnabledByHost);
    Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
    Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
    Assert(pvCpuPage); NOREF(pvCpuPage);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    NOREF(pvArg);
    NOREF(fEnabledByHost);

    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();

    /*
     * We must turn on AMD-V and set up the host state physical address, as those MSRs are per CPU.
     */
    uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    if (u64HostEfer & MSR_K6_EFER_SVME)
    {
        /* If VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. */
        if (   pVM
            && pVM->hm.s.svm.fIgnoreInUseError)
            pHostCpu->fIgnoreAMDVInUseError = true;

        if (!pHostCpu->fIgnoreAMDVInUseError)
        {
            ASMSetFlags(fEFlags);
            return VERR_SVM_IN_USE;
        }
    }

    /* Turn on AMD-V in the EFER MSR. */
    ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);

    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);

    /* Restore interrupts. */
    ASMSetFlags(fEFlags);

    /*
     * Theoretically, other hypervisors may have used ASIDs, ideally we should flush all
     * non-zero ASIDs when enabling SVM. AMD doesn't have an SVM instruction to flush all
     * ASIDs (flushing is done upon VMRUN). Therefore, flag that we need to flush the TLB
     * entirely before executing any guest code.
     */
    pHostCpu->fFlushAsidBeforeUse = true;

    /*
     * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
     */
    ++pHostCpu->cTlbFlushes;

    return VINF_SUCCESS;
}


/**
 * Deactivates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pHostCpu        Pointer to the CPU info struct.
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 */
VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertReturn(   HCPhysCpuPage
                 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    RT_NOREF(pHostCpu);

    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();

    /* Turn off AMD-V in the EFER MSR. */
    uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);

    /* Invalidate host state physical address. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);

    /* Restore interrupts. */
    ASMSetFlags(fEFlags);

    return VINF_SUCCESS;
}


/**
 * Does global AMD-V initialization (called during module initialization).
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) SVMR0GlobalInit(void)
{
    /*
     * Allocate 12 KB (3 pages) for the IO bitmap. Since this is non-optional and we always
     * intercept all IO accesses, it's done once globally here instead of per-VM.
     */
    Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
    int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;

    g_pvIOBitmap     = RTR0MemObjAddress(g_hMemObjIOBitmap);
    g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);

    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));

    return VINF_SUCCESS;
}

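/* Note (assumption based on the AMD APM): the 12 KB IOPM provides one
   intercept bit per port for the 64K I/O port space (8 KB), with the extra
   page required so multi-byte accesses straddling the last port can still be
   looked up; SVM_IOPM_PAGES encodes this size. */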

/**
 * Does global AMD-V termination (called during module termination).
 */
VMMR0DECL(void) SVMR0GlobalTerm(void)
{
    if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(g_hMemObjIOBitmap, true /* fFreeMappings */);
        g_pvIOBitmap      = NULL;
        g_HCPhysIOBitmap  = 0;
        g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
    }
}


/**
 * Frees any allocated per-VCPU structures for a VM.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) hmR0SvmFreeStructs(PVM pVM)
{
    for (uint32_t i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        AssertPtr(pVCpu);

        if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
            pVCpu->hm.s.svm.HCPhysVmcbHost  = 0;
            pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
        }

        if (pVCpu->hm.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
            pVCpu->hm.s.svm.pVmcb       = NULL;
            pVCpu->hm.s.svm.HCPhysVmcb  = 0;
            pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
        }

        if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
            pVCpu->hm.s.svm.pvMsrBitmap      = NULL;
            pVCpu->hm.s.svm.HCPhysMsrBitmap  = 0;
            pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
        }
    }
}


/**
 * Does per-VM AMD-V initialization.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR0DECL(int) SVMR0InitVM(PVM pVM)
{
    int rc = VERR_INTERNAL_ERROR_5;

    /*
     * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
     */
    uint32_t u32Family;
    uint32_t u32Model;
    uint32_t u32Stepping;
    if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
    {
        Log4Func(("AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
        pVM->hm.s.svm.fAlwaysFlushTLB = true;
    }

    /*
     * Initialize the R0 memory objects up-front so we can properly clean up on allocation failures.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        pVCpu->hm.s.svm.hMemObjVmcbHost  = NIL_RTR0MEMOBJ;
        pVCpu->hm.s.svm.hMemObjVmcb      = NIL_RTR0MEMOBJ;
        pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
    }

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        /*
         * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
         * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
         */
        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, SVM_VMCB_PAGES << PAGE_SHIFT, false /* fExecutable */);
        if (RT_FAILURE(rc))
            goto failure_cleanup;

        void *pvVmcbHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
        pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
        Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G);
        ASMMemZeroPage(pvVmcbHost);

        /*
         * Allocate one page for the guest-state VMCB.
         */
        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, SVM_VMCB_PAGES << PAGE_SHIFT, false /* fExecutable */);
        if (RT_FAILURE(rc))
            goto failure_cleanup;

        pVCpu->hm.s.svm.pVmcb      = (PSVMVMCB)RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
        pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
        Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
        ASMMemZeroPage(pVCpu->hm.s.svm.pVmcb);

        /*
         * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
         * SVM to not require one.
         */
        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT,
                                 false /* fExecutable */);
        if (RT_FAILURE(rc))
            goto failure_cleanup;

        pVCpu->hm.s.svm.pvMsrBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
        pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
        /* Set all bits to intercept all MSR accesses (changed later on). */
        ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
    }

    return VINF_SUCCESS;

failure_cleanup:
    hmR0SvmFreeStructs(pVM);
    return rc;
}


/**
 * Does per-VM AMD-V termination.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR0DECL(int) SVMR0TermVM(PVM pVM)
{
    hmR0SvmFreeStructs(pVM);
    return VINF_SUCCESS;
}


/**
 * Returns whether the VMCB Clean Bits feature is supported.
 *
 * @return  @c true if supported, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
    {
        return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN)
            &&  pVM->cpum.ro.GuestFeatures.fSvmVmcbClean;
    }
#endif
    return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
}


/**
 * Returns whether the decode assists feature is supported.
 *
 * @return  @c true if supported, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) hmR0SvmSupportsDecodeAssists(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
    {
        return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS)
            &&  pVM->cpum.ro.GuestFeatures.fSvmDecodeAssists;
    }
#endif
    return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
}


/**
 * Returns whether the NRIP_SAVE feature is supported.
 *
 * @return  @c true if supported, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPU pVCpu)
{
#if 0
    PVM pVM = pVCpu->CTX_SUFF(pVM);
# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
    {
        return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
            &&  pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
    }
# endif
    return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
#endif

    /** @todo NRIP_SAVE is temporarily disabled for testing. Re-enable once it's working. */
    NOREF(pVCpu);
    return false;
}


/**
 * Sets the permission bits for the specified MSR in the MSRPM bitmap.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pbMsrBitmap Pointer to the MSR bitmap.
 * @param   idMsr       The MSR for which the permissions are being set.
 * @param   enmRead     MSR read permissions.
 * @param   enmWrite    MSR write permissions.
 *
 * @remarks This function does -not- clear the VMCB clean bits for MSRPM. The
 *          caller needs to take care of this.
 */
static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
                                    SVMMSREXITWRITE enmWrite)
{
    bool const  fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
    uint16_t    offMsrpm;
    uint8_t     uMsrpmBit;
    int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
    AssertRC(rc);

    Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
    Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);

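    /* Each MSR is covered by two consecutive MSRPM bits: the even bit
       (uMsrpmBit) controls RDMSR intercepts, the odd one (uMsrpmBit + 1)
       controls WRMSR intercepts. */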
    pbMsrBitmap += offMsrpm;
    if (enmRead == SVMMSREXIT_INTERCEPT_READ)
        *pbMsrBitmap |= RT_BIT(uMsrpmBit);
    else
    {
        if (!fInNestedGuestMode)
            *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        else
        {
            /* Only clear the bit if the nested-guest is also not intercepting the MSR read. */
            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
            pbNstGstMsrBitmap += offMsrpm;
            if (!(*pbNstGstMsrBitmap & RT_BIT(uMsrpmBit)))
                *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
            else
                Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit));
        }
#endif
    }

    if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
        *pbMsrBitmap |= RT_BIT(uMsrpmBit + 1);
    else
    {
        if (!fInNestedGuestMode)
            *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        else
        {
            /* Only clear the bit if the nested-guest is also not intercepting the MSR write. */
            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
            pbNstGstMsrBitmap += offMsrpm;
            if (!(*pbNstGstMsrBitmap & RT_BIT(uMsrpmBit + 1)))
                *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
            else
                Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
        }
#endif
    }
}


/**
 * Sets up AMD-V for the specified VM.
 * This function is only called once per-VM during initialization.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    Assert(pVM->hm.s.svm.fSupported);

    bool const fPauseFilter          = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
    bool const fPauseFilterThreshold = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
    bool const fUsePauseFilter       = fPauseFilter && pVM->hm.s.svm.cPauseFilter;

    bool const fLbrVirt              = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
    bool const fUseLbrVirt           = fLbrVirt; /** @todo CFGM, IEM implementation etc. */

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    bool const fVirtVmsaveVmload     = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD);
    bool const fUseVirtVmsaveVmload  = fVirtVmsaveVmload && pVM->hm.s.svm.fVirtVmsaveVmload && pVM->hm.s.fNestedPaging;

    bool const fVGif                 = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
    bool const fUseVGif              = fVGif && pVM->hm.s.svm.fVGif;
#endif

    PVMCPU       pVCpu = &pVM->aCpus[0];
    PSVMVMCB     pVmcb = pVCpu->hm.s.svm.pVmcb;
    AssertMsgReturn(pVmcb, ("Invalid pVmcb for vcpu[0]\n"), VERR_SVM_INVALID_PVMCB);
    PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;

    /* Always trap #AC for reasons of security. */
    pVmcbCtrl->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_AC);

    /* Always trap #DB for reasons of security. */
    pVmcbCtrl->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_DB);

    /* Trap exceptions unconditionally (debug purposes). */
#ifdef HMSVM_ALWAYS_TRAP_PF
    pVmcbCtrl->u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
#endif
#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
    pVmcbCtrl->u32InterceptXcpt |= 0
                                 | RT_BIT(X86_XCPT_BP)
                                 | RT_BIT(X86_XCPT_DE)
                                 | RT_BIT(X86_XCPT_NM)
                                 | RT_BIT(X86_XCPT_UD)
                                 | RT_BIT(X86_XCPT_NP)
                                 | RT_BIT(X86_XCPT_SS)
                                 | RT_BIT(X86_XCPT_GP)
                                 | RT_BIT(X86_XCPT_PF)
                                 | RT_BIT(X86_XCPT_MF)
                                 ;
#endif

    /* Apply the exception intercepts needed by the GIM provider. */
    if (pVCpu->hm.s.fGIMTrapXcptUD)
        pVmcbCtrl->u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);

    /* Set up unconditional intercepts and conditions. */
    pVmcbCtrl->u64InterceptCtrl = HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS
                                | SVM_CTRL_INTERCEPT_VMMCALL;

#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
    pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    /* Virtualized VMSAVE/VMLOAD. */
    pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload = fUseVirtVmsaveVmload;
    if (!fUseVirtVmsaveVmload)
    {
        pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
                                    |  SVM_CTRL_INTERCEPT_VMLOAD;
    }

    /* Virtual GIF. */
    pVmcbCtrl->IntCtrl.n.u1VGifEnable = fUseVGif;
    if (!fUseVGif)
    {
        pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
                                    |  SVM_CTRL_INTERCEPT_STGI;
    }
#endif

    /* CR4 writes must always be intercepted for tracking PGM mode changes. */
    pVmcbCtrl->u16InterceptWrCRx = RT_BIT(4);

    /* Intercept all DRx reads and writes by default. Changed later on. */
    pVmcbCtrl->u16InterceptRdDRx = 0xffff;
    pVmcbCtrl->u16InterceptWrDRx = 0xffff;

    /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
    pVmcbCtrl->IntCtrl.n.u1VIntrMasking = 1;

    /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
       as we currently deliver both PIC and APIC interrupts alike, see hmR0SvmEvaluatePendingEvent(). */
    pVmcbCtrl->IntCtrl.n.u1IgnoreTPR = 1;

    /* Set the IO permission bitmap physical addresses. */
    pVmcbCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;

    /* LBR virtualization. */
    pVmcbCtrl->LbrVirt.n.u1LbrVirt = fUseLbrVirt;

    /* The host ASID MBZ; for the guest, start with 1. */
    pVmcbCtrl->TLBCtrl.n.u32ASID = 1;

    /* Set up Nested Paging. This doesn't change throughout the execution time of the VM. */
    pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging = pVM->hm.s.fNestedPaging;

    /* Without Nested Paging, we need additional intercepts. */
    if (!pVM->hm.s.fNestedPaging)
    {
        /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
        pVmcbCtrl->u16InterceptRdCRx |= RT_BIT(3);
        pVmcbCtrl->u16InterceptWrCRx |= RT_BIT(3);

        /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
        pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
                                    |  SVM_CTRL_INTERCEPT_TASK_SWITCH;

        /* Page faults must be intercepted to implement shadow paging. */
        pVmcbCtrl->u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
    }

    /* Set up the Pause Filter for guest pause-loop (spinlock) exiting. */
    if (fUsePauseFilter)
    {
        Assert(pVM->hm.s.svm.cPauseFilter > 0);
        pVmcbCtrl->u16PauseFilterCount = pVM->hm.s.svm.cPauseFilter;
        if (fPauseFilterThreshold)
            pVmcbCtrl->u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
        pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;
    }

    /*
     * Set up the MSR permission bitmap.
     * The following MSRs are saved/restored automatically during the world-switch.
     * Don't intercept guest read/write accesses to these MSRs.
     */
    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    pVmcbCtrl->u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;

    /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
    Assert(pVmcbCtrl->u32VmcbCleanBits == 0);

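    /* Replicate the VMCB control setup and MSR bitmap of VCPU 0 to the
       remaining VCPUs. */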
    for (VMCPUID i = 1; i < pVM->cCpus; i++)
    {
        PVMCPU       pVCpuCur = &pVM->aCpus[i];
        PSVMVMCB     pVmcbCur = pVM->aCpus[i].hm.s.svm.pVmcb;
        AssertMsgReturn(pVmcbCur, ("Invalid pVmcb for vcpu[%u]\n", i), VERR_SVM_INVALID_PVMCB);
        PSVMVMCBCTRL pVmcbCtrlCur = &pVmcbCur->ctrl;

        /* Copy the VMCB control area. */
        memcpy(pVmcbCtrlCur, pVmcbCtrl, sizeof(*pVmcbCtrlCur));

        /* Copy the MSR bitmap and setup the VCPU-specific host physical address. */
        uint8_t *pbMsrBitmapCur = (uint8_t *)pVCpuCur->hm.s.svm.pvMsrBitmap;
        memcpy(pbMsrBitmapCur, pbMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
        pVmcbCtrlCur->u64MSRPMPhysAddr = pVCpuCur->hm.s.svm.HCPhysMsrBitmap;

        /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
        Assert(pVmcbCtrlCur->u32VmcbCleanBits == 0);

        /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs initially. */
        Assert(pVCpuCur->hm.s.fGIMTrapXcptUD == pVCpu->hm.s.fGIMTrapXcptUD);
    }

    return VINF_SUCCESS;
}


/**
 * Gets a pointer to the currently active guest (or nested-guest) VMCB.
 *
 * @returns Pointer to the current context VMCB.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPU pVCpu)
{
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
        return pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
#endif
    return pVCpu->hm.s.svm.pVmcb;
}


/**
 * Gets a pointer to the nested-guest VMCB cache.
 *
 * @returns Pointer to the nested-guest VMCB cache.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPU pVCpu)
{
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
    return &pVCpu->hm.s.svm.NstGstVmcbCache;
#else
    RT_NOREF(pVCpu);
    return NULL;
#endif
}


/**
 * Invalidates a guest page by guest virtual address.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCVirt  Guest virtual address of the page to invalidate.
 */
VMMR0DECL(int) SVMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);

    bool const fFlushPending = pVCpu->CTX_SUFF(pVM)->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);

    /* Skip it if a TLB flush is already pending. */
    if (!fFlushPending)
    {
        Log4Func(("%#RGv\n", GCVirt));

        PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
        AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);

#if HC_ARCH_BITS == 32
        /* If we get a flush in 64-bit guest mode, then force a full TLB flush. INVLPGA takes only 32-bit addresses. */
        if (CPUMIsGuestInLongMode(pVCpu))
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
        else
#endif
        {
            SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
            STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
        }
    }
    return VINF_SUCCESS;
}
1191 |
|
---|
1192 |
|
---|
1193 | /**
|
---|
1194 | * Flushes the appropriate tagged-TLB entries.
|
---|
1195 | *
|
---|
1196 | * @param pVCpu The cross context virtual CPU structure.
|
---|
1197 | * @param pVmcb Pointer to the VM control block.
|
---|
1198 | * @param pHostCpu Pointer to the HM host-CPU info.
|
---|
1199 | */
|
---|
1200 | static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu)
|
---|
1201 | {
|
---|
1202 | /*
|
---|
1203 | * Force a TLB flush for the first world switch if the current CPU differs from the one
|
---|
1204 | * we ran on last. This can happen both for start & resume due to long jumps back to
|
---|
1205 | * ring-3.
|
---|
1206 | *
|
---|
1207 | * We also force a TLB flush every time when executing a nested-guest VCPU as there is no
|
---|
1208 | * correlation between it and the physical CPU.
|
---|
1209 | *
|
---|
1210 | * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while
|
---|
1211 | * flushing the TLB, so we cannot reuse the ASIDs without flushing.
|
---|
1212 | */
|
---|
1213 | bool fNewAsid = false;
|
---|
1214 | Assert(pHostCpu->idCpu != NIL_RTCPUID);
|
---|
1215 | if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
|
---|
1216 | || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes
|
---|
1217 | #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
|
---|
1218 | || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx)
|
---|
1219 | #endif
|
---|
1220 | )
|
---|
1221 | {
|
---|
1222 | STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
|
---|
1223 | pVCpu->hm.s.fForceTLBFlush = true;
|
---|
1224 | fNewAsid = true;
|
---|
1225 | }
|
---|
1226 |
|
---|
1227 | /* Set TLB flush state as checked until we return from the world switch. */
|
---|
1228 | ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
|
---|
1229 |
|
---|
1230 | /* Check for explicit TLB flushes. */
|
---|
1231 | if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
|
---|
1232 | {
|
---|
1233 | pVCpu->hm.s.fForceTLBFlush = true;
|
---|
1234 | STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
|
---|
1235 | }
|
---|
1236 |
|
---|
1237 | /*
|
---|
1238 | * If the AMD CPU erratum 170, We need to flush the entire TLB for each world switch. Sad.
|
---|
1239 | * This Host CPU requirement takes precedence.
|
---|
1240 | */
|
---|
1241 | PVM pVM = pVCpu->CTX_SUFF(pVM);
|
---|
1242 | if (pVM->hm.s.svm.fAlwaysFlushTLB)
|
---|
1243 | {
|
---|
1244 | pHostCpu->uCurrentAsid = 1;
|
---|
1245 | pVCpu->hm.s.uCurrentAsid = 1;
|
---|
1246 | pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
|
---|
1247 | pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
|
---|
1248 | pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
|
---|
1249 |
|
---|
1250 | /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
|
---|
1251 | pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
|
---|
1252 | }
|
---|
1253 | else
|
---|
1254 | {
|
---|
1255 | pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
|
---|
1256 | if (pVCpu->hm.s.fForceTLBFlush)
|
---|
1257 | {
|
---|
1258 | /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
|
---|
1259 | pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
|
---|
1260 |
|
---|
1261 | if (fNewAsid)
|
---|
1262 | {
|
---|
1263 | ++pHostCpu->uCurrentAsid;
|
---|
1264 |
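                /* Illustrative example: with uMaxAsid = 8 the host hands out ASIDs
                   1..7; the increment that would reach 8 wraps back to 1 below and
                   bumps cTlbFlushes, forcing every VCPU on this host CPU to pick up
                   a fresh ASID (and a full flush) on its next world switch. */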
                bool fHitASIDLimit = false;
                if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
                {
                    pHostCpu->uCurrentAsid = 1;     /* Wraparound at 1; host uses 0. */
                    pHostCpu->cTlbFlushes++;        /* All VCPUs that run on this host CPU must use a new ASID. */
                    fHitASIDLimit = true;
                }

                if (   fHitASIDLimit
                    || pHostCpu->fFlushAsidBeforeUse)
                {
                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
                    pHostCpu->fFlushAsidBeforeUse = false;
                }

                pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
                pVCpu->hm.s.idLastCpu    = pHostCpu->idCpu;
                pVCpu->hm.s.cTlbFlushes  = pHostCpu->cTlbFlushes;
            }
            else
            {
                if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
                else
                    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
            }
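            /* Flush-by-ASID (when the CPU supports it) only evicts entries tagged
               with this VCPU's ASID; without the feature the entire TLB, host
               entries included, has to be flushed instead. */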

            pVCpu->hm.s.fForceTLBFlush = false;
        }
    }

    /* Update VMCB with the ASID. */
    if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hm.s.uCurrentAsid)
    {
        pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
    }

    AssertMsg(pVCpu->hm.s.idLastCpu == pHostCpu->idCpu,
              ("vcpu idLastCpu=%u hostcpu idCpu=%u\n", pVCpu->hm.s.idLastCpu, pHostCpu->idCpu));
    AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
              ("Flush count mismatch for cpu %u (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
    AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d uCurrentAsid = %x\n", pHostCpu->idCpu, pHostCpu->uCurrentAsid));
    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d VM uCurrentAsid = %x\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

#ifdef VBOX_WITH_STATISTICS
    if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
    else if (   pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
             || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
    }
    else
    {
        Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushEntire);
    }
#endif
}


/** @name 64-bit guest on 32-bit host OS helper functions.
 *
 * The host CPU is still 64-bit capable but the host OS is running in 32-bit
 * mode (code segment, paging). These wrappers/helpers perform the necessary
 * bits for the 32->64 switcher.
 *
 * @{ */
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
/**
 * Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
 *
 * @returns VBox status code.
 * @param   HCPhysVmcbHost  Physical address of host VMCB.
 * @param   HCPhysVmcb      Physical address of the VMCB.
 * @param   pCtx            Pointer to the guest-CPU context.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 */
DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF2(pVM, pCtx);
    uint32_t aParam[8];
    aParam[0] = RT_LO_U32(HCPhysVmcbHost);  /* Param 1: HCPhysVmcbHost - Lo. */
    aParam[1] = RT_HI_U32(HCPhysVmcbHost);  /* Param 1: HCPhysVmcbHost - Hi. */
    aParam[2] = RT_LO_U32(HCPhysVmcb);      /* Param 2: HCPhysVmcb - Lo. */
    aParam[3] = RT_HI_U32(HCPhysVmcb);      /* Param 2: HCPhysVmcb - Hi. */
    aParam[4] = VM_RC_ADDR(pVM, pVM);
    aParam[5] = 0;
    aParam[6] = VM_RC_ADDR(pVM, pVCpu);
    aParam[7] = 0;
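    /* Each 64-bit physical address is passed as two 32-bit halves because the
       32-bit switcher can only marshal 32-bit parameters; the VM and VCPU
       pointers are passed as raw-mode context addresses since that is the
       mapping the switcher code runs with. */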

    return SVMR0Execute64BitsHandler(pVCpu, HM64ON32OP_SVMRCVMRun64, RT_ELEMENTS(aParam), &aParam[0]);
}


/**
 * Executes the specified VMRUN handler in 64-bit mode.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   enmOp   The operation to perform.
 * @param   cParams Number of parameters.
 * @param   paParam Array of 32-bit parameters.
 */
VMMR0DECL(int) SVMR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
    Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);

    /* Disable interrupts. */
    RTHCUINTREG const fEFlags = ASMIntDisableFlags();

#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    RTCPUID idHostCpu = RTMpCpuId();
    CPUMR0SetLApic(pVCpu, idHostCpu);
#endif

    CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
    CPUMSetHyperEIP(pVCpu, enmOp);
    for (int i = (int)cParams - 1; i >= 0; i--)
        CPUMPushHyper(pVCpu, paParam[i]);
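    /* Parameters were pushed in reverse order above so the 64-bit handler finds
       the first parameter at the top of the hypervisor stack. */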

    /* Call the switcher. */
    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);

    /* Restore interrupts. */
    ASMSetFlags(fEFlags);
    return rc;
}

#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
/** @} */


/**
 * Sets an exception intercept in the specified VMCB.
 *
 * @param   pVmcb   Pointer to the VM control block.
 * @param   uXcpt   The exception (X86_XCPT_*).
 */
DECLINLINE(void) hmR0SvmSetXcptIntercept(PSVMVMCB pVmcb, uint8_t uXcpt)
{
    if (!(pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt)))
    {
        pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(uXcpt);
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    }
}


/**
 * Clears an exception intercept in the specified VMCB.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 * @param   uXcpt   The exception (X86_XCPT_*).
 *
 * @remarks This takes into account if we're executing a nested-guest and only
 *          removes the exception intercept if both the guest -and- nested-guest
 *          are not intercepting it.
 */
DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPU pVCpu, PSVMVMCB pVmcb, uint8_t uXcpt)
{
    Assert(uXcpt != X86_XCPT_DB);
    Assert(uXcpt != X86_XCPT_AC);
#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    if (pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt))
    {
        bool fRemove = true;
# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        /* Only remove the intercept if the nested-guest is also not intercepting it! */
        PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
        {
            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
            fRemove = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(uXcpt));
        }
# else
        RT_NOREF(pVCpu);
# endif
        if (fRemove)
        {
            pVmcb->ctrl.u32InterceptXcpt &= ~RT_BIT(uXcpt);
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
        }
    }
#else
    RT_NOREF3(pVCpu, pVmcb, uXcpt);
#endif
}


/**
 * Sets a control intercept in the specified VMCB.
 *
 * @param   pVmcb           Pointer to the VM control block.
 * @param   fCtrlIntercept  The control intercept (SVM_CTRL_INTERCEPT_*).
 */
DECLINLINE(void) hmR0SvmSetCtrlIntercept(PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
{
    if (!(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept))
    {
        pVmcb->ctrl.u64InterceptCtrl |= fCtrlIntercept;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    }
}


/**
 * Clears a control intercept in the specified VMCB.
 *
 * @returns @c true if the intercept is still set, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmcb           Pointer to the VM control block.
 * @param   fCtrlIntercept  The control intercept (SVM_CTRL_INTERCEPT_*).
 *
 * @remarks This takes into account if we're executing a nested-guest and only
 *          removes the control intercept if both the guest -and- nested-guest
 *          are not intercepting it.
 */
static bool hmR0SvmClearCtrlIntercept(PVMCPU pVCpu, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
{
    if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept)
    {
        bool fRemove = true;
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        /* Only remove the control intercept if the nested-guest is also not intercepting it! */
        if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
        {
            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
            fRemove = !(pVmcbNstGstCache->u64InterceptCtrl & fCtrlIntercept);
        }
#else
        RT_NOREF(pVCpu);
#endif
        if (fRemove)
        {
            pVmcb->ctrl.u64InterceptCtrl &= ~fCtrlIntercept;
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
        }
    }

    return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept);
}


/**
 * Exports the guest (or nested-guest) CR0 into the VMCB.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks This assumes we always pre-load the guest FPU.
 * @remarks No-long-jump zone!!!
 */
static void hmR0SvmExportGuestCR0(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    uint64_t const uGuestCr0 = pCtx->cr0;
    uint64_t uShadowCr0 = uGuestCr0;

    /* Always enable caching. */
    uShadowCr0 &= ~(X86_CR0_CD | X86_CR0_NW);

    /* When Nested Paging is not available use shadow page tables and intercept #PFs (latter done in SVMR0SetupVM()). */
    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    {
        uShadowCr0 |= X86_CR0_PG   /* Use shadow page tables. */
                   |  X86_CR0_WP;  /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
    }
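    /* Note: the VMCB receives this modified "shadow" CR0 while pCtx->cr0 keeps
       the guest-visible value; the read intercept set further down (whenever
       the two differ) is what prevents the guest from noticing our edits. */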

    /*
     * Use the #MF style of legacy-FPU error reporting for now. Although AMD-V has MSRs that
     * let us isolate the host from it, IEM/REM still needs work to emulate it properly,
     * see @bugref{7243#c103}.
     */
    if (!(uGuestCr0 & X86_CR0_NE))
    {
        uShadowCr0 |= X86_CR0_NE;
        hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_MF);
    }
    else
        hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_MF);

    /*
     * If the shadow and guest CR0 are identical we can avoid intercepting CR0 reads.
     *
     * CR0 writes still need interception as PGM requires tracking paging mode changes,
     * see @bugref{6944}.
     *
     * We also don't ever want to honor weird things like cache disable from the guest.
     * However, we can avoid intercepting changes to the TS & MP bits by clearing the CR0
     * write intercept below and keeping SVM_CTRL_INTERCEPT_CR0_SEL_WRITE instead.
     */
    if (uShadowCr0 == uGuestCr0)
    {
        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
        {
            pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(0);
            pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(0);
            Assert(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_CR0_SEL_WRITE);
        }
        else
        {
            /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
            pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx       & ~RT_BIT(0))
                                          | (pVmcbNstGstCache->u16InterceptRdCRx &  RT_BIT(0));
            pVmcb->ctrl.u16InterceptWrCRx = (pVmcb->ctrl.u16InterceptWrCRx       & ~RT_BIT(0))
                                          | (pVmcbNstGstCache->u16InterceptWrCRx &  RT_BIT(0));
        }
    }
    else
    {
        pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(0);
        pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(0);
    }
    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

    Assert(!RT_HI_U32(uShadowCr0));
    if (pVmcb->guest.u64CR0 != uShadowCr0)
    {
        pVmcb->guest.u64CR0 = uShadowCr0;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    }
}


/**
 * Exports the guest (or nested-guest) CR3 into the VMCB.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
static void hmR0SvmExportGuestCR3(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    if (pVM->hm.s.fNestedPaging)
    {
        PGMMODE enmShwPagingMode;
#if HC_ARCH_BITS == 32
        if (CPUMIsGuestInLongModeEx(pCtx))
            enmShwPagingMode = PGMMODE_AMD64_NX;
        else
#endif
            enmShwPagingMode = PGMGetHostMode(pVM);

        pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
        pVmcb->guest.u64CR3 = pCtx->cr3;
        Assert(pVmcb->ctrl.u64NestedPagingCR3);
    }
    else
        pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);

    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
}


/**
 * Exports the guest (or nested-guest) CR4 into the VMCB.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0SvmExportGuestCR4(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    uint64_t uShadowCr4 = pCtx->cr4;
    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    {
        switch (pVCpu->hm.s.enmShadowMode)
        {
            case PGMMODE_REAL:
            case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;

            case PGMMODE_32_BIT:        /* 32-bit paging. */
                uShadowCr4 &= ~X86_CR4_PAE;
                break;

            case PGMMODE_PAE:           /* PAE paging. */
            case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
                /* Must use PAE paging as we could use physical memory > 4 GB. */
                uShadowCr4 |= X86_CR4_PAE;
                break;

            case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
            case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                break;
#else
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#endif

            default:                    /* shut up gcc */
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
        }
    }

    /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
    pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
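    /* Example: if the guest enabled AVX state (XCR0 = 7) while the host runs
       with XCR0 = 3, XCR0 must be swapped around each VMRUN since AMD-V does
       not save or load XCR0 for us. */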

    /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */
    if (uShadowCr4 == pCtx->cr4)
    {
        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
            pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(4);
        else
        {
            /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */
            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
            pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx       & ~RT_BIT(4))
                                          | (pVmcbNstGstCache->u16InterceptRdCRx &  RT_BIT(4));
        }
    }
    else
        pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(4);

    /* CR4 writes are always intercepted (both guest, nested-guest) for tracking PGM mode changes. */
    Assert(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(4));

    /* Update VMCB with the shadow CR4 and the appropriate VMCB clean bits. */
    Assert(!RT_HI_U32(uShadowCr4));
    pVmcb->guest.u64CR4 = uShadowCr4;
    pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);

    return VINF_SUCCESS;
}


/**
 * Exports the guest (or nested-guest) control registers into the VMCB.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0SvmExportGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR_MASK)
    {
        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR0)
            hmR0SvmExportGuestCR0(pVCpu, pVmcb);

        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR2)
        {
            pVmcb->guest.u64CR2 = pVCpu->cpum.GstCtx.cr2;
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
        }

        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR3)
            hmR0SvmExportGuestCR3(pVCpu, pVmcb);

        /* CR4 re-loading is ASSUMED to be done every time we get in from ring-3! (XCR0) */
        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR4)
        {
            int rc = hmR0SvmExportGuestCR4(pVCpu, pVmcb);
            if (RT_FAILURE(rc))
                return rc;
        }

        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_CR_MASK;
    }
    return VINF_SUCCESS;
}


/**
 * Exports the guest (or nested-guest) segment registers into the VMCB.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
static void hmR0SvmExportGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;

    /* Guest segment registers. */
    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SREG_MASK)
    {
        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CS)
            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, CS, cs);

        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SS)
        {
            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, SS, ss);
            pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
        }

        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DS)
            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, DS, ds);

        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_ES)
            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, ES, es);

        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_FS)
            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);

        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GS)
            HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);

        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
    }

    /* Guest TR. */
    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_TR)
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);

    /* Guest LDTR. */
    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_LDTR)
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);

    /* Guest GDTR. */
    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GDTR)
    {
        pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
        pVmcb->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
    }

    /* Guest IDTR. */
    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_IDTR)
    {
        pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
        pVmcb->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
    }

    pVCpu->hm.s.fCtxChanged &= ~(  HM_CHANGED_GUEST_SREG_MASK
                                 | HM_CHANGED_GUEST_TABLE_MASK);
}


/**
 * Exports the guest (or nested-guest) MSRs into the VMCB.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
static void hmR0SvmExportGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;

    /* Guest Sysenter MSRs. */
    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
    {
        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
            pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;

        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
            pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;

        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
            pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
    }

    /*
     * Guest EFER MSR.
     * AMD-V requires guest EFER.SVME to be set. Weird.
     * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
     */
    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_EFER_MSR)
    {
        pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    }
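    /* Note: per the spec section cited above, VMRUN fails its consistency
       checks if EFER.SVME is clear in the VMCB, which is why the bit is OR'ed
       in unconditionally. */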

    /* If the guest isn't in 64-bit mode, clear the MSR_K6_EFER_LME bit, otherwise SVM expects amd64 shadow paging. */
    if (   !CPUMIsGuestInLongModeEx(pCtx)
        && (pCtx->msrEFER & MSR_K6_EFER_LME))
    {
        pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    }

    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSCALL_MSRS)
    {
        pVmcb->guest.u64STAR   = pCtx->msrSTAR;
        pVmcb->guest.u64LSTAR  = pCtx->msrLSTAR;
        pVmcb->guest.u64CSTAR  = pCtx->msrCSTAR;
        pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
    }

    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_KERNEL_GS_BASE)
        pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;

    pVCpu->hm.s.fCtxChanged &= ~(  HM_CHANGED_GUEST_SYSENTER_MSR_MASK
                                 | HM_CHANGED_GUEST_EFER_MSR
                                 | HM_CHANGED_GUEST_SYSCALL_MSRS
                                 | HM_CHANGED_GUEST_KERNEL_GS_BASE);

    /*
     * Setup the PAT MSR (applicable for Nested Paging only).
     *
     * While guests can modify and see the modified values through the shadow values,
     * we shall not honor any guest modifications of this MSR to ensure caching is always
     * enabled similar to how we clear CR0.CD and NW bits.
     *
     * For nested-guests this needs to always be set as well, see @bugref{7243#c109}.
     */
    pVmcb->guest.u64PAT = MSR_IA32_CR_PAT_INIT_VAL;

    /* Enable the last branch record bit if LBR virtualization is enabled. */
    if (pVmcb->ctrl.LbrVirt.n.u1LbrVirt)
        pVmcb->guest.u64DBGCTL = MSR_IA32_DEBUGCTL_LBR;
}


/**
 * Exports the guest (or nested-guest) debug state into the VMCB and programs
 * the necessary intercepts accordingly.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
 */
static void hmR0SvmExportSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;

    /*
     * Anyone single stepping on the host side? If so, we'll have to use the
     * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
     * the VMM level like the VT-x implementation does.
     */
    bool fInterceptMovDRx = false;
    bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
    if (fStepping)
    {
        pVCpu->hm.s.fClearTrapFlag = true;
        pVmcb->guest.u64RFlags |= X86_EFL_TF;
        fInterceptMovDRx = true;  /* Need clean DR6, no guest mess. */
    }
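    /* Setting EFLAGS.TF makes the guest raise #DB after the next instruction,
       which is how single-stepping is emulated here; fClearTrapFlag reminds us
       to strip TF again once the step completes. */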

    if (   fStepping
        || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
    {
        /*
         * Use the combined guest and host DRx values found in the hypervisor
         * register set because the debugger has breakpoints active or someone
         * is single stepping on the host side.
         *
         * Note! DBGF expects a clean DR6 state before executing guest code.
         */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
        if (   CPUMIsGuestInLongModeEx(pCtx)
            && !CPUMIsHyperDebugStateActivePending(pVCpu))
        {
            CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
            Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
            Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
        }
        else
#endif
        if (!CPUMIsHyperDebugStateActive(pVCpu))
        {
            CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
            Assert(!CPUMIsGuestDebugStateActive(pVCpu));
            Assert(CPUMIsHyperDebugStateActive(pVCpu));
        }

        /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
        if (   pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
            || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
        {
            pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
            pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
        }

        /** @todo If we cared, we could optimize to allow the guest to read registers
         *        with the same values. */
        fInterceptMovDRx = true;
        pVCpu->hm.s.fUsingHyperDR7 = true;
        Log5(("hmR0SvmExportSharedDebugState: Loaded hyper DRx\n"));
    }
    else
    {
        /*
         * Update DR6, DR7 with the guest values if necessary.
         */
        if (   pVmcb->guest.u64DR7 != pCtx->dr[7]
            || pVmcb->guest.u64DR6 != pCtx->dr[6])
        {
            pVmcb->guest.u64DR7 = pCtx->dr[7];
            pVmcb->guest.u64DR6 = pCtx->dr[6];
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
        }
        pVCpu->hm.s.fUsingHyperDR7 = false;

        /*
         * If the guest has enabled debug registers, we need to load them prior to
         * executing guest code so they'll trigger at the right time.
         */
        if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
        {
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
            if (   CPUMIsGuestInLongModeEx(pCtx)
                && !CPUMIsGuestDebugStateActivePending(pVCpu))
            {
                CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
                Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
                Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
            }
            else
#endif
            if (!CPUMIsGuestDebugStateActive(pVCpu))
            {
                CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
                Assert(!CPUMIsHyperDebugStateActive(pVCpu));
                Assert(CPUMIsGuestDebugStateActive(pVCpu));
            }
            Log5(("hmR0SvmExportSharedDebugState: Loaded guest DRx\n"));
        }
        /*
         * If no debugging is enabled, we'll lazy load DR0-3. We don't need to
         * intercept #DB as DR6 is updated in the VMCB.
         *
         * Note! If we cared and dared, we could skip intercepting \#DB here.
         *       However, \#DB shouldn't be performance critical, so we'll play safe
         *       and keep the code similar to the VT-x code and always intercept it.
         */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
        else if (   !CPUMIsGuestDebugStateActivePending(pVCpu)
                 && !CPUMIsGuestDebugStateActive(pVCpu))
#else
        else if (!CPUMIsGuestDebugStateActive(pVCpu))
#endif
        {
            fInterceptMovDRx = true;
        }
    }

    Assert(pVmcb->ctrl.u32InterceptXcpt & RT_BIT_32(X86_XCPT_DB));
    if (fInterceptMovDRx)
    {
        if (   pVmcb->ctrl.u16InterceptRdDRx != 0xffff
            || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
        {
            pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
            pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
        }
    }
    else
    {
        if (   pVmcb->ctrl.u16InterceptRdDRx
            || pVmcb->ctrl.u16InterceptWrDRx)
        {
            pVmcb->ctrl.u16InterceptRdDRx = 0;
            pVmcb->ctrl.u16InterceptWrDRx = 0;
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
        }
    }
    Log4Func(("DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7]));
}

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Exports the nested-guest hardware virtualization state into the nested-guest
 * VMCB.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcbNstGst Pointer to the nested-guest VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
static void hmR0SvmExportGuestHwvirtStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_HWVIRT)
    {
        /*
         * Ensure the nested-guest pause-filter counters don't exceed the outer guest values,
         * especially since SVM doesn't have a preemption timer.
         *
         * We do this here rather than in hmR0SvmSetupVmcbNested() as we may have been executing the
         * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters
         * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
         */
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
        uint16_t const uGuestPauseFilterCount     = pVM->hm.s.svm.cPauseFilter;
        uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
        if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
        {
            PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
            pVmcbNstGstCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
            pVmcbNstGstCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
            pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
        }
        else
        {
            pVmcbNstGstCtrl->u16PauseFilterCount     = uGuestPauseFilterCount;
            pVmcbNstGstCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
        }

        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_HWVIRT;
    }
}
#endif

/**
 * Exports the guest APIC TPR state into the VMCB.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 */
static int hmR0SvmExportGuestApicTpr(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (   PDMHasApic(pVM)
            && APICIsEnabled(pVCpu))
        {
            bool    fPendingIntr;
            uint8_t u8Tpr;
            int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
            AssertRCReturn(rc, rc);

            /* Assume that we need to trap all TPR accesses and thus need not check on
               every #VMEXIT if we should update the TPR. */
            Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking);
            pVCpu->hm.s.svm.fSyncVTpr = false;

            if (!pVM->hm.s.fTPRPatchingActive)
            {
                /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
                pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
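                /* Example: a guest TPR of 0x50 (priority class 5) is exported
                   as VTPR = 0x5; the lower four TPR bits don't affect interrupt
                   priority comparisons and are not represented. */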

                /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we
                   can deliver the interrupt to the guest. */
                if (fPendingIntr)
                    pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
                else
                {
                    pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
                    pVCpu->hm.s.svm.fSyncVTpr = true;
                }

                pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);
            }
            else
            {
                /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
                pVmcb->guest.u64LSTAR = u8Tpr;
                uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;

                /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
                if (fPendingIntr)
                    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
                else
                {
                    hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
                    pVCpu->hm.s.svm.fSyncVTpr = true;
                }
                pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
            }
        }
        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
    }
    return VINF_SUCCESS;
}


/**
 * Sets up the exception intercepts required for guest (or nested-guest)
 * execution in the VMCB.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
static void hmR0SvmExportGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    /* If we modify intercepts from here, please check & adjust hmR0SvmMergeVmcbCtrlsNested() if required. */
    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS)
    {
        /* Trap #UD for GIM provider (e.g. for hypercalls). */
        if (pVCpu->hm.s.fGIMTrapXcptUD)
            hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_UD);
        else
            hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_UD);

        /* Trap #BP for INT3 debug breakpoints set by the VM debugger. */
        if (pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
            hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_BP);
        else
            hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_BP);

        /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmExportGuestCR0(). */
        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS;
    }
}


#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Merges guest and nested-guest intercepts for executing the nested-guest using
 * hardware-assisted SVM.
 *
 * This merges the guest and nested-guest intercepts in a way that if the outer
 * guest intercept is set, we need to intercept it in the nested-guest as
 * well.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void hmR0SvmMergeVmcbCtrlsNested(PVMCPU pVCpu)
{
    PVM          pVM             = pVCpu->CTX_SUFF(pVM);
    PCSVMVMCB    pVmcb           = pVCpu->hm.s.svm.pVmcb;
    PSVMVMCB     pVmcbNstGst     = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;

    /* Merge the guest's CR intercepts into the nested-guest VMCB. */
    pVmcbNstGstCtrl->u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
    pVmcbNstGstCtrl->u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;

    /* Always intercept CR4 writes for tracking PGM mode changes. */
    pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(4);

    /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
    if (!pVM->hm.s.fNestedPaging)
    {
        pVmcbNstGstCtrl->u16InterceptRdCRx |= RT_BIT(3);
        pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(3);
    }

    /** @todo Figure out debugging with nested-guests, till then just intercept
     *        all DR[0-15] accesses. */
    pVmcbNstGstCtrl->u16InterceptRdDRx |= 0xffff;
    pVmcbNstGstCtrl->u16InterceptWrDRx |= 0xffff;

    /*
     * Merge the guest's exception intercepts into the nested-guest VMCB.
     *
     * - \#UD: Exclude these as the outer guest's GIM hypercalls are not applicable
     *         while executing the nested-guest.
     *
     * - \#BP: Exclude breakpoints set by the VM debugger for the outer guest. This can
     *         be tweaked later depending on how we wish to implement breakpoints.
     *
     * Warning!! This ASSUMES we only intercept \#UD for hypercall purposes and \#BP
     * for VM debugger breakpoints, see hmR0SvmExportGuestXcptIntercepts().
     */
#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    pVmcbNstGstCtrl->u32InterceptXcpt |= (pVmcb->ctrl.u32InterceptXcpt & ~(  RT_BIT(X86_XCPT_UD)
                                                                           | RT_BIT(X86_XCPT_BP)));
#else
    pVmcbNstGstCtrl->u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt;
#endif

    /*
     * Adjust intercepts while executing the nested-guest that differ from the
     * outer guest intercepts.
     *
     * - VINTR: Exclude the outer guest intercept as we don't need to cause VINTR #VMEXITs
     *          that belong to the nested-guest to the outer guest.
     *
     * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
     *            the nested-guest, the physical CPU raises a \#UD exception as expected.
     */
    pVmcbNstGstCtrl->u64InterceptCtrl |= (pVmcb->ctrl.u64InterceptCtrl & ~(  SVM_CTRL_INTERCEPT_VINTR
                                                                           | SVM_CTRL_INTERCEPT_VMMCALL))
                                      |  HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;

    Assert(   (pVmcbNstGstCtrl->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
           == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);

    /* Finally, update the VMCB clean bits. */
    pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
}
#endif


/**
 * Selects the appropriate function to run guest code.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0SvmSelectVMRunHandler(PVMCPU pVCpu)
{
    if (CPUMIsGuestInLongMode(pVCpu))
    {
#ifndef VBOX_ENABLE_64_BITS_GUESTS
        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#endif
        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);    /* Guaranteed by hmR3InitFinalizeR0(). */
#if HC_ARCH_BITS == 32
        /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
#else
        /* 64-bit host or hybrid host. */
        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
#endif
    }
    else
    {
        /* Guest is not in long mode, use the 32-bit handler. */
        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
    }
    return VINF_SUCCESS;
}


/**
 * Enters the AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pHostCpu    Pointer to the CPU info struct.
 */
VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
{
    AssertPtr(pVCpu);
    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    RT_NOREF(pHostCpu);

    LogFlowFunc(("pVCpu=%p\n", pVCpu));
    Assert(   (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
           == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));

    pVCpu->hm.s.fLeaveDone = false;
    return VINF_SUCCESS;
}


/**
 * Thread-context callback for AMD-V.
 *
 * @param   enmEvent    The thread-context event.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   fGlobalInit Whether global VT-x/AMD-V init. is used.
 * @thread  EMT(pVCpu)
 */
VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
{
    NOREF(fGlobalInit);

    switch (enmEvent)
    {
        case RTTHREADCTXEVENT_OUT:
        {
            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
            Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
            VMCPU_ASSERT_EMT(pVCpu);

            /* No longjmps (log-flush, locks) in this fragile context. */
            VMMRZCallRing3Disable(pVCpu);

            if (!pVCpu->hm.s.fLeaveDone)
            {
                hmR0SvmLeave(pVCpu, false /* fImportState */);
                pVCpu->hm.s.fLeaveDone = true;
            }

            /* Leave HM context, takes care of local init (term). */
            int rc = HMR0LeaveCpu(pVCpu);
            AssertRC(rc); NOREF(rc);

            /* Restore longjmp state. */
            VMMRZCallRing3Enable(pVCpu);
            STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
            break;
        }

        case RTTHREADCTXEVENT_IN:
        {
            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
            Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
            VMCPU_ASSERT_EMT(pVCpu);

            /* No longjmps (log-flush, locks) in this fragile context. */
            VMMRZCallRing3Disable(pVCpu);

            /*
             * Initialize the bare minimum state required for HM. This takes care of
             * initializing AMD-V if necessary (onlined CPUs, local init etc.)
             */
            int rc = hmR0EnterCpu(pVCpu);
            AssertRC(rc); NOREF(rc);
            Assert(   (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
                   == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));

            pVCpu->hm.s.fLeaveDone = false;

            /* Restore longjmp state. */
            VMMRZCallRing3Enable(pVCpu);
            break;
        }

        default:
            break;
    }
}


/**
 * Saves the host state.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks No-long-jump zone!!!
 */
VMMR0DECL(int) SVMR0ExportHostState(PVMCPU pVCpu)
{
    NOREF(pVCpu);

    /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_HOST_CONTEXT);
    return VINF_SUCCESS;
}


/**
 * Exports the guest state from the guest-CPU context into the VMCB.
 *
 * The CPU state will be loaded from these fields on every successful VM-entry.
 * Also sets up the appropriate VMRUN function to execute guest code based on
 * the guest CPU mode.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0SvmExportGuestState(PVMCPU pVCpu)
{
    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);

    PSVMVMCB  pVmcb = pVCpu->hm.s.svm.pVmcb;
    PCCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;

    Assert(pVmcb);
    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);

    pVmcb->guest.u64RIP    = pCtx->rip;
    pVmcb->guest.u64RSP    = pCtx->rsp;
    pVmcb->guest.u64RFlags = pCtx->eflags.u32;
    pVmcb->guest.u64RAX    = pCtx->rax;
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
    {
        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
        pVmcb->ctrl.IntCtrl.n.u1VGif = pCtx->hwvirt.fGif;
    }
#endif

    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
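    /* Assumption: interrupts are disabled around these exports so a host
       interrupt cannot trigger the thread-context hooks and touch fCtxChanged
       while the VMCB is being updated. */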
|
---|
2460 |
|
---|
2461 | int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcb);
|
---|
2462 | AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);
|
---|
2463 |
|
---|
2464 | hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcb);
|
---|
2465 | hmR0SvmExportGuestMsrs(pVCpu, pVmcb);
|
---|
2466 | hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
|
---|
2467 |
|
---|
2468 | ASMSetFlags(fEFlags);
|
---|
2469 |
|
---|
2470 | /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as we
|
---|
2471 | otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */
|
---|
2472 | hmR0SvmExportGuestApicTpr(pVCpu, pVmcb);
|
---|
2473 |
|
---|
2474 | rc = hmR0SvmSelectVMRunHandler(pVCpu);
|
---|
2475 | AssertRCReturn(rc, rc);
|
---|
2476 |
|
---|
2477 | /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
|
---|
2478 | ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( HM_CHANGED_GUEST_RIP
|
---|
2479 | | HM_CHANGED_GUEST_RFLAGS
|
---|
2480 | | HM_CHANGED_GUEST_GPRS_MASK
|
---|
2481 | | HM_CHANGED_GUEST_X87
|
---|
2482 | | HM_CHANGED_GUEST_SSE_AVX
|
---|
2483 | | HM_CHANGED_GUEST_OTHER_XSAVE
|
---|
2484 | | HM_CHANGED_GUEST_XCRx
|
---|
2485 | | HM_CHANGED_GUEST_TSC_AUX
|
---|
2486 | | HM_CHANGED_GUEST_OTHER_MSRS
|
---|
2487 | | HM_CHANGED_GUEST_HWVIRT
|
---|
2488 | | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS)));
|
---|
2489 |
|
---|
2490 | #ifdef VBOX_STRICT
|
---|
2491 | /*
|
---|
2492 | * All of the guest-CPU state and SVM keeper bits should be exported here by now,
|
---|
2493 | * except for the host-context and/or shared host-guest context bits.
|
---|
2494 | */
|
---|
2495 | uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
|
---|
2496 | RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
|
---|
2497 | AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)),
|
---|
2498 | ("fCtxChanged=%#RX64\n", fCtxChanged));
|
---|
2499 |
|
---|
2500 | /*
|
---|
2501 | * If we need to log state that isn't always imported, we'll need to import them here.
|
---|
2502 | * See hmR0SvmPostRunGuest() for which part of the state is imported uncondtionally.
|
---|
2503 | */
|
---|
2504 | hmR0SvmLogState(pVCpu, pVmcb, "hmR0SvmExportGuestState", 0 /* fFlags */, 0 /* uVerbose */);
|
---|
2505 | #endif
|
---|
2506 |
|
---|
2507 | STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
|
---|
2508 | return VINF_SUCCESS;
|
---|
2509 | }


#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Merges the guest and nested-guest MSR permission bitmap.
 *
 * If the guest is intercepting an MSR we need to intercept it regardless of
 * whether the nested-guest is intercepting it or not.
 *
 * @param   pHostCpu    Pointer to the physical CPU HM info. struct.
 * @param   pVCpu       The cross context virtual CPU structure.
 *
 * @remarks No-long-jmp zone!!!
 */
DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu)
{
    uint64_t const *pu64GstMsrpm    = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap;
    uint64_t const *pu64NstGstMsrpm = (uint64_t const *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
    uint64_t       *pu64DstMsrpm    = (uint64_t *)pHostCpu->n.svm.pvNstGstMsrpm;

    /* MSRPM bytes from offset 0x1800 are reserved, so we stop merging there. */
    uint32_t const offRsvdQwords = 0x1800 >> 3;
    for (uint32_t i = 0; i < offRsvdQwords; i++)
        pu64DstMsrpm[i] = pu64NstGstMsrpm[i] | pu64GstMsrpm[i];
}
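
/*
 * Illustrative note (not part of the original source): each MSR's read/write
 * intercept is a pair of bits in the MSRPM, so OR-ing the two bitmaps
 * qword-by-qword yields the union of the intercepts. E.g. if the outer guest
 * intercepts writes to an MSR and the nested-guest does not, the merged
 * bitmap still has the write-intercept bit set and the write traps to us.
 * A sketch of the per-qword merge performed above:
 *
 *     merged = guestBits | nestedGuestBits;   // intercept if either wants it
 */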


/**
 * Caches the nested-guest VMCB fields before we modify them for execution using
 * hardware-assisted SVM.
 *
 * @returns true if the VMCB was previously already cached, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @sa      HMSvmNstGstVmExitNotify.
 */
static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu)
{
    /*
     * Cache the nested-guest programmed VMCB fields if we have not cached them yet.
     * Otherwise we risk re-caching the values we may have modified, see @bugref{7243#c44}.
     *
     * Nested-paging CR3 is not saved back into the VMCB on #VMEXIT, hence no need to
     * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT".
     */
    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    bool const fWasCached = pVmcbNstGstCache->fCacheValid;
    if (!fWasCached)
    {
        PCSVMVMCB     pVmcbNstGst     = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
        PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
        pVmcbNstGstCache->u16InterceptRdCRx       = pVmcbNstGstCtrl->u16InterceptRdCRx;
        pVmcbNstGstCache->u16InterceptWrCRx       = pVmcbNstGstCtrl->u16InterceptWrCRx;
        pVmcbNstGstCache->u16InterceptRdDRx       = pVmcbNstGstCtrl->u16InterceptRdDRx;
        pVmcbNstGstCache->u16InterceptWrDRx       = pVmcbNstGstCtrl->u16InterceptWrDRx;
        pVmcbNstGstCache->u16PauseFilterThreshold = pVmcbNstGstCtrl->u16PauseFilterThreshold;
        pVmcbNstGstCache->u16PauseFilterCount     = pVmcbNstGstCtrl->u16PauseFilterCount;
        pVmcbNstGstCache->u32InterceptXcpt        = pVmcbNstGstCtrl->u32InterceptXcpt;
        pVmcbNstGstCache->u64InterceptCtrl        = pVmcbNstGstCtrl->u64InterceptCtrl;
        pVmcbNstGstCache->u64TSCOffset            = pVmcbNstGstCtrl->u64TSCOffset;
        pVmcbNstGstCache->fVIntrMasking           = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
        pVmcbNstGstCache->fNestedPaging           = pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging;
        pVmcbNstGstCache->fLbrVirt                = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
        pVmcbNstGstCache->fCacheValid             = true;
        Log4Func(("Cached VMCB fields\n"));
    }

    return fWasCached;
}


/**
 * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
 *
 * This is done the first time we enter nested-guest execution using SVM R0
 * until the nested-guest \#VMEXIT (not to be confused with physical CPU
 * \#VMEXITs which may or may not cause a corresponding nested-guest \#VMEXIT).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu)
{
    PSVMVMCB     pVmcbNstGst     = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;

    /*
     * First cache the nested-guest VMCB fields we may potentially modify.
     */
    bool const fVmcbCached = hmR0SvmCacheVmcbNested(pVCpu);
    if (!fVmcbCached)
    {
        /*
         * The IOPM of the nested-guest can be ignored because the guest always
         * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
         * than the nested-guest IOPM and swap the field back on the #VMEXIT.
         */
        pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;

        /*
         * Use the same nested-paging as the outer guest. We can't dynamically switch off
         * nested-paging suddenly while executing a VM (see assertion at the end of
         * Trap0eHandler() in PGMAllBth.h).
         */
        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;

        /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = 1;

        /*
         * Turn off TPR syncing on #VMEXIT for nested-guests as CR8 intercepts are subject
         * to the nested-guest intercepts and we always run with V_INTR_MASKING.
         */
        pVCpu->hm.s.svm.fSyncVTpr = false;

#ifdef DEBUG_ramshankar
        /* For debugging purposes - copy the LBR info. from the outer guest VMCB. */
        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
#endif

        /*
         * If we don't expose the Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
         * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
         */
        if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
                                               | SVM_CTRL_INTERCEPT_VMLOAD;

        /*
         * If we don't expose the Virtual GIF feature to the outer guest, we need to
         * intercept CLGI/STGI instructions executed by the nested-guest.
         */
        if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
            pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
                                               | SVM_CTRL_INTERCEPT_STGI;

        /* Merge the guest and nested-guest intercepts. */
        hmR0SvmMergeVmcbCtrlsNested(pVCpu);

        /* Update the VMCB clean bits. */
        pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    }
    else
    {
        Assert(!pVCpu->hm.s.svm.fSyncVTpr);
        Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
        Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
    }
}
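
/*
 * Illustrative note (not part of the original source): hmR0SvmMergeVmcbCtrlsNested()
 * follows the same union rule as the MSRPM merge above. Conceptually, for each
 * intercept field:
 *
 *     pVmcbNstGstCtrl->u64InterceptCtrl |= pVmcbGst->ctrl.u64InterceptCtrl;
 *
 * so an exit condition required by our own (outer) setup can never be masked
 * out by whatever the nested hypervisor programmed. This is a sketch of the
 * idea, not the exact implementation, which also has to special-case a few bits.
 */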


/**
 * Exports the nested-guest state into the VMCB.
 *
 * We need to export the entire state as we could be continuing nested-guest
 * execution at any point (not just immediately after VMRUN) and thus the VMCB
 * can be out-of-sync with the nested-guest state if it was executed in IEM.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0SvmExportGuestStateNested(PVMCPU pVCpu)
{
    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);

    PCCPUMCTX pCtx        = &pVCpu->cpum.GstCtx;
    PSVMVMCB  pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    Assert(pVmcbNstGst);

    hmR0SvmSetupVmcbNested(pVCpu);

    pVmcbNstGst->guest.u64RIP    = pCtx->rip;
    pVmcbNstGst->guest.u64RSP    = pCtx->rsp;
    pVmcbNstGst->guest.u64RFlags = pCtx->eflags.u32;
    pVmcbNstGst->guest.u64RAX    = pCtx->rax;

    RTCCUINTREG const fEFlags = ASMIntDisableFlags();

    int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcbNstGst);
    AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);

    hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcbNstGst);
    hmR0SvmExportGuestMsrs(pVCpu, pVmcbNstGst);
    hmR0SvmExportGuestHwvirtStateNested(pVCpu, pVmcbNstGst);

    ASMSetFlags(fEFlags);

    /* Nested VGIF not supported yet. */
    Assert(!pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable);

    rc = hmR0SvmSelectVMRunHandler(pVCpu);
    AssertRCReturn(rc, rc);

    /* Clear any bits that may be set but are exported unconditionally, as well as unused/reserved bits. */
    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(  HM_CHANGED_GUEST_RIP
                                                  | HM_CHANGED_GUEST_RFLAGS
                                                  | HM_CHANGED_GUEST_GPRS_MASK
                                                  | HM_CHANGED_GUEST_APIC_TPR
                                                  | HM_CHANGED_GUEST_X87
                                                  | HM_CHANGED_GUEST_SSE_AVX
                                                  | HM_CHANGED_GUEST_OTHER_XSAVE
                                                  | HM_CHANGED_GUEST_XCRx
                                                  | HM_CHANGED_GUEST_TSC_AUX
                                                  | HM_CHANGED_GUEST_OTHER_MSRS
                                                  | HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS
                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK)));

#ifdef VBOX_STRICT
    /*
     * All of the guest-CPU state and SVM keeper bits should be exported here by now, except
     * for the host-context and/or shared host-guest context bits.
     */
    uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)),
              ("fCtxChanged=%#RX64\n", fCtxChanged));

    /*
     * If we need to log state that isn't always imported, we'll need to import it here.
     * See hmR0SvmPostRunGuest() for which part of the state is imported unconditionally.
     */
    hmR0SvmLogState(pVCpu, pVmcbNstGst, "hmR0SvmExportGuestStateNested", 0 /* fFlags */, 0 /* uVerbose */);
#endif

    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
    return rc;
}
#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */


/**
 * Exports the state shared between the host and guest (or nested-guest) into
 * the VMCB.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
static void hmR0SvmExportSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!VMMRZCallRing3IsEnabled(pVCpu));

    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
    {
        /** @todo Figure out stepping with nested-guest. */
        PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
            hmR0SvmExportSharedDebugState(pVCpu, pVmcb);
        else
        {
            pVmcb->guest.u64DR6 = pCtx->dr[6];
            pVmcb->guest.u64DR7 = pCtx->dr[7];
        }
    }

    pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
    AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE),
              ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
}


/**
 * Worker for SVMR0ImportStateOnDemand.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
static void hmR0SvmImportGuestState(PVMCPU pVCpu, uint64_t fWhat)
{
    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);

    PCPUMCTX           pCtx       = &pVCpu->cpum.GstCtx;
    PCSVMVMCB          pVmcb      = hmR0SvmGetCurrentVmcb(pVCpu);
    PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
    PCSVMVMCBCTRL      pVmcbCtrl  = &pVmcb->ctrl;

    Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));

    /*
     * We disable interrupts to make the updating of the state and in particular
     * the fExtrn modification atomic with respect to preemption hooks.
     */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();

    fWhat &= pCtx->fExtrn;
    if (fWhat)
    {
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        if (fWhat & CPUMCTX_EXTRN_HWVIRT)
        {
            if (   !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
                && pVmcbCtrl->IntCtrl.n.u1VGifEnable)
            {
                /* We don't yet support passing the VGIF feature to the guest. */
                Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fVGif);
                pCtx->hwvirt.fGif = pVmcbCtrl->IntCtrl.n.u1VGif;
            }
        }

        if (fWhat & CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ)
        {
            if (   !pVmcbCtrl->IntCtrl.n.u1VIrqPending
                && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
        }
#endif

        if (fWhat & CPUMCTX_EXTRN_HM_SVM_INT_SHADOW)
        {
            if (pVmcbCtrl->IntShadow.n.u1IntShadow)
                EMSetInhibitInterruptsPC(pVCpu, pVmcbGuest->u64RIP);
            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }

        if (fWhat & CPUMCTX_EXTRN_RIP)
            pCtx->rip = pVmcbGuest->u64RIP;

        if (fWhat & CPUMCTX_EXTRN_RFLAGS)
            pCtx->eflags.u32 = pVmcbGuest->u64RFlags;

        if (fWhat & CPUMCTX_EXTRN_RSP)
            pCtx->rsp = pVmcbGuest->u64RSP;

        if (fWhat & CPUMCTX_EXTRN_RAX)
            pCtx->rax = pVmcbGuest->u64RAX;

        if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
        {
            if (fWhat & CPUMCTX_EXTRN_CS)
            {
                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, CS, cs);
                /* Correct the CS granularity bit. Haven't seen it being wrong in any other register (yet). */
                /** @todo SELM might need to be fixed as it too should not care about the
                 *        granularity bit. See @bugref{6785}. */
                if (   !pCtx->cs.Attr.n.u1Granularity
                    &&  pCtx->cs.Attr.n.u1Present
                    &&  pCtx->cs.u32Limit > UINT32_C(0xfffff))
                {
                    Assert((pCtx->cs.u32Limit & 0xfff) == 0xfff);
                    pCtx->cs.Attr.n.u1Granularity = 1;
                }
                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, cs);
            }
            if (fWhat & CPUMCTX_EXTRN_SS)
            {
                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, SS, ss);
                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ss);
                /*
                 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the
                 * VMCB and use that, so it's possible that the SS DPL isn't updated by
                 * AMD-V when the CPL changes during guest execution. Observed on some
                 * AMD Fusion CPUs with 64-bit guests.
                 *
                 * See AMD spec. 15.5.1 "Basic operation".
                 */
                Assert(!(pVmcbGuest->u8CPL & ~0x3));
                uint8_t const uCpl = pVmcbGuest->u8CPL;
                if (pCtx->ss.Attr.n.u2Dpl != uCpl)
                    pCtx->ss.Attr.n.u2Dpl = uCpl & 0x3;
            }
            if (fWhat & CPUMCTX_EXTRN_DS)
            {
                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, DS, ds);
                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ds);
            }
            if (fWhat & CPUMCTX_EXTRN_ES)
            {
                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, ES, es);
                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, es);
            }
            if (fWhat & CPUMCTX_EXTRN_FS)
            {
                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, FS, fs);
                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, fs);
            }
            if (fWhat & CPUMCTX_EXTRN_GS)
            {
                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, GS, gs);
                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, gs);
            }
        }

        if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
        {
            if (fWhat & CPUMCTX_EXTRN_TR)
            {
                /*
                 * Fixup TR attributes so it's compatible with Intel. Important when saved-states
                 * are used between Intel and AMD, see @bugref{6208#c39}.
                 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode.
                 */
                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, TR, tr);
                if (pCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
                {
                    if (   pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
                        || CPUMIsGuestInLongModeEx(pCtx))
                        pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
                    else if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
                        pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                }
            }

            if (fWhat & CPUMCTX_EXTRN_LDTR)
                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, LDTR, ldtr);

            if (fWhat & CPUMCTX_EXTRN_GDTR)
            {
                pCtx->gdtr.cbGdt = pVmcbGuest->GDTR.u32Limit;
                pCtx->gdtr.pGdt  = pVmcbGuest->GDTR.u64Base;
            }

            if (fWhat & CPUMCTX_EXTRN_IDTR)
            {
                pCtx->idtr.cbIdt = pVmcbGuest->IDTR.u32Limit;
                pCtx->idtr.pIdt  = pVmcbGuest->IDTR.u64Base;
            }
        }

        if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
        {
            pCtx->msrSTAR   = pVmcbGuest->u64STAR;
            pCtx->msrLSTAR  = pVmcbGuest->u64LSTAR;
            pCtx->msrCSTAR  = pVmcbGuest->u64CSTAR;
            pCtx->msrSFMASK = pVmcbGuest->u64SFMASK;
        }

        if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
        {
            pCtx->SysEnter.cs  = pVmcbGuest->u64SysEnterCS;
            pCtx->SysEnter.eip = pVmcbGuest->u64SysEnterEIP;
            pCtx->SysEnter.esp = pVmcbGuest->u64SysEnterESP;
        }

        if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
            pCtx->msrKERNELGSBASE = pVmcbGuest->u64KernelGSBase;

        if (fWhat & CPUMCTX_EXTRN_DR_MASK)
        {
            if (fWhat & CPUMCTX_EXTRN_DR6)
            {
                if (!pVCpu->hm.s.fUsingHyperDR7)
                    pCtx->dr[6] = pVmcbGuest->u64DR6;
                else
                    CPUMSetHyperDR6(pVCpu, pVmcbGuest->u64DR6);
            }

            if (fWhat & CPUMCTX_EXTRN_DR7)
            {
                if (!pVCpu->hm.s.fUsingHyperDR7)
                    pCtx->dr[7] = pVmcbGuest->u64DR7;
                else
                    Assert(pVmcbGuest->u64DR7 == CPUMGetHyperDR7(pVCpu));
            }
        }

        if (fWhat & CPUMCTX_EXTRN_CR_MASK)
        {
            if (fWhat & CPUMCTX_EXTRN_CR0)
            {
                /* We intercept changes to all CR0 bits except maybe the TS & MP bits. */
                uint64_t const uCr0 = (pCtx->cr0          & ~(X86_CR0_TS | X86_CR0_MP))
                                    | (pVmcbGuest->u64CR0 &  (X86_CR0_TS | X86_CR0_MP));
                VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
                CPUMSetGuestCR0(pVCpu, uCr0);
                VMMRZCallRing3Enable(pVCpu);
            }

            if (fWhat & CPUMCTX_EXTRN_CR2)
                pCtx->cr2 = pVmcbGuest->u64CR2;

            if (fWhat & CPUMCTX_EXTRN_CR3)
            {
                if (   pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
                    && pCtx->cr3 != pVmcbGuest->u64CR3)
                {
                    CPUMSetGuestCR3(pVCpu, pVmcbGuest->u64CR3);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
                }
            }

            /* Changes to CR4 are always intercepted. */
        }

        /* Update fExtrn. */
        pCtx->fExtrn &= ~fWhat;

        /* If everything has been imported, clear the HM keeper bit. */
        if (!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL))
        {
            pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
            Assert(!pCtx->fExtrn);
        }
    }
    else
        Assert(!pCtx->fExtrn || (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));

    ASMSetFlags(fEFlags);

    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);

    /*
     * Honor any pending CR3 updates.
     *
     * Consider this scenario: #VMEXIT -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp
     * -> hmR0SvmCallRing3Callback() -> VMMRZCallRing3Disable() -> hmR0SvmImportGuestState()
     * -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp -> continue with #VMEXIT
     * handling -> hmR0SvmImportGuestState() and here we are.
     *
     * The reason for such complicated handling is because VM-exits that call into PGM expect
     * CR3 to be up-to-date and thus any CR3-saves -before- the VM-exit (longjmp) would've
     * postponed the CR3 update via the force-flag and cleared CR3 from fExtrn. Any SVM R0
     * VM-exit handler that requests CR3 to be saved will end up here and we call PGMUpdateCR3().
     *
     * Since the longjmp exit path can't check these CR3 force-flags and call code that takes a
     * lock again, and does not process force-flags like regular exits to ring-3 do, we cover
     * for it here.
     */
    if (   VMMRZCallRing3IsEnabled(pVCpu)
        && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    {
        Assert(pCtx->cr3 == pVmcbGuest->u64CR3);
        PGMUpdateCR3(pVCpu, pCtx->cr3);
    }
}
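
/*
 * Illustrative note (not part of the original source): pCtx->fExtrn is a
 * bitmask of state that still lives "externally" in the VMCB rather than in
 * the CPUM context. A caller needing, say, RIP and RFLAGS up to date would do:
 *
 *     hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *
 * after which those fExtrn bits are clear and the CPUMCTX fields are valid.
 */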


/**
 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
 * context.
 *
 * Currently there is no residual state left in the CPU that is not updated in the
 * VMCB.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
{
    hmR0SvmImportGuestState(pVCpu, fWhat);
    return VINF_SUCCESS;
}
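
/*
 * Illustrative note (not part of the original source): this is the exported
 * ring-0 interface for the worker above; it always returns VINF_SUCCESS since
 * the worker cannot fail. E.g. to pull the entire SVM-managed state back into
 * the CPUM context:
 *
 *     int rc = SVMR0ImportStateOnDemand(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
 *     AssertRC(rc);
 */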


/**
 * Does the necessary state syncing before returning to ring-3 for any reason
 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fImportState    Whether to import the guest state from the VMCB back
 *                          to the guest-CPU context.
 *
 * @remarks No-long-jmp zone!!!
 */
static void hmR0SvmLeave(PVMCPU pVCpu, bool fImportState)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    Assert(VMMR0IsLogFlushDisabled(pVCpu));

    /*
     * !!! IMPORTANT !!!
     * If you modify code here, make sure to check whether hmR0SvmCallRing3Callback() needs to be updated too.
     */

    /* Import the guest state back into the guest-CPU context if necessary. */
    if (fImportState)
        hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);

    /* Restore host FPU state if necessary and resync on next R0 reentry. */
    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
    Assert(!CPUMIsGuestFPUStateActive(pVCpu));

    /*
     * Restore host debug registers if necessary and resync on next R0 reentry.
     */
#ifdef VBOX_STRICT
    if (CPUMIsHyperDebugStateActive(pVCpu))
    {
        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; /** @todo nested-guest. */
        Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
        Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
    }
#endif
    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    Assert(!CPUMIsGuestDebugStateActive(pVCpu));

    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);

    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
}


/**
 * Leaves the AMD-V session.
 *
 * Only used while returning to ring-3, either due to a longjmp or a voluntary
 * exit to ring-3.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int hmR0SvmLeaveSession(PVMCPU pVCpu)
{
    HM_DISABLE_PREEMPT(pVCpu);
    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
       and done this from the SVMR0ThreadCtxCallback(). */
    if (!pVCpu->hm.s.fLeaveDone)
    {
        hmR0SvmLeave(pVCpu, true /* fImportState */);
        pVCpu->hm.s.fLeaveDone = true;
    }

    /*
     * !!! IMPORTANT !!!
     * If you modify code here, make sure to check whether hmR0SvmCallRing3Callback() needs to be updated too.
     */

    /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
    /* Deregister the hook now that we've left HM context before re-enabling preemption. */
    VMMR0ThreadCtxHookDisable(pVCpu);

    /* Leave HM context. This takes care of local init (term). */
    int rc = HMR0LeaveCpu(pVCpu);

    HM_RESTORE_PREEMPT();
    return rc;
}


/**
 * Does the necessary state syncing before doing a longjmp to ring-3.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks No-long-jmp zone!!!
 */
static int hmR0SvmLongJmpToRing3(PVMCPU pVCpu)
{
    return hmR0SvmLeaveSession(pVCpu);
}


/**
 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
 * any remaining host state) before we longjump to ring-3 and possibly get
 * preempted.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   enmOperation    The operation causing the ring-3 longjump.
 * @param   pvUser          The user argument, NULL (currently unused).
 */
static DECLCALLBACK(int) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
{
    RT_NOREF_PV(pvUser);

    if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
    {
        /*
         * !!! IMPORTANT !!!
         * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() need
         * to be updated too. This is a stripped down version which gets out ASAP, trying to not trigger any assertion.
         */
        VMMRZCallRing3RemoveNotification(pVCpu);
        VMMRZCallRing3Disable(pVCpu);
        HM_DISABLE_PREEMPT(pVCpu);

        /* Import the entire guest state. */
        hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);

        /* Restore host FPU state if necessary and resync on next R0 reentry. */
        CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);

        /* Restore host debug registers if necessary and resync on next R0 reentry. */
        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);

        /* Deregister the hook now that we've left HM context before re-enabling preemption. */
        /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
        VMMR0ThreadCtxHookDisable(pVCpu);

        /* Leave HM context. This takes care of local init (term). */
        HMR0LeaveCpu(pVCpu);

        HM_RESTORE_PREEMPT();
        return VINF_SUCCESS;
    }

    Assert(pVCpu);
    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);

    VMMRZCallRing3Disable(pVCpu);
    Assert(VMMR0IsLogFlushDisabled(pVCpu));

    Log4Func(("Calling hmR0SvmLongJmpToRing3\n"));
    int rc = hmR0SvmLongJmpToRing3(pVCpu);
    AssertRCReturn(rc, rc);

    VMMRZCallRing3Enable(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Takes the necessary actions before going back to ring-3.
 *
 * An action requires us to go back to ring-3. This function does the necessary
 * steps before we can safely return to ring-3. This is not the same as a
 * longjmp to ring-3; this is voluntary.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rcExit  The reason for exiting to ring-3. Can be
 *                  VINF_VMM_UNKNOWN_RING3_CALL.
 */
static int hmR0SvmExitToRing3(PVMCPU pVCpu, int rcExit)
{
    Assert(pVCpu);
    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);

    /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
    VMMRZCallRing3Disable(pVCpu);
    Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions,
              pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions));

    /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring-3. */
    if (pVCpu->hm.s.Event.fPending)
    {
        hmR0SvmPendingEventToTrpmTrap(pVCpu);
        Assert(!pVCpu->hm.s.Event.fPending);
    }

    /* Sync the necessary state for going back to ring-3. */
    hmR0SvmLeaveSession(pVCpu);
    STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
                             | CPUM_CHANGED_LDTR
                             | CPUM_CHANGED_GDTR
                             | CPUM_CHANGED_IDTR
                             | CPUM_CHANGED_TR
                             | CPUM_CHANGED_HIDDEN_SEL_REGS);
    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
        && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
    {
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    }

    /* Update the exit-to-ring-3 reason. */
    pVCpu->hm.s.rcLastExitToR3 = rcExit;

    /* On our way back from ring-3, reload the guest state if there is a possibility of it being changed. */
    if (rcExit != VINF_EM_RAW_INTERRUPT)
    {
        Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    }

    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);

    /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
    VMMRZCallRing3RemoveNotification(pVCpu);
    VMMRZCallRing3Enable(pVCpu);

    /*
     * If we're emulating an instruction, we shouldn't have any TRPM traps pending
     * and if we're injecting an event we should have a TRPM trap pending.
     */
    AssertReturnStmt(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu),
                     pVCpu->hm.s.u32HMError = rcExit,
                     VERR_SVM_IPE_5);
    AssertReturnStmt(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu),
                     pVCpu->hm.s.u32HMError = rcExit,
                     VERR_SVM_IPE_4);

    return rcExit;
}


/**
 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
 * intercepts.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks No-long-jump zone!!!
 */
static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    /*
     * Avoid intercepting RDTSC/RDTSCP if we determined the host TSC (++) is stable
     * and, in the case of a nested-guest, if the nested-VMCB specifies it is not
     * intercepting RDTSC/RDTSCP either.
     */
    bool       fParavirtTsc;
    uint64_t   uTscOffset;
    bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu->CTX_SUFF(pVM), pVCpu, &uTscOffset, &fParavirtTsc);

    bool fIntercept;
    if (fCanUseRealTsc)
        fIntercept = hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
    else
    {
        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
        fIntercept = true;
    }

    if (!fIntercept)
    {
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
        if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
            uTscOffset = HMSvmNstGstApplyTscOffset(pVCpu, uTscOffset);
#endif

        /* Update the TSC offset in the VMCB and the relevant clean bits. */
        pVmcb->ctrl.u64TSCOffset = uTscOffset;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);

    /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
       information before every VM-entry, hence we have nothing to do here at the moment. */
    if (fParavirtTsc)
        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
}
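
/*
 * Illustrative note (not part of the original source): with the RDTSC
 * intercept clear, the hardware computes the value the guest observes as
 *
 *     guest_tsc = host_tsc + VMCB.ctrl.u64TSCOffset
 *
 * so TM only has to pick an offset that keeps the virtual TSC monotonic and
 * no #VMEXIT round-trip is needed for each RDTSC/RDTSCP the guest executes.
 */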


/**
 * Sets an event as a pending event to be injected into the guest.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pEvent              Pointer to the SVM event.
 * @param   GCPtrFaultAddress   The fault-address (CR2) in case it's a
 *                              page-fault.
 *
 * @remarks Statistics counter assumes this is a guest event being reflected to
 *          the guest i.e. 'StatInjectPendingReflect' is incremented always.
 */
DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
{
    Assert(!pVCpu->hm.s.Event.fPending);
    Assert(pEvent->n.u1Valid);

    pVCpu->hm.s.Event.u64IntInfo        = pEvent->u;
    pVCpu->hm.s.Event.fPending          = true;
    pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;

    Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
              (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
}


/**
 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPU pVCpu)
{
    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector = X86_XCPT_UD;
    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
}


/**
 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(void) hmR0SvmSetPendingXcptDB(PVMCPU pVCpu)
{
    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector = X86_XCPT_DB;
    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
}


/**
 * Sets a page fault (\#PF) exception as pending-for-injection into the VM.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   u32ErrCode      The error-code for the page-fault.
 * @param   uFaultAddress   The page fault address (CR2).
 *
 * @remarks This updates the guest CR2 with @a uFaultAddress!
 */
DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPU pVCpu, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
{
    SVMEVENT Event;
    Event.u                  = 0;
    Event.n.u1Valid          = 1;
    Event.n.u3Type           = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector         = X86_XCPT_PF;
    Event.n.u1ErrorCodeValid = 1;
    Event.n.u32ErrorCode     = u32ErrCode;

    /* Update CR2 of the guest. */
    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR2);
    if (pVCpu->cpum.GstCtx.cr2 != uFaultAddress)
    {
        pVCpu->cpum.GstCtx.cr2 = uFaultAddress;
        /* The VMCB clean bit for CR2 will be updated while re-loading the guest state. */
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
    }

    hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
}
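
/*
 * Illustrative note (not part of the original source): a typical caller
 * reflecting a guest write fault taken at user level would pass the standard
 * x86 #PF error-code bits, e.g.:
 *
 *     hmR0SvmSetPendingXcptPF(pVCpu, X86_TRAP_PF_P | X86_TRAP_PF_RW | X86_TRAP_PF_US,
 *                             GCPtrFaultAddress);
 *
 * where bit 0 (P) = page-level protection violation, bit 1 (RW) = write
 * access, and bit 2 (US) = user-mode access.
 */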


/**
 * Sets a math-fault (\#MF) exception as pending-for-injection into the VM.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(void) hmR0SvmSetPendingXcptMF(PVMCPU pVCpu)
{
    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector = X86_XCPT_MF;
    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
}


/**
 * Sets a double fault (\#DF) exception as pending-for-injection into the VM.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPU pVCpu)
{
    SVMEVENT Event;
    Event.u                  = 0;
    Event.n.u1Valid          = 1;
    Event.n.u3Type           = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector         = X86_XCPT_DF;
    Event.n.u1ErrorCodeValid = 1;
    Event.n.u32ErrorCode     = 0;
    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
}


/**
 * Injects an event into the guest upon VMRUN by updating the relevant field
 * in the VMCB.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the guest VM control block.
 * @param   pEvent  Pointer to the event.
 *
 * @remarks No-long-jump zone!!!
 * @remarks Requires CR0!
 */
DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMEVENT pEvent)
{
    Assert(!pVmcb->ctrl.EventInject.n.u1Valid);
    pVmcb->ctrl.EventInject.u = pEvent->u;
    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
    RT_NOREF(pVCpu);

    Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
              (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
}
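
/*
 * Illustrative note (not part of the original source): the EVENTINJ field
 * written above follows AMD spec. 15.20 "Event Injection":
 *
 *     bits  0..7   VECTOR      interrupt/exception vector
 *     bits  8..10  TYPE        0=ext. interrupt, 2=NMI, 3=exception, 4=soft int.
 *     bit   11     EV          error-code valid
 *     bit   31     V           valid
 *     bits 32..63  ERRORCODE   pushed if EV is set
 *
 * which is the layout the SVMEVENT bitfield union models.
 */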


/**
 * Converts any TRPM trap into a pending HM event. This is typically used when
 * entering from ring-3 (not longjmp returns).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void hmR0SvmTrpmTrapToPendingEvent(PVMCPU pVCpu)
{
    Assert(TRPMHasTrap(pVCpu));
    Assert(!pVCpu->hm.s.Event.fPending);

    uint8_t     uVector;
    TRPMEVENT   enmTrpmEvent;
    RTGCUINT    uErrCode;
    RTGCUINTPTR GCPtrFaultAddress;
    uint8_t     cbInstr;

    int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
    AssertRC(rc);

    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u8Vector = uVector;

    /* Refer AMD spec. 15.20 "Event Injection" for the format. */
    if (enmTrpmEvent == TRPM_TRAP)
    {
        Event.n.u3Type = SVM_EVENT_EXCEPTION;
        switch (uVector)
        {
            case X86_XCPT_NMI:
            {
                Event.n.u3Type = SVM_EVENT_NMI;
                break;
            }

            case X86_XCPT_PF:
            case X86_XCPT_DF:
            case X86_XCPT_TS:
            case X86_XCPT_NP:
            case X86_XCPT_SS:
            case X86_XCPT_GP:
            case X86_XCPT_AC:
            {
                Event.n.u1ErrorCodeValid = 1;
                Event.n.u32ErrorCode     = uErrCode;
                break;
            }
        }
    }
    else if (enmTrpmEvent == TRPM_HARDWARE_INT)
        Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
    else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
        Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
    else
        AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));

    rc = TRPMResetTrap(pVCpu);
    AssertRC(rc);

    Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
          !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));

    hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
}


/**
 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
 * AMD-V to execute any instruction.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
{
    Assert(pVCpu->hm.s.Event.fPending);
    Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);

    SVMEVENT Event;
    Event.u = pVCpu->hm.s.Event.u64IntInfo;

    uint8_t   uVector     = Event.n.u8Vector;
    uint8_t   uVectorType = Event.n.u3Type;
    TRPMEVENT enmTrapType = HMSvmEventToTrpmEventType(&Event);

    Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, uVectorType));

    int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
    AssertRC(rc);

    if (Event.n.u1ErrorCodeValid)
        TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);

    if (   uVectorType == SVM_EVENT_EXCEPTION
        && uVector     == X86_XCPT_PF)
    {
        TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
        Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
    }
    else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
    {
        AssertMsg(   uVectorType == SVM_EVENT_SOFTWARE_INT
                  || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
                  ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
        TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
    }
    pVCpu->hm.s.Event.fPending = false;
}


/**
 * Checks if the guest (or nested-guest) has an interrupt shadow active right
 * now.
 *
 * @returns @c true if the interrupt shadow is active, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks No-long-jump zone!!!
 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
 */
static bool hmR0SvmIsIntrShadowActive(PVMCPU pVCpu)
{
    /*
     * Instructions like STI and MOV SS inhibit interrupts till the next instruction
     * completes. Check if we should inhibit interrupts or clear any existing
     * interrupt inhibition.
     */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
        {
            /*
             * We can clear the inhibit force flag as even if we go back to the recompiler
             * without executing guest code in AMD-V, the flag's condition to be cleared is
             * met and thus the cleared state is correct.
             */
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
            return false;
        }
        return true;
    }
    return false;
}
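
/*
 * Illustrative note (not part of the original source): the classic case is a
 * guest executing
 *
 *     sti         ; interrupts stay inhibited for one more instruction
 *     ret         ; the interrupt shadow covers this instruction
 *
 * If we stopped the guest between the STI and the RET, the shadow must be
 * honoured before we can inject anything, which is what the check above does.
 */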


/**
 * Sets the virtual interrupt intercept control in the VMCB.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 */
static void hmR0SvmSetIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    /*
     * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when the guest
     * is ready to accept interrupts. At #VMEXIT, we then get the interrupt from the APIC
     * (updating ISR at the right time) and inject the interrupt.
     *
     * When AVIC is supported, we could make use of the asynchronous delivery without
     * #VMEXIT and we would be passing the AVIC page to SVM.
     *
     * In AMD-V, an interrupt window is achieved using a combination of V_IRQ (an interrupt
     * is pending), V_IGN_TPR (ignore TPR priorities) and the VINTR intercept all being set.
     */
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    /*
     * Currently we don't overlay interrupt windows and if there's any V_IRQ pending in the
     * nested-guest VMCB, we avoid setting up any interrupt window on behalf of the outer
     * guest.
     */
    /** @todo Does this mean we end up prioritizing virtual interrupt
     *        delivery/window over a physical interrupt (from the outer guest)
     *        that might be pending? */
    bool const fEnableIntWindow = !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
    if (!fEnableIntWindow)
    {
        Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx));
        Log4(("Nested-guest V_IRQ already pending\n"));
    }
#else
    bool const fEnableIntWindow = true;
    RT_NOREF(pVCpu);
#endif
    if (fEnableIntWindow)
    {
        Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
        pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
        Log4(("Set VINTR intercept\n"));
    }
}
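
/*
 * Illustrative note (not part of the original source): the interrupt-window
 * dance is, conceptually:
 *
 *     pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;   // fake a pending V_IRQ
 *     hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
 *     // ... VMRUN; once the guest can take interrupts (IF=1, no shadow),
 *     // the CPU raises SVM_EXIT_VINTR and we inject the real interrupt then.
 */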


/**
 * Clears the virtual interrupt intercept control in the VMCB as we have
 * determined the guest is unable to process any interrupts at this point in
 * time.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 */
static void hmR0SvmClearIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
    if (    pVmcbCtrl->IntCtrl.n.u1VIrqPending
        || (pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
    {
        pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
        pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
        hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_VINTR);
        Log4(("Cleared VINTR intercept\n"));
    }
}
|
---|

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Evaluates the event to be delivered to the nested-guest and sets it as the
 * pending event.
 *
 * @returns VBox strict status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static VBOXSTRICTRC hmR0SvmEvaluatePendingEventNested(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
    HMSVM_CPUMCTX_ASSERT(pVCpu,   CPUMCTX_EXTRN_HWVIRT
                                | CPUMCTX_EXTRN_RFLAGS
                                | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW
                                | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);

    Assert(!pVCpu->hm.s.Event.fPending);
    Assert(pCtx->hwvirt.fGif);
    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    Assert(pVmcb);

    bool const fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
    bool const fIntShadow  = hmR0SvmIsIntrShadowActive(pVCpu);
    bool const fBlockNmi   = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);

    Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n",
              fVirtualGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));

    /** @todo SMI. SMIs take priority over NMIs. */

    /*
     * Check if the guest can receive NMIs.
     * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
     * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
     */
    if (    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)
        && !fBlockNmi)
    {
        if (    fVirtualGif
            && !fIntShadow)
        {
            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
            {
                Log4(("Intercepting NMI -> #VMEXIT\n"));
                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
            }

            Log4(("Setting NMI pending for injection\n"));
            SVMEVENT Event;
            Event.u = 0;
            Event.n.u1Valid  = 1;
            Event.n.u8Vector = X86_XCPT_NMI;
            Event.n.u3Type   = SVM_EVENT_NMI;
            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
        }
        else if (!fVirtualGif)
            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
        else
            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
    }
    /*
     * Check if the nested-guest can receive external interrupts (generated by the guest's
     * PIC/APIC).
     *
     * External interrupts, NMI, SMI etc. from the physical CPU are -always- intercepted
     * when executing using hardware-assisted SVM, see HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS.
     *
     * External interrupts that are generated for the outer guest may be intercepted
     * depending on how the nested-guest VMCB was programmed by guest software.
     *
     * Physical interrupts always take priority over virtual interrupts,
     * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
     */
    else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
             && !pVCpu->hm.s.fSingleInstruction)
    {
        if (    fVirtualGif
            && !fIntShadow
            &&  CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
        {
            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
            {
                Log4(("Intercepting INTR -> #VMEXIT\n"));
                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
            }

            uint8_t u8Interrupt;
            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
            if (RT_SUCCESS(rc))
            {
                Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
                SVMEVENT Event;
                Event.u = 0;
                Event.n.u1Valid  = 1;
                Event.n.u8Vector = u8Interrupt;
                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
            }
            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
            {
                /*
                 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
                 * updated eventually when the TPR is written by the guest.
                 */
                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
            }
            else
                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
        }
        else if (!fVirtualGif)
            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
        else
            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
    }

    return VINF_SUCCESS;
}
#endif
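
/*
 * Note on the nested-guest flow above: when the nested-guest VMCB intercepts the event
 * (NMI/INTR), nothing is injected; instead the #VMEXIT is emulated via IEM, handing
 * control back to the outer guest's hypervisor just as real hardware would.
 */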

/**
 * Evaluates the event to be delivered to the guest and sets it as the pending
 * event.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
    HMSVM_CPUMCTX_ASSERT(pVCpu,   CPUMCTX_EXTRN_HWVIRT
                                | CPUMCTX_EXTRN_RFLAGS
                                | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW);

    Assert(!pVCpu->hm.s.Event.fPending);
    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    Assert(pVmcb);

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    bool const fGif = pCtx->hwvirt.fGif;
#else
    bool const fGif = true;
#endif
    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
    bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
    bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);

    Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool fIntPending=%RTbool NMI pending=%RTbool\n",
              fGif, fBlockNmi, fBlockInt, fIntShadow,
              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));

    /** @todo SMI. SMIs take priority over NMIs. */

    /*
     * Check if the guest can receive NMIs.
     * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
     * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
     */
    if (    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)
        && !fBlockNmi)
    {
        if (    fGif
            && !fIntShadow)
        {
            Log4(("Setting NMI pending for injection\n"));
            SVMEVENT Event;
            Event.u = 0;
            Event.n.u1Valid  = 1;
            Event.n.u8Vector = X86_XCPT_NMI;
            Event.n.u3Type   = SVM_EVENT_NMI;
            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
        }
        else if (!fGif)
            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
        else
            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
    }
    /*
     * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt()
     * returns a valid interrupt we -must- deliver the interrupt. We can no longer re-request
     * it from the APIC device.
     */
    else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
             && !pVCpu->hm.s.fSingleInstruction)
    {
        if (    fGif
            && !fBlockInt
            && !fIntShadow)
        {
            uint8_t u8Interrupt;
            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
            if (RT_SUCCESS(rc))
            {
                Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
                SVMEVENT Event;
                Event.u = 0;
                Event.n.u1Valid  = 1;
                Event.n.u8Vector = u8Interrupt;
                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
            }
            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
            {
                /*
                 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
                 * updated eventually when the TPR is written by the guest.
                 */
                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
            }
            else
                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
        }
        else if (!fGif)
            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
        else
            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
    }
}
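
/*
 * Note: unlike the nested-guest variant above, the plain guest path only needs to
 * honour EFLAGS.IF (fBlockInt) for external interrupts; there are no nested intercepts
 * or V_INTR_MASKING to consult, hence no strict status code is needed here.
 */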


/**
 * Injects any pending events into the guest (or nested-guest).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 *
 * @remarks Must only be called when we are guaranteed to enter
 *          hardware-assisted SVM execution and not return to ring-3
 *          prematurely.
 */
static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    Assert(!TRPMHasTrap(pVCpu));
    Assert(!VMMRZCallRing3IsEnabled(pVCpu));

    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
#ifdef VBOX_STRICT
    PCCPUMCTX pCtx       = &pVCpu->cpum.GstCtx;
    bool const fGif      = pCtx->hwvirt.fGif;
    bool       fAllowInt = fGif;
    if (fGif)
    {
        /*
         * For nested-guests we have no way to determine if we're injecting a physical or
         * virtual interrupt at this point. Hence the partial verification below.
         */
        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
            fAllowInt = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx) || CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx);
        else
            fAllowInt = RT_BOOL(pCtx->eflags.u32 & X86_EFL_IF);
    }
#endif

    if (pVCpu->hm.s.Event.fPending)
    {
        SVMEVENT Event;
        Event.u = pVCpu->hm.s.Event.u64IntInfo;
        Assert(Event.n.u1Valid);

        /*
         * Validate event injection pre-conditions.
         */
        if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
        {
            Assert(fAllowInt);
            Assert(!fIntShadow);
        }
        else if (Event.n.u3Type == SVM_EVENT_NMI)
        {
            Assert(fGif);
            Assert(!fIntShadow);
        }

        /*
         * Before injecting an NMI we must set VMCPU_FF_BLOCK_NMIS to prevent nested NMIs. We
         * do this only when we are surely going to inject the NMI, as otherwise if we return
         * to ring-3 prematurely we could leave NMIs blocked indefinitely upon re-entry into
         * SVM R0.
         *
         * With VT-x, this is handled by the guest interruptibility-state VMCS field, which
         * the CPU updates after actually delivering the NMI and which we read on VM-exit to
         * determine the state.
         */
        if (    Event.n.u3Type   == SVM_EVENT_NMI
            &&  Event.n.u8Vector == X86_XCPT_NMI
            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
        }

        /*
         * Inject it (update VMCB for injection by the hardware).
         */
        Log4(("Injecting pending HM event\n"));
        hmR0SvmInjectEventVmcb(pVCpu, pVmcb, &Event);
        pVCpu->hm.s.Event.fPending = false;

        if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
    }
    else
        Assert(pVmcb->ctrl.EventInject.n.u1Valid == 0);

    /*
     * We could have injected an NMI through IEM and continue guest execution using
     * hardware-assisted SVM. In which case, we would not have any events pending (above)
     * but we still need to intercept IRET in order to eventually clear NMI inhibition.
     */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);

    /*
     * Update the guest interrupt shadow in the guest (or nested-guest) VMCB.
     *
     * For nested-guests: We need to update it too for the scenario where IEM executes
     * the nested-guest but execution later continues here with an interrupt shadow active.
     */
    pVmcb->ctrl.IntShadow.n.u1IntShadow = fIntShadow;
}

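/*
 * Note: the IRET intercept set above pairs with VMCPU_FF_BLOCK_NMIS. When the guest
 * eventually executes IRET we get a #VMEXIT, the force-flag is cleared and NMI
 * delivery becomes possible again; this mirrors in software the NMI blocking that
 * real hardware maintains until IRET.
 */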

/**
 * Reports world-switch error and dumps some useful debug info.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rcVMRun The return code from VMRUN (or
 *                  VERR_SVM_INVALID_GUEST_STATE for invalid
 *                  guest-state).
 */
static void hmR0SvmReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun)
{
    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);

    if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
    {
#ifdef VBOX_STRICT
        hmR0DumpRegs(pVCpu);
        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
        Log4(("ctrl.u32VmcbCleanBits                 %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
        Log4(("ctrl.u16InterceptRdCRx                %#x\n",    pVmcb->ctrl.u16InterceptRdCRx));
        Log4(("ctrl.u16InterceptWrCRx                %#x\n",    pVmcb->ctrl.u16InterceptWrCRx));
        Log4(("ctrl.u16InterceptRdDRx                %#x\n",    pVmcb->ctrl.u16InterceptRdDRx));
        Log4(("ctrl.u16InterceptWrDRx                %#x\n",    pVmcb->ctrl.u16InterceptWrDRx));
        Log4(("ctrl.u32InterceptXcpt                 %#x\n",    pVmcb->ctrl.u32InterceptXcpt));
        Log4(("ctrl.u64InterceptCtrl                 %#RX64\n", pVmcb->ctrl.u64InterceptCtrl));
        Log4(("ctrl.u64IOPMPhysAddr                  %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
        Log4(("ctrl.u64MSRPMPhysAddr                 %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
        Log4(("ctrl.u64TSCOffset                     %#RX64\n", pVmcb->ctrl.u64TSCOffset));

        Log4(("ctrl.TLBCtrl.u32ASID                  %#x\n",    pVmcb->ctrl.TLBCtrl.n.u32ASID));
        Log4(("ctrl.TLBCtrl.u8TLBFlush               %#x\n",    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
        Log4(("ctrl.TLBCtrl.u24Reserved              %#x\n",    pVmcb->ctrl.TLBCtrl.n.u24Reserved));

        Log4(("ctrl.IntCtrl.u8VTPR                   %#x\n",    pVmcb->ctrl.IntCtrl.n.u8VTPR));
        Log4(("ctrl.IntCtrl.u1VIrqPending            %#x\n",    pVmcb->ctrl.IntCtrl.n.u1VIrqPending));
        Log4(("ctrl.IntCtrl.u1VGif                   %#x\n",    pVmcb->ctrl.IntCtrl.n.u1VGif));
        Log4(("ctrl.IntCtrl.u6Reserved0              %#x\n",    pVmcb->ctrl.IntCtrl.n.u6Reserved));
        Log4(("ctrl.IntCtrl.u4VIntrPrio              %#x\n",    pVmcb->ctrl.IntCtrl.n.u4VIntrPrio));
        Log4(("ctrl.IntCtrl.u1IgnoreTPR              %#x\n",    pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
        Log4(("ctrl.IntCtrl.u3Reserved               %#x\n",    pVmcb->ctrl.IntCtrl.n.u3Reserved));
        Log4(("ctrl.IntCtrl.u1VIntrMasking           %#x\n",    pVmcb->ctrl.IntCtrl.n.u1VIntrMasking));
        Log4(("ctrl.IntCtrl.u1VGifEnable             %#x\n",    pVmcb->ctrl.IntCtrl.n.u1VGifEnable));
        Log4(("ctrl.IntCtrl.u5Reserved1              %#x\n",    pVmcb->ctrl.IntCtrl.n.u5Reserved));
        Log4(("ctrl.IntCtrl.u8VIntrVector            %#x\n",    pVmcb->ctrl.IntCtrl.n.u8VIntrVector));
        Log4(("ctrl.IntCtrl.u24Reserved              %#x\n",    pVmcb->ctrl.IntCtrl.n.u24Reserved));

        Log4(("ctrl.IntShadow.u1IntShadow            %#x\n",    pVmcb->ctrl.IntShadow.n.u1IntShadow));
        Log4(("ctrl.IntShadow.u1GuestIntMask         %#x\n",    pVmcb->ctrl.IntShadow.n.u1GuestIntMask));
        Log4(("ctrl.u64ExitCode                      %#RX64\n", pVmcb->ctrl.u64ExitCode));
        Log4(("ctrl.u64ExitInfo1                     %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
        Log4(("ctrl.u64ExitInfo2                     %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
        Log4(("ctrl.ExitIntInfo.u8Vector             %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u8Vector));
        Log4(("ctrl.ExitIntInfo.u3Type               %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u3Type));
        Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid     %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
        Log4(("ctrl.ExitIntInfo.u19Reserved          %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
        Log4(("ctrl.ExitIntInfo.u1Valid              %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u1Valid));
        Log4(("ctrl.ExitIntInfo.u32ErrorCode         %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
        Log4(("ctrl.NestedPagingCtrl.u1NestedPaging  %#x\n",    pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging));
        Log4(("ctrl.NestedPagingCtrl.u1Sev           %#x\n",    pVmcb->ctrl.NestedPagingCtrl.n.u1Sev));
        Log4(("ctrl.NestedPagingCtrl.u1SevEs         %#x\n",    pVmcb->ctrl.NestedPagingCtrl.n.u1SevEs));
        Log4(("ctrl.EventInject.u8Vector             %#x\n",    pVmcb->ctrl.EventInject.n.u8Vector));
        Log4(("ctrl.EventInject.u3Type               %#x\n",    pVmcb->ctrl.EventInject.n.u3Type));
        Log4(("ctrl.EventInject.u1ErrorCodeValid     %#x\n",    pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
        Log4(("ctrl.EventInject.u19Reserved          %#x\n",    pVmcb->ctrl.EventInject.n.u19Reserved));
        Log4(("ctrl.EventInject.u1Valid              %#x\n",    pVmcb->ctrl.EventInject.n.u1Valid));
        Log4(("ctrl.EventInject.u32ErrorCode         %#x\n",    pVmcb->ctrl.EventInject.n.u32ErrorCode));

        Log4(("ctrl.u64NestedPagingCR3               %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));

        Log4(("ctrl.LbrVirt.u1LbrVirt                %#x\n",    pVmcb->ctrl.LbrVirt.n.u1LbrVirt));
        Log4(("ctrl.LbrVirt.u1VirtVmsaveVmload       %#x\n",    pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload));

        Log4(("guest.CS.u16Sel                       %RTsel\n", pVmcb->guest.CS.u16Sel));
        Log4(("guest.CS.u16Attr                      %#x\n",    pVmcb->guest.CS.u16Attr));
        Log4(("guest.CS.u32Limit                     %#RX32\n", pVmcb->guest.CS.u32Limit));
        Log4(("guest.CS.u64Base                      %#RX64\n", pVmcb->guest.CS.u64Base));
        Log4(("guest.DS.u16Sel                       %RTsel\n", pVmcb->guest.DS.u16Sel));
        Log4(("guest.DS.u16Attr                      %#x\n",    pVmcb->guest.DS.u16Attr));
        Log4(("guest.DS.u32Limit                     %#RX32\n", pVmcb->guest.DS.u32Limit));
        Log4(("guest.DS.u64Base                      %#RX64\n", pVmcb->guest.DS.u64Base));
        Log4(("guest.ES.u16Sel                       %RTsel\n", pVmcb->guest.ES.u16Sel));
        Log4(("guest.ES.u16Attr                      %#x\n",    pVmcb->guest.ES.u16Attr));
        Log4(("guest.ES.u32Limit                     %#RX32\n", pVmcb->guest.ES.u32Limit));
        Log4(("guest.ES.u64Base                      %#RX64\n", pVmcb->guest.ES.u64Base));
        Log4(("guest.FS.u16Sel                       %RTsel\n", pVmcb->guest.FS.u16Sel));
        Log4(("guest.FS.u16Attr                      %#x\n",    pVmcb->guest.FS.u16Attr));
        Log4(("guest.FS.u32Limit                     %#RX32\n", pVmcb->guest.FS.u32Limit));
        Log4(("guest.FS.u64Base                      %#RX64\n", pVmcb->guest.FS.u64Base));
        Log4(("guest.GS.u16Sel                       %RTsel\n", pVmcb->guest.GS.u16Sel));
        Log4(("guest.GS.u16Attr                      %#x\n",    pVmcb->guest.GS.u16Attr));
        Log4(("guest.GS.u32Limit                     %#RX32\n", pVmcb->guest.GS.u32Limit));
        Log4(("guest.GS.u64Base                      %#RX64\n", pVmcb->guest.GS.u64Base));

        Log4(("guest.GDTR.u32Limit                   %#RX32\n", pVmcb->guest.GDTR.u32Limit));
        Log4(("guest.GDTR.u64Base                    %#RX64\n", pVmcb->guest.GDTR.u64Base));

        Log4(("guest.LDTR.u16Sel                     %RTsel\n", pVmcb->guest.LDTR.u16Sel));
        Log4(("guest.LDTR.u16Attr                    %#x\n",    pVmcb->guest.LDTR.u16Attr));
        Log4(("guest.LDTR.u32Limit                   %#RX32\n", pVmcb->guest.LDTR.u32Limit));
        Log4(("guest.LDTR.u64Base                    %#RX64\n", pVmcb->guest.LDTR.u64Base));

        Log4(("guest.IDTR.u32Limit                   %#RX32\n", pVmcb->guest.IDTR.u32Limit));
        Log4(("guest.IDTR.u64Base                    %#RX64\n", pVmcb->guest.IDTR.u64Base));

        Log4(("guest.TR.u16Sel                       %RTsel\n", pVmcb->guest.TR.u16Sel));
        Log4(("guest.TR.u16Attr                      %#x\n",    pVmcb->guest.TR.u16Attr));
        Log4(("guest.TR.u32Limit                     %#RX32\n", pVmcb->guest.TR.u32Limit));
        Log4(("guest.TR.u64Base                      %#RX64\n", pVmcb->guest.TR.u64Base));

        Log4(("guest.u8CPL                           %#x\n",    pVmcb->guest.u8CPL));
        Log4(("guest.u64CR0                          %#RX64\n", pVmcb->guest.u64CR0));
        Log4(("guest.u64CR2                          %#RX64\n", pVmcb->guest.u64CR2));
        Log4(("guest.u64CR3                          %#RX64\n", pVmcb->guest.u64CR3));
        Log4(("guest.u64CR4                          %#RX64\n", pVmcb->guest.u64CR4));
        Log4(("guest.u64DR6                          %#RX64\n", pVmcb->guest.u64DR6));
        Log4(("guest.u64DR7                          %#RX64\n", pVmcb->guest.u64DR7));

        Log4(("guest.u64RIP                          %#RX64\n", pVmcb->guest.u64RIP));
        Log4(("guest.u64RSP                          %#RX64\n", pVmcb->guest.u64RSP));
        Log4(("guest.u64RAX                          %#RX64\n", pVmcb->guest.u64RAX));
        Log4(("guest.u64RFlags                       %#RX64\n", pVmcb->guest.u64RFlags));

        Log4(("guest.u64SysEnterCS                   %#RX64\n", pVmcb->guest.u64SysEnterCS));
        Log4(("guest.u64SysEnterEIP                  %#RX64\n", pVmcb->guest.u64SysEnterEIP));
        Log4(("guest.u64SysEnterESP                  %#RX64\n", pVmcb->guest.u64SysEnterESP));

        Log4(("guest.u64EFER                         %#RX64\n", pVmcb->guest.u64EFER));
        Log4(("guest.u64STAR                         %#RX64\n", pVmcb->guest.u64STAR));
        Log4(("guest.u64LSTAR                        %#RX64\n", pVmcb->guest.u64LSTAR));
        Log4(("guest.u64CSTAR                        %#RX64\n", pVmcb->guest.u64CSTAR));
        Log4(("guest.u64SFMASK                       %#RX64\n", pVmcb->guest.u64SFMASK));
        Log4(("guest.u64KernelGSBase                 %#RX64\n", pVmcb->guest.u64KernelGSBase));
        Log4(("guest.u64PAT                          %#RX64\n", pVmcb->guest.u64PAT));
        Log4(("guest.u64DBGCTL                       %#RX64\n", pVmcb->guest.u64DBGCTL));
        Log4(("guest.u64BR_FROM                      %#RX64\n", pVmcb->guest.u64BR_FROM));
        Log4(("guest.u64BR_TO                        %#RX64\n", pVmcb->guest.u64BR_TO));
        Log4(("guest.u64LASTEXCPFROM                 %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
        Log4(("guest.u64LASTEXCPTO                   %#RX64\n", pVmcb->guest.u64LASTEXCPTO));

        NOREF(pVmcb);
#endif /* VBOX_STRICT */
    }
    else
        Log4Func(("rcVMRun=%d\n", rcVMRun));
}


/**
 * Check per-VM and per-VCPU force flag actions that require us to go back to
 * ring-3 for one reason or another.
 *
 * @returns VBox status code (information status code included).
 * @retval VINF_SUCCESS if we don't have any actions that require going back to
 *         ring-3.
 * @retval VINF_PGM_SYNC_CR3 if we have a pending PGM CR3 sync.
 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
 *         interrupts).
 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
 *         all EMTs to be in ring-3.
 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
 * @retval VINF_EM_NO_MEMORY if PGM is out of memory, we need to return
 *         to the EM loop.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int hmR0SvmCheckForceFlags(PVMCPU pVCpu)
{
    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));

    /* Could happen as a result of longjump. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));

    /* Update pending interrupts into the APIC's IRR. */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
        APICUpdatePendingInterrupts(pVCpu);

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (   VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
                                 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
        || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
                                      ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
    {
        /* Pending PGM CR3 sync. */
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        {
            int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4,
                                VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
            if (rc != VINF_SUCCESS)
            {
                Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
                return rc;
            }
        }

        /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
        /* -XXX- what was that about single stepping? */
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
        {
            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
            int rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
            Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
            return rc;
        }

        /* Pending VM request packets, such as hardware interrupts. */
        if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
        {
            Log4Func(("Pending VM request forcing us back to ring-3\n"));
            return VINF_EM_PENDING_REQUEST;
        }

        /* Pending PGM pool flushes. */
        if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
        {
            Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
            return VINF_PGM_POOL_FLUSH_PENDING;
        }

        /* Pending DMA requests. */
        if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
        {
            Log4Func(("Pending DMA request forcing us back to ring-3\n"));
            return VINF_EM_RAW_TO_R3;
        }
    }

    return VINF_SUCCESS;
}
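
/*
 * Note: the informational VINF_* codes returned above are not failures; callers
 * propagate them to ring-3 where EM performs the corresponding scheduling action
 * before hardware-assisted execution is resumed.
 */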


#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Does the preparations before executing nested-guest code in AMD-V.
 *
 * @returns VBox status code (informational status codes included).
 * @retval VINF_SUCCESS if we can proceed with running the guest.
 * @retval VINF_* scheduling changes, we have to go back to ring-3.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 *
 * @remarks Same caveats regarding longjumps as hmR0SvmPreRunGuest apply.
 * @sa      hmR0SvmPreRunGuest.
 */
static int hmR0SvmPreRunGuestNested(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM_ONLY_IN_IEM
    Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
    return VINF_EM_RESCHEDULE_REM;
#endif

    /* Check force flag actions that might require us to go back to ring-3. */
    int rc = hmR0SvmCheckForceFlags(pVCpu);
    if (rc != VINF_SUCCESS)
        return rc;

    if (TRPMHasTrap(pVCpu))
        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
    else if (!pVCpu->hm.s.Event.fPending)
    {
        VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu);
        if (    rcStrict != VINF_SUCCESS
            || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
            return VBOXSTRICTRC_VAL(rcStrict);
    }

    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);

    /*
     * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
     * Just do it in software, see @bugref{8411}.
     * NB: If we could continue a task switch exit we wouldn't need to do this.
     */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (RT_UNLIKELY(   !pVM->hm.s.svm.u32Features
                    &&  pVCpu->hm.s.Event.fPending
                    &&  SVM_EVENT_GET_TYPE(pVCpu->hm.s.Event.u64IntInfo) == SVM_EVENT_NMI))
    {
        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    }

#ifdef HMSVM_SYNC_FULL_GUEST_STATE
    Assert(!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
#endif

    /*
     * Export the nested-guest state bits that are not shared with the host in any way as we
     * can longjmp or get preempted in the midst of exporting some of the state.
     */
    rc = hmR0SvmExportGuestStateNested(pVCpu);
    AssertRCReturn(rc, rc);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);

    /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware-assisted SVM. */
    Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);

    /*
     * No longjmps to ring-3 from this point on!!!
     *
     * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
     * better than a kernel panic. This also disables flushing of the R0-logger instance.
     */
    VMMRZCallRing3Disable(pVCpu);

    /*
     * We disable interrupts so that we don't miss any interrupts that would flag preemption
     * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
     * preemption disabled for a while. Since this is purely to aid the
     * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
     * disable interrupts on NT.
     *
     * We need to check for force-flags that could've possibly been altered since we last
     * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
     * see @bugref{6398}).
     *
     * We also check a couple of other force-flags as a last opportunity to get the EMT back
     * to ring-3 before executing guest code.
     */
    pSvmTransient->fEFlags = ASMIntDisableFlags();
    if (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    {
        ASMSetFlags(pSvmTransient->fEFlags);
        VMMRZCallRing3Enable(pVCpu);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
        return VINF_EM_RAW_TO_R3;
    }
    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    {
        ASMSetFlags(pSvmTransient->fEFlags);
        VMMRZCallRing3Enable(pVCpu);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
        return VINF_EM_RAW_INTERRUPT;
    }
    return VINF_SUCCESS;
}
#endif


/**
 * Does the preparations before executing guest code in AMD-V.
 *
 * This may cause longjmps to ring-3 and may even result in rescheduling to the
 * recompiler. We must be cautious about what we do here regarding committing
 * guest-state information into the VMCB, assuming we assuredly execute the
 * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
 * clearing the common-state (TRPM/forceflags), we must undo those changes so
 * that the recompiler can (and should) use them when it resumes guest
 * execution. Otherwise such operations must be done when we can no longer
 * exit to ring-3.
 *
 * @returns VBox status code (informational status codes included).
 * @retval VINF_SUCCESS if we can proceed with running the guest.
 * @retval VINF_* scheduling changes, we have to go back to ring-3.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 */
static int hmR0SvmPreRunGuest(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);

    /* Check force flag actions that might require us to go back to ring-3. */
    int rc = hmR0SvmCheckForceFlags(pVCpu);
    if (rc != VINF_SUCCESS)
        return rc;

    if (TRPMHasTrap(pVCpu))
        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
    else if (!pVCpu->hm.s.Event.fPending)
        hmR0SvmEvaluatePendingEvent(pVCpu);

    /*
     * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
     * Just do it in software, see @bugref{8411}.
     * NB: If we could continue a task switch exit we wouldn't need to do this.
     */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
        if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))
            return VINF_EM_RAW_INJECT_TRPM_EVENT;

#ifdef HMSVM_SYNC_FULL_GUEST_STATE
    Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
#endif

    /*
     * Export the guest state bits that are not shared with the host in any way as we can
     * longjmp or get preempted in the midst of exporting some of the state.
     */
    rc = hmR0SvmExportGuestState(pVCpu);
    AssertRCReturn(rc, rc);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);

    /*
     * If we're not intercepting TPR changes in the guest, save the guest TPR before the
     * world-switch so we can update it on the way back if the guest changed the TPR.
     */
    if (pVCpu->hm.s.svm.fSyncVTpr)
    {
        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
        if (pVM->hm.s.fTPRPatchingActive)
            pSvmTransient->u8GuestTpr = pVmcb->guest.u64LSTAR;
        else
            pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
    }

    /*
     * No longjmps to ring-3 from this point on!!!
     *
     * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
     * better than a kernel panic. This also disables flushing of the R0-logger instance.
     */
    VMMRZCallRing3Disable(pVCpu);

    /*
     * We disable interrupts so that we don't miss any interrupts that would flag preemption
     * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
     * preemption disabled for a while. Since this is purely to aid the
     * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
     * disable interrupts on NT.
     *
     * We need to check for force-flags that could've possibly been altered since we last
     * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
     * see @bugref{6398}).
     *
     * We also check a couple of other force-flags as a last opportunity to get the EMT back
     * to ring-3 before executing guest code.
     */
    pSvmTransient->fEFlags = ASMIntDisableFlags();
    if (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    {
        ASMSetFlags(pSvmTransient->fEFlags);
        VMMRZCallRing3Enable(pVCpu);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
        return VINF_EM_RAW_TO_R3;
    }
    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    {
        ASMSetFlags(pSvmTransient->fEFlags);
        VMMRZCallRing3Enable(pVCpu);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
        return VINF_EM_RAW_INTERRUPT;
    }

    return VINF_SUCCESS;
}


/**
 * Prepares to run guest (or nested-guest) code in AMD-V once we've committed to
 * doing so.
 *
 * This means there is no backing out to ring-3 or anywhere else at this point.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 *
 * @remarks Called with preemption disabled.
 * @remarks No-long-jump zone!!!
 */
static void hmR0SvmPreRunGuestCommitted(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);            /* Indicate the start of guest execution. */

    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
    PSVMVMCB pVmcb = pSvmTransient->pVmcb;

    hmR0SvmInjectPendingEvent(pVCpu, pVmcb);

    if (!CPUMIsGuestFPUStateActive(pVCpu))
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
        CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
    }

    /* Load the state shared between host and guest (FPU, debug). */
    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)
        hmR0SvmExportSharedState(pVCpu, pVmcb);

    pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT;        /* Preemption might set this, nothing to do on AMD-V. */
    AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));

    PHMGLOBALCPUINFO pHostCpu         = hmR0GetCurrentCpu();
    RTCPUID const    idHostCpu        = pHostCpu->idCpu;
    bool const       fMigratedHostCpu = idHostCpu != pVCpu->hm.s.idLastCpu;

    /* Set up TSC offsetting. */
    if (   pSvmTransient->fUpdateTscOffsetting
        || fMigratedHostCpu)
    {
        hmR0SvmUpdateTscOffsetting(pVCpu, pVmcb);
        pSvmTransient->fUpdateTscOffsetting = false;
    }

    /* If we've migrated CPUs, mark the VMCB Clean bits as dirty. */
    if (fMigratedHostCpu)
        pVmcb->ctrl.u32VmcbCleanBits = 0;

    /* Store status of the shared guest-host state at the time of VMRUN. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
    {
        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
    }
    else
#endif
    {
        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
    }

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    uint8_t *pbMsrBitmap;
    if (!pSvmTransient->fIsNestedGuest)
        pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    else
    {
        hmR0SvmMergeMsrpmNested(pHostCpu, pVCpu);

        /* Update the nested-guest VMCB with the newly merged MSRPM (clean bits updated below). */
        pVmcb->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
        pbMsrBitmap = (uint8_t *)pHostCpu->n.svm.pvNstGstMsrpm;
    }
#else
    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
#endif

    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
    /* Flush the appropriate tagged-TLB entries. */
    hmR0SvmFlushTaggedTlb(pVCpu, pVmcb, pHostCpu);
    Assert(pVCpu->hm.s.idLastCpu == idHostCpu);

    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);

    TMNotifyStartOfExecution(pVCpu);                            /* Finally, notify TM to resume its clocks as we're about
                                                                   to start executing. */

    /*
     * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that RDTSCPs
     * (that don't cause exits) read the guest MSR, see @bugref{3324}.
     *
     * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
     */
    if (    (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
        && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
    {
        uint64_t const uGuestTscAux = CPUMGetGuestTscAux(pVCpu);
        pVCpu->hm.s.u64HostTscAux   = ASMRdMsr(MSR_K8_TSC_AUX);
        if (uGuestTscAux != pVCpu->hm.s.u64HostTscAux)
            ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
        hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
        pSvmTransient->fRestoreTscAuxMsr = true;
    }
    else
    {
        hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
        pSvmTransient->fRestoreTscAuxMsr = false;
    }
    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;

    /*
     * If VMCB Clean bits aren't supported by the CPU or exposed to the guest in the nested
     * virtualization case, mark all state-bits as dirty indicating to the CPU to re-load
     * from the VMCB.
     */
    bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu);
    if (!fSupportsVmcbCleanBits)
        pVmcb->ctrl.u32VmcbCleanBits = 0;
}
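
/*
 * Note: clearing u32VmcbCleanBits to zero (as done above for CPU migration and for
 * missing clean-bits support) is always safe; the clean bits are purely an
 * optimization, and zero merely forces the CPU to reload all guest state from the
 * VMCB on the next VMRUN.
 */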


/**
 * Wrapper for running the guest (or nested-guest) code in AMD-V.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   HCPhysVmcb  The host physical address of the VMCB.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0SvmRunGuest(PVMCPU pVCpu, RTHCPHYS HCPhysVmcb)
{
    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;

    /*
     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
     * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
     * callee-saved and thus the need for this XMM wrapper.
     *
     * Refer to MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
     */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
#ifdef VBOX_WITH_KERNEL_USING_XMM
    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu, pVCpu->hm.s.svm.pfnVMRun);
#else
    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu);
#endif
}


/**
 * Undoes the TSC offset applied for an SVM nested-guest and returns the TSC
 * value for the guest.
 *
 * @returns The guest TSC after undoing any nested-guest TSC offset.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uTicks  The nested-guest TSC.
 *
 * @note    If you make any changes to this function, please check if
 *          HMSvmNstGstApplyTscOffset() needs adjusting.
 *
 * @sa      HMSvmNstGstApplyTscOffset().
 */
DECLINLINE(uint64_t) hmR0SvmNstGstUndoTscOffset(PVMCPU pVCpu, uint64_t uTicks)
{
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    Assert(pVmcbNstGstCache->fCacheValid);
    return uTicks - pVmcbNstGstCache->u64TSCOffset;
}
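
/*
 * Worked example for the function above (assuming the offset merging described by
 * HMSvmNstGstApplyTscOffset()): with an outer-guest TSC offset G and a nested-guest
 * VMCB offset N, the VMCB used for VMRUN carries G + N. Given
 * uTicks = host TSC + G + N, subtracting the cached N yields host TSC + G, i.e. the
 * TSC as the outer guest perceives it.
 */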


/**
 * Performs some essential restoration of state after running guest (or
 * nested-guest) code in AMD-V.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 * @param   rcVMRun         Return code of VMRUN.
 *
 * @remarks Called with interrupts disabled.
 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
 *          unconditionally when it is safe to do so.
 */
static void hmR0SvmPostRunGuest(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient, int rcVMRun)
{
    Assert(!VMMRZCallRing3IsEnabled(pVCpu));

    uint64_t const uHostTsc = ASMReadTSC();                     /* Read the TSC as soon as possible. */
    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */

    PSVMVMCB     pVmcb     = pSvmTransient->pVmcb;
    PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;

    /* TSC read must be done early for maximum accuracy. */
    if (!(pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
    {
        if (!pSvmTransient->fIsNestedGuest)
            TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        else
        {
            /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMSvmNstGstVmExitNotify(). */
            uint64_t const uGstTsc = hmR0SvmNstGstUndoTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
            TMCpuTickSetLastSeen(pVCpu, uGstTsc);
        }
#endif
    }

    if (pSvmTransient->fRestoreTscAuxMsr)
    {
        uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
        CPUMSetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
        if (u64GuestTscAuxMsr != pVCpu->hm.s.u64HostTscAux)
            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
    }

    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
    TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

    Assert(!(ASMGetFlags() & X86_EFL_IF));
    ASMSetFlags(pSvmTransient->fEFlags);                        /* Enable interrupts. */
    VMMRZCallRing3Enable(pVCpu);                                /* It is now safe to do longjmps to ring-3!!! */

    /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
    if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
    {
        Log4Func(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));
        return;
    }

    pSvmTransient->u64ExitCode        = pVmcbCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
    pVmcbCtrl->u32VmcbCleanBits       = HMSVM_VMCB_CLEAN_ALL;   /* Mark the VMCB-state cache as unmodified by VMM. */
    pSvmTransient->fVectoringDoublePF = false;                  /* Vectoring double page-fault needs to be determined later. */
    pSvmTransient->fVectoringPF       = false;                  /* Vectoring page-fault needs to be determined later. */

#ifdef HMSVM_SYNC_FULL_GUEST_STATE
    hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
#else
    /*
     * Always import the following:
     *
     *   - RIP for exit optimizations and evaluating event injection on re-entry.
     *   - RFLAGS for evaluating event injection on VM re-entry and for exporting shared debug
     *     state on preemption.
     *   - Interrupt shadow, GIF for evaluating event injection on VM re-entry.
     *   - CS for exit optimizations.
     *   - RAX, RSP for simplifying assumptions on GPRs. All other GPRs are swapped by the
     *     assembly switcher code.
     *   - Shared state (only DR7 currently) for exporting shared debug state on preemption.
     */
    hmR0SvmImportGuestState(pVCpu,   CPUMCTX_EXTRN_RIP
                                   | CPUMCTX_EXTRN_RFLAGS
                                   | CPUMCTX_EXTRN_RAX
                                   | CPUMCTX_EXTRN_RSP
                                   | CPUMCTX_EXTRN_CS
                                   | CPUMCTX_EXTRN_HWVIRT
                                   | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW
                                   | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ
                                   | HMSVM_CPUMCTX_SHARED_STATE);
#endif

    if (   pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
        && pVCpu->hm.s.svm.fSyncVTpr)
    {
        Assert(!pSvmTransient->fIsNestedGuest);
        /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
        if (   pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive
            && (pVmcb->guest.u64LSTAR & 0xff) != pSvmTransient->u8GuestTpr)
        {
            int rc = APICSetTpr(pVCpu, pVmcb->guest.u64LSTAR & 0xff);
            AssertRC(rc);
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
        }
        /* Sync TPR when we aren't intercepting CR8 writes. */
        else if (pSvmTransient->u8GuestTpr != pVmcbCtrl->IntCtrl.n.u8VTPR)
        {
            int rc = APICSetTpr(pVCpu, pVmcbCtrl->IntCtrl.n.u8VTPR << 4);
            AssertRC(rc);
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
        }
    }

#ifdef DEBUG_ramshankar
    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
    {
        hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
        hmR0SvmLogState(pVCpu, pVmcb, pVCpu->cpum.GstCtx, "hmR0SvmPostRunGuestNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR,
                        0 /* uVerbose */);
    }
#endif

    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK),
                     pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
}


/**
 * Runs the guest code using AMD-V.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pcLoops Pointer to the number of executed loops.
 */
static int hmR0SvmRunGuestCodeNormal(PVMCPU pVCpu, uint32_t *pcLoops)
{
    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
    Assert(pcLoops);
    Assert(*pcLoops <= cMaxResumeLoops);

    SVMTRANSIENT SvmTransient;
    RT_ZERO(SvmTransient);
    SvmTransient.fUpdateTscOffsetting = true;
    SvmTransient.pVmcb = pVCpu->hm.s.svm.pVmcb;

    int rc = VERR_INTERNAL_ERROR_5;
    for (;;)
    {
        Assert(!HMR0SuspendPending());
        HMSVM_ASSERT_CPU_SAFE(pVCpu);

        /* Preparatory work for running guest code, this may force us to return to
           ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
        rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * No longjmps to ring-3 from this point on!!!
         *
         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
         * better than a kernel panic. This also disables flushing of the R0-logger instance.
         */
        hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
        rc = hmR0SvmRunGuest(pVCpu, pVCpu->hm.s.svm.HCPhysVmcb);

        /* Restore any residual host-state and save any bits shared between host and guest
           into the guest-CPU state.  Re-enables interrupts! */
        hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);

        if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
                        || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
        {
            if (rc == VINF_SUCCESS)
                rc = VERR_SVM_INVALID_GUEST_STATE;
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
            hmR0SvmReportWorldSwitchError(pVCpu, rc);
            break;
        }

        /* Handle the #VMEXIT. */
        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
        rc = hmR0SvmHandleExit(pVCpu, &SvmTransient);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
        if (rc != VINF_SUCCESS)
            break;
        if (++(*pcLoops) >= cMaxResumeLoops)
        {
            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
            rc = VINF_EM_RAW_INTERRUPT;
            break;
        }
    }

    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    return rc;
}
4896 |
|
---|
4897 |
|
---|
4898 | /**
|
---|
4899 | * Runs the guest code using AMD-V in single step mode.
|
---|
4900 | *
|
---|
4901 | * @returns VBox status code.
|
---|
4902 | * @param pVCpu The cross context virtual CPU structure.
|
---|
4903 | * @param pcLoops Pointer to the number of executed loops.
|
---|
4904 | */
|
---|
4905 | static int hmR0SvmRunGuestCodeStep(PVMCPU pVCpu, uint32_t *pcLoops)
|
---|
4906 | {
|
---|
4907 | uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
|
---|
4908 | Assert(pcLoops);
|
---|
4909 | Assert(*pcLoops <= cMaxResumeLoops);
|
---|
4910 |
|
---|
4911 | SVMTRANSIENT SvmTransient;
|
---|
4912 | RT_ZERO(SvmTransient);
|
---|
4913 | SvmTransient.fUpdateTscOffsetting = true;
|
---|
4914 | SvmTransient.pVmcb = pVCpu->hm.s.svm.pVmcb;
|
---|
4915 |
|
---|
4916 | PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
|
---|
4917 | uint16_t uCsStart = pCtx->cs.Sel;
|
---|
4918 | uint64_t uRipStart = pCtx->rip;
|
---|
4919 |
|
---|
4920 | int rc = VERR_INTERNAL_ERROR_5;
|
---|
4921 | for (;;)
|
---|
4922 | {
|
---|
4923 | Assert(!HMR0SuspendPending());
|
---|
4924 | AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
|
---|
4925 | ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
|
---|
4926 | (unsigned)RTMpCpuId(), *pcLoops));
|
---|
4927 |
|
---|
4928 | /* Preparatory work for running nested-guest code, this may force us to return to
|
---|
4929 | ring-3. This bugger disables interrupts on VINF_SUCCESS! */
|
---|
4930 | STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
|
---|
4931 | rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
|
---|
4932 | if (rc != VINF_SUCCESS)
|
---|
4933 | break;
|
---|
4934 |
|
---|
4935 | /*
|
---|
4936 | * No longjmps to ring-3 from this point on!!!
|
---|
4937 | *
|
---|
4938 | * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
|
---|
4939 | * better than a kernel panic. This also disables flushing of the R0-logger instance.
|
---|
4940 | */
|
---|
4941 | VMMRZCallRing3Disable(pVCpu);
|
---|
4942 | VMMRZCallRing3RemoveNotification(pVCpu);
|
---|
4943 | hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
|
---|
4944 |
|
---|
4945 | rc = hmR0SvmRunGuest(pVCpu, pVCpu->hm.s.svm.HCPhysVmcb);
|
---|
4946 |
|
---|
4947 | /* Restore any residual host-state and save any bits shared between host and guest
|
---|
4948 | into the guest-CPU state. Re-enables interrupts! */
|
---|
4949 | hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
|
---|
4950 |
|
---|
4951 | if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
|
---|
4952 | || SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
|
---|
4953 | {
|
---|
4954 | if (rc == VINF_SUCCESS)
|
---|
4955 | rc = VERR_SVM_INVALID_GUEST_STATE;
|
---|
4956 | STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
|
---|
4957 | hmR0SvmReportWorldSwitchError(pVCpu, rc);
|
---|
4958 | return rc;
|
---|
4959 | }
|
---|
4960 |
|
---|
4961 | /* Handle the #VMEXIT. */
|
---|
4962 | HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
|
---|
4963 | STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
|
---|
4964 | VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
|
---|
4965 | rc = hmR0SvmHandleExit(pVCpu, &SvmTransient);
|
---|
4966 | STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
|
---|
4967 | if (rc != VINF_SUCCESS)
|
---|
4968 | break;
|
---|
4969 | if (++(*pcLoops) >= cMaxResumeLoops)
|
---|
4970 | {
|
---|
4971 | STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
|
---|
4972 | rc = VINF_EM_RAW_INTERRUPT;
|
---|
4973 | break;
|
---|
4974 | }
|
---|
4975 |
|
---|
4976 | /*
|
---|
4977 | * Did the RIP change, if so, consider it a single step.
|
---|
4978 | * Otherwise, make sure one of the TFs gets set.
|
---|
4979 | */
|
---|
4980 | if ( pCtx->rip != uRipStart
|
---|
4981 | || pCtx->cs.Sel != uCsStart)
|
---|
4982 | {
|
---|
4983 | rc = VINF_EM_DBG_STEPPED;
|
---|
4984 | break;
|
---|
4985 | }
|
---|
4986 | pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR_MASK;
|
---|
4987 | }
|
---|
4988 |
|
---|
4989 | /*
|
---|
4990 | * Clear the X86_EFL_TF if necessary.
|
---|
4991 | */
|
---|
4992 | if (pVCpu->hm.s.fClearTrapFlag)
|
---|
4993 | {
|
---|
4994 | pVCpu->hm.s.fClearTrapFlag = false;
|
---|
4995 | pCtx->eflags.Bits.u1TF = 0;
|
---|
4996 | }
|
---|
4997 |
|
---|
4998 | STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
|
---|
4999 | return rc;
|
---|
5000 | }
|
---|
5001 |
|
---|
5002 | #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
|
---|
5003 | /**
|
---|
5004 | * Runs the nested-guest code using AMD-V.
|
---|
5005 | *
|
---|
5006 | * @returns VBox status code.
|
---|
5007 | * @param pVCpu The cross context virtual CPU structure.
|
---|
5008 | * @param pcLoops Pointer to the number of executed loops. If we're switching
|
---|
5009 | * from the guest-code execution loop to this nested-guest
|
---|
5010 | * execution loop pass the remainder value, else pass 0.
|
---|
5011 | */
|
---|
5012 | static int hmR0SvmRunGuestCodeNested(PVMCPU pVCpu, uint32_t *pcLoops)
|
---|
5013 | {
|
---|
5014 | PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
|
---|
5015 | HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
|
---|
5016 | Assert(pcLoops);
|
---|
5017 | Assert(*pcLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops);
|
---|
5018 |
|
---|
5019 | SVMTRANSIENT SvmTransient;
|
---|
5020 | RT_ZERO(SvmTransient);
|
---|
5021 | SvmTransient.fUpdateTscOffsetting = true;
|
---|
5022 | SvmTransient.pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
|
---|
5023 | SvmTransient.fIsNestedGuest = true;
|
---|
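    /* Note: we run the nested-guest on the VMCB the outer guest set up (mapped via
       pCtx->hwvirt.svm), not on our own VMCB; its physical address is what gets
       handed to VMRUN below. */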

    int rc = VERR_INTERNAL_ERROR_4;
    for (;;)
    {
        Assert(!HMR0SuspendPending());
        HMSVM_ASSERT_CPU_SAFE(pVCpu);

        /* Preparatory work for running nested-guest code, this may force us to return to
           ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
        rc = hmR0SvmPreRunGuestNested(pVCpu, &SvmTransient);
        if (    rc != VINF_SUCCESS
            || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
        {
            break;
        }

        /*
         * No longjmps to ring-3 from this point on!!!
         *
         * Assertions will still longjmp to ring-3 (but won't return), which is intentional;
         * it's better than a kernel panic.  This also disables flushing of the R0-logger instance.
         */
        hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);

        rc = hmR0SvmRunGuest(pVCpu, pCtx->hwvirt.svm.HCPhysVmcb);

        /* Restore any residual host-state and save any bits shared between host and guest
           into the guest-CPU state.  Re-enables interrupts! */
        hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);

        if (RT_LIKELY(   rc == VINF_SUCCESS
                      && SvmTransient.u64ExitCode != SVM_EXIT_INVALID))
        { /* extremely likely */ }
        else
        {
            /* VMRUN failed; shouldn't really happen and will end in a guru meditation. */
            if (rc != VINF_SUCCESS)
                break;

            /* Invalid nested-guest state.  Cause a #VMEXIT but assert on strict builds. */
            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
            AssertMsgFailed(("Invalid nested-guest state. rc=%Rrc u64ExitCode=%#RX64\n", rc, SvmTransient.u64ExitCode));
            rc = VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0, 0));
            break;
        }

        /* Handle the #VMEXIT. */
        HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb));
        rc = hmR0SvmHandleExitNested(pVCpu, &SvmTransient);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
        if (    rc != VINF_SUCCESS
            || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
            break;
        if (++(*pcLoops) >= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
        {
            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
            rc = VINF_EM_RAW_INTERRUPT;
            break;
        }

        /** @todo handle single-stepping */
    }

    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    return rc;
}
#endif


/**
 * Runs the guest code using AMD-V.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPU pVCpu)
{
    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, NULL /* pvUser */);

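    /* cLoops is shared between the loops below, so when we switch from the normal loop
       to the nested-guest one the combined iteration count still honours cMaxResumeLoops. */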
    uint32_t cLoops = 0;
    int      rc;
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (!CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
#endif
    {
        if (!pVCpu->hm.s.fSingleInstruction)
            rc = hmR0SvmRunGuestCodeNormal(pVCpu, &cLoops);
        else
            rc = hmR0SvmRunGuestCodeStep(pVCpu, &cLoops);
    }
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    else
    {
        rc = VINF_SVM_VMRUN;
    }

    /* Re-check the nested-guest condition here as we may be transitioning from the normal
       execution loop into the nested-guest, hence this is not placed in the 'else' part above. */
    if (rc == VINF_SVM_VMRUN)
    {
        rc = hmR0SvmRunGuestCodeNested(pVCpu, &cLoops);
        if (rc == VINF_SVM_VMEXIT)
            rc = VINF_SUCCESS;
    }
#endif

    /* Fixup error codes. */
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    else if (rc == VINF_EM_RESET)
        rc = VINF_EM_TRIPLE_FAULT;

    /* Prepare to return to ring-3.  This will remove longjmp notifications. */
    rc = hmR0SvmExitToRing3(pVCpu, rc);
    Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
    return rc;
}


#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Determines whether an IOIO intercept is active for the nested-guest or not.
 *
 * @param   pvIoBitmap      Pointer to the nested-guest IO bitmap.
 * @param   pIoExitInfo     Pointer to the SVMIOIOEXITINFO.
 */
static bool hmR0SvmIsIoInterceptActive(void *pvIoBitmap, PSVMIOIOEXITINFO pIoExitInfo)
{
    const uint16_t    u16Port       = pIoExitInfo->n.u16Port;
    const SVMIOIOTYPE enmIoType     = (SVMIOIOTYPE)pIoExitInfo->n.u1Type;
    const uint8_t     cbReg         = (pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT)    & 7;
    const uint8_t     cAddrSizeBits = ((pIoExitInfo->u >> SVM_IOIO_ADDR_SIZE_SHIFT) & 7) << 4;
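    /* Note: the ADDR_SIZE field encodes 16/32/64-bit addressing as 1/2/4, so shifting
       left by 4 (i.e. multiplying by 16) yields the address width in bits. */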
    const uint8_t     iEffSeg       = pIoExitInfo->n.u3Seg;
    const bool        fRep          = pIoExitInfo->n.u1Rep;
    const bool        fStrIo        = pIoExitInfo->n.u1Str;

    return HMSvmIsIOInterceptActive(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo,
                                    NULL /* pIoExitInfo */);
}


/**
 * Handles a nested-guest \#VMEXIT (for all EXITCODE values except
 * SVM_EXIT_INVALID).
 *
 * @returns VBox status code (informational status codes included).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 */
static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
    Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
    Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);

    /** @todo Figure out why using IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK instead of
     *        HMSVM_CPUMCTX_EXTRN_ALL breaks nested guests (XP Pro, DSL etc.), see
     *        also HMSvmNstGstVmExitNotify(). */
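    /* Reflects a #VMEXIT into the nested-guest: imports the state IEM needs, then lets
       IEM perform the world switch back to the guest hypervisor and returns its status. */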
#define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
    do { \
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
        return VBOXSTRICTRC_TODO(IEMExecSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2))); \
    } while (0)

    /*
     * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected by the
     * nested-guest.  If it isn't, it should be handled by the (outer) guest.
     */
    PSVMVMCB       pVmcbNstGst     = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
    PSVMVMCBCTRL   pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    uint64_t const uExitCode       = pVmcbNstGstCtrl->u64ExitCode;
    uint64_t const uExitInfo1      = pVmcbNstGstCtrl->u64ExitInfo1;
    uint64_t const uExitInfo2      = pVmcbNstGstCtrl->u64ExitInfo2;

    Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode);
    switch (uExitCode)
    {
        case SVM_EXIT_CPUID:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitCpuid(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_RDTSC:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitRdtsc(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_RDTSCP:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitRdtscp(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_MONITOR:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitMonitor(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_MWAIT:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitMwait(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_HLT:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_HLT))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitHlt(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_MSR:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
            {
                uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
                uint16_t offMsrpm;
                uint8_t  uMsrpmBit;
                int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
                if (RT_SUCCESS(rc))
                {
                    Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
                    Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);

                    uint8_t const *pbMsrBitmap = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
                    pbMsrBitmap += offMsrpm;
                    bool const fInterceptRead  = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit));
                    bool const fInterceptWrite = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
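                    /* The MSRPM uses two bits per MSR: the even bit intercepts reads and the
                       odd bit intercepts writes, hence uMsrpmBit is always even above. */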

                    if (   (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
                        || (fInterceptRead  && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
                    {
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    }
                }
                else
                {
                    /*
                     * MSRs not covered by the MSRPM automatically cause an #VMEXIT.
                     * See AMD-V spec. "15.11 MSR Intercepts".
                     */
                    Assert(rc == VERR_OUT_OF_RANGE);
                    NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                }
            }
            return hmR0SvmExitMsr(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_IOIO:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
            {
                void *pvIoBitmap = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap);
                SVMIOIOEXITINFO IoExitInfo;
                IoExitInfo.u = pVmcbNstGst->ctrl.u64ExitInfo1;
                bool const fIntercept = hmR0SvmIsIoInterceptActive(pvIoBitmap, &IoExitInfo);
                if (fIntercept)
                    NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            }
            return hmR0SvmExitIOInstr(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_XCPT_PF:
        {
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            if (pVM->hm.s.fNestedPaging)
            {
                uint32_t const u32ErrCode    = pVmcbNstGstCtrl->u64ExitInfo1;
                uint64_t const uFaultAddress = pVmcbNstGstCtrl->u64ExitInfo2;

                /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
                if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF))
                    NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, u32ErrCode, uFaultAddress);

                /* If the nested-guest is not intercepting #PFs, forward the #PF to the guest. */
                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
                hmR0SvmSetPendingXcptPF(pVCpu, u32ErrCode, uFaultAddress);
                return VINF_SUCCESS;
            }
            return hmR0SvmExitXcptPF(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_XCPT_UD:
        {
            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_UD))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            hmR0SvmSetPendingXcptUD(pVCpu);
            return VINF_SUCCESS;
        }

        case SVM_EXIT_XCPT_MF:
        {
            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_MF))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitXcptMF(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_XCPT_DB:
        {
            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_DB))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmNestedExitXcptDB(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_XCPT_AC:
        {
            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_AC))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitXcptAC(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_XCPT_BP:
        {
            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_BP))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmNestedExitXcptBP(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_READ_CR0:
        case SVM_EXIT_READ_CR3:
        case SVM_EXIT_READ_CR4:
        {
            uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
            if (HMIsGuestSvmReadCRxInterceptSet(pVCpu, uCr))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitReadCRx(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_CR0_SEL_WRITE:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_WRITE_CR0:
        case SVM_EXIT_WRITE_CR3:
        case SVM_EXIT_WRITE_CR4:
        case SVM_EXIT_WRITE_CR8:   /* CR8 writes would go to the V_TPR rather than here, since we run with V_INTR_MASKING. */
        {
            uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
            Log4Func(("Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));

            if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_PAUSE:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitPause(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_VINTR:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VINTR))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_SMI:
        case SVM_EXIT_XCPT_NMI:   /* Should not occur, SVM_EXIT_NMI is used instead. */
        {
            /*
             * We shouldn't direct physical interrupts, NMIs or SMIs to the nested-guest.
             *
             * Although we don't intercept SMIs, the nested-guest might.  Therefore, we might
             * get an SMI #VMEXIT here, so simply ignore it rather than causing a corresponding
             * nested-guest #VMEXIT.
             */
            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
            return hmR0SvmExitIntr(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_FERR_FREEZE:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_FERR_FREEZE))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_INVLPG:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitInvlpg(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_WBINVD:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_WBINVD))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitWbinvd(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_INVD:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVD))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitInvd(pVCpu, pSvmTransient);
        }

        case SVM_EXIT_RDPMC:
        {
            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
                NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
            return hmR0SvmExitRdpmc(pVCpu, pSvmTransient);
        }

        default:
        {
            switch (uExitCode)
            {
                case SVM_EXIT_READ_DR0:  case SVM_EXIT_READ_DR1:  case SVM_EXIT_READ_DR2:  case SVM_EXIT_READ_DR3:
                case SVM_EXIT_READ_DR6:  case SVM_EXIT_READ_DR7:  case SVM_EXIT_READ_DR8:  case SVM_EXIT_READ_DR9:
                case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
                case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
                {
                    uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
                    if (HMIsGuestSvmReadDRxInterceptSet(pVCpu, uDr))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_WRITE_DR0:  case SVM_EXIT_WRITE_DR1:  case SVM_EXIT_WRITE_DR2:  case SVM_EXIT_WRITE_DR3:
                case SVM_EXIT_WRITE_DR6:  case SVM_EXIT_WRITE_DR7:  case SVM_EXIT_WRITE_DR8:  case SVM_EXIT_WRITE_DR9:
                case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
                case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
                {
                    uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
                    if (HMIsGuestSvmWriteDRxInterceptSet(pVCpu, uDr))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitWriteDRx(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_XCPT_DE:
                /* SVM_EXIT_XCPT_DB: */     /* Handled above. */
                /* SVM_EXIT_XCPT_NMI: */    /* Handled above. */
                /* SVM_EXIT_XCPT_BP: */     /* Handled above. */
                case SVM_EXIT_XCPT_OF:
                case SVM_EXIT_XCPT_BR:
                /* SVM_EXIT_XCPT_UD: */     /* Handled above. */
                case SVM_EXIT_XCPT_NM:
                case SVM_EXIT_XCPT_DF:
                case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
                case SVM_EXIT_XCPT_TS:
                case SVM_EXIT_XCPT_NP:
                case SVM_EXIT_XCPT_SS:
                case SVM_EXIT_XCPT_GP:
                /* SVM_EXIT_XCPT_PF: */     /* Handled above. */
                case SVM_EXIT_XCPT_15:      /* Reserved. */
                /* SVM_EXIT_XCPT_MF: */     /* Handled above. */
                /* SVM_EXIT_XCPT_AC: */     /* Handled above. */
                case SVM_EXIT_XCPT_MC:
                case SVM_EXIT_XCPT_XF:
                case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
                case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
                case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
                {
                    uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0;
                    if (HMIsGuestSvmXcptInterceptSet(pVCpu, uVector))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_XSETBV:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitXsetbv(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_TASK_SWITCH:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_IRET:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IRET))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitIret(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_SHUTDOWN:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitShutdown(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_VMMCALL:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitVmmCall(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_CLGI:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitClgi(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_STGI:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_STGI))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitStgi(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_VMLOAD:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitVmload(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_VMSAVE:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitVmsave(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_INVLPGA:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitInvlpga(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_VMRUN:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    return hmR0SvmExitVmrun(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_RSM:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RSM))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    hmR0SvmSetPendingXcptUD(pVCpu);
                    return VINF_SUCCESS;
                }

                case SVM_EXIT_SKINIT:
                {
                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
                        NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
                    hmR0SvmSetPendingXcptUD(pVCpu);
                    return VINF_SUCCESS;
                }

                case SVM_EXIT_NPF:
                {
                    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
                    return hmR0SvmExitNestedPF(pVCpu, pSvmTransient);
                }

                case SVM_EXIT_INIT:   /* We shouldn't get INIT signals while executing a nested-guest. */
                    return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);

                default:
                {
                    AssertMsgFailed(("hmR0SvmHandleExitNested: Unknown exit code %#x\n", pSvmTransient->u64ExitCode));
                    pVCpu->hm.s.u32HMError = pSvmTransient->u64ExitCode;
                    return VERR_SVM_UNKNOWN_EXIT;
                }
            }
        }
    }
    /* not reached */

#undef NST_GST_VMEXIT_CALL_RET
}
#endif


/**
 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
 *
 * @returns VBox status code (informational status codes included).
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 */
static int hmR0SvmHandleExit(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
    Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);

#ifdef DEBUG_ramshankar
# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) \
    do { \
        if ((a_fDbg) == 1) \
            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
        int rc = a_CallExpr; \
        if ((a_fDbg) == 1) \
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
        return rc; \
    } while (0)
#else
# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) return a_CallExpr
#endif
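    /* In the DEBUG_ramshankar variant above, the full guest state is imported before each
       handler and all context bits are marked dirty afterwards, trading speed for
       easier debugging. */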

    /*
     * The ordering of the case labels is based on most-frequently-occurring #VMEXITs
     * for most guests under normal workloads (for some definition of "normal").
     */
    uint64_t const uExitCode = pSvmTransient->u64ExitCode;
    switch (uExitCode)
    {
        case SVM_EXIT_NPF:         VMEXIT_CALL_RET(0, hmR0SvmExitNestedPF(pVCpu, pSvmTransient));
        case SVM_EXIT_IOIO:        VMEXIT_CALL_RET(0, hmR0SvmExitIOInstr(pVCpu, pSvmTransient));
        case SVM_EXIT_RDTSC:       VMEXIT_CALL_RET(0, hmR0SvmExitRdtsc(pVCpu, pSvmTransient));
        case SVM_EXIT_RDTSCP:      VMEXIT_CALL_RET(0, hmR0SvmExitRdtscp(pVCpu, pSvmTransient));
        case SVM_EXIT_CPUID:       VMEXIT_CALL_RET(0, hmR0SvmExitCpuid(pVCpu, pSvmTransient));
        case SVM_EXIT_XCPT_PF:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptPF(pVCpu, pSvmTransient));
        case SVM_EXIT_MSR:         VMEXIT_CALL_RET(0, hmR0SvmExitMsr(pVCpu, pSvmTransient));
        case SVM_EXIT_MONITOR:     VMEXIT_CALL_RET(0, hmR0SvmExitMonitor(pVCpu, pSvmTransient));
        case SVM_EXIT_MWAIT:       VMEXIT_CALL_RET(0, hmR0SvmExitMwait(pVCpu, pSvmTransient));
        case SVM_EXIT_HLT:         VMEXIT_CALL_RET(0, hmR0SvmExitHlt(pVCpu, pSvmTransient));

        case SVM_EXIT_XCPT_NMI:   /* Should not occur, SVM_EXIT_NMI is used instead. */
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:         VMEXIT_CALL_RET(0, hmR0SvmExitIntr(pVCpu, pSvmTransient));

        case SVM_EXIT_READ_CR0:
        case SVM_EXIT_READ_CR3:
        case SVM_EXIT_READ_CR4:    VMEXIT_CALL_RET(0, hmR0SvmExitReadCRx(pVCpu, pSvmTransient));

        case SVM_EXIT_CR0_SEL_WRITE:
        case SVM_EXIT_WRITE_CR0:
        case SVM_EXIT_WRITE_CR3:
        case SVM_EXIT_WRITE_CR4:
        case SVM_EXIT_WRITE_CR8:   VMEXIT_CALL_RET(0, hmR0SvmExitWriteCRx(pVCpu, pSvmTransient));

        case SVM_EXIT_VINTR:       VMEXIT_CALL_RET(0, hmR0SvmExitVIntr(pVCpu, pSvmTransient));
        case SVM_EXIT_PAUSE:       VMEXIT_CALL_RET(0, hmR0SvmExitPause(pVCpu, pSvmTransient));
        case SVM_EXIT_VMMCALL:     VMEXIT_CALL_RET(0, hmR0SvmExitVmmCall(pVCpu, pSvmTransient));
        case SVM_EXIT_INVLPG:      VMEXIT_CALL_RET(0, hmR0SvmExitInvlpg(pVCpu, pSvmTransient));
        case SVM_EXIT_WBINVD:      VMEXIT_CALL_RET(0, hmR0SvmExitWbinvd(pVCpu, pSvmTransient));
        case SVM_EXIT_INVD:        VMEXIT_CALL_RET(0, hmR0SvmExitInvd(pVCpu, pSvmTransient));
        case SVM_EXIT_RDPMC:       VMEXIT_CALL_RET(0, hmR0SvmExitRdpmc(pVCpu, pSvmTransient));
        case SVM_EXIT_IRET:        VMEXIT_CALL_RET(0, hmR0SvmExitIret(pVCpu, pSvmTransient));
        case SVM_EXIT_XCPT_UD:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptUD(pVCpu, pSvmTransient));
        case SVM_EXIT_XCPT_MF:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptMF(pVCpu, pSvmTransient));
        case SVM_EXIT_XCPT_DB:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptDB(pVCpu, pSvmTransient));
        case SVM_EXIT_XCPT_AC:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptAC(pVCpu, pSvmTransient));
        case SVM_EXIT_XCPT_BP:     VMEXIT_CALL_RET(0, hmR0SvmExitXcptBP(pVCpu, pSvmTransient));
        case SVM_EXIT_XSETBV:      VMEXIT_CALL_RET(0, hmR0SvmExitXsetbv(pVCpu, pSvmTransient));
        case SVM_EXIT_FERR_FREEZE: VMEXIT_CALL_RET(0, hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient));

        default:
        {
            switch (pSvmTransient->u64ExitCode)
            {
                case SVM_EXIT_READ_DR0:  case SVM_EXIT_READ_DR1:  case SVM_EXIT_READ_DR2:  case SVM_EXIT_READ_DR3:
                case SVM_EXIT_READ_DR6:  case SVM_EXIT_READ_DR7:  case SVM_EXIT_READ_DR8:  case SVM_EXIT_READ_DR9:
                case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
                case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
                    VMEXIT_CALL_RET(0, hmR0SvmExitReadDRx(pVCpu, pSvmTransient));

                case SVM_EXIT_WRITE_DR0:  case SVM_EXIT_WRITE_DR1:  case SVM_EXIT_WRITE_DR2:  case SVM_EXIT_WRITE_DR3:
                case SVM_EXIT_WRITE_DR6:  case SVM_EXIT_WRITE_DR7:  case SVM_EXIT_WRITE_DR8:  case SVM_EXIT_WRITE_DR9:
                case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
                case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
                    VMEXIT_CALL_RET(0, hmR0SvmExitWriteDRx(pVCpu, pSvmTransient));

                case SVM_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient));
                case SVM_EXIT_SHUTDOWN:    VMEXIT_CALL_RET(0, hmR0SvmExitShutdown(pVCpu, pSvmTransient));

                case SVM_EXIT_SMI:
                case SVM_EXIT_INIT:
                {
                    /*
                     * We don't intercept SMIs.  As for INIT signals, it really shouldn't ever happen here.
                     * If it ever does, we want to know about it, so log the exit code and bail.
                     */
                    VMEXIT_CALL_RET(0, hmR0SvmExitUnexpected(pVCpu, pSvmTransient));
                }

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
                case SVM_EXIT_CLGI:     VMEXIT_CALL_RET(0, hmR0SvmExitClgi(pVCpu, pSvmTransient));
                case SVM_EXIT_STGI:     VMEXIT_CALL_RET(0, hmR0SvmExitStgi(pVCpu, pSvmTransient));
                case SVM_EXIT_VMLOAD:   VMEXIT_CALL_RET(0, hmR0SvmExitVmload(pVCpu, pSvmTransient));
                case SVM_EXIT_VMSAVE:   VMEXIT_CALL_RET(0, hmR0SvmExitVmsave(pVCpu, pSvmTransient));
                case SVM_EXIT_INVLPGA:  VMEXIT_CALL_RET(0, hmR0SvmExitInvlpga(pVCpu, pSvmTransient));
                case SVM_EXIT_VMRUN:    VMEXIT_CALL_RET(0, hmR0SvmExitVmrun(pVCpu, pSvmTransient));
#else
                case SVM_EXIT_CLGI:
                case SVM_EXIT_STGI:
                case SVM_EXIT_VMLOAD:
                case SVM_EXIT_VMSAVE:
                case SVM_EXIT_INVLPGA:
                case SVM_EXIT_VMRUN:
#endif
                case SVM_EXIT_RSM:
                case SVM_EXIT_SKINIT:
                {
                    hmR0SvmSetPendingXcptUD(pVCpu);
                    return VINF_SUCCESS;
                }

#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
                case SVM_EXIT_XCPT_DE:
                /* SVM_EXIT_XCPT_DB: */     /* Handled above. */
                /* SVM_EXIT_XCPT_NMI: */    /* Handled above. */
                /* SVM_EXIT_XCPT_BP: */     /* Handled above. */
                case SVM_EXIT_XCPT_OF:
                case SVM_EXIT_XCPT_BR:
                /* SVM_EXIT_XCPT_UD: */     /* Handled above. */
                case SVM_EXIT_XCPT_NM:
                case SVM_EXIT_XCPT_DF:
                case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
                case SVM_EXIT_XCPT_TS:
                case SVM_EXIT_XCPT_NP:
                case SVM_EXIT_XCPT_SS:
                case SVM_EXIT_XCPT_GP:
                /* SVM_EXIT_XCPT_PF: */
                case SVM_EXIT_XCPT_15:      /* Reserved. */
                /* SVM_EXIT_XCPT_MF: */     /* Handled above. */
                /* SVM_EXIT_XCPT_AC: */     /* Handled above. */
                case SVM_EXIT_XCPT_MC:
                case SVM_EXIT_XCPT_XF:
                case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
                case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
                case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
                    VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient));
#endif /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */

                default:
                {
                    AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#RX64\n", uExitCode));
                    pVCpu->hm.s.u32HMError = uExitCode;
                    return VERR_SVM_UNKNOWN_EXIT;
                }
            }
        }
    }
    /* not reached */
#undef VMEXIT_CALL_RET
}


#ifdef VBOX_STRICT
/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
    RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()

# define HMSVM_ASSERT_PREEMPT_CPUID() \
    do \
    { \
        RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
        AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
    } while (0)

# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
    do { \
        AssertPtr((a_pVCpu)); \
        AssertPtr((a_pSvmTransient)); \
        Assert(ASMIntAreEnabled()); \
        HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
        HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
        Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu)); \
        HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
        if (VMMR0IsLogFlushDisabled((a_pVCpu))) \
            HMSVM_ASSERT_PREEMPT_CPUID(); \
    } while (0)
#else
# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
    do { \
        RT_NOREF2(a_pVCpu, a_pSvmTransient); \
    } while (0)
#endif


/**
 * Gets the IEM exception flags for the specified SVM event.
 *
 * @returns The IEM exception flags.
 * @param   pEvent      Pointer to the SVM event.
 *
 * @remarks This function currently only constructs flags required for
 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
 *          and CR2 aspects of an exception are not included).
 */
static uint32_t hmR0SvmGetIemXcptFlags(PCSVMEVENT pEvent)
{
    uint8_t const uEventType = pEvent->n.u3Type;
    uint32_t fIemXcptFlags;
    switch (uEventType)
    {
        case SVM_EVENT_EXCEPTION:
            /*
             * Only INT3 and INTO instructions can raise #BP and #OF exceptions.
             * See AMD spec. Table 8-1. "Interrupt Vector Source and Cause".
             */
            if (pEvent->n.u8Vector == X86_XCPT_BP)
            {
                fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR;
                break;
            }
            if (pEvent->n.u8Vector == X86_XCPT_OF)
            {
                fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_OF_INSTR;
                break;
            }
            /** @todo How do we distinguish ICEBP \#DB from the regular one? */
            RT_FALL_THRU();
        case SVM_EVENT_NMI:
            fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
            break;

        case SVM_EVENT_EXTERNAL_IRQ:
            fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
            break;

        case SVM_EVENT_SOFTWARE_INT:
            fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
            break;

        default:
            fIemXcptFlags = 0;
            AssertMsgFailed(("Unexpected event type! uEventType=%#x uVector=%#x", uEventType, pEvent->n.u8Vector));
            break;
    }
    return fIemXcptFlags;
}


/**
 * Handle a condition that occurred while delivering an event through the guest
 * IDT.
 *
 * @returns VBox status code (informational error codes included).
 * @retval  VINF_SUCCESS if we should continue handling the \#VMEXIT.
 * @retval  VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought to
 *          continue execution of the guest which will deliver the \#DF.
 * @retval  VINF_EM_RESET if we detected a triple-fault condition.
 * @retval  VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0SvmCheckExitDueToEventDelivery(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    int rc = VINF_SUCCESS;
    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);

    Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",
          pVmcb->ctrl.ExitIntInfo.u, !!pVmcb->ctrl.ExitIntInfo.n.u1Valid, !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid,
          pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, pVmcb->ctrl.ExitIntInfo.n.u3Type, pVmcb->ctrl.ExitIntInfo.n.u8Vector));

    /*
     * The EXITINTINFO (if valid) contains the prior exception (IDT vector) that was being
     * delivered to the guest when the delivery caused an intercepted #VMEXIT (Exit vector).
     *
     * See AMD spec. 15.7.3 "EXITINFO Pseudo-Code".
     */
    if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
    {
        IEMXCPTRAISE     enmRaise;
        IEMXCPTRAISEINFO fRaiseInfo;
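        /* Exit codes SVM_EXIT_XCPT_0 through SVM_EXIT_XCPT_31 map 1:1 onto exception
           vectors 0..31, so subtracting SVM_EXIT_XCPT_0 recovers the exit vector. */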
        bool const       fExitIsHwXcpt = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0 <= SVM_EXIT_XCPT_31 - SVM_EXIT_XCPT_0;
        uint8_t const    uIdtVector    = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
        if (fExitIsHwXcpt)
        {
            uint8_t  const uExitVector      = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0;
            uint32_t const fIdtVectorFlags  = hmR0SvmGetIemXcptFlags(&pVmcb->ctrl.ExitIntInfo);
            uint32_t const fExitVectorFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
            enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
        }
        else
        {
            /*
             * If delivery of an event caused a #VMEXIT that is not an exception (e.g. #NPF)
             * then we end up here.
             *
             * If the event was:
             *   - a software interrupt, we can re-execute the instruction which will
             *     regenerate the event.
             *   - an NMI, we need to clear NMI blocking and re-inject the NMI.
             *   - a hardware exception or external interrupt, we re-inject it.
             */
            fRaiseInfo = IEMXCPTRAISEINFO_NONE;
            if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_SOFTWARE_INT)
                enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
            else
                enmRaise = IEMXCPTRAISE_PREV_EVENT;
        }

        switch (enmRaise)
        {
            case IEMXCPTRAISE_CURRENT_XCPT:
            case IEMXCPTRAISE_PREV_EVENT:
            {
                /* For software interrupts, we shall re-execute the instruction. */
                if (!(fRaiseInfo & IEMXCPTRAISEINFO_SOFT_INT_XCPT))
                {
                    RTGCUINTPTR GCPtrFaultAddress = 0;

                    /* If we are re-injecting an NMI, clear NMI blocking. */
                    if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
                        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);

                    /* Determine a vectoring #PF condition, see comment in hmR0SvmExitXcptPF(). */
                    if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
                    {
                        pSvmTransient->fVectoringPF = true;
                        Log4Func(("IDT: Pending vectoring #PF due to delivery of Ext-Int/NMI. uCR2=%#RX64\n",
                                  pVCpu->cpum.GstCtx.cr2));
                    }
                    else if (   pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION
                             && uIdtVector == X86_XCPT_PF)
                    {
                        /*
                         * If the previous exception was a #PF, we need to recover the CR2 value.
                         * This can't happen with shadow paging.
                         */
                        GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
                    }

                    /*
                     * Without nested paging, when uExitVector is #PF, CR2 value will be updated from the VMCB's
                     * exit info. fields, if it's a guest #PF, see hmR0SvmExitXcptPF().
                     */
                    Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
                    hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, GCPtrFaultAddress);

                    Log4Func(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32 GCPtrFaultAddress=%#RX64\n",
                              pVmcb->ctrl.ExitIntInfo.u, RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid),
                              pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, GCPtrFaultAddress));
                }
                break;
            }

            case IEMXCPTRAISE_REEXEC_INSTR:
            {
                Assert(rc == VINF_SUCCESS);
                break;
            }

            case IEMXCPTRAISE_DOUBLE_FAULT:
            {
                /*
                 * Determine a vectoring double #PF condition.  It is used later, when PGM
                 * evaluates the second #PF as a guest #PF (and not a shadow #PF) and it then
                 * needs to be converted into a #DF.
                 */
                if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
                {
                    Log4Func(("IDT: Pending vectoring double #PF uCR2=%#RX64\n", pVCpu->cpum.GstCtx.cr2));
                    pSvmTransient->fVectoringDoublePF = true;
                    Assert(rc == VINF_SUCCESS);
                }
                else
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
                    hmR0SvmSetPendingXcptDF(pVCpu);
                    rc = VINF_HM_DOUBLE_FAULT;
                }
                break;
            }

            case IEMXCPTRAISE_TRIPLE_FAULT:
            {
                rc = VINF_EM_RESET;
                break;
            }

            case IEMXCPTRAISE_CPU_HANG:
            {
                rc = VERR_EM_GUEST_CPU_HANG;
                break;
            }

            default:
                AssertMsgFailedBreakStmt(("Bogus enmRaise value: %d (%#x)\n", enmRaise, enmRaise), rc = VERR_SVM_IPE_2);
        }
    }
    Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
    return rc;
}


/**
 * Advances the guest RIP making use of the CPU's NRIP_SAVE feature if
 * supported, otherwise advances the RIP by the number of bytes specified in
 * @a cb.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   cb      RIP increment value in bytes when the CPU doesn't support
 *                  NRIP_SAVE.
 */
DECLINLINE(void) hmR0SvmAdvanceRipHwAssist(PVMCPU pVCpu, uint32_t cb)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
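    /* With NRIP_SAVE the CPU stores the next instruction's RIP in the VMCB on #VMEXIT,
       letting us advance RIP exactly instead of trusting the caller-supplied length. */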
6048 | if (fSupportsNextRipSave)
|
---|
6049 | {
|
---|
6050 | PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
|
---|
6051 | Assert(pVmcb);
|
---|
6052 | Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
|
---|
6053 | Assert(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb);
|
---|
6054 | pCtx->rip = pVmcb->ctrl.u64NextRIP;
|
---|
6055 | }
|
---|
6056 | else
|
---|
6057 | pCtx->rip += cb;
|
---|
6058 |
|
---|
6059 | HMSVM_UPDATE_INTR_SHADOW(pVCpu);
|
---|
6060 | }
|
---|
6061 |
|
---|
6062 |
|
---|
/**
 * Gets the length of the current instruction when the CPU supports the NRIP_SAVE
 * feature.
 *
 * @returns The current instruction length in bytes.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks Requires the NRIP_SAVE feature to be supported by the CPU.
 */
DECLINLINE(uint8_t) hmR0SvmGetInstrLength(PVMCPU pVCpu)
{
    Assert(hmR0SvmSupportsNextRipSave(pVCpu));
    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    return pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
}


/**
 * Advances the guest RIP by the number of bytes specified in @a cb. This does
 * not make use of any hardware features to determine the instruction length.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   cb      RIP increment value in bytes.
 */
DECLINLINE(void) hmR0SvmAdvanceRipDumb(PVMCPU pVCpu, uint32_t cb)
{
    pVCpu->cpum.GstCtx.rip += cb;
    HMSVM_UPDATE_INTR_SHADOW(pVCpu);
}
#undef HMSVM_UPDATE_INTR_SHADOW


/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */

/** @name \#VMEXIT handlers.
 * @{
 */

/**
 * \#VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
 */
HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
        STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
    else if (pSvmTransient->u64ExitCode == SVM_EXIT_INTR)
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);

    /*
     * AMD-V has no preemption timer and the generic periodic preemption timer has no way to
     * signal -before- the timer fires whether the current interrupt is our own timer or
     * some other host interrupt. We also cannot examine what interrupt it is until the
     * host actually takes the interrupt.
     *
     * Going back to executing guest code here unconditionally causes random scheduling
     * problems (observed on an AMD Phenom 9850 Quad-Core on Windows 64-bit host).
     */
    return VINF_EM_RAW_INTERRUPT;
}


/**
 * \#VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
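    /* Fast path: with NRIP_SAVE we already know the instruction length and can hand
       IEM a pre-decoded WBINVD; without it we have to fall back to full
       one-instruction emulation. The same pattern recurs in the handlers below. */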
    if (fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedWbinvd(pVCpu, cbInstr);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
    }

    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedInvd(pVCpu, cbInstr);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
    }

    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
    VBOXSTRICTRC rcStrict;
    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
                                                            EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
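    /* EM keeps a small history of recent exits keyed by PC. A non-NULL exit record
       means this CPUID at this RIP is exiting frequently (or needs probing), in
       which case EMHistoryExec takes over instead of one-off emulation here. */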
    if (!pExitRec)
    {
        bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
        if (fSupportsNextRipSave)
        {
            uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
            rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr);
        }
        else
        {
            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
            rcStrict = IEMExecOne(pVCpu);
        }

        if (rcStrict == VINF_IEM_RAISED_XCPT)
        {
            rcStrict = VINF_SUCCESS;
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        }
        HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    }
    else
    {
        /*
         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
         */
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);

        Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));

        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);

        Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedRdtsc(pVCpu, cbInstr);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
    }

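    /* The guest just read the TSC; have the TSC offsetting (and whether RDTSC can
       run unintercepted) re-evaluated before the next VM-entry. */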
    if (rcStrict == VINF_SUCCESS)
        pSvmTransient->fUpdateTscOffsetting = true;
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4
                                        | CPUMCTX_EXTRN_TSC_AUX);
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedRdtscp(pVCpu, cbInstr);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
    }

    if (rcStrict == VINF_SUCCESS)
        pSvmTransient->fUpdateTscOffsetting = true;
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_SS);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
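        /* RDPMC is a fixed-length, 2-byte opcode (0F 33). */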
        hmR0SvmAdvanceRipHwAssist(pVCpu, 2);
        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
    }
    else
    {
        AssertMsgFailed(("hmR0SvmExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
        rc = VERR_EM_INTERPRETER;
    }
    return rc;
}


/**
 * \#VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
    bool const fSupportsNextRipSave   = hmR0SvmSupportsNextRipSave(pVCpu);
    if (   fSupportsDecodeAssists
        && fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        PCSVMVMCB pVmcb         = hmR0SvmGetCurrentVmcb(pVCpu);
        uint8_t const cbInstr   = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
        RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1;
        rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
    }

    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_VAL(rcStrict);
}


/**
 * \#VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

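    /* HLT is a single-byte opcode (F4). */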
    hmR0SvmAdvanceRipHwAssist(pVCpu, 1);
    int rc = EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx) ? VINF_SUCCESS : VINF_EM_HALT;
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);

    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    if (rc != VINF_SUCCESS)
        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
    return rc;
}


/**
 * \#VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SS);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
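        /* MONITOR is a fixed-length, 3-byte opcode (0F 01 C8). */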
        hmR0SvmAdvanceRipHwAssist(pVCpu, 3);
        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
    }
    else
    {
        AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
        rc = VERR_EM_INTERPRETER;
    }
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
    return rc;
}


/**
 * \#VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SS);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    int rc = VBOXSTRICTRC_VAL(rc2);
    if (   rc == VINF_EM_HALT
        || rc == VINF_SUCCESS)
    {
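        /* MWAIT is a fixed-length, 3-byte opcode (0F 01 C9). */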
        hmR0SvmAdvanceRipHwAssist(pVCpu, 3);

        if (   rc == VINF_EM_HALT
            && EMMonitorWaitShouldContinue(pVCpu, pCtx))
        {
            rc = VINF_SUCCESS;
        }
        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
    }
    else
    {
        AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
        rc = VERR_EM_INTERPRETER;
    }
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
              ("hmR0SvmExitMwait: EMInterpretMWait failed rc=%Rrc\n", rc));
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
    return rc;
}


/**
 * \#VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN). Conditional
 * \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    return VINF_EM_RESET;
}


/**
 * \#VMEXIT handler for unexpected exits. Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitUnexpected(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pSvmTransient->u64ExitCode,
                     pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2));
    RT_NOREF(pVmcb);
    pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
    return VERR_SVM_UNEXPECTED_EXIT;
}


/**
 * \#VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
#ifdef VBOX_WITH_STATISTICS
    switch (pSvmTransient->u64ExitCode)
    {
        case SVM_EXIT_READ_CR0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
        case SVM_EXIT_READ_CR2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
        case SVM_EXIT_READ_CR3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
        case SVM_EXIT_READ_CR4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
        case SVM_EXIT_READ_CR8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
    }
#endif

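    /* With decode assists EXITINFO1 tells us whether the exit was caused by a MOV
       CRx (as opposed to LMSW/SMSW/CLTS) and which GPR the instruction used, so
       we can skip software decoding entirely. */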
    bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
    bool const fSupportsNextRipSave   = hmR0SvmSupportsNextRipSave(pVCpu);
    if (   fSupportsDecodeAssists
        && fSupportsNextRipSave)
    {
        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
        bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
        if (fMovCRx)
        {
            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
            uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
            uint8_t const iCrReg  = pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0;
            uint8_t const iGReg   = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
            VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
            return VBOXSTRICTRC_VAL(rcStrict);
        }
        /* else: SMSW instruction, fall back below to IEM for this. */
    }

    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
    int rc = VBOXSTRICTRC_VAL(rc2);
    AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3,
              ("hmR0SvmExitReadCRx: EMInterpretInstruction failed rc=%Rrc\n", rc));
    Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
    return rc;
}


/**
 * \#VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    uint64_t const uExitCode = pSvmTransient->u64ExitCode;
    uint8_t const  iCrReg    = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0);
    Assert(iCrReg <= 15);

    VBOXSTRICTRC rcStrict = VERR_SVM_IPE_5;
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    bool fDecodedInstr = false;
    bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
    bool const fSupportsNextRipSave   = hmR0SvmSupportsNextRipSave(pVCpu);
    if (   fSupportsDecodeAssists
        && fSupportsNextRipSave)
    {
        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
        bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
        if (fMovCRx)
        {
            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
            uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
            uint8_t const iGReg   = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
            Log4Func(("Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg));
            rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
            fDecodedInstr = true;
        }
        /* else: LMSW or CLTS instruction, fall back below to IEM for this. */
    }

    if (!fDecodedInstr)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        Log4Func(("iCrReg=%#x\n", iCrReg));
        rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(pCtx), NULL);
        if (RT_UNLIKELY(   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED))
            rcStrict = VERR_EM_INTERPRETER;
    }

    if (rcStrict == VINF_SUCCESS)
    {
        switch (iCrReg)
        {
            case 0:
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
                break;

            case 2:
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
                break;

            case 3:
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR3);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
                break;

            case 4:
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
                break;

            case 8:
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
                break;

            default:
            {
                AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x\n",
                                 pSvmTransient->u64ExitCode, iCrReg));
                break;
            }
        }
        HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    }
    else
        Assert(rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_PGM_CHANGE_MODE || rcStrict == VINF_PGM_SYNC_CR3);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT helper for read MSRs, see hmR0SvmExitMsr.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pVmcb   Pointer to the VM control block.
 */
static VBOXSTRICTRC hmR0SvmExitReadMsr(PVMCPU pVCpu, PSVMVMCB pVmcb)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
    Log4Func(("idMsr=%#RX32\n", pCtx->ecx));

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        /** @todo Optimize this: Only retrieve the MSR bits we need here. CPUMAllMsrs.cpp
         *        can ask for what it needs instead of using CPUMCTX_EXTRN_ALL_MSRS. */
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
        rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmcb->ctrl.u64NextRIP - pCtx->rip);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
        rcStrict = IEMExecOne(pVCpu);
    }

    AssertMsg(   rcStrict == VINF_SUCCESS
              || rcStrict == VINF_IEM_RAISED_XCPT
              || rcStrict == VINF_CPUM_R3_MSR_READ,
              ("hmR0SvmExitReadMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return rcStrict;
}


/**
 * \#VMEXIT helper for write MSRs, see hmR0SvmExitMsr.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pVmcb           Pointer to the VM control block.
 * @param   pSvmTransient   Pointer to the SVM-transient structure.
 */
static VBOXSTRICTRC hmR0SvmExitWriteMsr(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    uint32_t const idMsr = pCtx->ecx;
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
    Log4Func(("idMsr=%#RX32\n", idMsr));

    /*
     * Handle TPR patching MSR writes.
     * We utilize the LSTAR MSR for patching.
     */
    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive
        && idMsr == MSR_K8_LSTAR)
    {
        if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
        {
            /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
            int rc2 = APICSetTpr(pVCpu, pCtx->eax & 0xff);
            AssertRC(rc2);
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
        }

        int rc = VINF_SUCCESS;
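        /* WRMSR is a fixed-length, 2-byte opcode (0F 30). */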
        hmR0SvmAdvanceRipHwAssist(pVCpu, 2);
        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
        return rc;
    }

    /*
     * Handle regular MSR writes.
     */
    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        /** @todo Optimize this: We don't need to get much of the MSR state here
         *        since we're only updating. CPUMAllMsrs.cpp can ask for what it needs and
         *        clear the applicable extern flags. */
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
        rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmcb->ctrl.u64NextRIP - pCtx->rip);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
        rcStrict = IEMExecOne(pVCpu);
    }

    AssertMsg(   rcStrict == VINF_SUCCESS
              || rcStrict == VINF_IEM_RAISED_XCPT
              || rcStrict == VINF_CPUM_R3_MSR_WRITE,
              ("hmR0SvmExitWriteMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

    if (rcStrict == VINF_SUCCESS)
    {
        /* If this is an X2APIC WRMSR access, update the APIC TPR state. */
        if (   idMsr >= MSR_IA32_X2APIC_START
            && idMsr <= MSR_IA32_X2APIC_END)
        {
            /*
             * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest().
             * When full APIC register virtualization is implemented we'll have to make sure
             * APIC state is saved from the VMCB before IEM changes it.
             */
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
        }
        else
        {
            switch (idMsr)
            {
                case MSR_IA32_TSC:          pSvmTransient->fUpdateTscOffsetting = true;                                     break;
                case MSR_K6_EFER:           ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR);          break;
                case MSR_K8_FS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS);                break;
                case MSR_K8_GS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS);                break;
                case MSR_IA32_SYSENTER_CS:  ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR);   break;
                case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);  break;
                case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);  break;
            }
        }
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return rcStrict;
}


/**
 * \#VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional
 * \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ)
        return VBOXSTRICTRC_TODO(hmR0SvmExitReadMsr(pVCpu, pVmcb));

    Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE);
    return VBOXSTRICTRC_TODO(hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient));
}


/**
 * \#VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);

    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);

    /** @todo Stepping with nested-guest. */
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    {
        /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
        if (pSvmTransient->fWasGuestDebugStateActive)
        {
            AssertMsgFailed(("hmR0SvmExitReadDRx: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
            pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
            return VERR_SVM_UNEXPECTED_EXIT;
        }

        /*
         * Lazy DR0-3 loading.
         */
        if (!pSvmTransient->fWasHyperDebugStateActive)
        {
            Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
            Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));

            /* Don't intercept DRx read and writes. */
            PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
            pVmcb->ctrl.u16InterceptRdDRx = 0;
            pVmcb->ctrl.u16InterceptWrDRx = 0;
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

            /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
            VMMRZCallRing3Disable(pVCpu);
            HM_DISABLE_PREEMPT(pVCpu);

            /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
            CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
            Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);

            HM_RESTORE_PREEMPT();
            VMMRZCallRing3Enable(pVCpu);

            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
            return VINF_SUCCESS;
        }
    }

    /*
     * Interpret the read/writing of DRx.
     */
    /** @todo Decode assist. */
    VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
    Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* Not necessary for read accesses but whatever doesn't hurt for now, will be fixed with decode assist. */
        /** @todo CPUM should set this flag! */
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
    }
    else
        Assert(rc == VERR_EM_INTERPRETER);
    return VBOXSTRICTRC_TODO(rc);
}


/**
 * \#VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    /* For now it's the same since we interpret the instruction anyway. Will change when using of Decode Assist is implemented. */
    int rc = hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
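    /* The read handler above bumped the DRx-read counter; transfer that count to
       the DRx-write counter so the statistics stay accurate. */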
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead);
    return rc;
}


/**
 * \#VMEXIT handler for XCRx write (SVM_EXIT_XSETBV). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXsetbv(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);

    /** @todo decode assists... */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    {
        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
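        /* Remember whether the guest XCR0 now differs from the host's so the
           world-switch code knows it must swap XCR0 around VM-entry/exit. */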
        pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
        Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0,
                  pCtx->cr4));
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK);

    /* I/O operation lookup arrays. */
    static uint32_t const s_aIOSize[8]  = { 0, 1, 2, 0, 4, 0, 0, 0 };                   /* Size of the I/O accesses in bytes. */
    static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 };  /* AND masks for saving
                                                                                           the result (in AL/AX/EAX). */
    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
    PCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;
    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);

    Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));

    /* Refer to AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
    SVMIOIOEXITINFO IoExitInfo;
    IoExitInfo.u = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
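    /* EXITINFO1 layout (AMD APM Figure 15-2): bit 0 = direction (1=IN, 0=OUT),
       bit 2 = string op, bit 3 = REP prefix, bits 6:4 = operand size flags,
       bits 9:7 = address size flags, bits 12:10 = effective segment, bits 31:16 = port. */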
    uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
    uint32_t cbValue  = s_aIOSize[uIOWidth];
    uint32_t uAndVal  = s_aIOOpAnd[uIOWidth];

    if (RT_UNLIKELY(!cbValue))
    {
        AssertMsgFailed(("hmR0SvmExitIOInstr: Invalid IO operation. uIOWidth=%u\n", uIOWidth));
        return VERR_EM_INTERPRETER;
    }

    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    VBOXSTRICTRC rcStrict;
    PCEMEXITREC pExitRec = NULL;
    if (   !pVCpu->hm.s.fSingleInstruction
        && !pVCpu->cpum.GstCtx.eflags.Bits.u1TF)
        pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
                                                    !IoExitInfo.n.u1Str
                                                    ? IoExitInfo.n.u1Type == SVM_IOIO_READ
                                                      ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
                                                      : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
                                                    : IoExitInfo.n.u1Type == SVM_IOIO_READ
                                                      ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
                                                      : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
                                                    pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (!pExitRec)
    {
        bool fUpdateRipAlready = false;
        if (IoExitInfo.n.u1Str)
        {
            /* INS/OUTS - I/O String instruction. */
            /** @todo Huh? why can't we use the segment prefix information given by AMD-V
             *        in EXITINFO1? Investigate once this thing is up and running. */
            Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, IoExitInfo.n.u16Port, cbValue,
                      IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 'w' : 'r'));
            AssertReturn(pCtx->dx == IoExitInfo.n.u16Port, VERR_SVM_IPE_2);
            static IEMMODE const s_aenmAddrMode[8] =
            {
                (IEMMODE)-1, IEMMODE_16BIT, IEMMODE_32BIT, (IEMMODE)-1, IEMMODE_64BIT, (IEMMODE)-1, (IEMMODE)-1, (IEMMODE)-1
            };
            IEMMODE enmAddrMode = s_aenmAddrMode[(IoExitInfo.u >> 7) & 0x7];
            if (enmAddrMode != (IEMMODE)-1)
            {
                uint64_t cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
                if (cbInstr <= 15 && cbInstr >= 1)
                {
                    Assert(cbInstr >= 1U + IoExitInfo.n.u1Rep);
                    if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
                    {
                        /* Don't know exactly how to detect whether u3Seg is valid, currently
                           only enabling it for Bulldozer and later with NRIP.  OS/2 broke on
                           2384 Opterons when only checking NRIP. */
                        bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
                        if (   fSupportsNextRipSave
                            && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
                        {
                            AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_DS || cbInstr > 1U + IoExitInfo.n.u1Rep,
                                      ("u3Seg=%d cbInstr=%d u1REP=%d", IoExitInfo.n.u3Seg, cbInstr, IoExitInfo.n.u1Rep));
                            rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
                                                            IoExitInfo.n.u3Seg, true /*fIoChecked*/);
                        }
                        else if (cbInstr == 1U + IoExitInfo.n.u1Rep)
                            rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
                                                            X86_SREG_DS, true /*fIoChecked*/);
                        else
                            rcStrict = IEMExecOne(pVCpu);
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
                    }
                    else
                    {
                        AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_ES /*=0*/, ("%#x\n", IoExitInfo.n.u3Seg));
                        rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
                                                       true /*fIoChecked*/);
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
                    }
                }
                else
                {
                    AssertMsgFailed(("rip=%RX64 nrip=%#RX64 cbInstr=%#RX64\n", pCtx->rip, pVmcb->ctrl.u64ExitInfo2, cbInstr));
                    rcStrict = IEMExecOne(pVCpu);
                }
            }
            else
            {
                AssertMsgFailed(("IoExitInfo=%RX64\n", IoExitInfo.u));
                rcStrict = IEMExecOne(pVCpu);
            }
            fUpdateRipAlready = true;
        }
        else
        {
            /* IN/OUT - I/O instruction. */
            Assert(!IoExitInfo.n.u1Rep);

            if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
            {
                rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
            }
            else
            {
                uint32_t u32Val = 0;
                rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
                if (IOM_SUCCESS(rcStrict))
                {
                    /* Save result of I/O IN instr. in AL/AX/EAX. */
                    /** @todo r=bird: 32-bit op size should clear high bits of rax! */
                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
                }
                else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
                {
                    HMR0SavePendingIOPortRead(pVCpu, pVCpu->cpum.GstCtx.rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
                                              uAndVal, cbValue);
                }

                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
            }
        }

        if (IOM_SUCCESS(rcStrict))
        {
            /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
            if (!fUpdateRipAlready)
                pCtx->rip = pVmcb->ctrl.u64ExitInfo2;

            /*
             * If any I/O breakpoints are armed, we need to check if one triggered
             * and take appropriate action.
             * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
             */
            /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
             *        execution engines about whether hyper BPs and such are pending. */
            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);
            uint32_t const uDr7 = pCtx->dr[7];
            if (RT_UNLIKELY(   (   (uDr7 & X86_DR7_ENABLED_MASK)
                                && X86_DR7_ANY_RW_IO(uDr7)
                                && (pCtx->cr4 & X86_CR4_DE))
                            || DBGFBpIsHwIoArmed(pVM)))
            {
                /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
                VMMRZCallRing3Disable(pVCpu);
                HM_DISABLE_PREEMPT(pVCpu);

                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
                CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);

                VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, &pVCpu->cpum.GstCtx, IoExitInfo.n.u16Port, cbValue);
                if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
                {
                    /* Raise #DB. */
                    pVmcb->guest.u64DR6 = pCtx->dr[6];
                    pVmcb->guest.u64DR7 = pCtx->dr[7];
                    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
                    hmR0SvmSetPendingXcptDB(pVCpu);
                }
                /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
                   however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
                else if (   rcStrict2 != VINF_SUCCESS
                         && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
                    rcStrict = rcStrict2;
                AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);

                HM_RESTORE_PREEMPT();
                VMMRZCallRing3Enable(pVCpu);
            }

            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
        }

#ifdef VBOX_STRICT
        if (rcStrict == VINF_IOM_R3_IOPORT_READ)
            Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
        else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE)
            Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
        else
        {
            /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
             *        statuses, that the VMM device and some others may return. See
             *        IOM_SUCCESS() for guidance. */
            AssertMsg(   RT_FAILURE(rcStrict)
                      || rcStrict == VINF_SUCCESS
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                      || rcStrict == VINF_EM_DBG_BREAKPOINT
                      || rcStrict == VINF_EM_RAW_GUEST_TRAP
                      || rcStrict == VINF_EM_RAW_TO_R3
                      || rcStrict == VINF_TRPM_XCPT_DISPATCHED
                      || rcStrict == VINF_EM_TRIPLE_FAULT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }
#endif
    }
    else
    {
        /*
         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
         */
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
        STAM_COUNTER_INC(!IoExitInfo.n.u1Str
                         ? IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
                         : IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
        Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IoExitInfo.n.u1Rep ? "REP " : "",
              IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? "OUT" : "IN", IoExitInfo.n.u1Str ? "S" : "", IoExitInfo.n.u16Port, uIOWidth));

        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);

        Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    PVM      pVM  = pVCpu->CTX_SUFF(pVM);
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Assert(pVM->hm.s.fNestedPaging);

    /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
    PSVMVMCB pVmcb           = hmR0SvmGetCurrentVmcb(pVCpu);
    RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
    uint32_t u32ErrCode      = pVmcb->ctrl.u64ExitInfo1;    /* Note! High bits in EXITINFO1 may contain additional info and are
                                                               thus intentionally not copied into u32ErrCode. */

    Log4Func(("#NPF at CS:RIP=%04x:%#RX64 GCPhysFaultAddr=%RGp ErrCode=%#x\n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr,
              u32ErrCode));

    /*
     * TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions.
     */
    if (   pVM->hm.s.fTprPatchingAllowed
        && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
        && (   !(u32ErrCode & X86_TRAP_PF_P)                                                             /* Not present */
            || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD))  /* MMIO page. */
        && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
        && !CPUMIsGuestInLongModeEx(pCtx)
        && !CPUMGetGuestCPL(pVCpu)
        && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
    {
        RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
        GCPhysApicBase &= PAGE_BASE_GC_MASK;

        if (GCPhysFaultAddr == GCPhysApicBase + XAPIC_OFF_TPR)
        {
            /* Only attempt to patch the instruction once. */
            PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
            if (!pPatch)
                return VINF_EM_HM_PATCH_TPR_INSTR;
        }
    }

    /*
     * Determine the nested paging mode.
     */
    PGMMODE enmNestedPagingMode;
#if HC_ARCH_BITS == 32
    if (CPUMIsGuestInLongModeEx(pCtx))
        enmNestedPagingMode = PGMMODE_AMD64_NX;
    else
#endif
        enmNestedPagingMode = PGMGetHostMode(pVM);

    /*
     * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
     */
    Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
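    /* PGM intentionally sets a reserved bit in the paging structures backing MMIO
       pages (hence P+RSVD in the error code), so such accesses can be routed
       straight to PGMR0Trap0eHandlerNPMisconfig without a guest page-table walk. */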
    if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
    {
        /*
         * If event delivery causes an MMIO #NPF, go back to instruction emulation as otherwise
         * injecting the original pending event would most likely cause the same MMIO #NPF.
         */
        if (pVCpu->hm.s.Event.fPending)
            return VINF_EM_RAW_INJECT_TRPM_EVENT;

        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
        VBOXSTRICTRC rcStrict;
        PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
                                                                EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
                                                                pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
        if (!pExitRec)
        {

            rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
                                                     u32ErrCode);

            /*
             * If we succeed, resume guest execution.
             *
             * If we fail in interpreting the instruction because we couldn't get the guest
             * physical address of the page containing the instruction via the guest's page
             * tables (we would invalidate the guest page in the host TLB), resume execution
             * which would cause a guest page fault to let the guest handle this weird case.
             *
             * See @bugref{6043}.
             */
            if (   rcStrict == VINF_SUCCESS
                || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
                || rcStrict == VERR_PAGE_NOT_PRESENT)
            {
                /* Successfully handled MMIO operation. */
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
                rcStrict = VINF_SUCCESS;
            }
        }
        else
        {
            /*
             * Frequent exit or something needing probing. Get state and call EMHistoryExec.
             */
            Assert(pCtx == &pVCpu->cpum.GstCtx);
            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
            Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
                  pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysFaultAddr));

            rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);

            Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
                  pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
                  VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        }
        return VBOXSTRICTRC_TODO(rcStrict);
    }

    TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
    int rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
    TRPMResetTrap(pVCpu);

    Log4Func(("#NPF: PGMR0Trap0eHandlerNestedPaging returns %Rrc CS:RIP=%04x:%#RX64\n", rc, pCtx->cs.Sel, pCtx->rip));

    /*
     * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
     */
    if (   rc == VINF_SUCCESS
        || rc == VERR_PAGE_TABLE_NOT_PRESENT
        || rc == VERR_PAGE_NOT_PRESENT)
    {
        /* We've successfully synced our shadow page tables. */
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
        rc = VINF_SUCCESS;
    }

    return rc;
}


/**
 * \#VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional
 * \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);

    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    hmR0SvmClearIntWindowExiting(pVCpu, pVmcb);

    /* Deliver the pending interrupt via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
    return VINF_SUCCESS;
}


7278 | /**
|
---|
7279 | * \#VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional
|
---|
7280 | * \#VMEXIT.
|
---|
7281 | */
|
---|
7282 | HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
|
---|
7283 | {
|
---|
7284 | HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
|
---|
7285 | HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
|
---|
7286 |
|
---|
7287 | #ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
|
---|
7288 | Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
|
---|
7289 | #endif
|
---|
7290 |
|
---|
7291 | /* Check if this task-switch occurred while delivering an event through the guest IDT. */
|
---|
7292 | if (pVCpu->hm.s.Event.fPending) /* Can happen with exceptions/NMI. See @bugref{8411}. */
|
---|
7293 | {
|
---|
7294 | /*
|
---|
7295 | * AMD-V provides us with the exception which caused the TS; we collect
|
---|
7296 | * the information in the call to hmR0SvmCheckExitDueToEventDelivery().
|
---|
7297 | */
|
---|
7298 | Log4Func(("TS occurred during event delivery\n"));
|
---|
7299 | STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
|
---|
7300 | return VINF_EM_RAW_INJECT_TRPM_EVENT;
|
---|
7301 | }
|
---|
7302 |
|
---|
7303 | /** @todo Emulate task switch someday, currently just going back to ring-3 for
|
---|
7304 | * emulation. */
|
---|
7305 | STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
|
---|
7306 | return VERR_EM_INTERPRETER;
|
---|
7307 | }


/**
 * \#VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);

    if (pVCpu->CTX_SUFF(pVM)->hm.s.fTprPatchingAllowed)
    {
        int rc = hmSvmEmulateMovTpr(pVCpu);
        if (rc != VERR_NOT_FOUND)
        {
            Log4Func(("hmSvmEmulateMovTpr returns %Rrc\n", rc));
            return rc;
        }
    }

    if (EMAreHypercallInstructionsEnabled(pVCpu))
    {
        VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
        if (RT_SUCCESS(rcStrict))
        {
            /* Only update the RIP if we're continuing guest execution and not in the
               case of, say, VINF_GIM_R3_HYPERCALL. */
            if (rcStrict == VINF_SUCCESS)
                hmR0SvmAdvanceRipHwAssist(pVCpu, 3 /* cbInstr */);

            return VBOXSTRICTRC_VAL(rcStrict);
        }
        else
            Log4Func(("GIMHypercall returns %Rrc -> #UD\n", VBOXSTRICTRC_VAL(rcStrict)));
    }

    hmR0SvmSetPendingXcptUD(pVCpu);
    return VINF_SUCCESS;
}
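
/*
 * Guest-side view for orientation (illustrative only; the exact register protocol
 * depends on the GIM provider the VM was configured with, so treat the register
 * usage below as a placeholder):
 *
 *     ; 32-bit guest issuing a hypercall. VMMCALL encodes as 0F 01 D9, i.e. 3
 *     ; bytes, matching the cbInstr passed to hmR0SvmAdvanceRipHwAssist() above.
 *     mov     eax, <hypercall-code>
 *     vmmcall
 */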


/**
 * \#VMEXIT handler for PAUSE (SVM_EXIT_PAUSE). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitPause(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    hmR0SvmAdvanceRipHwAssist(pVCpu, 2);
    /** @todo The guest has likely hit a contended spinlock. We might want to poke
     *        or schedule a different guest VCPU. */
    return VINF_EM_RAW_INTERRUPT;
}


/**
 * \#VMEXIT handler for FERR intercept (SVM_EXIT_FERR_FREEZE). Conditional
 * \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitFerrFreeze(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
    Assert(!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE));

    Log4Func(("Raising IRQ 13 in response to #FERR\n"));
    return PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
}


/**
 * \#VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    /* Clear NMI blocking. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);

    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_IRET);

    /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
    return VINF_SUCCESS;
}
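
/*
 * Editor's note (hedged; the set-side is inferred from the clear-side above, not
 * quoted from this section): the IRET intercept is AMD-V's NMI-window mechanism.
 * The NMI injection path is expected to perform the inverse of this handler,
 * conceptually:
 *
 *     VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);                        // block further NMIs
 *     hmR0SvmSetCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_IRET);  // #VMEXIT on the next IRET
 */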


/**
 * \#VMEXIT handler for page-fault exceptions (SVM_EXIT_XCPT_14).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
    PVM            pVM           = pVCpu->CTX_SUFF(pVM);
    PCPUMCTX       pCtx          = &pVCpu->cpum.GstCtx;
    PSVMVMCB       pVmcb         = hmR0SvmGetCurrentVmcb(pVCpu);
    uint32_t       uErrCode      = pVmcb->ctrl.u64ExitInfo1;
    uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;

#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
    if (pVM->hm.s.fNestedPaging)
    {
        pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
        if (   !pSvmTransient->fVectoringDoublePF
            || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
        {
            /* A genuine guest #PF, reflect it to the guest. */
            hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
            Log4Func(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RX64 ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
                      uFaultAddress, uErrCode));
        }
        else
        {
            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
            hmR0SvmSetPendingXcptDF(pVCpu);
            Log4Func(("Pending #DF due to vectoring #PF. NP\n"));
        }
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
        return VINF_SUCCESS;
    }
#endif

    Assert(!pVM->hm.s.fNestedPaging);

    /*
     * TPR patching shortcut for APIC TPR reads and writes; only applicable to 32-bit guests.
     */
    if (   pVM->hm.s.fTprPatchingAllowed
        && (uFaultAddress & 0xfff) == XAPIC_OFF_TPR
        && !(uErrCode & X86_TRAP_PF_P) /* Not present. */
        && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
        && !CPUMIsGuestInLongModeEx(pCtx)
        && !CPUMGetGuestCPL(pVCpu)
        && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
    {
        RTGCPHYS GCPhysApicBase;
        GCPhysApicBase  = APICGetBaseMsrNoCheck(pVCpu);
        GCPhysApicBase &= PAGE_BASE_GC_MASK;

        /* Check if the page at the fault-address is the APIC base. */
        RTGCPHYS GCPhysPage;
        int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
        if (   rc2 == VINF_SUCCESS
            && GCPhysPage == GCPhysApicBase)
        {
            /* Only attempt to patch the instruction once. */
            PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
            if (!pPatch)
                return VINF_EM_HM_PATCH_TPR_INSTR;
        }
    }

    Log4Func(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
              pCtx->rip, uErrCode, pCtx->cr3));

    /*
     * If it's a vectoring #PF, emulate injecting the original event, as
     * PGMTrap0eHandler() is incapable of differentiating between instruction emulation and
     * event injection that caused a #PF. See @bugref{6607}.
     */
    if (pSvmTransient->fVectoringPF)
    {
        Assert(pVCpu->hm.s.Event.fPending);
        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    }

    TRPMAssertXcptPF(pVCpu, uFaultAddress, uErrCode);
    int rc = PGMTrap0eHandler(pVCpu, uErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);

    Log4Func(("#PF: rc=%Rrc\n", rc));

    if (rc == VINF_SUCCESS)
    {
        /* Successfully synced shadow page tables or emulated an MMIO instruction. */
        TRPMResetTrap(pVCpu);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
        return rc;
    }

    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */

        /*
         * If a nested-guest delivers a #PF and that causes a #PF which is -not- a shadow #PF,
         * we should simply forward the #PF to the guest; it is up to the nested-hypervisor to
         * determine whether it is a nested-shadow #PF or a #DF, see @bugref{7243#c121}.
         */
        if (   !pSvmTransient->fVectoringDoublePF
            || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
        {
            /* It's a guest (or nested-guest) page fault and needs to be reflected. */
            uErrCode = TRPMGetErrorCode(pVCpu);        /* The error code might have been changed. */
            TRPMResetTrap(pVCpu);

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
            /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
            if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
                && HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF))
                return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_XCPT_PF, uErrCode, uFaultAddress));
#endif

            hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
        }
        else
        {
            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
            TRPMResetTrap(pVCpu);
            hmR0SvmSetPendingXcptDF(pVCpu);
            Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
        }

        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
        return VINF_SUCCESS;
    }

    TRPMResetTrap(pVCpu);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
    return rc;
}
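
/*
 * Reference sketch of the standard x86 #PF error-code bits tested above (the
 * constants are the VBox/x86.h ones already used in this handler):
 *
 *     X86_TRAP_PF_P     - bit 0: set = protection violation, clear = not-present page
 *     X86_TRAP_PF_RW    - bit 1: set = write access
 *     X86_TRAP_PF_US    - bit 2: set = access from user mode (CPL 3)
 *     X86_TRAP_PF_RSVD  - bit 3: set = a reserved page-table bit was set
 *     X86_TRAP_PF_ID    - bit 4: set = instruction fetch
 */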


/**
 * \#VMEXIT handler for undefined opcode (SVM_EXIT_XCPT_6).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptUD(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);

    /* Paranoia; Ensure we cannot be called as a result of event delivery. */
    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);  NOREF(pVmcb);

    int rc = VERR_SVM_UNEXPECTED_XCPT_EXIT;
    if (pVCpu->hm.s.fGIMTrapXcptUD)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
        uint8_t cbInstr = 0;
        VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
        if (rcStrict == VINF_SUCCESS)
        {
            /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */
            hmR0SvmAdvanceRipDumb(pVCpu, cbInstr);
            rc = VINF_SUCCESS;
            HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
        }
        else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
            rc = VINF_SUCCESS;
        else if (rcStrict == VINF_GIM_R3_HYPERCALL)
            rc = VINF_GIM_R3_HYPERCALL;
        else
            Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
    }

    /* If the GIM #UD exception handler didn't succeed for some reason or wasn't needed, raise #UD. */
    if (RT_FAILURE(rc))
    {
        hmR0SvmSetPendingXcptUD(pVCpu);
        rc = VINF_SUCCESS;
    }

    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
    return rc;
}


/**
 * \#VMEXIT handler for math-fault exceptions (SVM_EXIT_XCPT_16).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);

    PCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;
    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);

    /* Paranoia; Ensure we cannot be called as a result of event delivery. */
    Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);  NOREF(pVmcb);

    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);

    if (!(pCtx->cr0 & X86_CR0_NE))
    {
        PVM       pVM  = pVCpu->CTX_SUFF(pVM);
        PDISSTATE pDis = &pVCpu->hm.s.DisState;
        unsigned  cbOp;
        int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
        if (RT_SUCCESS(rc))
        {
            /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
            rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
            if (RT_SUCCESS(rc))
                pCtx->rip += cbOp;
        }
        else
            Log4Func(("EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
        return rc;
    }

    hmR0SvmSetPendingXcptMF(pVCpu);
    return VINF_SUCCESS;
}


/**
 * \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1). Conditional
 * \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    }

    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);

    /*
     * This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data
     * breakpoint). However, for both cases DR6 and DR7 are updated to what the exception
     * handler expects. See AMD spec. 15.12.2 "#DB (Debug)".
     */
    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    PCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;
    int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
        if (CPUMIsHyperDebugStateActive(pVCpu))
            CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);

        /* Reflect the exception back to the guest. */
        hmR0SvmSetPendingXcptDB(pVCpu);
        rc = VINF_SUCCESS;
    }

    /*
     * Update DR6.
     */
    if (CPUMIsHyperDebugStateActive(pVCpu))
    {
        Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
        pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
    }
    else
    {
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
        Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
    }

    return rc;
}


/**
 * \#VMEXIT handler for alignment check exceptions (SVM_EXIT_XCPT_17).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptAC(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector = X86_XCPT_AC;
    Event.n.u1ErrorCodeValid = 1;
    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    return VINF_SUCCESS;
}
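
/*
 * Layout of the event queued above, for reference (the VMCB EVENTINJ format per
 * the AMD manual; the names are the SVMEVENT bitfields used in this file):
 *
 *     bits  7:0   u8Vector         - vector (here X86_XCPT_AC = 17)
 *     bits 10:8   u3Type           - SVM_EVENT_EXCEPTION, SVM_EVENT_NMI, ...
 *     bit  11     u1ErrorCodeValid - an error code is pushed (#AC's is always zero,
 *                                    which Event.u = 0 already provides)
 *     bit  31     u1Valid          - event is pending injection
 *     bits 63:32  u32ErrorCode     - the error code itself
 */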


/**
 * \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptBP(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    int rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        SVMEVENT Event;
        Event.u          = 0;
        Event.n.u1Valid  = 1;
        Event.n.u3Type   = SVM_EVENT_EXCEPTION;
        Event.n.u8Vector = X86_XCPT_BP;
        hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    }

    Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
    return rc;
}


#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
/**
 * \#VMEXIT handler for generic exceptions. Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptGeneric(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    uint8_t const  uVector  = pVmcb->ctrl.u64ExitCode - SVM_EXIT_XCPT_0;
    uint32_t const uErrCode = pVmcb->ctrl.u64ExitInfo1;
    Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);
    Assert(uVector <= X86_XCPT_LAST);
    Log4Func(("uVector=%#x uErrCode=%u\n", uVector, uErrCode));

    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector = uVector;
    switch (uVector)
    {
        /* Shouldn't be here for reflecting #PFs (among other things, the fault address isn't passed along). */
        case X86_XCPT_PF:   AssertMsgFailed(("hmR0SvmExitXcptGeneric: Unexpected exception")); return VERR_SVM_IPE_5;
        case X86_XCPT_DF:
        case X86_XCPT_TS:
        case X86_XCPT_NP:
        case X86_XCPT_SS:
        case X86_XCPT_GP:
        case X86_XCPT_AC:
        {
            Event.n.u1ErrorCodeValid = 1;
            Event.n.u32ErrorCode     = uErrCode;
            break;
        }
    }

    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    return VINF_SUCCESS;
}
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_HWVIRT);

#ifdef VBOX_STRICT
    PCSVMVMCB pVmcbTmp = hmR0SvmGetCurrentVmcb(pVCpu);
    Assert(pVmcbTmp);
    Assert(!pVmcbTmp->ctrl.IntCtrl.n.u1VGifEnable);
    RT_NOREF(pVmcbTmp);
#endif

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
    }
    else
        rcStrict = IEMExecOne(pVCpu);

    if (rcStrict == VINF_SUCCESS)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}
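
/*
 * Editor's sketch of the next-RIP fast path used here and in the handlers below
 * (an assumption about hmR0SvmGetInstrLength's internals): on CPUs with the
 * NextRIP feature the VMCB records the address of the instruction following the
 * intercepted one, so the instruction length is simply
 *
 *     cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
 *
 * Without NextRIP the length cannot be known up front (prefixes vary), hence the
 * IEMExecOne() fallback which decodes the instruction in software.
 */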


/**
 * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_HWVIRT);

    /*
     * When VGIF is not used we always intercept STGI instructions. When VGIF is used,
     * we only intercept STGI when there are pending events that must be delivered once
     * GIF becomes 1.
     */
    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
        hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_STGI);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr);
    }
    else
        rcStrict = IEMExecOne(pVCpu);

    if (rcStrict == VINF_SUCCESS)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for VMLOAD (SVM_EXIT_VMLOAD). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu,   IEM_CPUMCTX_EXTRN_MUST_MASK
                                      | CPUMCTX_EXTRN_FS   | CPUMCTX_EXTRN_GS             | CPUMCTX_EXTRN_TR
                                      | CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS
                                      | CPUMCTX_EXTRN_SYSENTER_MSRS);

#ifdef VBOX_STRICT
    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    Assert(pVmcb);
    Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
    RT_NOREF(pVmcb);
#endif

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr);
    }
    else
        rcStrict = IEMExecOne(pVCpu);

    if (rcStrict == VINF_SUCCESS)
    {
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_FS             | HM_CHANGED_GUEST_GS
                                                   | HM_CHANGED_GUEST_TR             | HM_CHANGED_GUEST_LDTR
                                                   | HM_CHANGED_GUEST_KERNEL_GS_BASE | HM_CHANGED_GUEST_SYSCALL_MSRS
                                                   | HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}
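
/*
 * For reference (AMD spec. "VMSAVE and VMLOAD"; note how the list matches the
 * CPUMCTX_EXTRN_ flags imported above): these instructions transfer the state
 * that VMRUN/#VMEXIT do not swap, namely FS, GS, TR and LDTR including their
 * hidden parts, KernelGSBase, STAR/LSTAR/CSTAR/SFMASK, and the SYSENTER
 * CS/ESP/EIP MSRs.
 */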


/**
 * \#VMEXIT handler for VMSAVE (SVM_EXIT_VMSAVE). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);

#ifdef VBOX_STRICT
    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    Assert(pVmcb);
    Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);
    RT_NOREF(pVmcb);
#endif
    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr);
    }
    else
        rcStrict = IEMExecOne(pVCpu);

    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for INVLPGA (SVM_EXIT_INVLPGA). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitInvlpga(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedInvlpga(pVCpu, cbInstr);
    }
    else
        rcStrict = IEMExecOne(pVCpu);

    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * \#VMEXIT handler for VMRUN (SVM_EXIT_VMRUN). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitVmrun(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        uint8_t const cbInstr = hmR0SvmGetInstrLength(pVCpu);
        rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
    }
    else
        rcStrict = IEMExecOne(pVCpu);

    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = VINF_SVM_VMRUN;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_VMRUN_MASK);
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }

    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return VBOXSTRICTRC_TODO(rcStrict);
}
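
/*
 * Editor's note (hedged; based on how the status code is consumed by the HM run
 * loop rather than on code in this section): returning VINF_SVM_VMRUN after IEM
 * has emulated VMRUN tells ring-0 to switch from the outer guest's execution
 * loop to the nested-guest loop, so the next iteration runs on the nested-guest
 * VMCB state set up by the emulated VMRUN.
 */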


/**
 * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1).
 * Unconditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    if (pVCpu->hm.s.Event.fPending)
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    }

    hmR0SvmSetPendingXcptDB(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Nested-guest \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPU pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector = X86_XCPT_BP;
    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    return VINF_SUCCESS;
}
#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */

/** @} */