VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp@90622

Last change on this file since 90622 was 90622, checked in by vboxsync, 3 years ago:
VMM/HMSVMR0: Nested SVM: bugref:10080 Attempt to fix debug registers guru meditation with nested-guests.

/* $Id: HMSVMR0.cpp 90622 2021-08-11 09:05:03Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - Host Context Ring-0.
 */

/*
 * Copyright (C) 2013-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include <iprt/asm-amd64-x86.h>
#include <iprt/thread.h>

#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/apic.h>
#include "HMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include "HMSVMR0.h"
#include "dtrace/VBoxVMM.h"

#ifdef DEBUG_ramshankar
# define HMSVM_SYNC_FULL_GUEST_STATE
# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
# define HMSVM_ALWAYS_TRAP_PF
# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef VBOX_WITH_STATISTICS
# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
        if ((u64ExitCode) == SVM_EXIT_NPF) \
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
        else \
            STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
    } while (0)

# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
#  define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); \
        STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitAll); \
        if ((u64ExitCode) == SVM_EXIT_NPF) \
            STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitReasonNpf); \
        else \
            STAM_COUNTER_INC(&pVCpu->hm.s.paStatNestedExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
    } while (0)
# endif
#else
# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode)           do { } while (0)
# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
#  define HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(u64ExitCode)   do { } while (0)
# endif
#endif /* !VBOX_WITH_STATISTICS */

/** If we decide to use a function table approach this can be useful to
 *  switch to a "static DECLCALLBACK(int)". */
#define HMSVM_EXIT_DECL static VBOXSTRICTRC

/**
 * Subset of the guest-CPU state that is kept by SVM R0 code while executing the
 * guest using hardware-assisted SVM.
 *
 * This excludes state like TSC AUX and GPRs (other than RSP, RAX), which are
 * always swapped and restored across the world-switch, and also registers like
 * EFER, PAT MSR etc. which cannot be modified by the guest without causing a
 * \#VMEXIT.
 */
#define HMSVM_CPUMCTX_EXTRN_ALL     (  CPUMCTX_EXTRN_RIP \
                                     | CPUMCTX_EXTRN_RFLAGS \
                                     | CPUMCTX_EXTRN_RAX \
                                     | CPUMCTX_EXTRN_RSP \
                                     | CPUMCTX_EXTRN_SREG_MASK \
                                     | CPUMCTX_EXTRN_CR0 \
                                     | CPUMCTX_EXTRN_CR2 \
                                     | CPUMCTX_EXTRN_CR3 \
                                     | CPUMCTX_EXTRN_TABLE_MASK \
                                     | CPUMCTX_EXTRN_DR6 \
                                     | CPUMCTX_EXTRN_DR7 \
                                     | CPUMCTX_EXTRN_KERNEL_GS_BASE \
                                     | CPUMCTX_EXTRN_SYSCALL_MSRS \
                                     | CPUMCTX_EXTRN_SYSENTER_MSRS \
                                     | CPUMCTX_EXTRN_HWVIRT \
                                     | CPUMCTX_EXTRN_HM_SVM_MASK)

/**
 * Subset of the guest-CPU state that is shared between the guest and host.
 */
#define HMSVM_CPUMCTX_SHARED_STATE  CPUMCTX_EXTRN_DR_MASK

/** Macro for importing guest state from the VMCB back into CPUMCTX. */
#define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_fWhat) \
    do { \
        if ((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fWhat)) \
            hmR0SvmImportGuestState((a_pVCpu), (a_fWhat)); \
    } while (0)

/** Assert that the required state bits are fetched. */
#define HMSVM_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz)  AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
                                                              ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
                                                              (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))

/** Assert that preemption is disabled or covered by thread-context hooks. */
#define HMSVM_ASSERT_PREEMPT_SAFE(a_pVCpu)          Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
                                                           || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));

/** Assert that we haven't migrated CPUs when thread-context hooks are not
 *  used. */
#define HMSVM_ASSERT_CPU_SAFE(a_pVCpu)              AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
                                                              || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
                                                              ("Illegal migration! Entered on CPU %u Current %u\n", \
                                                              (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()));

/** Assert that we're not executing a nested-guest. */
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)   Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
#else
# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)   do { NOREF((a_pCtx)); } while (0)
#endif

/** Assert that we're executing a nested-guest. */
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)       Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
#else
# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)       do { NOREF((a_pCtx)); } while (0)
#endif

/** Macro for checking and returning from the function using it, for
 * \#VMEXIT intercepts that may be caused during delivery of another
 * event in the guest. */
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
    do \
    { \
        int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
        if (RT_LIKELY(rc == VINF_SUCCESS))          { /* continue #VMEXIT handling */ } \
        else if (   rc == VINF_HM_DOUBLE_FAULT)     { return VINF_SUCCESS; } \
        else if (   rc == VINF_EM_RESET \
                 && CPUMIsGuestSvmCtrlInterceptSet((a_pVCpu), &(a_pVCpu)->cpum.GstCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \
        { \
            HMSVM_CPUMCTX_IMPORT_STATE((a_pVCpu), HMSVM_CPUMCTX_EXTRN_ALL); \
            return IEMExecSvmVmexit((a_pVCpu), SVM_EXIT_SHUTDOWN, 0, 0); \
        } \
        else \
            return rc; \
    } while (0)
#else
# define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(a_pVCpu, a_pSvmTransient) \
    do \
    { \
        int rc = hmR0SvmCheckExitDueToEventDelivery((a_pVCpu), (a_pSvmTransient)); \
        if (RT_LIKELY(rc == VINF_SUCCESS))          { /* continue #VMEXIT handling */ } \
        else if (   rc == VINF_HM_DOUBLE_FAULT)     { return VINF_SUCCESS; } \
        else \
            return rc; \
    } while (0)
#endif

/** Macro for upgrading a @a a_rc to VINF_EM_DBG_STEPPED after emulating an
 *  instruction that exited. */
#define HMSVM_CHECK_SINGLE_STEP(a_pVCpu, a_rc) \
    do { \
        if ((a_pVCpu)->hm.s.fSingleInstruction && (a_rc) == VINF_SUCCESS) \
            (a_rc) = VINF_EM_DBG_STEPPED; \
    } while (0)

/** Validate segment descriptor granularity bit. */
#ifdef VBOX_STRICT
# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) \
    AssertMsg(   !(a_pCtx)->reg.Attr.n.u1Present \
              || (   (a_pCtx)->reg.Attr.n.u1Granularity \
                  ? ((a_pCtx)->reg.u32Limit & 0xfff) == 0xfff \
                  :  (a_pCtx)->reg.u32Limit <= UINT32_C(0xfffff)), \
              ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", (a_pCtx)->reg.u32Limit, \
               (a_pCtx)->reg.Attr.u, (a_pCtx)->reg.u64Base))
#else
# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg)  do { } while (0)
#endif

/**
 * Exception bitmap mask for all contributory exceptions.
 *
 * Page fault is deliberately excluded here as it's conditional as to whether
 * it's contributory or benign. Page faults are handled separately.
 */
#define HMSVM_CONTRIBUTORY_XCPT_MASK    (  RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
                                         | RT_BIT(X86_XCPT_DE))

/**
 * Mandatory/unconditional guest control intercepts.
 *
 * SMIs can and do happen in normal operation. We need not intercept them
 * while executing the guest (or nested-guest).
 */
#define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS   (  SVM_CTRL_INTERCEPT_INTR          \
                                                 | SVM_CTRL_INTERCEPT_NMI           \
                                                 | SVM_CTRL_INTERCEPT_INIT          \
                                                 | SVM_CTRL_INTERCEPT_RDPMC         \
                                                 | SVM_CTRL_INTERCEPT_CPUID         \
                                                 | SVM_CTRL_INTERCEPT_RSM           \
                                                 | SVM_CTRL_INTERCEPT_HLT           \
                                                 | SVM_CTRL_INTERCEPT_IOIO_PROT     \
                                                 | SVM_CTRL_INTERCEPT_MSR_PROT      \
                                                 | SVM_CTRL_INTERCEPT_INVLPGA       \
                                                 | SVM_CTRL_INTERCEPT_SHUTDOWN      \
                                                 | SVM_CTRL_INTERCEPT_FERR_FREEZE   \
                                                 | SVM_CTRL_INTERCEPT_VMRUN         \
                                                 | SVM_CTRL_INTERCEPT_SKINIT        \
                                                 | SVM_CTRL_INTERCEPT_WBINVD        \
                                                 | SVM_CTRL_INTERCEPT_MONITOR       \
                                                 | SVM_CTRL_INTERCEPT_MWAIT         \
                                                 | SVM_CTRL_INTERCEPT_CR0_SEL_WRITE \
                                                 | SVM_CTRL_INTERCEPT_XSETBV)

/** @name VMCB Clean Bits.
 *
 * These flags are used for VMCB-state caching. A set VMCB Clean bit indicates
 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
 * memory.
 *
 * @{ */
/** All intercepts vectors, TSC offset, PAUSE filter counter. */
#define HMSVM_VMCB_CLEAN_INTERCEPTS             RT_BIT(0)
/** I/O permission bitmap, MSR permission bitmap. */
#define HMSVM_VMCB_CLEAN_IOPM_MSRPM             RT_BIT(1)
/** ASID. */
#define HMSVM_VMCB_CLEAN_ASID                   RT_BIT(2)
/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING, V_INTR_VECTOR. */
#define HMSVM_VMCB_CLEAN_INT_CTRL               RT_BIT(3)
/** Nested Paging: Nested CR3 (nCR3), PAT. */
#define HMSVM_VMCB_CLEAN_NP                     RT_BIT(4)
/** Control registers (CR0, CR3, CR4, EFER). */
#define HMSVM_VMCB_CLEAN_CRX_EFER               RT_BIT(5)
/** Debug registers (DR6, DR7). */
#define HMSVM_VMCB_CLEAN_DRX                    RT_BIT(6)
/** GDT, IDT limit and base. */
#define HMSVM_VMCB_CLEAN_DT                     RT_BIT(7)
/** Segment register: CS, SS, DS, ES limit and base. */
#define HMSVM_VMCB_CLEAN_SEG                    RT_BIT(8)
/** CR2. */
#define HMSVM_VMCB_CLEAN_CR2                    RT_BIT(9)
/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to). */
#define HMSVM_VMCB_CLEAN_LBR                    RT_BIT(10)
/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC PHYSICAL_TABLE and AVIC
 *  LOGICAL_TABLE pointers). */
#define HMSVM_VMCB_CLEAN_AVIC                   RT_BIT(11)
/** Mask of all valid VMCB Clean bits. */
#define HMSVM_VMCB_CLEAN_ALL                    (  HMSVM_VMCB_CLEAN_INTERCEPTS \
                                                 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
                                                 | HMSVM_VMCB_CLEAN_ASID       \
                                                 | HMSVM_VMCB_CLEAN_INT_CTRL   \
                                                 | HMSVM_VMCB_CLEAN_NP         \
                                                 | HMSVM_VMCB_CLEAN_CRX_EFER   \
                                                 | HMSVM_VMCB_CLEAN_DRX        \
                                                 | HMSVM_VMCB_CLEAN_DT         \
                                                 | HMSVM_VMCB_CLEAN_SEG        \
                                                 | HMSVM_VMCB_CLEAN_CR2        \
                                                 | HMSVM_VMCB_CLEAN_LBR        \
                                                 | HMSVM_VMCB_CLEAN_AVIC)
/** @} */
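
/* Caching protocol note (illustrative; this mirrors the pattern used by the
   intercept helpers further down in this file): whenever software modifies a
   VMCB field group in memory, the corresponding clean bit must be cleared so
   AMD-V reloads that group on the next VMRUN, e.g.:
       pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(uXcpt);
       pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
   Leaving the clean bit set after such a change would let the CPU keep using
   stale cached state. */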

/** @name SVM transient.
 *
 * A state structure for holding miscellaneous information across AMD-V
 * VMRUN/\#VMEXIT operation, restored after the transition.
 *
 * @{ */
typedef struct SVMTRANSIENT
{
    /** The host's rflags/eflags. */
    RTCCUINTREG     fEFlags;
    /** The \#VMEXIT exit code (the EXITCODE field in the VMCB). */
    uint64_t        u64ExitCode;

    /** The guest's TPR value used for TPR shadowing. */
    uint8_t         u8GuestTpr;
    /** Alignment. */
    uint8_t         abAlignment0[7];

    /** Pointer to the currently executing VMCB. */
    PSVMVMCB        pVmcb;

    /** Whether we are currently executing a nested-guest. */
    bool            fIsNestedGuest;
    /** Whether the guest debug state was active at the time of \#VMEXIT. */
    bool            fWasGuestDebugStateActive;
    /** Whether the hyper debug state was active at the time of \#VMEXIT. */
    bool            fWasHyperDebugStateActive;
    /** Whether the TSC offset mode needs to be updated. */
    bool            fUpdateTscOffsetting;
    /** Whether the TSC_AUX MSR needs restoring on \#VMEXIT. */
    bool            fRestoreTscAuxMsr;
    /** Whether the \#VMEXIT was caused by a page-fault during delivery of a
     *  contributory exception or a page-fault. */
    bool            fVectoringDoublePF;
    /** Whether the \#VMEXIT was caused by a page-fault during delivery of an
     *  external interrupt or NMI. */
    bool            fVectoringPF;
    /** Padding. */
    bool            afPadding0;
} SVMTRANSIENT;
/** Pointer to SVM transient state. */
typedef SVMTRANSIENT *PSVMTRANSIENT;
/** Pointer to a const SVM transient state. */
typedef const SVMTRANSIENT *PCSVMTRANSIENT;

AssertCompileSizeAlignment(SVMTRANSIENT, sizeof(uint64_t));
AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
AssertCompileMemberAlignment(SVMTRANSIENT, pVmcb, sizeof(uint64_t));
/** @} */

/**
 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
 */
typedef enum SVMMSREXITREAD
{
    /** Reading this MSR causes a \#VMEXIT. */
    SVMMSREXIT_INTERCEPT_READ = 0xb,
    /** Reading this MSR does not cause a \#VMEXIT. */
    SVMMSREXIT_PASSTHRU_READ
} SVMMSREXITREAD;

/**
 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
 */
typedef enum SVMMSREXITWRITE
{
    /** Writing to this MSR causes a \#VMEXIT. */
    SVMMSREXIT_INTERCEPT_WRITE = 0xd,
    /** Writing to this MSR does not cause a \#VMEXIT. */
    SVMMSREXIT_PASSTHRU_WRITE
} SVMMSREXITWRITE;

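/* MSRPM layout note (derived from hmR0SvmSetMsrPermission() below): each MSR
   maps to a pair of adjacent bits in the 8 KB bitmap -- an even-numbered read
   bit followed by the odd write bit. CPUMGetSvmMsrpmOffsetAndBit() translates
   an MSR index into the byte offset and the read-bit position; a set bit
   means the corresponding access is intercepted and causes a #VMEXIT. */
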
/**
 * SVM \#VMEXIT handler.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pSvmTransient   Pointer to the SVM-transient structure.
 */
typedef VBOXSTRICTRC FNSVMEXITHANDLER(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static void hmR0SvmPendingEventToTrpmTrap(PVMCPUCC pVCpu);
static void hmR0SvmLeave(PVMCPUCC pVCpu, bool fImportState);


/** @name \#VMEXIT handlers.
 * @{
 */
static FNSVMEXITHANDLER hmR0SvmExitIntr;
static FNSVMEXITHANDLER hmR0SvmExitWbinvd;
static FNSVMEXITHANDLER hmR0SvmExitInvd;
static FNSVMEXITHANDLER hmR0SvmExitCpuid;
static FNSVMEXITHANDLER hmR0SvmExitRdtsc;
static FNSVMEXITHANDLER hmR0SvmExitRdtscp;
static FNSVMEXITHANDLER hmR0SvmExitRdpmc;
static FNSVMEXITHANDLER hmR0SvmExitInvlpg;
static FNSVMEXITHANDLER hmR0SvmExitHlt;
static FNSVMEXITHANDLER hmR0SvmExitMonitor;
static FNSVMEXITHANDLER hmR0SvmExitMwait;
static FNSVMEXITHANDLER hmR0SvmExitShutdown;
static FNSVMEXITHANDLER hmR0SvmExitUnexpected;
static FNSVMEXITHANDLER hmR0SvmExitReadCRx;
static FNSVMEXITHANDLER hmR0SvmExitWriteCRx;
static FNSVMEXITHANDLER hmR0SvmExitMsr;
static FNSVMEXITHANDLER hmR0SvmExitReadDRx;
static FNSVMEXITHANDLER hmR0SvmExitWriteDRx;
static FNSVMEXITHANDLER hmR0SvmExitXsetbv;
static FNSVMEXITHANDLER hmR0SvmExitIOInstr;
static FNSVMEXITHANDLER hmR0SvmExitNestedPF;
static FNSVMEXITHANDLER hmR0SvmExitVIntr;
static FNSVMEXITHANDLER hmR0SvmExitTaskSwitch;
static FNSVMEXITHANDLER hmR0SvmExitVmmCall;
static FNSVMEXITHANDLER hmR0SvmExitPause;
static FNSVMEXITHANDLER hmR0SvmExitFerrFreeze;
static FNSVMEXITHANDLER hmR0SvmExitIret;
static FNSVMEXITHANDLER hmR0SvmExitXcptPF;
static FNSVMEXITHANDLER hmR0SvmExitXcptUD;
static FNSVMEXITHANDLER hmR0SvmExitXcptMF;
static FNSVMEXITHANDLER hmR0SvmExitXcptDB;
static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
static FNSVMEXITHANDLER hmR0SvmExitXcptGP;
#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
static FNSVMEXITHANDLER hmR0SvmExitXcptGeneric;
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
static FNSVMEXITHANDLER hmR0SvmExitClgi;
static FNSVMEXITHANDLER hmR0SvmExitStgi;
static FNSVMEXITHANDLER hmR0SvmExitVmload;
static FNSVMEXITHANDLER hmR0SvmExitVmsave;
static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
static FNSVMEXITHANDLER hmR0SvmExitVmrun;
static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;
#endif
/** @} */

static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
static VBOXSTRICTRC hmR0SvmHandleExitNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Ring-0 memory object for the IO bitmap. */
static RTR0MEMOBJ           g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
/** Physical address of the IO bitmap. */
static RTHCPHYS             g_HCPhysIOBitmap;
/** Pointer to the IO bitmap. */
static R0PTRTYPE(void *)    g_pvIOBitmap;

#ifdef VBOX_STRICT
# define HMSVM_LOG_RBP_RSP  RT_BIT_32(0)
# define HMSVM_LOG_CR_REGS  RT_BIT_32(1)
# define HMSVM_LOG_CS       RT_BIT_32(2)
# define HMSVM_LOG_SS       RT_BIT_32(3)
# define HMSVM_LOG_FS       RT_BIT_32(4)
# define HMSVM_LOG_GS       RT_BIT_32(5)
# define HMSVM_LOG_LBR      RT_BIT_32(6)
# define HMSVM_LOG_ALL      (  HMSVM_LOG_RBP_RSP \
                             | HMSVM_LOG_CR_REGS \
                             | HMSVM_LOG_CS \
                             | HMSVM_LOG_SS \
                             | HMSVM_LOG_FS \
                             | HMSVM_LOG_GS \
                             | HMSVM_LOG_LBR)

/**
 * Dumps the virtual CPU state and additional info to the logger for diagnostics.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcb       Pointer to the VM control block.
 * @param   pszPrefix   Log prefix.
 * @param   fFlags      Log flags, see HMSVM_LOG_XXX.
 * @param   uVerbose    The verbosity level, currently unused.
 */
static void hmR0SvmLogState(PVMCPUCC pVCpu, PCSVMVMCB pVmcb, const char *pszPrefix, uint32_t fFlags, uint8_t uVerbose)
{
    RT_NOREF2(pVCpu, uVerbose);
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;

    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u));

    if (fFlags & HMSVM_LOG_RBP_RSP)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RBP);
        Log4(("%s: rsp=%#RX64 rbp=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp));
    }

    if (fFlags & HMSVM_LOG_CR_REGS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4);
        Log4(("%s: cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cr0, pCtx->cr3, pCtx->cr4));
    }

    if (fFlags & HMSVM_LOG_CS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
        Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base,
              pCtx->cs.u32Limit, pCtx->cs.Attr.u));
    }
    if (fFlags & HMSVM_LOG_SS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
        Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base,
              pCtx->ss.u32Limit, pCtx->ss.Attr.u));
    }
    if (fFlags & HMSVM_LOG_FS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
        Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base,
              pCtx->fs.u32Limit, pCtx->fs.Attr.u));
    }
    if (fFlags & HMSVM_LOG_GS)
    {
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
        Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base,
              pCtx->gs.u32Limit, pCtx->gs.Attr.u));
    }

    PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
    if (fFlags & HMSVM_LOG_LBR)
    {
        Log4(("%s: br_from=%#RX64 br_to=%#RX64 lastxcpt_from=%#RX64 lastxcpt_to=%#RX64\n", pszPrefix, pVmcbGuest->u64BR_FROM,
              pVmcbGuest->u64BR_TO, pVmcbGuest->u64LASTEXCPFROM, pVmcbGuest->u64LASTEXCPTO));
    }
    NOREF(pszPrefix); NOREF(pVmcbGuest); NOREF(pCtx);
}
#endif /* VBOX_STRICT */


/**
 * Sets up and activates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pHostCpu        The HM physical-CPU structure.
 * @param   pVM             The cross context VM structure. Can be
 *                          NULL after a resume!
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 * @param   fEnabledByHost  Whether the host OS has already initialized AMD-V.
 * @param   pHwvirtMsrs     Pointer to the hardware-virtualization MSRs (currently
 *                          unused).
 */
VMMR0DECL(int) SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
                              PCSUPHWVIRTMSRS pHwvirtMsrs)
{
    Assert(!fEnabledByHost);
    Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
    Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
    Assert(pvCpuPage); NOREF(pvCpuPage);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    RT_NOREF2(fEnabledByHost, pHwvirtMsrs);

    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();

    /*
     * We must turn on AMD-V and setup the host state physical address, as those MSRs are per CPU.
     */
    uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    if (u64HostEfer & MSR_K6_EFER_SVME)
    {
        /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. */
        if (   pVM
            && pVM->hm.s.svm.fIgnoreInUseError)
            pHostCpu->fIgnoreAMDVInUseError = true;

        if (!pHostCpu->fIgnoreAMDVInUseError)
        {
            ASMSetFlags(fEFlags);
            return VERR_SVM_IN_USE;
        }
    }

    /* Turn on AMD-V in the EFER MSR. */
    ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);

    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);

    /* Restore interrupts. */
    ASMSetFlags(fEFlags);

    /*
     * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all
     * non-zero ASIDs when enabling SVM. AMD doesn't have an SVM instruction to flush all
     * ASIDs (flushing is done upon VMRUN). Therefore, flag that we need to flush the TLB
     * entirely before executing any guest code.
     */
    pHostCpu->fFlushAsidBeforeUse = true;

    /*
     * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
     */
    ++pHostCpu->cTlbFlushes;

    return VINF_SUCCESS;
}


/**
 * Deactivates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pHostCpu        The HM physical-CPU structure.
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 */
VMMR0DECL(int) SVMR0DisableCpu(PHMPHYSCPU pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
{
    RT_NOREF1(pHostCpu);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertReturn(   HCPhysCpuPage
                 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);

    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
    RTCCUINTREG const fEFlags = ASMIntDisableFlags();

    /* Turn off AMD-V in the EFER MSR. */
    uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);

    /* Invalidate host state physical address. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);

    /* Restore interrupts. */
    ASMSetFlags(fEFlags);

    return VINF_SUCCESS;
}


/**
 * Does global AMD-V initialization (called during module initialization).
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) SVMR0GlobalInit(void)
{
    /*
     * Allocate 12 KB (3 pages) for the IO bitmap. Since this is non-optional and we always
     * intercept all IO accesses, it's done once globally here instead of per-VM.
     */
    Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
    int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;

    g_pvIOBitmap     = RTR0MemObjAddress(g_hMemObjIOBitmap);
    g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);

    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));

    return VINF_SUCCESS;
}


/**
 * Does global AMD-V termination (called during module termination).
 */
VMMR0DECL(void) SVMR0GlobalTerm(void)
{
    if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(g_hMemObjIOBitmap, true /* fFreeMappings */);
        g_pvIOBitmap      = NULL;
        g_HCPhysIOBitmap  = 0;
        g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
    }
}


/**
 * Frees any allocated per-VCPU structures for a VM.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) hmR0SvmFreeStructs(PVMCC pVM)
{
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
        AssertPtr(pVCpu);

        if (pVCpu->hmr0.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjVmcbHost, false);
            pVCpu->hmr0.s.svm.HCPhysVmcbHost  = 0;
            pVCpu->hmr0.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
        }

        if (pVCpu->hmr0.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjVmcb, false);
            pVCpu->hmr0.s.svm.pVmcb       = NULL;
            pVCpu->hmr0.s.svm.HCPhysVmcb  = 0;
            pVCpu->hmr0.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
        }

        if (pVCpu->hmr0.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVCpu->hmr0.s.svm.hMemObjMsrBitmap, false);
            pVCpu->hmr0.s.svm.pvMsrBitmap      = NULL;
            pVCpu->hmr0.s.svm.HCPhysMsrBitmap  = 0;
            pVCpu->hmr0.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
        }
    }
}


/**
 * Sets pfnVMRun to the best suited variant.
 *
 * This must be called whenever anything changes relative to the SVMR0VMRun
 * variant selection:
 *      - pVCpu->hm.s.fLoadSaveGuestXcr0
 *      - CPUMCTX_WSF_IBPB_ENTRY in pVCpu->cpum.GstCtx.fWorldSwitcher
 *      - CPUMCTX_WSF_IBPB_EXIT  in pVCpu->cpum.GstCtx.fWorldSwitcher
 *      - Perhaps: CPUMIsGuestFPUStateActive() (windows only)
 *      - Perhaps: CPUMCTX.fXStateMask (windows only)
 *
 * We currently ASSUME that neither CPUMCTX_WSF_IBPB_ENTRY nor
 * CPUMCTX_WSF_IBPB_EXIT can be changed at runtime.
 */
static void hmR0SvmUpdateVmRunFunction(PVMCPUCC pVCpu)
{
    static const struct CLANGWORKAROUND { PFNHMSVMVMRUN pfn; } s_aHmR0SvmVmRunFunctions[] =
    {
        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit },
        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit },
        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit },
        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit },
        { hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit },
        { hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit },
        { hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit },
        { hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit },
    };
    uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0                 ? 1 : 0)
                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ? 2 : 0)
                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT  ? 4 : 0);
    PFNHMSVMVMRUN const pfnVMRun = s_aHmR0SvmVmRunFunctions[idx].pfn;
    if (pVCpu->hmr0.s.svm.pfnVMRun != pfnVMRun)
        pVCpu->hmr0.s.svm.pfnVMRun = pfnVMRun;
}
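
/* Selection sketch (matches the table ordering above): bit 0 of idx selects
   the With/SansXcr0 variant, bit 1 the With/SansIbpbEntry variant and bit 2
   the With/SansIbpbExit variant; e.g. idx == 5 (binary 101) yields
   hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit. */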


/**
 * Selector FNHMSVMVMRUN implementation.
 */
static DECLCALLBACK(int) hmR0SvmVMRunSelector(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB)
{
    hmR0SvmUpdateVmRunFunction(pVCpu);
    return pVCpu->hmr0.s.svm.pfnVMRun(pVM, pVCpu, HCPhysVMCB);
}


/**
 * Does per-VM AMD-V initialization.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR0DECL(int) SVMR0InitVM(PVMCC pVM)
{
    int rc = VERR_INTERNAL_ERROR_5;

    /*
     * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
     */
    uint32_t u32Family;
    uint32_t u32Model;
    uint32_t u32Stepping;
    if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
    {
        Log4Func(("AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
        pVM->hmr0.s.svm.fAlwaysFlushTLB = true;
    }

    /*
     * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
        pVCpu->hmr0.s.svm.hMemObjVmcbHost  = NIL_RTR0MEMOBJ;
        pVCpu->hmr0.s.svm.hMemObjVmcb      = NIL_RTR0MEMOBJ;
        pVCpu->hmr0.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
    }

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);

        /*
         * Initialize the hardware-assisted SVM guest-execution handler.
         * We now use a single handler for both 32-bit and 64-bit guests, see @bugref{6208#c73}.
         */
        pVCpu->hmr0.s.svm.pfnVMRun = hmR0SvmVMRunSelector;

        /*
         * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
         * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
         */
/** @todo Does this need to be below 4G? */
        rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjVmcbHost, SVM_VMCB_PAGES << PAGE_SHIFT, false /* fExecutable */);
        if (RT_FAILURE(rc))
            goto failure_cleanup;

        void *pvVmcbHost = RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjVmcbHost);
        pVCpu->hmr0.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjVmcbHost, 0 /* iPage */);
        Assert(pVCpu->hmr0.s.svm.HCPhysVmcbHost < _4G);
        ASMMemZeroPage(pvVmcbHost);

        /*
         * Allocate one page for the guest-state VMCB.
         */
/** @todo Does this need to be below 4G? */
        rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjVmcb, SVM_VMCB_PAGES << PAGE_SHIFT, false /* fExecutable */);
        if (RT_FAILURE(rc))
            goto failure_cleanup;

        pVCpu->hmr0.s.svm.pVmcb      = (PSVMVMCB)RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjVmcb);
        pVCpu->hmr0.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjVmcb, 0 /* iPage */);
        Assert(pVCpu->hmr0.s.svm.HCPhysVmcb < _4G);
        ASMMemZeroPage(pVCpu->hmr0.s.svm.pVmcb);

        /*
         * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
         * SVM to not require one.
         */
/** @todo Does this need to be below 4G? */
        rc = RTR0MemObjAllocCont(&pVCpu->hmr0.s.svm.hMemObjMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT,
                                 false /* fExecutable */);
        if (RT_FAILURE(rc))
            goto failure_cleanup;

        pVCpu->hmr0.s.svm.pvMsrBitmap     = RTR0MemObjAddress(pVCpu->hmr0.s.svm.hMemObjMsrBitmap);
        pVCpu->hmr0.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hmr0.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
        /* Set all bits to intercept all MSR accesses (changed later on). */
        ASMMemFill32(pVCpu->hmr0.s.svm.pvMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
    }

    return VINF_SUCCESS;

failure_cleanup:
    hmR0SvmFreeStructs(pVM);
    return rc;
}


/**
 * Does per-VM AMD-V termination.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR0DECL(int) SVMR0TermVM(PVMCC pVM)
{
    hmR0SvmFreeStructs(pVM);
    return VINF_SUCCESS;
}


/**
 * Returns whether the VMCB Clean Bits feature is supported.
 *
 * @returns @c true if supported, @c false otherwise.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fIsNestedGuest  Whether we are currently executing the nested-guest.
 */
DECL_FORCE_INLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPUCC pVCpu, bool fIsNestedGuest)
{
    PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
    bool const fHostVmcbCleanBits = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN);
    if (!fIsNestedGuest)
        return fHostVmcbCleanBits;
    return fHostVmcbCleanBits && pVM->cpum.ro.GuestFeatures.fSvmVmcbClean;
}


/**
 * Returns whether the decode assists feature is supported.
 *
 * @returns @c true if supported, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) hmR0SvmSupportsDecodeAssists(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
        return    (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS)
               && pVM->cpum.ro.GuestFeatures.fSvmDecodeAssists;
#endif
    return RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS);
}


/**
 * Returns whether the NRIP_SAVE feature is supported.
 *
 * @returns @c true if supported, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
        return    (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
               && pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
#endif
    return RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
}


/**
 * Sets the permission bits for the specified MSR in the MSRPM bitmap.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pbMsrBitmap Pointer to the MSR bitmap.
 * @param   idMsr       The MSR for which the permissions are being set.
 * @param   enmRead     MSR read permissions.
 * @param   enmWrite    MSR write permissions.
 *
 * @remarks This function does -not- clear the VMCB clean bits for MSRPM. The
 *          caller needs to take care of this.
 */
static void hmR0SvmSetMsrPermission(PVMCPUCC pVCpu, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
                                    SVMMSREXITWRITE enmWrite)
{
    bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
    uint16_t   offMsrpm;
    uint8_t    uMsrpmBit;
    int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
    AssertRC(rc);

    Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
    Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);

    pbMsrBitmap += offMsrpm;
    if (enmRead == SVMMSREXIT_INTERCEPT_READ)
        *pbMsrBitmap |= RT_BIT(uMsrpmBit);
    else
    {
        if (!fInNestedGuestMode)
            *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        else
        {
            /* Only clear the bit if the nested-guest is also not intercepting the MSR read. */
            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
            pbNstGstMsrBitmap += offMsrpm;
            if (!(*pbNstGstMsrBitmap & RT_BIT(uMsrpmBit)))
                *pbMsrBitmap &= ~RT_BIT(uMsrpmBit);
            else
                Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit));
        }
#endif
    }

    if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
        *pbMsrBitmap |= RT_BIT(uMsrpmBit + 1);
    else
    {
        if (!fInNestedGuestMode)
            *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        else
        {
            /* Only clear the bit if the nested-guest is also not intercepting the MSR write. */
            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
            pbNstGstMsrBitmap += offMsrpm;
            if (!(*pbNstGstMsrBitmap & RT_BIT(uMsrpmBit + 1)))
                *pbMsrBitmap &= ~RT_BIT(uMsrpmBit + 1);
            else
                Assert(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
        }
#endif
    }
}
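
/* Illustrative call (this mirrors SVMR0SetupVM() below): grant the guest
   direct access to an MSR that the world switcher swaps anyway, e.g.:
       hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_LSTAR,
                               SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
   Per the remarks above, when the VMCB is live the caller must also clear the
   HMSVM_VMCB_CLEAN_IOPM_MSRPM clean bit itself. */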


/**
 * Sets up AMD-V for the specified VM.
 * This function is only called once per-VM during initialization.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR0DECL(int) SVMR0SetupVM(PVMCC pVM)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertReturn(pVM, VERR_INVALID_PARAMETER);

    /*
     * Validate and copy over some parameters.
     */
    AssertReturn(pVM->hm.s.svm.fSupported, VERR_INCOMPATIBLE_CONFIG);
    bool const fNestedPaging = pVM->hm.s.fNestedPagingCfg;
    AssertReturn(!fNestedPaging || (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING), VERR_INCOMPATIBLE_CONFIG);
    pVM->hmr0.s.fNestedPaging     = fNestedPaging;
    pVM->hmr0.s.fAllow64BitGuests = pVM->hm.s.fAllow64BitGuestsCfg;

    /*
     * Determine some configuration parameters.
     */
    bool const fPauseFilter          = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER);
    bool const fPauseFilterThreshold = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_PAUSE_FILTER_THRESHOLD);
    bool const fUsePauseFilter       = fPauseFilter && pVM->hm.s.svm.cPauseFilter;

    bool const fLbrVirt              = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_LBR_VIRT);
    bool const fUseLbrVirt           = fLbrVirt && pVM->hm.s.svm.fLbrVirt; /** @todo IEM implementation etc. */

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    bool const fVirtVmsaveVmload     = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD);
    bool const fUseVirtVmsaveVmload  = fVirtVmsaveVmload && pVM->hm.s.svm.fVirtVmsaveVmload && fNestedPaging;

    bool const fVGif                 = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
    bool const fUseVGif              = fVGif && pVM->hm.s.svm.fVGif;
#endif

    PVMCPUCC pVCpu0 = VMCC_GET_CPU_0(pVM);
    PSVMVMCB pVmcb0 = pVCpu0->hmr0.s.svm.pVmcb;
    AssertMsgReturn(RT_VALID_PTR(pVmcb0), ("Invalid pVmcb (%p) for vcpu[0]\n", pVmcb0), VERR_SVM_INVALID_PVMCB);
    PSVMVMCBCTRL pVmcbCtrl0 = &pVmcb0->ctrl;

    /* Always trap #AC for reasons of security. */
    pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_AC);

    /* Always trap #DB for reasons of security. */
    pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_DB);

    /* Trap exceptions unconditionally (debug purposes). */
#ifdef HMSVM_ALWAYS_TRAP_PF
    pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_PF);
#endif
#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
    pVmcbCtrl0->u32InterceptXcpt |= RT_BIT_32(X86_XCPT_BP)
                                 |  RT_BIT_32(X86_XCPT_DE)
                                 |  RT_BIT_32(X86_XCPT_NM)
                                 |  RT_BIT_32(X86_XCPT_UD)
                                 |  RT_BIT_32(X86_XCPT_NP)
                                 |  RT_BIT_32(X86_XCPT_SS)
                                 |  RT_BIT_32(X86_XCPT_GP)
                                 |  RT_BIT_32(X86_XCPT_PF)
                                 |  RT_BIT_32(X86_XCPT_MF)
                                 ;
#endif

    /* Apply the exceptions intercepts needed by the GIM provider. */
    if (pVCpu0->hm.s.fGIMTrapXcptUD || pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
        pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);

    /* The mesa 3d driver hack needs #GP. */
    if (pVCpu0->hm.s.fTrapXcptGpForLovelyMesaDrv)
        pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_GP);

    /* Set up unconditional intercepts and conditions. */
    pVmcbCtrl0->u64InterceptCtrl = HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS
                                 | SVM_CTRL_INTERCEPT_VMMCALL
                                 | SVM_CTRL_INTERCEPT_VMSAVE
                                 | SVM_CTRL_INTERCEPT_VMLOAD
                                 | SVM_CTRL_INTERCEPT_CLGI
                                 | SVM_CTRL_INTERCEPT_STGI;

#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
    pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (pVCpu0->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvm)
    {
        /* Virtualized VMSAVE/VMLOAD. */
        if (fUseVirtVmsaveVmload)
        {
            pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload = 1;
            pVmcbCtrl0->u64InterceptCtrl &= ~(  SVM_CTRL_INTERCEPT_VMSAVE
                                              | SVM_CTRL_INTERCEPT_VMLOAD);
        }
        else
            Assert(!pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload);

        /* Virtual GIF. */
        if (fUseVGif)
        {
            pVmcbCtrl0->IntCtrl.n.u1VGifEnable = 1;
            pVmcbCtrl0->u64InterceptCtrl &= ~(  SVM_CTRL_INTERCEPT_CLGI
                                              | SVM_CTRL_INTERCEPT_STGI);
        }
        else
            Assert(!pVmcbCtrl0->IntCtrl.n.u1VGifEnable);
    }
    else
#endif
    {
        Assert(!pVCpu0->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvm);
        Assert(!pVmcbCtrl0->LbrVirt.n.u1VirtVmsaveVmload);
        Assert(!pVmcbCtrl0->IntCtrl.n.u1VGifEnable);
    }

    /* CR4 writes must always be intercepted for tracking PGM mode changes. */
    pVmcbCtrl0->u16InterceptWrCRx = RT_BIT(4);

    /* Intercept all DRx reads and writes by default. Changed later on. */
    pVmcbCtrl0->u16InterceptRdDRx = 0xffff;
    pVmcbCtrl0->u16InterceptWrDRx = 0xffff;

    /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
    pVmcbCtrl0->IntCtrl.n.u1VIntrMasking = 1;

    /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
       and we currently deliver both PIC and APIC interrupts alike, see hmR0SvmEvaluatePendingEvent() */
    pVmcbCtrl0->IntCtrl.n.u1IgnoreTPR = 1;

    /* Set the IO permission bitmap physical addresses. */
    pVmcbCtrl0->u64IOPMPhysAddr = g_HCPhysIOBitmap;

    /* LBR virtualization. */
    pVmcbCtrl0->LbrVirt.n.u1LbrVirt = fUseLbrVirt;

    /* The host ASID must be zero (MBZ); for the guest we start with ASID 1. */
    pVmcbCtrl0->TLBCtrl.n.u32ASID = 1;

    /* Setup Nested Paging. This doesn't change throughout the execution time of the VM. */
    pVmcbCtrl0->NestedPagingCtrl.n.u1NestedPaging = fNestedPaging;

    /* Without Nested Paging, we need additional intercepts. */
    if (!fNestedPaging)
    {
        /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
        pVmcbCtrl0->u16InterceptRdCRx |= RT_BIT(3);
        pVmcbCtrl0->u16InterceptWrCRx |= RT_BIT(3);

        /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
        pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
                                     |  SVM_CTRL_INTERCEPT_TASK_SWITCH;

        /* Page faults must be intercepted to implement shadow paging. */
        pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_PF);
    }

    /* Setup Pause Filter for guest pause-loop (spinlock) exiting. */
    if (fUsePauseFilter)
    {
        Assert(pVM->hm.s.svm.cPauseFilter > 0);
        pVmcbCtrl0->u16PauseFilterCount = pVM->hm.s.svm.cPauseFilter;
        if (fPauseFilterThreshold)
            pVmcbCtrl0->u16PauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
        pVmcbCtrl0->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_PAUSE;
    }

    /*
     * Setup the MSR permission bitmap.
     * The following MSRs are saved/restored automatically during the world-switch.
     * Don't intercept guest read/write accesses to these MSRs.
     */
    uint8_t *pbMsrBitmap0 = (uint8_t *)pVCpu0->hmr0.s.svm.pvMsrBitmap;
    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    if (!pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
    {
        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    }
    else
    {
        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
    }
    pVmcbCtrl0->u64MSRPMPhysAddr = pVCpu0->hmr0.s.svm.HCPhysMsrBitmap;

    /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
    Assert(pVmcbCtrl0->u32VmcbCleanBits == 0);

    for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpuCur = VMCC_GET_CPU(pVM, idCpu);
        PSVMVMCB pVmcbCur = pVCpuCur->hmr0.s.svm.pVmcb;
        AssertMsgReturn(RT_VALID_PTR(pVmcbCur), ("Invalid pVmcb (%p) for vcpu[%u]\n", pVmcbCur, idCpu), VERR_SVM_INVALID_PVMCB);
        PSVMVMCBCTRL pVmcbCtrlCur = &pVmcbCur->ctrl;

        /* Copy the VMCB control area. */
        memcpy(pVmcbCtrlCur, pVmcbCtrl0, sizeof(*pVmcbCtrlCur));

        /* Copy the MSR bitmap and setup the VCPU-specific host physical address. */
        uint8_t *pbMsrBitmapCur = (uint8_t *)pVCpuCur->hmr0.s.svm.pvMsrBitmap;
        memcpy(pbMsrBitmapCur, pbMsrBitmap0, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
        pVmcbCtrlCur->u64MSRPMPhysAddr = pVCpuCur->hmr0.s.svm.HCPhysMsrBitmap;

        /* Initially all VMCB clean bits MBZ indicating that everything should be loaded from the VMCB in memory. */
        Assert(pVmcbCtrlCur->u32VmcbCleanBits == 0);

        /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs initially. */
        Assert(pVCpuCur->hm.s.fGIMTrapXcptUD == pVCpu0->hm.s.fGIMTrapXcptUD);
    }

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    LogRel(("HM: fUsePauseFilter=%RTbool fUseLbrVirt=%RTbool fUseVGif=%RTbool fUseVirtVmsaveVmload=%RTbool\n", fUsePauseFilter,
            fUseLbrVirt, fUseVGif, fUseVirtVmsaveVmload));
#else
    LogRel(("HM: fUsePauseFilter=%RTbool fUseLbrVirt=%RTbool\n", fUsePauseFilter, fUseLbrVirt));
#endif
    return VINF_SUCCESS;
}


/**
 * Gets a pointer to the currently active guest (or nested-guest) VMCB.
 *
 * @returns Pointer to the current context VMCB.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPUCC pVCpu)
{
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
        return pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
#endif
    return pVCpu->hmr0.s.svm.pVmcb;
}


/**
 * Gets a pointer to the nested-guest VMCB cache.
 *
 * @returns Pointer to the nested-guest VMCB cache.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPUCC pVCpu)
{
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
    return &pVCpu->hm.s.svm.NstGstVmcbCache;
#else
    RT_NOREF(pVCpu);
    return NULL;
#endif
}


/**
 * Invalidates a guest page by guest virtual address.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCVirt  Guest virtual address of the page to invalidate.
 */
VMMR0DECL(int) SVMR0InvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt)
{
    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);

    bool const fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH) || pVCpu->CTX_SUFF(pVM)->hmr0.s.svm.fAlwaysFlushTLB;

    /* Skip it if a TLB flush is already pending. */
    if (!fFlushPending)
    {
        Log4Func(("%#RGv\n", GCVirt));

        PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
        AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);

        SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
    }
    return VINF_SUCCESS;
}


/**
 * Flushes the appropriate tagged-TLB entries.
 *
 * @param   pHostCpu    The HM physical-CPU structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pVmcb       Pointer to the VM control block.
 */
static void hmR0SvmFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PSVMVMCB pVmcb)
{
    /*
     * Force a TLB flush for the first world switch if the current CPU differs from the one
     * we ran on last. This can happen both for start & resume due to long jumps back to
     * ring-3.
     *
     * We also force a TLB flush every time when executing a nested-guest VCPU as there is no
     * correlation between it and the physical CPU.
     *
     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while
     * flushing the TLB, so we cannot reuse the ASIDs without flushing.
     */
    bool fNewAsid = false;
    Assert(pHostCpu->idCpu != NIL_RTCPUID);
    if (   pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
        || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
        || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx)
#endif
        )
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
        pVCpu->hmr0.s.fForceTLBFlush = true;
        fNewAsid = true;
    }

    /* Set TLB flush state as checked until we return from the world switch. */
    ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);

    /* Check for explicit TLB flushes. */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    {
        pVCpu->hmr0.s.fForceTLBFlush = true;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
    }

    /*
     * If the CPU is affected by AMD erratum 170 we need to flush the entire TLB for each
     * world switch. Sad. This host CPU requirement takes precedence.
     */
1321 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1322 if (pVM->hmr0.s.svm.fAlwaysFlushTLB)
1323 {
1324 pHostCpu->uCurrentAsid = 1;
1325 pVCpu->hmr0.s.uCurrentAsid = 1;
1326 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1327 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
1328 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1329
1330 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1331 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1332 }
1333 else
1334 {
1335 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
1336 if (pVCpu->hmr0.s.fForceTLBFlush)
1337 {
1338 /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
1339 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1340
1341 if (fNewAsid)
1342 {
1343 ++pHostCpu->uCurrentAsid;
1344
1345 bool fHitASIDLimit = false;
1346 if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid)
1347 {
1348 pHostCpu->uCurrentAsid = 1; /* Wraparound at 1; host uses 0 */
1349 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new ASID. */
1350 fHitASIDLimit = true;
1351 }
1352
1353 if ( fHitASIDLimit
1354 || pHostCpu->fFlushAsidBeforeUse)
1355 {
1356 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1357 pHostCpu->fFlushAsidBeforeUse = false;
1358 }
1359
1360 pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
1361 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
1362 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
1363 }
1364 else
1365 {
1366 if (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
1367 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
1368 else
1369 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
1370 }
1371
1372 pVCpu->hmr0.s.fForceTLBFlush = false;
1373 }
1374 }
1375
1376 /* Update VMCB with the ASID. */
1377 if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hmr0.s.uCurrentAsid)
1378 {
1379 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hmr0.s.uCurrentAsid;
1380 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
1381 }
1382
1383 AssertMsg(pVCpu->hmr0.s.idLastCpu == pHostCpu->idCpu,
1384 ("vcpu idLastCpu=%u hostcpu idCpu=%u\n", pVCpu->hmr0.s.idLastCpu, pHostCpu->idCpu));
1385 AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes,
1386 ("Flush count mismatch for cpu %u (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes));
1387 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid,
1388 ("cpu%d uCurrentAsid = %x\n", pHostCpu->idCpu, pHostCpu->uCurrentAsid));
1389 AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid,
1390 ("cpu%d VM uCurrentAsid = %x\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));
1391
1392#ifdef VBOX_WITH_STATISTICS
1393 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
1394 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1395 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
1396 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
1397 {
1398 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1399 }
1400 else
1401 {
1402 Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
1403 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushEntire);
1404 }
1405#endif
1406}
1407
1408
1409/**
1410 * Sets an exception intercept in the specified VMCB.
1411 *
1412 * @param pVmcb Pointer to the VM control block.
1413 * @param uXcpt The exception (X86_XCPT_*).
1414 */
1415DECLINLINE(void) hmR0SvmSetXcptIntercept(PSVMVMCB pVmcb, uint8_t uXcpt)
1416{
1417 if (!(pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt)))
1418 {
1419 pVmcb->ctrl.u32InterceptXcpt |= RT_BIT(uXcpt);
1420 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1421 }
1422}
1423
1424
1425/**
1426 * Clears an exception intercept in the specified VMCB.
1427 *
1428 * @param pVCpu The cross context virtual CPU structure.
1429 * @param pVmcb Pointer to the VM control block.
1430 * @param uXcpt The exception (X86_XCPT_*).
1431 *
1432 * @remarks This takes into account if we're executing a nested-guest and only
1433 * removes the exception intercept if both the guest -and- nested-guest
1434 * are not intercepting it.
1435 */
1436DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPUCC pVCpu, PSVMVMCB pVmcb, uint8_t uXcpt)
1437{
1438 Assert(uXcpt != X86_XCPT_DB);
1439 Assert(uXcpt != X86_XCPT_AC);
1440 Assert(uXcpt != X86_XCPT_GP);
1441#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
1442 if (pVmcb->ctrl.u32InterceptXcpt & RT_BIT(uXcpt))
1443 {
1444 bool fRemove = true;
1445# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1446 /* Only remove the intercept if the nested-guest is also not intercepting it! */
1447 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1448 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1449 {
1450 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1451 fRemove = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(uXcpt));
1452 }
1453# else
1454 RT_NOREF(pVCpu);
1455# endif
1456 if (fRemove)
1457 {
1458 pVmcb->ctrl.u32InterceptXcpt &= ~RT_BIT(uXcpt);
1459 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1460 }
1461 }
1462#else
1463 RT_NOREF3(pVCpu, pVmcb, uXcpt);
1464#endif
1465}
1466
1467
1468/**
1469 * Sets a control intercept in the specified VMCB.
1470 *
1471 * @param pVmcb Pointer to the VM control block.
1472 * @param fCtrlIntercept The control intercept (SVM_CTRL_INTERCEPT_*).
1473 */
1474DECLINLINE(void) hmR0SvmSetCtrlIntercept(PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
1475{
1476 if (!(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept))
1477 {
1478 pVmcb->ctrl.u64InterceptCtrl |= fCtrlIntercept;
1479 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1480 }
1481}
1482
1483
1484/**
1485 * Clears a control intercept in the specified VMCB.
1486 *
1487 * @returns @c true if the intercept is still set, @c false otherwise.
1488 * @param pVCpu The cross context virtual CPU structure.
1489 * @param pVmcb Pointer to the VM control block.
1490 * @param fCtrlIntercept The control intercept (SVM_CTRL_INTERCEPT_*).
1491 *
1492 * @remarks This takes into account whether we're executing a nested-guest and only
1493 *          removes the control intercept if neither the guest nor the nested-guest
1494 *          is intercepting it.
1495 */
1496static bool hmR0SvmClearCtrlIntercept(PVMCPUCC pVCpu, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
1497{
1498 if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept)
1499 {
1500 bool fRemove = true;
1501#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1502 /* Only remove the control intercept if the nested-guest is also not intercepting it! */
1503 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1504 {
1505 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1506 fRemove = !(pVmcbNstGstCache->u64InterceptCtrl & fCtrlIntercept);
1507 }
1508#else
1509 RT_NOREF(pVCpu);
1510#endif
1511 if (fRemove)
1512 {
1513 pVmcb->ctrl.u64InterceptCtrl &= ~fCtrlIntercept;
1514 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1515 }
1516 }
1517
1518 return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept);
1519}
1520
1521
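/*
 * A minimal usage sketch for the intercept helpers above (not built; the
 * helper name hmR0SvmSketchTrapRdtsc is hypothetical). The point of interest
 * is the set/clear pairing: setting unconditionally marks the intercepts
 * VMCB-dirty, while clearing defers to any nested-guest intercepts.
 */
#if 0
static void hmR0SvmSketchTrapRdtsc(PVMCPUCC pVCpu, PSVMVMCB pVmcb, bool fTrap)
{
    if (fTrap)
        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_RDTSC);           /* Clears HMSVM_VMCB_CLEAN_INTERCEPTS too. */
    else
        hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_RDTSC);  /* No-op if the nested-guest intercepts it. */
}
#endif

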
1522/**
1523 * Exports the guest (or nested-guest) CR0 into the VMCB.
1524 *
1525 * @param pVCpu The cross context virtual CPU structure.
1526 * @param pVmcb Pointer to the VM control block.
1527 *
1528 * @remarks This assumes we always pre-load the guest FPU.
1529 * @remarks No-long-jump zone!!!
1530 */
1531static void hmR0SvmExportGuestCR0(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1532{
1533 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1534
1535 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1536 uint64_t const uGuestCr0 = pCtx->cr0;
1537 uint64_t uShadowCr0 = uGuestCr0;
1538
1539 /* Always enable caching. */
1540 uShadowCr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1541
1542    /* When Nested Paging is not available, use shadow page tables and intercept #PFs (the latter is done in SVMR0SetupVM()). */
1543 if (!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
1544 {
1545 uShadowCr0 |= X86_CR0_PG /* Use shadow page tables. */
1546 | X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
1547 }
1548
1549 /*
1550 * Use the #MF style of legacy-FPU error reporting for now. Although AMD-V has MSRs that
1551     * let us isolate the host from it, IEM/REM still needs work to emulate it properly,
1552 * see @bugref{7243#c103}.
1553 */
1554 if (!(uGuestCr0 & X86_CR0_NE))
1555 {
1556 uShadowCr0 |= X86_CR0_NE;
1557 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_MF);
1558 }
1559 else
1560 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_MF);
1561
1562 /*
1563 * If the shadow and guest CR0 are identical we can avoid intercepting CR0 reads.
1564 *
1565     * CR0 writes still need interception as PGM requires tracking paging mode changes,
1566 * see @bugref{6944}.
1567 *
1568 * We also don't ever want to honor weird things like cache disable from the guest.
1569 * However, we can avoid intercepting changes to the TS & MP bits by clearing the CR0
1570 * write intercept below and keeping SVM_CTRL_INTERCEPT_CR0_SEL_WRITE instead.
1571 */
1572 if (uShadowCr0 == uGuestCr0)
1573 {
1574 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1575 {
1576 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(0);
1577 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(0);
1578 Assert(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_CR0_SEL_WRITE);
1579 }
1580 else
1581 {
1582 /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
1583 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1584 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(0))
1585 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(0));
1586 pVmcb->ctrl.u16InterceptWrCRx = (pVmcb->ctrl.u16InterceptWrCRx & ~RT_BIT(0))
1587 | (pVmcbNstGstCache->u16InterceptWrCRx & RT_BIT(0));
1588 }
1589 }
1590 else
1591 {
1592 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(0);
1593 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(0);
1594 }
1595 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1596
1597 Assert(!RT_HI_U32(uShadowCr0));
1598 if (pVmcb->guest.u64CR0 != uShadowCr0)
1599 {
1600 pVmcb->guest.u64CR0 = uShadowCr0;
1601 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1602 }
1603}
1604
1605
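/*
 * A minimal sketch of the shadow CR0 derivation performed above (not built;
 * the helper name is hypothetical), collecting the three adjustments in one
 * place.
 */
#if 0
static uint64_t hmR0SvmSketchShadowCr0(uint64_t uGuestCr0, bool fNestedPaging)
{
    uint64_t uShadowCr0 = uGuestCr0 & ~(uint64_t)(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
    if (!fNestedPaging)
        uShadowCr0 |= X86_CR0_PG | X86_CR0_WP;  /* Shadow paging needs paging + CPL-0 write protection. */
    if (!(uGuestCr0 & X86_CR0_NE))
        uShadowCr0 |= X86_CR0_NE;               /* Legacy #MF-style FPU error reporting (with #MF intercepted). */
    return uShadowCr0;
}
#endif

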
1606/**
1607 * Exports the guest (or nested-guest) CR3 into the VMCB.
1608 *
1609 * @param pVCpu The cross context virtual CPU structure.
1610 * @param pVmcb Pointer to the VM control block.
1611 *
1612 * @remarks No-long-jump zone!!!
1613 */
1614static void hmR0SvmExportGuestCR3(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1615{
1616 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1617
1618 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1619 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1620 if (pVM->hmr0.s.fNestedPaging)
1621 {
1622 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetHyperCR3(pVCpu);
1623 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1624 pVmcb->guest.u64CR3 = pCtx->cr3;
1625 Assert(pVmcb->ctrl.u64NestedPagingCR3);
1626 }
1627 else
1628 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
1629
1630 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1631}
1632
1633
1634/**
1635 * Exports the guest (or nested-guest) CR4 into the VMCB.
1636 *
     * @returns VBox status code.
1637 * @param pVCpu The cross context virtual CPU structure.
1638 * @param pVmcb Pointer to the VM control block.
1639 *
1640 * @remarks No-long-jump zone!!!
1641 */
1642static int hmR0SvmExportGuestCR4(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1643{
1644 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1645
1646 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1647 uint64_t uShadowCr4 = pCtx->cr4;
1648 if (!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
1649 {
1650 switch (pVCpu->hm.s.enmShadowMode)
1651 {
1652 case PGMMODE_REAL:
1653 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
1654 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1655
1656 case PGMMODE_32_BIT: /* 32-bit paging. */
1657 uShadowCr4 &= ~X86_CR4_PAE;
1658 break;
1659
1660 case PGMMODE_PAE: /* PAE paging. */
1661 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
1662                /* Must use PAE paging as we could use physical memory > 4 GB. */
1663 uShadowCr4 |= X86_CR4_PAE;
1664 break;
1665
1666 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
1667 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
1668#ifdef VBOX_WITH_64_BITS_GUESTS
1669 break;
1670#else
1671 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1672#endif
1673
1674 default: /* shut up gcc */
1675 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1676 }
1677 }
1678
1679 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
1680 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
1681 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
1682 {
1683 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
1684 hmR0SvmUpdateVmRunFunction(pVCpu);
1685 }
1686
1687 /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */
1688 if (uShadowCr4 == pCtx->cr4)
1689 {
1690 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1691 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(4);
1692 else
1693 {
1694 /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */
1695 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
1696 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(4))
1697 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(4));
1698 }
1699 }
1700 else
1701 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(4);
1702
1703 /* CR4 writes are always intercepted (both guest, nested-guest) for tracking PGM mode changes. */
1704 Assert(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(4));
1705
1706    /* Update the VMCB with the shadow CR4 and clear the appropriate VMCB clean bits. */
1707 Assert(!RT_HI_U32(uShadowCr4));
1708 pVmcb->guest.u64CR4 = uShadowCr4;
1709 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS);
1710
1711 return VINF_SUCCESS;
1712}
1713
1714
1715/**
1716 * Exports the guest (or nested-guest) control registers into the VMCB.
1717 *
1718 * @returns VBox status code.
1719 * @param pVCpu The cross context virtual CPU structure.
1720 * @param pVmcb Pointer to the VM control block.
1721 *
1722 * @remarks No-long-jump zone!!!
1723 */
1724static int hmR0SvmExportGuestControlRegs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1725{
1726 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1727
1728 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR_MASK)
1729 {
1730 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR0)
1731 hmR0SvmExportGuestCR0(pVCpu, pVmcb);
1732
1733 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR2)
1734 {
1735 pVmcb->guest.u64CR2 = pVCpu->cpum.GstCtx.cr2;
1736 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
1737 }
1738
1739 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR3)
1740 hmR0SvmExportGuestCR3(pVCpu, pVmcb);
1741
1742        /* CR4 re-loading is ASSUMED to be done every time we get in from ring-3! (XCR0) */
1743 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR4)
1744 {
1745 int rc = hmR0SvmExportGuestCR4(pVCpu, pVmcb);
1746 if (RT_FAILURE(rc))
1747 return rc;
1748 }
1749
1750 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_CR_MASK;
1751 }
1752 return VINF_SUCCESS;
1753}
1754
1755
1756/**
1757 * Exports the guest (or nested-guest) segment registers into the VMCB.
1758 *
1760 * @param pVCpu The cross context virtual CPU structure.
1761 * @param pVmcb Pointer to the VM control block.
1762 *
1763 * @remarks No-long-jump zone!!!
1764 */
1765static void hmR0SvmExportGuestSegmentRegs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1766{
1767 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1768 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1769
1770 /* Guest segment registers. */
1771 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SREG_MASK)
1772 {
1773 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CS)
1774 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, CS, cs);
1775
1776 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SS)
1777 {
1778 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, SS, ss);
1779 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1780 }
1781
1782 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DS)
1783 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, DS, ds);
1784
1785 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_ES)
1786 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, ES, es);
1787
1788 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_FS)
1789 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);
1790
1791 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GS)
1792 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);
1793
1794 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1795 }
1796
1797 /* Guest TR. */
1798 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_TR)
1799 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);
1800
1801 /* Guest LDTR. */
1802 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_LDTR)
1803 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);
1804
1805 /* Guest GDTR. */
1806 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GDTR)
1807 {
1808 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1809 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1810 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1811 }
1812
1813 /* Guest IDTR. */
1814 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_IDTR)
1815 {
1816 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1817 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1818 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1819 }
1820
1821 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SREG_MASK
1822 | HM_CHANGED_GUEST_TABLE_MASK);
1823}
1824
1825
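/*
 * A sketch of what one HMSVM_SEG_REG_COPY_TO_VMCB expansion amounts to (not
 * built; it assumes the macro copies the selector, limit, base and the
 * VMCB-packed attribute bits via HMSVM_CPU_2_VMCB_SEG_ATTR as elsewhere in
 * this file).
 */
#if 0
static void hmR0SvmSketchCopyCsToVmcb(PCCPUMCTX pCtx, PSVMVMCBSTATESAVE pVmcbStateSave)
{
    pVmcbStateSave->CS.u16Sel   = pCtx->cs.Sel;
    pVmcbStateSave->CS.u32Limit = pCtx->cs.u32Limit;
    pVmcbStateSave->CS.u64Base  = pCtx->cs.u64Base;
    pVmcbStateSave->CS.u16Attr  = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->cs.Attr.u); /* VMCB packs the attribute bits. */
}
#endif

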
1826/**
1827 * Exports the guest (or nested-guest) MSRs into the VMCB.
1828 *
1829 * @param pVCpu The cross context virtual CPU structure.
1830 * @param pVmcb Pointer to the VM control block.
1831 *
1832 * @remarks No-long-jump zone!!!
1833 */
1834static void hmR0SvmExportGuestMsrs(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1835{
1836 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1837 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1838
1839 /* Guest Sysenter MSRs. */
1840 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
1841 {
1842 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
1843 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1844
1845 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
1846 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1847
1848 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
1849 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1850 }
1851
1852 /*
1853 * Guest EFER MSR.
1854 * AMD-V requires guest EFER.SVME to be set. Weird.
1855 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
1856 */
1857 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_EFER_MSR)
1858 {
1859 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1860 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1861 }
1862
1863    /* If the guest isn't in 64-bit mode, clear the MSR_K6_EFER_LME bit; otherwise SVM expects AMD64 shadow paging. */
1864 if ( !CPUMIsGuestInLongModeEx(pCtx)
1865 && (pCtx->msrEFER & MSR_K6_EFER_LME))
1866 {
1867 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
1868 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1869 }
1870
1871 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSCALL_MSRS)
1872 {
1873 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1874 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1875 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1876 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1877 }
1878
1879 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_KERNEL_GS_BASE)
1880 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1881
1882 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SYSENTER_MSR_MASK
1883 | HM_CHANGED_GUEST_EFER_MSR
1884 | HM_CHANGED_GUEST_SYSCALL_MSRS
1885 | HM_CHANGED_GUEST_KERNEL_GS_BASE);
1886
1887 /*
1888 * Setup the PAT MSR (applicable for Nested Paging only).
1889 *
1890 * The default value should be MSR_IA32_CR_PAT_INIT_VAL, but we treat all guest memory
1891 * as WB, so choose type 6 for all PAT slots, see @bugref{9634}.
1892 *
1893     * While guests can modify and see the modified values through the shadow values,
1894     * we shall not honor any guest modifications of this MSR, to ensure caching is
1895     * always enabled, similar to how we clear the CR0.CD and NW bits.
1896 *
1897 * For nested-guests this needs to always be set as well, see @bugref{7243#c109}.
1898 */
1899 pVmcb->guest.u64PAT = UINT64_C(0x0006060606060606);
1900
1901 /* Enable the last branch record bit if LBR virtualization is enabled. */
1902 if (pVmcb->ctrl.LbrVirt.n.u1LbrVirt)
1903 pVmcb->guest.u64DBGCTL = MSR_IA32_DEBUGCTL_LBR;
1904}
1905
1906
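/*
 * A decoding sketch for the PAT value set above (not built; the helper is
 * hypothetical): the architectural PAT MSR holds eight one-byte entries with
 * the memory type in bits 2:0 of each byte, and type 06h is write-back (WB),
 * which is what the low entries of the value above select.
 */
#if 0
static uint8_t hmR0SvmSketchPatEntry(uint64_t uPat, unsigned iEntry /* 0..7 */)
{
    return (uint8_t)((uPat >> (iEntry * 8)) & 0x7);     /* E.g. entry 0 of the value above yields 6 (WB). */
}
#endif

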
1907/**
1908 * Exports the guest (or nested-guest) debug state into the VMCB and programs
1909 * the necessary intercepts accordingly.
1910 *
1911 * @param pVCpu The cross context virtual CPU structure.
1912 * @param pVmcb Pointer to the VM control block.
1913 *
1914 * @remarks No-long-jump zone!!!
1915 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
1916 */
1917static void hmR0SvmExportSharedDebugState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
1918{
1919 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1920
1921 /** @todo Figure out stepping with nested-guest. */
1922 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1923 {
1924 /*
1925 * We don't want to always intercept DRx read/writes for nested-guests as it causes
1926 * problems when the nested hypervisor isn't intercepting them, see @bugref{10080}.
1927 * Instead, they are strictly only requested when the nested hypervisor intercepts
1928 * them -- handled while merging VMCB controls.
1929 *
1930 * If neither the outer nor the nested-hypervisor is intercepting DRx read/writes,
1931 * then the nested-guest debug state should be actively loaded on the host so that
1932     * the nested-guest reads/writes its own debug registers without causing VM-exits.
1933 */
1934 if ( ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1935 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1936 && !CPUMIsGuestDebugStateActive(pVCpu))
1937 {
1938 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
1939 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1940 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1941 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1942 }
1943
1944 pVmcb->guest.u64DR6 = pCtx->dr[6];
1945 pVmcb->guest.u64DR7 = pCtx->dr[7];
1946 return;
1947 }
1948
1949 /*
1950 * Anyone single stepping on the host side? If so, we'll have to use the
1951 * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
1952     * the VMM level like the VT-x implementation does.
1953 */
1954 bool fInterceptMovDRx = false;
1955 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
1956 if (fStepping)
1957 {
1958 pVCpu->hmr0.s.fClearTrapFlag = true;
1959 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1960 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
1961 }
1962
1963 if ( fStepping
1964 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
1965 {
1966 /*
1967 * Use the combined guest and host DRx values found in the hypervisor
1968 * register set because the debugger has breakpoints active or someone
1969 * is single stepping on the host side.
1970 *
1971 * Note! DBGF expects a clean DR6 state before executing guest code.
1972 */
1973 if (!CPUMIsHyperDebugStateActive(pVCpu))
1974 {
1975 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1976 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1977 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1978 }
1979
1980 /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
1981 if ( pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
1982 || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
1983 {
1984 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1985 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
1986 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1987 }
1988
1989 /** @todo If we cared, we could optimize to allow the guest to read registers
1990 * with the same values. */
1991 fInterceptMovDRx = true;
1992 pVCpu->hmr0.s.fUsingHyperDR7 = true;
1993 Log5(("hmR0SvmExportSharedDebugState: Loaded hyper DRx\n"));
1994 }
1995 else
1996 {
1997 /*
1998 * Update DR6, DR7 with the guest values if necessary.
1999 */
2000 if ( pVmcb->guest.u64DR7 != pCtx->dr[7]
2001 || pVmcb->guest.u64DR6 != pCtx->dr[6])
2002 {
2003 pVmcb->guest.u64DR7 = pCtx->dr[7];
2004 pVmcb->guest.u64DR6 = pCtx->dr[6];
2005 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
2006 }
2007 pVCpu->hmr0.s.fUsingHyperDR7 = false;
2008
2009 /*
2010 * If the guest has enabled debug registers, we need to load them prior to
2011 * executing guest code so they'll trigger at the right time.
2012 */
2013 if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
2014 {
2015 if (!CPUMIsGuestDebugStateActive(pVCpu))
2016 {
2017 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
2018 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
2019 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
2020 Assert(CPUMIsGuestDebugStateActive(pVCpu));
2021 }
2022 Log5(("hmR0SvmExportSharedDebugState: Loaded guest DRx\n"));
2023 }
2024 /*
2025         * If no debugging is enabled, we'll lazily load DR0-3. We don't need to
2026 * intercept #DB as DR6 is updated in the VMCB.
2027 *
2028 * Note! If we cared and dared, we could skip intercepting \#DB here.
2029 * However, \#DB shouldn't be performance critical, so we'll play safe
2030 * and keep the code similar to the VT-x code and always intercept it.
2031 */
2032 else if (!CPUMIsGuestDebugStateActive(pVCpu))
2033 fInterceptMovDRx = true;
2034 }
2035
2036 Assert(pVmcb->ctrl.u32InterceptXcpt & RT_BIT_32(X86_XCPT_DB));
2037 if (fInterceptMovDRx)
2038 {
2039 if ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
2040 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
2041 {
2042 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
2043 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
2044 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2045 }
2046 }
2047 else
2048 {
2049 if ( pVmcb->ctrl.u16InterceptRdDRx
2050 || pVmcb->ctrl.u16InterceptWrDRx)
2051 {
2052 pVmcb->ctrl.u16InterceptRdDRx = 0;
2053 pVmcb->ctrl.u16InterceptWrDRx = 0;
2054 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2055 }
2056 }
2057 Log4Func(("DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7]));
2058}
2059
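
/*
 * A sketch of the DRx intercept toggling done above (not built; the helper is
 * hypothetical): bit n of u16InterceptRdDRx/u16InterceptWrDRx intercepts
 * reads/writes of DRn, so 0xffff traps every MOV DRx.
 */
#if 0
static void hmR0SvmSketchInterceptAllDRx(PSVMVMCB pVmcb, bool fIntercept)
{
    uint16_t const uMask = fIntercept ? UINT16_C(0xffff) : UINT16_C(0);
    if (   pVmcb->ctrl.u16InterceptRdDRx != uMask
        || pVmcb->ctrl.u16InterceptWrDRx != uMask)
    {
        pVmcb->ctrl.u16InterceptRdDRx = uMask;
        pVmcb->ctrl.u16InterceptWrDRx = uMask;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;   /* Make the CPU reload the intercepts. */
    }
}
#endif

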
2060/**
2061 * Exports the hardware virtualization state into the guest (or nested-guest)
2062 * VMCB.
2063 *
2064 * @param pVCpu The cross context virtual CPU structure.
2065 * @param pVmcb Pointer to the VM control block.
2066 *
2067 * @remarks No-long-jump zone!!!
2068 */
2069static void hmR0SvmExportGuestHwvirtState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2070{
2071 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2072
2073 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_HWVIRT)
2074 {
2075 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
2076 {
2077 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2078 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2079
2080 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx); /* Nested VGIF is not supported yet. */
2081 Assert(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF); /* Physical hardware supports VGIF. */
2082 Assert(HMIsSvmVGifActive(pVM)); /* Outer VM has enabled VGIF. */
2083 NOREF(pVM);
2084
2085 pVmcb->ctrl.IntCtrl.n.u1VGif = CPUMGetGuestGif(pCtx);
2086 }
2087
2088 /*
2089         * Ensure the nested-guest pause-filter counters don't exceed the outer guest values,
2090         * especially since SVM doesn't have a preemption timer.
2091         *
2092         * We do this here rather than in hmR0SvmSetupVmcbNested() as we may have been executing
2093         * the nested-guest in IEM (incl. PAUSE instructions, which update the pause-filter
2094         * counters) and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
2095 */
2096 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2097 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
2098 uint16_t const uGuestPauseFilterCount = pVM->hm.s.svm.cPauseFilter;
2099 uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
2100 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_PAUSE))
2101 {
2102 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2103 pVmcbCtrl->u16PauseFilterCount = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
2104 pVmcbCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
2105 }
2106 else
2107 {
2108 /** @todo r=ramshankar: We can turn these assignments into assertions. */
2109 pVmcbCtrl->u16PauseFilterCount = uGuestPauseFilterCount;
2110 pVmcbCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
2111 }
2112 pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2113
2114 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_HWVIRT;
2115 }
2116}
2117
2118
2119/**
2120 * Exports the guest APIC TPR state into the VMCB.
2121 *
2122 * @returns VBox status code.
2123 * @param pVCpu The cross context virtual CPU structure.
2124 * @param pVmcb Pointer to the VM control block.
2125 */
2126static int hmR0SvmExportGuestApicTpr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2127{
2128 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2129
2130 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
2131 {
2132 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2133 if ( PDMHasApic(pVM)
2134 && APICIsEnabled(pVCpu))
2135 {
2136 bool fPendingIntr;
2137 uint8_t u8Tpr;
2138 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
2139 AssertRCReturn(rc, rc);
2140
2141 /* Assume that we need to trap all TPR accesses and thus need not check on
2142 every #VMEXIT if we should update the TPR. */
2143 Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking);
2144 pVCpu->hmr0.s.svm.fSyncVTpr = false;
2145
2146 if (!pVM->hm.s.fTprPatchingActive)
2147 {
2148 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2149 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
2150
2151 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we
2152 can deliver the interrupt to the guest. */
2153 if (fPendingIntr)
2154 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
2155 else
2156 {
2157 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
2158 pVCpu->hmr0.s.svm.fSyncVTpr = true;
2159 }
2160
2161 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);
2162 }
2163 else
2164 {
2165            /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
2166 pVmcb->guest.u64LSTAR = u8Tpr;
2167 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hmr0.s.svm.pvMsrBitmap;
2168
2169 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
2170 if (fPendingIntr)
2171 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
2172 else
2173 {
2174 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
2175 pVCpu->hmr0.s.svm.fSyncVTpr = true;
2176 }
2177 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
2178 }
2179 }
2180 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
2181 }
2182 return VINF_SUCCESS;
2183}
2184
2185
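/*
 * A sketch of the TPR-to-VTPR mapping used above (not built; the helper is
 * hypothetical): the 4-bit task-priority class in TPR bits 7:4 lands in VTPR
 * bits 3:0.
 */
#if 0
static uint8_t hmR0SvmSketchTprToVTpr(uint8_t u8Tpr)
{
    return u8Tpr >> 4;      /* E.g. TPR 0xa0 (priority class 10) -> VTPR 0x0a. */
}
#endif

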
2186/**
2187 * Sets up the exception intercepts required for guest execution in the VMCB.
2188 *
2189 * @param pVCpu The cross context virtual CPU structure.
2190 * @param pVmcb Pointer to the VM control block.
2191 *
2192 * @remarks No-long-jump zone!!!
2193 */
2194static void hmR0SvmExportGuestXcptIntercepts(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2195{
2196 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2197
2198 /* If we modify intercepts from here, please check & adjust hmR0SvmMergeVmcbCtrlsNested() if required. */
2199 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_SVM_XCPT_INTERCEPTS)
2200 {
2201 /* Trap #UD for GIM provider (e.g. for hypercalls). */
2202 if (pVCpu->hm.s.fGIMTrapXcptUD || pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
2203 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_UD);
2204 else
2205 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_UD);
2206
2207 /* Trap #BP for INT3 debug breakpoints set by the VM debugger. */
2208 if (pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
2209 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_BP);
2210 else
2211 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_BP);
2212
2213 /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmExportGuestCR0(). */
2214 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_SVM_XCPT_INTERCEPTS);
2215 }
2216}
2217
2218
2219#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2220/**
2221 * Merges guest and nested-guest intercepts for executing the nested-guest using
2222 * hardware-assisted SVM.
2223 *
2224 * This merges the guest and nested-guest intercepts in such a way that if the
2225 * outer guest intercept is set, we intercept it in the nested-guest as
2226 * well.
2227 *
2228 * @param pVCpu The cross context virtual CPU structure.
2230 */
2231static void hmR0SvmMergeVmcbCtrlsNested(PVMCPUCC pVCpu)
2232{
2233 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2234 PCSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
2235 PSVMVMCB pVmcbNstGst = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
2236 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2237
2238 /* Merge the guest's CR intercepts into the nested-guest VMCB. */
2239 pVmcbNstGstCtrl->u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
2240 pVmcbNstGstCtrl->u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
2241
2242 /* Always intercept CR4 writes for tracking PGM mode changes. */
2243 pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(4);
2244
2245 /* Without nested paging, intercept CR3 reads and writes as we load shadow page tables. */
2246 if (!pVM->hmr0.s.fNestedPaging)
2247 {
2248 pVmcbNstGstCtrl->u16InterceptRdCRx |= RT_BIT(3);
2249 pVmcbNstGstCtrl->u16InterceptWrCRx |= RT_BIT(3);
2250 }
2251
2252 /* Merge the guest's DR intercepts into the nested-guest VMCB. */
2253 pVmcbNstGstCtrl->u16InterceptRdDRx |= pVmcb->ctrl.u16InterceptRdDRx;
2254 pVmcbNstGstCtrl->u16InterceptWrDRx |= pVmcb->ctrl.u16InterceptWrDRx;
2255
2256 /*
2257 * Merge the guest's exception intercepts into the nested-guest VMCB.
2258 *
2259 * - #UD: Exclude these as the outer guest's GIM hypercalls are not applicable
2260 * while executing the nested-guest.
2261 *
2262 * - #BP: Exclude breakpoints set by the VM debugger for the outer guest. This can
2263 * be tweaked later depending on how we wish to implement breakpoints.
2264 *
2265     * - #GP: Exclude these as it's the inner VMM's problem to get vmsvga 3d drivers
2266 * loaded into their guests, not ours.
2267 *
2268 * Warning!! This ASSUMES we only intercept \#UD for hypercall purposes and \#BP
2269 * for VM debugger breakpoints, see hmR0SvmExportGuestXcptIntercepts().
2270 */
2271#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
2272 pVmcbNstGstCtrl->u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt
2273 & ~( RT_BIT(X86_XCPT_UD)
2274 | RT_BIT(X86_XCPT_BP)
2275 | (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv ? RT_BIT(X86_XCPT_GP) : 0));
2276#else
2277 pVmcbNstGstCtrl->u32InterceptXcpt |= pVmcb->ctrl.u32InterceptXcpt;
2278#endif
2279
2280 /*
2281 * Adjust intercepts while executing the nested-guest that differ from the
2282 * outer guest intercepts.
2283 *
2284 * - VINTR: Exclude the outer guest intercept as we don't need to cause VINTR #VMEXITs
2285 * that belong to the nested-guest to the outer guest.
2286 *
2287 * - VMMCALL: Exclude the outer guest intercept as when it's also not intercepted by
2288 * the nested-guest, the physical CPU raises a \#UD exception as expected.
2289 */
2290 pVmcbNstGstCtrl->u64InterceptCtrl |= (pVmcb->ctrl.u64InterceptCtrl & ~( SVM_CTRL_INTERCEPT_VINTR
2291 | SVM_CTRL_INTERCEPT_VMMCALL))
2292 | HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;
2293
2294 Assert( (pVmcbNstGstCtrl->u64InterceptCtrl & HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS)
2295 == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
2296
2297 /* Finally, update the VMCB clean bits. */
2298 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2299}
2300#endif
2301
2302
2303/**
2304 * Enters the AMD-V session.
2305 *
2306 * @returns VBox status code.
2307 * @param pVCpu The cross context virtual CPU structure.
2308 */
2309VMMR0DECL(int) SVMR0Enter(PVMCPUCC pVCpu)
2310{
2311 AssertPtr(pVCpu);
2312 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
2313 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2314
2315 LogFlowFunc(("pVCpu=%p\n", pVCpu));
2316 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
2317 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
2318
2319 pVCpu->hmr0.s.fLeaveDone = false;
2320 return VINF_SUCCESS;
2321}
2322
2323
2324/**
2325 * Thread-context callback for AMD-V.
2326 *
2327 * This is used together with RTThreadCtxHookCreate() on platforms which
2328 * support it, and directly from VMMR0EmtPrepareForBlocking() and
2329 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
2330 *
2331 * @param enmEvent The thread-context event.
2332 * @param pVCpu The cross context virtual CPU structure.
2333 * @param fGlobalInit Whether global VT-x/AMD-V init. is used.
2334 * @thread EMT(pVCpu)
2335 */
2336VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit)
2337{
2338 NOREF(fGlobalInit);
2339
2340 switch (enmEvent)
2341 {
2342 case RTTHREADCTXEVENT_OUT:
2343 {
2344 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2345 VMCPU_ASSERT_EMT(pVCpu);
2346
2347 /* No longjmps (log-flush, locks) in this fragile context. */
2348 VMMRZCallRing3Disable(pVCpu);
2349
2350 if (!pVCpu->hmr0.s.fLeaveDone)
2351 {
2352 hmR0SvmLeave(pVCpu, false /* fImportState */);
2353 pVCpu->hmr0.s.fLeaveDone = true;
2354 }
2355
2356 /* Leave HM context, takes care of local init (term). */
2357 int rc = HMR0LeaveCpu(pVCpu);
2358 AssertRC(rc); NOREF(rc);
2359
2360 /* Restore longjmp state. */
2361 VMMRZCallRing3Enable(pVCpu);
2362 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
2363 break;
2364 }
2365
2366 case RTTHREADCTXEVENT_IN:
2367 {
2368 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2369 VMCPU_ASSERT_EMT(pVCpu);
2370
2371 /* No longjmps (log-flush, locks) in this fragile context. */
2372 VMMRZCallRing3Disable(pVCpu);
2373
2374 /*
2375 * Initialize the bare minimum state required for HM. This takes care of
2376 * initializing AMD-V if necessary (onlined CPUs, local init etc.)
2377 */
2378 int rc = hmR0EnterCpu(pVCpu);
2379 AssertRC(rc); NOREF(rc);
2380 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
2381 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
2382
2383 pVCpu->hmr0.s.fLeaveDone = false;
2384
2385 /* Restore longjmp state. */
2386 VMMRZCallRing3Enable(pVCpu);
2387 break;
2388 }
2389
2390 default:
2391 break;
2392 }
2393}
2394
2395
2396/**
2397 * Saves the host state.
2398 *
2399 * @returns VBox status code.
2400 * @param pVCpu The cross context virtual CPU structure.
2401 *
2402 * @remarks No-long-jump zone!!!
2403 */
2404VMMR0DECL(int) SVMR0ExportHostState(PVMCPUCC pVCpu)
2405{
2408 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
2409 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_HOST_CONTEXT);
2410 return VINF_SUCCESS;
2411}
2412
2413
2414/**
2415 * Exports the guest or nested-guest state from the virtual-CPU context into the
2416 * VMCB.
2417 *
2418 * Also sets up the appropriate VMRUN function to execute guest or nested-guest
2419 * code based on the virtual-CPU mode.
2420 *
2421 * @returns VBox status code.
2422 * @param pVCpu The cross context virtual CPU structure.
2423 * @param pSvmTransient Pointer to the SVM-transient structure.
2424 *
2425 * @remarks No-long-jump zone!!!
2426 */
2427static int hmR0SvmExportGuestState(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
2428{
2429 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
2430
2431 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
2432 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2433 Assert(pVmcb);
2434
2435 pVmcb->guest.u64RIP = pCtx->rip;
2436 pVmcb->guest.u64RSP = pCtx->rsp;
2437 pVmcb->guest.u64RFlags = pCtx->eflags.u32;
2438 pVmcb->guest.u64RAX = pCtx->rax;
2439
2440 bool const fIsNestedGuest = pSvmTransient->fIsNestedGuest;
2441 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
2442
2443 int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcb);
2444 AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);
2445 hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcb);
2446 hmR0SvmExportGuestMsrs(pVCpu, pVmcb);
2447 hmR0SvmExportGuestHwvirtState(pVCpu, pVmcb);
2448
2449 ASMSetFlags(fEFlags);
2450
2451 if (!fIsNestedGuest)
2452 {
2453        /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as
2454 otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */
2455 hmR0SvmExportGuestApicTpr(pVCpu, pVmcb);
2456 hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
2457 }
2458
2459    /* Clear any bits that are either exported unconditionally or are unused/reserved. */
2460 uint64_t fUnusedMask = HM_CHANGED_GUEST_RIP
2461 | HM_CHANGED_GUEST_RFLAGS
2462 | HM_CHANGED_GUEST_GPRS_MASK
2463 | HM_CHANGED_GUEST_X87
2464 | HM_CHANGED_GUEST_SSE_AVX
2465 | HM_CHANGED_GUEST_OTHER_XSAVE
2466 | HM_CHANGED_GUEST_XCRx
2467 | HM_CHANGED_GUEST_TSC_AUX
2468 | HM_CHANGED_GUEST_OTHER_MSRS;
2469 if (fIsNestedGuest)
2470 fUnusedMask |= HM_CHANGED_SVM_XCPT_INTERCEPTS
2471 | HM_CHANGED_GUEST_APIC_TPR;
2472
2473 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( fUnusedMask
2474 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK)));
2475
2476#ifdef VBOX_STRICT
2477 /*
2478 * All of the guest-CPU state and SVM keeper bits should be exported here by now,
2479 * except for the host-context and/or shared host-guest context bits.
2480 */
2481 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
2482 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)),
2483 ("fCtxChanged=%#RX64\n", fCtxChanged));
2484
2485 /*
2486     * If we need to log state that isn't always imported, we'll need to import it here.
2487     * See hmR0SvmPostRunGuest() for which part of the state is imported unconditionally.
2488 */
2489 hmR0SvmLogState(pVCpu, pVmcb, "hmR0SvmExportGuestState", 0 /* fFlags */, 0 /* uVerbose */);
2490#endif
2491
2492 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
2493 return VINF_SUCCESS;
2494}
2495
2496
2497#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2498/**
2499 * Merges the guest and nested-guest MSR permission bitmap.
2500 *
2501 * If the guest is intercepting an MSR we need to intercept it regardless of
2502 * whether the nested-guest is intercepting it or not.
2503 *
2504 * @param pHostCpu The HM physical-CPU structure.
2505 * @param pVCpu The cross context virtual CPU structure.
2506 *
2507 * @remarks No-long-jmp zone!!!
2508 */
2509DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu)
2510{
2511 uint64_t const *pu64GstMsrpm = (uint64_t const *)pVCpu->hmr0.s.svm.pvMsrBitmap;
2512 uint64_t const *pu64NstGstMsrpm = (uint64_t const *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
2513 uint64_t *pu64DstMsrpm = (uint64_t *)pHostCpu->n.svm.pvNstGstMsrpm;
2514
2515 /* MSRPM bytes from offset 0x1800 are reserved, so we stop merging there. */
2516 uint32_t const offRsvdQwords = 0x1800 >> 3;
2517 for (uint32_t i = 0; i < offRsvdQwords; i++)
2518 pu64DstMsrpm[i] = pu64NstGstMsrpm[i] | pu64GstMsrpm[i];
2519}
2520
2521
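/*
 * A sketch of where the intercept bits for an MSR in the first MSRPM range
 * (0x0..0x1fff) live (not built; the helper is hypothetical), assuming the
 * AMD layout of two bits per MSR: the even bit intercepts reads, the odd bit
 * intercepts writes. ORing the guest and nested-guest qwords above therefore
 * keeps an MSR intercepted if either party intercepts it.
 */
#if 0
static void hmR0SvmSketchMsrpmBitsLowRange(uint32_t idMsr, uint32_t *piBitRd, uint32_t *piBitWr)
{
    *piBitRd = idMsr * 2;           /* Even bit: read intercept. */
    *piBitWr = idMsr * 2 + 1;       /* Odd bit:  write intercept. */
}
#endif

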
2522/**
2523 * Caches the nested-guest VMCB fields before we modify them for execution using
2524 * hardware-assisted SVM.
2525 *
2526 * @returns true if the VMCB was previously already cached, false otherwise.
2527 * @param pVCpu The cross context virtual CPU structure.
2528 *
2529 * @sa HMNotifySvmNstGstVmexit.
2530 */
2531static bool hmR0SvmCacheVmcbNested(PVMCPUCC pVCpu)
2532{
2533 /*
2534 * Cache the nested-guest programmed VMCB fields if we have not cached it yet.
2535 * Otherwise we risk re-caching the values we may have modified, see @bugref{7243#c44}.
2536 *
2537 * Nested-paging CR3 is not saved back into the VMCB on #VMEXIT, hence no need to
2538 * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT".
2539 */
2540 PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
2541 bool const fWasCached = pVmcbNstGstCache->fCacheValid;
2542 if (!fWasCached)
2543 {
2544 PCSVMVMCB pVmcbNstGst = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
2545 PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2546 pVmcbNstGstCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
2547 pVmcbNstGstCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
2548 pVmcbNstGstCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
2549 pVmcbNstGstCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
2550 pVmcbNstGstCache->u16PauseFilterThreshold = pVmcbNstGstCtrl->u16PauseFilterThreshold;
2551 pVmcbNstGstCache->u16PauseFilterCount = pVmcbNstGstCtrl->u16PauseFilterCount;
2552 pVmcbNstGstCache->u32InterceptXcpt = pVmcbNstGstCtrl->u32InterceptXcpt;
2553 pVmcbNstGstCache->u64InterceptCtrl = pVmcbNstGstCtrl->u64InterceptCtrl;
2554 pVmcbNstGstCache->u64TSCOffset = pVmcbNstGstCtrl->u64TSCOffset;
2555 pVmcbNstGstCache->fVIntrMasking = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
2556 pVmcbNstGstCache->fNestedPaging = pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging;
2557 pVmcbNstGstCache->fLbrVirt = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
2558 pVmcbNstGstCache->fCacheValid = true;
2559 Log4Func(("Cached VMCB fields\n"));
2560 }
2561
2562 return fWasCached;
2563}
2564
2565
2566/**
2567 * Sets up the nested-guest VMCB for execution using hardware-assisted SVM.
2568 *
2569 * This is done the first time we enter nested-guest execution using SVM R0 and
2570 * remains until the nested-guest \#VMEXIT (not to be confused with physical CPU
2571 * \#VMEXITs which may or may not cause a corresponding nested-guest \#VMEXIT).
2572 *
2573 * @param pVCpu The cross context virtual CPU structure.
2574 */
2575static void hmR0SvmSetupVmcbNested(PVMCPUCC pVCpu)
2576{
2577 PSVMVMCB pVmcbNstGst = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
2578 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
2579
2580 HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
2581
2582 /*
2583 * First cache the nested-guest VMCB fields we may potentially modify.
2584 */
2585 bool const fVmcbCached = hmR0SvmCacheVmcbNested(pVCpu);
2586 if (!fVmcbCached)
2587 {
2588 /*
2589         * The IOPM of the nested-guest can be ignored because the guest always
2590 * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
2591 * than the nested-guest IOPM and swap the field back on the #VMEXIT.
2592 */
2593 pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
2594
2595 /*
2596 * Use the same nested-paging as the outer guest. We can't dynamically switch off
2597 * nested-paging suddenly while executing a VM (see assertion at the end of
2598 * Trap0eHandler() in PGMAllBth.h).
2599 */
2600 pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging;
2601
2602 /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
2603 pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = 1;
2604
2605 /*
2606 * Turn off TPR syncing on #VMEXIT for nested-guests as CR8 intercepts are subject
2607 * to the nested-guest intercepts and we always run with V_INTR_MASKING.
2608 */
2609 pVCpu->hmr0.s.svm.fSyncVTpr = false;
2610
2611#ifdef DEBUG_ramshankar
2612 /* For debugging purposes - copy the LBR info. from outer guest VMCB. */
2613 pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt = pVmcb->ctrl.LbrVirt.n.u1LbrVirt;
2614#endif
2615
2616 /*
2617 * If we don't expose Virtualized-VMSAVE/VMLOAD feature to the outer guest, we
2618 * need to intercept VMSAVE/VMLOAD instructions executed by the nested-guest.
2619 */
2620 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
2621 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE
2622 | SVM_CTRL_INTERCEPT_VMLOAD;
2623
2624 /*
2625 * If we don't expose Virtual GIF feature to the outer guest, we need to intercept
2626 * CLGI/STGI instructions executed by the nested-guest.
2627 */
2628 if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
2629 pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI
2630 | SVM_CTRL_INTERCEPT_STGI;
2631
2632 /* Merge the guest and nested-guest intercepts. */
2633 hmR0SvmMergeVmcbCtrlsNested(pVCpu);
2634
2635 /* Update the VMCB clean bits. */
2636 pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2637 }
2638 else
2639 {
2640 Assert(!pVCpu->hmr0.s.svm.fSyncVTpr);
2641 Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
2642 Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
2643 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPagingCfg == pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
2644 }
2645}
2646#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
2647
2648
2649/**
2650 * Exports the state shared between the host and guest (or nested-guest) into
2651 * the VMCB.
2652 *
2653 * @param pVCpu The cross context virtual CPU structure.
2654 * @param pVmcb Pointer to the VM control block.
2655 *
2656 * @remarks No-long-jump zone!!!
2657 */
2658static void hmR0SvmExportSharedState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
2659{
2660 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2661 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2662
2663 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
2664 hmR0SvmExportSharedDebugState(pVCpu, pVmcb);
2665
2666 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
2667 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE),
2668 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
2669}
2670
2671
2672/**
2673 * Worker for SVMR0ImportStateOnDemand.
2674 *
2675 * @param pVCpu The cross context virtual CPU structure.
2676 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2677 */
2678static void hmR0SvmImportGuestState(PVMCPUCC pVCpu, uint64_t fWhat)
2679{
2680 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
2681
2682 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2683 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
2684 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
2685 PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
2686
2687 /*
2688 * We disable interrupts to make the updating of the state and in particular
2689     * the fExtrn modification atomic wrt preemption hooks.
2690 */
2691 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
2692
2693 fWhat &= pCtx->fExtrn;
2694 if (fWhat)
2695 {
2696#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2697 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
2698 {
2699 if (pVmcbCtrl->IntCtrl.n.u1VGifEnable)
2700 {
2701 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); /* We don't yet support passing VGIF feature to the guest. */
2702 Assert(HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM))); /* VM has configured it. */
2703 CPUMSetGuestGif(pCtx, pVmcbCtrl->IntCtrl.n.u1VGif);
2704 }
2705 }
2706
2707 if (fWhat & CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ)
2708 {
2709 if ( !pVmcbCtrl->IntCtrl.n.u1VIrqPending
2710 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
2711 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2712 }
2713#endif
2714
2715 if (fWhat & CPUMCTX_EXTRN_HM_SVM_INT_SHADOW)
2716 {
2717 if (pVmcbCtrl->IntShadow.n.u1IntShadow)
2718 EMSetInhibitInterruptsPC(pVCpu, pVmcbGuest->u64RIP);
2719 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2720 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2721 }
2722
2723 if (fWhat & CPUMCTX_EXTRN_RIP)
2724 pCtx->rip = pVmcbGuest->u64RIP;
2725
2726 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
2727 pCtx->eflags.u32 = pVmcbGuest->u64RFlags;
2728
2729 if (fWhat & CPUMCTX_EXTRN_RSP)
2730 pCtx->rsp = pVmcbGuest->u64RSP;
2731
2732 if (fWhat & CPUMCTX_EXTRN_RAX)
2733 pCtx->rax = pVmcbGuest->u64RAX;
2734
2735 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
2736 {
2737 if (fWhat & CPUMCTX_EXTRN_CS)
2738 {
2739 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, CS, cs);
2740 /* Correct the CS granularity bit. Haven't seen it being wrong in any other register (yet). */
2741 /** @todo SELM might need to be fixed as it too should not care about the
2742 * granularity bit. See @bugref{6785}. */
2743 if ( !pCtx->cs.Attr.n.u1Granularity
2744 && pCtx->cs.Attr.n.u1Present
2745 && pCtx->cs.u32Limit > UINT32_C(0xfffff))
2746 {
2747 Assert((pCtx->cs.u32Limit & 0xfff) == 0xfff);
2748 pCtx->cs.Attr.n.u1Granularity = 1;
2749 }
2750 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, cs);
2751 }
2752 if (fWhat & CPUMCTX_EXTRN_SS)
2753 {
2754 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, SS, ss);
2755 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ss);
2756 /*
2757 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the
2758                 * VMCB and use that, so it's possible that the SS DPL isn't updated by AMD-V
2759                 * when the CPL changes during guest execution. Observed on some
2760 * AMD Fusion CPUs with 64-bit guests.
2761 *
2762 * See AMD spec. 15.5.1 "Basic operation".
2763 */
2764 Assert(!(pVmcbGuest->u8CPL & ~0x3));
2765 uint8_t const uCpl = pVmcbGuest->u8CPL;
2766 if (pCtx->ss.Attr.n.u2Dpl != uCpl)
2767 pCtx->ss.Attr.n.u2Dpl = uCpl & 0x3;
2768 }
2769 if (fWhat & CPUMCTX_EXTRN_DS)
2770 {
2771 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, DS, ds);
2772 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ds);
2773 }
2774 if (fWhat & CPUMCTX_EXTRN_ES)
2775 {
2776 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, ES, es);
2777 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, es);
2778 }
2779 if (fWhat & CPUMCTX_EXTRN_FS)
2780 {
2781 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, FS, fs);
2782 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, fs);
2783 }
2784 if (fWhat & CPUMCTX_EXTRN_GS)
2785 {
2786 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, GS, gs);
2787 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, gs);
2788 }
2789 }
2790
2791 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
2792 {
2793 if (fWhat & CPUMCTX_EXTRN_TR)
2794 {
2795 /*
2796                 * Fixup TR attributes so they're compatible with Intel. Important when saved-states
2797 * are used between Intel and AMD, see @bugref{6208#c39}.
2798 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode.
2799 */
2800 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, TR, tr);
2801 if (pCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
2802 {
2803 if ( pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2804 || CPUMIsGuestInLongModeEx(pCtx))
2805 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2806 else if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
2807 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
2808 }
2809 }
2810
2811 if (fWhat & CPUMCTX_EXTRN_LDTR)
2812 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, LDTR, ldtr);
2813
2814 if (fWhat & CPUMCTX_EXTRN_GDTR)
2815 {
2816 pCtx->gdtr.cbGdt = pVmcbGuest->GDTR.u32Limit;
2817 pCtx->gdtr.pGdt = pVmcbGuest->GDTR.u64Base;
2818 }
2819
2820 if (fWhat & CPUMCTX_EXTRN_IDTR)
2821 {
2822 pCtx->idtr.cbIdt = pVmcbGuest->IDTR.u32Limit;
2823 pCtx->idtr.pIdt = pVmcbGuest->IDTR.u64Base;
2824 }
2825 }
2826
2827 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2828 {
2829 pCtx->msrSTAR = pVmcbGuest->u64STAR;
2830 pCtx->msrLSTAR = pVmcbGuest->u64LSTAR;
2831 pCtx->msrCSTAR = pVmcbGuest->u64CSTAR;
2832 pCtx->msrSFMASK = pVmcbGuest->u64SFMASK;
2833 }
2834
2835 if ( (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2836 && !pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit /* Intercepted. AMD-V would clear the high 32 bits of EIP & ESP. */)
2837 {
2838 pCtx->SysEnter.cs = pVmcbGuest->u64SysEnterCS;
2839 pCtx->SysEnter.eip = pVmcbGuest->u64SysEnterEIP;
2840 pCtx->SysEnter.esp = pVmcbGuest->u64SysEnterESP;
2841 }
2842
2843 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2844 pCtx->msrKERNELGSBASE = pVmcbGuest->u64KernelGSBase;
2845
2846 if (fWhat & CPUMCTX_EXTRN_DR_MASK)
2847 {
2848 if (fWhat & CPUMCTX_EXTRN_DR6)
2849 {
2850 if (!pVCpu->hmr0.s.fUsingHyperDR7)
2851 pCtx->dr[6] = pVmcbGuest->u64DR6;
2852 else
2853 CPUMSetHyperDR6(pVCpu, pVmcbGuest->u64DR6);
2854 }
2855
2856 if (fWhat & CPUMCTX_EXTRN_DR7)
2857 {
2858 if (!pVCpu->hmr0.s.fUsingHyperDR7)
2859 pCtx->dr[7] = pVmcbGuest->u64DR7;
2860 else
2861 Assert(pVmcbGuest->u64DR7 == CPUMGetHyperDR7(pVCpu));
2862 }
2863 }
2864
2865 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
2866 {
2867 if (fWhat & CPUMCTX_EXTRN_CR0)
2868 {
2869 /* We intercept changes to all CR0 bits except maybe TS & MP bits. */
2870 uint64_t const uCr0 = (pCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP))
2871 | (pVmcbGuest->u64CR0 & (X86_CR0_TS | X86_CR0_MP));
2872 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
2873 CPUMSetGuestCR0(pVCpu, uCr0);
2874 VMMRZCallRing3Enable(pVCpu);
2875 }
2876
2877 if (fWhat & CPUMCTX_EXTRN_CR2)
2878 pCtx->cr2 = pVmcbGuest->u64CR2;
2879
2880 if (fWhat & CPUMCTX_EXTRN_CR3)
2881 {
2882 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
2883 && pCtx->cr3 != pVmcbGuest->u64CR3)
2884 {
2885 CPUMSetGuestCR3(pVCpu, pVmcbGuest->u64CR3);
2886 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2887 }
2888 }
2889
2890 /* Changes to CR4 are always intercepted. */
2891 }
2892
2893 /* Update fExtrn. */
2894 pCtx->fExtrn &= ~fWhat;
2895
2896 /* If everything has been imported, clear the HM keeper bit. */
2897 if (!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL))
2898 {
2899 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
2900 Assert(!pCtx->fExtrn);
2901 }
2902 }
2903 else
2904 Assert(!pCtx->fExtrn || (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
2905
2906 ASMSetFlags(fEFlags);
2907
2908 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
2909
2910 /*
2911 * Honor any pending CR3 updates.
2912 *
2913 * Consider this scenario: #VMEXIT -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp
2914 * -> SVMR0CallRing3Callback() -> VMMRZCallRing3Disable() -> hmR0SvmImportGuestState()
2915 * -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp -> continue with #VMEXIT
2916 * handling -> hmR0SvmImportGuestState() and here we are.
2917 *
2918     * The reason for such complicated handling is that VM-exits that call into PGM expect
2919 * CR3 to be up-to-date and thus any CR3-saves -before- the VM-exit (longjmp) would've
2920 * postponed the CR3 update via the force-flag and cleared CR3 from fExtrn. Any SVM R0
2921 * VM-exit handler that requests CR3 to be saved will end up here and we call PGMUpdateCR3().
2922 *
2923     * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock
2924     * again, nor does it process force-flags like regular exits to ring-3 do, so we cover for it here.
2925 */
2926 if ( VMMRZCallRing3IsEnabled(pVCpu)
2927 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
2928 {
2929 AssertMsg(pCtx->cr3 == pVmcbGuest->u64CR3, ("cr3=%#RX64 vmcb_cr3=%#RX64\n", pCtx->cr3, pVmcbGuest->u64CR3));
2930 PGMUpdateCR3(pVCpu, pCtx->cr3);
2931 }
2932}
2933
2934
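/*
 * A sketch of the fExtrn contract relied upon above (not built; the helper is
 * hypothetical): a set CPUMCTX_EXTRN_XXX bit means that piece of guest state
 * still lives only in the VMCB, and importing it clears the bit.
 */
#if 0
static bool hmR0SvmSketchNeedsImport(PCCPUMCTX pCtx, uint64_t fExtrnBit)
{
    return RT_BOOL(pCtx->fExtrn & fExtrnBit);   /* Still external -> must fetch it from the VMCB first. */
}
#endif

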
2935/**
2936 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
2937 * context.
2938 *
2939 * Currently there is no residual state left in the CPU that is not updated in the
2940 * VMCB.
2941 *
2942 * @returns VBox status code.
2943 * @param pVCpu The cross context virtual CPU structure.
2944 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2945 */
2946VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
2947{
2948 hmR0SvmImportGuestState(pVCpu, fWhat);
2949 return VINF_SUCCESS;
2950}
2951
2952
2953/**
2954 * Does the necessary state syncing before returning to ring-3 for any reason
2955 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
2956 *
2957 * @param pVCpu The cross context virtual CPU structure.
2958 * @param fImportState Whether to import the guest state from the VMCB back
2959 * to the guest-CPU context.
2960 *
2961 * @remarks No-long-jmp zone!!!
2962 */
2963static void hmR0SvmLeave(PVMCPUCC pVCpu, bool fImportState)
2964{
2965 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2966 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2967 Assert(VMMR0IsLogFlushDisabled(pVCpu));
2968
2969 /*
2970 * !!! IMPORTANT !!!
2971 * If you modify code here, make sure to check whether SVMR0CallRing3Callback() needs to be updated too.
2972 */
2973
2974 /* Save the guest state if necessary. */
2975 if (fImportState)
2976 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
2977
2978 /* Restore host FPU state if necessary and resync on next R0 reentry. */
2979 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
2980 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
2981
2982 /*
2983 * Restore host debug registers if necessary and resync on next R0 reentry.
2984 */
2985#ifdef VBOX_STRICT
2986 if (CPUMIsHyperDebugStateActive(pVCpu))
2987 {
2988 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb; /** @todo nested-guest. */
2989 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
2990 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
2991 }
2992#endif
2993 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
2994 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
2995 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
2996
2997 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
2998 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
2999 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
3000 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
3001 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
3002 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitVmentry);
3003 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
3004
3005 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
3006}
3007
3008
3009/**
3010 * Leaves the AMD-V session.
3011 *
3012 * Only used while returning to ring-3 either due to longjump or exits to
3013 * ring-3.
3014 *
3015 * @returns VBox status code.
3016 * @param pVCpu The cross context virtual CPU structure.
3017 */
3018static int hmR0SvmLeaveSession(PVMCPUCC pVCpu)
3019{
3020 HM_DISABLE_PREEMPT(pVCpu);
3021 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3022 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3023
3024 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
3025 and done this from the SVMR0ThreadCtxCallback(). */
3026 if (!pVCpu->hmr0.s.fLeaveDone)
3027 {
3028 hmR0SvmLeave(pVCpu, true /* fImportState */);
3029 pVCpu->hmr0.s.fLeaveDone = true;
3030 }
3031
3032 /*
3033 * !!! IMPORTANT !!!
3034 * If you modify code here, make sure to check whether SVMR0CallRing3Callback() needs to be updated too.
3035 */
3036
3037 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
3038 /* Deregister hook now that we've left HM context before re-enabling preemption. */
3039 VMMR0ThreadCtxHookDisable(pVCpu);
3040
3041 /* Leave HM context. This takes care of local init (term). */
3042 int rc = HMR0LeaveCpu(pVCpu);
3043
3044 HM_RESTORE_PREEMPT();
3045 return rc;
3046}
3047
3048
3049/**
3050 * Does the necessary state syncing before doing a longjmp to ring-3.
3051 *
3052 * @returns VBox status code.
3053 * @param pVCpu The cross context virtual CPU structure.
3054 *
3055 * @remarks No-long-jmp zone!!!
3056 */
3057static int hmR0SvmLongJmpToRing3(PVMCPUCC pVCpu)
3058{
3059 return hmR0SvmLeaveSession(pVCpu);
3060}
3061
3062
3063/**
3064 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
3065 * any remaining host state) before we longjump to ring-3 and possibly get
3066 * preempted.
3067 *
3068 * @param pVCpu The cross context virtual CPU structure.
3069 * @param enmOperation The operation causing the ring-3 longjump.
3070 */
3071VMMR0DECL(int) SVMR0CallRing3Callback(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation)
3072{
3073 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
3074 {
3075 /*
3076 * !!! IMPORTANT !!!
3077 * If you modify code here, make sure to check whether hmR0SvmLeave() and hmR0SvmLeaveSession() needs
3078 * to be updated too. This is a stripped down version which gets out ASAP trying to not trigger any assertion.
3079 */
3080 VMMRZCallRing3RemoveNotification(pVCpu);
3081 VMMRZCallRing3Disable(pVCpu);
3082 HM_DISABLE_PREEMPT(pVCpu);
3083
3084 /* Import the entire guest state. */
3085 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3086
3087 /* Restore host FPU state if necessary and resync on next R0 reentry. */
3088 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
3089
3090 /* Restore host debug registers if necessary and resync on next R0 reentry. */
3091 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
3092
3093 /* Deregister the hook now that we've left HM context before re-enabling preemption. */
3094 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
3095 VMMR0ThreadCtxHookDisable(pVCpu);
3096
3097 /* Leave HM context. This takes care of local init (term). */
3098 HMR0LeaveCpu(pVCpu);
3099
3100 HM_RESTORE_PREEMPT();
3101 return VINF_SUCCESS;
3102 }
3103
3104 Assert(pVCpu);
3105 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3106 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
3107
3108 VMMRZCallRing3Disable(pVCpu);
3109 Assert(VMMR0IsLogFlushDisabled(pVCpu));
3110
3111 Log4Func(("Calling hmR0SvmLongJmpToRing3\n"));
3112 int rc = hmR0SvmLongJmpToRing3(pVCpu);
3113 AssertRCReturn(rc, rc);
3114
3115 VMMRZCallRing3Enable(pVCpu);
3116 return VINF_SUCCESS;
3117}
3118
3119
3120/**
3121 * Take necessary actions before going back to ring-3.
3122 *
3123 * An action requires us to go back to ring-3. This function does the necessary
3124 * steps before we can safely return to ring-3. This is not the same as a longjmp
3125 * to ring-3; this exit is voluntary.
3126 *
3127 * @returns Strict VBox status code.
3128 * @param pVCpu The cross context virtual CPU structure.
3129 * @param rcExit The reason for exiting to ring-3. Can be
3130 * VINF_VMM_UNKNOWN_RING3_CALL.
3131 */
3132static VBOXSTRICTRC hmR0SvmExitToRing3(PVMCPUCC pVCpu, VBOXSTRICTRC rcExit)
3133{
3134 Assert(pVCpu);
3135 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
3136
3137 /* Please, no longjumps here (a logging flush mustn't jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
3138 VMMRZCallRing3Disable(pVCpu);
3139 Log4Func(("rcExit=%d LocalFF=%#RX64 GlobalFF=%#RX32\n", VBOXSTRICTRC_VAL(rcExit), (uint64_t)pVCpu->fLocalForcedActions,
3140 pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions));
3141
3142 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring-3. */
3143 if (pVCpu->hm.s.Event.fPending)
3144 {
3145 hmR0SvmPendingEventToTrpmTrap(pVCpu);
3146 Assert(!pVCpu->hm.s.Event.fPending);
3147 }
3148
3149 /* Sync. the necessary state for going back to ring-3. */
3150 hmR0SvmLeaveSession(pVCpu);
3151 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
3152
3153 /* Thread-context hooks are unregistered at this point!!! */
3154 /* Ring-3 callback notifications are unregistered at this point!!! */
3155
3156 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
3157 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
3158 | CPUM_CHANGED_LDTR
3159 | CPUM_CHANGED_GDTR
3160 | CPUM_CHANGED_IDTR
3161 | CPUM_CHANGED_TR
3162 | CPUM_CHANGED_HIDDEN_SEL_REGS);
3163 if ( pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging
3164 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
3165 {
3166 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
3167 }
3168
3169 /* Update the exit-to-ring-3 reason. */
3170 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
3171
3172 /* On our way back from ring-3, reload the guest-CPU state if it may change while in ring-3. */
3173 if ( rcExit != VINF_EM_RAW_INTERRUPT
3174 || CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
3175 {
3176 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
3177 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
3178 }
3179
3180 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
3181 VMMRZCallRing3Enable(pVCpu);
3182
3183 /*
3184 * If we're emulating an instruction, we shouldn't have any TRPM traps pending,
3185 * and if we're injecting an event, we should have a TRPM trap pending.
3186 */
3187 AssertReturnStmt(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu),
3188 pVCpu->hm.s.u32HMError = VBOXSTRICTRC_VAL(rcExit),
3189 VERR_SVM_IPE_5);
3190 AssertReturnStmt(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu),
3191 pVCpu->hm.s.u32HMError = VBOXSTRICTRC_VAL(rcExit),
3192 VERR_SVM_IPE_4);
3193
3194 return rcExit;
3195}
3196
3197
3198/**
3199 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
3200 * intercepts.
3201 *
3202 * @param pVCpu The cross context virtual CPU structure.
3203 * @param pVmcb Pointer to the VM control block.
3204 *
3205 * @remarks No-long-jump zone!!!
3206 */
3207static void hmR0SvmUpdateTscOffsetting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3208{
3209 /*
3210 * Avoid intercepting RDTSC/RDTSCP if we determined the host TSC (++) is stable,
3211 * and in the case of a nested-guest, only if the nested-guest VMCB does not
3212 * intercept RDTSC/RDTSCP either.
3213 */
3214 bool fParavirtTsc;
3215 uint64_t uTscOffset;
3216 bool const fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu->CTX_SUFF(pVM), pVCpu, &uTscOffset, &fParavirtTsc);
3217
3218 bool fIntercept;
3219 if (fCanUseRealTsc)
3220 fIntercept = hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
3221 else
3222 {
3223 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP);
3224 fIntercept = true;
3225 }
3226
3227 if (!fIntercept)
3228 {
3229#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3230 /* Apply the nested-guest VMCB's TSC offset over the guest TSC offset. */
3231 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
3232 uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
3233#endif
3234
3235 /* Update the TSC offset in the VMCB and the relevant clean bits. */
3236 pVmcb->ctrl.u64TSCOffset = uTscOffset;
3237 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
3238 }
3239
3240 /* Currently neither Hyper-V nor KVM needs to update its paravirt. TSC
3241 information before every VM-entry, hence we have nothing to do here at the moment. */
3242 if (fParavirtTsc)
3243 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
3244}
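/* For reference: with the RDTSC intercept clear, the value the guest reads is
       guest_tsc = host_tsc + VMCB.ctrl.u64TSCOffset
   which is why only the non-intercepting path above needs to refresh the offset. */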
3245
3246
3247/**
3248 * Sets an event as a pending event to be injected into the guest.
3249 *
3250 * @param pVCpu The cross context virtual CPU structure.
3251 * @param pEvent Pointer to the SVM event.
3252 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
3253 * page-fault.
3254 *
3255 * @remarks The statistics counter assumes this is a guest event being reflected to
3256 * the guest, i.e. 'StatInjectPendingReflect' is always incremented.
3257 */
3258DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPUCC pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
3259{
3260 Assert(!pVCpu->hm.s.Event.fPending);
3261 Assert(pEvent->n.u1Valid);
3262
3263 pVCpu->hm.s.Event.u64IntInfo = pEvent->u;
3264 pVCpu->hm.s.Event.fPending = true;
3265 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
3266
3267 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
3268 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3269}
3270
3271
3272/**
3273 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
3274 *
3275 * @param pVCpu The cross context virtual CPU structure.
3276 */
3277DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPUCC pVCpu)
3278{
3279 SVMEVENT Event;
3280 Event.u = 0;
3281 Event.n.u1Valid = 1;
3282 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3283 Event.n.u8Vector = X86_XCPT_UD;
3284 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3285}
3286
3287
3288/**
3289 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
3290 *
3291 * @param pVCpu The cross context virtual CPU structure.
3292 */
3293DECLINLINE(void) hmR0SvmSetPendingXcptDB(PVMCPUCC pVCpu)
3294{
3295 SVMEVENT Event;
3296 Event.u = 0;
3297 Event.n.u1Valid = 1;
3298 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3299 Event.n.u8Vector = X86_XCPT_DB;
3300 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3301}
3302
3303
3304/**
3305 * Sets a page fault (\#PF) exception as pending-for-injection into the VM.
3306 *
3307 * @param pVCpu The cross context virtual CPU structure.
3308 * @param u32ErrCode The error-code for the page-fault.
3309 * @param uFaultAddress The page fault address (CR2).
3310 *
3311 * @remarks This updates the guest CR2 with @a uFaultAddress!
3312 */
3313DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPUCC pVCpu, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
3314{
3315 SVMEVENT Event;
3316 Event.u = 0;
3317 Event.n.u1Valid = 1;
3318 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3319 Event.n.u8Vector = X86_XCPT_PF;
3320 Event.n.u1ErrorCodeValid = 1;
3321 Event.n.u32ErrorCode = u32ErrCode;
3322
3323 /* Update CR2 of the guest. */
3324 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR2);
3325 if (pVCpu->cpum.GstCtx.cr2 != uFaultAddress)
3326 {
3327 pVCpu->cpum.GstCtx.cr2 = uFaultAddress;
3328 /* The VMCB clean bit for CR2 will be updated while re-loading the guest state. */
3329 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
3330 }
3331
3332 hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
3333}
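/* For reference, u32ErrCode uses the architectural #PF error-code bits (x86.h):
   X86_TRAP_PF_P (bit 0), X86_TRAP_PF_RW (bit 1), X86_TRAP_PF_US (bit 2),
   X86_TRAP_PF_RSVD (bit 3) and X86_TRAP_PF_ID (bit 4). */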
3334
3335
3336/**
3337 * Sets a math-fault (\#MF) exception as pending-for-injection into the VM.
3338 *
3339 * @param pVCpu The cross context virtual CPU structure.
3340 */
3341DECLINLINE(void) hmR0SvmSetPendingXcptMF(PVMCPUCC pVCpu)
3342{
3343 SVMEVENT Event;
3344 Event.u = 0;
3345 Event.n.u1Valid = 1;
3346 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3347 Event.n.u8Vector = X86_XCPT_MF;
3348 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3349}
3350
3351
3352/**
3353 * Sets a double fault (\#DF) exception as pending-for-injection into the VM.
3354 *
3355 * @param pVCpu The cross context virtual CPU structure.
3356 */
3357DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPUCC pVCpu)
3358{
3359 SVMEVENT Event;
3360 Event.u = 0;
3361 Event.n.u1Valid = 1;
3362 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3363 Event.n.u8Vector = X86_XCPT_DF;
3364 Event.n.u1ErrorCodeValid = 1;
3365 Event.n.u32ErrorCode = 0;
3366 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3367}
3368
3369
3370/**
3371 * Injects an event into the guest upon VMRUN by updating the relevant field
3372 * in the VMCB.
3373 *
3374 * @param pVCpu The cross context virtual CPU structure.
3375 * @param pVmcb Pointer to the guest VM control block.
3376 * @param pEvent Pointer to the event.
3377 *
3378 * @remarks No-long-jump zone!!!
3379 * @remarks Requires CR0!
3380 */
3381DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPUCC pVCpu, PSVMVMCB pVmcb, PSVMEVENT pEvent)
3382{
3383 Assert(!pVmcb->ctrl.EventInject.n.u1Valid);
3384 pVmcb->ctrl.EventInject.u = pEvent->u;
3385 if ( pVmcb->ctrl.EventInject.n.u3Type == SVM_EVENT_EXCEPTION
3386 || pVmcb->ctrl.EventInject.n.u3Type == SVM_EVENT_NMI)
3387 {
3388 Assert(pEvent->n.u8Vector <= X86_XCPT_LAST);
3389 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedXcptsR0[pEvent->n.u8Vector]);
3390 }
3391 else
3392 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
3393 RT_NOREF(pVCpu);
3394
3395 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
3396 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
3397}
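/* For reference, EVENTINJ (AMD spec. 15.20 "Event Injection") is laid out as:
   bits 7:0 vector, bits 10:8 type (0=external interrupt, 2=NMI, 3=exception,
   4=software interrupt), bit 11 error-code valid, bit 31 valid and bits 63:32
   the error code -- SVMEVENT mirrors this layout, hence the direct copy of
   pEvent->u into EventInject.u above. */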
3398
3399
3400
3401/**
3402 * Converts any TRPM trap into a pending HM event. This is typically used when
3403 * entering from ring-3 (not longjmp returns).
3404 *
3405 * @param pVCpu The cross context virtual CPU structure.
3406 */
3407static void hmR0SvmTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
3408{
3409 Assert(TRPMHasTrap(pVCpu));
3410 Assert(!pVCpu->hm.s.Event.fPending);
3411
3412 uint8_t uVector;
3413 TRPMEVENT enmTrpmEvent;
3414 uint32_t uErrCode;
3415 RTGCUINTPTR GCPtrFaultAddress;
3416 uint8_t cbInstr;
3417
3418 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, NULL /* pfIcebp */);
3419 AssertRC(rc);
3420
3421 SVMEVENT Event;
3422 Event.u = 0;
3423 Event.n.u1Valid = 1;
3424 Event.n.u8Vector = uVector;
3425
3426 /* Refer AMD spec. 15.20 "Event Injection" for the format. */
3427 if (enmTrpmEvent == TRPM_TRAP)
3428 {
3429 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3430 switch (uVector)
3431 {
3432 case X86_XCPT_NMI:
3433 {
3434 Event.n.u3Type = SVM_EVENT_NMI;
3435 break;
3436 }
3437
3438 case X86_XCPT_BP:
3439 case X86_XCPT_OF:
3440 AssertMsgFailed(("Invalid TRPM vector %d for event type %d\n", uVector, enmTrpmEvent));
3441 RT_FALL_THRU();
3442
3443 case X86_XCPT_PF:
3444 case X86_XCPT_DF:
3445 case X86_XCPT_TS:
3446 case X86_XCPT_NP:
3447 case X86_XCPT_SS:
3448 case X86_XCPT_GP:
3449 case X86_XCPT_AC:
3450 {
3451 Event.n.u1ErrorCodeValid = 1;
3452 Event.n.u32ErrorCode = uErrCode;
3453 break;
3454 }
3455 }
3456 }
3457 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
3458 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3459 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
3460 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
3461 else
3462 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
3463
3464 rc = TRPMResetTrap(pVCpu);
3465 AssertRC(rc);
3466
3467 Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
3468 !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
3469
3470 hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
3471}
3472
3473
3474/**
3475 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
3476 * AMD-V to execute any instruction.
3477 *
3478 * @param pVCpu The cross context virtual CPU structure.
3479 */
3480static void hmR0SvmPendingEventToTrpmTrap(PVMCPUCC pVCpu)
3481{
3482 Assert(pVCpu->hm.s.Event.fPending);
3483 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
3484
3485 SVMEVENT Event;
3486 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3487
3488 uint8_t uVector = Event.n.u8Vector;
3489 TRPMEVENT enmTrapType = HMSvmEventToTrpmEventType(&Event, uVector);
3490
3491 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, Event.n.u3Type));
3492
3493 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
3494 AssertRC(rc);
3495
3496 if (Event.n.u1ErrorCodeValid)
3497 TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
3498
3499 if ( enmTrapType == TRPM_TRAP
3500 && uVector == X86_XCPT_PF)
3501 {
3502 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
3503 Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
3504 }
3505 else if (enmTrapType == TRPM_SOFTWARE_INT)
3506 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
3507 pVCpu->hm.s.Event.fPending = false;
3508}
3509
3510
3511/**
3512 * Checks if the guest (or nested-guest) has an interrupt shadow active right
3513 * now.
3514 *
3515 * @returns @c true if the interrupt shadow is active, @c false otherwise.
3516 * @param pVCpu The cross context virtual CPU structure.
3517 *
3518 * @remarks No-long-jump zone!!!
3519 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
3520 */
3521static bool hmR0SvmIsIntrShadowActive(PVMCPUCC pVCpu)
3522{
3523 /*
3524 * Instructions like STI and MOV SS inhibit interrupts till the next instruction
3525 * completes. Check if we should inhibit interrupts or clear any existing
3526 * interrupt inhibition.
3527 */
3528 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3529 {
3530 if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
3531 {
3532 /*
3533 * We can clear the inhibit force flag as even if we go back to the recompiler
3534 * without executing guest code in AMD-V, the flag's condition to be cleared is
3535 * met and thus the cleared state is correct.
3536 */
3537 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3538 return false;
3539 }
3540 return true;
3541 }
3542 return false;
3543}
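/* Example: a guest "STI; HLT" sequence must execute the HLT before any interrupt
   is delivered; the shadow applies only while RIP still equals the PC recorded by
   EMGetInhibitInterruptsPC(), otherwise the force-flag is stale and gets cleared. */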
3544
3545
3546/**
3547 * Sets the virtual interrupt intercept control in the VMCB.
3548 *
3549 * @param pVCpu The cross context virtual CPU structure.
3550 * @param pVmcb Pointer to the VM control block.
3551 */
3552static void hmR0SvmSetIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3553{
3554 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); NOREF(pVCpu);
3555
3556 /*
3557 * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when the guest
3558 * is ready to accept interrupts. At #VMEXIT, we then get the interrupt from the APIC
3559 * (updating ISR at the right time) and inject the interrupt.
3560 *
3561 * When AVIC is supported, we could make use of asynchronous delivery without a
3562 * #VMEXIT and we would be passing the AVIC page to SVM.
3563 *
3564 * In AMD-V, an interrupt window is achieved using a combination of V_IRQ (an interrupt
3565 * is pending), V_IGN_TPR (ignore TPR priorities) and the VINTR intercept all being set.
3566 */
3567 Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
3568 pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
3569 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
3570 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
3571 Log4(("Set VINTR intercept\n"));
3572}
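/* Note: with V_IRQ, V_IGN_TPR and the VINTR intercept all set, the CPU takes a
   #VMEXIT(SVM_EXIT_VINTR) the moment the guest can accept an interrupt, instead of
   delivering the (dummy) virtual vector -- that exit is our "interrupt window". */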
3573
3574
3575/**
3576 * Clears the virtual interrupt intercept control in the VMCB as
3577 * we have figured the guest is unable to process any interrupts
3578 * at this point in time.
3579 *
3580 * @param pVCpu The cross context virtual CPU structure.
3581 * @param pVmcb Pointer to the VM control block.
3582 */
3583static void hmR0SvmClearIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3584{
3585 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx); NOREF(pVCpu);
3586
3587 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
3588 if ( pVmcbCtrl->IntCtrl.n.u1VIrqPending
3589 || (pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
3590 {
3591 pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
3592 pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
3593 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_VINTR);
3594 Log4(("Cleared VINTR intercept\n"));
3595 }
3596}
3597
3598
3599/**
3600 * Evaluates the event to be delivered to the guest and sets it as the pending
3601 * event.
3602 *
3603 * @returns Strict VBox status code.
3604 * @param pVCpu The cross context virtual CPU structure.
3605 * @param pSvmTransient Pointer to the SVM transient structure.
3606 */
3607static VBOXSTRICTRC hmR0SvmEvaluatePendingEvent(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
3608{
3609 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3610 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT
3611 | CPUMCTX_EXTRN_RFLAGS
3612 | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW
3613 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);
3614
3615 Assert(!pVCpu->hm.s.Event.fPending);
3616 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
3617 Assert(pVmcb);
3618
3619 bool const fGif = CPUMGetGuestGif(pCtx);
3620 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
3621 bool const fBlockNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3622
3623 Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n",
3624 fGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
3625 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
3626
3627 /** @todo SMI. SMIs take priority over NMIs. */
3628
3629 /*
3630 * Check if the guest or nested-guest can receive NMIs.
3631 * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
3632 * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
3633 */
3634 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
3635 && !fBlockNmi)
3636 {
3637 if ( fGif
3638 && !fIntShadow)
3639 {
3640#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3641 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
3642 {
3643 Log4(("Intercepting NMI -> #VMEXIT\n"));
3644 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3645 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
3646 }
3647#endif
3648 Log4(("Setting NMI pending for injection\n"));
3649 SVMEVENT Event;
3650 Event.u = 0;
3651 Event.n.u1Valid = 1;
3652 Event.n.u8Vector = X86_XCPT_NMI;
3653 Event.n.u3Type = SVM_EVENT_NMI;
3654 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3655 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3656 }
3657 else if (!fGif)
3658 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
3659 else if (!pSvmTransient->fIsNestedGuest)
3660 hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
3661 /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
3662 }
3663 /*
3664 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt()
3665 * returns a valid interrupt we -must- deliver the interrupt. We can no longer re-request
3666 * it from the APIC device.
3667 *
3668 * For nested-guests, physical interrupts always take priority over virtual interrupts.
3669 * We don't need to inject nested-guest virtual interrupts here, we can let the hardware
3670 * do that work when we execute nested-guest code esp. since all the required information
3671 * is in the VMCB, unlike physical interrupts where we need to fetch the interrupt from
3672 * the virtual interrupt controller.
3673 *
3674 * See AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
3675 */
3676 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
3677 && !pVCpu->hm.s.fSingleInstruction)
3678 {
3679 bool const fBlockInt = !pSvmTransient->fIsNestedGuest ? !(pCtx->eflags.u32 & X86_EFL_IF)
3680 : CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx);
3681 if ( fGif
3682 && !fBlockInt
3683 && !fIntShadow)
3684 {
3685#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3686 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
3687 {
3688 Log4(("Intercepting INTR -> #VMEXIT\n"));
3689 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3690 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
3691 }
3692#endif
3693 uint8_t u8Interrupt;
3694 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
3695 if (RT_SUCCESS(rc))
3696 {
3697 Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
3698 SVMEVENT Event;
3699 Event.u = 0;
3700 Event.n.u1Valid = 1;
3701 Event.n.u8Vector = u8Interrupt;
3702 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
3703 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3704 }
3705 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3706 {
3707 /*
3708 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
3709 * updated eventually when the TPR is written by the guest.
3710 */
3711 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
3712 }
3713 else
3714 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
3715 }
3716 else if (!fGif)
3717 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
3718 else if (!pSvmTransient->fIsNestedGuest)
3719 hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
3720 /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
3721 }
3722
3723 return VINF_SUCCESS;
3724}
3725
3726
3727/**
3728 * Injects any pending events into the guest (or nested-guest).
3729 *
3730 * @param pVCpu The cross context virtual CPU structure.
3731 * @param pVmcb Pointer to the VM control block.
3732 *
3733 * @remarks Must only be called when we are guaranteed to enter
3734 * hardware-assisted SVM execution and not return to ring-3
3735 * prematurely.
3736 */
3737static void hmR0SvmInjectPendingEvent(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
3738{
3739 Assert(!TRPMHasTrap(pVCpu));
3740 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3741
3742 bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
3743#ifdef VBOX_STRICT
3744 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3745 bool const fGif = CPUMGetGuestGif(pCtx);
3746 bool fAllowInt = fGif;
3747 if (fGif)
3748 {
3749 /*
3750 * For nested-guests we have no way to determine if we're injecting a physical or
3751 * virtual interrupt at this point. Hence the partial verification below.
3752 */
3753 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
3754 fAllowInt = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx) || CPUMIsGuestSvmVirtIntrEnabled(pVCpu, pCtx);
3755 else
3756 fAllowInt = RT_BOOL(pCtx->eflags.u32 & X86_EFL_IF);
3757 }
3758#endif
3759
3760 if (pVCpu->hm.s.Event.fPending)
3761 {
3762 SVMEVENT Event;
3763 Event.u = pVCpu->hm.s.Event.u64IntInfo;
3764 Assert(Event.n.u1Valid);
3765
3766 /*
3767 * Validate event injection pre-conditions.
3768 */
3769 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3770 {
3771 Assert(fAllowInt);
3772 Assert(!fIntShadow);
3773 }
3774 else if (Event.n.u3Type == SVM_EVENT_NMI)
3775 {
3776 Assert(fGif);
3777 Assert(!fIntShadow);
3778 }
3779
3780 /*
3781 * Before injecting an NMI we must set VMCPU_FF_BLOCK_NMIS to prevent nested NMIs. We
3782 * do this only when we are surely going to inject the NMI as otherwise if we return
3783 * to ring-3 prematurely we could leave NMIs blocked indefinitely upon re-entry into
3784 * SVM R0.
3785 *
3786 * With VT-x, this is handled by the Guest interruptibility information VMCS field
3787 * which will set the VMCS field after actually delivering the NMI which we read on
3788 * VM-exit to determine the state.
3789 */
3790 if ( Event.n.u3Type == SVM_EVENT_NMI
3791 && Event.n.u8Vector == X86_XCPT_NMI
3792 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3793 {
3794 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3795 }
3796
3797 /*
3798 * Inject it (update VMCB for injection by the hardware).
3799 */
3800 Log4(("Injecting pending HM event\n"));
3801 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, &Event);
3802 pVCpu->hm.s.Event.fPending = false;
3803
3804 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
3805 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
3806 else
3807 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
3808 }
3809 else
3810 Assert(pVmcb->ctrl.EventInject.n.u1Valid == 0);
3811
3812 /*
3813 * We could have injected an NMI through IEM and continue guest execution using
3814 * hardware-assisted SVM. In which case, we would not have any events pending (above)
3815 * but we still need to intercept IRET in order to eventually clear NMI inhibition.
3816 */
3817 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3818 hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);
3819
3820 /*
3821 * Update the guest interrupt shadow in the guest (or nested-guest) VMCB.
3822 *
3823 * For nested-guests: We need to update it too for the scenario where IEM executes
3824 * the nested-guest but execution later continues here with an interrupt shadow active.
3825 */
3826 pVmcb->ctrl.IntShadow.n.u1IntShadow = fIntShadow;
3827}
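/* Note: the IRET intercept set above fires before the guest's IRET is executed;
   the corresponding #VMEXIT handler in this file clears VMCPU_FF_BLOCK_NMIS and
   drops the intercept again, re-enabling NMI delivery. */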
3828
3829
3830/**
3831 * Reports world-switch error and dumps some useful debug info.
3832 *
3833 * @param pVCpu The cross context virtual CPU structure.
3834 * @param rcVMRun The return code from VMRUN (or
3835 * VERR_SVM_INVALID_GUEST_STATE for invalid
3836 * guest-state).
3837 */
3838static void hmR0SvmReportWorldSwitchError(PVMCPUCC pVCpu, int rcVMRun)
3839{
3840 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
3841 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
3842 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
3843
3844 if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
3845 {
3846#ifdef VBOX_STRICT
3847 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
3848 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
3849 Log4(("ctrl.u32VmcbCleanBits %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
3850 Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
3851 Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx));
3852 Log4(("ctrl.u16InterceptRdDRx %#x\n", pVmcb->ctrl.u16InterceptRdDRx));
3853 Log4(("ctrl.u16InterceptWrDRx %#x\n", pVmcb->ctrl.u16InterceptWrDRx));
3854 Log4(("ctrl.u32InterceptXcpt %#x\n", pVmcb->ctrl.u32InterceptXcpt));
3855 Log4(("ctrl.u64InterceptCtrl %#RX64\n", pVmcb->ctrl.u64InterceptCtrl));
3856 Log4(("ctrl.u64IOPMPhysAddr %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
3857 Log4(("ctrl.u64MSRPMPhysAddr %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
3858 Log4(("ctrl.u64TSCOffset %#RX64\n", pVmcb->ctrl.u64TSCOffset));
3859
3860 Log4(("ctrl.TLBCtrl.u32ASID %#x\n", pVmcb->ctrl.TLBCtrl.n.u32ASID));
3861 Log4(("ctrl.TLBCtrl.u8TLBFlush %#x\n", pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
3862 Log4(("ctrl.TLBCtrl.u24Reserved %#x\n", pVmcb->ctrl.TLBCtrl.n.u24Reserved));
3863
3864 Log4(("ctrl.IntCtrl.u8VTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u8VTPR));
3865 Log4(("ctrl.IntCtrl.u1VIrqPending %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqPending));
3866 Log4(("ctrl.IntCtrl.u1VGif %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGif));
3867 Log4(("ctrl.IntCtrl.u6Reserved0 %#x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved));
3868 Log4(("ctrl.IntCtrl.u4VIntrPrio %#x\n", pVmcb->ctrl.IntCtrl.n.u4VIntrPrio));
3869 Log4(("ctrl.IntCtrl.u1IgnoreTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
3870 Log4(("ctrl.IntCtrl.u3Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
3871 Log4(("ctrl.IntCtrl.u1VIntrMasking %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIntrMasking));
3872 Log4(("ctrl.IntCtrl.u1VGifEnable %#x\n", pVmcb->ctrl.IntCtrl.n.u1VGifEnable));
3873 Log4(("ctrl.IntCtrl.u5Reserved1 %#x\n", pVmcb->ctrl.IntCtrl.n.u5Reserved));
3874 Log4(("ctrl.IntCtrl.u8VIntrVector %#x\n", pVmcb->ctrl.IntCtrl.n.u8VIntrVector));
3875 Log4(("ctrl.IntCtrl.u24Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved));
3876
3877 Log4(("ctrl.IntShadow.u1IntShadow %#x\n", pVmcb->ctrl.IntShadow.n.u1IntShadow));
3878 Log4(("ctrl.IntShadow.u1GuestIntMask %#x\n", pVmcb->ctrl.IntShadow.n.u1GuestIntMask));
3879 Log4(("ctrl.u64ExitCode %#RX64\n", pVmcb->ctrl.u64ExitCode));
3880 Log4(("ctrl.u64ExitInfo1 %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
3881 Log4(("ctrl.u64ExitInfo2 %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
3882 Log4(("ctrl.ExitIntInfo.u8Vector %#x\n", pVmcb->ctrl.ExitIntInfo.n.u8Vector));
3883 Log4(("ctrl.ExitIntInfo.u3Type %#x\n", pVmcb->ctrl.ExitIntInfo.n.u3Type));
3884 Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
3885 Log4(("ctrl.ExitIntInfo.u19Reserved %#x\n", pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
3886 Log4(("ctrl.ExitIntInfo.u1Valid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1Valid));
3887 Log4(("ctrl.ExitIntInfo.u32ErrorCode %#x\n", pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
3888 Log4(("ctrl.NestedPagingCtrl.u1NestedPaging %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging));
3889 Log4(("ctrl.NestedPagingCtrl.u1Sev %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1Sev));
3890 Log4(("ctrl.NestedPagingCtrl.u1SevEs %#x\n", pVmcb->ctrl.NestedPagingCtrl.n.u1SevEs));
3891 Log4(("ctrl.EventInject.u8Vector %#x\n", pVmcb->ctrl.EventInject.n.u8Vector));
3892 Log4(("ctrl.EventInject.u3Type %#x\n", pVmcb->ctrl.EventInject.n.u3Type));
3893 Log4(("ctrl.EventInject.u1ErrorCodeValid %#x\n", pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
3894 Log4(("ctrl.EventInject.u19Reserved %#x\n", pVmcb->ctrl.EventInject.n.u19Reserved));
3895 Log4(("ctrl.EventInject.u1Valid %#x\n", pVmcb->ctrl.EventInject.n.u1Valid));
3896 Log4(("ctrl.EventInject.u32ErrorCode %#x\n", pVmcb->ctrl.EventInject.n.u32ErrorCode));
3897
3898 Log4(("ctrl.u64NestedPagingCR3 %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));
3899
3900 Log4(("ctrl.LbrVirt.u1LbrVirt %#x\n", pVmcb->ctrl.LbrVirt.n.u1LbrVirt));
3901 Log4(("ctrl.LbrVirt.u1VirtVmsaveVmload %#x\n", pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload));
3902
3903 Log4(("guest.CS.u16Sel %RTsel\n", pVmcb->guest.CS.u16Sel));
3904 Log4(("guest.CS.u16Attr %#x\n", pVmcb->guest.CS.u16Attr));
3905 Log4(("guest.CS.u32Limit %#RX32\n", pVmcb->guest.CS.u32Limit));
3906 Log4(("guest.CS.u64Base %#RX64\n", pVmcb->guest.CS.u64Base));
3907 Log4(("guest.DS.u16Sel %#RTsel\n", pVmcb->guest.DS.u16Sel));
3908 Log4(("guest.DS.u16Attr %#x\n", pVmcb->guest.DS.u16Attr));
3909 Log4(("guest.DS.u32Limit %#RX32\n", pVmcb->guest.DS.u32Limit));
3910 Log4(("guest.DS.u64Base %#RX64\n", pVmcb->guest.DS.u64Base));
3911 Log4(("guest.ES.u16Sel %RTsel\n", pVmcb->guest.ES.u16Sel));
3912 Log4(("guest.ES.u16Attr %#x\n", pVmcb->guest.ES.u16Attr));
3913 Log4(("guest.ES.u32Limit %#RX32\n", pVmcb->guest.ES.u32Limit));
3914 Log4(("guest.ES.u64Base %#RX64\n", pVmcb->guest.ES.u64Base));
3915 Log4(("guest.FS.u16Sel %RTsel\n", pVmcb->guest.FS.u16Sel));
3916 Log4(("guest.FS.u16Attr %#x\n", pVmcb->guest.FS.u16Attr));
3917 Log4(("guest.FS.u32Limit %#RX32\n", pVmcb->guest.FS.u32Limit));
3918 Log4(("guest.FS.u64Base %#RX64\n", pVmcb->guest.FS.u64Base));
3919 Log4(("guest.GS.u16Sel %RTsel\n", pVmcb->guest.GS.u16Sel));
3920 Log4(("guest.GS.u16Attr %#x\n", pVmcb->guest.GS.u16Attr));
3921 Log4(("guest.GS.u32Limit %#RX32\n", pVmcb->guest.GS.u32Limit));
3922 Log4(("guest.GS.u64Base %#RX64\n", pVmcb->guest.GS.u64Base));
3923
3924 Log4(("guest.GDTR.u32Limit %#RX32\n", pVmcb->guest.GDTR.u32Limit));
3925 Log4(("guest.GDTR.u64Base %#RX64\n", pVmcb->guest.GDTR.u64Base));
3926
3927 Log4(("guest.LDTR.u16Sel %RTsel\n", pVmcb->guest.LDTR.u16Sel));
3928 Log4(("guest.LDTR.u16Attr %#x\n", pVmcb->guest.LDTR.u16Attr));
3929 Log4(("guest.LDTR.u32Limit %#RX32\n", pVmcb->guest.LDTR.u32Limit));
3930 Log4(("guest.LDTR.u64Base %#RX64\n", pVmcb->guest.LDTR.u64Base));
3931
3932 Log4(("guest.IDTR.u32Limit %#RX32\n", pVmcb->guest.IDTR.u32Limit));
3933 Log4(("guest.IDTR.u64Base %#RX64\n", pVmcb->guest.IDTR.u64Base));
3934
3935 Log4(("guest.TR.u16Sel %RTsel\n", pVmcb->guest.TR.u16Sel));
3936 Log4(("guest.TR.u16Attr %#x\n", pVmcb->guest.TR.u16Attr));
3937 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit));
3938 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base));
3939
3940 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL));
3941 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0));
3942 Log4(("guest.u64CR2 %#RX64\n", pVmcb->guest.u64CR2));
3943 Log4(("guest.u64CR3 %#RX64\n", pVmcb->guest.u64CR3));
3944 Log4(("guest.u64CR4 %#RX64\n", pVmcb->guest.u64CR4));
3945 Log4(("guest.u64DR6 %#RX64\n", pVmcb->guest.u64DR6));
3946 Log4(("guest.u64DR7 %#RX64\n", pVmcb->guest.u64DR7));
3947
3948 Log4(("guest.u64RIP %#RX64\n", pVmcb->guest.u64RIP));
3949 Log4(("guest.u64RSP %#RX64\n", pVmcb->guest.u64RSP));
3950 Log4(("guest.u64RAX %#RX64\n", pVmcb->guest.u64RAX));
3951 Log4(("guest.u64RFlags %#RX64\n", pVmcb->guest.u64RFlags));
3952
3953 Log4(("guest.u64SysEnterCS %#RX64\n", pVmcb->guest.u64SysEnterCS));
3954 Log4(("guest.u64SysEnterEIP %#RX64\n", pVmcb->guest.u64SysEnterEIP));
3955 Log4(("guest.u64SysEnterESP %#RX64\n", pVmcb->guest.u64SysEnterESP));
3956
3957 Log4(("guest.u64EFER %#RX64\n", pVmcb->guest.u64EFER));
3958 Log4(("guest.u64STAR %#RX64\n", pVmcb->guest.u64STAR));
3959 Log4(("guest.u64LSTAR %#RX64\n", pVmcb->guest.u64LSTAR));
3960 Log4(("guest.u64CSTAR %#RX64\n", pVmcb->guest.u64CSTAR));
3961 Log4(("guest.u64SFMASK %#RX64\n", pVmcb->guest.u64SFMASK));
3962 Log4(("guest.u64KernelGSBase %#RX64\n", pVmcb->guest.u64KernelGSBase));
3963 Log4(("guest.u64PAT %#RX64\n", pVmcb->guest.u64PAT));
3964 Log4(("guest.u64DBGCTL %#RX64\n", pVmcb->guest.u64DBGCTL));
3965 Log4(("guest.u64BR_FROM %#RX64\n", pVmcb->guest.u64BR_FROM));
3966 Log4(("guest.u64BR_TO %#RX64\n", pVmcb->guest.u64BR_TO));
3967 Log4(("guest.u64LASTEXCPFROM %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
3968 Log4(("guest.u64LASTEXCPTO %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
3969
3970 NOREF(pVmcb);
3971#endif /* VBOX_STRICT */
3972 }
3973 else
3974 Log4Func(("rcVMRun=%d\n", rcVMRun));
3975}
3976
3977
3978/**
3979 * Check per-VM and per-VCPU force flag actions that require us to go back to
3980 * ring-3 for one reason or another.
3981 *
3982 * @returns Strict VBox status code (information status code included).
3983 * @retval VINF_SUCCESS if we don't have any actions that require going back to
3984 * ring-3.
3985 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
3986 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
3987 * interrupts)
3988 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
3989 * all EMTs to be in ring-3.
3990 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
3991 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
3992 * to the EM loop.
3993 *
3994 * @param pVCpu The cross context virtual CPU structure.
3995 */
3996static VBOXSTRICTRC hmR0SvmCheckForceFlags(PVMCPUCC pVCpu)
3997{
3998 Assert(VMMRZCallRing3IsEnabled(pVCpu));
3999 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
4000
4001 /* Could happen as a result of longjump. */
4002 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
4003 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
4004
4005 /* Update pending interrupts into the APIC's IRR. */
4006 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4007 APICUpdatePendingInterrupts(pVCpu);
4008
4009 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4010 if ( VM_FF_IS_ANY_SET(pVM, !pVCpu->hm.s.fSingleInstruction
4011 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
4012 || VMCPU_FF_IS_ANY_SET(pVCpu, !pVCpu->hm.s.fSingleInstruction
4013 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4014 {
4015 /* Pending PGM CR3 sync. */
4016 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
4017 {
4018 int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4,
4019 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
4020 if (rc != VINF_SUCCESS)
4021 {
4022 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
4023 return rc;
4024 }
4025 }
4026
4027 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
4028 /* -XXX- what was that about single stepping? */
4029 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
4030 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4031 {
4032 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
4033 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
4034 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
4035 return rc;
4036 }
4037
4038 /* Pending VM request packets, such as hardware interrupts. */
4039 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
4040 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
4041 {
4042 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchVmReq);
4043 Log4Func(("Pending VM request forcing us back to ring-3\n"));
4044 return VINF_EM_PENDING_REQUEST;
4045 }
4046
4047 /* Pending PGM pool flushes. */
4048 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
4049 {
4050 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPgmPoolFlush);
4051 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
4052 return VINF_PGM_POOL_FLUSH_PENDING;
4053 }
4054
4055 /* Pending DMA requests. */
4056 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
4057 {
4058 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchDma);
4059 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
4060 return VINF_EM_RAW_TO_R3;
4061 }
4062 }
4063
4064 return VINF_SUCCESS;
4065}
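/* Note: all the non-VINF_SUCCESS statuses above are informational; the caller
   breaks out of the VMRUN loop and hmR0SvmExitToRing3() performs the actual
   state syncing before we leave ring-0. */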
4066
4067
4068/**
4069 * Does the preparations before executing guest code in AMD-V.
4070 *
4071 * This may cause longjmps to ring-3 and may even result in rescheduling to the
4072 * recompiler. We must be cautious about what we do here regarding committing
4073 * guest-state information into the VMCB, assuming we assuredly execute the guest
4074 * in AMD-V. If we fall back to the recompiler after updating the VMCB and
4075 * clearing the common-state (TRPM/forceflags), we must undo those changes so
4076 * that the recompiler can (and should) use them when it resumes guest
4077 * execution. Otherwise such operations must be done when we can no longer
4078 * exit to ring-3.
4079 *
4080 * @returns Strict VBox status code (informational status codes included).
4081 * @retval VINF_SUCCESS if we can proceed with running the guest.
4082 * @retval VINF_* scheduling changes, we have to go back to ring-3.
4083 *
4084 * @param pVCpu The cross context virtual CPU structure.
4085 * @param pSvmTransient Pointer to the SVM transient structure.
4086 */
4087static VBOXSTRICTRC hmR0SvmPreRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4088{
4089 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
4090
4091#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
4092 if (pSvmTransient->fIsNestedGuest)
4093 {
4094 Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
4095 return VINF_EM_RESCHEDULE_REM;
4096 }
4097#endif
4098
4099 /* Check force flag actions that might require us to go back to ring-3. */
4100 VBOXSTRICTRC rc = hmR0SvmCheckForceFlags(pVCpu);
4101 if (rc != VINF_SUCCESS)
4102 return rc;
4103
4104 if (TRPMHasTrap(pVCpu))
4105 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
4106 else if (!pVCpu->hm.s.Event.fPending)
4107 {
4108 rc = hmR0SvmEvaluatePendingEvent(pVCpu, pSvmTransient);
4109 if ( rc != VINF_SUCCESS
4110 || pSvmTransient->fIsNestedGuest != CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
4111 {
4112 /* If a nested-guest VM-exit occurred, bail. */
4113 if (pSvmTransient->fIsNestedGuest)
4114 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
4115 return rc;
4116 }
4117 }
4118
4119 /*
4120 * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
4121 * Just do it in software, see @bugref{8411}.
4122 * NB: If we could continue a task switch exit we wouldn't need to do this.
4123 */
4124 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4125 if (RT_UNLIKELY( !g_fHmSvmFeatures
4126 && pVCpu->hm.s.Event.fPending
4127 && SVM_EVENT_GET_TYPE(pVCpu->hm.s.Event.u64IntInfo) == SVM_EVENT_NMI))
4128 return VINF_EM_RAW_INJECT_TRPM_EVENT;
4129
4130#ifdef HMSVM_SYNC_FULL_GUEST_STATE
4131 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
4132 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
4133#endif
4134
4135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4136 /*
4137 * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
4138 */
4139 if (pSvmTransient->fIsNestedGuest)
4140 hmR0SvmSetupVmcbNested(pVCpu);
4141#endif
4142
4143 /*
4144 * Export the guest state bits that are not shared with the host in any way as we can
4145 * longjmp or get preempted in the midst of exporting some of the state.
4146 */
4147 rc = hmR0SvmExportGuestState(pVCpu, pSvmTransient);
4148 AssertRCReturn(rc, rc);
4149 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
4150
4151 /* Ensure we've cached (and hopefully modified) the nested-guest VMCB for execution using hardware-assisted SVM. */
4152 Assert(!pSvmTransient->fIsNestedGuest || pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
4153
4154 /*
4155 * If we're not intercepting TPR changes in the guest, save the guest TPR before the
4156 * world-switch so we can update it on the way back if the guest changed the TPR.
4157 */
4158 if (pVCpu->hmr0.s.svm.fSyncVTpr)
4159 {
4160 Assert(!pSvmTransient->fIsNestedGuest);
4161 PCSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
4162 if (pVM->hm.s.fTprPatchingActive)
4163 pSvmTransient->u8GuestTpr = pVmcb->guest.u64LSTAR;
4164 else
4165 pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
4166 }
4167
4168 /*
4169 * No longjmps to ring-3 from this point on!!!
4170 *
4171 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4172 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4173 */
4174 VMMRZCallRing3Disable(pVCpu);
4175
4176 /*
4177 * We disable interrupts so that we don't miss any interrupts that would flag preemption
4178 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
4179 * preemption disabled for a while. Since this is purely to aid the
4180 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
4181 * disable interrupts on NT.
4182 *
4183 * We need to check for force-flags that could've possibly been altered since we last
4184 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
4185 * see @bugref{6398}).
4186 *
4187 * We also check a couple of other force-flags as a last opportunity to get the EMT back
4188 * to ring-3 before executing guest code.
4189 */
4190 pSvmTransient->fEFlags = ASMIntDisableFlags();
4191 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4192 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4193 {
4194 ASMSetFlags(pSvmTransient->fEFlags);
4195 VMMRZCallRing3Enable(pVCpu);
4196 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
4197 return VINF_EM_RAW_TO_R3;
4198 }
4199 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
4200 {
4201 ASMSetFlags(pSvmTransient->fEFlags);
4202 VMMRZCallRing3Enable(pVCpu);
4203 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
4204 return VINF_EM_RAW_INTERRUPT;
4205 }
4206
4207 return VINF_SUCCESS;
4208}
4209
4210
4211/**
4212 * Prepares to run guest (or nested-guest) code in AMD-V and we've committed to
4213 * doing so.
4214 *
4215 * This means there is no backing out to ring-3 or anywhere else at this point.
4216 *
4217 * @param pVCpu The cross context virtual CPU structure.
4218 * @param pSvmTransient Pointer to the SVM transient structure.
4219 *
4220 * @remarks Called with preemption disabled.
4221 * @remarks No-long-jump zone!!!
4222 */
4223static void hmR0SvmPreRunGuestCommitted(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4224{
4225 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4226 Assert(VMMR0IsLogFlushDisabled(pVCpu));
4227 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4228
4229 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4230 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
4231
4232 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4233 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
4234
4235 hmR0SvmInjectPendingEvent(pVCpu, pVmcb);
4236
4237 if (!CPUMIsGuestFPUStateActive(pVCpu))
4238 {
4239 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
4240 CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
4241 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
4242 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
4243 }
4244
4245 /* Load the state shared between host and guest (FPU, debug). */
4246 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)
4247 hmR0SvmExportSharedState(pVCpu, pVmcb);
4248
4249 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT; /* Preemption might set this, nothing to do on AMD-V. */
4250 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
4251
4252 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
4253 RTCPUID const idHostCpu = pHostCpu->idCpu;
4254 bool const fMigratedHostCpu = idHostCpu != pVCpu->hmr0.s.idLastCpu;
4255
4256 /* Setup TSC offsetting. */
4257 if ( pSvmTransient->fUpdateTscOffsetting
4258 || fMigratedHostCpu)
4259 {
4260 hmR0SvmUpdateTscOffsetting(pVCpu, pVmcb);
4261 pSvmTransient->fUpdateTscOffsetting = false;
4262 }
4263
4264 /* Record statistics of how often we use TSC offsetting as opposed to intercepting RDTSC/P. */
4265 if (!(pVmcb->ctrl.u64InterceptCtrl & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
4266 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4267 else
4268 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4269
4270 /* If we've migrated CPUs, mark the VMCB Clean bits as dirty. */
4271 if (fMigratedHostCpu)
4272 pVmcb->ctrl.u32VmcbCleanBits = 0;
4273
4274 /* Store status of the shared guest-host state at the time of VMRUN. */
4275 pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
4276 pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
4277
4278#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4279 uint8_t *pbMsrBitmap;
4280 if (!pSvmTransient->fIsNestedGuest)
4281 pbMsrBitmap = (uint8_t *)pVCpu->hmr0.s.svm.pvMsrBitmap;
4282 else
4283 {
4284 /** @todo We could perhaps optimize this by monitoring if the guest modifies its
4285 * MSRPM and only perform this if it changed; also use EVEX.POR when it
4286 * does. */
4287 hmR0SvmMergeMsrpmNested(pHostCpu, pVCpu);
4288
4289 /* Update the nested-guest VMCB with the newly merged MSRPM (clean bits updated below). */
4290 pVmcb->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
4291 pbMsrBitmap = (uint8_t *)pHostCpu->n.svm.pvNstGstMsrpm;
4292 }
4293#else
4294 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
4295#endif
4296
4297 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
4298 /* Flush the appropriate tagged-TLB entries. */
4299 hmR0SvmFlushTaggedTlb(pHostCpu, pVCpu, pVmcb);
4300 Assert(pVCpu->hmr0.s.idLastCpu == idHostCpu);
4301
4302 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
4303
4304 TMNotifyStartOfExecution(pVM, pVCpu); /* Finally, notify TM to resume its clocks as we're about
4305 to start executing. */
4306
4307 /*
4308 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that RDTSCPs
4309 * (that don't cause exits) reads the guest MSR, see @bugref{3324}.
4310 *
4311 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
4312 */
4313 if ( pVM->cpum.ro.HostFeatures.fRdTscP
4314 && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
4315 {
4316 uint64_t const uGuestTscAux = CPUMGetGuestTscAux(pVCpu);
4317 pVCpu->hmr0.s.svm.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
4318 if (uGuestTscAux != pVCpu->hmr0.s.svm.u64HostTscAux)
4319 ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
4320 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
4321 pSvmTransient->fRestoreTscAuxMsr = true;
4322 }
4323 else
4324 {
4325 hmR0SvmSetMsrPermission(pVCpu, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
4326 pSvmTransient->fRestoreTscAuxMsr = false;
4327 }
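    /* We fiddled with the TSC_AUX MSR permission above, so the CPU must re-fetch
       the MSRPM fields; hence the IOPM/MSRPM clean bit is cleared below. */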
4328 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
4329
4330 /*
4331 * If VMCB Clean bits aren't supported by the CPU, or not exposed to the guest in the
4332 * nested virtualization case, mark all state-bits as dirty indicating to the CPU to
4333 * re-load everything from the VMCB.
4334 */
4335 bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pSvmTransient->fIsNestedGuest);
4336 if (!fSupportsVmcbCleanBits)
4337 pVmcb->ctrl.u32VmcbCleanBits = 0;
4338}
4339
4340
4341/**
4342 * Wrapper for running the guest (or nested-guest) code in AMD-V.
4343 *
4344 * @returns VBox strict status code.
4345 * @param pVCpu The cross context virtual CPU structure.
4346 * @param HCPhysVmcb The host physical address of the VMCB.
4347 *
4348 * @remarks No-long-jump zone!!!
4349 */
4350DECLINLINE(int) hmR0SvmRunGuest(PVMCPUCC pVCpu, RTHCPHYS HCPhysVmcb)
4351{
4352 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
4353 pVCpu->cpum.GstCtx.fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
4354 return pVCpu->hmr0.s.svm.pfnVMRun(pVCpu->CTX_SUFF(pVM), pVCpu, HCPhysVmcb);
4355}
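/* Note: fExtrn is set to "everything external" here because after VMRUN the
   authoritative register values live in the VMCB (and host registers), not in
   CPUMCTX; hmR0SvmImportGuestState() clears individual CPUMCTX_EXTRN_XXX bits
   as it copies state back in. */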
4356
4357
4358/**
4359 * Performs some essential restoration of state after running guest (or
4360 * nested-guest) code in AMD-V.
4361 *
4362 * @param pVCpu The cross context virtual CPU structure.
4363 * @param pSvmTransient Pointer to the SVM transient structure.
4364 * @param rcVMRun Return code of VMRUN.
4365 *
4366 * @remarks Called with interrupts disabled.
4367 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
4368 * unconditionally when it is safe to do so.
4369 */
4370static void hmR0SvmPostRunGuest(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, VBOXSTRICTRC rcVMRun)
4371{
4372 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
4373
4374 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
4375 ASMAtomicIncU32(&pVCpu->hmr0.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
4376
4377 PSVMVMCB pVmcb = pSvmTransient->pVmcb;
4378 PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
4379
4380 /* TSC read must be done early for maximum accuracy. */
4381 if (!(pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
4382 {
4383 if (!pSvmTransient->fIsNestedGuest)
4384 TMCpuTickSetLastSeen(pVCpu, pVCpu->hmr0.s.uTscExit + pVmcbCtrl->u64TSCOffset);
4385#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4386 else
4387 {
4388 /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMNotifySvmNstGstVmexit(). */
4389 uint64_t const uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, pVCpu->hmr0.s.uTscExit + pVmcbCtrl->u64TSCOffset);
4390 TMCpuTickSetLastSeen(pVCpu, uGstTsc);
4391 }
4392#endif
4393 }
4394
4395 if (pSvmTransient->fRestoreTscAuxMsr)
4396 {
4397 uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
4398 CPUMSetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
4399 if (u64GuestTscAuxMsr != pVCpu->hmr0.s.svm.u64HostTscAux)
4400 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hmr0.s.svm.u64HostTscAux);
4401 }
4402
4403 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
4404 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4405 TMNotifyEndOfExecution(pVM, pVCpu, pVCpu->hmr0.s.uTscExit); /* Notify TM that the guest is no longer running. */
4406 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
4407
4408 Assert(!(ASMGetFlags() & X86_EFL_IF));
4409 ASMSetFlags(pSvmTransient->fEFlags); /* Enable interrupts. */
4410 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
4411
4412 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
4413 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
4414 {
4415 Log4Func(("VMRUN failure: rcVMRun=%Rrc\n", VBOXSTRICTRC_VAL(rcVMRun)));
4416 return;
4417 }
4418
4419 pSvmTransient->u64ExitCode = pVmcbCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
4420 pSvmTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
4421 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
4422 pVmcbCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
4423
4424#ifdef HMSVM_SYNC_FULL_GUEST_STATE
4425 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4426 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
4427#else
4428 /*
4429 * Always import the following:
4430 *
4431 * - RIP for exit optimizations and evaluating event injection on re-entry.
4432 * - RFLAGS for evaluating event injection on VM re-entry and for exporting shared debug
4433 * state on preemption.
4434 * - Interrupt shadow, GIF for evaluating event injection on VM re-entry.
4435 * - CS for exit optimizations.
4436 * - RAX, RSP for simplifying assumptions on GPRs. All other GPRs are swapped by the
4437 * assembly switcher code.
4438 * - Shared state (only DR7 currently) for exporting shared debug state on preemption.
4439 */
4440 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP
4441 | CPUMCTX_EXTRN_RFLAGS
4442 | CPUMCTX_EXTRN_RAX
4443 | CPUMCTX_EXTRN_RSP
4444 | CPUMCTX_EXTRN_CS
4445 | CPUMCTX_EXTRN_HWVIRT
4446 | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW
4447 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ
4448 | HMSVM_CPUMCTX_SHARED_STATE);
4449#endif
4450
4451 if ( pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
4452 && pVCpu->hmr0.s.svm.fSyncVTpr)
4453 {
4454 Assert(!pSvmTransient->fIsNestedGuest);
4455 /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
4456 if ( pVM->hm.s.fTprPatchingActive
4457 && (pVmcb->guest.u64LSTAR & 0xff) != pSvmTransient->u8GuestTpr)
4458 {
4459 int rc = APICSetTpr(pVCpu, pVmcb->guest.u64LSTAR & 0xff);
4460 AssertRC(rc);
4461 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
4462 }
4463 /* Sync TPR when we aren't intercepting CR8 writes. */
4464 else if (pSvmTransient->u8GuestTpr != pVmcbCtrl->IntCtrl.n.u8VTPR)
4465 {
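                /* V_TPR holds TPR bits 7:4 (the priority class), hence the shift by 4 to
                   rebuild the 8-bit value the APIC expects. */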
4466 int rc = APICSetTpr(pVCpu, pVmcbCtrl->IntCtrl.n.u8VTPR << 4);
4467 AssertRC(rc);
4468 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
4469 }
4470 }
4471
4472#ifdef DEBUG_ramshankar
4473 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
4474 {
4475 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4476 hmR0SvmLogState(pVCpu, pVmcb, pVCpu->cpum.GstCtx, "hmR0SvmPostRunGuestNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR,
4477 0 /* uVerbose */);
4478 }
4479#endif
4480
4481 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
4482 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK),
4483 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, pVCpu->hmr0.s.uTscExit);
4484}
4485
4486
4487/**
4488 * Runs the guest code using AMD-V.
4489 *
4490 * @returns Strict VBox status code.
4491 * @param pVCpu The cross context virtual CPU structure.
4492 * @param pcLoops Pointer to the number of executed loops.
4493 */
4494static VBOXSTRICTRC hmR0SvmRunGuestCodeNormal(PVMCPUCC pVCpu, uint32_t *pcLoops)
4495{
4496 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
4497 Assert(pcLoops);
4498 Assert(*pcLoops <= cMaxResumeLoops);
4499
4500 SVMTRANSIENT SvmTransient;
4501 RT_ZERO(SvmTransient);
4502 SvmTransient.fUpdateTscOffsetting = true;
4503 SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;
4504
4505 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
4506 for (;;)
4507 {
4508 Assert(!HMR0SuspendPending());
4509 HMSVM_ASSERT_CPU_SAFE(pVCpu);
4510
4511 /* Preparatory work for running guest code; this may force us to return to
4512 ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4513 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4514 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
4515 if (rc != VINF_SUCCESS)
4516 break;
4517
4518 /*
4519 * No longjmps to ring-3 from this point on!!!
4520 *
4521 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4522 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4523 */
4524 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
4525 rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);
4526
4527 /* Restore any residual host-state and save any bits shared between host and guest
4528 into the guest-CPU state. Re-enables interrupts! */
4529 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
4530
4531 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
4532 || SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
4533 {
4534 if (rc == VINF_SUCCESS)
4535 rc = VERR_SVM_INVALID_GUEST_STATE;
4536 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
4537 hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
4538 break;
4539 }
4540
4541 /* Handle the #VMEXIT. */
4542 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4543 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
4544 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
4545 rc = hmR0SvmHandleExit(pVCpu, &SvmTransient);
4546 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
4547 if (rc != VINF_SUCCESS)
4548 break;
4549 if (++(*pcLoops) >= cMaxResumeLoops)
4550 {
4551 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4552 rc = VINF_EM_RAW_INTERRUPT;
4553 break;
4554 }
4555 }
4556
4557 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4558 return rc;
4559}
4560
4561
4562/**
4563 * Runs the guest code using AMD-V in single step mode.
4564 *
4565 * @returns Strict VBox status code.
4566 * @param pVCpu The cross context virtual CPU structure.
4567 * @param pcLoops Pointer to the number of executed loops.
4568 */
4569static VBOXSTRICTRC hmR0SvmRunGuestCodeStep(PVMCPUCC pVCpu, uint32_t *pcLoops)
4570{
4571 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
4572 Assert(pcLoops);
4573 Assert(*pcLoops <= cMaxResumeLoops);
4574
4575 SVMTRANSIENT SvmTransient;
4576 RT_ZERO(SvmTransient);
4577 SvmTransient.fUpdateTscOffsetting = true;
4578 SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;
4579
4580 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4581 uint16_t const uCsStart = pCtx->cs.Sel;
4582 uint64_t const uRipStart = pCtx->rip;
4583
4584 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
4585 for (;;)
4586 {
4587 Assert(!HMR0SuspendPending());
4588 AssertMsg(pVCpu->hmr0.s.idEnteredCpu == RTMpCpuId(),
4589 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hmr0.s.idEnteredCpu,
4590 (unsigned)RTMpCpuId(), *pcLoops));
4591
4592 /* Preparatory work for running guest code; this may force us to return to
4593 ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4594 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4595 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
4596 if (rc != VINF_SUCCESS)
4597 break;
4598
4599 /*
4600 * No longjmps to ring-3 from this point on!!!
4601 *
4602 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4603 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4604 */
4605 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
4606
4607 rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);
4608
4609 /* Restore any residual host-state and save any bits shared between host and guest
4610 into the guest-CPU state. Re-enables interrupts! */
4611 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
4612
4613 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
4614 || SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
4615 {
4616 if (rc == VINF_SUCCESS)
4617 rc = VERR_SVM_INVALID_GUEST_STATE;
4618 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
4619 hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
4620 return rc;
4621 }
4622
4623 /* Handle the #VMEXIT. */
4624 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4625 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
4626 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
4627 rc = hmR0SvmHandleExit(pVCpu, &SvmTransient);
4628 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
4629 if (rc != VINF_SUCCESS)
4630 break;
4631 if (++(*pcLoops) >= cMaxResumeLoops)
4632 {
4633 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4634 rc = VINF_EM_RAW_INTERRUPT;
4635 break;
4636 }
4637
4638 /*
4639 * Did the RIP change? If so, consider it a single step.
4640 * Otherwise, make sure one of the TFs gets set.
4641 */
4642 if ( pCtx->rip != uRipStart
4643 || pCtx->cs.Sel != uCsStart)
4644 {
4645 rc = VINF_EM_DBG_STEPPED;
4646 break;
4647 }
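        /* Mark the shared debug state as dirty so the trap flag / debug registers are
           re-evaluated and re-exported before the next VMRUN of this loop. */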
4648 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR_MASK;
4649 }
4650
4651 /*
4652 * Clear the X86_EFL_TF if necessary.
4653 */
4654 if (pVCpu->hmr0.s.fClearTrapFlag)
4655 {
4656 pVCpu->hmr0.s.fClearTrapFlag = false;
4657 pCtx->eflags.Bits.u1TF = 0;
4658 }
4659
4660 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4661 return rc;
4662}
4663
4664#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4665/**
4666 * Runs the nested-guest code using AMD-V.
4667 *
4668 * @returns Strict VBox status code.
4669 * @param pVCpu The cross context virtual CPU structure.
4670 * @param pcLoops Pointer to the number of executed loops. If we're switching
4671 * from the guest-code execution loop to this nested-guest
4672 * execution loop, pass the remainder value; else, pass 0.
4673 */
4674static VBOXSTRICTRC hmR0SvmRunGuestCodeNested(PVMCPUCC pVCpu, uint32_t *pcLoops)
4675{
4676 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4677 HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
4678 Assert(pcLoops);
4679 Assert(*pcLoops <= pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops);
4680
4681 SVMTRANSIENT SvmTransient;
4682 RT_ZERO(SvmTransient);
4683 SvmTransient.fUpdateTscOffsetting = true;
4684 SvmTransient.pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
4685 SvmTransient.fIsNestedGuest = true;
4686
4687 VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_4;
4688 for (;;)
4689 {
4690 Assert(!HMR0SuspendPending());
4691 HMSVM_ASSERT_CPU_SAFE(pVCpu);
4692
4693 /* Preparatory work for running nested-guest code; this may force us to return to
4694 ring-3. This bugger disables interrupts on VINF_SUCCESS! */
4695 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
4696 rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
4697 if ( rc != VINF_SUCCESS
4698 || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4699 break;
4700
4701 /*
4702 * No longjmps to ring-3 from this point on!!!
4703 *
4704 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
4705 * better than a kernel panic. This also disables flushing of the R0-logger instance.
4706 */
4707 hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
4708
4709 rc = hmR0SvmRunGuest(pVCpu, pCtx->hwvirt.svm.HCPhysVmcb);
4710
4711 /* Restore any residual host-state and save any bits shared between host and guest
4712 into the guest-CPU state. Re-enables interrupts! */
4713 hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
4714
4715 if (RT_LIKELY( rc == VINF_SUCCESS
4716 && SvmTransient.u64ExitCode != SVM_EXIT_INVALID))
4717 { /* extremely likely */ }
4718 else
4719 {
4720 /* VMRUN failed; this shouldn't really happen (Guru meditation). */
4721 if (rc != VINF_SUCCESS)
4722 break;
4723
4724 /* Invalid nested-guest state. Cause a #VMEXIT but assert on strict builds. */
4725 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
4726 AssertMsgFailed(("Invalid nested-guest state. rc=%Rrc u64ExitCode=%#RX64\n", rc, SvmTransient.u64ExitCode));
4727 rc = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0, 0);
4728 break;
4729 }
4730
4731 /* Handle the #VMEXIT. */
4732 HMSVM_NESTED_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
4733 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
4734 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb));
4735 rc = hmR0SvmHandleExitNested(pVCpu, &SvmTransient);
4736 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
4737 if (rc == VINF_SUCCESS)
4738 {
4739 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
4740 {
4741 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
4742 rc = VINF_SVM_VMEXIT;
4743 }
4744 else
4745 {
4746 if (++(*pcLoops) <= pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops)
4747 continue;
4748 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
4749 rc = VINF_EM_RAW_INTERRUPT;
4750 }
4751 }
4752 else
4753 Assert(rc != VINF_SVM_VMEXIT);
4754 break;
4755 /** @todo NSTSVM: handle single-stepping. */
4756 }
4757
4758 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4759 return rc;
4760}
4761#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
4762
4763
4764/**
4765 * Runs the guest code using AMD-V.
4766 *
4767 * @returns Strict VBox status code.
4768 * @param pVCpu The cross context virtual CPU structure.
4769 */
4770VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVMCPUCC pVCpu)
4771{
4772 AssertPtr(pVCpu);
4773 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4774 Assert(VMMRZCallRing3IsEnabled(pVCpu));
4775 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
4776 HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
4777
4778 uint32_t cLoops = 0;
4779 VBOXSTRICTRC rc;
4780 for (;;)
4781 {
4782#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4783 bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
4784#else
4785 NOREF(pCtx);
4786 bool const fInNestedGuestMode = false;
4787#endif
4788 if (!fInNestedGuestMode)
4789 {
4790 if (!pVCpu->hm.s.fSingleInstruction)
4791 rc = hmR0SvmRunGuestCodeNormal(pVCpu, &cLoops);
4792 else
4793 rc = hmR0SvmRunGuestCodeStep(pVCpu, &cLoops);
4794 }
4795#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4796 else
4797 rc = hmR0SvmRunGuestCodeNested(pVCpu, &cLoops);
4798
4799 if (rc == VINF_SVM_VMRUN)
4800 {
4801 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
4802 continue;
4803 }
4804 if (rc == VINF_SVM_VMEXIT)
4805 {
4806 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
4807 continue;
4808 }
4809#endif
4810 break;
4811 }
4812
4813 /* Fixup error codes. */
4814 if (rc == VERR_EM_INTERPRETER)
4815 rc = VINF_EM_RAW_EMULATE_INSTR;
4816 else if (rc == VINF_EM_RESET)
4817 rc = VINF_EM_TRIPLE_FAULT;
4818
4819 /* Prepare to return to ring-3. This will remove longjmp notifications. */
4820 rc = hmR0SvmExitToRing3(pVCpu, rc);
4821 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
4822 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
4823 return rc;
4824}
4825
4826
4827#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4828/**
4829 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
4830 *
 * @returns @c true if the I/O access should cause a nested-guest \#VMEXIT, @c false otherwise.
4831 * @param pvIoBitmap Pointer to the nested-guest IO bitmap.
4832 * @param pIoExitInfo Pointer to the SVMIOIOEXITINFO.
4833 */
4834static bool hmR0SvmIsIoInterceptSet(void *pvIoBitmap, PSVMIOIOEXITINFO pIoExitInfo)
4835{
4836 const uint16_t u16Port = pIoExitInfo->n.u16Port;
4837 const SVMIOIOTYPE enmIoType = (SVMIOIOTYPE)pIoExitInfo->n.u1Type;
4838 const uint8_t cbReg = (pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT) & 7;
4839 const uint8_t cAddrSizeBits = ((pIoExitInfo->u >> SVM_IOIO_ADDR_SIZE_SHIFT) & 7) << 4;
4840 const uint8_t iEffSeg = pIoExitInfo->n.u3Seg;
4841 const bool fRep = pIoExitInfo->n.u1Rep;
4842 const bool fStrIo = pIoExitInfo->n.u1Str;
4843
4844 return CPUMIsSvmIoInterceptSet(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo,
4845 NULL /* pIoExitInfo */);
4846}
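
/*
 * For illustration only (a simplified sketch; the real lookup in CPUMIsSvmIoInterceptSet
 * also copes with accesses straddling the end of the IOPM and other corner cases): each
 * I/O port owns one bit in the bitmap and an access of cbReg bytes tests one bit per byte
 * accessed, roughly:
 *
 *     uint8_t const *pbIopm     = (uint8_t const *)pvIoBitmap + (u16Port >> 3);
 *     uint16_t const fMask      = ((1 << cbReg) - 1) << (u16Port & 7);
 *     bool const     fIntercept = (RT_MAKE_U16(pbIopm[0], pbIopm[1]) & fMask) != 0;
 */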
4847
4848
4849/**
4850 * Handles a nested-guest \#VMEXIT (for all EXITCODE values except
4851 * SVM_EXIT_INVALID).
4852 *
4853 * @returns Strict VBox status code (informational status codes included).
4854 * @param pVCpu The cross context virtual CPU structure.
4855 * @param pSvmTransient Pointer to the SVM transient structure.
4856 */
4857static VBOXSTRICTRC hmR0SvmHandleExitNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
4858{
4859 HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
4860 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
4861 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
4862
4863 /*
4864 * We import the complete state here because we use separate VMCBs for the guest and the
4865 * nested-guest, and the guest's VMCB is used after the #VMEXIT. We can only save/restore
4866 * the #VMEXIT specific state if we used the same VMCB for both guest and nested-guest.
4867 */
4868#define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
4869 do { \
4870 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
4871 return IEMExecSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
4872 } while (0)
4873
4874 /*
4875 * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected by the
4876 * nested-guest. If it isn't, it should be handled by the (outer) guest.
4877 */
4878 PSVMVMCB pVmcbNstGst = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb);
4879 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4880 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
4881 uint64_t const uExitCode = pVmcbNstGstCtrl->u64ExitCode;
4882 uint64_t const uExitInfo1 = pVmcbNstGstCtrl->u64ExitInfo1;
4883 uint64_t const uExitInfo2 = pVmcbNstGstCtrl->u64ExitInfo2;
4884
4885 Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode);
4886 switch (uExitCode)
4887 {
4888 case SVM_EXIT_CPUID:
4889 {
4890 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CPUID))
4891 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4892 return hmR0SvmExitCpuid(pVCpu, pSvmTransient);
4893 }
4894
4895 case SVM_EXIT_RDTSC:
4896 {
4897 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSC))
4898 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4899 return hmR0SvmExitRdtsc(pVCpu, pSvmTransient);
4900 }
4901
4902 case SVM_EXIT_RDTSCP:
4903 {
4904 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSCP))
4905 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4906 return hmR0SvmExitRdtscp(pVCpu, pSvmTransient);
4907 }
4908
4909 case SVM_EXIT_MONITOR:
4910 {
4911 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MONITOR))
4912 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4913 return hmR0SvmExitMonitor(pVCpu, pSvmTransient);
4914 }
4915
4916 case SVM_EXIT_MWAIT:
4917 {
4918 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MWAIT))
4919 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4920 return hmR0SvmExitMwait(pVCpu, pSvmTransient);
4921 }
4922
4923 case SVM_EXIT_HLT:
4924 {
4925 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_HLT))
4926 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4927 return hmR0SvmExitHlt(pVCpu, pSvmTransient);
4928 }
4929
4930 case SVM_EXIT_MSR:
4931 {
4932 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
4933 {
4934 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
4935 uint16_t offMsrpm;
4936 uint8_t uMsrpmBit;
4937 int rc = CPUMGetSvmMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
4938 if (RT_SUCCESS(rc))
4939 {
4940 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
4941 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
4942
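                /* Each MSR owns two adjacent bits in the MSRPM: bit uMsrpmBit intercepts
                   reads and bit uMsrpmBit + 1 intercepts writes, see AMD-V spec.
                   "15.11 MSR Intercepts". */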
4943 uint8_t const *pbMsrBitmap = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
4944 pbMsrBitmap += offMsrpm;
4945 bool const fInterceptRead = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit));
4946 bool const fInterceptWrite = RT_BOOL(*pbMsrBitmap & RT_BIT(uMsrpmBit + 1));
4947
4948 if ( (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
4949 || (fInterceptRead && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
4950 {
4951 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4952 }
4953 }
4954 else
4955 {
4956 /*
4957 * MSRs not covered by the MSRPM automatically cause an #VMEXIT.
4958 * See AMD-V spec. "15.11 MSR Intercepts".
4959 */
4960 Assert(rc == VERR_OUT_OF_RANGE);
4961 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4962 }
4963 }
4964 return hmR0SvmExitMsr(pVCpu, pSvmTransient);
4965 }
4966
4967 case SVM_EXIT_IOIO:
4968 {
4969 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
4970 {
4971 void *pvIoBitmap = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap);
4972 SVMIOIOEXITINFO IoExitInfo;
4973 IoExitInfo.u = pVmcbNstGst->ctrl.u64ExitInfo1;
4974 bool const fIntercept = hmR0SvmIsIoInterceptSet(pvIoBitmap, &IoExitInfo);
4975 if (fIntercept)
4976 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
4977 }
4978 return hmR0SvmExitIOInstr(pVCpu, pSvmTransient);
4979 }
4980
4981 case SVM_EXIT_XCPT_PF:
4982 {
4983 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4984 if (pVM->hmr0.s.fNestedPaging)
4985 {
4986 uint32_t const u32ErrCode = pVmcbNstGstCtrl->u64ExitInfo1;
4987 uint64_t const uFaultAddress = pVmcbNstGstCtrl->u64ExitInfo2;
4988
4989 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
4990 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
4991 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, u32ErrCode, uFaultAddress);
4992
4993 /* If the nested-guest is not intercepting #PFs, forward the #PF to the guest. */
4994 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
4995 hmR0SvmSetPendingXcptPF(pVCpu, u32ErrCode, uFaultAddress);
4996 return VINF_SUCCESS;
4997 }
4998 return hmR0SvmExitXcptPF(pVCpu, pSvmTransient);
4999 }
5000
5001 case SVM_EXIT_XCPT_UD:
5002 {
5003 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_UD))
5004 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5005 hmR0SvmSetPendingXcptUD(pVCpu);
5006 return VINF_SUCCESS;
5007 }
5008
5009 case SVM_EXIT_XCPT_MF:
5010 {
5011 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
5012 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5013 return hmR0SvmExitXcptMF(pVCpu, pSvmTransient);
5014 }
5015
5016 case SVM_EXIT_XCPT_DB:
5017 {
5018 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_DB))
5019 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5020 return hmR0SvmNestedExitXcptDB(pVCpu, pSvmTransient);
5021 }
5022
5023 case SVM_EXIT_XCPT_AC:
5024 {
5025 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_AC))
5026 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5027 return hmR0SvmExitXcptAC(pVCpu, pSvmTransient);
5028 }
5029
5030 case SVM_EXIT_XCPT_BP:
5031 {
5032 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_BP))
5033 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5034 return hmR0SvmNestedExitXcptBP(pVCpu, pSvmTransient);
5035 }
5036
5037 case SVM_EXIT_READ_CR0:
5038 case SVM_EXIT_READ_CR3:
5039 case SVM_EXIT_READ_CR4:
5040 {
5041 uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
5042 if (CPUMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr))
5043 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5044 return hmR0SvmExitReadCRx(pVCpu, pSvmTransient);
5045 }
5046
5047 case SVM_EXIT_CR0_SEL_WRITE:
5048 {
5049 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5050 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5051 return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
5052 }
5053
5054 case SVM_EXIT_WRITE_CR0:
5055 case SVM_EXIT_WRITE_CR3:
5056 case SVM_EXIT_WRITE_CR4:
5057 case SVM_EXIT_WRITE_CR8: /* CR8 writes would go to the V_TPR rather than here, since we run with V_INTR_MASKING. */
5058 {
5059 uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
5060 Log4Func(("Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));
5061
5062 if (CPUMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr))
5063 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5064 return hmR0SvmExitWriteCRx(pVCpu, pSvmTransient);
5065 }
5066
5067 case SVM_EXIT_PAUSE:
5068 {
5069 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
5070 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5071 return hmR0SvmExitPause(pVCpu, pSvmTransient);
5072 }
5073
5074 case SVM_EXIT_VINTR:
5075 {
5076 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
5077 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5078 return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
5079 }
5080
5081 case SVM_EXIT_INTR:
5082 case SVM_EXIT_NMI:
5083 case SVM_EXIT_SMI:
5084 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */
5085 {
5086 /*
5087 * We shouldn't direct physical interrupts, NMIs or SMIs to the nested-guest.
5088 *
5089 * Although we don't intercept SMIs, the nested-guest might. Therefore, we might
5090 * get an SMI #VMEXIT here, so simply ignore it rather than cause a corresponding
5091 * nested-guest #VMEXIT.
5092 *
5093 * We shall import the complete state here as we may cause #VMEXITs from ring-3
5094 * while trying to inject interrupts, see comment at the top of this function.
5095 */
5096 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_ALL);
5097 return hmR0SvmExitIntr(pVCpu, pSvmTransient);
5098 }
5099
5100 case SVM_EXIT_FERR_FREEZE:
5101 {
5102 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
5103 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5104 return hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient);
5105 }
5106
5107 case SVM_EXIT_INVLPG:
5108 {
5109 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPG))
5110 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5111 return hmR0SvmExitInvlpg(pVCpu, pSvmTransient);
5112 }
5113
5114 case SVM_EXIT_WBINVD:
5115 {
5116 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_WBINVD))
5117 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5118 return hmR0SvmExitWbinvd(pVCpu, pSvmTransient);
5119 }
5120
5121 case SVM_EXIT_INVD:
5122 {
5123 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVD))
5124 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5125 return hmR0SvmExitInvd(pVCpu, pSvmTransient);
5126 }
5127
5128 case SVM_EXIT_RDPMC:
5129 {
5130 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDPMC))
5131 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5132 return hmR0SvmExitRdpmc(pVCpu, pSvmTransient);
5133 }
5134
5135 default:
5136 {
5137 switch (uExitCode)
5138 {
5139 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5140 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5141 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5142 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5143 {
5144 uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
5145 if (CPUMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr))
5146 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5147 return hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
5148 }
5149
5150 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5151 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5152 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5153 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5154 {
5155 uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
5156 if (CPUMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr))
5157 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5158 return hmR0SvmExitWriteDRx(pVCpu, pSvmTransient);
5159 }
5160
5161 case SVM_EXIT_XCPT_DE:
5162 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */
5163 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */
5164 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */
5165 case SVM_EXIT_XCPT_OF:
5166 case SVM_EXIT_XCPT_BR:
5167 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */
5168 case SVM_EXIT_XCPT_NM:
5169 case SVM_EXIT_XCPT_DF:
5170 case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
5171 case SVM_EXIT_XCPT_TS:
5172 case SVM_EXIT_XCPT_NP:
5173 case SVM_EXIT_XCPT_SS:
5174 case SVM_EXIT_XCPT_GP:
5175 /* SVM_EXIT_XCPT_PF: */ /* Handled above. */
5176 case SVM_EXIT_XCPT_15: /* Reserved. */
5177 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */
5178 /* SVM_EXIT_XCPT_AC: */ /* Handled above. */
5179 case SVM_EXIT_XCPT_MC:
5180 case SVM_EXIT_XCPT_XF:
5181 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
5182 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
5183 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
5184 {
5185 uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0;
5186 if (CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector))
5187 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5188 return hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient);
5189 }
5190
5191 case SVM_EXIT_XSETBV:
5192 {
5193 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_XSETBV))
5194 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5195 return hmR0SvmExitXsetbv(pVCpu, pSvmTransient);
5196 }
5197
5198 case SVM_EXIT_TASK_SWITCH:
5199 {
5200 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_TASK_SWITCH))
5201 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5202 return hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient);
5203 }
5204
5205 case SVM_EXIT_IRET:
5206 {
5207 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IRET))
5208 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5209 return hmR0SvmExitIret(pVCpu, pSvmTransient);
5210 }
5211
5212 case SVM_EXIT_SHUTDOWN:
5213 {
5214 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN))
5215 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5216 return hmR0SvmExitShutdown(pVCpu, pSvmTransient);
5217 }
5218
5219 case SVM_EXIT_VMMCALL:
5220 {
5221 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMMCALL))
5222 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5223 return hmR0SvmExitVmmCall(pVCpu, pSvmTransient);
5224 }
5225
5226 case SVM_EXIT_CLGI:
5227 {
5228 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CLGI))
5229 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5230 return hmR0SvmExitClgi(pVCpu, pSvmTransient);
5231 }
5232
5233 case SVM_EXIT_STGI:
5234 {
5235 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_STGI))
5236 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5237 return hmR0SvmExitStgi(pVCpu, pSvmTransient);
5238 }
5239
5240 case SVM_EXIT_VMLOAD:
5241 {
5242 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMLOAD))
5243 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5244 return hmR0SvmExitVmload(pVCpu, pSvmTransient);
5245 }
5246
5247 case SVM_EXIT_VMSAVE:
5248 {
5249 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMSAVE))
5250 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5251 return hmR0SvmExitVmsave(pVCpu, pSvmTransient);
5252 }
5253
5254 case SVM_EXIT_INVLPGA:
5255 {
5256 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPGA))
5257 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5258 return hmR0SvmExitInvlpga(pVCpu, pSvmTransient);
5259 }
5260
5261 case SVM_EXIT_VMRUN:
5262 {
5263 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))
5264 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5265 return hmR0SvmExitVmrun(pVCpu, pSvmTransient);
5266 }
5267
5268 case SVM_EXIT_RSM:
5269 {
5270 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RSM))
5271 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5272 hmR0SvmSetPendingXcptUD(pVCpu);
5273 return VINF_SUCCESS;
5274 }
5275
5276 case SVM_EXIT_SKINIT:
5277 {
5278 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SKINIT))
5279 NST_GST_VMEXIT_CALL_RET(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
5280 hmR0SvmSetPendingXcptUD(pVCpu);
5281 return VINF_SUCCESS;
5282 }
5283
5284 case SVM_EXIT_NPF:
5285 {
5286 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
5287 return hmR0SvmExitNestedPF(pVCpu, pSvmTransient);
5288 }
5289
5290 case SVM_EXIT_INIT: /* We shouldn't get INIT signals while executing a nested-guest. */
5291 return hmR0SvmExitUnexpected(pVCpu, pSvmTransient);
5292
5293 default:
5294 {
5295 AssertMsgFailed(("hmR0SvmHandleExitNested: Unknown exit code %#x\n", pSvmTransient->u64ExitCode));
5296 pVCpu->hm.s.u32HMError = pSvmTransient->u64ExitCode;
5297 return VERR_SVM_UNKNOWN_EXIT;
5298 }
5299 }
5300 }
5301 }
5302 /* not reached */
5303
5304#undef NST_GST_VMEXIT_CALL_RET
5305}
5306#endif
5307
5308
5309/**
5310 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
5311 *
5312 * @returns Strict VBox status code (informational status codes included).
5313 * @param pVCpu The cross context virtual CPU structure.
5314 * @param pSvmTransient Pointer to the SVM transient structure.
5315 */
5316static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5317{
5318 Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
5319 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
5320
5321#ifdef DEBUG_ramshankar
5322# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) \
5323 do { \
5324 if ((a_fDbg) == 1) \
5325 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \
5326 int rc = a_CallExpr; \
5327 if ((a_fDbg) == 1) \
5328 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
5329 return rc; \
5330 } while (0)
5331#else
5332# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) return a_CallExpr
5333#endif
5334
5335 /*
5336 * The ordering of the case labels is based on most-frequently-occurring #VMEXITs
5337 * for most guests under normal workloads (for some definition of "normal").
5338 */
5339 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
5340 switch (uExitCode)
5341 {
5342 case SVM_EXIT_NPF: VMEXIT_CALL_RET(0, hmR0SvmExitNestedPF(pVCpu, pSvmTransient));
5343 case SVM_EXIT_IOIO: VMEXIT_CALL_RET(0, hmR0SvmExitIOInstr(pVCpu, pSvmTransient));
5344 case SVM_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0SvmExitRdtsc(pVCpu, pSvmTransient));
5345 case SVM_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0SvmExitRdtscp(pVCpu, pSvmTransient));
5346 case SVM_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0SvmExitCpuid(pVCpu, pSvmTransient));
5347 case SVM_EXIT_XCPT_PF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptPF(pVCpu, pSvmTransient));
5348 case SVM_EXIT_MSR: VMEXIT_CALL_RET(0, hmR0SvmExitMsr(pVCpu, pSvmTransient));
5349 case SVM_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0SvmExitMonitor(pVCpu, pSvmTransient));
5350 case SVM_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0SvmExitMwait(pVCpu, pSvmTransient));
5351 case SVM_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0SvmExitHlt(pVCpu, pSvmTransient));
5352
5353 case SVM_EXIT_XCPT_NMI: /* Should not occur, SVM_EXIT_NMI is used instead. */
5354 case SVM_EXIT_INTR:
5355 case SVM_EXIT_NMI: VMEXIT_CALL_RET(0, hmR0SvmExitIntr(pVCpu, pSvmTransient));
5356
5357 case SVM_EXIT_READ_CR0:
5358 case SVM_EXIT_READ_CR3:
5359 case SVM_EXIT_READ_CR4: VMEXIT_CALL_RET(0, hmR0SvmExitReadCRx(pVCpu, pSvmTransient));
5360
5361 case SVM_EXIT_CR0_SEL_WRITE:
5362 case SVM_EXIT_WRITE_CR0:
5363 case SVM_EXIT_WRITE_CR3:
5364 case SVM_EXIT_WRITE_CR4:
5365 case SVM_EXIT_WRITE_CR8: VMEXIT_CALL_RET(0, hmR0SvmExitWriteCRx(pVCpu, pSvmTransient));
5366
5367 case SVM_EXIT_VINTR: VMEXIT_CALL_RET(0, hmR0SvmExitVIntr(pVCpu, pSvmTransient));
5368 case SVM_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0SvmExitPause(pVCpu, pSvmTransient));
5369 case SVM_EXIT_VMMCALL: VMEXIT_CALL_RET(0, hmR0SvmExitVmmCall(pVCpu, pSvmTransient));
5370 case SVM_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpg(pVCpu, pSvmTransient));
5371 case SVM_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0SvmExitWbinvd(pVCpu, pSvmTransient));
5372 case SVM_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0SvmExitInvd(pVCpu, pSvmTransient));
5373 case SVM_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0SvmExitRdpmc(pVCpu, pSvmTransient));
5374 case SVM_EXIT_IRET: VMEXIT_CALL_RET(0, hmR0SvmExitIret(pVCpu, pSvmTransient));
5375 case SVM_EXIT_XCPT_UD: VMEXIT_CALL_RET(0, hmR0SvmExitXcptUD(pVCpu, pSvmTransient));
5376 case SVM_EXIT_XCPT_MF: VMEXIT_CALL_RET(0, hmR0SvmExitXcptMF(pVCpu, pSvmTransient));
5377 case SVM_EXIT_XCPT_DB: VMEXIT_CALL_RET(0, hmR0SvmExitXcptDB(pVCpu, pSvmTransient));
5378 case SVM_EXIT_XCPT_AC: VMEXIT_CALL_RET(0, hmR0SvmExitXcptAC(pVCpu, pSvmTransient));
5379 case SVM_EXIT_XCPT_BP: VMEXIT_CALL_RET(0, hmR0SvmExitXcptBP(pVCpu, pSvmTransient));
5380 case SVM_EXIT_XCPT_GP: VMEXIT_CALL_RET(0, hmR0SvmExitXcptGP(pVCpu, pSvmTransient));
5381 case SVM_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0SvmExitXsetbv(pVCpu, pSvmTransient));
5382 case SVM_EXIT_FERR_FREEZE: VMEXIT_CALL_RET(0, hmR0SvmExitFerrFreeze(pVCpu, pSvmTransient));
5383
5384 default:
5385 {
5386 switch (pSvmTransient->u64ExitCode)
5387 {
5388 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
5389 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
5390 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
5391 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
5392 VMEXIT_CALL_RET(0, hmR0SvmExitReadDRx(pVCpu, pSvmTransient));
5393
5394 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
5395 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
5396 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
5397 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
5398 VMEXIT_CALL_RET(0, hmR0SvmExitWriteDRx(pVCpu, pSvmTransient));
5399
5400 case SVM_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0SvmExitTaskSwitch(pVCpu, pSvmTransient));
5401 case SVM_EXIT_SHUTDOWN: VMEXIT_CALL_RET(0, hmR0SvmExitShutdown(pVCpu, pSvmTransient));
5402
5403 case SVM_EXIT_SMI:
5404 case SVM_EXIT_INIT:
5405 {
5406 /*
5407 * We don't intercept SMIs. As for INIT signals, they really shouldn't ever happen here.
5408 * If one ever does, we want to know about it, so log the exit code and bail.
5409 */
5410 VMEXIT_CALL_RET(0, hmR0SvmExitUnexpected(pVCpu, pSvmTransient));
5411 }
5412
5413#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5414 case SVM_EXIT_CLGI: VMEXIT_CALL_RET(0, hmR0SvmExitClgi(pVCpu, pSvmTransient));
5415 case SVM_EXIT_STGI: VMEXIT_CALL_RET(0, hmR0SvmExitStgi(pVCpu, pSvmTransient));
5416 case SVM_EXIT_VMLOAD: VMEXIT_CALL_RET(0, hmR0SvmExitVmload(pVCpu, pSvmTransient));
5417 case SVM_EXIT_VMSAVE: VMEXIT_CALL_RET(0, hmR0SvmExitVmsave(pVCpu, pSvmTransient));
5418 case SVM_EXIT_INVLPGA: VMEXIT_CALL_RET(0, hmR0SvmExitInvlpga(pVCpu, pSvmTransient));
5419 case SVM_EXIT_VMRUN: VMEXIT_CALL_RET(0, hmR0SvmExitVmrun(pVCpu, pSvmTransient));
5420#else
5421 case SVM_EXIT_CLGI:
5422 case SVM_EXIT_STGI:
5423 case SVM_EXIT_VMLOAD:
5424 case SVM_EXIT_VMSAVE:
5425 case SVM_EXIT_INVLPGA:
5426 case SVM_EXIT_VMRUN:
5427#endif
5428 case SVM_EXIT_RSM:
5429 case SVM_EXIT_SKINIT:
5430 {
5431 hmR0SvmSetPendingXcptUD(pVCpu);
5432 return VINF_SUCCESS;
5433 }
5434
5435#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
5436 case SVM_EXIT_XCPT_DE:
5437 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */
5438 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */
5439 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */
5440 case SVM_EXIT_XCPT_OF:
5441 case SVM_EXIT_XCPT_BR:
5442 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */
5443 case SVM_EXIT_XCPT_NM:
5444 case SVM_EXIT_XCPT_DF:
5445 case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
5446 case SVM_EXIT_XCPT_TS:
5447 case SVM_EXIT_XCPT_NP:
5448 case SVM_EXIT_XCPT_SS:
5449 /* SVM_EXIT_XCPT_GP: */ /* Handled above. */
5450 /* SVM_EXIT_XCPT_PF: */
5451 case SVM_EXIT_XCPT_15: /* Reserved. */
5452 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */
5453 /* SVM_EXIT_XCPT_AC: */ /* Handled above. */
5454 case SVM_EXIT_XCPT_MC:
5455 case SVM_EXIT_XCPT_XF:
5456 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
5457 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
5458 case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
5459 VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient));
5460#endif /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
5461
5462 default:
5463 {
5464 AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#RX64\n", uExitCode));
5465 pVCpu->hm.s.u32HMError = uExitCode;
5466 return VERR_SVM_UNKNOWN_EXIT;
5467 }
5468 }
5469 }
5470 }
5471 /* not reached */
5472#undef VMEXIT_CALL_RET
5473}
5474
5475
5476#ifdef VBOX_STRICT
5477 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
5478# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
5479 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
5480
5481# define HMSVM_ASSERT_PREEMPT_CPUID() \
5482 do \
5483 { \
5484 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
5485 AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
5486 } while (0)
5487
5488# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
5489 do { \
5490 AssertPtr((a_pVCpu)); \
5491 AssertPtr((a_pSvmTransient)); \
5492 Assert(ASMIntAreEnabled()); \
5493 HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
5494 HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
5495 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu)); \
5496 HMSVM_ASSERT_PREEMPT_SAFE((a_pVCpu)); \
5497 if (VMMR0IsLogFlushDisabled((a_pVCpu))) \
5498 HMSVM_ASSERT_PREEMPT_CPUID(); \
5499 } while (0)
5500#else
5501# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pSvmTransient) \
5502 do { \
5503 RT_NOREF2(a_pVCpu, a_pSvmTransient); \
5504 } while (0)
5505#endif
5506
5507
5508/**
5509 * Gets the IEM exception flags for the specified SVM event.
5510 *
5511 * @returns The IEM exception flags.
5512 * @param pEvent Pointer to the SVM event.
5513 *
5514 * @remarks This function currently only constructs flags required for
5515 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
5516 * and CR2 aspects of an exception are not included).
5517 */
5518static uint32_t hmR0SvmGetIemXcptFlags(PCSVMEVENT pEvent)
5519{
5520 uint8_t const uEventType = pEvent->n.u3Type;
5521 uint32_t fIemXcptFlags;
5522 switch (uEventType)
5523 {
5524 case SVM_EVENT_EXCEPTION:
5525 /*
5526 * Only the INT3 and INTO instructions can raise #BP and #OF exceptions, respectively.
5527 * See AMD spec. Table 8-1. "Interrupt Vector Source and Cause".
5528 */
5529 if (pEvent->n.u8Vector == X86_XCPT_BP)
5530 {
5531 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR;
5532 break;
5533 }
5534 if (pEvent->n.u8Vector == X86_XCPT_OF)
5535 {
5536 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_OF_INSTR;
5537 break;
5538 }
5539 /** @todo How do we distinguish ICEBP \#DB from the regular one? */
5540 RT_FALL_THRU();
5541 case SVM_EVENT_NMI:
5542 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
5543 break;
5544
5545 case SVM_EVENT_EXTERNAL_IRQ:
5546 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
5547 break;
5548
5549 case SVM_EVENT_SOFTWARE_INT:
5550 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
5551 break;
5552
5553 default:
5554 fIemXcptFlags = 0;
5555 AssertMsgFailed(("Unexpected event type! uEventType=%#x uVector=%#x", uEventType, pEvent->n.u8Vector));
5556 break;
5557 }
5558 return fIemXcptFlags;
5559}
5560
5561
5562/**
5563 * Handle a condition that occurred while delivering an event through the guest
5564 * IDT.
5565 *
5566 * @returns VBox status code (informational error codes included).
5567 * @retval VINF_SUCCESS if we should continue handling the \#VMEXIT.
5568 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought to
5569 * continue execution of the guest which will deliver the \#DF.
5570 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5571 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5572 *
5573 * @param pVCpu The cross context virtual CPU structure.
5574 * @param pSvmTransient Pointer to the SVM transient structure.
5575 *
5576 * @remarks No-long-jump zone!!!
5577 */
5578static int hmR0SvmCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5579{
5580 int rc = VINF_SUCCESS;
5581 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
5582 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2);
5583
5584 Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",
5585 pVmcb->ctrl.ExitIntInfo.u, !!pVmcb->ctrl.ExitIntInfo.n.u1Valid, !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid,
5586 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, pVmcb->ctrl.ExitIntInfo.n.u3Type, pVmcb->ctrl.ExitIntInfo.n.u8Vector));
5587
5588 /*
5589 * The EXITINTINFO (if valid) contains the prior event (IDT vector) that was being
5590 * delivered to the guest when the intercepted #VMEXIT (exit vector) occurred.
5591 *
5592 * See AMD spec. 15.7.3 "EXITINFO Pseudo-Code".
5593 */
5594 if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
5595 {
5596 IEMXCPTRAISE enmRaise;
5597 IEMXCPTRAISEINFO fRaiseInfo;
5598 bool const fExitIsHwXcpt = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0 <= SVM_EXIT_XCPT_31 - SVM_EXIT_XCPT_0;
5599 uint8_t const uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
5600 if (fExitIsHwXcpt)
5601 {
5602 uint8_t const uExitVector = pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0;
5603 uint32_t const fIdtVectorFlags = hmR0SvmGetIemXcptFlags(&pVmcb->ctrl.ExitIntInfo);
5604 uint32_t const fExitVectorFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
5605 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5606 }
5607 else
5608 {
5609 /*
5610 * If delivery of an event caused a #VMEXIT that is not an exception (e.g. #NPF)
5611 * then we end up here.
5612 *
5613 * If the event was:
5614 * - a software interrupt, we can re-execute the instruction which will
5615 * regenerate the event.
5616 * - an NMI, we need to clear NMI blocking and re-inject the NMI.
5617 * - a hardware exception or external interrupt, we re-inject it.
5618 */
5619 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5620 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_SOFTWARE_INT)
5621 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5622 else
5623 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5624 }
5625
5626 switch (enmRaise)
5627 {
5628 case IEMXCPTRAISE_CURRENT_XCPT:
5629 case IEMXCPTRAISE_PREV_EVENT:
5630 {
5631 /* For software interrupts, we shall re-execute the instruction. */
5632 if (!(fRaiseInfo & IEMXCPTRAISEINFO_SOFT_INT_XCPT))
5633 {
5634 RTGCUINTPTR GCPtrFaultAddress = 0;
5635
5636 /* If we are re-injecting an NMI, clear NMI blocking. */
5637 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
5638 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5639
5640 /* Determine a vectoring #PF condition, see comment in hmR0SvmExitXcptPF(). */
5641 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5642 {
5643 pSvmTransient->fVectoringPF = true;
5644 Log4Func(("IDT: Pending vectoring #PF due to delivery of Ext-Int/NMI. uCR2=%#RX64\n",
5645 pVCpu->cpum.GstCtx.cr2));
5646 }
5647 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION
5648 && uIdtVector == X86_XCPT_PF)
5649 {
5650 /*
5651 * If the previous exception was a #PF, we need to recover the CR2 value.
5652 * This can't happen with shadow paging.
5653 */
5654 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
5655 }
5656
5657 /*
5658 * Without nested paging, when uExitVector is #PF, the CR2 value will be updated from
5659 * the VMCB's exit-info fields if it's a guest #PF; see hmR0SvmExitXcptPF().
5660 */
5661 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
5662 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflect);
5663 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, GCPtrFaultAddress);
5664
5665 Log4Func(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32 GCPtrFaultAddress=%#RX64\n",
5666 pVmcb->ctrl.ExitIntInfo.u, RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid),
5667 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, GCPtrFaultAddress));
5668 }
5669 break;
5670 }
5671
5672 case IEMXCPTRAISE_REEXEC_INSTR:
5673 {
5674 Assert(rc == VINF_SUCCESS);
5675 break;
5676 }
5677
5678 case IEMXCPTRAISE_DOUBLE_FAULT:
5679 {
5680 /*
5681 * Determine a vectoring double #PF condition. Used later, when PGM evaluates
5682 * the second #PF as a guest #PF (and not a shadow #PF) and needs to be
5683 * converted into a #DF.
5684 */
5685 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
5686 {
5687 Log4Func(("IDT: Pending vectoring double #PF uCR2=%#RX64\n", pVCpu->cpum.GstCtx.cr2));
5688 pSvmTransient->fVectoringDoublePF = true;
5689 Assert(rc == VINF_SUCCESS);
5690 }
5691 else
5692 {
5693 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectConvertDF);
5694 hmR0SvmSetPendingXcptDF(pVCpu);
5695 rc = VINF_HM_DOUBLE_FAULT;
5696 }
5697 break;
5698 }
5699
5700 case IEMXCPTRAISE_TRIPLE_FAULT:
5701 {
5702 rc = VINF_EM_RESET;
5703 break;
5704 }
5705
5706 case IEMXCPTRAISE_CPU_HANG:
5707 {
5708 rc = VERR_EM_GUEST_CPU_HANG;
5709 break;
5710 }
5711
5712 default:
5713 AssertMsgFailedBreakStmt(("Bogus enmRaise value: %d (%#x)\n", enmRaise, enmRaise), rc = VERR_SVM_IPE_2);
5714 }
5715 }
5716 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
5717 return rc;
5718}
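
/*
 * A hypothetical walk-through of the above: the guest takes a #PF and, while delivering it
 * through the IDT, hits a second #PF. EXITINTINFO then holds the first #PF (IDT vector 14),
 * the exit code is SVM_EXIT_XCPT_PF (exit vector 14), and IEMEvaluateRecursiveXcpt()
 * reports IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_PF. In that case we only mark
 * fVectoringDoublePF and leave the decision to the #PF exit handler (PGM may still resolve
 * the second fault as a shadow #PF); any other double-fault combination queues the #DF
 * right away.
 */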
5719
5720
5721/**
5722 * Advances the guest RIP by the number of bytes specified in @a cb.
5723 *
5724 * @param pVCpu The cross context virtual CPU structure.
5725 * @param cb RIP increment value in bytes.
5726 */
5727DECLINLINE(void) hmR0SvmAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
5728{
5729 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5730 pCtx->rip += cb;
5731
5732 /* Update interrupt shadow. */
5733 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5734 && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
5735 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5736}
5737
5738
5739/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5740/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
5741/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
5742
5743/** @name \#VMEXIT handlers.
5744 * @{
5745 */
5746
5747/**
5748 * \#VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
5749 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
5750 */
5751HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5752{
5753 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
5754
5755 if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
5756 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
5757 else if (pSvmTransient->u64ExitCode == SVM_EXIT_INTR)
5758 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
5759
5760 /*
5761 * AMD-V has no preemption timer, and the generic periodic preemption timer has no way
5762 * of telling us -before- the timer fires whether the current interrupt is our own timer
5763 * or some other host interrupt. We also cannot examine what interrupt it is until the
5764 * host actually takes the interrupt.
5765 *
5766 * Going back to executing guest code here unconditionally causes random scheduling
5767 * problems (observed on an AMD Phenom 9850 Quad-Core on Windows 64-bit host).
5768 */
5769 return VINF_EM_RAW_INTERRUPT;
5770}
5771
5772
5773/**
5774 * \#VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional \#VMEXIT.
5775 */
5776HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5777{
5778 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
5779
5780 VBOXSTRICTRC rcStrict;
5781 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
5782 if (fSupportsNextRipSave)
5783 {
5784 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
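        /* With NRIP_SAVE the CPU deposits the next RIP in the VMCB, so the instruction
           length is simply u64NextRIP - RIP and IEM need not decode the instruction. */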
5785 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
5786 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
5787 rcStrict = IEMExecDecodedWbinvd(pVCpu, cbInstr);
5788 }
5789 else
5790 {
5791 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
5792 rcStrict = IEMExecOne(pVCpu);
5793 }
5794
5795 if (rcStrict == VINF_IEM_RAISED_XCPT)
5796 {
5797 rcStrict = VINF_SUCCESS;
5798 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
5799 }
5800 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
5801 return rcStrict;
5802}
5803
5804
5805/**
5806 * \#VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional \#VMEXIT.
5807 */
5808HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5809{
5810 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
5811
5812 VBOXSTRICTRC rcStrict;
5813 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
5814 if (fSupportsNextRipSave)
5815 {
5816 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
5817 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
5818 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
5819 rcStrict = IEMExecDecodedInvd(pVCpu, cbInstr);
5820 }
5821 else
5822 {
5823 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
5824 rcStrict = IEMExecOne(pVCpu);
5825 }
5826
5827 if (rcStrict == VINF_IEM_RAISED_XCPT)
5828 {
5829 rcStrict = VINF_SUCCESS;
5830 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
5831 }
5832 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
5833 return rcStrict;
5834}
5835
5836
5837/**
5838 * \#VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional \#VMEXIT.
5839 */
5840HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5841{
5842 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
5843
5844 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
5845 VBOXSTRICTRC rcStrict;
5846 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
5847 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
5848 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
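    /* Roughly speaking: a non-NULL exit record means this RIP has been exiting frequently
       enough that EM wants to probe/optimize it via EMHistoryExec() below. */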
5849 if (!pExitRec)
5850 {
5851 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
5852 if (fSupportsNextRipSave)
5853 {
5854 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
5855 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
5856 rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr);
5857 }
5858 else
5859 {
5860 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
5861 rcStrict = IEMExecOne(pVCpu);
5862 }
5863
5864 if (rcStrict == VINF_IEM_RAISED_XCPT)
5865 {
5866 rcStrict = VINF_SUCCESS;
5867 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
5868 }
5869 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
5870 }
5871 else
5872 {
5873 /*
5874 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
5875 */
5876 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
5877
5878 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
5879 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
5880
5881 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
5882
5883 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
5884 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
5885 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
5886 }
5887 return rcStrict;
5888}
5889
5890
5891/**
5892 * \#VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional \#VMEXIT.
5893 */
5894HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5895{
5896 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
5897
5898 VBOXSTRICTRC rcStrict;
5899 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
5900 if (fSupportsNextRipSave)
5901 {
5902 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
5903 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
5904 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
5905 rcStrict = IEMExecDecodedRdtsc(pVCpu, cbInstr);
5906 }
5907 else
5908 {
5909 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
5910 rcStrict = IEMExecOne(pVCpu);
5911 }
5912
5913 if (rcStrict == VINF_SUCCESS)
5914 pSvmTransient->fUpdateTscOffsetting = true;
5915 else if (rcStrict == VINF_IEM_RAISED_XCPT)
5916 {
5917 rcStrict = VINF_SUCCESS;
5918 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
5919 }
5920 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
5921 return rcStrict;
5922}
5923
5924
5925/**
5926 * \#VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional \#VMEXIT.
5927 */
5928HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5929{
5930 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
5931
5932 VBOXSTRICTRC rcStrict;
5933 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
5934 if (fSupportsNextRipSave)
5935 {
5936 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
5937 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
5938 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
5939 rcStrict = IEMExecDecodedRdtscp(pVCpu, cbInstr);
5940 }
5941 else
5942 {
5943 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
5944 rcStrict = IEMExecOne(pVCpu);
5945 }
5946
5947 if (rcStrict == VINF_SUCCESS)
5948 pSvmTransient->fUpdateTscOffsetting = true;
5949 else if (rcStrict == VINF_IEM_RAISED_XCPT)
5950 {
5951 rcStrict = VINF_SUCCESS;
5952 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
5953 }
5954 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
5955 return rcStrict;
5956}
5957
5958
5959/**
5960 * \#VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional \#VMEXIT.
5961 */
5962HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5963{
5964 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
5965
5966 VBOXSTRICTRC rcStrict;
5967 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
5968 if (fSupportsNextRipSave)
5969 {
5970 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
5971 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
5972 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
5973 rcStrict = IEMExecDecodedRdpmc(pVCpu, cbInstr);
5974 }
5975 else
5976 {
5977 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
5978 rcStrict = IEMExecOne(pVCpu);
5979 }
5980
5981 if (rcStrict == VINF_IEM_RAISED_XCPT)
5982 {
5983 rcStrict = VINF_SUCCESS;
5984 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
5985 }
5986 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
5987 return rcStrict;
5988}
5989
5990
5991/**
5992 * \#VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional \#VMEXIT.
5993 */
5994HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
5995{
5996 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
5997 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
5998
5999 VBOXSTRICTRC rcStrict;
6000 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
6001 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6002 if ( fSupportsDecodeAssists
6003 && fSupportsNextRipSave)
6004 {
6005 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
6006 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6007 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6008 RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1;
6009 rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
6010 }
6011 else
6012 {
6013 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6014 rcStrict = IEMExecOne(pVCpu);
6015 }
6016
6017 if (rcStrict == VINF_IEM_RAISED_XCPT)
6018 {
6019 rcStrict = VINF_SUCCESS;
6020 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6021 }
6022 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6023 return VBOXSTRICTRC_VAL(rcStrict);
6024}
6025
6026
6027/**
6028 * \#VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional \#VMEXIT.
6029 */
6030HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6031{
6032 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6033
6034 VBOXSTRICTRC rcStrict;
6035 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6036 if (fSupportsNextRipSave)
6037 {
6038 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
6039 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6040 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6041 rcStrict = IEMExecDecodedHlt(pVCpu, cbInstr);
6042 }
6043 else
6044 {
6045 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6046 rcStrict = IEMExecOne(pVCpu);
6047 }
6048
6049 if ( rcStrict == VINF_EM_HALT
6050 || rcStrict == VINF_SUCCESS)
6051 rcStrict = EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx) ? VINF_SUCCESS : VINF_EM_HALT;
6052 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6053 {
6054 rcStrict = VINF_SUCCESS;
6055 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6056 }
6057 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6058 if (rcStrict != VINF_SUCCESS)
6059 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
6060 return VBOXSTRICTRC_VAL(rcStrict);
6061}
6062
6063
6064/**
6065 * \#VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional \#VMEXIT.
6066 */
6067HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6068{
6069 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6070
6071 /*
6072 * If the instruction length supplied by the CPU is 3 bytes, we can be certain that no
6073 * segment override prefix is present (and thus use the default segment DS). Otherwise, a
6074 * segment override prefix or other prefixes might be used, in which case we fall back to
6075 * IEMExecOne() to figure it out.
6076 */
6077 VBOXSTRICTRC rcStrict;
6078 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6079 uint8_t const cbInstr = hmR0SvmSupportsNextRipSave(pVCpu) ? pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip : 0;
6080 if (cbInstr == 3)
6081 {
6082 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
6083 rcStrict = IEMExecDecodedMonitor(pVCpu, cbInstr);
6084 }
6085 else
6086 {
6087 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6088 rcStrict = IEMExecOne(pVCpu);
6089 }
6090
6091 if (rcStrict == VINF_IEM_RAISED_XCPT)
6092 {
6093 rcStrict = VINF_SUCCESS;
6094 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6095 }
6096 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6097 return rcStrict;
6098}
6099
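/*
 * For reference, the property the 3-byte check above relies on: MONITOR without
 * any prefixes encodes as exactly 0F 01 C8, so a longer NRIP-derived length
 * implies prefixes (e.g. a segment override). A sketch of the check, with a
 * hypothetical helper name; the raw instruction bytes are assumed fetched:
 */
#if 0
static bool hmR0SvmSketchIsPlainMonitor(uint8_t const *pabInstr, uint8_t cbInstr)
{
    return cbInstr == 3
        && pabInstr[0] == 0x0f
        && pabInstr[1] == 0x01
        && pabInstr[2] == 0xc8;   /* MWAIT would be 0F 01 C9. */
}
#endif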
6100
6101/**
6102 * \#VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional \#VMEXIT.
6103 */
6104HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6105{
6106 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6107
6108 VBOXSTRICTRC rcStrict;
6109 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6110 if (fSupportsNextRipSave)
6111 {
6112 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
6113 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6114 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6115 rcStrict = IEMExecDecodedMwait(pVCpu, cbInstr);
6116 }
6117 else
6118 {
6119 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6120 rcStrict = IEMExecOne(pVCpu);
6121 }
6122
6123 if ( rcStrict == VINF_EM_HALT
6124 && EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
6125 rcStrict = VINF_SUCCESS;
6126 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6127 {
6128 rcStrict = VINF_SUCCESS;
6129 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6130 }
6131 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6132 return rcStrict;
6133}
6134
6135
6136/**
6137 * \#VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN). Conditional
6138 * \#VMEXIT.
6139 */
6140HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6141{
6142 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6143 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6144 return VINF_EM_RESET;
6145}
6146
6147
6148/**
6149 * \#VMEXIT handler for unexpected exits. Conditional \#VMEXIT.
6150 */
6151HMSVM_EXIT_DECL hmR0SvmExitUnexpected(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6152{
6153 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6154 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6155 AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pSvmTransient->u64ExitCode,
6156 pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2));
6157 RT_NOREF(pVmcb);
6158 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
6159 return VERR_SVM_UNEXPECTED_EXIT;
6160}
6161
6162
6163/**
6164 * \#VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional \#VMEXIT.
6165 */
6166HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6167{
6168 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6169
6170 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6171 Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
6172#ifdef VBOX_WITH_STATISTICS
6173 switch (pSvmTransient->u64ExitCode)
6174 {
6175 case SVM_EXIT_READ_CR0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
6176 case SVM_EXIT_READ_CR2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
6177 case SVM_EXIT_READ_CR3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
6178 case SVM_EXIT_READ_CR4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
6179 case SVM_EXIT_READ_CR8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
6180 }
6181#endif
6182
6183 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
6184 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6185 if ( fSupportsDecodeAssists
6186 && fSupportsNextRipSave)
6187 {
6188 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6189 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
6190 if (fMovCRx)
6191 {
6192 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR_MASK
6193 | CPUMCTX_EXTRN_APIC_TPR);
6194 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
6195 uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0;
6196 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
6197 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
6198 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6199 return VBOXSTRICTRC_VAL(rcStrict);
6200 }
6201 /* else: SMSW instruction, fall back below to IEM for this. */
6202 }
6203
6204 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6205 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6206 AssertMsg( rcStrict == VINF_SUCCESS
6207 || rcStrict == VINF_PGM_SYNC_CR3
6208 || rcStrict == VINF_IEM_RAISED_XCPT,
6209 ("hmR0SvmExitReadCRx: IEMExecOne failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6210 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
6211 if (rcStrict == VINF_IEM_RAISED_XCPT)
6212 {
6213 rcStrict = VINF_SUCCESS;
6214 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6215 }
6216 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6217 return rcStrict;
6218}
6219
6220
6221/**
6222 * \#VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional \#VMEXIT.
6223 */
6224HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6225{
6226 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6227
6228 uint64_t const uExitCode = pSvmTransient->u64ExitCode;
6229 uint8_t const iCrReg = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : (uExitCode - SVM_EXIT_WRITE_CR0);
6230 Assert(iCrReg <= 15);
6231
6232 VBOXSTRICTRC rcStrict = VERR_SVM_IPE_5;
6233 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6234 bool fDecodedInstr = false;
6235 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu);
6236 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6237 if ( fSupportsDecodeAssists
6238 && fSupportsNextRipSave)
6239 {
6240 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6241 bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
6242 if (fMovCRx)
6243 {
6244 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
6245 | CPUMCTX_EXTRN_APIC_TPR);
6246 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
6247 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
6248 Log4Func(("Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg));
6249 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
6250 fDecodedInstr = true;
6251 }
6252 /* else: LMSW or CLTS instruction, fall back below to IEM for this. */
6253 }
6254
6255 if (!fDecodedInstr)
6256 {
6257 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6258 Log4Func(("iCrReg=%#x\n", iCrReg));
6259 rcStrict = IEMExecOne(pVCpu);
6260 if (RT_UNLIKELY( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
6261 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED))
6262 rcStrict = VERR_EM_INTERPRETER;
6263 }
6264
6265 if (rcStrict == VINF_SUCCESS)
6266 {
6267 switch (iCrReg)
6268 {
6269 case 0:
6270 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
6271 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
6272 break;
6273
6274 case 2:
6275 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2);
6276 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
6277 break;
6278
6279 case 3:
6280 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR3);
6281 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
6282 break;
6283
6284 case 4:
6285 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
6286 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
6287 break;
6288
6289 case 8:
6290 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
6291 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
6292 break;
6293
6294 default:
6295 {
6296 AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x\n",
6297 pSvmTransient->u64ExitCode, iCrReg));
6298 break;
6299 }
6300 }
6301 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6302 }
6303 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6304 {
6305 rcStrict = VINF_SUCCESS;
6306 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6307 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6308 }
6309 else
6310 Assert(rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_PGM_SYNC_CR3);
6311 return rcStrict;
6312}
6313
6314
6315/**
6316 * \#VMEXIT helper for read MSRs, see hmR0SvmExitMsr.
6317 *
6318 * @returns Strict VBox status code.
6319 * @param pVCpu The cross context virtual CPU structure.
6320 * @param pVmcb Pointer to the VM control block.
6321 */
6322static VBOXSTRICTRC hmR0SvmExitReadMsr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
6323{
6324 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
6325 Log4Func(("idMsr=%#RX32\n", pVCpu->cpum.GstCtx.ecx));
6326
6327 VBOXSTRICTRC rcStrict;
6328 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6329 if (fSupportsNextRipSave)
6330 {
6331 /** @todo Optimize this: Only retrieve the MSR bits we need here. CPUMAllMsrs.cpp
6332 * can ask for what it needs instead of using CPUMCTX_EXTRN_ALL_MSRS. */
6333 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
6334 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6335 rcStrict = IEMExecDecodedRdmsr(pVCpu, cbInstr);
6336 }
6337 else
6338 {
6339 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
6340 rcStrict = IEMExecOne(pVCpu);
6341 }
6342
6343 AssertMsg( rcStrict == VINF_SUCCESS
6344 || rcStrict == VINF_IEM_RAISED_XCPT
6345 || rcStrict == VINF_CPUM_R3_MSR_READ,
6346 ("hmR0SvmExitReadMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6347
6348 if (rcStrict == VINF_IEM_RAISED_XCPT)
6349 {
6350 rcStrict = VINF_SUCCESS;
6351 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6352 }
6353 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6354 return rcStrict;
6355}
6356
6357
6358/**
6359 * \#VMEXIT helper for write MSRs, see hmR0SvmExitMsr.
6360 *
6361 * @returns Strict VBox status code.
6362 * @param pVCpu The cross context virtual CPU structure.
6363 * @param pVmcb Pointer to the VM control block.
6364 * @param pSvmTransient Pointer to the SVM-transient structure.
6365 */
6366static VBOXSTRICTRC hmR0SvmExitWriteMsr(PVMCPUCC pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient)
6367{
6368 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6369 uint32_t const idMsr = pCtx->ecx;
6370 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
6371 Log4Func(("idMsr=%#RX32\n", idMsr));
6372
6373 /*
6374 * Handle TPR patching MSR writes.
6375 * We utilitize the LSTAR MSR for patching.
6376 * We utilize the LSTAR MSR for patching.
6377 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6378 if ( idMsr == MSR_K8_LSTAR
6379 && pVCpu->CTX_SUFF(pVM)->hm.s.fTprPatchingActive)
6380 {
6381 unsigned cbInstr;
6382 if (fSupportsNextRipSave)
6383 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6384 else
6385 {
6386 PDISCPUSTATE pDis = &pVCpu->hmr0.s.svm.DisState;
6387 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, &cbInstr);
6388 if ( rc == VINF_SUCCESS
6389 && pDis->pCurInstr->uOpcode == OP_WRMSR)
6390 Assert(cbInstr > 0);
6391 else
6392 cbInstr = 0;
6393 }
6394
6395 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
6396 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
6397 {
6398 int rc = APICSetTpr(pVCpu, pCtx->eax & 0xff);
6399 AssertRCReturn(rc, rc);
6400 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
6401 }
6402
6403 int rc = VINF_SUCCESS;
6404 hmR0SvmAdvanceRip(pVCpu, cbInstr);
6405 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6406 return rc;
6407 }
6408
6409 /*
6410 * Handle regular MSR writes.
6411 */
6412 VBOXSTRICTRC rcStrict;
6413 if (fSupportsNextRipSave)
6414 {
6415 /** @todo Optimize this: We don't need to get much of the MSR state here
6416 * since we're only updating. CPUMAllMsrs.cpp can ask for what it needs and
6417 * clear the applicable extern flags. */
6418 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
6419 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
6420 rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
6421 }
6422 else
6423 {
6424 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
6425 rcStrict = IEMExecOne(pVCpu);
6426 }
6427
6428 AssertMsg( rcStrict == VINF_SUCCESS
6429 || rcStrict == VINF_IEM_RAISED_XCPT
6430 || rcStrict == VINF_CPUM_R3_MSR_WRITE,
6431 ("hmR0SvmExitWriteMsr: Unexpected status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6432
6433 if (rcStrict == VINF_SUCCESS)
6434 {
6435 /* If this is an X2APIC WRMSR access, update the APIC TPR state. */
6436 if ( idMsr >= MSR_IA32_X2APIC_START
6437 && idMsr <= MSR_IA32_X2APIC_END)
6438 {
6439 /*
6440 * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest().
6441 * When full APIC register virtualization is implemented we'll have to make sure
6442 * APIC state is saved from the VMCB before IEM changes it.
6443 */
6444 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
6445 }
6446 else
6447 {
6448 switch (idMsr)
6449 {
6450 case MSR_IA32_TSC: pSvmTransient->fUpdateTscOffsetting = true; break;
6451 case MSR_K6_EFER: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR); break;
6452 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
6453 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
6454 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
6455 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
6456 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
6457 }
6458 }
6459 }
6460 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6461 {
6462 rcStrict = VINF_SUCCESS;
6463 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6464 }
6465 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6466 return rcStrict;
6467}
6468
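/*
 * A condensed sketch of the patched-TPR path handled above, under the
 * assumption that TPR patching replaced the guest's APIC TPR store with a
 * WRMSR to MSR_K8_LSTAR carrying the TPR value in the low 8 bits of EAX.
 * Hypothetical helper name; RIP advancing and TPR caching are omitted.
 */
#if 0
static int hmR0SvmSketchHandlePatchedTprWrite(PVMCPUCC pVCpu)
{
    uint8_t const u8Tpr = pVCpu->cpum.GstCtx.eax & 0xff;    /* The TPR value travels in AL. */
    int rc = APICSetTpr(pVCpu, u8Tpr);                      /* Forward it to the virtual APIC. */
    if (RT_SUCCESS(rc))
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
    return rc;
}
#endif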
6469
6470/**
6471 * \#VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional
6472 * \#VMEXIT.
6473 */
6474HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6475{
6476 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6477
6478 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6479 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ)
6480 return hmR0SvmExitReadMsr(pVCpu, pVmcb);
6481
6482 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE);
6483 return hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient);
6484}
6485
6486
6487/**
6488 * \#VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional \#VMEXIT.
6489 */
6490HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6491{
6492 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6493 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6494
6495 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
6496
6497 /** @todo Stepping with nested-guest. */
6498 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6499 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
6500 {
6501 /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
6502 if (pSvmTransient->fWasGuestDebugStateActive)
6503 {
6504 AssertMsgFailed(("hmR0SvmExitReadDRx: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
6505 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
6506 return VERR_SVM_UNEXPECTED_EXIT;
6507 }
6508
6509 /*
6510 * Lazy DR0-3 loading.
6511 */
6512 if (!pSvmTransient->fWasHyperDebugStateActive)
6513 {
6514 Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
6515 Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
6516
6517 /* Don't intercept DRx read and writes. */
6518 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
6519 pVmcb->ctrl.u16InterceptRdDRx = 0;
6520 pVmcb->ctrl.u16InterceptWrDRx = 0;
6521 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
6522
6523 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
6524 VMMRZCallRing3Disable(pVCpu);
6525 HM_DISABLE_PREEMPT(pVCpu);
6526
6527 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
6528 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
6529 Assert(CPUMIsGuestDebugStateActive(pVCpu));
6530
6531 HM_RESTORE_PREEMPT();
6532 VMMRZCallRing3Enable(pVCpu);
6533
6534 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
6535 return VINF_SUCCESS;
6536 }
6537 }
6538
6539 /*
6540 * Interpret the read/writing of DRx.
6541 */
6542 /** @todo Decode assist. */
6543 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
6544 Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
6545 if (RT_LIKELY(rc == VINF_SUCCESS))
6546 {
6547 /* Not necessary for read accesses, but it doesn't hurt for now; will be fixed with decode assists. */
6548 /** @todo CPUM should set this flag! */
6549 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
6550 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
6551 }
6552 else
6553 Assert(rc == VERR_EM_INTERPRETER);
6554 return rc;
6555}
6556
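/*
 * The lazy DRx-switch pattern implemented above, reduced to its three steps
 * (hypothetical helper name; the preemption/longjmp protection done above is
 * assumed to be in place around step 2):
 */
#if 0
static void hmR0SvmSketchLazyLoadGuestDrx(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
{
    /* 1. Stop intercepting MOV DRx in both directions. */
    pVmcb->ctrl.u16InterceptRdDRx = 0;
    pVmcb->ctrl.u16InterceptWrDRx = 0;
    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

    /* 2. Save the host debug state and load the guest's DR0-3 onto the CPU. */
    CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);

    /* 3. Returning VINF_SUCCESS re-runs the trapping MOV DRx natively. */
}
#endif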
6557
6558/**
6559 * \#VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional \#VMEXIT.
6560 */
6561HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6562{
6563 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6564 /* For now it's the same since we interpret the instruction anyway. Will change when use of Decode Assists is implemented. */
6565 VBOXSTRICTRC rc = hmR0SvmExitReadDRx(pVCpu, pSvmTransient);
6566 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
6567 STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead);
6568 return rc;
6569}
6570
6571
6572/**
6573 * \#VMEXIT handler for XCRx write (SVM_EXIT_XSETBV). Conditional \#VMEXIT.
6574 */
6575HMSVM_EXIT_DECL hmR0SvmExitXsetbv(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6576{
6577 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6578 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
6579
6580 /** @todo decode assists... */
6581 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
6582 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
6583 {
6584 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6585 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
6586 Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], fLoadSaveGuestXcr0, pCtx->cr4));
6587 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
6588 {
6589 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
6590 hmR0SvmUpdateVmRunFunction(pVCpu);
6591 }
6592 }
6593 else if (rcStrict == VINF_IEM_RAISED_XCPT)
6594 {
6595 rcStrict = VINF_SUCCESS;
6596 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
6597 }
6598 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6599 return rcStrict;
6600}
6601
6602
6603/**
6604 * \#VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional \#VMEXIT.
6605 */
6606HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6607{
6608 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6609 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK);
6610
6611 /* I/O operation lookup arrays. */
6612 static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
6613 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
6614 the result (in AL/AX/EAX). */
6615 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6616 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6617 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6618
6619 Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
6620
6621 /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
6622 SVMIOIOEXITINFO IoExitInfo;
6623 IoExitInfo.u = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
6624 uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
6625 uint32_t cbValue = s_aIOSize[uIOWidth];
6626 uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
6627
6628 if (RT_UNLIKELY(!cbValue))
6629 {
6630 AssertMsgFailed(("hmR0SvmExitIOInstr: Invalid IO operation. uIOWidth=%u\n", uIOWidth));
6631 return VERR_EM_INTERPRETER;
6632 }
6633
6634 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
6635 VBOXSTRICTRC rcStrict;
6636 PCEMEXITREC pExitRec = NULL;
6637 if ( !pVCpu->hm.s.fSingleInstruction
6638 && !pVCpu->cpum.GstCtx.eflags.Bits.u1TF)
6639 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
6640 !IoExitInfo.n.u1Str
6641 ? IoExitInfo.n.u1Type == SVM_IOIO_READ
6642 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
6643 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
6644 : IoExitInfo.n.u1Type == SVM_IOIO_READ
6645 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
6646 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
6647 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
6648 if (!pExitRec)
6649 {
6650 bool fUpdateRipAlready = false;
6651 if (IoExitInfo.n.u1Str)
6652 {
6653 /* INS/OUTS - I/O String instruction. */
6654 /** @todo Huh? why can't we use the segment prefix information given by AMD-V
6655 * in EXITINFO1? Investigate once this thing is up and running. */
6656 Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, IoExitInfo.n.u16Port, cbValue,
6657 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 'w' : 'r'));
6658 AssertReturn(pCtx->dx == IoExitInfo.n.u16Port, VERR_SVM_IPE_2);
6659 static IEMMODE const s_aenmAddrMode[8] =
6660 {
6661 (IEMMODE)-1, IEMMODE_16BIT, IEMMODE_32BIT, (IEMMODE)-1, IEMMODE_64BIT, (IEMMODE)-1, (IEMMODE)-1, (IEMMODE)-1
6662 };
6663 IEMMODE enmAddrMode = s_aenmAddrMode[(IoExitInfo.u >> 7) & 0x7];
6664 if (enmAddrMode != (IEMMODE)-1)
6665 {
6666 uint64_t cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
6667 if (cbInstr <= 15 && cbInstr >= 1)
6668 {
6669 Assert(cbInstr >= 1U + IoExitInfo.n.u1Rep);
6670 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
6671 {
6672 /* Don't know exactly how to detect whether u3Seg is valid, currently
6673 only enabling it for Bulldozer and later with NRIP. OS/2 broke on
6674 2384 Opterons when only checking NRIP. */
6675 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
6676 if ( fSupportsNextRipSave
6677 && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
6678 {
6679 AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_DS || cbInstr > 1U + IoExitInfo.n.u1Rep,
6680 ("u3Seg=%d cbInstr=%d u1REP=%d", IoExitInfo.n.u3Seg, cbInstr, IoExitInfo.n.u1Rep));
6681 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
6682 IoExitInfo.n.u3Seg, true /*fIoChecked*/);
6683 }
6684 else if (cbInstr == 1U + IoExitInfo.n.u1Rep)
6685 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
6686 X86_SREG_DS, true /*fIoChecked*/);
6687 else
6688 rcStrict = IEMExecOne(pVCpu);
6689 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
6690 }
6691 else
6692 {
6693 AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_ES /*=0*/, ("%#x\n", IoExitInfo.n.u3Seg));
6694 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
6695 true /*fIoChecked*/);
6696 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
6697 }
6698 }
6699 else
6700 {
6701 AssertMsgFailed(("rip=%RX64 nrip=%#RX64 cbInstr=%#RX64\n", pCtx->rip, pVmcb->ctrl.u64ExitInfo2, cbInstr));
6702 rcStrict = IEMExecOne(pVCpu);
6703 }
6704 }
6705 else
6706 {
6707 AssertMsgFailed(("IoExitInfo=%RX64\n", IoExitInfo.u));
6708 rcStrict = IEMExecOne(pVCpu);
6709 }
6710 fUpdateRipAlready = true;
6711 }
6712 else
6713 {
6714 /* IN/OUT - I/O instruction. */
6715 Assert(!IoExitInfo.n.u1Rep);
6716
6717 uint8_t const cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
6718 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
6719 {
6720 rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
6721 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
6722 && !pCtx->eflags.Bits.u1TF)
6723 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, IoExitInfo.n.u16Port, cbInstr, cbValue, pCtx->eax & uAndVal);
6724 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
6725 }
6726 else
6727 {
6728 uint32_t u32Val = 0;
6729 rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
6730 if (IOM_SUCCESS(rcStrict))
6731 {
6732 /* Save result of I/O IN instr. in AL/AX/EAX. */
6733 /** @todo r=bird: 32-bit op size should clear high bits of rax! */
6734 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
6735 }
6736 else if ( rcStrict == VINF_IOM_R3_IOPORT_READ
6737 && !pCtx->eflags.Bits.u1TF)
6738 rcStrict = EMRZSetPendingIoPortRead(pVCpu, IoExitInfo.n.u16Port, cbInstr, cbValue);
6739
6740 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
6741 }
6742 }
6743
6744 if (IOM_SUCCESS(rcStrict))
6745 {
6746 /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
6747 if (!fUpdateRipAlready)
6748 pCtx->rip = pVmcb->ctrl.u64ExitInfo2;
6749
6750 /*
6751 * If any I/O breakpoints are armed, we need to check if one triggered
6752 * and take appropriate action.
6753 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
6754 */
6755 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
6756 * execution engines about whether hyper BPs and such are pending. */
6757 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);
6758 uint32_t const uDr7 = pCtx->dr[7];
6759 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
6760 && X86_DR7_ANY_RW_IO(uDr7)
6761 && (pCtx->cr4 & X86_CR4_DE))
6762 || DBGFBpIsHwIoArmed(pVM)))
6763 {
6764 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
6765 VMMRZCallRing3Disable(pVCpu);
6766 HM_DISABLE_PREEMPT(pVCpu);
6767
6768 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
6769 CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
6770
6771 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, &pVCpu->cpum.GstCtx, IoExitInfo.n.u16Port, cbValue);
6772 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
6773 {
6774 /* Raise #DB. */
6775 pVmcb->guest.u64DR6 = pCtx->dr[6];
6776 pVmcb->guest.u64DR7 = pCtx->dr[7];
6777 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
6778 hmR0SvmSetPendingXcptDB(pVCpu);
6779 }
6780 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
6781 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
6782 else if ( rcStrict2 != VINF_SUCCESS
6783 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
6784 rcStrict = rcStrict2;
6785 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
6786
6787 HM_RESTORE_PREEMPT();
6788 VMMRZCallRing3Enable(pVCpu);
6789 }
6790
6791 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
6792 }
6793
6794#ifdef VBOX_STRICT
6795 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
6796 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
6797 Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
6798 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
6799 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
6800 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
6801 Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
6802 else
6803 {
6804 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
6805 * statuses, that the VMM device and some others may return. See
6806 * IOM_SUCCESS() for guidance. */
6807 AssertMsg( RT_FAILURE(rcStrict)
6808 || rcStrict == VINF_SUCCESS
6809 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
6810 || rcStrict == VINF_EM_DBG_BREAKPOINT
6811 || rcStrict == VINF_EM_RAW_GUEST_TRAP
6812 || rcStrict == VINF_EM_DBG_STEPPED
6813 || rcStrict == VINF_EM_RAW_TO_R3
6814 || rcStrict == VINF_TRPM_XCPT_DISPATCHED
6815 || rcStrict == VINF_EM_TRIPLE_FAULT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6816 }
6817#endif
6818 }
6819 else
6820 {
6821 /*
6822 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
6823 */
6824 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6825 STAM_COUNTER_INC(!IoExitInfo.n.u1Str
6826 ? IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
6827 : IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
6828 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
6829 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IoExitInfo.n.u1Rep ? "REP " : "",
6830 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? "OUT" : "IN", IoExitInfo.n.u1Str ? "S" : "", IoExitInfo.n.u16Port, uIOWidth));
6831
6832 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
6833 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
6834
6835 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
6836 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6837 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6838 }
6839 return rcStrict;
6840}
6841
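/*
 * A sketch of the EXITINFO1 fields consumed by the IOIO handler above, per the
 * AMD spec figure it references; only the fields the handler actually uses are
 * decoded (hypothetical helper name, for exposition only):
 */
#if 0
static void hmR0SvmSketchDecodeIoExitInfo(uint64_t u64ExitInfo1)
{
    SVMIOIOEXITINFO IoExitInfo;
    IoExitInfo.u = (uint32_t)u64ExitInfo1;
    bool     const fRead    = IoExitInfo.n.u1Type == SVM_IOIO_READ; /* Bit 0: IN vs. OUT. */
    bool     const fString  = RT_BOOL(IoExitInfo.n.u1Str);          /* Bit 2: INS/OUTS. */
    bool     const fRep     = RT_BOOL(IoExitInfo.n.u1Rep);          /* Bit 3: REP prefix. */
    uint32_t const uIOWidth = (IoExitInfo.u >> 4) & 0x7;            /* Bits 4-6: operand size. */
    uint32_t const uAddrSz  = (IoExitInfo.u >> 7) & 0x7;            /* Bits 7-9: address size. */
    uint16_t const uPort    = IoExitInfo.n.u16Port;                 /* Bits 16-31: port number. */
    NOREF(fRead); NOREF(fString); NOREF(fRep); NOREF(uIOWidth); NOREF(uAddrSz); NOREF(uPort);
}
#endif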
6842
6843/**
6844 * \#VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional \#VMEXIT.
6845 */
6846HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
6847{
6848 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
6849 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6850 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
6851
6852 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6853 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6854 Assert(pVM->hmr0.s.fNestedPaging);
6855
6856 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
6857 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
6858 RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
6859 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1; /* Note! High bits in EXITINFO1 may contain additional info and are
6860 thus intentionally not copied into u32ErrCode. */
6861
6862 Log4Func(("#NPF at CS:RIP=%04x:%#RX64 GCPhysFaultAddr=%RGp ErrCode=%#x cbInstrFetched=%u %.15Rhxs\n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr,
6863 u32ErrCode, pVmcb->ctrl.cbInstrFetched, pVmcb->ctrl.abInstr));
6864
6865 /*
6866 * TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions.
6867 */
6868 if ( pVM->hm.s.fTprPatchingAllowed
6869 && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == XAPIC_OFF_TPR
6870 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */
6871 || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
6872 && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
6873 && !CPUMIsGuestInLongModeEx(pCtx)
6874 && !CPUMGetGuestCPL(pVCpu)
6875 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
6876 {
6877 RTGCPHYS GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
6878 GCPhysApicBase &= PAGE_BASE_GC_MASK;
6879
6880 if (GCPhysFaultAddr == GCPhysApicBase + XAPIC_OFF_TPR)
6881 {
6882 /* Only attempt to patch the instruction once. */
6883 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
6884 if (!pPatch)
6885 return VINF_EM_HM_PATCH_TPR_INSTR;
6886 }
6887 }
6888
6889 /*
6890 * Determine the nested paging mode.
6891 */
6892/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
6893 PGMMODE const enmNestedPagingMode = PGMGetHostMode(pVM);
6894
6895 /*
6896 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
6897 */
6898 Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
6899 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
6900 {
6901 /*
6902 * If event delivery causes an MMIO #NPF, go back to instruction emulation as otherwise
6903 * injecting the original pending event would most likely cause the same MMIO #NPF.
6904 */
6905 if (pVCpu->hm.s.Event.fPending)
6906 {
6907 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
6908 return VINF_EM_RAW_INJECT_TRPM_EVENT;
6909 }
6910
6911 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
6912 VBOXSTRICTRC rcStrict;
6913 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
6914 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
6915 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
6916 if (!pExitRec)
6917 {
6919 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
6920 u32ErrCode);
6921
6922 /*
6923 * If we succeed, resume guest execution.
6924 *
6925 * If we fail in interpreting the instruction because we couldn't get the guest
6926 * physical address of the page containing the instruction via the guest's page
6927 * tables (we would invalidate the guest page in the host TLB), resume execution
6928 * which would cause a guest page fault to let the guest handle this weird case.
6929 *
6930 * See @bugref{6043}.
6931 */
6932 if ( rcStrict == VINF_SUCCESS
6933 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
6934 || rcStrict == VERR_PAGE_NOT_PRESENT)
6935 {
6936 /* Successfully handled MMIO operation. */
6937 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
6938 rcStrict = VINF_SUCCESS;
6939 }
6940 }
6941 else
6942 {
6943 /*
6944 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
6945 */
6946 Assert(pCtx == &pVCpu->cpum.GstCtx);
6947 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
6948 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
6949 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysFaultAddr));
6950
6951 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
6952 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
6953
6954 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
6955 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
6956 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6957 }
6958 return rcStrict;
6959 }
6960
6961 /*
6962 * Nested page-fault.
6963 */
6964 TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
6965 int rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
6966 TRPMResetTrap(pVCpu);
6967
6968 Log4Func(("#NPF: PGMR0Trap0eHandlerNestedPaging returns %Rrc CS:RIP=%04x:%#RX64\n", rc, pCtx->cs.Sel, pCtx->rip));
6969
6970 /*
6971 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
6972 */
6973 if ( rc == VINF_SUCCESS
6974 || rc == VERR_PAGE_TABLE_NOT_PRESENT
6975 || rc == VERR_PAGE_NOT_PRESENT)
6976 {
6977 /* We've successfully synced our shadow page tables. */
6978 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
6979 rc = VINF_SUCCESS;
6980 }
6981
6982 /*
6983 * If delivering an event causes an #NPF (and not MMIO), we shall resolve the fault and
6984 * re-inject the original event.
6985 */
6986 if (pVCpu->hm.s.Event.fPending)
6987 {
6988 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflectNPF);
6989
6990 /*
6991 * If the #NPF handler requested emulation of the instruction, ignore it.
6992 * We need to re-inject the original event so as to not lose it.
6993 * Reproducible when booting ReactOS 0.4.12 with BTRFS (installed using BootCD,
6994 * LiveCD is broken for other reasons).
6995 */
6996 if (rc == VINF_EM_RAW_EMULATE_INSTR)
6997 rc = VINF_EM_RAW_INJECT_TRPM_EVENT;
6998 }
6999
7000 return rc;
7001}
7002
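/*
 * The #NPF error-code classification used above, as a predicate: MMIO pages are
 * mapped with a reserved bit set in their page-table entries, so an access
 * faults with P=1 and RSVD=1, whereas an ordinary not-present fault has P=0.
 * Hypothetical helper name, for exposition only.
 */
#if 0
static bool hmR0SvmSketchIsMmioNestedPageFault(uint32_t u32ErrCode)
{
    uint32_t const fMmio = X86_TRAP_PF_RSVD | X86_TRAP_PF_P;
    return (u32ErrCode & fMmio) == fMmio;
}
#endif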
7003
7004/**
7005 * \#VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional
7006 * \#VMEXIT.
7007 */
7008HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7009{
7010 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7011 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
7012
7013 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts; it is now ready. */
7014 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7015 hmR0SvmClearIntWindowExiting(pVCpu, pVmcb);
7016
7017 /* Deliver the pending interrupt via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
7018 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
7019 return VINF_SUCCESS;
7020}
7021
7022
7023/**
7024 * \#VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional
7025 * \#VMEXIT.
7026 */
7027HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7028{
7029 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7030 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
7031
7032#ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
7033 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
7034#endif
7035
7036 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
7037 if (pVCpu->hm.s.Event.fPending) /* Can happen with exceptions/NMI. See @bugref{8411}. */
7038 {
7039 /*
7040 * AMD-V provides us with the exception which caused the TS; we collect
7041 * the information in the call to hmR0SvmCheckExitDueToEventDelivery().
7042 */
7043 Log4Func(("TS occurred during event delivery\n"));
7044 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
7045 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7046 }
7047
7048 /** @todo Emulate task switch someday, currently just going back to ring-3 for
7049 * emulation. */
7050 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
7051 return VERR_EM_INTERPRETER;
7052}
7053
7054
7055/**
7056 * \#VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional \#VMEXIT.
7057 */
7058HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7059{
7060 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7061 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7062
7063 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7064 if (pVM->hm.s.fTprPatchingAllowed)
7065 {
7066 int rc = hmEmulateSvmMovTpr(pVM, pVCpu);
7067 if (rc != VERR_NOT_FOUND)
7068 {
7069 Log4Func(("hmEmulateSvmMovTpr returns %Rrc\n", rc));
7070 return rc;
7071 }
7072 }
7073
7074 if (EMAreHypercallInstructionsEnabled(pVCpu))
7075 {
7076 unsigned cbInstr;
7077 if (hmR0SvmSupportsNextRipSave(pVCpu))
7078 {
7079 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7080 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7081 }
7082 else
7083 {
7084 PDISCPUSTATE pDis = &pVCpu->hmr0.s.svm.DisState;
7085 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, &cbInstr);
7086 if ( rc == VINF_SUCCESS
7087 && pDis->pCurInstr->uOpcode == OP_VMMCALL)
7088 Assert(cbInstr > 0);
7089 else
7090 cbInstr = 0;
7091 }
7092
7093 VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
7094 if (RT_SUCCESS(rcStrict))
7095 {
7096 /* Only update the RIP if we're continuing guest execution and not in the case
7097 of say VINF_GIM_R3_HYPERCALL. */
7098 if (rcStrict == VINF_SUCCESS)
7099 hmR0SvmAdvanceRip(pVCpu, cbInstr);
7100
7101 return VBOXSTRICTRC_VAL(rcStrict);
7102 }
7103 else
7104 Log4Func(("GIMHypercall returns %Rrc -> #UD\n", VBOXSTRICTRC_VAL(rcStrict)));
7105 }
7106
7107 hmR0SvmSetPendingXcptUD(pVCpu);
7108 return VINF_SUCCESS;
7109}
7110
7111
7112/**
7113 * \#VMEXIT handler for PAUSE (SVM_EXIT_PAUSE). Conditional \#VMEXIT.
7114 */
7115HMSVM_EXIT_DECL hmR0SvmExitPause(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7116{
7117 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7118
7119 unsigned cbInstr;
7120 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7121 if (fSupportsNextRipSave)
7122 {
7123 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7124 cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7125 }
7126 else
7127 {
7128 PDISCPUSTATE pDis = &pVCpu->hmr0.s.svm.DisState;
7129 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, pDis, &cbInstr);
7130 if ( rc == VINF_SUCCESS
7131 && pDis->pCurInstr->uOpcode == OP_PAUSE)
7132 Assert(cbInstr > 0);
7133 else
7134 cbInstr = 0;
7135 }
7136
7137 /** @todo The guest has likely hit a contended spinlock. We might want to
7138 * poke or schedule a different guest VCPU. */
7139 hmR0SvmAdvanceRip(pVCpu, cbInstr);
7140 return VINF_EM_RAW_INTERRUPT;
7141}
7142
7143
7144/**
7145 * \#VMEXIT handler for FERR intercept (SVM_EXIT_FERR_FREEZE). Conditional
7146 * \#VMEXIT.
7147 */
7148HMSVM_EXIT_DECL hmR0SvmExitFerrFreeze(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7149{
7150 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7151 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
7152 Assert(!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE));
7153
7154 Log4Func(("Raising IRQ 13 in response to #FERR\n"));
7155 return PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
7156}
7157
7158
7159/**
7160 * \#VMEXIT handler for IRET (SVM_EXIT_IRET). Conditional \#VMEXIT.
7161 */
7162HMSVM_EXIT_DECL hmR0SvmExitIret(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7163{
7164 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7165
7166 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now (almost) ready. */
7167 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7168 hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_IRET);
7169
7170 /* Emulate the IRET. We have to execute the IRET before an NMI, but must potentially
7171 * deliver a pending NMI right after. If the IRET faults, an NMI can come before the
7172 * handler executes. Yes, x86 is ugly.
7173 */
7174 return VINF_EM_RAW_EMULATE_INSTR;
7175}
7176
7177
7178/**
7179 * \#VMEXIT handler for page-fault exceptions (SVM_EXIT_XCPT_14).
7180 * Conditional \#VMEXIT.
7181 */
7182HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7183{
7184 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7185 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7186 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
7187
7188 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
7189 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7190 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7191 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7192 uint32_t uErrCode = pVmcb->ctrl.u64ExitInfo1;
7193 uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
7194
7195#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
7196 if (pVM->hmr0.s.fNestedPaging)
7197 {
7198 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
7199 if ( !pSvmTransient->fVectoringDoublePF
7200 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
7201 {
7202 /* A genuine guest #PF, reflect it to the guest. */
7203 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
7204 Log4Func(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RX64 ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
7205 uFaultAddress, uErrCode));
7206 }
7207 else
7208 {
7209 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7210 hmR0SvmSetPendingXcptDF(pVCpu);
7211 Log4Func(("Pending #DF due to vectoring #PF. NP\n"));
7212 }
7213 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
7214 return VINF_SUCCESS;
7215 }
7216#endif
7217
7218 Assert(!pVM->hmr0.s.fNestedPaging);
7219
7220 /*
7221 * TPR patching shortcut for APIC TPR reads and writes; only applicable to 32-bit guests.
7222 */
7223 if ( pVM->hm.s.fTprPatchingAllowed
7224 && (uFaultAddress & 0xfff) == XAPIC_OFF_TPR
7225 && !(uErrCode & X86_TRAP_PF_P) /* Not present. */
7226 && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
7227 && !CPUMIsGuestInLongModeEx(pCtx)
7228 && !CPUMGetGuestCPL(pVCpu)
7229 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
7230 {
7231 RTGCPHYS GCPhysApicBase;
7232 GCPhysApicBase = APICGetBaseMsrNoCheck(pVCpu);
7233 GCPhysApicBase &= PAGE_BASE_GC_MASK;
7234
7235 /* Check if the page at the fault-address is the APIC base. */
7236 RTGCPHYS GCPhysPage;
7237 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
7238 if ( rc2 == VINF_SUCCESS
7239 && GCPhysPage == GCPhysApicBase)
7240 {
7241 /* Only attempt to patch the instruction once. */
7242 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
7243 if (!pPatch)
7244 return VINF_EM_HM_PATCH_TPR_INSTR;
7245 }
7246 }
7247
7248 Log4Func(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
7249 pCtx->rip, uErrCode, pCtx->cr3));
7250
7251 /*
7252 * If it's a vectoring #PF, emulate injecting the original event injection as
7253 * If it's a vectoring #PF, emulate the original event injection, as
7254 * event injection that caused a #PF. See @bugref{6607}.
7255 */
7256 if (pSvmTransient->fVectoringPF)
7257 {
7258 Assert(pVCpu->hm.s.Event.fPending);
7259 return VINF_EM_RAW_INJECT_TRPM_EVENT;
7260 }
7261
7262 TRPMAssertXcptPF(pVCpu, uFaultAddress, uErrCode);
7263 int rc = PGMTrap0eHandler(pVCpu, uErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
7264
7265 Log4Func(("#PF: rc=%Rrc\n", rc));
7266
7267 if (rc == VINF_SUCCESS)
7268 {
7269 /* Successfully synced shadow page tables or emulated an MMIO instruction. */
7270 TRPMResetTrap(pVCpu);
7271 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
7272 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7273 return rc;
7274 }
7275
7276 if (rc == VINF_EM_RAW_GUEST_TRAP)
7277 {
7278 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
7279
7280 /*
7281 * If a nested-guest delivers a #PF and that causes a #PF which is -not- a shadow #PF,
7282 * we should simply forward the #PF to the guest, and it is up to the nested-hypervisor to
7283 * determine whether it is a nested-shadow #PF or a #DF, see @bugref{7243#c121}.
7284 */
7285 if ( !pSvmTransient->fVectoringDoublePF
7286 || CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
7287 {
7288 /* It's a guest (or nested-guest) page fault and needs to be reflected. */
7289 uErrCode = TRPMGetErrorCode(pVCpu); /* The error code might have been changed. */
7290 TRPMResetTrap(pVCpu);
7291
7292#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7293 /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
7294 if ( CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
7295 && CPUMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
7296 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_XCPT_PF, uErrCode, uFaultAddress);
7297#endif
7298
7299 hmR0SvmSetPendingXcptPF(pVCpu, uErrCode, uFaultAddress);
7300 }
7301 else
7302 {
7303 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
7304 TRPMResetTrap(pVCpu);
7305 hmR0SvmSetPendingXcptDF(pVCpu);
7306 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
7307 }
7308
7309 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
7310 return VINF_SUCCESS;
7311 }
7312
7313 TRPMResetTrap(pVCpu);
7314 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
7315 return rc;
7316}
7317
7318
7319/**
7320 * \#VMEXIT handler for undefined opcode (SVM_EXIT_XCPT_6).
7321 * Conditional \#VMEXIT.
7322 */
7323HMSVM_EXIT_DECL hmR0SvmExitXcptUD(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7324{
7325 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7326 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
7328
7329 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
7330 PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
7331 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
7332
7333 /** @todo if we accumulate more optional stuff here, we ought to combine the
7334 * reading of opcode bytes to avoid doing more than once. */
7335
7336 VBOXSTRICTRC rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
7337 if (pVCpu->hm.s.fGIMTrapXcptUD)
7338 {
7339 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7340 uint8_t cbInstr = 0;
7341 rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
7342 if (rcStrict == VINF_SUCCESS)
7343 {
7344 /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */
7345 hmR0SvmAdvanceRip(pVCpu, cbInstr);
7346 rcStrict = VINF_SUCCESS;
7347 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
7348 }
7349 else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
7350 rcStrict = VINF_SUCCESS;
7351 else if (rcStrict == VINF_GIM_R3_HYPERCALL)
7352 rcStrict = VINF_GIM_R3_HYPERCALL;
7353 else
7354 {
7355 Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
7356 rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
7357 }
7358 }
7359
7360 if (pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
7361 {
7362 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
7363 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
7364 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
7365 {
7366 /* Ideally, IEM should just handle all these special #UD situations, but
7367 we don't quite trust things to behave optimally when doing that. So,
7368 for now we'll restrict ourselves to a handful of possible sysenter and
7369 sysexit encodings that we filter right here. */
7370 uint8_t abInstr[SVM_CTRL_GUEST_INSTR_BYTES_MAX];
7371 uint8_t cbInstr = pVmcb->ctrl.cbInstrFetched;
7372 uint32_t const uCpl = CPUMGetGuestCPL(pVCpu);
7373 uint8_t const cbMin = uCpl != 0 ? 2 : 1 + 2;
7374 RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
7375 if (cbInstr < cbMin || cbInstr > SVM_CTRL_GUEST_INSTR_BYTES_MAX)
7376 {
7377 cbInstr = cbMin;
7378 int rc2 = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, GCPtrInstr, cbInstr);
7379 AssertRCStmt(rc2, cbInstr = 0);
7380 }
7381 else
7382 memcpy(abInstr, pVmcb->ctrl.abInstr, cbInstr); /* unlikely */
7383 if ( cbInstr == 0 /* read error */
7384 || (cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x34) /* sysenter */
7385 || ( uCpl == 0
7386 && ( ( cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x35) /* sysexit */
7387 || ( cbInstr >= 3 && abInstr[1] == 0x0f && abInstr[2] == 0x35 /* rex.w sysexit */
7388 && (abInstr[0] & (X86_OP_REX_W | 0xf0)) == X86_OP_REX_W))))
7389 {
7390 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
7391 | CPUMCTX_EXTRN_SREG_MASK /* without ES+DS+GS the app will #GP later - go figure */);
7392 Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
7393 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), GCPtrInstr, abInstr, cbInstr);
7394 Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: rcStrict=%Rrc %04x:%08RX64 %08RX64 %04x:%08RX64\n",
7395 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u,
7396 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp));
7397 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
7398 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); /** @todo Lazy bird. */
7399 if (rcStrict == VINF_IEM_RAISED_XCPT)
7400 rcStrict = VINF_SUCCESS;
7401 return rcStrict;
7402 }
7403 Log6(("hmR0SvmExitXcptUD: not sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
7404 }
7405 else
7406 Log6(("hmR0SvmExitXcptUD: not in long mode at %04x:%llx\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7407 }
7408
7409 /* If the GIM #UD exception handler didn't succeed for some reason or wasn't needed, raise #UD. */
7410 if (RT_FAILURE(rcStrict))
7411 {
7412 hmR0SvmSetPendingXcptUD(pVCpu);
7413 rcStrict = VINF_SUCCESS;
7414 }
7415
7416 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
7417 return rcStrict;
7418}
7419
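/*
 * The exact byte patterns the long-mode SYSENTER/SYSEXIT filter above accepts,
 * spelled out as a predicate (hypothetical helper name; pabInstr holds the
 * fetched instruction bytes as in the handler):
 */
#if 0
static bool hmR0SvmSketchIsSysenterOrSysexit(uint8_t const *pabInstr, uint8_t cbInstr, uint32_t uCpl)
{
    if (cbInstr >= 2 && pabInstr[0] == 0x0f && pabInstr[1] == 0x34)     /* SYSENTER: 0F 34. */
        return true;
    if (uCpl == 0)
    {
        if (cbInstr >= 2 && pabInstr[0] == 0x0f && pabInstr[1] == 0x35) /* SYSEXIT: 0F 35. */
            return true;
        if (   cbInstr >= 3
            && pabInstr[1] == 0x0f && pabInstr[2] == 0x35               /* REX.W SYSEXIT: 48-4F 0F 35. */
            && (pabInstr[0] & (X86_OP_REX_W | 0xf0)) == X86_OP_REX_W)
            return true;
    }
    return false;
}
#endif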
7420
7421/**
7422 * \#VMEXIT handler for math-fault exceptions (SVM_EXIT_XCPT_16).
7423 * Conditional \#VMEXIT.
7424 */
7425HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7426{
7427 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7428 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7429 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
7430
7431 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7432 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7433
7434 /* Paranoia; Ensure we cannot be called as a result of event delivery. */
7435 Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
7436
7437 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
7438
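    /* With CR0.NE clear the guest expects legacy external x87 error reporting:
       the error is signalled on the FERR# pin, which PC chipsets route to
       IRQ 13 via the PIC, instead of being raised directly as #MF. */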
    if (!(pCtx->cr0 & X86_CR0_NE))
    {
        PVMCC     pVM  = pVCpu->CTX_SUFF(pVM);
        PDISSTATE pDis = &pVCpu->hmr0.s.svm.DisState;
        unsigned  cbInstr;
        int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbInstr);
        if (RT_SUCCESS(rc))
        {
            /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
            rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
            if (RT_SUCCESS(rc))
                hmR0SvmAdvanceRip(pVCpu, cbInstr);
        }
        else
            Log4Func(("EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
        return rc;
    }

    hmR0SvmSetPendingXcptMF(pVCpu);
    return VINF_SUCCESS;
}


/**
 * \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1). Conditional
 * \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    }

    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);

    /*
     * This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data
     * breakpoint). However, for both cases DR6 and DR7 are updated to what the exception
     * handler expects. See AMD spec. 15.12.2 "#DB (Debug)".
     */
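    /* DBGFTrap01Handler sorts out whether the #DB belongs to us (our own
       single-stepping or hardware breakpoints) or to the guest; it returns
       VINF_EM_RAW_GUEST_TRAP in the latter case so we reflect the exception
       below. */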
    PVMCC    pVM   = pVCpu->CTX_SUFF(pVM);
    PSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
    PCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;
    int rc = DBGFTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
        if (CPUMIsHyperDebugStateActive(pVCpu))
            CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);

        /* Reflect the exception back to the guest. */
        hmR0SvmSetPendingXcptDB(pVCpu);
        rc = VINF_SUCCESS;
    }

    /*
     * Update DR6.
     */
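    /* X86_DR6_INIT_VAL is the architectural power-on value of DR6 (0xffff0ff0,
       all status bits clear), so this re-arms DR6 for the next #DB. */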
    if (CPUMIsHyperDebugStateActive(pVCpu))
    {
        Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
        pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
    }
    else
    {
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
        Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
    }

    return rc;
}


/**
 * \#VMEXIT handler for alignment check exceptions (SVM_EXIT_XCPT_17).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptAC(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
    STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC);

    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector = X86_XCPT_AC;
    Event.n.u1ErrorCodeValid = 1;
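    /* #AC always pushes an error code of zero; Event.u = 0 above already
       leaves u32ErrorCode at 0, so only the valid bit needs setting. */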
    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    return VINF_SUCCESS;
}


/**
 * \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptBP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    int rc = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        SVMEVENT Event;
        Event.u          = 0;
        Event.n.u1Valid  = 1;
        Event.n.u3Type   = SVM_EVENT_EXCEPTION;
        Event.n.u8Vector = X86_XCPT_BP;
        hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
        rc = VINF_SUCCESS;
    }

    Assert(rc == VINF_SUCCESS || rc == VINF_EM_DBG_BREAKPOINT);
    return rc;
}


/**
 * Hacks its way around the lovely mesa driver's backdoor accesses.
 *
 * @sa hmR0VmxHandleMesaDrvGp
 */
static int hmR0SvmHandleMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)
{
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK);
    Log(("hmR0SvmHandleMesaDrvGp: at %04x:%08RX64 rcx=%RX64 rbx=%RX64\n",
         pVmcb->guest.CS.u16Sel, pVmcb->guest.u64RIP, pCtx->rcx, pCtx->rbx));
    RT_NOREF(pCtx, pVmcb);

    /* For now we'll just skip the instruction. */
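    /* "IN eAX,DX" is the single-byte opcode 0xed (see hmR0SvmIsMesaDrvGp),
       so advancing RIP by one skips exactly that instruction. */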
    hmR0SvmAdvanceRip(pVCpu, 1);
    return VINF_SUCCESS;
}


/**
 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
 * backdoor logging w/o checking what it is running inside.
 *
 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
 * backdoor port and magic numbers loaded in registers.
 *
 * @returns true if it is, false if it isn't.
 * @sa hmR0VmxIsMesaDrvGp
 */
DECLINLINE(bool) hmR0SvmIsMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)
{
    /* Check magic and port. */
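    /* These are the VMware backdoor interface constants the mesa driver uses:
       port 0x5658 ("VX") in DX and the magic 0x564d5868 ("VMXh") in EAX. */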
    Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
    /*Log8(("hmR0SvmIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->fExtrn & CPUMCTX_EXTRN_RAX ? pVmcb->guest.u64RAX : pCtx->rax, pCtx->rdx));*/
    if (pCtx->dx != UINT32_C(0x5658))
        return false;
    if ((pCtx->fExtrn & CPUMCTX_EXTRN_RAX ? pVmcb->guest.u64RAX : pCtx->rax) != UINT32_C(0x564d5868))
        return false;

    /* Check that it is #GP(0). */
    if (pVmcb->ctrl.u64ExitInfo1 != 0)
        return false;

    /* Flat ring-3 CS. */
    /*Log8(("hmR0SvmIsMesaDrvGp: u8CPL=%d base=%RX64\n", pVmcb->guest.u8CPL, pCtx->fExtrn & CPUMCTX_EXTRN_CS ? pVmcb->guest.CS.u64Base : pCtx->cs.u64Base));*/
    if (pVmcb->guest.u8CPL != 3)
        return false;
    if ((pCtx->fExtrn & CPUMCTX_EXTRN_CS ? pVmcb->guest.CS.u64Base : pCtx->cs.u64Base) != 0)
        return false;

    /* 0xed: IN eAX,dx */
    if (pVmcb->ctrl.cbInstrFetched < 1) /* unlikely, it turns out. */
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_GPRS_MASK
                                          | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
        uint8_t abInstr[1];
        int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
        /*Log8(("hmR0SvmIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
        if (RT_FAILURE(rc))
            return false;
        if (abInstr[0] != 0xed)
            return false;
    }
    else
    {
        /*Log8(("hmR0SvmIsMesaDrvGp: %#x\n", pVmcb->ctrl.abInstr));*/
        if (pVmcb->ctrl.abInstr[0] != 0xed)
            return false;
    }
    return true;
}


/**
 * \#VMEXIT handler for general protection faults (SVM_EXIT_XCPT_13).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptGP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);

    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    if (   !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
        || !hmR0SvmIsMesaDrvGp(pVCpu, pCtx, pVmcb))
    {
        SVMEVENT Event;
        Event.u          = 0;
        Event.n.u1Valid  = 1;
        Event.n.u3Type   = SVM_EVENT_EXCEPTION;
        Event.n.u8Vector = X86_XCPT_GP;
        Event.n.u1ErrorCodeValid = 1;
        Event.n.u32ErrorCode     = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
        hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
        return VINF_SUCCESS;
    }
    return hmR0SvmHandleMesaDrvGp(pVCpu, pCtx, pVmcb);
}


#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
/**
 * \#VMEXIT handler for generic exceptions. Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitXcptGeneric(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    uint8_t const  uVector  = pVmcb->ctrl.u64ExitCode - SVM_EXIT_XCPT_0;
    uint32_t const uErrCode = pVmcb->ctrl.u64ExitInfo1;
    Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode);
    Assert(uVector <= X86_XCPT_LAST);
    Log4Func(("uVector=%#x uErrCode=%u\n", uVector, uErrCode));

    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector = uVector;
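    /* Of the vectors that can land here, only #DF, #TS, #NP, #SS, #GP and #AC
       push an error code on x86 (#PF does too, but is rejected below). */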
    switch (uVector)
    {
        /* Shouldn't be here for reflecting #PFs (among other things, the fault address isn't passed along). */
        case X86_XCPT_PF: AssertMsgFailed(("hmR0SvmExitXcptGeneric: Unexpected exception")); return VERR_SVM_IPE_5;
        case X86_XCPT_DF:
        case X86_XCPT_TS:
        case X86_XCPT_NP:
        case X86_XCPT_SS:
        case X86_XCPT_GP:
        case X86_XCPT_AC:
        {
            Event.n.u1ErrorCodeValid = 1;
            Event.n.u32ErrorCode     = uErrCode;
            break;
        }
    }

#ifdef VBOX_WITH_STATISTICS
    switch (uVector)
    {
        case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break;
        case X86_XCPT_DB: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); break;
        case X86_XCPT_BP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); break;
        case X86_XCPT_OF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestOF); break;
        case X86_XCPT_BR: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBR); break;
        case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break;
        case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM); break;
        case X86_XCPT_DF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDF); break;
        case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS); break;
        case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break;
        case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break;
        case X86_XCPT_GP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP); break;
        case X86_XCPT_PF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); break;
        case X86_XCPT_MF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); break;
        case X86_XCPT_AC: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC); break;
        case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break;
        default:
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
            break;
    }
#endif

    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    return VINF_SUCCESS;
}
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    Assert(pVmcb);
    Assert(!pVmcb->ctrl.IntCtrl.n.u1VGifEnable);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    uint64_t const fImport = CPUMCTX_EXTRN_HWVIRT;
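    /* With the SVM NextRIP-save feature the CPU stores the address of the next
       instruction in the VMCB, so the instruction length is simply
       u64NextRIP - RIP and no software decoding is needed; otherwise we fall
       back to full one-instruction emulation via IEMExecOne(). */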
    if (fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
        uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
        rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
        rcStrict = IEMExecOne(pVCpu);
    }

    if (rcStrict == VINF_SUCCESS)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return rcStrict;
}


/**
 * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    /*
     * When VGIF is not used we always intercept STGI instructions. When VGIF is used,
     * we only intercept STGI while events are pending for delivery once GIF becomes 1.
     */
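    /* GIF (the global interrupt flag) gates all interrupt sources while a
       nested hypervisor runs: CLGI clears it, STGI sets it.  With VGIF the
       CPU virtualizes the flag for us, so the STGI intercept below is only
       armed while we are waiting for GIF to become 1. */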
    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
        hmR0SvmClearCtrlIntercept(pVCpu, pVmcb, SVM_CTRL_INTERCEPT_STGI);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    uint64_t const fImport = CPUMCTX_EXTRN_HWVIRT;
    if (fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
        uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
        rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
        rcStrict = IEMExecOne(pVCpu);
    }

    if (rcStrict == VINF_SUCCESS)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return rcStrict;
}


/**
 * \#VMEXIT handler for VMLOAD (SVM_EXIT_VMLOAD). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitVmload(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    Assert(pVmcb);
    Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);

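    /* The import mask below mirrors the state VMLOAD loads from the VMCB
       state-save area: FS, GS, TR and LDTR (including hidden parts),
       KernelGSBase, the STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the
       SYSENTER MSRs. */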
    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    uint64_t const fImport = CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_KERNEL_GS_BASE
                           | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_SYSCALL_MSRS
                           | CPUMCTX_EXTRN_SYSENTER_MSRS;
    if (fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | fImport);
        uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
        rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | fImport);
        rcStrict = IEMExecOne(pVCpu);
    }

    if (rcStrict == VINF_SUCCESS)
    {
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS | HM_CHANGED_GUEST_GS
                                                 | HM_CHANGED_GUEST_TR | HM_CHANGED_GUEST_LDTR
                                                 | HM_CHANGED_GUEST_KERNEL_GS_BASE | HM_CHANGED_GUEST_SYSCALL_MSRS
                                                 | HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return rcStrict;
}


/**
 * \#VMEXIT handler for VMSAVE (SVM_EXIT_VMSAVE). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitVmsave(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    Assert(!pVmcb->ctrl.LbrVirt.n.u1VirtVmsaveVmload);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
        uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
        rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
    }

    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return rcStrict;
}


/**
 * \#VMEXIT handler for INVLPGA (SVM_EXIT_INVLPGA). Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmExitInvlpga(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);

    VBOXSTRICTRC rcStrict;
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
    if (fSupportsNextRipSave)
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
        uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
        rcStrict = IEMExecDecodedInvlpga(pVCpu, cbInstr);
    }
    else
    {
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
    }

    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return rcStrict;
}

7933/**
7934 * \#VMEXIT handler for STGI (SVM_EXIT_VMRUN). Conditional \#VMEXIT.
7935 */
7936HMSVM_EXIT_DECL hmR0SvmExitVmrun(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
7937{
7938 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
7939 /* We shall import the entire state here, just in case we enter and continue execution of
7940 the nested-guest with hardware-assisted SVM in ring-0, we would be switching VMCBs and
7941 could lose lose part of CPU state. */
7942 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
7943
7944 VBOXSTRICTRC rcStrict;
7945 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
7946 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
7947 if (fSupportsNextRipSave)
7948 {
7949 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
7950 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pVCpu->cpum.GstCtx.rip;
7951 rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
7952 }
7953 else
7954 {
7955 /* We use IEMExecOneBypassEx() here as it supresses attempt to continue emulating any
7956 instruction(s) when interrupt inhibition is set as part of emulating the VMRUN
7957 instruction itself, see @bugref{7243#c126} */
7958 rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), NULL /* pcbWritten */);
7959 }
7960 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
7961
7962 if (rcStrict == VINF_SUCCESS)
7963 {
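        /* Returning VINF_SVM_VMRUN lets the ring-0 run loop know it must
           switch to the nested-guest execution loop (different VMCB and exit
           handling) rather than resuming the outer guest directly. */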
        rcStrict = VINF_SVM_VMRUN;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_VMRUN_MASK);
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    return rcStrict;
}


/**
 * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_XCPT_1).
 * Unconditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmNestedExitXcptDB(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    if (pVCpu->hm.s.Event.fPending)
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    }

    hmR0SvmSetPendingXcptDB(pVCpu);
    return VINF_SUCCESS;
}


/**
 * Nested-guest \#VMEXIT handler for breakpoint exceptions (SVM_EXIT_XCPT_3).
 * Conditional \#VMEXIT.
 */
HMSVM_EXIT_DECL hmR0SvmNestedExitXcptBP(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);

    SVMEVENT Event;
    Event.u          = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector = X86_XCPT_BP;
    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    return VINF_SUCCESS;
}
#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */

/** @} */