VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp@48153

Last change on this file since 48153 was 48153, checked in by vboxsync, 11 years ago

VMM/HM: More dead code path elimination.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 181.4 KB
1/* $Id: HMSVMR0.cpp 48153 2013-08-29 12:57:00Z vboxsync $ */
2/** @file
3 * HM SVM (AMD-V) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24
25#include "HMInternal.h"
26#include <VBox/vmm/vm.h>
27#include "HMSVMR0.h"
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/tm.h>
32
33#ifdef DEBUG_ramshankar
34# define HMSVM_SYNC_FULL_GUEST_STATE
35# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
36# define HMSVM_ALWAYS_TRAP_PF
37# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
38#endif
39
40
41/*******************************************************************************
42* Defined Constants And Macros *
43*******************************************************************************/
44#ifdef VBOX_WITH_STATISTICS
45# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
46 if ((u64ExitCode) == SVM_EXIT_NPF) \
47 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
48 else \
49 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
50 } while (0)
51#else
52# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
53#endif
54
55/** If we decide to use a function table approach this can be useful to
56 * switch to a "static DECLCALLBACK(int)". */
57#define HMSVM_EXIT_DECL static int
58
59
60/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
61 *
62 * The CPU format of the segment attribute is described in X86DESCATTRBITS
63 * which is 16-bits (i.e. includes 4 bits of the segment limit).
64 *
65 * In the AMD-V VMCB format the segment attribute is a compact 12 bits (strictly
66 * only the attribute bits and nothing else). The upper 4 bits are unused.
67 *
68 * @{ */
69#define HMSVM_CPU_2_VMCB_SEG_ATTR(a) ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
70#define HMSVM_VMCB_2_CPU_SEG_ATTR(a) ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )
71/** @} */
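/*
 * Worked example (values chosen purely for illustration): a flat 32-bit code
 * segment typically has the CPU attribute value 0xc09b (type=0xb, S=1, DPL=0,
 * P=1, G=1, D=1, with the limit bits 11:8 zero here). The macros above simply
 * shuffle bits 15:12 down to 11:8 and back again.
 */
#if 0 /* illustration only */
    uint16_t const uCpuAttr  = 0xc09b;
    uint16_t const uVmcbAttr = HMSVM_CPU_2_VMCB_SEG_ATTR(uCpuAttr);  /* 0x0c9b: G/D/L/AVL now in bits 11:8. */
    uint16_t const uBack     = HMSVM_VMCB_2_CPU_SEG_ATTR(uVmcbAttr); /* 0xc09b again; the 4 limit bits were zero. */
#endif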
72
73
74/** @name Macros for loading, storing segment registers to/from the VMCB.
75 * @{ */
76#define HMSVM_LOAD_SEG_REG(REG, reg) \
77 do \
78 { \
79 Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
80 Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \
81 pVmcb->guest.REG.u16Sel = pCtx->reg.Sel; \
82 pVmcb->guest.REG.u32Limit = pCtx->reg.u32Limit; \
83 pVmcb->guest.REG.u64Base = pCtx->reg.u64Base; \
84 pVmcb->guest.REG.u16Attr = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
85 } while (0)
86
87#define HMSVM_SAVE_SEG_REG(REG, reg) \
88 do \
89 { \
90 pMixedCtx->reg.Sel = pVmcb->guest.REG.u16Sel; \
91 pMixedCtx->reg.ValidSel = pVmcb->guest.REG.u16Sel; \
92 pMixedCtx->reg.fFlags = CPUMSELREG_FLAGS_VALID; \
93 pMixedCtx->reg.u32Limit = pVmcb->guest.REG.u32Limit; \
94 pMixedCtx->reg.u64Base = pVmcb->guest.REG.u64Base; \
95 pMixedCtx->reg.Attr.u = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
96 } while (0)
97/** @} */
98
99
100/** Macro for checking and returning from the calling function for
101 * \#VMEXIT intercepts that may be caused during delivery of another
102 * event in the guest. */
103#define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \
104 do \
105 { \
106 int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
107 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT)) \
108 return VINF_SUCCESS; \
109 else if (RT_UNLIKELY(rc == VINF_EM_RESET)) \
110 return rc; \
111 } while (0)
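/*
 * Usage sketch (hypothetical handler name, for illustration only): a #VMEXIT
 * handler invokes this macro first, so that an exit raised while another event
 * was being delivered is turned into a pending double fault (the macro then
 * returns VINF_SUCCESS so the #DF gets injected on the next VMRUN) or into
 * VINF_EM_RESET for a triple fault.
 */
#if 0 /* illustration only */
HMSVM_EXIT_DECL hmR0SvmExitXcptExample(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
{
    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();    /* May return early, see above. */
    /* ... normal handling of this particular exit ... */
    return VINF_SUCCESS;
}
#endif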
112
113/** Macro for upgrading a @a a_rc to VINF_EM_DBG_STEPPED after emulating an
114 * instruction that exited. */
115#define HMSVM_CHECK_SINGLE_STEP(a_pVCpu, a_rc) \
116 do { \
117 if ((a_pVCpu)->hm.s.fSingleInstruction && (a_rc) == VINF_SUCCESS) \
118 (a_rc) = VINF_EM_DBG_STEPPED; \
119 } while (0)
120
121/** Assert that preemption is disabled or covered by thread-context hooks. */
122#define HMSVM_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
123 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
124
125/** Assert that we haven't migrated CPUs when thread-context hooks are not
126 * used. */
127#define HMSVM_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
128 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
129 ("Illegal migration! Entered on CPU %u Current %u\n", \
130 pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
131
132/** Exception bitmap mask for all contributory exceptions.
133 *
134 * Page fault is deliberately excluded here as it's conditional as to whether
135 * it's contributory or benign. Page faults are handled separately.
136 */
137#define HMSVM_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
138 | RT_BIT(X86_XCPT_DE))
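/*
 * Example (for illustration): testing whether a vector falls in the contributory
 * class when deciding if a second fault should be promoted to a double fault.
 */
#if 0 /* illustration only */
    uint8_t const uVector       = X86_XCPT_GP;
    bool const    fContributory = RT_BOOL(HMSVM_CONTRIBUTORY_XCPT_MASK & RT_BIT(uVector)); /* true for #GP */
#endif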
139
140
141/** @name VMCB Clean Bits.
142 *
143 * These flags are used for VMCB-state caching. A set VMCB Clean Bit indicates
144 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
145 * memory.
146 *
147 * @{ */
148/** All intercept vectors, TSC offset, PAUSE filter counter. */
149#define HMSVM_VMCB_CLEAN_INTERCEPTS RT_BIT(0)
150/** I/O permission bitmap, MSR permission bitmap. */
151#define HMSVM_VMCB_CLEAN_IOPM_MSRPM RT_BIT(1)
152/** ASID. */
153#define HMSVM_VMCB_CLEAN_ASID RT_BIT(2)
154/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
155 *  V_INTR_VECTOR. */
156#define HMSVM_VMCB_CLEAN_TPR RT_BIT(3)
157/** Nested Paging: Nested CR3 (nCR3), PAT. */
158#define HMSVM_VMCB_CLEAN_NP RT_BIT(4)
159/** Control registers (CR0, CR3, CR4, EFER). */
160#define HMSVM_VMCB_CLEAN_CRX_EFER RT_BIT(5)
161/** Debug registers (DR6, DR7). */
162#define HMSVM_VMCB_CLEAN_DRX RT_BIT(6)
163/** GDT, IDT limit and base. */
164#define HMSVM_VMCB_CLEAN_DT RT_BIT(7)
165/** Segment register: CS, SS, DS, ES limit and base. */
166#define HMSVM_VMCB_CLEAN_SEG RT_BIT(8)
167/** CR2.*/
168#define HMSVM_VMCB_CLEAN_CR2 RT_BIT(9)
169/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to) */
170#define HMSVM_VMCB_CLEAN_LBR RT_BIT(10)
171/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
172 *  PHYSICAL_TABLE and AVIC LOGICAL_TABLE Pointers). */
173#define HMSVM_VMCB_CLEAN_AVIC RT_BIT(11)
174/** Mask of all valid VMCB Clean bits. */
175#define HMSVM_VMCB_CLEAN_ALL ( HMSVM_VMCB_CLEAN_INTERCEPTS \
176 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
177 | HMSVM_VMCB_CLEAN_ASID \
178 | HMSVM_VMCB_CLEAN_TPR \
179 | HMSVM_VMCB_CLEAN_NP \
180 | HMSVM_VMCB_CLEAN_CRX_EFER \
181 | HMSVM_VMCB_CLEAN_DRX \
182 | HMSVM_VMCB_CLEAN_DT \
183 | HMSVM_VMCB_CLEAN_SEG \
184 | HMSVM_VMCB_CLEAN_CR2 \
185 | HMSVM_VMCB_CLEAN_LBR \
186 | HMSVM_VMCB_CLEAN_AVIC)
187/** @} */
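/*
 * Usage sketch (for illustration): whenever cached VMCB state is modified, the
 * corresponding clean bit must be cleared so the CPU re-reads that state from the
 * VMCB in memory on the next VMRUN; see e.g. hmR0SvmAddXcptIntercept() below.
 */
#if 0 /* illustration only */
    pVmcb->ctrl.u16InterceptRdDRx |= RT_BIT(0);                     /* Start intercepting DR0 reads... */
    pVmcb->ctrl.u64VmcbCleanBits  &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;  /* ...and mark the intercepts as dirty. */
#endif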
188
189/** @name SVM transient.
190 *
191 * A state structure for holding miscellaneous information across AMD-V
192 * VMRUN/#VMEXIT operation, restored after the transition.
193 *
194 * @{ */
195typedef struct SVMTRANSIENT
196{
197 /** The host's rflags/eflags. */
198 RTCCUINTREG uEflags;
199#if HC_ARCH_BITS == 32
200 uint32_t u32Alignment0;
201#endif
202
203 /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
204 uint64_t u64ExitCode;
205 /** The guest's TPR value used for TPR shadowing. */
206 uint8_t u8GuestTpr;
207 /** Alignment. */
208 uint8_t abAlignment0[7];
209
210 /** Whether the TSC_AUX MSR needs restoring on #VMEXIT. */
211 bool fRestoreTscAuxMsr;
212 /** Whether the #VMEXIT was caused by a page-fault during delivery of a
213 * contributory exception or a page-fault. */
214 bool fVectoringPF;
215 /** Whether the TSC offset mode needs to be updated. */
216 bool fUpdateTscOffsetting;
217} SVMTRANSIENT, *PSVMTRANSIENT;
218AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
219AssertCompileMemberAlignment(SVMTRANSIENT, fRestoreTscAuxMsr, sizeof(uint64_t));
220/** @} */
221
222
223/**
224 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
225 */
226typedef enum SVMMSREXITREAD
227{
228 /** Reading this MSR causes a VM-exit. */
229 SVMMSREXIT_INTERCEPT_READ = 0xb,
230 /** Reading this MSR does not cause a VM-exit. */
231 SVMMSREXIT_PASSTHRU_READ
232} SVMMSREXITREAD;
233
234/**
235 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
236 */
237typedef enum SVMMSREXITWRITE
238{
239 /** Writing to this MSR causes a VM-exit. */
240 SVMMSREXIT_INTERCEPT_WRITE = 0xd,
241 /** Writing to this MSR does not cause a VM-exit. */
242 SVMMSREXIT_PASSTHRU_WRITE
243} SVMMSREXITWRITE;
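/*
 * Usage sketch (MSR chosen purely for illustration): let the guest read an MSR
 * directly but take a #VMEXIT on writes.
 */
#if 0 /* illustration only */
    hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
#endif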
244
245
246/*******************************************************************************
247* Internal Functions *
248*******************************************************************************/
249static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
250static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
251static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
252
253HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
254HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
255HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
256HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
257HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
258HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
259HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
260HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
261HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
262HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
263HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
264HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
265HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
266HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
267HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
268HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
269HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
270HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
271HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
272HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
273HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
274HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
275HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
276HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
277HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
278HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
279HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
280
281DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
282
283
284/*******************************************************************************
285* Global Variables *
286*******************************************************************************/
287/** Ring-0 memory object for the IO bitmap. */
288RTR0MEMOBJ g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
289/** Physical address of the IO bitmap. */
290RTHCPHYS g_HCPhysIOBitmap = 0;
291/** Virtual address of the IO bitmap. */
292R0PTRTYPE(void *) g_pvIOBitmap = NULL;
293
294
295/**
296 * Sets up and activates AMD-V on the current CPU.
297 *
298 * @returns VBox status code.
299 * @param pCpu Pointer to the CPU info struct.
300 * @param pVM Pointer to the VM (can be NULL after a resume!).
301 * @param pvCpuPage Pointer to the global CPU page.
302 * @param HCPhysCpuPage Physical address of the global CPU page.
303 */
304VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
305{
306 AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER);
307 AssertReturn( HCPhysCpuPage
308 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
309 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
310
311 /*
312 * We must turn on AMD-V and set up the host state physical address, as those MSRs are per CPU.
313 */
314 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
315 if (u64HostEfer & MSR_K6_EFER_SVME)
316 {
317 /* If VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. */
318 if ( pVM
319 && pVM->hm.s.svm.fIgnoreInUseError)
320 {
321 pCpu->fIgnoreAMDVInUseError = true;
322 }
323
324 if (!pCpu->fIgnoreAMDVInUseError)
325 return VERR_SVM_IN_USE;
326 }
327
328 /* Turn on AMD-V in the EFER MSR. */
329 ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
330
331 /* Write the physical page address where the CPU will store the host state while executing the VM. */
332 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
333
334 /*
335 * Theoretically, other hypervisors may have used ASIDs, ideally we should flush all non-zero ASIDs
336 * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
337 * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
338 * to flush the TLB before using a new ASID.
339 */
340 pCpu->fFlushAsidBeforeUse = true;
341
342 /*
343 * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
344 */
345 ++pCpu->cTlbFlushes;
346
347 return VINF_SUCCESS;
348}
349
350
351/**
352 * Deactivates AMD-V on the current CPU.
353 *
354 * @returns VBox status code.
355 * @param pCpu Pointer to the CPU info struct.
356 * @param pvCpuPage Pointer to the global CPU page.
357 * @param HCPhysCpuPage Physical address of the global CPU page.
358 */
359VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
360{
361 AssertReturn( HCPhysCpuPage
362 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
363 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
364 NOREF(pCpu);
365
366 /* Turn off AMD-V in the EFER MSR. */
367 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
368 ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);
369
370 /* Invalidate host state physical address. */
371 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
372
373 return VINF_SUCCESS;
374}
375
376
377/**
378 * Does global AMD-V initialization (called during module initialization).
379 *
380 * @returns VBox status code.
381 */
382VMMR0DECL(int) SVMR0GlobalInit(void)
383{
384 /*
385 * Allocate 12 KB for the IO bitmap. Since this is non-optional and we always intercept all IO accesses, it's done
386 * once globally here instead of per-VM.
387 */
388 Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
389 int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
390 if (RT_FAILURE(rc))
391 return rc;
392
393 g_pvIOBitmap = RTR0MemObjAddress(g_hMemObjIOBitmap);
394 g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);
395
396 /* Set all bits to intercept all IO accesses. */
397 ASMMemFill32(g_pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
398 return VINF_SUCCESS;
399}
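/*
 * Layout note (for illustration): the IOPM holds one bit per I/O port (bit N
 * corresponds to port N), and the third page exists so that accesses straddling
 * the last port can still be checked. With every bit set above, all IN/OUT/INS/OUTS
 * instructions cause a #VMEXIT. A hypothetical per-port passthrough would simply
 * clear the matching bit:
 */
#if 0 /* illustration only -- VirtualBox always intercepts all ports */
    uint16_t const uPort = 0x80;          /* Example port. */
    ASMBitClear(g_pvIOBitmap, uPort);     /* No more #VMEXITs for accesses to this port. */
#endif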
400
401
402/**
403 * Does global AMD-V termination (called during module termination).
404 */
405VMMR0DECL(void) SVMR0GlobalTerm(void)
406{
407 if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
408 {
409 RTR0MemObjFree(g_hMemObjIOBitmap, false /* fFreeMappings */);
410 g_pvIOBitmap = NULL;
411 g_HCPhysIOBitmap = 0;
412 g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
413 }
414}
415
416
417/**
418 * Frees any allocated per-VCPU structures for a VM.
419 *
420 * @param pVM Pointer to the VM.
421 */
422DECLINLINE(void) hmR0SvmFreeStructs(PVM pVM)
423{
424 for (uint32_t i = 0; i < pVM->cCpus; i++)
425 {
426 PVMCPU pVCpu = &pVM->aCpus[i];
427 AssertPtr(pVCpu);
428
429 if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
430 {
431 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
432 pVCpu->hm.s.svm.pvVmcbHost = 0;
433 pVCpu->hm.s.svm.HCPhysVmcbHost = 0;
434 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
435 }
436
437 if (pVCpu->hm.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
438 {
439 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
440 pVCpu->hm.s.svm.pvVmcb = 0;
441 pVCpu->hm.s.svm.HCPhysVmcb = 0;
442 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
443 }
444
445 if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
446 {
447 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
448 pVCpu->hm.s.svm.pvMsrBitmap = 0;
449 pVCpu->hm.s.svm.HCPhysMsrBitmap = 0;
450 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
451 }
452 }
453}
454
455
456/**
457 * Does per-VM AMD-V initialization.
458 *
459 * @returns VBox status code.
460 * @param pVM Pointer to the VM.
461 */
462VMMR0DECL(int) SVMR0InitVM(PVM pVM)
463{
464 int rc = VERR_INTERNAL_ERROR_5;
465
466 /*
467 * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
468 */
469 uint32_t u32Family;
470 uint32_t u32Model;
471 uint32_t u32Stepping;
472 if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
473 {
474 Log4(("SVMR0InitVM: AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
475 pVM->hm.s.svm.fAlwaysFlushTLB = true;
476 }
477
478 /*
479 * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
480 */
481 for (VMCPUID i = 0; i < pVM->cCpus; i++)
482 {
483 PVMCPU pVCpu = &pVM->aCpus[i];
484 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
485 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
486 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
487 }
488
489 for (VMCPUID i = 0; i < pVM->cCpus; i++)
490 {
491 PVMCPU pVCpu = &pVM->aCpus[i];
492
493 /*
494 * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
495 * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
496 */
497 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
498 if (RT_FAILURE(rc))
499 goto failure_cleanup;
500
501 pVCpu->hm.s.svm.pvVmcbHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
502 pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
503 Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G);
504 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost);
505
506 /*
507 * Allocate one page for the guest-state VMCB.
508 */
509 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */);
510 if (RT_FAILURE(rc))
511 goto failure_cleanup;
512
513 pVCpu->hm.s.svm.pvVmcb = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
514 pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
515 Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
516 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);
517
518 /*
519 * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
520 * SVM to not require one.
521 */
522 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
523 if (RT_FAILURE(rc))
524 goto failure_cleanup;
525
526 pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
527 pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
528 /* Set all bits to intercept all MSR accesses (changed later on). */
529 ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
530 }
531
532 return VINF_SUCCESS;
533
534failure_cleanup:
535 hmR0SvmFreeStructs(pVM);
536 return rc;
537}
538
539
540/**
541 * Does per-VM AMD-V termination.
542 *
543 * @returns VBox status code.
544 * @param pVM Pointer to the VM.
545 */
546VMMR0DECL(int) SVMR0TermVM(PVM pVM)
547{
548 hmR0SvmFreeStructs(pVM);
549 return VINF_SUCCESS;
550}
551
552
553/**
554 * Sets the permission bits for the specified MSR in the MSRPM.
555 *
556 * @param pVCpu Pointer to the VMCPU.
557 * @param uMsr The MSR for which the access permissions are being set.
558 * @param enmRead MSR read permissions.
559 * @param enmWrite MSR write permissions.
560 */
561static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
562{
563 unsigned ulBit;
564 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
565
566 /*
567 * Layout:
568 * Byte offset MSR range
569 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
570 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
571 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
572 * 0x1800 - 0x1fff Reserved
573 */
574 if (uMsr <= 0x00001FFF)
575 {
576 /* Pentium-compatible MSRs. */
577 ulBit = uMsr * 2;
578 }
579 else if ( uMsr >= 0xC0000000
580 && uMsr <= 0xC0001FFF)
581 {
582 /* AMD Sixth Generation x86 Processor MSRs. */
583 ulBit = (uMsr - 0xC0000000) * 2;
584 pbMsrBitmap += 0x800;
585 }
586 else if ( uMsr >= 0xC0010000
587 && uMsr <= 0xC0011FFF)
588 {
589 /* AMD Seventh and Eighth Generation Processor MSRs. */
590 ulBit = (uMsr - 0xC0010000) * 2;
591 pbMsrBitmap += 0x1000;
592 }
593 else
594 {
595 AssertFailed();
596 return;
597 }
598
599 Assert(ulBit < 0x3fff /* 16 * 1024 - 1 */);
600 if (enmRead == SVMMSREXIT_INTERCEPT_READ)
601 ASMBitSet(pbMsrBitmap, ulBit);
602 else
603 ASMBitClear(pbMsrBitmap, ulBit);
604
605 if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
606 ASMBitSet(pbMsrBitmap, ulBit + 1);
607 else
608 ASMBitClear(pbMsrBitmap, ulBit + 1);
609
610 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
611 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
612}
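/*
 * Worked example (for illustration): MSR_K8_LSTAR (0xC0000082) falls in the second
 * range above, so pbMsrBitmap is advanced by 0x800 bytes and
 *     ulBit = (0xC0000082 - 0xC0000000) * 2 = 0x104,
 * i.e. bit 0x104 of that region is the read-intercept bit and bit 0x105 the
 * write-intercept bit (two bits per MSR: even = read, odd = write).
 */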
613
614
615/**
616 * Sets up AMD-V for the specified VM.
617 * This function is only called once per-VM during initialization.
618 *
619 * @returns VBox status code.
620 * @param pVM Pointer to the VM.
621 */
622VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
623{
624 int rc = VINF_SUCCESS;
625
626 AssertReturn(pVM, VERR_INVALID_PARAMETER);
627 Assert(pVM->hm.s.svm.fSupported);
628
629 for (VMCPUID i = 0; i < pVM->cCpus; i++)
630 {
631 PVMCPU pVCpu = &pVM->aCpus[i];
632 PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;
633
634 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
635
636 /* Trap exceptions unconditionally (debug purposes). */
637#ifdef HMSVM_ALWAYS_TRAP_PF
638 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
639#endif
640#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
641 /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
642 pVmcb->ctrl.u32InterceptException |= 0
643 | RT_BIT(X86_XCPT_BP)
644 | RT_BIT(X86_XCPT_DB)
645 | RT_BIT(X86_XCPT_DE)
646 | RT_BIT(X86_XCPT_NM)
647 | RT_BIT(X86_XCPT_UD)
648 | RT_BIT(X86_XCPT_NP)
649 | RT_BIT(X86_XCPT_SS)
650 | RT_BIT(X86_XCPT_GP)
651 | RT_BIT(X86_XCPT_PF)
652 | RT_BIT(X86_XCPT_MF)
653 ;
654#endif
655
656 /* Set up unconditional intercepts and conditions. */
657 pVmcb->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR /* External interrupt causes a VM-exit. */
658 | SVM_CTRL1_INTERCEPT_NMI /* Non-maskable interrupts cause a VM-exit. */
659 | SVM_CTRL1_INTERCEPT_INIT /* INIT signal causes a VM-exit. */
660 | SVM_CTRL1_INTERCEPT_RDPMC /* RDPMC causes a VM-exit. */
661 | SVM_CTRL1_INTERCEPT_CPUID /* CPUID causes a VM-exit. */
662 | SVM_CTRL1_INTERCEPT_RSM /* RSM causes a VM-exit. */
663 | SVM_CTRL1_INTERCEPT_HLT /* HLT causes a VM-exit. */
664 | SVM_CTRL1_INTERCEPT_INOUT_BITMAP /* Use the IOPM to cause IOIO VM-exits. */
665 | SVM_CTRL1_INTERCEPT_MSR_SHADOW /* MSR access not covered by MSRPM causes a VM-exit.*/
666 | SVM_CTRL1_INTERCEPT_INVLPGA /* INVLPGA causes a VM-exit. */
667 | SVM_CTRL1_INTERCEPT_SHUTDOWN /* Shutdown events cause a VM-exit. */
668 | SVM_CTRL1_INTERCEPT_FERR_FREEZE; /* Intercept "freezing" during legacy FPU handling. */
669
670 pVmcb->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN /* VMRUN causes a VM-exit. */
671 | SVM_CTRL2_INTERCEPT_VMMCALL /* VMMCALL causes a VM-exit. */
672 | SVM_CTRL2_INTERCEPT_VMLOAD /* VMLOAD causes a VM-exit. */
673 | SVM_CTRL2_INTERCEPT_VMSAVE /* VMSAVE causes a VM-exit. */
674 | SVM_CTRL2_INTERCEPT_STGI /* STGI causes a VM-exit. */
675 | SVM_CTRL2_INTERCEPT_CLGI /* CLGI causes a VM-exit. */
676 | SVM_CTRL2_INTERCEPT_SKINIT /* SKINIT causes a VM-exit. */
677 | SVM_CTRL2_INTERCEPT_WBINVD /* WBINVD causes a VM-exit. */
678 | SVM_CTRL2_INTERCEPT_MONITOR /* MONITOR causes a VM-exit. */
679 | SVM_CTRL2_INTERCEPT_MWAIT; /* MWAIT causes a VM-exit. */
680
681 /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
682 pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
683
684 /* CR0, CR4 writes must be intercepted for the same reasons as above. */
685 pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);
686
687 /* Intercept all DRx reads and writes by default. Changed later on. */
688 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
689 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
690
691 /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
692 pVmcb->ctrl.IntCtrl.n.u1VIrqMasking = 1;
693
694 /* Ignore the priority in the TPR. This is necessary for delivering PIC style (ExtInt) interrupts and we currently
695 deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent() */
696 pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR = 1;
697
698 /* Set IO and MSR bitmap permission bitmap physical addresses. */
699 pVmcb->ctrl.u64IOPMPhysAddr = g_HCPhysIOBitmap;
700 pVmcb->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;
701
702 /* No LBR virtualization. */
703 pVmcb->ctrl.u64LBRVirt = 0;
704
705 /* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from the VMCB in memory. */
706 pVmcb->ctrl.u64VmcbCleanBits = 0;
707
708 /* The host ASID must be zero (MBZ); for the guest, start with 1. */
709 pVmcb->ctrl.TLBCtrl.n.u32ASID = 1;
710
711 /*
712 * Setup the PAT MSR (applicable for Nested Paging only).
713 * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
714 * so choose type 6 for all PAT slots.
715 */
716 pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);
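        /* (Layout note, for illustration: the PAT MSR holds eight byte-sized entries,
           PA0 in the lowest byte through PA7 in the highest; the encoding 0x06 is the
           write-back (WB) memory type.) */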
717
718 /* Without Nested Paging, we need additional intercepts. */
719 if (!pVM->hm.s.fNestedPaging)
720 {
721 /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
722 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(3);
723 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(3);
724
725 /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
726 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG
727 | SVM_CTRL1_INTERCEPT_TASK_SWITCH;
728
729 /* Page faults must be intercepted to implement shadow paging. */
730 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
731 }
732
733#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
734 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_TASK_SWITCH;
735#endif
736
737 /*
738 * The following MSRs are saved/restored automatically during the world-switch.
739 * Don't intercept guest read/write accesses to these MSRs.
740 */
741 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
742 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
743 hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
744 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
745 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
746 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
747 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
748 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
749 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
750 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
751 }
752
753 return rc;
754}
755
756
757/**
758 * Invalidates a guest page by guest virtual address.
759 *
760 * @returns VBox status code.
761 * @param pVM Pointer to the VM.
762 * @param pVCpu Pointer to the VMCPU.
763 * @param GCVirt Guest virtual address of the page to invalidate.
764 */
765VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
766{
767 AssertReturn(pVM, VERR_INVALID_PARAMETER);
768 Assert(pVM->hm.s.svm.fSupported);
769
770 bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
771
772 /* Skip it if a TLB flush is already pending. */
773 if (!fFlushPending)
774 {
775 Log4(("SVMR0InvalidatePage %RGv\n", GCVirt));
776
777 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
778 AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);
779
780#if HC_ARCH_BITS == 32
781 /* If we get a flush in 64-bit guest mode, then force a full TLB flush. INVLPGA takes only 32-bit addresses. */
782 if (CPUMIsGuestInLongMode(pVCpu))
783 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
784 else
785#endif
786 {
787 SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
788 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
789 }
790 }
791 return VINF_SUCCESS;
792}
793
794
795/**
796 * Flushes the appropriate tagged-TLB entries.
797 *
798 * @param pVM Pointer to the VM.
799 * @param pVCpu Pointer to the VMCPU.
800 */
801static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu)
802{
803 PVM pVM = pVCpu->CTX_SUFF(pVM);
804 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
805 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
806
807 /*
808 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
809 * This can happen both for start & resume due to long jumps back to ring-3.
810 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
811 * so we cannot reuse the ASIDs without flushing.
812 */
813 bool fNewAsid = false;
814 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
815 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
816 {
817 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
818 pVCpu->hm.s.fForceTLBFlush = true;
819 fNewAsid = true;
820 }
821
822 /* Set TLB flush state as checked until we return from the world switch. */
823 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
824
825 /* Check for explicit TLB shootdowns. */
826 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
827 {
828 pVCpu->hm.s.fForceTLBFlush = true;
829 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
830 }
831
832 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
833 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
834
835 if (pVM->hm.s.svm.fAlwaysFlushTLB)
836 {
837 /*
838 * This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
839 */
840 pCpu->uCurrentAsid = 1;
841 pVCpu->hm.s.uCurrentAsid = 1;
842 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
843 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
844 }
845 else if (pVCpu->hm.s.fForceTLBFlush)
846 {
847 if (fNewAsid)
848 {
849 ++pCpu->uCurrentAsid;
850 bool fHitASIDLimit = false;
851 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
852 {
853 pCpu->uCurrentAsid = 1; /* Wraparound at 1; host uses 0 */
854 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new ASID. */
855 fHitASIDLimit = true;
856
857 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
858 {
859 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
860 pCpu->fFlushAsidBeforeUse = true;
861 }
862 else
863 {
864 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
865 pCpu->fFlushAsidBeforeUse = false;
866 }
867 }
868
869 if ( !fHitASIDLimit
870 && pCpu->fFlushAsidBeforeUse)
871 {
872 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
873 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
874 else
875 {
876 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
877 pCpu->fFlushAsidBeforeUse = false;
878 }
879 }
880
881 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
882 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
883 }
884 else
885 {
886 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
887 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
888 else
889 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
890 }
891
892 pVCpu->hm.s.fForceTLBFlush = false;
893 }
894 else
895 {
896 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
897 * not be executed. See hmQueueInvlPage() where it is commented
898 * out. Support individual entry flushing someday. */
899 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
900 {
901 /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
902 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
903 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
904 SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVmcb->ctrl.TLBCtrl.n.u32ASID);
905 }
906 }
907
908 pVCpu->hm.s.TlbShootdown.cPages = 0;
909 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
910
911 /* Update VMCB with the ASID. */
912 if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hm.s.uCurrentAsid)
913 {
914 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
915 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
916 }
917
918 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
919 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
920 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
921 ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
922 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
923 ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
924
925#ifdef VBOX_WITH_STATISTICS
926 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
927 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
928 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
929 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
930 {
931 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
932 }
933 else
934 {
935 Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
936 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushEntire);
937 }
938#endif
939}
940
941
942/** @name 64-bit guest on 32-bit host OS helper functions.
943 *
944 * The host CPU is still 64-bit capable but the host OS is running in 32-bit
945 * mode (code segment, paging). These wrappers/helpers perform the necessary
946 * bits for the 32->64 switcher.
947 *
948 * @{ */
949#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
950/**
951 * Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
952 *
953 * @returns VBox status code.
954 * @param HCPhysVmcbHost Physical address of host VMCB.
955 * @param HCPhysVmcb Physical address of the VMCB.
956 * @param pCtx Pointer to the guest-CPU context.
957 * @param pVM Pointer to the VM.
958 * @param pVCpu Pointer to the VMCPU.
959 */
960DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
961{
962 uint32_t aParam[4];
963 aParam[0] = (uint32_t)(HCPhysVmcbHost); /* Param 1: HCPhysVmcbHost - Lo. */
964 aParam[1] = (uint32_t)(HCPhysVmcbHost >> 32); /* Param 1: HCPhysVmcbHost - Hi. */
965 aParam[2] = (uint32_t)(HCPhysVmcb); /* Param 2: HCPhysVmcb - Lo. */
966 aParam[3] = (uint32_t)(HCPhysVmcb >> 32); /* Param 2: HCPhysVmcb - Hi. */
967
968 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, 4, &aParam[0]);
969}
970
971
972/**
973 * Executes the specified VMRUN handler in 64-bit mode.
974 *
975 * @returns VBox status code.
976 * @param pVM Pointer to the VM.
977 * @param pVCpu Pointer to the VMCPU.
978 * @param pCtx Pointer to the guest-CPU context.
979 * @param enmOp The operation to perform.
980 * @param cbParam Number of parameters.
981 * @param paParam Array of 32-bit parameters.
982 */
983VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
984 uint32_t *paParam)
985{
986 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
987 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
988
989 /* Disable interrupts. */
990 RTHCUINTREG uOldEFlags = ASMIntDisableFlags();
991
992#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
993 RTCPUID idHostCpu = RTMpCpuId();
994 CPUMR0SetLApic(pVCpu, idHostCpu);
995#endif
996
997 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
998 CPUMSetHyperEIP(pVCpu, enmOp);
999 for (int i = (int)cbParam - 1; i >= 0; i--)
1000 CPUMPushHyper(pVCpu, paParam[i]);
1001
1002 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
1003 /* Call the switcher. */
1004 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
1005 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
1006
1007 /* Restore interrupts. */
1008 ASMSetFlags(uOldEFlags);
1009 return rc;
1010}
1011
1012#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
1013/** @} */
1014
1015
1016/**
1017 * Adds an exception to the intercept exception bitmap in the VMCB and updates
1018 * the corresponding VMCB Clean Bit.
1019 *
1020 * @param pVmcb Pointer to the VMCB.
1021 * @param u32Xcpt The value of the exception (X86_XCPT_*).
1022 */
1023DECLINLINE(void) hmR0SvmAddXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
1024{
1025 if (!(pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt)))
1026 {
1027 pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
1028 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1029 }
1030}
1031
1032
1033/**
1034 * Removes an exception from the intercept-exception bitmap in the VMCB and
1035 * updates the corresponding VMCB Clean Bit.
1036 *
1037 * @param pVmcb Pointer to the VMCB.
1038 * @param u32Xcpt The value of the exception (X86_XCPT_*).
1039 */
1040DECLINLINE(void) hmR0SvmRemoveXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
1041{
1042#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
1043 if (pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
1044 {
1045 pVmcb->ctrl.u32InterceptException &= ~RT_BIT(u32Xcpt);
1046 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1047 }
1048#endif
1049}
1050
1051
1052/**
1053 * Loads the guest control registers (CR0, CR2, CR3, CR4) into the VMCB.
1054 *
1055 * @returns VBox status code.
1056 * @param pVCpu Pointer to the VMCPU.
1057 * @param pVmcb Pointer to the VMCB.
1058 * @param pCtx Pointer the guest-CPU context.
1059 *
1060 * @remarks No-long-jump zone!!!
1061 */
1062DECLINLINE(int) hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1063{
1064 /*
1065 * Guest CR0.
1066 */
1067 PVM pVM = pVCpu->CTX_SUFF(pVM);
1068 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
1069 {
1070 uint64_t u64GuestCR0 = pCtx->cr0;
1071
1072 /* Always enable caching. */
1073 u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
1074
1075 /*
1076 * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
1077 */
1078 if (!pVM->hm.s.fNestedPaging)
1079 {
1080 u64GuestCR0 |= X86_CR0_PG; /* When Nested Paging is not available, use shadow page tables. */
1081 u64GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
1082 }
1083
1084 /*
1085 * Guest FPU bits.
1086 */
1087 bool fInterceptNM = false;
1088 bool fInterceptMF = false;
1089 u64GuestCR0 |= X86_CR0_NE; /* Use internal x87 FPU exceptions handling rather than external interrupts. */
1090 if (CPUMIsGuestFPUStateActive(pVCpu))
1091 {
1092 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
1093 if (!(u64GuestCR0 & X86_CR0_NE))
1094 {
1095 Log4(("hmR0SvmLoadGuestControlRegs: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
1096 fInterceptMF = true;
1097 }
1098 }
1099 else
1100 {
1101 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
1102 u64GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
1103 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
1104 }
1105
1106 /*
1107 * Update the exception intercept bitmap.
1108 */
1109 if (fInterceptNM)
1110 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
1111 else
1112 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_NM);
1113
1114 if (fInterceptMF)
1115 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
1116 else
1117 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_MF);
1118
1119 pVmcb->guest.u64CR0 = u64GuestCR0;
1120 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1121 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
1122 }
1123
1124 /*
1125 * Guest CR2.
1126 */
1127 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR2)
1128 {
1129 pVmcb->guest.u64CR2 = pCtx->cr2;
1130 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
1131 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
1132 }
1133
1134 /*
1135 * Guest CR3.
1136 */
1137 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
1138 {
1139 if (pVM->hm.s.fNestedPaging)
1140 {
1141 PGMMODE enmShwPagingMode;
1142#if HC_ARCH_BITS == 32
1143 if (CPUMIsGuestInLongModeEx(pCtx))
1144 enmShwPagingMode = PGMMODE_AMD64_NX;
1145 else
1146#endif
1147 enmShwPagingMode = PGMGetHostMode(pVM);
1148
1149 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
1150 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
1151 Assert(pVmcb->ctrl.u64NestedPagingCR3);
1152 pVmcb->guest.u64CR3 = pCtx->cr3;
1153 }
1154 else
1155 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
1156
1157 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1158 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
1159 }
1160
1161 /*
1162 * Guest CR4.
1163 */
1164 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
1165 {
1166 uint64_t u64GuestCR4 = pCtx->cr4;
1167 if (!pVM->hm.s.fNestedPaging)
1168 {
1169 switch (pVCpu->hm.s.enmShadowMode)
1170 {
1171 case PGMMODE_REAL:
1172 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
1173 AssertFailed();
1174 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1175
1176 case PGMMODE_32_BIT: /* 32-bit paging. */
1177 u64GuestCR4 &= ~X86_CR4_PAE;
1178 break;
1179
1180 case PGMMODE_PAE: /* PAE paging. */
1181 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
1182 /** Must use PAE paging as we could use physical memory > 4 GB */
1183 u64GuestCR4 |= X86_CR4_PAE;
1184 break;
1185
1186 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
1187 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
1188#ifdef VBOX_ENABLE_64_BITS_GUESTS
1189 break;
1190#else
1191 AssertFailed();
1192 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1193#endif
1194
1195 default: /* shut up gcc */
1196 AssertFailed();
1197 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1198 }
1199 }
1200
1201 pVmcb->guest.u64CR4 = u64GuestCR4;
1202 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1203 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
1204 }
1205
1206 return VINF_SUCCESS;
1207}
1208
1209
1210/**
1211 * Loads the guest segment registers into the VMCB.
1212 *
1213 * @returns VBox status code.
1214 * @param pVCpu Pointer to the VMCPU.
1215 * @param pVmcb Pointer to the VMCB.
1216 * @param pCtx Pointer to the guest-CPU context.
1217 *
1218 * @remarks No-long-jump zone!!!
1219 */
1220DECLINLINE(void) hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1221{
1222 /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
1223 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
1224 {
1225 HMSVM_LOAD_SEG_REG(CS, cs);
1226 HMSVM_LOAD_SEG_REG(SS, ss);
1227 HMSVM_LOAD_SEG_REG(DS, ds);
1228 HMSVM_LOAD_SEG_REG(ES, es);
1229 HMSVM_LOAD_SEG_REG(FS, fs);
1230 HMSVM_LOAD_SEG_REG(GS, gs);
1231
1232 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1233 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
1234 }
1235
1236 /* Guest TR. */
1237 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
1238 {
1239 HMSVM_LOAD_SEG_REG(TR, tr);
1240 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
1241 }
1242
1243 /* Guest LDTR. */
1244 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
1245 {
1246 HMSVM_LOAD_SEG_REG(LDTR, ldtr);
1247 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
1248 }
1249
1250 /* Guest GDTR. */
1251 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
1252 {
1253 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1254 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1255 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1256 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
1257 }
1258
1259 /* Guest IDTR. */
1260 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
1261 {
1262 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1263 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1264 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1265 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
1266 }
1267}
1268
1269
1270/**
1271 * Loads the guest MSRs into the VMCB.
1272 *
1273 * @param pVCpu Pointer to the VMCPU.
1274 * @param pVmcb Pointer to the VMCB.
1275 * @param pCtx Pointer to the guest-CPU context.
1276 *
1277 * @remarks No-long-jump zone!!!
1278 */
1279DECLINLINE(void) hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1280{
1281 /* Guest Sysenter MSRs. */
1282 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1283 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1284 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1285
1286 /*
1287 * Guest EFER MSR.
1288 * AMD-V requires guest EFER.SVME to be set. Weird.
1289 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
1290 */
1291 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_EFER_MSR)
1292 {
1293 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1294 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1295 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_SVM_GUEST_EFER_MSR;
1296 }
1297
1298 /* 64-bit MSRs. */
1299 if (CPUMIsGuestInLongModeEx(pCtx))
1300 {
1301 pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
1302 pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
1303 }
1304 else
1305 {
1306 /* If the guest isn't in 64-bit mode, clear the MSR_K6_EFER_LME bit from guest EFER, otherwise AMD-V expects AMD64 shadow paging. */
1307 if (pCtx->msrEFER & MSR_K6_EFER_LME)
1308 {
1309 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
1310 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
1311 }
1312 }
1313
1314
1315 /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might
1316 * be writable in 32-bit mode. Clarify with AMD spec. */
1317 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1318 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1319 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1320 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1321 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1322}
1323
1324
1325/**
1326 * Loads the guest debug registers into the VMCB.
1327 *
1328 * @param pVCpu Pointer to the VMCPU.
1329 * @param pVmcb Pointer to the VMCB.
1330 * @param pCtx Pointer to the guest-CPU context.
1331 *
1332 * @remarks No-long-jump zone!!!
1333 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
1334 */
1335DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1336{
1337 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
1338 return;
1339 Assert((pCtx->dr[6] & X86_DR6_RA1_MASK) == X86_DR6_RA1_MASK); Assert((pCtx->dr[6] & X86_DR6_RAZ_MASK) == 0);
1340 Assert((pCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); Assert((pCtx->dr[7] & X86_DR7_RAZ_MASK) == 0);
1341
1342 bool fInterceptDB = false;
1343 bool fInterceptMovDRx = false;
1344
1345 /*
1346 * Anyone single stepping on the host side? If so, we'll have to use the
1347 * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
1348 * the VMM level like VT-x implementations does.
1349 */
1350 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
1351 if (fStepping)
1352 {
1353 pVCpu->hm.s.fClearTrapFlag = true;
1354 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1355 fInterceptDB = true;
1356 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
1357 }
1358
1359 PVM pVM = pVCpu->CTX_SUFF(pVM);
1360 if (fStepping || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
1361 {
1362 /*
1363 * Use the combined guest and host DRx values found in the hypervisor
1364 * register set because the debugger has breakpoints active or someone
1365 * is single stepping on the host side.
1366 *
1367 * Note! DBGF expects a clean DR6 state before executing guest code.
1368 */
1369 if (!CPUMIsHyperDebugStateActive(pVCpu))
1370 CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
1371 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1372 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1373
1374 /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
1375 if ( pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
1376 || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu) )
1377 {
1378 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1379 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
1380 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1381 }
1382
1383 /** @todo If we cared, we could optimize to allow the guest to read registers
1384 * with the same values. */
1385 fInterceptDB = true;
1386 fInterceptMovDRx = true;
1387 Log5(("hm: Loaded hyper DRx\n"));
1388 }
1389 else
1390 {
1391 /*
1392 * Update DR6, DR7 with the guest values if necessary.
1393 */
1394 if ( pVmcb->guest.u64DR7 != pCtx->dr[7]
1395 || pVmcb->guest.u64DR6 != pCtx->dr[6])
1396 {
1397 pVmcb->guest.u64DR7 = pCtx->dr[7];
1398 pVmcb->guest.u64DR6 = pCtx->dr[6];
1399 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1400 }
1401
1402 /*
1403 * If the guest has enabled debug registers, we need to load them prior to
1404 * executing guest code so they'll trigger at the right time.
1405 */
1406 if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
1407 {
1408 if (!CPUMIsGuestDebugStateActive(pVCpu))
1409 {
1410 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
1411 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1412 }
1413 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1414 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1415 Log5(("hm: Loaded guest DRx\n"));
1416 }
1417 /*
1418 * If no debugging is enabled, we'll lazily load DR0-3.
1419 */
1420 else if (!CPUMIsGuestDebugStateActive(pVCpu))
1421 fInterceptMovDRx = true;
1422 }
1423
1424 /*
1425 * Set up the intercepts.
1426 */
1427 if (fInterceptDB)
1428 hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_DB);
1429 else
1430 hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_DB);
1431
1432 if (fInterceptMovDRx)
1433 {
1434 if ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1435 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1436 {
1437 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
1438 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
1439 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1440 }
1441 }
1442 else
1443 {
1444 if ( pVmcb->ctrl.u16InterceptRdDRx
1445 || pVmcb->ctrl.u16InterceptWrDRx)
1446 {
1447 pVmcb->ctrl.u16InterceptRdDRx = 0;
1448 pVmcb->ctrl.u16InterceptWrDRx = 0;
1449 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1450 }
1451 }
1452
1453 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
1454}
1455
1456
1457/**
1458 * Loads the guest APIC state (currently just the TPR).
1459 *
1460 * @returns VBox status code.
1461 * @param pVCpu Pointer to the VMCPU.
1462 * @param pVmcb Pointer to the VMCB.
1463 * @param pCtx Pointer to the guest-CPU context.
1464 */
1465DECLINLINE(int) hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
1466{
1467 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_APIC_STATE))
1468 return VINF_SUCCESS;
1469
1470 bool fPendingIntr;
1471 uint8_t u8Tpr;
1472 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
1473 AssertRCReturn(rc, rc);
1474
1475 /** Assume that we need to trap all TPR accesses and thus need not check on
1476 * every #VMEXIT if we should update the TPR. */
1477 Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
1478 pVCpu->hm.s.svm.fSyncVTpr = false;
1479
1480 /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
1481 if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
1482 {
1483 pCtx->msrLSTAR = u8Tpr;
1484
1485 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
1486 if (fPendingIntr)
1487 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
1488 else
1489 {
1490 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
1491 pVCpu->hm.s.svm.fSyncVTpr = true;
1492 }
1493 }
1494 else
1495 {
1496 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
1497 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
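        /* (For illustration: a guest TPR of 0x50, i.e. priority class 5, becomes VTPR 0x05 here.) */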
1498
1499 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
1500 if (fPendingIntr)
1501 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
1502 else
1503 {
1504 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
1505 pVCpu->hm.s.svm.fSyncVTpr = true;
1506 }
1507
1508 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
1509 }
1510
1511 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_SVM_GUEST_APIC_STATE;
1512 return rc;
1513}
1514
1515
1516/**
1517 * Sets up the appropriate function to run guest code.
1518 *
1519 * @returns VBox status code.
1520 * @param pVCpu Pointer to the VMCPU.
1521 * @param pCtx Pointer to the guest-CPU context.
1522 *
1523 * @remarks No-long-jump zone!!!
1524 */
1525static int hmR0SvmSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pCtx)
1526{
1527 if (CPUMIsGuestInLongModeEx(pCtx))
1528 {
1529#ifndef VBOX_ENABLE_64_BITS_GUESTS
1530 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1531#endif
1532 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
1533#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1534 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
1535 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
1536#else
1537 /* 64-bit host or hybrid host. */
1538 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
1539#endif
1540 }
1541 else
1542 {
1543 /* Guest is not in long mode, use the 32-bit handler. */
1544 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
1545 }
1546 return VINF_SUCCESS;
1547}
1548
1549
1550/**
1551 * Enters the AMD-V session.
1552 *
1553 * @returns VBox status code.
1554 * @param pVM Pointer to the VM.
1555 * @param pVCpu Pointer to the VMCPU.
1556 * @param pCpu Pointer to the CPU info struct.
1557 */
1558VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1559{
1560 AssertPtr(pVM);
1561 AssertPtr(pVCpu);
1562 Assert(pVM->hm.s.svm.fSupported);
1563 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1564 NOREF(pCpu);
1565
1566 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1567
1568 pVCpu->hm.s.fLeaveDone = false;
1569 return VINF_SUCCESS;
1570}
1571
1572
1573/**
1574 * Thread-context callback for AMD-V.
1575 *
1576 * @param enmEvent The thread-context event.
1577 * @param pVCpu Pointer to the VMCPU.
1578 * @param fGlobalInit Whether global VT-x/AMD-V init. is used.
1579 */
1580VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
1581{
1582 switch (enmEvent)
1583 {
1584 case RTTHREADCTXEVENT_PREEMPTING:
1585 {
1586 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1587 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
1588 VMCPU_ASSERT_EMT(pVCpu);
1589
1590 PVM pVM = pVCpu->CTX_SUFF(pVM);
1591 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1592 VMMRZCallRing3Disable(pVCpu); /* No longjmps (log-flush, locks) in this fragile context. */
1593
1594 if (!pVCpu->hm.s.fLeaveDone)
1595 {
1596 hmR0SvmLeave(pVM, pVCpu, pCtx);
1597 pVCpu->hm.s.fLeaveDone = true;
1598 }
1599
1600 int rc = HMR0LeaveCpu(pVCpu); /* Leave HM context, takes care of local init (term). */
1601 AssertRC(rc); NOREF(rc);
1602
1603 VMMRZCallRing3Enable(pVCpu); /* Restore longjmp state. */
1604 break;
1605 }
1606
1607 case RTTHREADCTXEVENT_RESUMED:
1608 {
1609 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1610 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
1611 VMCPU_ASSERT_EMT(pVCpu);
1612
1613 VMMRZCallRing3Disable(pVCpu); /* No longjmps (log-flush, locks) in this fragile context. */
1614
1615 /*
1616 * Initialize the bare minimum state required for HM. This takes care of
1617 * initializing AMD-V if necessary (onlined CPUs, local init etc.)
1618 */
1619 int rc = HMR0EnterCpu(pVCpu);
1620 AssertRC(rc); NOREF(rc);
1621 Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
1622
1623 pVCpu->hm.s.fLeaveDone = false;
1624 VMMRZCallRing3Enable(pVCpu); /* Restore longjmp state. */
1625 break;
1626 }
1627
1628 default:
1629 break;
1630 }
1631}
1632
1633
1634/**
1635 * Saves the host state.
1636 *
1637 * @returns VBox status code.
1638 * @param pVM Pointer to the VM.
1639 * @param pVCpu Pointer to the VMCPU.
1640 *
1641 * @remarks No-long-jump zone!!!
1642 */
1643VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
1644{
1645 NOREF(pVM);
1646 NOREF(pVCpu);
1647 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
1648 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
1649 return VINF_SUCCESS;
1650}
1651
1652
1653/**
1654 * Loads the guest state into the VMCB. The CPU state will be loaded from these
1655 * fields on every successful VM-entry.
1656 *
1657 * Sets up the appropriate VMRUN function to execute guest code based
1658 * on the guest CPU mode.
1659 *
1660 * @returns VBox status code.
1661 * @param pVM Pointer to the VM.
1662 * @param pVCpu Pointer to the VMCPU.
1663 * @param pCtx Pointer to the guest-CPU context.
1664 *
1665 * @remarks No-long-jump zone!!!
1666 */
1667static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1668{
1669 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1670 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
1671
1672 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
1673
1674 int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);
1675 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1676
1677 hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcb, pCtx);
1678 hmR0SvmLoadGuestMsrs(pVCpu, pVmcb, pCtx);
1679
1680 pVmcb->guest.u64RIP = pCtx->rip;
1681 pVmcb->guest.u64RSP = pCtx->rsp;
1682 pVmcb->guest.u64RFlags = pCtx->eflags.u32;
1683 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1684 pVmcb->guest.u64RAX = pCtx->rax;
1685
1686 /* hmR0SvmLoadGuestDebugRegs() must be called -after- updating guest RFLAGS as the RFLAGS may need to be changed. */
1687 hmR0SvmLoadGuestDebugRegs(pVCpu, pVmcb, pCtx);
1688
1689 rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
1690 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1691
1692 rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
1693 AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1694
1695 /* Clear any unused and reserved bits. */
1696 pVCpu->hm.s.fContextUseFlags &= ~( HM_CHANGED_GUEST_RIP /* Unused (loaded unconditionally). */
1697 | HM_CHANGED_GUEST_RSP
1698 | HM_CHANGED_GUEST_RFLAGS
1699 | HM_CHANGED_GUEST_SYSENTER_CS_MSR
1700 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
1701 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
1702 | HM_CHANGED_SVM_RESERVED1 /* Reserved. */
1703 | HM_CHANGED_SVM_RESERVED2
1704 | HM_CHANGED_SVM_RESERVED3);
1705
1706 /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
1707 AssertMsg( !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
1708 || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
1709 ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
1710 pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
1711
1712 Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss.Sel, pCtx->rsp));
1713
1714 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
1715 return rc;
1716}
1717
1718
1719/**
1720 * Saves the entire guest state from the VMCB into the
1721 * guest-CPU context. Currently there is no residual state left in the CPU that
1722 * is not updated in the VMCB.
1723 *
1725 * @param pVCpu Pointer to the VMCPU.
1726 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1727 * out-of-sync. Make sure to update the required fields
1728 * before using them.
1729 */
1730static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1731{
1732 Assert(VMMRZCallRing3IsEnabled(pVCpu));
1733
1734 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1735
1736 pMixedCtx->rip = pVmcb->guest.u64RIP;
1737 pMixedCtx->rsp = pVmcb->guest.u64RSP;
1738 pMixedCtx->eflags.u32 = pVmcb->guest.u64RFlags;
1739 pMixedCtx->rax = pVmcb->guest.u64RAX;
1740
1741 /*
1742 * Guest interrupt shadow.
1743 */
1744 if (pVmcb->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
1745 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
1746 else
1747 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1748
1749 /*
1750 * Guest Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
1751 */
1752 pMixedCtx->cr2 = pVmcb->guest.u64CR2;
1753
1754 /*
1755 * Guest MSRs.
1756 */
1757 pMixedCtx->msrSTAR = pVmcb->guest.u64STAR; /* legacy syscall eip, cs & ss */
1758 pMixedCtx->msrLSTAR = pVmcb->guest.u64LSTAR; /* 64-bit mode syscall rip */
1759 pMixedCtx->msrCSTAR = pVmcb->guest.u64CSTAR; /* compatibility mode syscall rip */
1760 pMixedCtx->msrSFMASK = pVmcb->guest.u64SFMASK; /* syscall flag mask */
1761 pMixedCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase; /* swapgs exchange value */
1762 pMixedCtx->SysEnter.cs = pVmcb->guest.u64SysEnterCS;
1763 pMixedCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP;
1764 pMixedCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;
1765
1766 /*
1767 * Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
1768 */
1769 HMSVM_SAVE_SEG_REG(CS, cs);
1770 HMSVM_SAVE_SEG_REG(SS, ss);
1771 HMSVM_SAVE_SEG_REG(DS, ds);
1772 HMSVM_SAVE_SEG_REG(ES, es);
1773 HMSVM_SAVE_SEG_REG(FS, fs);
1774 HMSVM_SAVE_SEG_REG(GS, gs);
1775
1776 /*
1777 * Correct the hidden CS granularity bit. Haven't seen it being wrong in any other
1778 * register (yet).
1779 */
1780 /** @todo SELM might need to be fixed as it too should not care about the
1781 * granularity bit. See @bugref{6785}. */
1782 if ( !pMixedCtx->cs.Attr.n.u1Granularity
1783 && pMixedCtx->cs.Attr.n.u1Present
1784 && pMixedCtx->cs.u32Limit > UINT32_C(0xfffff))
1785 {
1786 Assert((pMixedCtx->cs.u32Limit & 0xfff) == 0xfff);
1787 pMixedCtx->cs.Attr.n.u1Granularity = 1;
1788 }
1789
1790#ifdef VBOX_STRICT
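/* Sanity check: for a present segment, G=1 implies the low 12 limit bits are all set, while G=0 implies the limit fits in 20 bits. */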
1791# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
1792 AssertMsg( !pMixedCtx->reg.Attr.n.u1Present \
1793 || ( pMixedCtx->reg.Attr.n.u1Granularity \
1794 ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
1795 : pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
1796 ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \
1797 pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
1798
1799 HMSVM_ASSERT_SEG_GRANULARITY(cs);
1800 HMSVM_ASSERT_SEG_GRANULARITY(ss);
1801 HMSVM_ASSERT_SEG_GRANULARITY(ds);
1802 HMSVM_ASSERT_SEG_GRANULARITY(es);
1803 HMSVM_ASSERT_SEG_GRANULARITY(fs);
1804 HMSVM_ASSERT_SEG_GRANULARITY(gs);
1805
1806# undef HMSVM_ASSERT_SEG_GRANULARITY
1807#endif
1808
1809 /*
1810 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and use that,
1811 * so it's possible that the SS DPL isn't updated by AMD-V when the CPL changes during guest
1812 * execution. Observed on some AMD Fusion CPUs with 64-bit guests.
1813 * See AMD spec. 15.5.1 "Basic operation".
1814 */
1815 Assert(!(pVmcb->guest.u8CPL & ~0x3));
1816 pMixedCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3;
1817
1818 /*
1819 * Guest Descriptor-Table registers.
1820 */
1821 HMSVM_SAVE_SEG_REG(TR, tr);
1822 HMSVM_SAVE_SEG_REG(LDTR, ldtr);
1823 pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
1824 pMixedCtx->gdtr.pGdt = pVmcb->guest.GDTR.u64Base;
1825
1826 pMixedCtx->idtr.cbIdt = pVmcb->guest.IDTR.u32Limit;
1827 pMixedCtx->idtr.pIdt = pVmcb->guest.IDTR.u64Base;
1828
1829 /*
1830 * Guest Debug registers.
1831 */
1832 if (!CPUMIsHyperDebugStateActive(pVCpu))
1833 {
1834 pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
1835 pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
1836 }
1837 else
1838 {
1839 Assert(pVmcb->guest.u64DR7 == CPUMGetHyperDR7(pVCpu));
1840 CPUMSetHyperDR6(pVCpu, pVmcb->guest.u64DR6);
1841 }
1842
1843 /*
1844 * With Nested Paging, CR3 changes are not intercepted. Therefore, sync. it now.
1845 * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
1846 */
1847 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
1848 && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
1849 {
1850 CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
1851 PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);
1852 }
1853}
1854
1855
1856/**
1857 * Does the necessary state syncing before returning to ring-3 for any reason
1858 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
1859 *
1860 * @param pVM Pointer to the VM.
1861 * @param pVCpu Pointer to the VMCPU.
1862 * @param pCtx Pointer to the guest-CPU context.
1863 *
1864 * @remarks No-long-jmp zone!!!
1865 */
1866static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1867{
1868 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1869 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1870 Assert(VMMR0IsLogFlushDisabled(pVCpu));
1871
1872 /* Restore host FPU state if necessary and resync on next R0 reentry. */
1873 if (CPUMIsGuestFPUStateActive(pVCpu))
1874 {
1875 CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
1876 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
1877 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
1878 }
1879
1880 /*
1881 * Restore host debug registers if necessary and resync on next R0 reentry.
1882 */
1883#ifdef VBOX_STRICT
1884 if (CPUMIsHyperDebugStateActive(pVCpu))
1885 {
1886 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1887 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
1888 Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
1889 }
1890#endif
1891 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
1892 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
1893
1894 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1895 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1896
1897 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
1898 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
1899 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
1900 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
1901 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
1902
1903 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
1904}
1905
1906
1907/**
1908 * Leaves the AMD-V session.
1909 *
1911 * @param pVM Pointer to the VM.
1912 * @param pVCpu Pointer to the VMCPU.
1913 * @param pCtx Pointer to the guest-CPU context.
1914 */
1915DECLINLINE(void) hmR0SvmLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1916{
1917 HM_DISABLE_PREEMPT_IF_NEEDED();
1918 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1919 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1920
1921 /* When thread-context hooks are used, we can avoid doing the leave again if we were preempted
1922 earlier and already did it from SVMR0ThreadCtxCallback(). */
1923 if (!pVCpu->hm.s.fLeaveDone)
1924 {
1925 hmR0SvmLeave(pVM, pVCpu, pCtx);
1926 pVCpu->hm.s.fLeaveDone = true;
1927 }
1928
1929 /* Deregister hook now that we've left HM context before re-enabling preemption. */
1930 /** @todo This is bad. Deregistering here means we need to VMCLEAR always
1931 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
1932 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
1933 VMMR0ThreadCtxHooksDeregister(pVCpu);
1934
1935 /* Leave HM context. This takes care of local init (term). */
1936 int rc = HMR0LeaveCpu(pVCpu);
1937 AssertRC(rc); NOREF(rc);
1938
1939 HM_RESTORE_PREEMPT_IF_NEEDED();
1940}
1941
1942
1943/**
1944 * Does the necessary state syncing before doing a longjmp to ring-3.
1945 *
1946 * @param pVM Pointer to the VM.
1947 * @param pVCpu Pointer to the VMCPU.
1948 * @param pCtx Pointer to the guest-CPU context.
1949 *
1950 * @remarks No-long-jmp zone!!!
1951 */
1952static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1953{
1954 hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
1955}
1956
1957
1958/**
1959 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
1960 * any remaining host state) before we longjump to ring-3 and possibly get
1961 * preempted.
1962 *
1963 * @param pVCpu Pointer to the VMCPU.
1964 * @param enmOperation The operation causing the ring-3 longjump.
1965 * @param pvUser The user argument (pointer to the possibly
1966 * out-of-date guest-CPU context).
1967 *
1968 * @remarks Must never be called with @a enmOperation ==
1969 * VMMCALLRING3_VM_R0_ASSERTION.
1970 */
1971DECLCALLBACK(void) hmR0SvmCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
1972{
1973 /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
1974 Assert(pVCpu);
1975 Assert(pvUser);
1976 Assert(VMMRZCallRing3IsEnabled(pVCpu));
1977 HMSVM_ASSERT_PREEMPT_SAFE();
1978
1979 VMMRZCallRing3Disable(pVCpu);
1980 Assert(VMMR0IsLogFlushDisabled(pVCpu));
1981
1982 Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
1983 hmR0SvmLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
1984
1985 VMMRZCallRing3Enable(pVCpu);
1986}
1987
1988
1989/**
1990 * Takes necessary actions before going back to ring-3.
1991 *
1992 * An action requires us to go back to ring-3. This function does the necessary
1993 * steps before we can safely return to ring-3. This is not the same as a longjmp
1994 * to ring-3; this exit is voluntary.
1995 *
1996 * @param pVM Pointer to the VM.
1997 * @param pVCpu Pointer to the VMCPU.
1998 * @param pCtx Pointer to the guest-CPU context.
1999 * @param rcExit The reason for exiting to ring-3. Can be
2000 * VINF_VMM_UNKNOWN_RING3_CALL.
2001 */
2002static void hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
2003{
2004 Assert(pVM);
2005 Assert(pVCpu);
2006 Assert(pCtx);
2007 HMSVM_ASSERT_PREEMPT_SAFE();
2008
2009 if (RT_UNLIKELY(rcExit == VERR_SVM_INVALID_GUEST_STATE))
2010 {
2011 /* We don't need to do any syncing here, we're not going to come back to execute anything again. */
2012 return;
2013 }
2014
2015 /* Please, no longjmps here (any logging shouldn't flush and thereby longjmp back to ring-3). NO LOGGING BEFORE THIS POINT! */
2016 VMMRZCallRing3Disable(pVCpu);
2017 Log4(("hmR0SvmExitToRing3: rcExit=%d\n", rcExit));
2018
2019 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
2020 if (pVCpu->hm.s.Event.fPending)
2021 {
2022 hmR0SvmPendingEventToTrpmTrap(pVCpu);
2023 Assert(!pVCpu->hm.s.Event.fPending);
2024 }
2025
2026 /* Sync. the necessary state for going back to ring-3. */
2027 hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
2028 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
2029
2030 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
2031 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
2032 | CPUM_CHANGED_LDTR
2033 | CPUM_CHANGED_GDTR
2034 | CPUM_CHANGED_IDTR
2035 | CPUM_CHANGED_TR
2036 | CPUM_CHANGED_HIDDEN_SEL_REGS);
2037 if ( pVM->hm.s.fNestedPaging
2038 && CPUMIsGuestPagingEnabledEx(pCtx))
2039 {
2040 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
2041 }
2042
2043 /* Make sure we undo the trap flag if we tried to single-step something. */
2044 if (pVCpu->hm.s.fClearTrapFlag)
2045 {
2046 pCtx->eflags.Bits.u1TF = 0;
2047 pVCpu->hm.s.fClearTrapFlag = false;
2048 }
2049
2050 /* On our way back from ring-3 the following needs to be done. */
2051 /** @todo This can change with preemption hooks. */
2052 if (rcExit == VINF_EM_RAW_INTERRUPT)
2053 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
2054 else
2055 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
2056
2057 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
2058 VMMRZCallRing3Enable(pVCpu);
2059}
2060
2061
2062/**
2063 * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
2064 * intercepts.
2065 *
2066 * @param pVCpu Pointer to the VMCPU.
2067 *
2068 * @remarks No-long-jump zone!!!
2069 */
2070static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu)
2071{
2072 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2073 if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset))
2074 {
2075 uint64_t u64CurTSC = ASMReadTSC();
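/* With offsetting the guest reads host-TSC + offset. Only leave RDTSC/RDTSCP unintercepted if that value cannot appear to go backwards w.r.t. the last TSC value the guest has seen. */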
2076 if (u64CurTSC + pVmcb->ctrl.u64TSCOffset > TMCpuTickGetLastSeen(pVCpu))
2077 {
2078 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
2079 pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
2080 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
2081 }
2082 else
2083 {
2084 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
2085 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
2086 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
2087 }
2088 }
2089 else
2090 {
2091 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
2092 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
2093 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
2094 }
2095
2096 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
2097}
2098
2099
2100/**
2101 * Sets an event as a pending event to be injected into the guest.
2102 *
2103 * @param pVCpu Pointer to the VMCPU.
2104 * @param pEvent Pointer to the SVM event.
2105 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
2106 * page-fault.
2107 *
2108 * @remarks Statistics counter assumes this is a guest event being reflected to
2109 * the guest, i.e. 'StatInjectPendingReflect' is always incremented.
2110 */
2111DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
2112{
2113 Assert(!pVCpu->hm.s.Event.fPending);
2114 Assert(pEvent->n.u1Valid);
2115
2116 pVCpu->hm.s.Event.u64IntrInfo = pEvent->u;
2117 pVCpu->hm.s.Event.fPending = true;
2118 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
2119
2120 Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
2121 pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
2122
2123 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
2124}
2125
2126
2127/**
2128 * Injects an event into the guest upon VMRUN by updating the relevant field
2129 * in the VMCB.
2130 *
2131 * @param pVCpu Pointer to the VMCPU.
2132 * @param pVmcb Pointer to the guest VMCB.
2133 * @param pCtx Pointer to the guest-CPU context.
2134 * @param pEvent Pointer to the event.
2135 *
2136 * @remarks No-long-jump zone!!!
2137 * @remarks Requires CR0!
2138 */
2139DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
2140{
2141 pVmcb->ctrl.EventInject.u = pEvent->u;
2142 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
2143
2144 Log4(("hmR0SvmInjectEventVmcb: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,
2145 pEvent->n.u8Vector, (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
2146}
2147
2148
2149
2150/**
2151 * Converts any TRPM trap into a pending HM event. This is typically used when
2152 * entering from ring-3 (not longjmp returns).
2153 *
2154 * @param pVCpu Pointer to the VMCPU.
2155 */
2156static void hmR0SvmTrpmTrapToPendingEvent(PVMCPU pVCpu)
2157{
2158 Assert(TRPMHasTrap(pVCpu));
2159 Assert(!pVCpu->hm.s.Event.fPending);
2160
2161 uint8_t uVector;
2162 TRPMEVENT enmTrpmEvent;
2163 RTGCUINT uErrCode;
2164 RTGCUINTPTR GCPtrFaultAddress;
2165 uint8_t cbInstr;
2166
2167 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
2168 AssertRC(rc);
2169
2170 SVMEVENT Event;
2171 Event.u = 0;
2172 Event.n.u1Valid = 1;
2173 Event.n.u8Vector = uVector;
2174
2175 /* Refer AMD spec. 15.20 "Event Injection" for the format. */
2176 if (enmTrpmEvent == TRPM_TRAP)
2177 {
2178 Event.n.u3Type = SVM_EVENT_EXCEPTION;
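/* Only the exception vectors below push an error code on real hardware, hence only they get u1ErrorCodeValid set. */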
2179 switch (uVector)
2180 {
2181 case X86_XCPT_PF:
2182 case X86_XCPT_DF:
2183 case X86_XCPT_TS:
2184 case X86_XCPT_NP:
2185 case X86_XCPT_SS:
2186 case X86_XCPT_GP:
2187 case X86_XCPT_AC:
2188 {
2189 Event.n.u1ErrorCodeValid = 1;
2190 Event.n.u32ErrorCode = uErrCode;
2191 break;
2192 }
2193 }
2194 }
2195 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
2196 {
2197 if (uVector == X86_XCPT_NMI)
2198 Event.n.u3Type = SVM_EVENT_NMI;
2199 else
2200 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
2201 }
2202 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
2203 Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
2204 else
2205 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
2206
2207 rc = TRPMResetTrap(pVCpu);
2208 AssertRC(rc);
2209
2210 Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
2211 !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
2212
2213 hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
2214 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
2215}
2216
2217
2218/**
2219 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
2220 * AMD-V to execute any instruction.
2221 *
2222 * @param pVCpu Pointer to the VMCPU.
2223 */
2224static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
2225{
2226 Assert(pVCpu->hm.s.Event.fPending);
2227 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
2228
2229 SVMEVENT Event;
2230 Event.u = pVCpu->hm.s.Event.u64IntrInfo;
2231
2232 uint8_t uVector = Event.n.u8Vector;
2233 uint8_t uVectorType = Event.n.u3Type;
2234
2235 TRPMEVENT enmTrapType;
2236 switch (uVectorType)
2237 {
2238 case SVM_EVENT_EXTERNAL_IRQ:
2239 case SVM_EVENT_NMI:
2240 enmTrapType = TRPM_HARDWARE_INT;
2241 break;
2242 case SVM_EVENT_SOFTWARE_INT:
2243 enmTrapType = TRPM_SOFTWARE_INT;
2244 break;
2245 case SVM_EVENT_EXCEPTION:
2246 enmTrapType = TRPM_TRAP;
2247 break;
2248 default:
2249 AssertMsgFailed(("Invalid pending-event type %#x\n", uVectorType));
2250 enmTrapType = TRPM_32BIT_HACK;
2251 break;
2252 }
2253
2254 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, uVectorType));
2255
2256 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
2257 AssertRC(rc);
2258
2259 if (Event.n.u1ErrorCodeValid)
2260 TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
2261
2262 if ( uVectorType == SVM_EVENT_EXCEPTION
2263 && uVector == X86_XCPT_PF)
2264 {
2265 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
2266 Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
2267 }
2268 else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
2269 {
2270 AssertMsg( uVectorType == SVM_EVENT_SOFTWARE_INT
2271 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
2272 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
2273 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
2274 }
2275 pVCpu->hm.s.Event.fPending = false;
2276}
2277
2278
2279/**
2280 * Gets the guest's interrupt-shadow.
2281 *
2282 * @returns The guest's interrupt-shadow.
2283 * @param pVCpu Pointer to the VMCPU.
2284 * @param pCtx Pointer to the guest-CPU context.
2285 *
2286 * @remarks No-long-jump zone!!!
2287 * @remarks Has side-effects on the VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2288 */
2289DECLINLINE(uint32_t) hmR0SvmGetGuestIntrShadow(PVMCPU pVCpu, PCPUMCTX pCtx)
2290{
2291 /*
2292 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2293 * inhibit interrupts or clear any existing interrupt-inhibition.
2294 */
2295 uint32_t uIntrState = 0;
2296 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2297 {
2298 if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2299 {
2300 /*
2301 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2302 * AMD-V, the flag's condition to be cleared is met and thus the cleared state is correct.
2303 */
2304 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2305 }
2306 else
2307 uIntrState = SVM_INTERRUPT_SHADOW_ACTIVE;
2308 }
2309 return uIntrState;
2310}
2311
2312
2313/**
2314 * Sets the virtual interrupt intercept control in the VMCB which
2315 * instructs AMD-V to cause a #VMEXIT as soon as the guest is in a state to
2316 * receive interrupts.
2317 *
2318 * @param pVmcb Pointer to the VMCB.
2319 */
2320DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
2321{
2322 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_VINTR))
2323 {
2324 pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 1; /* A virtual interrupt is pending. */
2325 pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0; /* Not necessary as we #VMEXIT for delivering the interrupt. */
2326 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
2327 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
2328
2329 Log4(("Setting VINTR intercept\n"));
2330 }
2331}
2332
2333
2334/**
2335 * Evaluates the event to be delivered to the guest and sets it as the pending
2336 * event.
2337 *
2338 * @param pVCpu Pointer to the VMCPU.
2339 * @param pCtx Pointer to the guest-CPU context.
2340 */
2341static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
2342{
2343 Assert(!pVCpu->hm.s.Event.fPending);
2344 Log4Func(("\n"));
2345
2346 const bool fIntShadow = !!hmR0SvmGetGuestIntrShadow(pVCpu, pCtx);
2347 const bool fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
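/* fBlockInt: maskable (PIC/APIC) interrupts are blocked while EFLAGS.IF is clear; NMIs are not affected by it. */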
2348 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2349
2350 SVMEVENT Event;
2351 Event.u = 0;
2352 /** @todo SMI. SMIs take priority over NMIs. */
2353 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
2354 {
2355 if (!fIntShadow)
2356 {
2357 Log4(("Pending NMI\n"));
2358
2359 Event.n.u1Valid = 1;
2360 Event.n.u8Vector = X86_XCPT_NMI;
2361 Event.n.u3Type = SVM_EVENT_NMI;
2362
2363 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
2364 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
2365 }
2366 else
2367 hmR0SvmSetVirtIntrIntercept(pVmcb);
2368 }
2369 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
2370 {
2371 /*
2372 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
2373 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt, which is why it is
2374 * evaluated here rather than being set as pending based solely on the force-flags.
2375 */
2376 if ( !fBlockInt
2377 && !fIntShadow)
2378 {
2379 uint8_t u8Interrupt;
2380 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
2381 if (RT_SUCCESS(rc))
2382 {
2383 Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
2384
2385 Event.n.u1Valid = 1;
2386 Event.n.u8Vector = u8Interrupt;
2387 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
2388
2389 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
2390 }
2391 else
2392 {
2393 /** @todo Does this actually happen? If not turn it into an assertion. */
2394 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
2395 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
2396 }
2397 }
2398 else
2399 hmR0SvmSetVirtIntrIntercept(pVmcb);
2400 }
2401}
2402
2403
2404/**
2405 * Injects any pending events into the guest if the guest is in a state to
2406 * receive them.
2407 *
2408 * @param pVCpu Pointer to the VMCPU.
2409 * @param pCtx Pointer to the guest-CPU context.
2410 */
2411static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
2412{
2413 Assert(!TRPMHasTrap(pVCpu));
2414 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2415 Log4Func(("\n"));
2416
2417 const bool fIntShadow = !!hmR0SvmGetGuestIntrShadow(pVCpu, pCtx);
2418 const bool fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
2419 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2420
2421 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
2422 {
2423 SVMEVENT Event;
2424 Event.u = pVCpu->hm.s.Event.u64IntrInfo;
2425 Assert(Event.n.u1Valid);
2426#ifdef VBOX_STRICT
2427 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
2428 {
2429 Assert(!fBlockInt);
2430 Assert(!fIntShadow);
2431 }
2432 else if (Event.n.u3Type == SVM_EVENT_NMI)
2433 Assert(!fIntShadow);
2434#endif
2435
2436 Log4(("Injecting pending HM event.\n"));
2437 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
2438 pVCpu->hm.s.Event.fPending = false;
2439
2440#ifdef VBOX_WITH_STATISTICS
2441 if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
2442 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
2443 else
2444 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
2445#endif
2446 }
2447
2448 /* Update the guest interrupt shadow in the VMCB. */
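/* The interrupt-shadow bit tells the CPU that the next guest instruction is still within an STI/MOV-SS shadow. */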
2449 pVmcb->ctrl.u64IntShadow = !!fIntShadow;
2450}
2451
2452
2453/**
2454 * Reports world-switch error and dumps some useful debug info.
2455 *
2456 * @param pVM Pointer to the VM.
2457 * @param pVCpu Pointer to the VMCPU.
2458 * @param rcVMRun The return code from VMRUN (or
2459 * VERR_SVM_INVALID_GUEST_STATE for invalid
2460 * guest-state).
2461 * @param pCtx Pointer to the guest-CPU context.
2462 */
2463static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
2464{
2465 HMSVM_ASSERT_PREEMPT_SAFE();
2466 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2467
2468 if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
2469 {
2470 HMDumpRegs(pVM, pVCpu, pCtx);
2471#ifdef VBOX_STRICT
2472 Log4(("ctrl.u64VmcbCleanBits %#RX64\n", pVmcb->ctrl.u64VmcbCleanBits));
2473 Log4(("ctrl.u16InterceptRdCRx %#x\n", pVmcb->ctrl.u16InterceptRdCRx));
2474 Log4(("ctrl.u16InterceptWrCRx %#x\n", pVmcb->ctrl.u16InterceptWrCRx));
2475 Log4(("ctrl.u16InterceptRdDRx %#x\n", pVmcb->ctrl.u16InterceptRdDRx));
2476 Log4(("ctrl.u16InterceptWrDRx %#x\n", pVmcb->ctrl.u16InterceptWrDRx));
2477 Log4(("ctrl.u32InterceptException %#x\n", pVmcb->ctrl.u32InterceptException));
2478 Log4(("ctrl.u32InterceptCtrl1 %#x\n", pVmcb->ctrl.u32InterceptCtrl1));
2479 Log4(("ctrl.u32InterceptCtrl2 %#x\n", pVmcb->ctrl.u32InterceptCtrl2));
2480 Log4(("ctrl.u64IOPMPhysAddr %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
2481 Log4(("ctrl.u64MSRPMPhysAddr %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
2482 Log4(("ctrl.u64TSCOffset %#RX64\n", pVmcb->ctrl.u64TSCOffset));
2483
2484 Log4(("ctrl.TLBCtrl.u32ASID %#x\n", pVmcb->ctrl.TLBCtrl.n.u32ASID));
2485 Log4(("ctrl.TLBCtrl.u8TLBFlush %#x\n", pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
2486 Log4(("ctrl.TLBCtrl.u24Reserved %#x\n", pVmcb->ctrl.TLBCtrl.n.u24Reserved));
2487
2488 Log4(("ctrl.IntCtrl.u8VTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u8VTPR));
2489 Log4(("ctrl.IntCtrl.u1VIrqValid %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqValid));
2490 Log4(("ctrl.IntCtrl.u7Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u7Reserved));
2491 Log4(("ctrl.IntCtrl.u4VIrqPriority %#x\n", pVmcb->ctrl.IntCtrl.n.u4VIrqPriority));
2492 Log4(("ctrl.IntCtrl.u1IgnoreTPR %#x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
2493 Log4(("ctrl.IntCtrl.u3Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
2494 Log4(("ctrl.IntCtrl.u1VIrqMasking %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqMasking));
2495 Log4(("ctrl.IntCtrl.u6Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved));
2496 Log4(("ctrl.IntCtrl.u8VIrqVector %#x\n", pVmcb->ctrl.IntCtrl.n.u8VIrqVector));
2497 Log4(("ctrl.IntCtrl.u24Reserved %#x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved));
2498
2499 Log4(("ctrl.u64IntShadow %#RX64\n", pVmcb->ctrl.u64IntShadow));
2500 Log4(("ctrl.u64ExitCode %#RX64\n", pVmcb->ctrl.u64ExitCode));
2501 Log4(("ctrl.u64ExitInfo1 %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
2502 Log4(("ctrl.u64ExitInfo2 %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
2503 Log4(("ctrl.ExitIntInfo.u8Vector %#x\n", pVmcb->ctrl.ExitIntInfo.n.u8Vector));
2504 Log4(("ctrl.ExitIntInfo.u3Type %#x\n", pVmcb->ctrl.ExitIntInfo.n.u3Type));
2505 Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
2506 Log4(("ctrl.ExitIntInfo.u19Reserved %#x\n", pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
2507 Log4(("ctrl.ExitIntInfo.u1Valid %#x\n", pVmcb->ctrl.ExitIntInfo.n.u1Valid));
2508 Log4(("ctrl.ExitIntInfo.u32ErrorCode %#x\n", pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
2509 Log4(("ctrl.NestedPaging %#RX64\n", pVmcb->ctrl.NestedPaging.u));
2510 Log4(("ctrl.EventInject.u8Vector %#x\n", pVmcb->ctrl.EventInject.n.u8Vector));
2511 Log4(("ctrl.EventInject.u3Type %#x\n", pVmcb->ctrl.EventInject.n.u3Type));
2512 Log4(("ctrl.EventInject.u1ErrorCodeValid %#x\n", pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
2513 Log4(("ctrl.EventInject.u19Reserved %#x\n", pVmcb->ctrl.EventInject.n.u19Reserved));
2514 Log4(("ctrl.EventInject.u1Valid %#x\n", pVmcb->ctrl.EventInject.n.u1Valid));
2515 Log4(("ctrl.EventInject.u32ErrorCode %#x\n", pVmcb->ctrl.EventInject.n.u32ErrorCode));
2516
2517 Log4(("ctrl.u64NestedPagingCR3 %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));
2518 Log4(("ctrl.u64LBRVirt %#RX64\n", pVmcb->ctrl.u64LBRVirt));
2519
2520 Log4(("guest.CS.u16Sel %RTsel\n", pVmcb->guest.CS.u16Sel));
2521 Log4(("guest.CS.u16Attr %#x\n", pVmcb->guest.CS.u16Attr));
2522 Log4(("guest.CS.u32Limit %#RX32\n", pVmcb->guest.CS.u32Limit));
2523 Log4(("guest.CS.u64Base %#RX64\n", pVmcb->guest.CS.u64Base));
2524 Log4(("guest.DS.u16Sel %#RTsel\n", pVmcb->guest.DS.u16Sel));
2525 Log4(("guest.DS.u16Attr %#x\n", pVmcb->guest.DS.u16Attr));
2526 Log4(("guest.DS.u32Limit %#RX32\n", pVmcb->guest.DS.u32Limit));
2527 Log4(("guest.DS.u64Base %#RX64\n", pVmcb->guest.DS.u64Base));
2528 Log4(("guest.ES.u16Sel %RTsel\n", pVmcb->guest.ES.u16Sel));
2529 Log4(("guest.ES.u16Attr %#x\n", pVmcb->guest.ES.u16Attr));
2530 Log4(("guest.ES.u32Limit %#RX32\n", pVmcb->guest.ES.u32Limit));
2531 Log4(("guest.ES.u64Base %#RX64\n", pVmcb->guest.ES.u64Base));
2532 Log4(("guest.FS.u16Sel %RTsel\n", pVmcb->guest.FS.u16Sel));
2533 Log4(("guest.FS.u16Attr %#x\n", pVmcb->guest.FS.u16Attr));
2534 Log4(("guest.FS.u32Limit %#RX32\n", pVmcb->guest.FS.u32Limit));
2535 Log4(("guest.FS.u64Base %#RX64\n", pVmcb->guest.FS.u64Base));
2536 Log4(("guest.GS.u16Sel %RTsel\n", pVmcb->guest.GS.u16Sel));
2537 Log4(("guest.GS.u16Attr %#x\n", pVmcb->guest.GS.u16Attr));
2538 Log4(("guest.GS.u32Limit %#RX32\n", pVmcb->guest.GS.u32Limit));
2539 Log4(("guest.GS.u64Base %#RX64\n", pVmcb->guest.GS.u64Base));
2540
2541 Log4(("guest.GDTR.u32Limit %#RX32\n", pVmcb->guest.GDTR.u32Limit));
2542 Log4(("guest.GDTR.u64Base %#RX64\n", pVmcb->guest.GDTR.u64Base));
2543
2544 Log4(("guest.LDTR.u16Sel %RTsel\n", pVmcb->guest.LDTR.u16Sel));
2545 Log4(("guest.LDTR.u16Attr %#x\n", pVmcb->guest.LDTR.u16Attr));
2546 Log4(("guest.LDTR.u32Limit %#RX32\n", pVmcb->guest.LDTR.u32Limit));
2547 Log4(("guest.LDTR.u64Base %#RX64\n", pVmcb->guest.LDTR.u64Base));
2548
2549 Log4(("guest.IDTR.u32Limit %#RX32\n", pVmcb->guest.IDTR.u32Limit));
2550 Log4(("guest.IDTR.u64Base %#RX64\n", pVmcb->guest.IDTR.u64Base));
2551
2552 Log4(("guest.TR.u16Sel %RTsel\n", pVmcb->guest.TR.u16Sel));
2553 Log4(("guest.TR.u16Attr %#x\n", pVmcb->guest.TR.u16Attr));
2554 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit));
2555 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base));
2556
2557 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL));
2558 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0));
2559 Log4(("guest.u64CR2 %#RX64\n", pVmcb->guest.u64CR2));
2560 Log4(("guest.u64CR3 %#RX64\n", pVmcb->guest.u64CR3));
2561 Log4(("guest.u64CR4 %#RX64\n", pVmcb->guest.u64CR4));
2562 Log4(("guest.u64DR6 %#RX64\n", pVmcb->guest.u64DR6));
2563 Log4(("guest.u64DR7 %#RX64\n", pVmcb->guest.u64DR7));
2564
2565 Log4(("guest.u64RIP %#RX64\n", pVmcb->guest.u64RIP));
2566 Log4(("guest.u64RSP %#RX64\n", pVmcb->guest.u64RSP));
2567 Log4(("guest.u64RAX %#RX64\n", pVmcb->guest.u64RAX));
2568 Log4(("guest.u64RFlags %#RX64\n", pVmcb->guest.u64RFlags));
2569
2570 Log4(("guest.u64SysEnterCS %#RX64\n", pVmcb->guest.u64SysEnterCS));
2571 Log4(("guest.u64SysEnterEIP %#RX64\n", pVmcb->guest.u64SysEnterEIP));
2572 Log4(("guest.u64SysEnterESP %#RX64\n", pVmcb->guest.u64SysEnterESP));
2573
2574 Log4(("guest.u64EFER %#RX64\n", pVmcb->guest.u64EFER));
2575 Log4(("guest.u64STAR %#RX64\n", pVmcb->guest.u64STAR));
2576 Log4(("guest.u64LSTAR %#RX64\n", pVmcb->guest.u64LSTAR));
2577 Log4(("guest.u64CSTAR %#RX64\n", pVmcb->guest.u64CSTAR));
2578 Log4(("guest.u64SFMASK %#RX64\n", pVmcb->guest.u64SFMASK));
2579 Log4(("guest.u64KernelGSBase %#RX64\n", pVmcb->guest.u64KernelGSBase));
2580 Log4(("guest.u64GPAT %#RX64\n", pVmcb->guest.u64GPAT));
2581 Log4(("guest.u64DBGCTL %#RX64\n", pVmcb->guest.u64DBGCTL));
2582 Log4(("guest.u64BR_FROM %#RX64\n", pVmcb->guest.u64BR_FROM));
2583 Log4(("guest.u64BR_TO %#RX64\n", pVmcb->guest.u64BR_TO));
2584 Log4(("guest.u64LASTEXCPFROM %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
2585 Log4(("guest.u64LASTEXCPTO %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
2586#endif
2587 }
2588 else
2589 Log4(("hmR0SvmReportWorldSwitchError: rcVMRun=%d\n", rcVMRun));
2590}
2591
2592
2593/**
2594 * Checks per-VM and per-VCPU force flag actions that require us to go back to
2595 * ring-3 for one reason or another.
2596 *
2597 * @returns VBox status code (information status code included).
2598 * @retval VINF_SUCCESS if we don't have any actions that require going back to
2599 * ring-3.
2600 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
2601 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
2602 * interrupts)
2603 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
2604 * all EMTs to be in ring-3.
2605 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
2606 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
2607 * to the EM loop.
2608 *
2609 * @param pVM Pointer to the VM.
2610 * @param pVCpu Pointer to the VMCPU.
2611 * @param pCtx Pointer to the guest-CPU context.
2612 */
2613static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2614{
2615 Assert(VMMRZCallRing3IsEnabled(pVCpu));
2616
2617 /* On AMD-V we don't need to lazily update CR3 and the PAE PDPEs. See hmR0SvmSaveGuestState(). */
2618 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
2619 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
2620
2621 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
2622 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2623 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
2624 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2625 {
2626 /* Pending PGM CR3 sync. */
2627 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
2628 {
2629 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2630 if (rc != VINF_SUCCESS)
2631 {
2632 Log4(("hmR0SvmCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
2633 return rc;
2634 }
2635 }
2636
2637 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
2638 /* -XXX- what was that about single stepping? */
2639 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
2640 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2641 {
2642 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
2643 int rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
2644 Log4(("hmR0SvmCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
2645 return rc;
2646 }
2647
2648 /* Pending VM request packets, such as hardware interrupts. */
2649 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
2650 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2651 {
2652 Log4(("hmR0SvmCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
2653 return VINF_EM_PENDING_REQUEST;
2654 }
2655
2656 /* Pending PGM pool flushes. */
2657 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
2658 {
2659 Log4(("hmR0SvmCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
2660 return VINF_PGM_POOL_FLUSH_PENDING;
2661 }
2662
2663 /* Pending DMA requests. */
2664 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
2665 {
2666 Log4(("hmR0SvmCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
2667 return VINF_EM_RAW_TO_R3;
2668 }
2669 }
2670
2671 return VINF_SUCCESS;
2672}
2673
2674
2675/**
2676 * Does the preparations before executing guest code in AMD-V.
2677 *
2678 * This may cause longjmps to ring-3 and may even result in rescheduling to the
2679 * recompiler. We must be cautious about what we do here regarding committing
2680 * guest-state information into the VMCB, assuming we will definitely execute the
2681 * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
2682 * clearing the common-state (TRPM/forceflags), we must undo those changes so
2683 * that the recompiler can (and should) use them when it resumes guest
2684 * execution. Otherwise such operations must be done when we can no longer
2685 * exit to ring-3.
2686 *
2687 * @returns VBox status code (informational status codes included).
2688 * @retval VINF_SUCCESS if we can proceed with running the guest.
2689 * @retval VINF_* scheduling changes, we have to go back to ring-3.
2690 *
2691 * @param pVM Pointer to the VM.
2692 * @param pVCpu Pointer to the VMCPU.
2693 * @param pCtx Pointer to the guest-CPU context.
2694 * @param pSvmTransient Pointer to the SVM transient structure.
2695 */
2696DECLINLINE(int) hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
2697{
2698 /* Check force flag actions that might require us to go back to ring-3. */
2699 int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
2700 if (rc != VINF_SUCCESS)
2701 return rc;
2702
2703 if (TRPMHasTrap(pVCpu))
2704 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
2705 else if (!pVCpu->hm.s.Event.fPending)
2706 hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);
2707
2708 /*
2709 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
2710 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
2711 *
2712 * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
2713 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
2714 *
2715 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
2716 * executing guest code.
2717 */
2718 pSvmTransient->uEflags = ASMIntDisableFlags();
2719 if ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2720 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2721 {
2722 ASMSetFlags(pSvmTransient->uEflags);
2723 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
2724 return VINF_EM_RAW_TO_R3;
2725 }
2726 else if (RTThreadPreemptIsPending(NIL_RTTHREAD))
2727 {
2728 ASMSetFlags(pSvmTransient->uEflags);
2729 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
2730 return VINF_EM_RAW_INTERRUPT;
2731 }
2732
2733 /* Indicate the start of guest execution. No more longjmps or returns to ring-3 from this point!!! */
2734 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
2735 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
2736
2737 return VINF_SUCCESS;
2738}
2739
2740
2741/**
2742 * Prepares to run guest code in AMD-V once we've committed to doing so. This
2743 * means there is no backing out to ring-3 or anywhere else at this
2744 * point.
2745 *
2746 * @param pVM Pointer to the VM.
2747 * @param pVCpu Pointer to the VMCPU.
2748 * @param pCtx Pointer to the guest-CPU context.
2749 * @param pSvmTransient Pointer to the SVM transient structure.
2750 *
2751 * @remarks Called with preemption disabled.
2752 * @remarks No-long-jump zone!!!
2753 */
2754DECLINLINE(void) hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
2755{
2756 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2757 Assert(VMMR0IsLogFlushDisabled(pVCpu));
2758
2759 hmR0SvmInjectPendingEvent(pVCpu, pCtx);
2760
2761 /*
2762 * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
2763 * We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
2764 */
2765 /** @todo The above assumption could be wrong. It's not documented what
2766 * should be done w.r.t. the VMCB Clean Bit, but we'll find out the
2767 * hard way. */
2768 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2769 pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
2770
2771#ifdef HMSVM_SYNC_FULL_GUEST_STATE
2772 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
2773#endif
2774
2775 /* Load the guest state. */
2776 int rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
2777 AssertRC(rc);
2778 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT; /* Preemption might set this, nothing to do on AMD-V. */
2779 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
2780 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
2781
2782 /* If the VMCB Clean Bits feature isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
2783 if (!(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
2784 pVmcb->ctrl.u64VmcbCleanBits = 0;
2785
2786 /*
2787 * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
2788 * so we can update it on the way back if the guest changed the TPR.
2789 */
2790 if (pVCpu->hm.s.svm.fSyncVTpr)
2791 {
2792 if (pVM->hm.s.fTPRPatchingActive)
2793 pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
2794 else
2795 pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
2796 }
2797
2798 /* Setup TSC offsetting. */
2799 if ( pSvmTransient->fUpdateTscOffsetting
2800 || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
2801 {
2802 hmR0SvmUpdateTscOffsetting(pVCpu);
2803 pSvmTransient->fUpdateTscOffsetting = false;
2804 }
2805
2806 /* Flush the appropriate tagged-TLB entries. */
2807 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
2808 hmR0SvmFlushTaggedTlb(pVCpu);
2809 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
2810
2811 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
2812
2813 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
2814 to start executing. */
2815
2816 /*
2817 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
2818 * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
2819 *
2820 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
2821 */
2822 pSvmTransient->fRestoreTscAuxMsr = false;
2823 if ( (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
2824 && !(pVmcb->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
2825 {
2826 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
2827 uint64_t u64GuestTscAux = 0;
2828 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
2829 AssertRC(rc2);
2830 if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
2831 {
2832 ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
2833 pSvmTransient->fRestoreTscAuxMsr = true;
2834 }
2835 }
2836}
2837
2838
2839/**
2840 * Wrapper for running the guest code in AMD-V.
2841 *
2842 * @returns VBox strict status code.
2843 * @param pVM Pointer to the VM.
2844 * @param pVCpu Pointer to the VMCPU.
2845 * @param pCtx Pointer to the guest-CPU context.
2846 *
2847 * @remarks No-long-jump zone!!!
2848 */
2849DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2850{
2851 /*
2852 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
2853 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
2854 * Refer to the MSDN docs "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
2855 */
2856#ifdef VBOX_WITH_KERNEL_USING_XMM
2857 return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
2858 pVCpu->hm.s.svm.pfnVMRun);
2859#else
2860 return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
2861#endif
2862}
2863
2864
2865/**
2866 * Performs some essential restoration of state after running guest code in
2867 * AMD-V.
2868 *
2869 * @param pVM Pointer to the VM.
2870 * @param pVCpu Pointer to the VMCPU.
2871 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2872 * out-of-sync. Make sure to update the required fields
2873 * before using them.
2874 * @param pSvmTransient Pointer to the SVM transient structure.
2875 * @param rcVMRun Return code of VMRUN.
2876 *
2877 * @remarks Called with interrupts disabled.
2878 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
2879 * unconditionally when it is safe to do so.
2880 */
2881DECLINLINE(void) hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
2882{
2883 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2884
2885 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
2886 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
2887
2888 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2889 pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
2890
2891 if (pSvmTransient->fRestoreTscAuxMsr)
2892 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
2893
2894 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
2895 {
2896 /** @todo Find a way to fix hardcoding a guestimate. */
2897 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset - 0x400);
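/* The 0x400 presumably approximates the cycles spent between the #VMEXIT and reading the TSC here, so the recorded last-seen value doesn't overshoot what the guest actually observed. */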
2898 }
2899
2900 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
2901 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
2902 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
2903
2904 Assert(!(ASMGetFlags() & X86_EFL_IF));
2905 ASMSetFlags(pSvmTransient->uEflags); /* Enable interrupts. */
2906
2907 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
2908
2909 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
2910 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
2911 {
2912 Log4(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));
2913 return;
2914 }
2915
2916 pSvmTransient->u64ExitCode = pVmcb->ctrl.u64ExitCode; /* Save the #VMEXIT reason. */
2917 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
2918 hmR0SvmSaveGuestState(pVCpu, pMixedCtx); /* Save the guest state from the VMCB to the guest-CPU context. */
2919
2920 if (RT_LIKELY(pSvmTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID))
2921 {
2922 if (pVCpu->hm.s.svm.fSyncVTpr)
2923 {
2924 /* TPR patching (for 32-bit guests) uses the LSTAR MSR to hold the TPR value, otherwise the VTPR is used. */
2925 if ( pVM->hm.s.fTPRPatchingActive
2926 && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
2927 {
2928 int rc = PDMApicSetTPR(pVCpu, pMixedCtx->msrLSTAR & 0xff);
2929 AssertRC(rc);
2930 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
2931 }
2932 else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
2933 {
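/* The VTPR holds bits 7-4 of the TPR (the task-priority class); shift it back up before handing it to the APIC. */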
2934 int rc = PDMApicSetTPR(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
2935 AssertRC(rc);
2936 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
2937 }
2938 }
2939 }
2940}
2941
2942
2943/**
2944 * Runs the guest code using AMD-V.
2945 *
2946 * @returns VBox status code.
2947 * @param pVM Pointer to the VM.
2948 * @param pVCpu Pointer to the VMCPU.
2949 * @param pCtx Pointer to the guest-CPU context.
2950 */
2951VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2952{
2953 Assert(VMMRZCallRing3IsEnabled(pVCpu));
2954 HMSVM_ASSERT_PREEMPT_SAFE();
2955 VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pCtx);
2956
2957 SVMTRANSIENT SvmTransient;
2958 SvmTransient.fUpdateTscOffsetting = true;
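/* Recompute the TSC offset for the first VMRUN; later it is refreshed only when needed (e.g. after migrating to a different host CPU). */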
2959 uint32_t cLoops = 0;
2960 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2961 int rc = VERR_INTERNAL_ERROR_5;
2962
2963 for (;; cLoops++)
2964 {
2965 Assert(!HMR0SuspendPending());
2966 HMSVM_ASSERT_CPU_SAFE();
2967
2968 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
2969 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
2970 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
2971 if (rc != VINF_SUCCESS)
2972 break;
2973
2974 /*
2975 * No longjmps to ring-3 from this point on!!!
2976 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
2977 * This also disables flushing of the R0-logger instance (if any).
2978 */
2979 VMMRZCallRing3Disable(pVCpu);
2980 hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
2981
2982 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
2983
2984 /*
2985 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
2986 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
2987 */
2988 hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
2989 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */
2990 || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
2991 {
2992 if (rc == VINF_SUCCESS)
2993 rc = VERR_SVM_INVALID_GUEST_STATE;
2994 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
2995 hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
2996 break;
2997 }
2998
2999 /* Handle the #VMEXIT. */
3000 HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
3001 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
3002 rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
3003 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
3004 if (rc != VINF_SUCCESS)
3005 break;
3006 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
3007 {
3008 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
3009 rc = VINF_EM_RAW_INTERRUPT;
3010 break;
3011 }
3012 }
3013
3014 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
3015 if (rc == VERR_EM_INTERPRETER)
3016 rc = VINF_EM_RAW_EMULATE_INSTR;
3017 else if (rc == VINF_EM_RESET)
3018 rc = VINF_EM_TRIPLE_FAULT;
3019
3020 hmR0SvmExitToRing3(pVM, pVCpu, pCtx, rc);
3021 VMMRZCallRing3RemoveNotification(pVCpu);
3022 return rc;
3023}
3024
3025
3026/**
3027 * Handles a #VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
3028 *
3029 * @returns VBox status code (informational status codes included).
3030 * @param pVCpu Pointer to the VMCPU.
3031 * @param pCtx Pointer to the guest-CPU context.
3032 * @param pSvmTransient Pointer to the SVM transient structure.
3033 */
3034DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3035{
3036 Assert(pSvmTransient->u64ExitCode != (uint64_t)SVM_EXIT_INVALID);
3037 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
3038
3039 /*
3040 * The ordering of the case labels is based on most-frequently-occurring VM-exits for most guests under
3041 * normal workloads (for some definition of "normal").
3042 */
3043 uint32_t u32ExitCode = pSvmTransient->u64ExitCode;
3044 switch (pSvmTransient->u64ExitCode)
3045 {
3046 case SVM_EXIT_NPF:
3047 return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
3048
3049 case SVM_EXIT_IOIO:
3050 return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
3051
3052 case SVM_EXIT_RDTSC:
3053 return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
3054
3055 case SVM_EXIT_RDTSCP:
3056 return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
3057
3058 case SVM_EXIT_CPUID:
3059 return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);
3060
3061 case SVM_EXIT_EXCEPTION_E: /* X86_XCPT_PF */
3062 return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient);
3063
3064 case SVM_EXIT_EXCEPTION_7: /* X86_XCPT_NM */
3065 return hmR0SvmExitXcptNM(pVCpu, pCtx, pSvmTransient);
3066
3067 case SVM_EXIT_EXCEPTION_10: /* X86_XCPT_MF */
3068 return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
3069
3070 case SVM_EXIT_EXCEPTION_1: /* X86_XCPT_DB */
3071 return hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient);
3072
3073 case SVM_EXIT_MONITOR:
3074 return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
3075
3076 case SVM_EXIT_MWAIT:
3077 return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
3078
3079 case SVM_EXIT_HLT:
3080 return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient);
3081
3082 case SVM_EXIT_READ_CR0:
3083 case SVM_EXIT_READ_CR3:
3084 case SVM_EXIT_READ_CR4:
3085 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
3086
3087 case SVM_EXIT_WRITE_CR0:
3088 case SVM_EXIT_WRITE_CR3:
3089 case SVM_EXIT_WRITE_CR4:
3090 case SVM_EXIT_WRITE_CR8:
3091 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
3092
3093 case SVM_EXIT_VINTR:
3094 return hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient);
3095
3096 case SVM_EXIT_INTR:
3097 case SVM_EXIT_FERR_FREEZE:
3098 case SVM_EXIT_NMI:
3099 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
3100
3101 case SVM_EXIT_MSR:
3102 return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);
3103
3104 case SVM_EXIT_INVLPG:
3105 return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
3106
3107 case SVM_EXIT_WBINVD:
3108 return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
3109
3110 case SVM_EXIT_INVD:
3111 return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
3112
3113 case SVM_EXIT_RDPMC:
3114 return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
3115
3116 default:
3117 {
3118 switch (pSvmTransient->u64ExitCode)
3119 {
3120 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
3121 case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7: case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9:
3122 case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11: case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13:
3123 case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
3124 return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
3125
3126 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
3127 case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7: case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9:
3128 case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
3129 case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
3130 return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
3131
3132 case SVM_EXIT_TASK_SWITCH:
3133 return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
3134
3135 case SVM_EXIT_VMMCALL:
3136 return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
3137
3138 case SVM_EXIT_SHUTDOWN:
3139 return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
3140
3141 case SVM_EXIT_SMI:
3142 case SVM_EXIT_INIT:
3143 {
3144 /*
3145                      * We don't intercept SMIs. As for INIT signals, they really shouldn't ever happen here. If either ever does,
3146                      * we want to know about it, so log the exit code and bail.
3147 */
3148 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
3149 pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
3150 return VERR_SVM_UNEXPECTED_EXIT;
3151 }
3152
3153 case SVM_EXIT_INVLPGA:
3154 case SVM_EXIT_RSM:
3155 case SVM_EXIT_VMRUN:
3156 case SVM_EXIT_VMLOAD:
3157 case SVM_EXIT_VMSAVE:
3158 case SVM_EXIT_STGI:
3159 case SVM_EXIT_CLGI:
3160 case SVM_EXIT_SKINIT:
3161 return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
3162
3163#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
3164 case SVM_EXIT_EXCEPTION_0: /* X86_XCPT_DE */
3165 /* SVM_EXIT_EXCEPTION_1: */ /* X86_XCPT_DB - Handled above. */
3166 case SVM_EXIT_EXCEPTION_2: /* X86_XCPT_NMI */
3167 case SVM_EXIT_EXCEPTION_3: /* X86_XCPT_BP */
3168 case SVM_EXIT_EXCEPTION_4: /* X86_XCPT_OF */
3169 case SVM_EXIT_EXCEPTION_5: /* X86_XCPT_BR */
3170 case SVM_EXIT_EXCEPTION_6: /* X86_XCPT_UD */
3171 /* SVM_EXIT_EXCEPTION_7: */ /* X86_XCPT_NM - Handled above. */
3172 case SVM_EXIT_EXCEPTION_8: /* X86_XCPT_DF */
3173 case SVM_EXIT_EXCEPTION_9: /* X86_XCPT_CO_SEG_OVERRUN */
3174 case SVM_EXIT_EXCEPTION_A: /* X86_XCPT_TS */
3175 case SVM_EXIT_EXCEPTION_B: /* X86_XCPT_NP */
3176 case SVM_EXIT_EXCEPTION_C: /* X86_XCPT_SS */
3177 case SVM_EXIT_EXCEPTION_D: /* X86_XCPT_GP */
3178 /* SVM_EXIT_EXCEPTION_E: */ /* X86_XCPT_PF - Handled above. */
3179 /* SVM_EXIT_EXCEPTION_10: */ /* X86_XCPT_MF - Handled above. */
3180 case SVM_EXIT_EXCEPTION_11: /* X86_XCPT_AC */
3181 case SVM_EXIT_EXCEPTION_12: /* X86_XCPT_MC */
3182 case SVM_EXIT_EXCEPTION_13: /* X86_XCPT_XF */
3183
3184 case SVM_EXIT_EXCEPTION_F: /* Reserved */
3185 case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16:
3186 case SVM_EXIT_EXCEPTION_17: case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19:
3187 case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B: case SVM_EXIT_EXCEPTION_1C:
3188 case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
3189 {
3190 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3191 SVMEVENT Event;
3192 Event.u = 0;
3193 Event.n.u1Valid = 1;
3194 Event.n.u3Type = SVM_EVENT_EXCEPTION;
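                /* SVM exception exit codes are contiguous, so the offset from SVM_EXIT_EXCEPTION_0 is the x86 vector. */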
3195 Event.n.u8Vector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
3196
3197 switch (Event.n.u8Vector)
3198 {
3199 case X86_XCPT_DE:
3200 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
3201 break;
3202
3203 case X86_XCPT_BP:
3204 /** Saves the wrong EIP on the stack (pointing to the int3) instead of the
3205 * next instruction. */
3206 /** @todo Investigate this later. */
3207 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
3208 break;
3209
3210 case X86_XCPT_UD:
3211 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
3212 break;
3213
3214 case X86_XCPT_NP:
3215 Event.n.u1ErrorCodeValid = 1;
3216 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1;
3217 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
3218 break;
3219
3220 case X86_XCPT_SS:
3221 Event.n.u1ErrorCodeValid = 1;
3222 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1;
3223 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
3224 break;
3225
3226 case X86_XCPT_GP:
3227 Event.n.u1ErrorCodeValid = 1;
3228 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1;
3229 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
3230 break;
3231
3232 default:
3233 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit caused by exception %#x\n", Event.n.u8Vector));
3234 pVCpu->hm.s.u32HMError = Event.n.u8Vector;
3235 return VERR_SVM_UNEXPECTED_XCPT_EXIT;
3236 }
3237
3238 Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
3239 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3240 return VINF_SUCCESS;
3241 }
3242#endif /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
3243
3244 default:
3245 {
3246 AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#x\n", u32ExitCode));
3247 pVCpu->hm.s.u32HMError = u32ExitCode;
3248 return VERR_SVM_UNKNOWN_EXIT;
3249 }
3250 }
3251 }
3252 }
3253 return VERR_INTERNAL_ERROR_5; /* Should never happen. */
3254}
3255
3256
3257#ifdef DEBUG
3258/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
3259# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
3260 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
3261
3262# define HMSVM_ASSERT_PREEMPT_CPUID() \
3263 do \
3264 { \
3265 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
3266 AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
3267 } while (0)
3268
3269# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
3270 do { \
3271 AssertPtr(pVCpu); \
3272 AssertPtr(pCtx); \
3273 AssertPtr(pSvmTransient); \
3274 Assert(ASMIntAreEnabled()); \
3275 HMSVM_ASSERT_PREEMPT_SAFE(); \
3276 HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
3277 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
3278 HMSVM_ASSERT_PREEMPT_SAFE(); \
3279 if (VMMR0IsLogFlushDisabled(pVCpu)) \
3280 HMSVM_ASSERT_PREEMPT_CPUID(); \
3281 } while (0)
3282#else /* Release builds */
3283# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { } while(0)
3284#endif
3285
3286
3287/**
3288 * Worker for hmR0SvmInterpretInvlpg().
3289 *
3290 * @return VBox status code.
3291 * @param pVCpu Pointer to the VMCPU.
3292 * @param pCpu Pointer to the disassembler state.
3293 * @param pRegFrame Pointer to the register frame.
3294 */
3295static int hmR0SvmInterpretInvlPgEx(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame)
3296{
3297 DISQPVPARAMVAL Param1;
3298 RTGCPTR GCPtrPage;
3299
3300 int rc = DISQueryParamVal(pRegFrame, pCpu, &pCpu->Param1, &Param1, DISQPVWHICH_SRC);
3301 if (RT_FAILURE(rc))
3302 return VERR_EM_INTERPRETER;
3303
3304 if ( Param1.type == DISQPV_TYPE_IMMEDIATE
3305 || Param1.type == DISQPV_TYPE_ADDRESS)
3306 {
3307 if (!(Param1.flags & (DISQPV_FLAG_32 | DISQPV_FLAG_64)))
3308 return VERR_EM_INTERPRETER;
3309
3310 GCPtrPage = Param1.val.val64;
3311 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVCpu->CTX_SUFF(pVM), pVCpu, pRegFrame, GCPtrPage);
3312 rc = VBOXSTRICTRC_VAL(rc2);
3313 }
3314 else
3315 {
3316 Log4(("hmR0SvmInterpretInvlPgEx invalid parameter type %#x\n", Param1.type));
3317 rc = VERR_EM_INTERPRETER;
3318 }
3319
3320 return rc;
3321}
3322
3323
3324/**
3325 * Interprets INVLPG.
3326 *
3327 * @returns VBox status code.
3328 * @retval VINF_* Scheduling instructions.
3329 * @retval VERR_EM_INTERPRETER Something we can't cope with.
3330 * @retval VERR_* Fatal errors.
3331 *
3332 * @param pVM Pointer to the VM.
     * @param pVCpu Pointer to the VMCPU.
3333 * @param pRegFrame Pointer to the register frame.
3334 *
3335 * @remarks Updates the RIP if the instruction was executed successfully.
3336 */
3337static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
3338{
3339 /* Only allow 32 & 64 bit code. */
3340 if (CPUMGetGuestCodeBits(pVCpu) != 16)
3341 {
3342 PDISSTATE pDis = &pVCpu->hm.s.DisState;
3343 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
3344 if ( RT_SUCCESS(rc)
3345 && pDis->pCurInstr->uOpcode == OP_INVLPG)
3346 {
3347 rc = hmR0SvmInterpretInvlPgEx(pVCpu, pDis, pRegFrame);
3348 if (RT_SUCCESS(rc))
3349 pRegFrame->rip += pDis->cbInstr;
3350 return rc;
3351 }
3352 else
3353 Log4(("hmR0SvmInterpretInvlpg: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
3354 }
3355 return VERR_EM_INTERPRETER;
3356}
3357
3358
3359/**
3360 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
3361 *
3362 * @param pVCpu Pointer to the VMCPU.
3363 */
3364DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPU pVCpu)
3365{
3366 SVMEVENT Event;
3367 Event.u = 0;
3368 Event.n.u1Valid = 1;
3369 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3370 Event.n.u8Vector = X86_XCPT_UD;
3371 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3372}
3373
3374
3375/**
3376 * Sets a debug (#DB) exception as pending-for-injection into the VM.
3377 *
3378 * @param pVCpu Pointer to the VMCPU.
3379 */
3380DECLINLINE(void) hmR0SvmSetPendingXcptDB(PVMCPU pVCpu)
3381{
3382 SVMEVENT Event;
3383 Event.u = 0;
3384 Event.n.u1Valid = 1;
3385 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3386 Event.n.u8Vector = X86_XCPT_DB;
3387 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3388}
3389
3390
3391/**
3392 * Sets a page fault (#PF) exception as pending-for-injection into the VM.
3393 *
3394 * @param pVCpu Pointer to the VMCPU.
3395 * @param pCtx Pointer to the guest-CPU context.
3396 * @param u32ErrCode The error-code for the page-fault.
3397 * @param uFaultAddress The page fault address (CR2).
3398 *
3399 * @remarks This updates the guest CR2 with @a uFaultAddress!
3400 */
3401DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
3402{
3403 SVMEVENT Event;
3404 Event.u = 0;
3405 Event.n.u1Valid = 1;
3406 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3407 Event.n.u8Vector = X86_XCPT_PF;
3408 Event.n.u1ErrorCodeValid = 1;
3409 Event.n.u32ErrorCode = u32ErrCode;
3410
3411 /* Update CR2 of the guest. */
3412 if (pCtx->cr2 != uFaultAddress)
3413 {
3414 pCtx->cr2 = uFaultAddress;
3415 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR2;
3416 }
3417
3418 hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
3419}
3420
3421
3422/**
3423 * Sets a device-not-available (#NM) exception as pending-for-injection into the
3424 * VM.
3425 *
3426 * @param pVCpu Pointer to the VMCPU.
3427 */
3428DECLINLINE(void) hmR0SvmSetPendingXcptNM(PVMCPU pVCpu)
3429{
3430 SVMEVENT Event;
3431 Event.u = 0;
3432 Event.n.u1Valid = 1;
3433 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3434 Event.n.u8Vector = X86_XCPT_NM;
3435 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3436}
3437
3438
3439/**
3440 * Sets a math-fault (#MF) exception as pending-for-injection into the VM.
3441 *
3442 * @param pVCpu Pointer to the VMCPU.
3443 */
3444DECLINLINE(void) hmR0SvmSetPendingXcptMF(PVMCPU pVCpu)
3445{
3446 SVMEVENT Event;
3447 Event.u = 0;
3448 Event.n.u1Valid = 1;
3449 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3450 Event.n.u8Vector = X86_XCPT_MF;
3451 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3452}
3453
3454
3455/**
3456 * Sets a double fault (#DF) exception as pending-for-injection into the VM.
3457 *
3458 * @param pVCpu Pointer to the VMCPU.
3459 */
3460DECLINLINE(void) hmR0SvmSetPendingXcptDF(PVMCPU pVCpu)
3461{
3462 SVMEVENT Event;
3463 Event.u = 0;
3464 Event.n.u1Valid = 1;
3465 Event.n.u3Type = SVM_EVENT_EXCEPTION;
3466 Event.n.u8Vector = X86_XCPT_DF;
3467 Event.n.u1ErrorCodeValid = 1;
3468 Event.n.u32ErrorCode = 0;
3469 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
3470}
3471
3472
3473/**
3474 * Emulates a simple MOV TPR (CR8) instruction, used for TPR patching on 32-bit
3475 * guests. This simply looks up the patch record at EIP and performs the required emulation.
3476 *
3477 * This VMMCALL is used as a fallback mechanism when mov to/from cr8 isn't exactly
3478 * like how we want it to be (e.g. not followed by shr 4 as is usually done for
3479 * TPR). See hmR3ReplaceTprInstr() for the details.
3480 *
3481 * @returns VBox status code.
3482 * @param pVM Pointer to the VM.
3483 * @param pVCpu Pointer to the VMCPU.
3484 * @param pCtx Pointer to the guest-CPU context.
3485 */
3486static int hmR0SvmEmulateMovTpr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3487{
3488 Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));
3489 for (;;)
3490 {
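        /* Process patched TPR instructions back-to-back; the loop ends when no patch record exists for the current RIP. */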
3491 bool fPending;
3492 uint8_t u8Tpr;
3493
3494 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
3495 if (!pPatch)
3496 break;
3497
3498 switch (pPatch->enmType)
3499 {
3500 case HMTPRINSTR_READ:
3501 {
3502 int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
3503 AssertRC(rc);
3504
3505 rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
3506 AssertRC(rc);
3507 pCtx->rip += pPatch->cbOp;
3508 break;
3509 }
3510
3511 case HMTPRINSTR_WRITE_REG:
3512 case HMTPRINSTR_WRITE_IMM:
3513 {
3514 if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
3515 {
3516 uint32_t u32Val;
3517 int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
3518 AssertRC(rc);
3519 u8Tpr = u32Val;
3520 }
3521 else
3522 u8Tpr = (uint8_t)pPatch->uSrcOperand;
3523
3524 int rc2 = PDMApicSetTPR(pVCpu, u8Tpr);
3525 AssertRC(rc2);
3526 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
3527
3528 pCtx->rip += pPatch->cbOp;
3529 break;
3530 }
3531
3532 default:
3533 AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
3534 pVCpu->hm.s.u32HMError = pPatch->enmType;
3535 return VERR_SVM_UNEXPECTED_PATCH_TYPE;
3536 }
3537 }
3538
3539 return VINF_SUCCESS;
3540}
3541
3542/**
3543 * Determines if an exception is a contributory exception. Contributory
3544 * exceptions are ones which can cause double-faults. Page-fault is
3545 * intentionally not included here as it's a conditional contributory exception.
3546 *
3547 * @returns true if the exception is contributory, false otherwise.
3548 * @param uVector The exception vector.
3549 */
3550DECLINLINE(bool) hmR0SvmIsContributoryXcpt(const uint32_t uVector)
3551{
3552 switch (uVector)
3553 {
3554 case X86_XCPT_GP:
3555 case X86_XCPT_SS:
3556 case X86_XCPT_NP:
3557 case X86_XCPT_TS:
3558 case X86_XCPT_DE:
3559 return true;
3560 default:
3561 break;
3562 }
3563 return false;
3564}
3565
3566
3567/**
3568 * Handle a condition that occurred while delivering an event through the guest
3569 * IDT.
3570 *
3571 * @returns VBox status code (informational error codes included).
3572 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
3573 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
3574 *          continue execution of the guest, which will deliver the #DF.
3575 * @retval VINF_EM_RESET if we detected a triple-fault condition.
3576 *
3577 * @param pVCpu Pointer to the VMCPU.
3578 * @param pCtx Pointer to the guest-CPU context.
3579 * @param pSvmTransient Pointer to the SVM transient structure.
3580 *
3581 * @remarks No-long-jump zone!!!
3582 */
3583static int hmR0SvmCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3584{
3585 int rc = VINF_SUCCESS;
3586 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3587
3588     /* See AMD spec. 15.7.3 "EXITINFO Pseudo-Code". If valid, EXITINTINFO holds the prior exception (IDT vector) that was
3589      * being delivered to the guest when the intercepted #VMEXIT (exit vector) occurred. */
3590 if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
3591 {
3592 uint8_t uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
3593
3594 typedef enum
3595 {
3596 SVMREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
3597 SVMREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
3598 SVMREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
3599 SVMREFLECTXCPT_NONE /* Nothing to reflect. */
3600 } SVMREFLECTXCPT;
3601
3602 SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;
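        /* Escalation rules applied below: a contributory exception raised while delivering a contributory exception
           (or a #PF) becomes a #DF, and any exception raised while delivering a #DF is a triple fault. */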
3603 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION)
3604 {
3605 if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_1F)
3606 {
3607 uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
3608
3609#ifdef VBOX_STRICT
3610 if ( hmR0SvmIsContributoryXcpt(uIdtVector)
3611 && uExitVector == X86_XCPT_PF)
3612 {
3613                     Log4(("IDT: Contributory #PF idCpu=%u uCR2=%#RX64\n", pVCpu->idCpu, pCtx->cr2));
3614 }
3615#endif
3616 if ( uExitVector == X86_XCPT_PF
3617 && uIdtVector == X86_XCPT_PF)
3618 {
3619 pSvmTransient->fVectoringPF = true;
3620 Log4(("IDT: Vectoring #PF uCR2=%#RX64\n", pCtx->cr2));
3621 }
3622 else if ( (pVmcb->ctrl.u32InterceptException & HMSVM_CONTRIBUTORY_XCPT_MASK)
3623 && hmR0SvmIsContributoryXcpt(uExitVector)
3624 && ( hmR0SvmIsContributoryXcpt(uIdtVector)
3625 || uIdtVector == X86_XCPT_PF))
3626 {
3627 enmReflect = SVMREFLECTXCPT_DF;
3628 Log4(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
3629 uIdtVector, uExitVector));
3630 }
3631 else if (uIdtVector == X86_XCPT_DF)
3632 {
3633 enmReflect = SVMREFLECTXCPT_TF;
3634 Log4(("IDT: Pending vectoring triple-fault %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
3635 uIdtVector, uExitVector));
3636 }
3637 else
3638 enmReflect = SVMREFLECTXCPT_XCPT;
3639 }
3640 else
3641 {
3642 /*
3643 * If event delivery caused an #VMEXIT that is not an exception (e.g. #NPF) then reflect the original
3644 * exception to the guest after handling the VM-exit.
3645 */
3646 enmReflect = SVMREFLECTXCPT_XCPT;
3647 }
3648 }
3649 else if (pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT)
3650 {
3651 /* Ignore software interrupts (INT n) as they reoccur when restarting the instruction. */
3652 enmReflect = SVMREFLECTXCPT_XCPT;
3653 }
3654
3655 switch (enmReflect)
3656 {
3657 case SVMREFLECTXCPT_XCPT:
3658 {
3659 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);
3660 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, 0 /* GCPtrFaultAddress */);
3661
3662 /* If uExitVector is #PF, CR2 value will be updated from the VMCB if it's a guest #PF. See hmR0SvmExitXcptPF(). */
3663 Log4(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32\n", pVmcb->ctrl.ExitIntInfo.u,
3664 !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid, pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
3665 break;
3666 }
3667
3668 case SVMREFLECTXCPT_DF:
3669 {
3670 hmR0SvmSetPendingXcptDF(pVCpu);
3671 rc = VINF_HM_DOUBLE_FAULT;
3672 break;
3673 }
3674
3675 case SVMREFLECTXCPT_TF:
3676 {
3677 rc = VINF_EM_RESET;
3678 break;
3679 }
3680
3681 default:
3682 Assert(rc == VINF_SUCCESS);
3683 break;
3684 }
3685 }
3686 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
3687 return rc;
3688}
3689
3690
3691/**
3692 * Advances the guest RIP using the NRIP_SAVE feature if it is supported by the
3693 * CPU, otherwise advances the RIP by @a cb bytes.
3694 *
3695 * @param pVCpu Pointer to the VMCPU.
3696 * @param pCtx Pointer to the guest-CPU context.
3697 * @param cb RIP increment value in bytes.
3698 *
3699 * @remarks Use this function only from #VMEXIT's where the NRIP value is valid
3700 * when NRIP_SAVE is supported by the CPU!
3701 */
3702DECLINLINE(void) hmR0SvmUpdateRip(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
3703{
3704 if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
3705 {
3706 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
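        /* With NRIP_SAVE the VMCB control area holds the address of the next instruction, so no manual
           instruction-length accounting is needed. */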
3707 pCtx->rip = pVmcb->ctrl.u64NextRIP;
3708 }
3709 else
3710 pCtx->rip += cb;
3711}
3712
3713
3714/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
3715/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
3716/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
3717
3718/** @name VM-exit handlers.
3719 * @{
3720 */
3721
3722/**
3723 * #VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
3724 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
3725 */
3726HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3727{
3728 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3729
3730 if (pSvmTransient->u64ExitCode == SVM_EXIT_NMI)
3731 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmi);
3732 else if (pSvmTransient->u64ExitCode == SVM_EXIT_INTR)
3733 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
3734
3735 /*
3736 * AMD-V has no preemption timer and the generic periodic preemption timer has no way to signal -before- the timer
3737      * fires whether the current interrupt is our own timer or some other host interrupt. We also cannot examine what
3738      * interrupt it is until the host actually takes the interrupt.
3739 *
3740 * Going back to executing guest code here unconditionally causes random scheduling problems (observed on an
3741 * AMD Phenom 9850 Quad-Core on Windows 64-bit host).
3742 */
3743 return VINF_EM_RAW_INTERRUPT;
3744}
3745
3746
3747/**
3748 * #VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional #VMEXIT.
3749 */
3750HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3751{
3752 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3753
3754 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
3755 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
3756 int rc = VINF_SUCCESS;
3757 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3758 return rc;
3759}
3760
3761
3762/**
3763 * #VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional #VMEXIT.
3764 */
3765HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3766{
3767 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3768
3769 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
3770 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
3771 int rc = VINF_SUCCESS;
3772 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3773 return rc;
3774}
3775
3776
3777/**
3778 * #VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional #VMEXIT.
3779 */
3780HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3781{
3782 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3783 PVM pVM = pVCpu->CTX_SUFF(pVM);
3784 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
3785 if (RT_LIKELY(rc == VINF_SUCCESS))
3786 {
3787 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
3788 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3789 }
3790 else
3791 {
3792 AssertMsgFailed(("hmR0SvmExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
3793 rc = VERR_EM_INTERPRETER;
3794 }
3795 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
3796 return rc;
3797}
3798
3799
3800/**
3801 * #VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional #VMEXIT.
3802 */
3803HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3804{
3805 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3806 PVM pVM = pVCpu->CTX_SUFF(pVM);
3807 int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
3808 if (RT_LIKELY(rc == VINF_SUCCESS))
3809 {
3810 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
3811 pSvmTransient->fUpdateTscOffsetting = true;
3812
3813 /* Single step check. */
3814 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3815 }
3816 else
3817 {
3818 AssertMsgFailed(("hmR0SvmExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
3819 rc = VERR_EM_INTERPRETER;
3820 }
3821 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
3822 return rc;
3823}
3824
3825
3826/**
3827 * #VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional #VMEXIT.
3828 */
3829HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3830{
3831 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3832 int rc = EMInterpretRdtscp(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
3833 if (RT_LIKELY(rc == VINF_SUCCESS))
3834 {
3835 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
3836 pSvmTransient->fUpdateTscOffsetting = true;
3837 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3838 }
3839 else
3840 {
3841         AssertMsgFailed(("hmR0SvmExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
3842 rc = VERR_EM_INTERPRETER;
3843 }
3844 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
3845 return rc;
3846}
3847
3848
3849/**
3850 * #VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional #VMEXIT.
3851 */
3852HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3853{
3854 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3855 int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
3856 if (RT_LIKELY(rc == VINF_SUCCESS))
3857 {
3858 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
3859 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3860 }
3861 else
3862 {
3863 AssertMsgFailed(("hmR0SvmExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
3864 rc = VERR_EM_INTERPRETER;
3865 }
3866 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
3867 return rc;
3868}
3869
3870
3871/**
3872 * #VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional #VMEXIT.
3873 */
3874HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3875{
3876 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3877 PVM pVM = pVCpu->CTX_SUFF(pVM);
3878 Assert(!pVM->hm.s.fNestedPaging);
3879
3880 /** @todo Decode Assist. */
3881 int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx)); /* Updates RIP if successful. */
3882 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
3883 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
3884 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3885 return rc;
3886}
3887
3888
3889/**
3890 * #VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional #VMEXIT.
3891 */
3892HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3893{
3894 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3895 hmR0SvmUpdateRip(pVCpu, pCtx, 1);
3896 int rc = EMShouldContinueAfterHalt(pVCpu, pCtx) ? VINF_SUCCESS : VINF_EM_HALT;
3897 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3898 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
3899 return rc;
3900}
3901
3902
3903/**
3904 * #VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional #VMEXIT.
3905 */
3906HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3907{
3908 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3909 int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
3910 if (RT_LIKELY(rc == VINF_SUCCESS))
3911 {
3912 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
3913 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3914 }
3915 else
3916 {
3917 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
3918 rc = VERR_EM_INTERPRETER;
3919 }
3920 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
3921 return rc;
3922}
3923
3924
3925/**
3926 * #VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional #VMEXIT.
3927 */
3928HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3929{
3930 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3931 VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
3932 int rc = VBOXSTRICTRC_VAL(rc2);
3933 if ( rc == VINF_EM_HALT
3934 || rc == VINF_SUCCESS)
3935 {
3936 hmR0SvmUpdateRip(pVCpu, pCtx, 3);
3937
3938 if ( rc == VINF_EM_HALT
3939 && EMShouldContinueAfterHalt(pVCpu, pCtx))
3940 {
3941 rc = VINF_SUCCESS;
3942 }
3943 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3944 }
3945 else
3946 {
3947 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
3948 rc = VERR_EM_INTERPRETER;
3949 }
3950 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
3951 ("hmR0SvmExitMwait: EMInterpretMWait failed rc=%Rrc\n", rc));
3952 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
3953 return rc;
3954}
3955
3956
3957/**
3958 * #VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN).
3959 * Conditional #VMEXIT.
3960 */
3961HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3962{
3963 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3964 return VINF_EM_RESET;
3965}
3966
3967
3968/**
3969 * #VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional #VMEXIT.
3970 */
3971HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3972{
3973 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3974
3975 Log4(("hmR0SvmExitReadCRx: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
3976
3977 /** @todo Decode Assist. */
3978 VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
3979 int rc = VBOXSTRICTRC_VAL(rc2);
3980 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3,
3981 ("hmR0SvmExitReadCRx: EMInterpretInstruction failed rc=%Rrc\n", rc));
3982 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
3983 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0]);
3984 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
3985 return rc;
3986}
3987
3988
3989/**
3990 * #VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional #VMEXIT.
3991 */
3992HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
3993{
3994 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
3995 /** @todo Decode Assist. */
3996 VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
3997 int rc = VBOXSTRICTRC_VAL(rc2);
3998 if (rc == VINF_SUCCESS)
3999 {
4000 /* RIP has been updated by EMInterpretInstruction(). */
4001 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0) <= 15);
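        /* The exit code encodes which control register was written (SVM_EXIT_WRITE_CR0 + n for CRn); mark the
           corresponding guest state as dirty so it is reloaded into the VMCB before the next VMRUN. */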
4002 switch (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0)
4003 {
4004 case 0: /* CR0. */
4005 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
4006 break;
4007
4008 case 3: /* CR3. */
4009 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
4010 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
4011 break;
4012
4013 case 4: /* CR4. */
4014 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
4015 break;
4016
4017 case 8: /* CR8 (TPR). */
4018 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
4019 break;
4020
4021 default:
4022                 AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 CRx=%#RX64\n",
4023                                  pSvmTransient->u64ExitCode, pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0));
4024 break;
4025 }
4026 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4027 }
4028 else
4029 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
4030 return rc;
4031}
4032
4033
4034/**
4035 * #VMEXIT handler for instructions that result in a #UD exception delivered to
4036 * the guest.
4037 */
4038HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4039{
4040 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4041 hmR0SvmSetPendingXcptUD(pVCpu);
4042 return VINF_SUCCESS;
4043}
4044
4045
4046/**
4047 * #VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional #VMEXIT.
4048 */
4049HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4050{
4051 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4052 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4053 PVM pVM = pVCpu->CTX_SUFF(pVM);
4054
4055 int rc;
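    /* EXITINFO1 gives the MSR access type: 0 = RDMSR, 1 = WRMSR. */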
4056 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
4057 {
4058 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
4059
4060 /* Handle TPR patching; intercepted LSTAR write. */
4061 if ( pVM->hm.s.fTPRPatchingActive
4062 && pCtx->ecx == MSR_K8_LSTAR)
4063 {
4064 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
4065 {
4066 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
4067 int rc2 = PDMApicSetTPR(pVCpu, pCtx->eax & 0xff);
4068 AssertRC(rc2);
4069 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
4070 }
4071 hmR0SvmUpdateRip(pVCpu, pCtx, 2);
4072 rc = VINF_SUCCESS;
4073 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4074 return rc;
4075 }
4076
4077 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
4078 {
4079 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4080 if (RT_LIKELY(rc == VINF_SUCCESS))
4081 {
4082 pCtx->rip = pVmcb->ctrl.u64NextRIP;
4083 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4084 }
4085 else
4086 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
4087 }
4088 else
4089 {
4090 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */));
4091 if (RT_UNLIKELY(rc != VINF_SUCCESS))
4092 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: WrMsr. EMInterpretInstruction failed rc=%Rrc\n", rc));
4093 /* RIP updated by EMInterpretInstruction(). */
4094 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4095 }
4096
4097 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
4098 if ( pCtx->ecx >= MSR_IA32_X2APIC_START
4099 && pCtx->ecx <= MSR_IA32_X2APIC_END)
4100 {
4101 /* We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest(). When full APIC register
4102 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCB before
4103              *  EMInterpretWrmsr() changes it. */
4104 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
4105 }
4106 else if (pCtx->ecx == MSR_K6_EFER)
4107 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_EFER_MSR;
4108 else if (pCtx->ecx == MSR_IA32_TSC)
4109 pSvmTransient->fUpdateTscOffsetting = true;
4110 }
4111 else
4112 {
4113 /* MSR Read access. */
4114 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
4115 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ);
4116
4117 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
4118 {
4119 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4120 if (RT_LIKELY(rc == VINF_SUCCESS))
4121 {
4122 pCtx->rip = pVmcb->ctrl.u64NextRIP;
4123 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4124 }
4125 else
4126 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc));
4127 }
4128 else
4129 {
4130 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0));
4131 if (RT_UNLIKELY(rc != VINF_SUCCESS))
4132 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: RdMsr. EMInterpretInstruction failed rc=%Rrc\n", rc));
4133 /* RIP updated by EMInterpretInstruction(). */
4134 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4135 }
4136 }
4137
4138 /* RIP has been updated by EMInterpret[Rd|Wr]msr(). */
4139 return rc;
4140}
4141
4142
4143/**
4144 * #VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional #VMEXIT.
4145 */
4146HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4147{
4148 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4149 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
4150
4151 /* We should -not- get this VM-exit if the guest is debugging. */
4152 AssertMsgReturn(!CPUMIsGuestDebugStateActive(pVCpu),
4153 ("hmR0SvmExitReadDRx: Unexpected exit. pVCpu=%p pCtx=%p\n", pVCpu, pCtx),
4154 VERR_SVM_UNEXPECTED_EXIT);
4155
4156 /*
4157 * Lazy DR0-3 loading?
4158 */
4159 if (!CPUMIsHyperDebugStateActive(pVCpu))
4160 {
4161 Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
4162 Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
4163
4164 /* Don't intercept DRx read and writes. */
4165 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4166 pVmcb->ctrl.u16InterceptRdDRx = 0;
4167 pVmcb->ctrl.u16InterceptWrDRx = 0;
4168 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
4169
4170 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
4171 CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
4172 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4173
4174 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
4175 return VINF_SUCCESS;
4176 }
4177
4178 /*
4179 * Interpret the read/writing of DRx.
4180 */
4181 /** @todo Decode assist. */
4182 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
4183     Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
4184 if (RT_LIKELY(rc == VINF_SUCCESS))
4185 {
4186         /* Not strictly necessary for read accesses, but it doesn't hurt for now; will be fixed with decode assist. */
4187 /** @todo CPUM should set this flag! */
4188 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
4189 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4190 }
4191 else
4192 Assert(rc == VERR_EM_INTERPRETER);
4193 return VBOXSTRICTRC_TODO(rc);
4194}
4195
4196
4197/**
4198 * #VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional #VMEXIT.
4199 */
4200HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4201{
4202 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4203     /* For now it's the same since we interpret the instruction anyway. Will change when use of Decode Assist is implemented. */
4204 int rc = hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
4205 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
4206 STAM_COUNTER_DEC(&pVCpu->hm.s.StatExitDRxRead);
4207 return rc;
4208}
4209
4210
4211/**
4212 * #VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional #VMEXIT.
4213 */
4214HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4215{
4216 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4217
4218 /* I/O operation lookup arrays. */
4219 static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
4220 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
4221 the result (in AL/AX/EAX). */
4222 Log4(("hmR0SvmExitIOInstr: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
4223
4224 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4225 PVM pVM = pVCpu->CTX_SUFF(pVM);
4226
4227 /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
4228 SVMIOIOEXIT IoExitInfo;
4229 IoExitInfo.u = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
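    /* Bits 4-6 of EXITINFO1 form a one-hot operand-size field (SZ8/SZ16/SZ32); it indexes the lookup tables above. */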
4230 uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
4231 uint32_t cbValue = s_aIOSize[uIOWidth];
4232 uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
4233
4234 if (RT_UNLIKELY(!cbValue))
4235 {
4236 AssertMsgFailed(("hmR0SvmExitIOInstr: Invalid IO operation. uIOWidth=%u\n", uIOWidth));
4237 return VERR_EM_INTERPRETER;
4238 }
4239
4240 VBOXSTRICTRC rcStrict;
4241 if (IoExitInfo.n.u1STR)
4242 {
4243 /* INS/OUTS - I/O String instruction. */
4244 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
4245
4246 /** @todo Huh? why can't we use the segment prefix information given by AMD-V
4247 * in EXITINFO1? Investigate once this thing is up and running. */
4248
4249 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
4250 if (rcStrict == VINF_SUCCESS)
4251 {
4252 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
4253 {
4254 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
4255 (DISCPUMODE)pDis->uAddrMode, cbValue);
4256 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
4257 }
4258 else
4259 {
4260 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
4261 (DISCPUMODE)pDis->uAddrMode, cbValue);
4262 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
4263 }
4264 }
4265 else
4266 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
4267 }
4268 else
4269 {
4270 /* IN/OUT - I/O instruction. */
4271 Assert(!IoExitInfo.n.u1REP);
4272
4273 if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
4274 {
4275 rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
4276 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
4277 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, cbValue);
4278
4279 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
4280 }
4281 else
4282 {
4283 uint32_t u32Val = 0;
4284
4285 rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
4286 if (IOM_SUCCESS(rcStrict))
4287 {
4288 /* Save result of I/O IN instr. in AL/AX/EAX. */
4289 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
4290 }
4291 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
4292 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, cbValue);
4293
4294 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
4295 }
4296 }
4297
4298 if (IOM_SUCCESS(rcStrict))
4299 {
4300 /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
4301 pCtx->rip = pVmcb->ctrl.u64ExitInfo2;
4302
4303 /*
4304 * If any I/O breakpoints are armed, we need to check if one triggered
4305 * and take appropriate action.
4306 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
4307 */
4308 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
4309 * execution engines about whether hyper BPs and such are pending. */
4310 uint32_t const uDr7 = pCtx->dr[7];
4311 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
4312 && X86_DR7_ANY_RW_IO(uDr7)
4313 && (pCtx->cr4 & X86_CR4_DE))
4314 || DBGFBpIsHwIoArmed(pVM)))
4315 {
4316 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
4317 CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
4318
4319 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, IoExitInfo.n.u16Port, cbValue);
4320 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
4321 {
4322 /* Raise #DB. */
4323 pVmcb->guest.u64DR6 = pCtx->dr[6];
4324 pVmcb->guest.u64DR7 = pCtx->dr[7];
4325 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
4326 hmR0SvmSetPendingXcptDB(pVCpu);
4327 }
4328 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
4329 else if ( rcStrict2 != VINF_SUCCESS
4330 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
4331 rcStrict = rcStrict2;
4332 }
4333
4334 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
4335 }
4336
4337#ifdef VBOX_STRICT
4338 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
4339 Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
4340 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
4341 Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
4342 else
4343 {
4344 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
4345         * statuses that the VMM device and some others may return. See
4346 * IOM_SUCCESS() for guidance. */
4347 AssertMsg( RT_FAILURE(rcStrict)
4348 || rcStrict == VINF_SUCCESS
4349 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
4350 || rcStrict == VINF_EM_DBG_BREAKPOINT
4351 || rcStrict == VINF_EM_RAW_GUEST_TRAP
4352 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
4353 }
4354#endif
4355 return VBOXSTRICTRC_TODO(rcStrict);
4356}
4357
4358
4359/**
4360 * #VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional
4361 * #VMEXIT.
4362 */
4363HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4364{
4365 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4366 PVM pVM = pVCpu->CTX_SUFF(pVM);
4367 Assert(pVM->hm.s.fNestedPaging);
4368
4369 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
4370
4371 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
4372 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4373 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
4374 RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
4375
4376 Log4(("#NPF at CS:RIP=%04x:%#RX64 faultaddr=%RGp errcode=%#x \n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, u32ErrCode));
4377
4378#ifdef VBOX_HM_WITH_GUEST_PATCHING
4379 /* TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions. */
4380 if ( pVM->hm.s.fTRPPatchingAllowed
4381 && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == 0x80 /* TPR offset. */
4382 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */
4383 || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
4384 && !CPUMGetGuestCPL(pVCpu)
4385 && !CPUMIsGuestInLongModeEx(pCtx)
4386 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
4387 {
4388 RTGCPHYS GCPhysApicBase = pCtx->msrApicBase;
4389 GCPhysApicBase &= PAGE_BASE_GC_MASK;
4390
4391 if (GCPhysFaultAddr == GCPhysApicBase + 0x80)
4392 {
4393 /* Only attempt to patch the instruction once. */
4394 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
4395 if (!pPatch)
4396 return VINF_EM_HM_PATCH_TPR_INSTR;
4397 }
4398 }
4399#endif
4400
4401 /*
4402 * Determine the nested paging mode.
4403 */
4404 PGMMODE enmNestedPagingMode;
4405#if HC_ARCH_BITS == 32
4406 if (CPUMIsGuestInLongModeEx(pCtx))
4407 enmNestedPagingMode = PGMMODE_AMD64_NX;
4408 else
4409#endif
4410 enmNestedPagingMode = PGMGetHostMode(pVM);
4411
4412 /*
4413 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
4414 */
4415 int rc;
4416 Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
4417 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
4418 {
4419 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
4420 u32ErrCode);
4421 rc = VBOXSTRICTRC_VAL(rc2);
4422
4423 /*
4424 * If we succeed, resume guest execution.
4425 * If we fail in interpreting the instruction because we couldn't get the guest physical address
4426 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
4427 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
4428 * weird case. See @bugref{6043}.
4429 */
4430 if ( rc == VINF_SUCCESS
4431 || rc == VERR_PAGE_TABLE_NOT_PRESENT
4432 || rc == VERR_PAGE_NOT_PRESENT)
4433 {
4434 /* Successfully handled MMIO operation. */
4435 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
4436 rc = VINF_SUCCESS;
4437 }
4438 return rc;
4439 }
4440
4441 TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
4442 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
4443 TRPMResetTrap(pVCpu);
4444
4445 Log4(("#NPF: PGMR0Trap0eHandlerNestedPaging returned %Rrc CS:RIP=%04x:%#RX64\n", rc, pCtx->cs.Sel, pCtx->rip));
4446
4447 /*
4448 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
4449 */
4450 if ( rc == VINF_SUCCESS
4451 || rc == VERR_PAGE_TABLE_NOT_PRESENT
4452 || rc == VERR_PAGE_NOT_PRESENT)
4453 {
4454 /* We've successfully synced our shadow page tables. */
4455 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
4456 rc = VINF_SUCCESS;
4457 }
4458
4459 return rc;
4460}
4461
4462
4463/**
4464 * #VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional #VMEXIT.
4465 */
4466HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4467{
4468 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4469
4470 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4471 pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 0; /* No virtual interrupts pending, we'll inject the current one before reentry. */
4472 pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;
4473
4474     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
4475 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_VINTR;
4476 pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
4477
4478 /* Deliver the pending interrupt via hmR0SvmPreRunGuest()->hmR0SvmInjectEventVmcb() and resume guest execution. */
4479 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
4480 return VINF_SUCCESS;
4481}
4482
4483
4484/**
4485 * #VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional #VMEXIT.
4486 */
4487HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4488{
4489 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4490
4491#ifndef HMSVM_ALWAYS_TRAP_TASK_SWITCH
4492 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
4493#endif
4494
4495     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
4496 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4497 if ( !(pVmcb->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
4498 && pVCpu->hm.s.Event.fPending)
4499 {
4500 /*
4501 * AMD-V does not provide us with the original exception but we have it in u64IntrInfo since we
4502 * injected the event during VM-entry. Software interrupts and exceptions will be regenerated
4503 * when the recompiler restarts the instruction.
4504 */
4505 SVMEVENT Event;
4506 Event.u = pVCpu->hm.s.Event.u64IntrInfo;
4507 if ( Event.n.u3Type == SVM_EVENT_EXCEPTION
4508 || Event.n.u3Type == SVM_EVENT_SOFTWARE_INT)
4509 {
4510 pVCpu->hm.s.Event.fPending = false;
4511 }
4512 else
4513 Log4(("hmR0SvmExitTaskSwitch: TS occurred during event delivery. Kept pending u8Vector=%#x\n", Event.n.u8Vector));
4514 }
4515
4516 /** @todo Emulate task switch someday, currently just going back to ring-3 for
4517 * emulation. */
4518 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
4519 return VERR_EM_INTERPRETER;
4520}
4521
4522
4523/**
4524 * #VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional #VMEXIT.
4525 */
4526HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4527{
4528 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4529
4530 int rc = hmR0SvmEmulateMovTpr(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
4531 if (RT_LIKELY(rc == VINF_SUCCESS))
4532 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
4533 else
4534 hmR0SvmSetPendingXcptUD(pVCpu);
4535 return VINF_SUCCESS;
4536}
4537
4538
4539/**
4540 * #VMEXIT handler for page-fault exceptions (SVM_EXIT_EXCEPTION_E). Conditional
4541 * #VMEXIT.
4542 */
4543HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4544{
4545 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4546
4547 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
4548
4549 /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
4550 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4551 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1;
4552 RTGCUINTPTR uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
4553 PVM pVM = pVCpu->CTX_SUFF(pVM);
4554
4555#if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(HMSVM_ALWAYS_TRAP_PF)
4556 if (pVM->hm.s.fNestedPaging)
4557 {
4558 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
4559 if (!pSvmTransient->fVectoringPF)
4560 {
4561 /* A genuine guest #PF, reflect it to the guest. */
4562 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
4563 Log4(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RGv ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
4564 uFaultAddress, u32ErrCode));
4565 }
4566 else
4567 {
4568 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
4569 hmR0SvmSetPendingXcptDF(pVCpu);
4570 Log4(("Pending #DF due to vectoring #PF. NP\n"));
4571 }
4572 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
4573 return VINF_SUCCESS;
4574 }
4575#endif
4576
4577 Assert(!pVM->hm.s.fNestedPaging);
4578
4579#ifdef VBOX_HM_WITH_GUEST_PATCHING
4580 /* Shortcut for APIC TPR reads and writes; only applicable to 32-bit guests. */
4581 if ( pVM->hm.s.fTRPPatchingAllowed
4582 && (uFaultAddress & 0xfff) == 0x80 /* TPR offset. */
4583 && !(u32ErrCode & X86_TRAP_PF_P) /* Not present. */
4584 && !CPUMGetGuestCPL(pVCpu)
4585 && !CPUMIsGuestInLongModeEx(pCtx)
4586 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
4587 {
4588 RTGCPHYS GCPhysApicBase;
4589 GCPhysApicBase = pCtx->msrApicBase;
4590 GCPhysApicBase &= PAGE_BASE_GC_MASK;
4591
4592 /* Check if the page at the fault-address is the APIC base. */
4593 RTGCPHYS GCPhysPage;
4594 int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
4595 if ( rc2 == VINF_SUCCESS
4596 && GCPhysPage == GCPhysApicBase)
4597 {
4598 /* Only attempt to patch the instruction once. */
4599 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
4600 if (!pPatch)
4601 return VINF_EM_HM_PATCH_TPR_INSTR;
4602 }
4603 }
4604#endif
4605
4606 Log4(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 u32ErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
4607 pCtx->rip, u32ErrCode, pCtx->cr3));
4608
4609 TRPMAssertXcptPF(pVCpu, uFaultAddress, u32ErrCode);
4610 int rc = PGMTrap0eHandler(pVCpu, u32ErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
4611
4612 Log4(("#PF rc=%Rrc\n", rc));
4613
4614 if (rc == VINF_SUCCESS)
4615 {
4616         /* Successfully synced shadow page tables or emulated an MMIO instruction. */
4617 TRPMResetTrap(pVCpu);
4618 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
4619 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
4620 return rc;
4621 }
4622 else if (rc == VINF_EM_RAW_GUEST_TRAP)
4623 {
4624 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
4625
4626 if (!pSvmTransient->fVectoringPF)
4627 {
4628 /* It's a guest page fault and needs to be reflected to the guest. */
4629 u32ErrCode = TRPMGetErrorCode(pVCpu); /* The error code might have been changed. */
4630 TRPMResetTrap(pVCpu);
4631 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
4632 }
4633 else
4634 {
4635 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
4636 TRPMResetTrap(pVCpu);
4637 hmR0SvmSetPendingXcptDF(pVCpu);
4638 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
4639 }
4640
4641 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
4642 return VINF_SUCCESS;
4643 }
4644
4645 TRPMResetTrap(pVCpu);
4646 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
4647 return rc;
4648}
4649
4650
4651/**
4652 * #VMEXIT handler for device-not-available exceptions (SVM_EXIT_EXCEPTION_7).
4653 * Conditional #VMEXIT.
4654 */
4655HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4656{
4657 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4658
4659 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
4660
4661#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
4662 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
4663#endif
4664
4665 /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
4666 int rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
4667 if (rc == VINF_SUCCESS)
4668 {
4669 Assert(CPUMIsGuestFPUStateActive(pVCpu));
4670 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
4671 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
4672 return VINF_SUCCESS;
4673 }
4674
4675 /* Forward #NM to the guest. */
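 /* CPUMR0LoadGuestFPU is assumed to return VINF_EM_RAW_GUEST_TRAP when the guest's own CR0.TS/CR0.EM caused the fault, so the #NM must be reflected to the guest. */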
4676 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
4677 hmR0SvmSetPendingXcptNM(pVCpu);
4678 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
4679 return VINF_SUCCESS;
4680}
4681
4682
4683/**
4684 * #VMEXIT handler for math-fault exceptions (SVM_EXIT_EXCEPTION_10).
4685 * Conditional #VMEXIT.
4686 */
4687HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4688{
4689 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4690
4691 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
4692
4693 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
4694
4695 if (!(pCtx->cr0 & X86_CR0_NE))
4696 {
4697 /* CR0.NE is clear: old-style FPU error reporting (external FERR#/IRQ 13) is in effect and needs some extra work. */
4698 /** @todo don't fall back to the recompiler, but do it manually. */
4699 return VERR_EM_INTERPRETER;
4700 }
4701
4702 hmR0SvmSetPendingXcptMF(pVCpu);
4703 return VINF_SUCCESS;
4704}
4705
4706
4707/**
4708 * #VMEXIT handler for debug exceptions (SVM_EXIT_EXCEPTION_1). Conditional
4709 * #VMEXIT.
4710 */
4711HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
4712{
4713 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
4714
4715 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
4716
4717 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
4718
4719 /* If we set the trap flag (EFLAGS.TF) to single-step the guest, we have to clear it again now. */
4720 if (pVCpu->hm.s.fClearTrapFlag)
4721 {
4722 pVCpu->hm.s.fClearTrapFlag = false;
4723 pCtx->eflags.Bits.u1TF = 0;
4724 }
4725
4726 /* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint). However, for both cases
4727 DR6 and DR7 are updated to what the exception handler expects. See AMD spec. 15.12.2 "#DB (Debug)". */
4728 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
4729 PVM pVM = pVCpu->CTX_SUFF(pVM);
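 /* Give DBGF first crack at the #DB; it returns VINF_EM_RAW_GUEST_TRAP when the exception is meant for the guest rather than the debugger/hypervisor. */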
4730 int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
4731 if (rc == VINF_EM_RAW_GUEST_TRAP)
4732 {
4733 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
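 /* DR6 bits are sticky; merge the bits recorded in the VMCB into the guest's virtual DR6 while the hypervisor owns the real debug registers. */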
4734 if (CPUMIsHyperDebugStateActive(pVCpu))
4735 CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
4736
4737 /* Reflect the exception back to the guest. */
4738 hmR0SvmSetPendingXcptDB(pVCpu);
4739 rc = VINF_SUCCESS;
4740 }
4741
4742 /*
4743 * Update DR6.
4744 */
4745 if (CPUMIsHyperDebugStateActive(pVCpu))
4746 {
4747 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
4748 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
4749 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
4750 }
4751 else
4752 {
4753 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
4754 Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
4755 }
4756
4757 return rc;
4758}
4759
4760/** @} */
4761