VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp@10330

Last change on this file since 10330 was 10330, checked in by vboxsync, 16 years ago

Wrong assertion. Due to ring 3 far jumps the assertion condition can actually be false.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 82.1 KB
/* $Id: HWSVMR0.cpp 10330 2008-07-07 14:21:22Z vboxsync $ */
/** @file
 * HWACCM SVM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/dis.h>
#include <VBox/dbgf.h>
#include <VBox/disopcode.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include "HWSVMR0.h"

static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID);

/**
 * Sets up and activates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            CPU info struct
 * @param   pVM             The VM to operate on.
 * @param   pvPageCpu       Pointer to the global CPU page
 * @param   pPageCpuPhys    Physical address of the global CPU page
 */
HWACCMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

    /* We must turn on AMD-V and set up the host state physical address, as those MSRs are per-cpu/core. */

#ifdef LOG_ENABLED
    SUPR0Printf("SVMR0EnableCpu cpu %d page (%x) %x\n", pCpu->idCpu, pvPageCpu, (uint32_t)pPageCpuPhys);
#endif

    /* Turn on AMD-V in the EFER MSR. */
    uint64_t val = ASMRdMsr(MSR_K6_EFER);
    if (!(val & MSR_K6_EFER_SVME))
        ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, pPageCpuPhys);

    pCpu->uCurrentASID = 0;     /* we'll always increment this the first time (host uses ASID 0) */
    pCpu->cTLBFlushes  = 0;
    return VINF_SUCCESS;
}

/**
 * Deactivates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            CPU info struct
 * @param   pvPageCpu       Pointer to the global CPU page
 * @param   pPageCpuPhys    Physical address of the global CPU page
 */
HWACCMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("SVMR0DisableCpu cpu %d\n", pCpu->idCpu);
#endif

    /* Turn off AMD-V in the EFER MSR. */
    uint64_t val = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);

    /* Invalidate the host state physical address. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
    pCpu->uCurrentASID = 0;

    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per-VM AMD-V initialization.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0InitVM(PVM pVM)
{
    int rc;

    /* Allocate one page for the VM control block (VMCB). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjVMCB, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pVMCB     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjVMCB);
    pVM->hwaccm.s.svm.pVMCBPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjVMCB, 0);
    ASMMemZero32(pVM->hwaccm.s.svm.pVMCB, PAGE_SIZE);

    /* Allocate one page for the host context. */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjVMCBHost, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pVMCBHost     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjVMCBHost);
    pVM->hwaccm.s.svm.pVMCBHostPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjVMCBHost, 0);
    ASMMemZero32(pVM->hwaccm.s.svm.pVMCBHost, PAGE_SIZE);

    /* Allocate 12 KB for the IO bitmap (there doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjIOBitmap, 3 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pIOBitmap     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjIOBitmap);
    pVM->hwaccm.s.svm.pIOBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjIOBitmap, 0);
    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(pVM->hwaccm.s.svm.pIOBitmap, PAGE_SIZE*3, 0xffffffff);
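    /* Note: the IOPM spans three contiguous 4K pages as the SVM spec demands:
       one bit per port over the full 64K port range (8 KB) plus an extra page
       so multi-byte accesses near the end of the range are still covered. */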

    /* Allocate 8 KB for the MSR bitmap (there doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pMSRBitmap     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjMSRBitmap);
    pVM->hwaccm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjMSRBitmap, 0);
    /* Set all bits to intercept all MSR accesses. */
    ASMMemFill32(pVM->hwaccm.s.svm.pMSRBitmap, PAGE_SIZE*2, 0xffffffff);
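    /* Note: the MSRPM holds two bits per MSR (one read, one write intercept)
       for three architecturally defined MSR ranges, which is why it occupies
       two contiguous 4K pages. */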

    /* Erratum 170 requires a forced TLB flush for each world switch;
     * see http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     *   Athlon X2:   model 0x6b stepping 1/2
     *                model 0x68 stepping 1/2
     *   Athlon 64:   model 0x7f stepping 1
     *                model 0x6f stepping 2
     *   Sempron:     model 0x7f stepping 1/2
     *                model 0x6f stepping 2
     *                model 0x6c stepping 2
     *                model 0x7c stepping 2
     *   Turion 64:   model 0x68 stepping 2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
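    /* Compose the effective family/model from the base and extended CPUID
       fields; the extended fields only contribute when the base family is 0xf,
       which covers all K8 parts affected by erratum 170. */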
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;
    if (    u32Family == 0xf
        &&  !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        &&  !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
        pVM->hwaccm.s.svm.fAlwaysFlushTLB = true;
    }

    /* Invalidate the last cpu we were running on. */
    pVM->hwaccm.s.svm.idLastCpu = NIL_RTCPUID;
    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per-VM AMD-V termination.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0TermVM(PVM pVM)
{
    if (pVM->hwaccm.s.svm.pMemObjVMCB)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjVMCB, false);
        pVM->hwaccm.s.svm.pVMCB       = 0;
        pVM->hwaccm.s.svm.pVMCBPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjVMCB = 0;
    }
    if (pVM->hwaccm.s.svm.pMemObjVMCBHost)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjVMCBHost, false);
        pVM->hwaccm.s.svm.pVMCBHost       = 0;
        pVM->hwaccm.s.svm.pVMCBHostPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjVMCBHost = 0;
    }
    if (pVM->hwaccm.s.svm.pMemObjIOBitmap)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjIOBitmap, false);
        pVM->hwaccm.s.svm.pIOBitmap       = 0;
        pVM->hwaccm.s.svm.pIOBitmapPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjIOBitmap = 0;
    }
    if (pVM->hwaccm.s.svm.pMemObjMSRBitmap)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjMSRBitmap, false);
        pVM->hwaccm.s.svm.pMSRBitmap       = 0;
        pVM->hwaccm.s.svm.pMSRBitmapPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjMSRBitmap = 0;
    }
    return VINF_SUCCESS;
}

/**
 * Sets up AMD-V for the specified VM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;
    SVM_VMCB *pVMCB;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

    Assert(pVM->hwaccm.s.svm.fSupported);

    pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* Program the control fields. Most of them never have to be changed again. */
    /* CR0/3/4 reads must be intercepted; our shadow values are not necessarily the same as the guest's. */
    /* Note: CR0 & CR4 can be safely read when guest and shadow copies are identical. */
    if (!pVM->hwaccm.s.fNestedPaging)
        pVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4);
    else
        pVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);

    /*
     * CR0/3/4 writes must be intercepted for obvious reasons.
     */
    if (!pVM->hwaccm.s.fNestedPaging)
        pVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4);
    else
        pVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);

    /* Intercept all DRx reads and writes. */
    pVMCB->ctrl.u16InterceptRdDRx = RT_BIT(0) | RT_BIT(1) | RT_BIT(2) | RT_BIT(3) | RT_BIT(4) | RT_BIT(5) | RT_BIT(6) | RT_BIT(7);
    pVMCB->ctrl.u16InterceptWrDRx = RT_BIT(0) | RT_BIT(1) | RT_BIT(2) | RT_BIT(3) | RT_BIT(4) | RT_BIT(5) | RT_BIT(6) | RT_BIT(7);

    /* Currently we don't care about DRx reads or writes. The DRx registers are trashed;
     * all breakpoints are automatically cleared when the VM exits.
     */

    pVMCB->ctrl.u32InterceptException = HWACCM_SVM_TRAP_MASK;
#ifndef DEBUG
    if (pVM->hwaccm.s.fNestedPaging)
        pVMCB->ctrl.u32InterceptException &= ~RT_BIT(14);   /* no longer need to intercept #PF. */
#endif

    pVMCB->ctrl.u32InterceptCtrl1 =   SVM_CTRL1_INTERCEPT_INTR
                                    | SVM_CTRL1_INTERCEPT_VINTR
                                    | SVM_CTRL1_INTERCEPT_NMI
                                    | SVM_CTRL1_INTERCEPT_SMI
                                    | SVM_CTRL1_INTERCEPT_INIT
                                    | SVM_CTRL1_INTERCEPT_RDPMC
                                    | SVM_CTRL1_INTERCEPT_CPUID
                                    | SVM_CTRL1_INTERCEPT_RSM
                                    | SVM_CTRL1_INTERCEPT_HLT
                                    | SVM_CTRL1_INTERCEPT_INOUT_BITMAP
                                    | SVM_CTRL1_INTERCEPT_MSR_SHADOW
                                    | SVM_CTRL1_INTERCEPT_INVLPG
                                    | SVM_CTRL1_INTERCEPT_INVLPGA       /* AMD only */
                                    | SVM_CTRL1_INTERCEPT_TASK_SWITCH
                                    | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* fatal */
                                    | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Legacy FPU FERR handling. */
    /* With nested paging we don't care about invlpg anymore. */
    if (pVM->hwaccm.s.fNestedPaging)
        pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_INVLPG;

    pVMCB->ctrl.u32InterceptCtrl2 =   SVM_CTRL2_INTERCEPT_VMRUN         /* required */
                                    | SVM_CTRL2_INTERCEPT_VMMCALL
                                    | SVM_CTRL2_INTERCEPT_VMLOAD
                                    | SVM_CTRL2_INTERCEPT_VMSAVE
                                    | SVM_CTRL2_INTERCEPT_STGI
                                    | SVM_CTRL2_INTERCEPT_CLGI
                                    | SVM_CTRL2_INTERCEPT_SKINIT
                                    | SVM_CTRL2_INTERCEPT_RDTSCP        /* AMD only; we don't support this one */
                                    | SVM_CTRL2_INTERCEPT_WBINVD
                                    | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
    Log(("pVMCB->ctrl.u32InterceptException = %x\n", pVMCB->ctrl.u32InterceptException));
    Log(("pVMCB->ctrl.u32InterceptCtrl1     = %x\n", pVMCB->ctrl.u32InterceptCtrl1));
    Log(("pVMCB->ctrl.u32InterceptCtrl2     = %x\n", pVMCB->ctrl.u32InterceptCtrl2));

    /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
    pVMCB->ctrl.IntCtrl.n.u1VIrqMasking = 1;
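    /* Note: with V_INTR_MASKING set, the guest's EFLAGS.IF and TPR only gate
       virtual interrupts; physical interrupts still reach the host, which is
       what keeps the host responsive while the guest runs with IF=0. */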

    /* Set the IO and MSR bitmap addresses. */
    pVMCB->ctrl.u64IOPMPhysAddr  = pVM->hwaccm.s.svm.pIOBitmapPhys;
    pVMCB->ctrl.u64MSRPMPhysAddr = pVM->hwaccm.s.svm.pMSRBitmapPhys;

    /* No LBR virtualization. */
    pVMCB->ctrl.u64LBRVirt = 0;

    /* The ASID must start at 1; the host uses 0. */
    pVMCB->ctrl.TLBCtrl.n.u32ASID = 1;

    /* Set up the PAT MSR (nested paging only). */
    pVMCB->guest.u64GPAT = 0x0007040600070406ULL;
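    /* Note: 0x0007040600070406 mirrors the architectural power-on default,
       i.e. the PAT entry pattern WB, WT, UC-, UC repeated for both halves. */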
    return rc;
}


/**
 * Injects an event (trap or external interrupt).
 *
 * @param   pVM     The VM to operate on.
 * @param   pVMCB   SVM control block
 * @param   pCtx    CPU Context
 * @param   pEvent  SVM event descriptor
 */
inline void SVMR0InjectEvent(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx, SVM_EVENT *pEvent)
{
#ifdef VBOX_STRICT
    if (pEvent->n.u8Vector == 0xE)
        Log(("SVM: Inject int %d at %VGv error code=%02x CR2=%VGv intInfo=%08x\n", pEvent->n.u8Vector, pCtx->rip, pEvent->n.u32ErrorCode, pCtx->cr2, pEvent->au64[0]));
    else
    if (pEvent->n.u8Vector < 0x20)
        Log(("SVM: Inject int %d at %VGv error code=%08x\n", pEvent->n.u8Vector, pCtx->rip, pEvent->n.u32ErrorCode));
    else
    {
        Log(("INJ-EI: %x at %VGv\n", pEvent->n.u8Vector, pCtx->rip));
        Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
        Assert(pCtx->eflags.u32 & X86_EFL_IF);
    }
#endif

    /* Set the event injection state. */
    pVMCB->ctrl.EventInject.au64[0] = pEvent->au64[0];
}


/**
 * Checks for pending guest interrupts and injects them.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pVMCB   SVM control block
 * @param   pCtx    CPU Context
 */
static int SVMR0CheckPendingInterrupt(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
{
    int rc;

    /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
    if (pVM->hwaccm.s.Event.fPending)
    {
        SVM_EVENT Event;

        Log(("Reinjecting event %08x %08x at %VGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, pCtx->rip));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
        Event.au64[0] = pVM->hwaccm.s.Event.intInfo;
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

        pVM->hwaccm.s.Event.fPending = false;
        return VINF_SUCCESS;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    if (    !TRPMHasTrap(pVM)
        &&  VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
    {
        if (!(pCtx->eflags.u32 & X86_EFL_IF))
        {
            if (!pVMCB->ctrl.IntCtrl.n.u1VIrqValid)
            {
                LogFlow(("Enable irq window exit!\n"));
                /** @todo use virtual interrupt method to inject a pending irq; dispatched as soon as guest.IF is set. */
                pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
                pVMCB->ctrl.IntCtrl.n.u1VIrqValid  = 1;
                pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR  = 1; /* ignore the priority in the TPR; just deliver it */
                pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0; /* don't care */
            }
        }
        else
        if (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        {
            uint8_t u8Interrupt;

            rc = PDMGetInterrupt(pVM, &u8Interrupt);
            Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Vrc\n", u8Interrupt, u8Interrupt, rc));
            if (VBOX_SUCCESS(rc))
            {
                rc = TRPMAssertTrap(pVM, u8Interrupt, TRPM_HARDWARE_INT);
                AssertRC(rc);
            }
            else
            {
                /* Can only happen in rare cases where a pending interrupt is cleared behind our back. */
                Assert(!VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchGuestIrq);
                /* Just continue */
            }
        }
        else
            Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS!!\n", pCtx->rip));
    }

#ifdef VBOX_STRICT
    if (TRPMHasTrap(pVM))
    {
        uint8_t u8Vector;
        rc = TRPMQueryTrapAll(pVM, &u8Vector, 0, 0, 0);
        AssertRC(rc);
    }
#endif

    if (    pCtx->eflags.u32 & X86_EFL_IF
        &&  (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        &&  TRPMHasTrap(pVM)
       )
    {
        uint8_t     u8Vector;
        int         rc;
        TRPMEVENT   enmType;
        SVM_EVENT   Event;
        RTGCUINT    u32ErrorCode;

        Event.au64[0] = 0;

        /* If a new event is pending, then dispatch it now. */
        rc = TRPMQueryTrapAll(pVM, &u8Vector, &enmType, &u32ErrorCode, 0);
        AssertRC(rc);
        Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
        Assert(enmType != TRPM_SOFTWARE_INT);

        /* Clear the pending trap. */
        rc = TRPMResetTrap(pVM);
        AssertRC(rc);

        Event.n.u8Vector     = u8Vector;
        Event.n.u1Valid      = 1;
        Event.n.u32ErrorCode = u32ErrorCode;

        if (enmType == TRPM_TRAP)
        {
            switch (u8Vector)
            {
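            /* Note: vectors 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP),
               14 (#PF) and 17 (#AC) push an error code, so the injected event
               must carry one as well. */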
            case 8:
            case 10:
            case 11:
            case 12:
            case 13:
            case 14:
            case 17:
                /* Valid error codes. */
                Event.n.u1ErrorCodeValid = 1;
                break;
            default:
                break;
            }
            if (u8Vector == X86_XCPT_NMI)
                Event.n.u3Type = SVM_EVENT_NMI;
            else
                Event.n.u3Type = SVM_EVENT_EXCEPTION;
        }
        else
            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;

        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntInject);
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
    } /* if (interrupts can be dispatched) */

    return VINF_SUCCESS;
}

/**
 * Saves the host state.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0SaveHostState(PVM pVM)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}

/**
 * Loads the guest state.
 *
 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!!
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pCtx    Guest context
 */
HWACCMR0DECL(int) SVMR0LoadGuestState(PVM pVM, CPUMCTX *pCtx)
{
    RTGCUINTPTR val;
    SVM_VMCB *pVMCB;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Set up AMD SVM. */
    Assert(pVM->hwaccm.s.svm.fSupported);

    pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    {
        SVM_WRITE_SELREG(CS, cs);
        SVM_WRITE_SELREG(SS, ss);
        SVM_WRITE_SELREG(DS, ds);
        SVM_WRITE_SELREG(ES, es);
        SVM_WRITE_SELREG(FS, fs);
        SVM_WRITE_SELREG(GS, gs);
    }

    /* Guest CPU context: LDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
    {
        SVM_WRITE_SELREG(LDTR, ldtr);
    }

    /* Guest CPU context: TR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
    {
        SVM_WRITE_SELREG(TR, tr);
    }

    /* Guest CPU context: GDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
    {
        pVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
        pVMCB->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
    }

    /* Guest CPU context: IDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
    {
        pVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
        pVMCB->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
    }

    /*
     * Sysenter MSRs (unconditional)
     */
    pVMCB->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
    pVMCB->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
    pVMCB->guest.u64SysEnterESP = pCtx->SysEnter.esp;

    /* Control registers */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
    {
        val = pCtx->cr0;
        if (!CPUMIsGuestFPUStateActive(pVM))
        {
            Assert(!pVM->hwaccm.s.svm.fResumeVM);
            /* Always use #NM exceptions to load the FPU/XMM state on demand. */
            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
        }
        else
        {
            /** @todo check if we support the old style mess correctly. */
            if (!(val & X86_CR0_NE))
            {
                Log(("Forcing X86_CR0_NE!!!\n"));

                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
                if (!pVM->hwaccm.s.fFPUOldStyleOverride)
                {
                    pVMCB->ctrl.u32InterceptException |= RT_BIT(16);
                    pVM->hwaccm.s.fFPUOldStyleOverride = true;
                }
            }
            val |= X86_CR0_NE;  /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
        }
        /* Always enable caching. */
        val &= ~(X86_CR0_CD|X86_CR0_NW);

        /* Note: WP is not relevant in nested paging mode as we catch accesses on the (guest) physical level. */
        /* Note: In nested paging mode the guest is allowed to run with paging disabled; the guest physical to host physical translation will remain active. */
        if (!pVM->hwaccm.s.fNestedPaging)
        {
            val |= X86_CR0_PG;  /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
            val |= X86_CR0_WP;  /* Must set this as we rely on protecting various pages; supervisor writes must be caught. */
        }
        pVMCB->guest.u64CR0 = val;
    }
    /* CR2 as well */
    pVMCB->guest.u64CR2 = pCtx->cr2;

    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    {
        /* Save our shadow CR3 register. */
        if (pVM->hwaccm.s.fNestedPaging)
        {
            pVMCB->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
            Assert(pVMCB->ctrl.u64NestedPagingCR3);
            pVMCB->guest.u64CR3 = pCtx->cr3;
        }
        else
            pVMCB->guest.u64CR3 = PGMGetHyperCR3(pVM);
    }

    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
    {
        val = pCtx->cr4;
        if (!pVM->hwaccm.s.fNestedPaging)
        {
            switch (pVM->hwaccm.s.enmShadowMode)
            {
            case PGMMODE_REAL:
            case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
                AssertFailed();
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;

            case PGMMODE_32_BIT:        /* 32-bit paging. */
                break;

            case PGMMODE_PAE:           /* PAE paging. */
            case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
                /** @todo use normal 32 bits paging */
                val |= X86_CR4_PAE;
                break;

            case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
            case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                break;
#else
                AssertFailed();
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#endif

            default:                    /* shut up gcc */
                AssertFailed();
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
            }
        }
        pVMCB->guest.u64CR4 = val;
    }

    /* Debug registers. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
    {
        /** @todo DR0-6 */
        val  = pCtx->dr7;
        val &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));    /* must be zero */
        val |= 0x400;                                                   /* must be one */
#ifdef VBOX_STRICT
        val = 0x400;
#endif
        pVMCB->guest.u64DR7 = val;

        pVMCB->guest.u64DR6 = pCtx->dr6;
    }

    /* EIP, ESP and EFLAGS */
    pVMCB->guest.u64RIP    = pCtx->rip;
    pVMCB->guest.u64RSP    = pCtx->rsp;
    pVMCB->guest.u64RFlags = pCtx->eflags.u32;

    /* Set CPL */
    pVMCB->guest.u8CPL = pCtx->csHid.Attr.n.u2Dpl;
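    /* Note: deriving the CPL from the CS descriptor DPL works because for a
       non-conforming code segment the DPL equals the CPL; SVM treats the
       VMCB CPL field as authoritative while the guest runs. */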

    /* RAX/EAX too, as VMRUN uses RAX as an implicit parameter. */
    pVMCB->guest.u64RAX = pCtx->rax;

    /* vmrun will fail without MSR_K6_EFER_SVME. */
    pVMCB->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;

    /* 64-bit guest mode? */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    {
#if !defined(VBOX_WITH_64_BITS_GUESTS) || HC_ARCH_BITS != 64
        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#else
        pVM->hwaccm.s.svm.pfnVMRun = SVMVMRun64;
#endif
        /* Unconditionally update these as wrmsr might have changed them. (HWACCM_CHANGED_GUEST_SEGMENT_REGS will not be set) */
        pVMCB->guest.FS.u64Base = pCtx->fsHid.u64Base;
        pVMCB->guest.GS.u64Base = pCtx->gsHid.u64Base;
    }
    else
    {
        /* Filter out the MSR_K6_LME bit or else AMD-V expects amd64 shadow paging. */
        pVMCB->guest.u64EFER &= ~MSR_K6_EFER_LME;

        pVM->hwaccm.s.svm.pfnVMRun = SVMVMRun;
    }

    /* TSC offset. */
    if (TMCpuTickCanUseRealTSC(pVM, &pVMCB->ctrl.u64TSCOffset))
    {
        pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatTSCOffset);
    }
    else
    {
        pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatTSCIntercept);
    }
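    /* Note: when RDTSC is left unintercepted, the CPU adds u64TSCOffset to the
       value the guest reads, so the guest sees a consistent TSC without the
       cost of a #VMEXIT on every rdtsc. */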

    /* Sync the various MSRs for 64-bit mode. */
    pVMCB->guest.u64STAR         = pCtx->msrSTAR;           /* legacy syscall eip, cs & ss */
    pVMCB->guest.u64LSTAR        = pCtx->msrLSTAR;          /* 64-bit mode syscall rip */
    pVMCB->guest.u64CSTAR        = pCtx->msrCSTAR;          /* compatibility mode syscall rip */
    pVMCB->guest.u64SFMASK       = pCtx->msrSFMASK;         /* syscall flag mask */
    pVMCB->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;   /* swapgs exchange value */

#ifdef DEBUG
    /* Intercept X86_XCPT_DB if stepping is enabled */
    if (DBGFIsStepping(pVM))
        pVMCB->ctrl.u32InterceptException |=  RT_BIT(1);
    else
        pVMCB->ctrl.u32InterceptException &= ~RT_BIT(1);
#endif

    /* Done. */
    pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;

    return VINF_SUCCESS;
}


/**
 * Runs guest code in an SVM VM.
 *
 * @todo This can be much more efficient, when we only sync that which has actually changed. (this is the first attempt only)
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pCtx    Guest context
 * @param   pCpu    CPU info struct
 */
HWACCMR0DECL(int) SVMR0RunGuestCode(PVM pVM, CPUMCTX *pCtx, PHWACCM_CPUINFO pCpu)
{
    int         rc = VINF_SUCCESS;
    uint64_t    exitCode = (uint64_t)SVM_EXIT_INVALID;
    SVM_VMCB   *pVMCB;
    bool        fGuestStateSynced = false;
    unsigned    cResume = 0;

    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatEntry, x);

    AssertReturn(pCpu->fConfigured, VERR_EM_INTERNAL_ERROR);

    pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* We can jump to this point to resume execution after determining that a VM exit is innocent.
     */
ResumeExecution:
    /* Safety precaution; looping for too long here can have a very bad effect on the host. */
    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
    {
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitMaxResume);
        rc = VINF_EM_RAW_INTERRUPT;
        goto end;
    }

    /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
    if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
    {
        Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", pCtx->rip, EMGetInhibitInterruptsPC(pVM)));
        if (pCtx->rip != EMGetInhibitInterruptsPC(pVM))
        {
            /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
             * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
             * force a world switch again, possibly allowing a guest interrupt to be dispatched in the process. This could
             * break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
             */
            VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
            /* Irq inhibition is no longer active; clear the corresponding SVM state. */
            pVMCB->ctrl.u64IntShadow = 0;
        }
    }
    else
    {
        /* Irq inhibition is no longer active; clear the corresponding SVM state. */
        pVMCB->ctrl.u64IntShadow = 0;
    }

    /* Check for pending actions that force us to go back to ring 3. */
#ifdef DEBUG
    /* Intercept X86_XCPT_DB if stepping is enabled */
    if (!DBGFIsStepping(pVM))
#endif
    {
        if (VM_FF_ISPENDING(pVM, VM_FF_TO_R3 | VM_FF_TIMER))
        {
            VM_FF_CLEAR(pVM, VM_FF_TO_R3);
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchToR3);
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
            rc = VINF_EM_RAW_TO_R3;
            goto end;
        }
    }

    /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
    if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        rc = VINF_EM_PENDING_REQUEST;
        goto end;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    /* Note: *after* the VM_FF_INHIBIT_INTERRUPTS check!!! */
    rc = SVMR0CheckPendingInterrupt(pVM, pVMCB, pCtx);
    if (VBOX_FAILURE(rc))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }

    /* Load the guest state. */
    rc = SVMR0LoadGuestState(pVM, pCtx);
    if (rc != VINF_SUCCESS)
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }
    fGuestStateSynced = true;

    /* TPR caching using CR8 is only available in 64-bit mode. */
    /* Note the 32-bit exception for AMD (X86_CPUID_AMD_FEATURE_ECX_CR8L), but that appears to be missing in Intel CPUs. */
    /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock). */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    {
        /* TPR caching in CR8 */
        uint8_t u8TPR;
        int rc = PDMApicGetTPR(pVM, &u8TPR);
        AssertRC(rc);
        pCtx->cr8 = u8TPR;
        pVMCB->ctrl.IntCtrl.n.u8VTPR = u8TPR;
    }

    /* All done! Let's start VM execution. */
    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);

    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
    pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hwaccm.s.fNestedPaging;

    /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
    if (!pVM->hwaccm.s.svm.fResumeVM)
    {
        if (    pVM->hwaccm.s.svm.idLastCpu != pCpu->idCpu
                /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
            ||  pVM->hwaccm.s.svm.cTLBFlushes != pCpu->cTLBFlushes)
        {
            /* Force a TLB flush on VM entry. */
            pVM->hwaccm.s.svm.fForceTLBFlush = true;
        }
        pVM->hwaccm.s.svm.idLastCpu = pCpu->idCpu;
    }
    else
        Assert(pVM->hwaccm.s.svm.idLastCpu == pCpu->idCpu);

    /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
    if (    pVM->hwaccm.s.svm.fForceTLBFlush
        && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
    {
        if (++pCpu->uCurrentASID >= pVM->hwaccm.s.svm.u32MaxASID)
        {
            pCpu->uCurrentASID = 1;                 /* start at 1; host uses 0 */
            pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = 1;   /* wrap around; flush TLB */
            pCpu->cTLBFlushes++;
        }
        else
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);

        pVM->hwaccm.s.svm.cTLBFlushes = pCpu->cTLBFlushes;
    }
    else
    {
        /* We never increase uCurrentASID in the fAlwaysFlushTLB (erratum 170) case. */
        if (!pCpu->uCurrentASID)
            pCpu->uCurrentASID = 1;

        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVM->hwaccm.s.svm.fForceTLBFlush;
    }

    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.svm.u32MaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    pVMCB->ctrl.TLBCtrl.n.u32ASID = pCpu->uCurrentASID;
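    /* Note: tagging guest TLB entries with the current ASID is what lets us
       avoid full flushes; moving to a fresh ASID makes the stale entries
       unreachable without disturbing translations belonging to other VMs. */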

#ifdef VBOX_WITH_STATISTICS
    if (pVMCB->ctrl.TLBCtrl.n.u1TLBFlush)
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBWorldSwitch);
    else
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatNoFlushTLBWorldSwitch);
#endif

    /* In case we execute a goto ResumeExecution later on. */
    pVM->hwaccm.s.svm.fResumeVM      = true;
    pVM->hwaccm.s.svm.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;

    Assert(sizeof(pVM->hwaccm.s.svm.pVMCBPhys) == 8);
    Assert(pVMCB->ctrl.u32InterceptCtrl2 == (  SVM_CTRL2_INTERCEPT_VMRUN         /* required */
                                             | SVM_CTRL2_INTERCEPT_VMMCALL
                                             | SVM_CTRL2_INTERCEPT_VMLOAD
                                             | SVM_CTRL2_INTERCEPT_VMSAVE
                                             | SVM_CTRL2_INTERCEPT_STGI
                                             | SVM_CTRL2_INTERCEPT_CLGI
                                             | SVM_CTRL2_INTERCEPT_SKINIT
                                             | SVM_CTRL2_INTERCEPT_RDTSCP        /* AMD only; we don't support this one */
                                             | SVM_CTRL2_INTERCEPT_WBINVD
                                             | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND  /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
                                            ));
    Assert(pVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
    Assert(pVMCB->ctrl.u64IOPMPhysAddr  == pVM->hwaccm.s.svm.pIOBitmapPhys);
    Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVM->hwaccm.s.svm.pMSRBitmapPhys);
    Assert(pVMCB->ctrl.u64LBRVirt == 0);

    pVM->hwaccm.s.svm.pfnVMRun(pVM->hwaccm.s.svm.pVMCBHostPhys, pVM->hwaccm.s.svm.pVMCBPhys, pCtx);
    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatInGC, x);

    /**
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING 3 *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     */

    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatExit, x);

    /* Reason for the VM exit */
    exitCode = pVMCB->ctrl.u64ExitCode;

    if (exitCode == (uint64_t)SVM_EXIT_INVALID)     /* Invalid guest state. */
    {
        HWACCMDumpRegs(pCtx);
#ifdef DEBUG
        Log(("ctrl.u16InterceptRdCRx            %x\n",      pVMCB->ctrl.u16InterceptRdCRx));
        Log(("ctrl.u16InterceptWrCRx            %x\n",      pVMCB->ctrl.u16InterceptWrCRx));
        Log(("ctrl.u16InterceptRdDRx            %x\n",      pVMCB->ctrl.u16InterceptRdDRx));
        Log(("ctrl.u16InterceptWrDRx            %x\n",      pVMCB->ctrl.u16InterceptWrDRx));
        Log(("ctrl.u32InterceptException        %x\n",      pVMCB->ctrl.u32InterceptException));
        Log(("ctrl.u32InterceptCtrl1            %x\n",      pVMCB->ctrl.u32InterceptCtrl1));
        Log(("ctrl.u32InterceptCtrl2            %x\n",      pVMCB->ctrl.u32InterceptCtrl2));
        Log(("ctrl.u64IOPMPhysAddr              %VX64\n",   pVMCB->ctrl.u64IOPMPhysAddr));
        Log(("ctrl.u64MSRPMPhysAddr             %VX64\n",   pVMCB->ctrl.u64MSRPMPhysAddr));
        Log(("ctrl.u64TSCOffset                 %VX64\n",   pVMCB->ctrl.u64TSCOffset));

        Log(("ctrl.TLBCtrl.u32ASID              %x\n",      pVMCB->ctrl.TLBCtrl.n.u32ASID));
        Log(("ctrl.TLBCtrl.u1TLBFlush           %x\n",      pVMCB->ctrl.TLBCtrl.n.u1TLBFlush));
        Log(("ctrl.TLBCtrl.u7Reserved           %x\n",      pVMCB->ctrl.TLBCtrl.n.u7Reserved));
        Log(("ctrl.TLBCtrl.u24Reserved          %x\n",      pVMCB->ctrl.TLBCtrl.n.u24Reserved));

        Log(("ctrl.IntCtrl.u8VTPR               %x\n",      pVMCB->ctrl.IntCtrl.n.u8VTPR));
        Log(("ctrl.IntCtrl.u1VIrqValid          %x\n",      pVMCB->ctrl.IntCtrl.n.u1VIrqValid));
        Log(("ctrl.IntCtrl.u7Reserved           %x\n",      pVMCB->ctrl.IntCtrl.n.u7Reserved));
        Log(("ctrl.IntCtrl.u4VIrqPriority       %x\n",      pVMCB->ctrl.IntCtrl.n.u4VIrqPriority));
        Log(("ctrl.IntCtrl.u1IgnoreTPR          %x\n",      pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR));
        Log(("ctrl.IntCtrl.u3Reserved           %x\n",      pVMCB->ctrl.IntCtrl.n.u3Reserved));
        Log(("ctrl.IntCtrl.u1VIrqMasking        %x\n",      pVMCB->ctrl.IntCtrl.n.u1VIrqMasking));
        Log(("ctrl.IntCtrl.u7Reserved2          %x\n",      pVMCB->ctrl.IntCtrl.n.u7Reserved2));
        Log(("ctrl.IntCtrl.u8VIrqVector         %x\n",      pVMCB->ctrl.IntCtrl.n.u8VIrqVector));
        Log(("ctrl.IntCtrl.u24Reserved          %x\n",      pVMCB->ctrl.IntCtrl.n.u24Reserved));

        Log(("ctrl.u64IntShadow                 %VX64\n",   pVMCB->ctrl.u64IntShadow));
        Log(("ctrl.u64ExitCode                  %VX64\n",   pVMCB->ctrl.u64ExitCode));
        Log(("ctrl.u64ExitInfo1                 %VX64\n",   pVMCB->ctrl.u64ExitInfo1));
        Log(("ctrl.u64ExitInfo2                 %VX64\n",   pVMCB->ctrl.u64ExitInfo2));
        Log(("ctrl.ExitIntInfo.u8Vector         %x\n",      pVMCB->ctrl.ExitIntInfo.n.u8Vector));
        Log(("ctrl.ExitIntInfo.u3Type           %x\n",      pVMCB->ctrl.ExitIntInfo.n.u3Type));
        Log(("ctrl.ExitIntInfo.u1ErrorCodeValid %x\n",      pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
        Log(("ctrl.ExitIntInfo.u19Reserved      %x\n",      pVMCB->ctrl.ExitIntInfo.n.u19Reserved));
        Log(("ctrl.ExitIntInfo.u1Valid          %x\n",      pVMCB->ctrl.ExitIntInfo.n.u1Valid));
        Log(("ctrl.ExitIntInfo.u32ErrorCode     %x\n",      pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode));
        Log(("ctrl.NestedPaging                 %VX64\n",   pVMCB->ctrl.NestedPaging.au64));
        Log(("ctrl.EventInject.u8Vector         %x\n",      pVMCB->ctrl.EventInject.n.u8Vector));
        Log(("ctrl.EventInject.u3Type           %x\n",      pVMCB->ctrl.EventInject.n.u3Type));
        Log(("ctrl.EventInject.u1ErrorCodeValid %x\n",      pVMCB->ctrl.EventInject.n.u1ErrorCodeValid));
        Log(("ctrl.EventInject.u19Reserved      %x\n",      pVMCB->ctrl.EventInject.n.u19Reserved));
        Log(("ctrl.EventInject.u1Valid          %x\n",      pVMCB->ctrl.EventInject.n.u1Valid));
        Log(("ctrl.EventInject.u32ErrorCode     %x\n",      pVMCB->ctrl.EventInject.n.u32ErrorCode));

        Log(("ctrl.u64NestedPagingCR3           %VX64\n",   pVMCB->ctrl.u64NestedPagingCR3));
        Log(("ctrl.u64LBRVirt                   %VX64\n",   pVMCB->ctrl.u64LBRVirt));

        Log(("guest.CS.u16Sel                   %04X\n",    pVMCB->guest.CS.u16Sel));
        Log(("guest.CS.u16Attr                  %04X\n",    pVMCB->guest.CS.u16Attr));
        Log(("guest.CS.u32Limit                 %X\n",      pVMCB->guest.CS.u32Limit));
        Log(("guest.CS.u64Base                  %VX64\n",   pVMCB->guest.CS.u64Base));
        Log(("guest.DS.u16Sel                   %04X\n",    pVMCB->guest.DS.u16Sel));
        Log(("guest.DS.u16Attr                  %04X\n",    pVMCB->guest.DS.u16Attr));
        Log(("guest.DS.u32Limit                 %X\n",      pVMCB->guest.DS.u32Limit));
        Log(("guest.DS.u64Base                  %VX64\n",   pVMCB->guest.DS.u64Base));
        Log(("guest.ES.u16Sel                   %04X\n",    pVMCB->guest.ES.u16Sel));
        Log(("guest.ES.u16Attr                  %04X\n",    pVMCB->guest.ES.u16Attr));
        Log(("guest.ES.u32Limit                 %X\n",      pVMCB->guest.ES.u32Limit));
        Log(("guest.ES.u64Base                  %VX64\n",   pVMCB->guest.ES.u64Base));
        Log(("guest.FS.u16Sel                   %04X\n",    pVMCB->guest.FS.u16Sel));
        Log(("guest.FS.u16Attr                  %04X\n",    pVMCB->guest.FS.u16Attr));
        Log(("guest.FS.u32Limit                 %X\n",      pVMCB->guest.FS.u32Limit));
        Log(("guest.FS.u64Base                  %VX64\n",   pVMCB->guest.FS.u64Base));
        Log(("guest.GS.u16Sel                   %04X\n",    pVMCB->guest.GS.u16Sel));
        Log(("guest.GS.u16Attr                  %04X\n",    pVMCB->guest.GS.u16Attr));
        Log(("guest.GS.u32Limit                 %X\n",      pVMCB->guest.GS.u32Limit));
        Log(("guest.GS.u64Base                  %VX64\n",   pVMCB->guest.GS.u64Base));

        Log(("guest.GDTR.u32Limit               %X\n",      pVMCB->guest.GDTR.u32Limit));
        Log(("guest.GDTR.u64Base                %VX64\n",   pVMCB->guest.GDTR.u64Base));

        Log(("guest.LDTR.u16Sel                 %04X\n",    pVMCB->guest.LDTR.u16Sel));
        Log(("guest.LDTR.u16Attr                %04X\n",    pVMCB->guest.LDTR.u16Attr));
        Log(("guest.LDTR.u32Limit               %X\n",      pVMCB->guest.LDTR.u32Limit));
        Log(("guest.LDTR.u64Base                %VX64\n",   pVMCB->guest.LDTR.u64Base));

        Log(("guest.IDTR.u32Limit               %X\n",      pVMCB->guest.IDTR.u32Limit));
        Log(("guest.IDTR.u64Base                %VX64\n",   pVMCB->guest.IDTR.u64Base));

        Log(("guest.TR.u16Sel                   %04X\n",    pVMCB->guest.TR.u16Sel));
        Log(("guest.TR.u16Attr                  %04X\n",    pVMCB->guest.TR.u16Attr));
        Log(("guest.TR.u32Limit                 %X\n",      pVMCB->guest.TR.u32Limit));
        Log(("guest.TR.u64Base                  %VX64\n",   pVMCB->guest.TR.u64Base));

        Log(("guest.u8CPL                       %X\n",      pVMCB->guest.u8CPL));
        Log(("guest.u64CR0                      %VX64\n",   pVMCB->guest.u64CR0));
        Log(("guest.u64CR2                      %VX64\n",   pVMCB->guest.u64CR2));
        Log(("guest.u64CR3                      %VX64\n",   pVMCB->guest.u64CR3));
        Log(("guest.u64CR4                      %VX64\n",   pVMCB->guest.u64CR4));
        Log(("guest.u64DR6                      %VX64\n",   pVMCB->guest.u64DR6));
        Log(("guest.u64DR7                      %VX64\n",   pVMCB->guest.u64DR7));

        Log(("guest.u64RIP                      %VX64\n",   pVMCB->guest.u64RIP));
        Log(("guest.u64RSP                      %VX64\n",   pVMCB->guest.u64RSP));
        Log(("guest.u64RAX                      %VX64\n",   pVMCB->guest.u64RAX));
        Log(("guest.u64RFlags                   %VX64\n",   pVMCB->guest.u64RFlags));

        Log(("guest.u64SysEnterCS               %VX64\n",   pVMCB->guest.u64SysEnterCS));
        Log(("guest.u64SysEnterEIP              %VX64\n",   pVMCB->guest.u64SysEnterEIP));
        Log(("guest.u64SysEnterESP              %VX64\n",   pVMCB->guest.u64SysEnterESP));

        Log(("guest.u64EFER                     %VX64\n",   pVMCB->guest.u64EFER));
        Log(("guest.u64STAR                     %VX64\n",   pVMCB->guest.u64STAR));
        Log(("guest.u64LSTAR                    %VX64\n",   pVMCB->guest.u64LSTAR));
        Log(("guest.u64CSTAR                    %VX64\n",   pVMCB->guest.u64CSTAR));
        Log(("guest.u64SFMASK                   %VX64\n",   pVMCB->guest.u64SFMASK));
        Log(("guest.u64KernelGSBase             %VX64\n",   pVMCB->guest.u64KernelGSBase));
        Log(("guest.u64GPAT                     %VX64\n",   pVMCB->guest.u64GPAT));
        Log(("guest.u64DBGCTL                   %VX64\n",   pVMCB->guest.u64DBGCTL));
        Log(("guest.u64BR_FROM                  %VX64\n",   pVMCB->guest.u64BR_FROM));
        Log(("guest.u64BR_TO                    %VX64\n",   pVMCB->guest.u64BR_TO));
        Log(("guest.u64LASTEXCPFROM             %VX64\n",   pVMCB->guest.u64LASTEXCPFROM));
        Log(("guest.u64LASTEXCPTO               %VX64\n",   pVMCB->guest.u64LASTEXCPTO));

#endif
        rc = VERR_SVM_UNABLE_TO_START_VM;
        goto end;
    }

    /* Let's first sync back EIP, ESP, and EFLAGS. */
    pCtx->rip        = pVMCB->guest.u64RIP;
    pCtx->rsp        = pVMCB->guest.u64RSP;
    pCtx->eflags.u32 = pVMCB->guest.u64RFlags;
    /* eax is saved/restored across the vmrun instruction */
    pCtx->rax        = pVMCB->guest.u64RAX;

    pCtx->msrKERNELGSBASE = pVMCB->guest.u64KernelGSBase;   /* swapgs exchange value */

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    SVM_READ_SELREG(SS, ss);
    SVM_READ_SELREG(CS, cs);
    SVM_READ_SELREG(DS, ds);
    SVM_READ_SELREG(ES, es);
    SVM_READ_SELREG(FS, fs);
    SVM_READ_SELREG(GS, gs);

    /* Note: no reason to sync back the CRx and DRx registers. They can't be changed by the guest. */
    /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */
    if (    pVM->hwaccm.s.fNestedPaging
        &&  pCtx->cr3 != pVMCB->guest.u64CR3)
    {
        CPUMSetGuestCR3(pVM, pVMCB->guest.u64CR3);
        PGMUpdateCR3(pVM, pVMCB->guest.u64CR3);
    }

    /* Note: NOW IT'S SAFE FOR LOGGING! */

    /* Take care of instruction fusing (sti, mov ss). */
    if (pVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
    {
        Log(("uInterruptState %x eip=%VGv\n", pVMCB->ctrl.u64IntShadow, pCtx->rip));
        EMSetInhibitInterruptsPC(pVM, pCtx->rip);
    }
    else
        VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);

    Log2(("exitCode = %x\n", exitCode));

    /* Sync back the debug registers. */
    /** @todo Implement debug registers correctly. */
    pCtx->dr6 = pVMCB->guest.u64DR6;
    pCtx->dr7 = pVMCB->guest.u64DR7;

    /* Update the APIC if the cached TPR value has changed. */
    if (pVMCB->ctrl.IntCtrl.n.u8VTPR != pCtx->cr8)
    {
        rc = PDMApicSetTPR(pVM, pVMCB->ctrl.IntCtrl.n.u8VTPR);
        AssertRC(rc);
        pCtx->cr8 = pVMCB->ctrl.IntCtrl.n.u8VTPR;
    }
    pVMCB->ctrl.IntCtrl.n.u8VTPR = pCtx->cr8;

    /* Check if an injected event was interrupted prematurely. */
    pVM->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
    if (    pVMCB->ctrl.ExitIntInfo.n.u1Valid
        &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */)
    {
        Log(("Pending inject %VX64 at %VGv exit=%08x\n", pVM->hwaccm.s.Event.intInfo, pCtx->rip, exitCode));

#ifdef LOG_ENABLED
        SVM_EVENT Event;
        Event.au64[0] = pVM->hwaccm.s.Event.intInfo;

        if (    exitCode == SVM_EXIT_EXCEPTION_E
            &&  Event.n.u8Vector == 0xE)
        {
            Log(("Double fault!\n"));
        }
#endif

        pVM->hwaccm.s.Event.fPending = true;
        /* Error code present? (redundant) */
        if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
        {
            pVM->hwaccm.s.Event.errCode = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
        }
        else
            pVM->hwaccm.s.Event.errCode = 0;
    }
#ifdef VBOX_WITH_STATISTICS
    if (exitCode == SVM_EXIT_NPF)
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitReasonNPF);
    else
        STAM_COUNTER_INC(&pVM->hwaccm.s.pStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);
#endif

    /* Deal with the reason of the VM exit. */
    switch (exitCode)
    {
    case SVM_EXIT_EXCEPTION_0:  case SVM_EXIT_EXCEPTION_1:  case SVM_EXIT_EXCEPTION_2:  case SVM_EXIT_EXCEPTION_3:
    case SVM_EXIT_EXCEPTION_4:  case SVM_EXIT_EXCEPTION_5:  case SVM_EXIT_EXCEPTION_6:  case SVM_EXIT_EXCEPTION_7:
    case SVM_EXIT_EXCEPTION_8:  case SVM_EXIT_EXCEPTION_9:  case SVM_EXIT_EXCEPTION_A:  case SVM_EXIT_EXCEPTION_B:
    case SVM_EXIT_EXCEPTION_C:  case SVM_EXIT_EXCEPTION_D:  case SVM_EXIT_EXCEPTION_E:  case SVM_EXIT_EXCEPTION_F:
    case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11: case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13:
    case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17:
    case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19: case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B:
    case SVM_EXIT_EXCEPTION_1C: case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
    {
        /* Pending trap. */
        SVM_EVENT Event;
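        /* Note: the SVM exception exit codes map 1:1 onto the x86 exception
           vectors, so subtracting SVM_EXIT_EXCEPTION_0 recovers the vector. */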
1182 uint32_t vector = exitCode - SVM_EXIT_EXCEPTION_0;
1183
1184 Log2(("Hardware/software interrupt %d\n", vector));
1185 switch (vector)
1186 {
1187#ifdef DEBUG
1188 case X86_XCPT_DB:
1189 rc = DBGFR0Trap01Handler(pVM, CPUMCTX2CORE(pCtx), pVMCB->guest.u64DR6);
1190 Assert(rc != VINF_EM_RAW_GUEST_TRAP);
1191 break;
1192#endif
1193
1194 case X86_XCPT_NM:
1195 {
1196 uint32_t oldCR0;
1197
1198 Log(("#NM fault at %VGv\n", pCtx->rip));
1199
1200 /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
1201 oldCR0 = ASMGetCR0();
1202 /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
1203 rc = CPUMHandleLazyFPU(pVM);
1204 if (rc == VINF_SUCCESS)
1205 {
1206 Assert(CPUMIsGuestFPUStateActive(pVM));
1207
1208 /* CPUMHandleLazyFPU could have changed CR0; restore it. */
1209 ASMSetCR0(oldCR0);
1210
1211 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowNM);
1212
1213 /* Continue execution. */
1214 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1215 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1216
1217 goto ResumeExecution;
1218 }
1219
1220 Log(("Forward #NM fault to the guest\n"));
1221 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNM);
1222
1223 Event.au64[0] = 0;
1224 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1225 Event.n.u1Valid = 1;
1226 Event.n.u8Vector = X86_XCPT_NM;
1227
1228 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1229 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1230 goto ResumeExecution;
1231 }
1232
1233 case X86_XCPT_PF: /* Page fault */
1234 {
1235 uint32_t errCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1236 RTGCUINTPTR uFaultAddress = pVMCB->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */
1237
1238#ifdef DEBUG
1239 if (pVM->hwaccm.s.fNestedPaging)
1240 { /* A genuine pagefault.
1241 * Forward the trap to the guest by injecting the exception and resuming execution.
1242 */
1243 Log(("Guest page fault at %VGv cr2=%VGv error code %x rsp=%VGv\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode, (RTGCPTR)pCtx->rsp));
1244 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
1245
1246 /* Now we must update CR2. */
1247 pCtx->cr2 = uFaultAddress;
1248
1249 Event.au64[0] = 0;
1250 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1251 Event.n.u1Valid = 1;
1252 Event.n.u8Vector = X86_XCPT_PF;
1253 Event.n.u1ErrorCodeValid = 1;
1254 Event.n.u32ErrorCode = errCode;
1255
1256 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1257
1258 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1259 goto ResumeExecution;
1260 }
1261#endif
1262 Assert(!pVM->hwaccm.s.fNestedPaging);
1263
1264 Log2(("Page fault at %VGv cr2=%VGv error code %x\n", pCtx->rip, uFaultAddress, errCode));
1265 /* Exit qualification contains the linear address of the page fault. */
1266 TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
1267 TRPMSetErrorCode(pVM, errCode);
1268 TRPMSetFaultAddress(pVM, uFaultAddress);
1269
1270 /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
1271 rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
1272 Log2(("PGMTrap0eHandler %VGv returned %Vrc\n", pCtx->rip, rc));
1273 if (rc == VINF_SUCCESS)
1274 { /* We've successfully synced our shadow pages, so let's just continue execution. */
1275 Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", pCtx->rip, uFaultAddress, errCode));
1276 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
1277
1278 TRPMResetTrap(pVM);
1279
1280 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1281 goto ResumeExecution;
1282 }
1283 else
1284 if (rc == VINF_EM_RAW_GUEST_TRAP)
1285 { /* A genuine pagefault.
1286 * Forward the trap to the guest by injecting the exception and resuming execution.
1287 */
1288 Log2(("Forward page fault to the guest\n"));
1289 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
1290 /* The error code might have been changed. */
1291 errCode = TRPMGetErrorCode(pVM);
1292
1293 TRPMResetTrap(pVM);
1294
1295 /* Now we must update CR2. */
1296 pCtx->cr2 = uFaultAddress;
1297
1298 Event.au64[0] = 0;
1299 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1300 Event.n.u1Valid = 1;
1301 Event.n.u8Vector = X86_XCPT_PF;
1302 Event.n.u1ErrorCodeValid = 1;
1303 Event.n.u32ErrorCode = errCode;
1304
1305 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1306
1307 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1308 goto ResumeExecution;
1309 }
1310#ifdef VBOX_STRICT
1311 if (rc != VINF_EM_RAW_EMULATE_INSTR)
1312 LogFlow(("PGMTrap0eHandler failed with %d\n", rc));
1313#endif
1314 /* Need to go back to the recompiler to emulate the instruction. */
1315 TRPMResetTrap(pVM);
1316 break;
1317 }
1318
1319 case X86_XCPT_MF: /* Floating point exception. */
1320 {
1321 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestMF);
1322 if (!(pCtx->cr0 & X86_CR0_NE))
1323 {
1324 /* old style FPU error reporting needs some extra work. */
1325 /** @todo don't fall back to the recompiler, but do it manually. */
1326 rc = VINF_EM_RAW_EMULATE_INSTR;
1327 break;
1328 }
1329 Log(("Trap %x at %VGv\n", vector, pCtx->rip));
1330
1331 Event.au64[0] = 0;
1332 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1333 Event.n.u1Valid = 1;
1334 Event.n.u8Vector = X86_XCPT_MF;
1335
1336 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1337
1338 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1339 goto ResumeExecution;
1340 }
1341
1342#ifdef VBOX_STRICT
1343 case X86_XCPT_GP: /* General protection failure exception.*/
1344 case X86_XCPT_UD: /* Unknown opcode exception. */
1345 case X86_XCPT_DE: /* Debug exception. */
1346 case X86_XCPT_SS: /* Stack segment exception. */
1347 case X86_XCPT_NP: /* Segment not present exception. */
1348 {
1349 Event.au64[0] = 0;
1350 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1351 Event.n.u1Valid = 1;
1352 Event.n.u8Vector = vector;
1353
1354 switch(vector)
1355 {
1356 case X86_XCPT_GP:
1357 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestGP);
1358 Event.n.u1ErrorCodeValid = 1;
1359 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1360 break;
1361 case X86_XCPT_DE:
1362 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestDE);
1363 break;
1364 case X86_XCPT_UD:
1365 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestUD);
1366 break;
1367 case X86_XCPT_SS:
1368 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestSS);
1369 Event.n.u1ErrorCodeValid = 1;
1370 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1371 break;
1372 case X86_XCPT_NP:
1373 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNP);
1374 Event.n.u1ErrorCodeValid = 1;
1375 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1376 break;
1377 }
1378 Log(("Trap %x at %VGv esi=%x\n", vector, pCtx->rip, pCtx->esi));
1379 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1380
1381 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1382 goto ResumeExecution;
1383 }
1384#endif
1385 default:
1386 AssertMsgFailed(("Unexpected vm-exit caused by exception %x\n", vector));
1387 rc = VERR_EM_INTERNAL_ERROR;
1388 break;
1389
1390 } /* switch (vector) */
1391 break;
1392 }
1393
1394 case SVM_EXIT_NPF:
1395 {
1396 /* EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault. */
1397 uint32_t errCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1398 RTGCPHYS uFaultAddress = pVMCB->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */
1399
1400 Assert(pVM->hwaccm.s.fNestedPaging);
1401 Log(("Nested page fault at %VGv cr2=%VGp error code %x\n", pCtx->rip, uFaultAddress, errCode));
1402 /* Exit qualification contains the linear address of the page fault. */
1403 TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
1404 TRPMSetErrorCode(pVM, errCode);
1405 TRPMSetFaultAddress(pVM, uFaultAddress);
1406
1407 /* Handle the pagefault trap for the nested shadow table. */
1408 rc = PGMR0Trap0eHandlerNestedPaging(pVM, PGMGetHostMode(pVM), errCode, CPUMCTX2CORE(pCtx), uFaultAddress);
1409 Log2(("PGMR0Trap0eHandlerNestedPaging %VGv returned %Vrc\n", pCtx->rip, rc));
1410 if (rc == VINF_SUCCESS)
1411 { /* We've successfully synced our shadow pages, so let's just continue execution. */
1412 Log2(("Shadow page fault at %VGv cr2=%VGp error code %x\n", pCtx->rip, uFaultAddress, errCode));
1413 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
1414
1415 TRPMResetTrap(pVM);
1416
1417 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1418 goto ResumeExecution;
1419 }
1420
1421#ifdef VBOX_STRICT
1422 if (rc != VINF_EM_RAW_EMULATE_INSTR)
1423 LogFlow(("PGMTrap0eHandlerNestedPaging failed with %d\n", rc));
1424#endif
1425 /* Need to go back to the recompiler to emulate the instruction. */
1426 TRPMResetTrap(pVM);
1427 break;
1428 }
1429
1430 case SVM_EXIT_VINTR:
1431 /* A virtual interrupt is about to be delivered, which means IF=1. */
1432 Log(("SVM_EXIT_VINTR IF=%d\n", pCtx->eflags.Bits.u1IF));
1433 pVMCB->ctrl.IntCtrl.n.u1VIrqValid = 0;
1434 pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR = 0;
1435 pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0;
1436 goto ResumeExecution;
1437
1438 case SVM_EXIT_FERR_FREEZE:
1439 case SVM_EXIT_INTR:
1440 case SVM_EXIT_NMI:
1441 case SVM_EXIT_SMI:
1442 case SVM_EXIT_INIT:
1443 /* External interrupt; leave to allow it to be dispatched again. */
1444 rc = VINF_EM_RAW_INTERRUPT;
1445 break;
1446
1447 case SVM_EXIT_WBINVD:
1448 case SVM_EXIT_INVD: /* Guest software attempted to execute INVD. */
1449 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvd);
1450 /* Skip instruction and continue directly. */
1451 pCtx->rip += 2; /** @note hardcoded opcode size! */
1452 /* Continue execution.*/
1453 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1454 goto ResumeExecution;
1455
1456 case SVM_EXIT_CPUID: /* Guest software attempted to execute CPUID. */
1457 {
1458 Log2(("SVM: Cpuid at %VGv for %x\n", pCtx->rip, pCtx->eax));
1459 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCpuid);
1460 rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
1461 if (rc == VINF_SUCCESS)
1462 {
1463 /* Update EIP and continue execution. */
1464 pCtx->rip += 2; /** @note hardcoded opcode size! */
1465 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1466 goto ResumeExecution;
1467 }
1468 AssertMsgFailed(("EMU: cpuid failed with %Vrc\n", rc));
1469 rc = VINF_EM_RAW_EMULATE_INSTR;
1470 break;
1471 }
1472
1473 case SVM_EXIT_RDTSC: /* Guest software attempted to execute RDTSC. */
1474 {
1475 Log2(("SVM: Rdtsc\n"));
1476 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitRdtsc);
1477 rc = EMInterpretRdtsc(pVM, CPUMCTX2CORE(pCtx));
1478 if (rc == VINF_SUCCESS)
1479 {
1480 /* Update EIP and continue execution. */
1481 pCtx->rip += 2; /** @note hardcoded opcode size! */
1482 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1483 goto ResumeExecution;
1484 }
1485 AssertMsgFailed(("EMU: rdtsc failed with %Vrc\n", rc));
1486 rc = VINF_EM_RAW_EMULATE_INSTR;
1487 break;
1488 }
1489
1490 case SVM_EXIT_INVLPG: /* Guest software attempted to execute INVPG. */
1491 {
1492 Log2(("SVM: invlpg\n"));
1493 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvpg);
1494
1495 Assert(!pVM->hwaccm.s.fNestedPaging);
1496
1497 /* Truly a pita. Why can't SVM give the same information as VT-x? */
1498 rc = SVMR0InterpretInvpg(pVM, CPUMCTX2CORE(pCtx), pVMCB->ctrl.TLBCtrl.n.u32ASID);
1499 if (rc == VINF_SUCCESS)
1500 {
1501 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushPageInvlpg);
1502 goto ResumeExecution; /* eip already updated */
1503 }
1504 break;
1505 }
1506
1507 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:
1508 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:
1509 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
1510 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
1511 {
1512 uint32_t cbSize;
1513
1514        Log2(("SVM: %VGv mov cr%d, x\n", pCtx->rip, exitCode - SVM_EXIT_WRITE_CR0));
1515 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxWrite);
1516 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1517
1518 switch (exitCode - SVM_EXIT_WRITE_CR0)
1519 {
1520 case 0:
1521 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1522 break;
1523 case 2:
1524 break;
1525 case 3:
1526 Assert(!pVM->hwaccm.s.fNestedPaging);
1527 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
1528 break;
1529 case 4:
1530 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
1531 break;
1532 case 8:
1533 AssertFailed(); /* shouldn't come here anymore */
1534 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR8;
1535 break;
1536 default:
1537 AssertFailed();
1538 }
1539 /* Check if a sync operation is pending. */
1540 if ( rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
1541 && VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
1542 {
1543 rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
1544 AssertRC(rc);
1545
1546 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBCRxChange);
1547
1548 /* Must be set by PGMSyncCR3 */
1549 Assert(PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVM->hwaccm.s.svm.fForceTLBFlush);
1550 }
1551 if (rc == VINF_SUCCESS)
1552 {
1553 /* EIP has been updated already. */
1554
1555 /* Only resume if successful. */
1556 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1557 goto ResumeExecution;
1558 }
1559 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1560 break;
1561 }
1562
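    /* Editor's note: setting HWACCM_CHANGED_GUEST_CR3 above defers the VMCB
     * update to the guest-state loading done before the next VMRUN. A sketch
     * of that deferred step; PGMGetHyperCR3 as the source of the shadow CR3
     * is an assumption here:
     */
#if 0
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    {
        /* Without nested paging the guest runs on the shadow page tables. */
        pVMCB->guest.u64CR3 = PGMGetHyperCR3(pVM);
    }
#endif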
1563 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:
1564 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:
1565 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
1566 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
1567 {
1568 uint32_t cbSize;
1569
1570 Log2(("SVM: %VGv mov x, cr%d\n", pCtx->rip, exitCode - SVM_EXIT_READ_CR0));
1571 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
1572 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1573 if (rc == VINF_SUCCESS)
1574 {
1575 /* EIP has been updated already. */
1576
1577 /* Only resume if successful. */
1578 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1579 goto ResumeExecution;
1580 }
1581 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1582 break;
1583 }
1584
1585 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:
1586 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:
1587 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
1588 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
1589 {
1590 uint32_t cbSize;
1591
1592 Log2(("SVM: %VGv mov dr%d, x\n", pCtx->rip, exitCode - SVM_EXIT_WRITE_DR0));
1593        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxWrite);
1594 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1595 if (rc == VINF_SUCCESS)
1596 {
1597 /* EIP has been updated already. */
1598
1599 /* Only resume if successful. */
1600 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1601 goto ResumeExecution;
1602 }
1603 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1604 break;
1605 }
1606
1607 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
1608 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
1609 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
1610 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
1611 {
1612 uint32_t cbSize;
1613
1614        Log2(("SVM: %VGv mov x, dr%d\n", pCtx->rip, exitCode - SVM_EXIT_READ_DR0));
1615 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
1616 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1617 if (rc == VINF_SUCCESS)
1618 {
1619 /* EIP has been updated already. */
1620
1621 /* Only resume if successful. */
1622 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1623 goto ResumeExecution;
1624 }
1625 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1626 break;
1627 }
1628
1629 /* Note: We'll get a #GP if the IO instruction isn't allowed (IOPL or TSS bitmap); no need to double check. */
1630 case SVM_EXIT_IOIO: /* I/O instruction. */
1631 {
1632 SVM_IOIO_EXIT IoExitInfo;
1633 uint32_t uIOSize, uAndVal;
1634
1635 IoExitInfo.au32[0] = pVMCB->ctrl.u64ExitInfo1;
1636
1637 /** @todo could use a lookup table here */
1638 if (IoExitInfo.n.u1OP8)
1639 {
1640 uIOSize = 1;
1641 uAndVal = 0xff;
1642 }
1643 else
1644 if (IoExitInfo.n.u1OP16)
1645 {
1646 uIOSize = 2;
1647 uAndVal = 0xffff;
1648 }
1649 else
1650 if (IoExitInfo.n.u1OP32)
1651 {
1652 uIOSize = 4;
1653 uAndVal = 0xffffffff;
1654 }
1655 else
1656 {
1657 AssertFailed(); /* should be fatal. */
1658 rc = VINF_EM_RAW_EMULATE_INSTR;
1659 break;
1660 }
1661
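        /* Editor's note: a sketch of the lookup table suggested by the @todo
         * above. The OP8/OP16/OP32 bits are mutually exclusive, so a small
         * table indexed by a derived value covers the three valid cases (the
         * "none set" fallback handled above would still be needed):
         */
#if 0
        static const struct { uint32_t cb; uint32_t uAndVal; } s_aIOSize[3] =
        {
            { 1, 0xff }, { 2, 0xffff }, { 4, 0xffffffff }
        };
        unsigned idx = IoExitInfo.n.u1OP8 ? 0 : IoExitInfo.n.u1OP16 ? 1 : 2;
        uIOSize = s_aIOSize[idx].cb;
        uAndVal = s_aIOSize[idx].uAndVal;
#endif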
1662 if (IoExitInfo.n.u1STR)
1663 {
1664 /* ins/outs */
1665 uint32_t prefix = 0;
1666 if (IoExitInfo.n.u1REP)
1667 prefix |= PREFIX_REP;
1668
1669 if (IoExitInfo.n.u1Type == 0)
1670 {
1671 Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
1672 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringWrite);
1673 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
1674 }
1675 else
1676 {
1677 Log2(("IOMInterpretINSEx %VGv %x size=%d\n", pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
1678 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringRead);
1679 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
1680 }
1681 }
1682 else
1683 {
1684 /* normal in/out */
1685 Assert(!IoExitInfo.n.u1REP);
1686
1687 if (IoExitInfo.n.u1Type == 0)
1688 {
1689 Log2(("IOMIOPortWrite %VGv %x %x size=%d\n", pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize));
1690 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOWrite);
1691 rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
1692 }
1693 else
1694 {
1695 uint32_t u32Val = 0;
1696
1697 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIORead);
1698 rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize);
1699 if (IOM_SUCCESS(rc))
1700 {
1701 /* Write back to the EAX register. */
1702 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
1703 Log2(("IOMIOPortRead %VGv %x %x size=%d\n", pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal, uIOSize));
1704 }
1705 }
1706 }
1707 /*
1708         * Handle the I/O return codes.
1709 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1710 */
1711 if (IOM_SUCCESS(rc))
1712 {
1713 /* Update EIP and continue execution. */
1714 pCtx->rip = pVMCB->ctrl.u64ExitInfo2; /* RIP/EIP of the next instruction is saved in EXITINFO2. */
1715 if (RT_LIKELY(rc == VINF_SUCCESS))
1716 {
1717 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1718 goto ResumeExecution;
1719 }
1720 Log2(("EM status from IO at %VGv %x size %d: %Vrc\n", pCtx->rip, IoExitInfo.n.u16Port, uIOSize, rc));
1721 break;
1722 }
1723
1724#ifdef VBOX_STRICT
1725 if (rc == VINF_IOM_HC_IOPORT_READ)
1726 Assert(IoExitInfo.n.u1Type != 0);
1727 else if (rc == VINF_IOM_HC_IOPORT_WRITE)
1728 Assert(IoExitInfo.n.u1Type == 0);
1729 else
1730 AssertMsg(VBOX_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Vrc\n", rc));
1731#endif
1732 Log2(("Failed IO at %VGv %x size %d\n", pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
1733 break;
1734 }
1735
1736 case SVM_EXIT_HLT:
1737        /* Check if external interrupts are pending; if so, skip the HLT and resume guest execution. */
1738 if ( pCtx->eflags.Bits.u1IF
1739 && VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
1740 {
1741 pCtx->rip++; /* skip hlt */
1742 goto ResumeExecution;
1743 }
1744
1745 rc = VINF_EM_RAW_EMULATE_INSTR_HLT;
1746 break;
1747
1748 case SVM_EXIT_RSM:
1749 case SVM_EXIT_INVLPGA:
1750 case SVM_EXIT_VMRUN:
1751 case SVM_EXIT_VMMCALL:
1752 case SVM_EXIT_VMLOAD:
1753 case SVM_EXIT_VMSAVE:
1754 case SVM_EXIT_STGI:
1755 case SVM_EXIT_CLGI:
1756 case SVM_EXIT_SKINIT:
1757 case SVM_EXIT_RDTSCP:
1758 {
1759 /* Unsupported instructions. */
1760 SVM_EVENT Event;
1761
1762 Event.au64[0] = 0;
1763 Event.n.u3Type = SVM_EVENT_EXCEPTION;
1764 Event.n.u1Valid = 1;
1765 Event.n.u8Vector = X86_XCPT_UD;
1766
1767 Log(("Forced #UD trap at %VGv\n", pCtx->rip));
1768 SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
1769
1770 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1771 goto ResumeExecution;
1772 }
1773
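    /* Editor's note: SVMR0InjectEvent uses the same EVENTINJ encoding for all
     * event kinds. For comparison with the #UD exception above, a sketch of
     * injecting an external interrupt instead; SVM_EVENT_EXTERNAL_IRQ is
     * assumed to be the matching type constant, and u8Interrupt is a
     * hypothetical vector variable:
     */
#if 0
    SVM_EVENT Event;
    Event.au64[0]    = 0;
    Event.n.u1Valid  = 1;
    Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
    Event.n.u8Vector = u8Interrupt;
    SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
#endif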
1774 /* Emulate in ring 3. */
1775 case SVM_EXIT_MSR:
1776 {
1777 uint32_t cbSize;
1778
1779        /* Note: the Intel manual claims there's a REX version of RDMSR that's slightly different, so we play it safe by completely disassembling the instruction. */
1780 Log(("SVM: %s\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));
1781 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1782 if (rc == VINF_SUCCESS)
1783 {
1784 /* EIP has been updated already. */
1785
1786 /* Only resume if successful. */
1787 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1788 goto ResumeExecution;
1789 }
1790 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Vrc\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr", rc));
1791 break;
1792 }
1793
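    /* Editor's note: EXITINFO1 already distinguishes RDMSR (0) from WRMSR (1),
     * so a later optimization could dispatch directly instead of disassembling.
     * EMInterpretRdmsr/EMInterpretWrmsr are hypothetical helpers, not part of
     * this revision:
     */
#if 0
    if (pVMCB->ctrl.u64ExitInfo1 == 0)
        rc = EMInterpretRdmsr(pVM, CPUMCTX2CORE(pCtx));   /* ECX selects the MSR, result in EDX:EAX */
    else
        rc = EMInterpretWrmsr(pVM, CPUMCTX2CORE(pCtx));   /* writes EDX:EAX to the MSR in ECX */
#endif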
1794 case SVM_EXIT_MONITOR:
1795 case SVM_EXIT_RDPMC:
1796 case SVM_EXIT_PAUSE:
1797 case SVM_EXIT_MWAIT_UNCOND:
1798 case SVM_EXIT_MWAIT_ARMED:
1799 case SVM_EXIT_TASK_SWITCH: /* can change CR3; emulate */
1800 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
1801 break;
1802
1803 case SVM_EXIT_SHUTDOWN:
1804 rc = VINF_EM_RESET; /* Triple fault equals a reset. */
1805 break;
1806
1807 case SVM_EXIT_IDTR_READ:
1808 case SVM_EXIT_GDTR_READ:
1809 case SVM_EXIT_LDTR_READ:
1810 case SVM_EXIT_TR_READ:
1811 case SVM_EXIT_IDTR_WRITE:
1812 case SVM_EXIT_GDTR_WRITE:
1813 case SVM_EXIT_LDTR_WRITE:
1814 case SVM_EXIT_TR_WRITE:
1815 case SVM_EXIT_CR0_SEL_WRITE:
1816 default:
1817 /* Unexpected exit codes. */
1818 rc = VERR_EM_INTERNAL_ERROR;
1819 AssertMsgFailed(("Unexpected exit code %x\n", exitCode)); /* Can't happen. */
1820 break;
1821 }
1822
1823end:
1824 if (fGuestStateSynced)
1825 {
1826 /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR. */
1827 SVM_READ_SELREG(LDTR, ldtr);
1828 SVM_READ_SELREG(TR, tr);
1829
1830 pCtx->gdtr.cbGdt = pVMCB->guest.GDTR.u32Limit;
1831 pCtx->gdtr.pGdt = pVMCB->guest.GDTR.u64Base;
1832
1833 pCtx->idtr.cbIdt = pVMCB->guest.IDTR.u32Limit;
1834 pCtx->idtr.pIdt = pVMCB->guest.IDTR.u64Base;
1835
1836 /*
1837 * System MSRs
1838 */
1839 pCtx->SysEnter.cs = pVMCB->guest.u64SysEnterCS;
1840 pCtx->SysEnter.eip = pVMCB->guest.u64SysEnterEIP;
1841 pCtx->SysEnter.esp = pVMCB->guest.u64SysEnterESP;
1842 }
1843
1844 /* Signal changes for the recompiler. */
1845 CPUMSetChangedFlags(pVM, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
1846
1847 /* If we executed vmrun and an external irq was pending, then we don't have to do a full sync the next time. */
1848 if (exitCode == SVM_EXIT_INTR)
1849 {
1850 STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
1851 /* On the next entry we'll only sync the host context. */
1852 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
1853 }
1854 else
1855 {
1856 /* On the next entry we'll sync everything. */
1857 /** @todo we can do better than this */
1858 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
1859 }
1860
1861 /* translate into a less severe return code */
1862 if (rc == VERR_EM_INTERPRETER)
1863 rc = VINF_EM_RAW_EMULATE_INSTR;
1864
1865 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1866 return rc;
1867}
1868
1869/**
1870 * Enters the AMD-V session
1871 *
1872 * @returns VBox status code.
1873 * @param pVM The VM to operate on.
1874 * @param pCpu CPU info struct
1875 */
1876HWACCMR0DECL(int) SVMR0Enter(PVM pVM, PHWACCM_CPUINFO pCpu)
1877{
1878 Assert(pVM->hwaccm.s.svm.fSupported);
1879
1880 LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVM->hwaccm.s.svm.idLastCpu, pCpu->uCurrentASID));
1881 pVM->hwaccm.s.svm.fResumeVM = false;
1882
1883    /* Force a reload of the LDTR, so we'll execute VMLOAD to load the additional guest state. */
1884 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;
1885
1886 return VINF_SUCCESS;
1887}
1888
1889
1890/**
1891 * Leaves the AMD-V session
1892 *
1893 * @returns VBox status code.
1894 * @param pVM The VM to operate on.
1895 */
1896HWACCMR0DECL(int) SVMR0Leave(PVM pVM)
1897{
1898 Assert(pVM->hwaccm.s.svm.fSupported);
1899 return VINF_SUCCESS;
1900}
1901
1902
1903static int svmInterpretInvlPg(PVM pVM, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
1904{
1905 OP_PARAMVAL param1;
1906 RTGCPTR addr;
1907
1908 int rc = DISQueryParamVal(pRegFrame, pCpu, &pCpu->param1, &param1, PARAM_SOURCE);
1909    if (VBOX_FAILURE(rc))
1910 return VERR_EM_INTERPRETER;
1911
1912    switch (param1.type)
1913 {
1914 case PARMTYPE_IMMEDIATE:
1915 case PARMTYPE_ADDRESS:
1916        if (!(param1.flags & (PARAM_VAL32|PARAM_VAL64)))
1917 return VERR_EM_INTERPRETER;
1918 addr = param1.val.val64;
1919 break;
1920
1921 default:
1922 return VERR_EM_INTERPRETER;
1923 }
1924
1925    /** @todo Is addr always a flat linear address, or DS-based
1926     * (in the absence of segment override prefixes)?
1927     */
1928 rc = PGMInvalidatePage(pVM, addr);
1929 if (VBOX_SUCCESS(rc))
1930 {
1931 /* Manually invalidate the page for the VM's TLB. */
1932 Log(("SVMInvlpgA %VGv ASID=%d\n", addr, uASID));
1933 SVMInvlpgA(addr, uASID);
1934 return VINF_SUCCESS;
1935 }
1936 Assert(rc == VERR_REM_FLUSHED_PAGES_OVERFLOW);
1937 return rc;
1938}
1939
1940/**
1941 * Interprets INVLPG
1942 *
1943 * @returns VBox status code.
1944 * @retval VINF_* Scheduling instructions.
1945 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1946 * @retval VERR_* Fatal errors.
1947 *
1948 * @param pVM The VM handle.
1949 * @param pRegFrame The register frame.
1950 * @param uASID       Tagged TLB ID for the guest.
1951 *
1952 * Updates the EIP if an instruction was executed successfully.
1953 */
1954static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID)
1955{
1956 /*
1957     * Only allow 32-bit and 64-bit code.
1958 */
1959 DISCPUMODE enmMode = SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid);
1960 if (enmMode != CPUMODE_16BIT)
1961 {
1962 RTGCPTR pbCode;
1963 int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->rip, &pbCode);
1964 if (VBOX_SUCCESS(rc))
1965 {
1966 uint32_t cbOp;
1967 DISCPUSTATE Cpu;
1968
1969 Cpu.mode = enmMode;
1970 rc = EMInterpretDisasOneEx(pVM, pbCode, pRegFrame, &Cpu, &cbOp);
1971 Assert(VBOX_FAILURE(rc) || Cpu.pCurInstr->opcode == OP_INVLPG);
1972 if (VBOX_SUCCESS(rc) && Cpu.pCurInstr->opcode == OP_INVLPG)
1973 {
1974 Assert(cbOp == Cpu.opsize);
1975 rc = svmInterpretInvlPg(pVM, &Cpu, pRegFrame, uASID);
1976 if (VBOX_SUCCESS(rc))
1977 {
1978 pRegFrame->rip += cbOp; /* Move on to the next instruction. */
1979 }
1980 return rc;
1981 }
1982 }
1983 }
1984 return VERR_EM_INTERPRETER;
1985}
1986
1987
1988/**
1989 * Invalidates a guest page
1990 *
1991 * @returns VBox status code.
1992 * @param pVM The VM to operate on.
1993 * @param GCVirt Page to invalidate
1994 */
1995HWACCMR0DECL(int) SVMR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
1996{
1997 bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.svm.fForceTLBFlush;
1998
1999 /* Skip it if a TLB flush is already pending. */
2000 if (!fFlushPending)
2001 {
2002 SVM_VMCB *pVMCB;
2003
2004 Log2(("SVMR0InvalidatePage %VGv\n", GCVirt));
2005 AssertReturn(pVM, VERR_INVALID_PARAMETER);
2006 Assert(pVM->hwaccm.s.svm.fSupported);
2007
2008 pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
2009 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);
2010
2011 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushPageManual);
2012 SVMInvlpgA(GCVirt, pVMCB->ctrl.TLBCtrl.n.u32ASID);
2013 }
2014 return VINF_SUCCESS;
2015}
2016
2017
2018/**
2019 * Invalidates a guest page by physical address
2020 *
2021 * NOTE: Assumes the current instruction references this physical page through a virtual address!
2022 *
2023 * @returns VBox status code.
2024 * @param pVM The VM to operate on.
2025 * @param GCPhys Page to invalidate
2026 */
2027HWACCMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
2028{
2029 bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.svm.fForceTLBFlush;
2030
2031 Assert(pVM->hwaccm.s.fNestedPaging);
2032
2033 /* Skip it if a TLB flush is already pending. */
2034 if (!fFlushPending)
2035 {
2036 CPUMCTX *pCtx;
2037 int rc;
2038 SVM_VMCB *pVMCB;
2039
2040 rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
2041 AssertRCReturn(rc, rc);
2042
2043 Log2(("SVMR0InvalidatePhysPage %VGp\n", GCPhys));
2044 AssertReturn(pVM, VERR_INVALID_PARAMETER);
2045 Assert(pVM->hwaccm.s.svm.fSupported);
2046
2047 pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
2048 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);
2049
2050 /*
2051         * Only allow 32-bit and 64-bit code.
2052 */
2053 DISCPUMODE enmMode = SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid);
2054 if (enmMode != CPUMODE_16BIT)
2055 {
2056 RTGCPTR pbCode;
2057            rc = SELMValidateAndConvertCSAddr(pVM, pCtx->eflags, pCtx->ss, pCtx->cs, &pCtx->csHid, (RTGCPTR)pCtx->rip, &pbCode);
2058 if (VBOX_SUCCESS(rc))
2059 {
2060 uint32_t cbOp;
2061 DISCPUSTATE Cpu;
2062 OP_PARAMVAL param1;
2063 RTGCPTR addr;
2064
2065 Cpu.mode = enmMode;
2066 rc = EMInterpretDisasOneEx(pVM, pbCode, CPUMCTX2CORE(pCtx), &Cpu, &cbOp);
2067 AssertRCReturn(rc, rc);
2068 Assert(cbOp == Cpu.opsize);
2069
2070                rc = DISQueryParamVal(CPUMCTX2CORE(pCtx), &Cpu, &Cpu.param1, &param1, PARAM_SOURCE);
2071 AssertRCReturn(rc, VERR_EM_INTERPRETER);
2072
2073                switch (param1.type)
2074 {
2075 case PARMTYPE_IMMEDIATE:
2076 case PARMTYPE_ADDRESS:
2077 AssertReturn((param1.flags & (PARAM_VAL32|PARAM_VAL64)), VERR_EM_INTERPRETER);
2078
2079 addr = param1.val.val64;
2080 break;
2081
2082 default:
2083 AssertFailed();
2084 return VERR_EM_INTERPRETER;
2085 }
2086
2087 /* Manually invalidate the page for the VM's TLB. */
2088 Log(("SVMR0InvalidatePhysPage Phys=%VGp Virt=%VGv ASID=%d\n", GCPhys, addr, pVMCB->ctrl.TLBCtrl.n.u32ASID));
2089 SVMInvlpgA(addr, pVMCB->ctrl.TLBCtrl.n.u32ASID);
2090 STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushPhysPageManual);
2091
2092 return VINF_SUCCESS;
2093 }
2094 }
2095 AssertFailed();
2096 return VERR_EM_INTERPRETER;
2097 }
2098 return VINF_SUCCESS;
2099}