VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp @ 9110

Last change on this file since 9110 was 9110, checked in by vboxsync, 17 years ago

Minor update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 75.5 KB
/* $Id: HWSVMR0.cpp 9110 2008-05-26 10:01:07Z vboxsync $ */
/** @file
 * HWACCM SVM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/dis.h>
#include <VBox/dbgf.h>
#include <VBox/disopcode.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include "HWSVMR0.h"

static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID);

/**
 * Sets up and activates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            CPU info struct
 * @param   pVM             The VM to operate on.
 * @param   pvPageCpu       Pointer to the global cpu page.
 * @param   pPageCpuPhys    Physical address of the global cpu page.
 */
HWACCMR0DECL(int) SVMR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

    /* We must turn on AMD-V and set up the host state physical address, as those MSRs are per-cpu/core. */

#ifdef LOG_ENABLED
    SUPR0Printf("SVMR0EnableCpu cpu %d page (%x) %x\n", pCpu->idCpu, pvPageCpu, (uint32_t)pPageCpuPhys);
#endif

    /* Turn on AMD-V in the EFER MSR. */
    uint64_t val = ASMRdMsr(MSR_K6_EFER);
    if (!(val & MSR_K6_EFER_SVME))
        ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
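    /* Note: EFER.SVME gates the SVM instruction set; while it is clear, VMRUN,
       VMLOAD, VMSAVE, STGI, CLGI and SKINIT all raise #UD, so it must be set
       before we can enter a guest (AMD64 Architecture Programmer's Manual). */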

    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, pPageCpuPhys);

    pCpu->uCurrentASID = 0;     /* we'll always increment this the first time (host uses ASID 0) */
    pCpu->cTLBFlushes  = 0;
    return VINF_SUCCESS;
}

/**
 * Deactivates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            CPU info struct
 * @param   pvPageCpu       Pointer to the global cpu page.
 * @param   pPageCpuPhys    Physical address of the global cpu page.
 */
HWACCMR0DECL(int) SVMR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

#ifdef LOG_ENABLED
    SUPR0Printf("SVMR0DisableCpu cpu %d\n", pCpu->idCpu);
#endif

    /* Turn off AMD-V in the EFER MSR. */
    uint64_t val = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);

    /* Invalidate host state physical address. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
    pCpu->uCurrentASID = 0;

    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per VM AMD-V init.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0InitVM(PVM pVM)
{
    int rc;

    /* Allocate one page for the VM control block (VMCB). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjVMCB, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pVMCB     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjVMCB);
    pVM->hwaccm.s.svm.pVMCBPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjVMCB, 0);
    ASMMemZero32(pVM->hwaccm.s.svm.pVMCB, PAGE_SIZE);

    /* Allocate one page for the host context. */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjVMCBHost, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pVMCBHost     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjVMCBHost);
    pVM->hwaccm.s.svm.pVMCBHostPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjVMCBHost, 0);
    ASMMemZero32(pVM->hwaccm.s.svm.pVMCBHost, PAGE_SIZE);

    /* Allocate 12 KB for the IO bitmap (doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjIOBitmap, 3 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pIOBitmap     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjIOBitmap);
    pVM->hwaccm.s.svm.pIOBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjIOBitmap, 0);
    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(pVM->hwaccm.s.svm.pIOBitmap, PAGE_SIZE*3, 0xffffffff);
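    /* Note: the IOPM is 12 KB because it holds one intercept bit for each of the
       65536 I/O ports plus padding for accesses that straddle the last port;
       with every bit set, each IN/OUT/INS/OUTS causes an SVM_EXIT_IOIO exit. */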

    /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pMSRBitmap     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjMSRBitmap);
    pVM->hwaccm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjMSRBitmap, 0);
    /* Set all bits to intercept all MSR accesses. */
    ASMMemFill32(pVM->hwaccm.s.svm.pMSRBitmap, PAGE_SIZE*2, 0xffffffff);
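    /* Note: the MSRPM uses two bits per MSR (one for RDMSR, one for WRMSR) and
       covers three architecturally defined MSR ranges; accesses to MSRs outside
       those ranges are always intercepted, so setting every bit here means all
       MSR accesses exit. */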

    /* Erratum 170 which requires a forced TLB flush for each world switch:
     * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     *   Athlon X2:  0x6b 1/2
     *               0x68 1/2
     *   Athlon 64:  0x7f 1
     *               0x6f 2
     *   Sempron:    0x7f 1/2
     *               0x6f 2
     *               0x6c 2
     *               0x7c 2
     *   Turion 64:  0x68 2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;
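    /* The decoding above follows the CPUID leaf 1 EAX layout: bits 3:0 stepping,
       7:4 base model, 11:8 base family, 19:16 extended model and 27:20 extended
       family, where the extended fields only apply when the base family is 0xf. */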
    if (    u32Family == 0xf
        &&  !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        &&  !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
        pVM->hwaccm.s.svm.fAlwaysFlushTLB = true;
    }

    /* Invalidate the last cpu we were running on. */
    pVM->hwaccm.s.svm.idLastCpu = NIL_RTCPUID;
    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per VM AMD-V termination.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0TermVM(PVM pVM)
{
    if (pVM->hwaccm.s.svm.pMemObjVMCB)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjVMCB, false);
        pVM->hwaccm.s.svm.pVMCB       = 0;
        pVM->hwaccm.s.svm.pVMCBPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjVMCB = 0;
    }
    if (pVM->hwaccm.s.svm.pMemObjVMCBHost)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjVMCBHost, false);
        pVM->hwaccm.s.svm.pVMCBHost       = 0;
        pVM->hwaccm.s.svm.pVMCBHostPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjVMCBHost = 0;
    }
    if (pVM->hwaccm.s.svm.pMemObjIOBitmap)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjIOBitmap, false);
        pVM->hwaccm.s.svm.pIOBitmap       = 0;
        pVM->hwaccm.s.svm.pIOBitmapPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjIOBitmap = 0;
    }
    if (pVM->hwaccm.s.svm.pMemObjMSRBitmap)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjMSRBitmap, false);
        pVM->hwaccm.s.svm.pMSRBitmap       = 0;
        pVM->hwaccm.s.svm.pMSRBitmapPhys   = 0;
        pVM->hwaccm.s.svm.pMemObjMSRBitmap = 0;
    }
    return VINF_SUCCESS;
}

/**
 * Sets up AMD-V for the specified VM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;
    SVM_VMCB *pVMCB;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

    Assert(pVM->hwaccm.s.svm.fSupported);

    pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* Program the control fields. Most of them never have to be changed again. */
    /* CR0/3/4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
    /* Note: CR8 reads will refer to V_TPR, so no need to catch them. */
    /** @note CR0 & CR4 can be safely read when guest and shadow copies are identical. */
    if (!pVM->hwaccm.s.fNestedPaging)
        pVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4);
    else
        pVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);

    /*
     * CR0/3/4/8 writes must be intercepted for obvious reasons.
     */
    if (!pVM->hwaccm.s.fNestedPaging)
        pVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4) | RT_BIT(8);
    else
        pVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4) | RT_BIT(8);

    /* Intercept all DRx reads and writes. */
    pVMCB->ctrl.u16InterceptRdDRx = RT_BIT(0) | RT_BIT(1) | RT_BIT(2) | RT_BIT(3) | RT_BIT(4) | RT_BIT(5) | RT_BIT(6) | RT_BIT(7);
    pVMCB->ctrl.u16InterceptWrDRx = RT_BIT(0) | RT_BIT(1) | RT_BIT(2) | RT_BIT(3) | RT_BIT(4) | RT_BIT(5) | RT_BIT(6) | RT_BIT(7);

    /* Currently we don't care about DRx reads or writes. DRx registers are trashed.
     * All breakpoints are automatically cleared when the VM exits.
     */

    pVMCB->ctrl.u32InterceptException = HWACCM_SVM_TRAP_MASK;
#ifndef DEBUG
    if (pVM->hwaccm.s.fNestedPaging)
        pVMCB->ctrl.u32InterceptException &= ~RT_BIT(14);   /* no longer need to intercept #PF. */
#endif

    pVMCB->ctrl.u32InterceptCtrl1 =   SVM_CTRL1_INTERCEPT_INTR
                                    | SVM_CTRL1_INTERCEPT_VINTR
                                    | SVM_CTRL1_INTERCEPT_NMI
                                    | SVM_CTRL1_INTERCEPT_SMI
                                    | SVM_CTRL1_INTERCEPT_INIT
                                    | SVM_CTRL1_INTERCEPT_RDPMC
                                    | SVM_CTRL1_INTERCEPT_CPUID
                                    | SVM_CTRL1_INTERCEPT_RSM
                                    | SVM_CTRL1_INTERCEPT_HLT
                                    | SVM_CTRL1_INTERCEPT_INOUT_BITMAP
                                    | SVM_CTRL1_INTERCEPT_MSR_SHADOW
                                    | SVM_CTRL1_INTERCEPT_INVLPG
                                    | SVM_CTRL1_INTERCEPT_INVLPGA       /* AMD only */
                                    | SVM_CTRL1_INTERCEPT_TASK_SWITCH
                                    | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* fatal */
                                    | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Legacy FPU FERR handling. */
    /* With nested paging we don't care about invlpg anymore. */
    if (pVM->hwaccm.s.fNestedPaging)
        pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_INVLPG;

    pVMCB->ctrl.u32InterceptCtrl2 =   SVM_CTRL2_INTERCEPT_VMRUN         /* required */
                                    | SVM_CTRL2_INTERCEPT_VMMCALL
                                    | SVM_CTRL2_INTERCEPT_VMLOAD
                                    | SVM_CTRL2_INTERCEPT_VMSAVE
                                    | SVM_CTRL2_INTERCEPT_STGI
                                    | SVM_CTRL2_INTERCEPT_CLGI
                                    | SVM_CTRL2_INTERCEPT_SKINIT
                                    | SVM_CTRL2_INTERCEPT_RDTSCP        /* AMD only; we don't support this one */
                                    | SVM_CTRL2_INTERCEPT_WBINVD
                                    | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
    Log(("pVMCB->ctrl.u32InterceptException = %x\n", pVMCB->ctrl.u32InterceptException));
    Log(("pVMCB->ctrl.u32InterceptCtrl1     = %x\n", pVMCB->ctrl.u32InterceptCtrl1));
    Log(("pVMCB->ctrl.u32InterceptCtrl2     = %x\n", pVMCB->ctrl.u32InterceptCtrl2));

    /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
    pVMCB->ctrl.IntCtrl.n.u1VIrqMasking = 1;
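    /* Note: with V_INTR_MASKING set, the guest's EFLAGS.IF and TPR mask only its
       virtual interrupts; physical interrupts remain under control of the host
       EFLAGS.IF, so the host can always regain control of the CPU. */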

    /* Set IO and MSR bitmap addresses. */
    pVMCB->ctrl.u64IOPMPhysAddr  = pVM->hwaccm.s.svm.pIOBitmapPhys;
    pVMCB->ctrl.u64MSRPMPhysAddr = pVM->hwaccm.s.svm.pMSRBitmapPhys;

    /* No LBR virtualization. */
    pVMCB->ctrl.u64LBRVirt = 0;

    /* The ASID must start at 1; the host uses 0. */
    pVMCB->ctrl.TLBCtrl.n.u32ASID = 1;

    return rc;
}


/**
 * Injects an event (trap or external interrupt).
 *
 * @param   pVM     The VM to operate on.
 * @param   pVMCB   SVM control block
 * @param   pCtx    CPU Context
 * @param   pEvent  SVM event descriptor
 */
inline void SVMR0InjectEvent(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx, SVM_EVENT* pEvent)
{
#ifdef VBOX_STRICT
    if (pEvent->n.u8Vector == 0xE)
        Log(("SVM: Inject int %d at %VGv error code=%08x CR2=%08x intInfo=%08x\n", pEvent->n.u8Vector, pCtx->eip, pEvent->n.u32ErrorCode, pCtx->cr2, pEvent->au64[0]));
    else
    if (pEvent->n.u8Vector < 0x20)
        Log(("SVM: Inject int %d at %VGv error code=%08x\n", pEvent->n.u8Vector, pCtx->eip, pEvent->n.u32ErrorCode));
    else
    {
        Log(("INJ-EI: %x at %VGv\n", pEvent->n.u8Vector, pCtx->eip));
        Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
        Assert(pCtx->eflags.u32 & X86_EFL_IF);
    }
#endif

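    /* Note: the CPU performs the actual injection during the next VMRUN, after
       the guest state has been loaded; if delivery is interrupted by a #VMEXIT,
       the pending event is reported back in the EXITINTINFO field. */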
    /* Set event injection state. */
    pVMCB->ctrl.EventInject.au64[0] = pEvent->au64[0];
}


/**
 * Checks for pending guest interrupts and injects them.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pVMCB   SVM control block
 * @param   pCtx    CPU Context
 */
static int SVMR0CheckPendingInterrupt(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
{
    int rc;

    /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
    if (pVM->hwaccm.s.Event.fPending)
    {
        SVM_EVENT Event;

        Log(("Reinjecting event %08x %08x at %VGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, pCtx->eip));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
        Event.au64[0] = pVM->hwaccm.s.Event.intInfo;
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

        pVM->hwaccm.s.Event.fPending = false;
        return VINF_SUCCESS;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    if (    !TRPMHasTrap(pVM)
        &&  VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
    {
        if (!(pCtx->eflags.u32 & X86_EFL_IF))
        {
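            /* Note: since the guest can't take the interrupt now (IF is clear),
               we request a VINTR intercept instead: a dummy virtual interrupt is
               made pending, which forces a #VMEXIT the moment the guest enables
               interrupts again ("interrupt window"). */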
            if (!pVMCB->ctrl.IntCtrl.n.u1VIrqValid)
            {
                Log(("Enable irq window exit!\n"));
                /** @todo use virtual interrupt method to inject a pending irq; dispatched as soon as guest.IF is set. */
                pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
                pVMCB->ctrl.IntCtrl.n.u1VIrqValid  = 1;
                pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR  = 1; /* ignore the priority in the TPR; just deliver it */
                pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0; /* don't care */
            }
        }
        else
        if (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        {
            uint8_t u8Interrupt;

            rc = PDMGetInterrupt(pVM, &u8Interrupt);
            Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Vrc\n", u8Interrupt, u8Interrupt, rc));
            if (VBOX_SUCCESS(rc))
            {
                rc = TRPMAssertTrap(pVM, u8Interrupt, TRPM_HARDWARE_INT);
                AssertRC(rc);
            }
            else
            {
                /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
                Assert(!VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchGuestIrq);
                /* Just continue */
            }
        }
        else
            Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS!!\n", pCtx->eip));
    }

#ifdef VBOX_STRICT
    if (TRPMHasTrap(pVM))
    {
        uint8_t u8Vector;
        rc = TRPMQueryTrapAll(pVM, &u8Vector, 0, 0, 0);
        AssertRC(rc);
    }
#endif

    if (    pCtx->eflags.u32 & X86_EFL_IF
        && (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        &&  TRPMHasTrap(pVM)
       )
    {
        uint8_t     u8Vector;
        int         rc;
        TRPMEVENT   enmType;
        SVM_EVENT   Event;
        uint32_t    u32ErrorCode;

        Event.au64[0] = 0;

        /* If a new event is pending, then dispatch it now. */
        rc = TRPMQueryTrapAll(pVM, &u8Vector, &enmType, &u32ErrorCode, 0);
        AssertRC(rc);
        Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
        Assert(enmType != TRPM_SOFTWARE_INT);

        /* Clear the pending trap. */
        rc = TRPMResetTrap(pVM);
        AssertRC(rc);

        Event.n.u8Vector     = u8Vector;
        Event.n.u1Valid      = 1;
        Event.n.u32ErrorCode = u32ErrorCode;

        if (enmType == TRPM_TRAP)
        {
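            /* Note: vectors 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP),
               14 (#PF) and 17 (#AC) are the x86 exceptions that push an error
               code, hence only these mark the error code as valid below. */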
            switch (u8Vector)
            {
                case 8:
                case 10:
                case 11:
                case 12:
                case 13:
                case 14:
                case 17:
                    /* Valid error codes. */
                    Event.n.u1ErrorCodeValid = 1;
                    break;
                default:
                    break;
            }
            if (u8Vector == X86_XCPT_NMI)
                Event.n.u3Type = SVM_EVENT_NMI;
            else
                Event.n.u3Type = SVM_EVENT_EXCEPTION;
        }
        else
            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;

        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntInject);
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
    } /* if (interrupts can be dispatched) */

    return VINF_SUCCESS;
}


/**
 * Loads the guest state.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pCtx    Guest context
 */
HWACCMR0DECL(int) SVMR0LoadGuestState(PVM pVM, CPUMCTX *pCtx)
{
    RTGCUINTPTR val;
    SVM_VMCB *pVMCB;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Setup AMD SVM. */
    Assert(pVM->hwaccm.s.svm.fSupported);

    pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    {
        SVM_WRITE_SELREG(CS, cs);
        SVM_WRITE_SELREG(SS, ss);
        SVM_WRITE_SELREG(DS, ds);
        SVM_WRITE_SELREG(ES, es);
        SVM_WRITE_SELREG(FS, fs);
        SVM_WRITE_SELREG(GS, gs);
    }

    /* Guest CPU context: LDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
    {
        SVM_WRITE_SELREG(LDTR, ldtr);
    }

    /* Guest CPU context: TR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
    {
        SVM_WRITE_SELREG(TR, tr);
    }

    /* Guest CPU context: GDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
    {
        pVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
        pVMCB->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
    }

    /* Guest CPU context: IDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
    {
        pVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
        pVMCB->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
    }

    /*
     * Sysenter MSRs
     */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SYSENTER_MSR)
    {
        pVMCB->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
        pVMCB->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
        pVMCB->guest.u64SysEnterESP = pCtx->SysEnter.esp;
    }

    /* Control registers */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
    {
        val = pCtx->cr0;
        if (CPUMIsGuestFPUStateActive(pVM) == false)
        {
            /* Always use #NM exceptions to load the FPU/XMM state on demand. */
            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
        }
        else
        {
            Assert(pVM->hwaccm.s.svm.fResumeVM == true);
            /** @todo check if we support the old style mess correctly. */
            if (!(val & X86_CR0_NE))
            {
                Log(("Forcing X86_CR0_NE!!!\n"));

                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
                if (!pVM->hwaccm.s.fFPUOldStyleOverride)
                {
                    pVMCB->ctrl.u32InterceptException |= RT_BIT(16);
                    pVM->hwaccm.s.fFPUOldStyleOverride = true;
                }
            }
            val |= X86_CR0_NE;  /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
        }
        /* Always enable caching. */
        val &= ~(X86_CR0_CD|X86_CR0_NW);

        /* Note: WP is not relevant in nested paging mode as we catch accesses on the (host) physical level. */
        /* Note: In nested paging mode the guest is allowed to run with paging disabled; the guest physical to host physical translation will remain active. */
        if (!pVM->hwaccm.s.fNestedPaging)
        {
            val |= X86_CR0_PG;  /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
            val |= X86_CR0_WP;  /* Must set this as we rely on protecting various pages, and supervisor writes must be caught. */
        }
        pVMCB->guest.u64CR0 = val;
    }
    /* CR2 as well */
    pVMCB->guest.u64CR2 = pCtx->cr2;

    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    {
        /* Save our shadow CR3 register. */
        if (pVM->hwaccm.s.fNestedPaging)
        {
            pVMCB->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
            pVMCB->guest.u64CR3 = pCtx->cr3;
        }
        else
            pVMCB->guest.u64CR3 = PGMGetHyperCR3(pVM);
    }
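    /* Note: with nested paging the guest runs on its own CR3 and the nested CR3
       points to the host-controlled second level page tables; without it the
       guest runs on PGM's shadow page tables and never sees its real CR3. */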

    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
    {
        val = pCtx->cr4;
        if (!pVM->hwaccm.s.fNestedPaging)
        {
            switch (pVM->hwaccm.s.enmShadowMode)
            {
                case PGMMODE_REAL:
                case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
                    AssertFailed();
                    return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;

                case PGMMODE_32_BIT:        /* 32-bit paging. */
                    break;

                case PGMMODE_PAE:           /* PAE paging. */
                case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
                    /** @todo use normal 32 bits paging */
                    val |= X86_CR4_PAE;
                    break;

                case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
                case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
                    AssertFailed();
                    return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;

                default:                    /* shut up gcc */
                    AssertFailed();
                    return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
            }
        }
        pVMCB->guest.u64CR4 = val;
    }

    /* Debug registers. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
    {
        /** @todo DR0-6 */
        val  = pCtx->dr7;
        val &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));    /* must be zero */
        val |= 0x400;                                                   /* must be one */
#ifdef VBOX_STRICT
        val = 0x400;
#endif
        pVMCB->guest.u64DR7 = val;

        pVMCB->guest.u64DR6 = pCtx->dr6;
    }

    /* EIP, ESP and EFLAGS */
    pVMCB->guest.u64RIP    = pCtx->eip;
    pVMCB->guest.u64RSP    = pCtx->esp;
    pVMCB->guest.u64RFlags = pCtx->eflags.u32;

    /* Set CPL */
    pVMCB->guest.u8CPL     = pCtx->ssHid.Attr.n.u2Dpl;

    /* RAX/EAX too, as VMRUN uses RAX as an implicit parameter. */
    pVMCB->guest.u64RAX    = pCtx->eax;

    /* vmrun will fail otherwise. */
    pVMCB->guest.u64EFER   = MSR_K6_EFER_SVME;
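    /* Note: VMRUN checks the loaded guest state for consistency; a guest EFER
       with SVME clear is considered illegal and results in an immediate exit
       with SVM_EXIT_INVALID. */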

    /* TSC offset. */
    if (TMCpuTickCanUseRealTSC(pVM, &pVMCB->ctrl.u64TSCOffset))
    {
        pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatTSCOffset);
    }
    else
    {
        pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatTSCIntercept);
    }
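    /* Note: when the real TSC can be used, RDTSC executes in the guest without
       exiting and the CPU simply adds u64TSCOffset to the result; otherwise
       every RDTSC is intercepted and virtualized by the exit handler. */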

    /** @todo 64 bits stuff (?):
     * - STAR
     * - LSTAR
     * - CSTAR
     * - SFMASK
     * - KernelGSBase
     */

#ifdef DEBUG
    /* Intercept X86_XCPT_DB if stepping is enabled */
    if (DBGFIsStepping(pVM))
        pVMCB->ctrl.u32InterceptException |=  RT_BIT(1);
    else
        pVMCB->ctrl.u32InterceptException &= ~RT_BIT(1);
#endif

    /* Done. */
    pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;

    return VINF_SUCCESS;
}


/**
 * Runs guest code in an SVM VM.
 *
 * @todo This can be much more efficient, when we only sync that which has actually changed. (this is the first attempt only)
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pCtx    Guest context
 * @param   pCpu    CPU info struct
 */
HWACCMR0DECL(int) SVMR0RunGuestCode(PVM pVM, CPUMCTX *pCtx, PHWACCM_CPUINFO pCpu)
{
    int         rc = VINF_SUCCESS;
    uint64_t    exitCode = (uint64_t)SVM_EXIT_INVALID;
    SVM_VMCB   *pVMCB;
    bool        fGuestStateSynced = false;
    unsigned    cResume = 0;

    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatEntry, x);

    AssertReturn(pCpu->fSVMConfigured, VERR_EM_INTERNAL_ERROR);

    pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* We can jump to this point to resume execution after determining that a VM-exit is innocent.
     */
ResumeExecution:
    /* Safety precaution; looping for too long here can have a very bad effect on the host */
    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
    {
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitMaxResume);
        rc = VINF_EM_RAW_INTERRUPT;
        goto end;
    }

    /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
    if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
    {
        Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", pCtx->eip, EMGetInhibitInterruptsPC(pVM)));
        if (pCtx->eip != EMGetInhibitInterruptsPC(pVM))
        {
            /** @note we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
             *  Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
             *  force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
             *  break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
             */
            VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
            /* Irq inhibition is no longer active; clear the corresponding SVM state. */
            pVMCB->ctrl.u64IntShadow = 0;
        }
    }
    else
    {
        /* Irq inhibition is no longer active; clear the corresponding SVM state. */
        pVMCB->ctrl.u64IntShadow = 0;
    }

    /* Check for pending actions that force us to go back to ring 3. */
#ifdef DEBUG
    /* Intercept X86_XCPT_DB if stepping is enabled */
    if (!DBGFIsStepping(pVM))
#endif
    {
        if (VM_FF_ISPENDING(pVM, VM_FF_TO_R3 | VM_FF_TIMER))
        {
            VM_FF_CLEAR(pVM, VM_FF_TO_R3);
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchToR3);
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
            rc = VINF_EM_RAW_TO_R3;
            goto end;
        }
    }

    /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
    if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        rc = VINF_EM_PENDING_REQUEST;
        goto end;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    /** @note *after* VM_FF_INHIBIT_INTERRUPTS check!!! */
    rc = SVMR0CheckPendingInterrupt(pVM, pVMCB, pCtx);
    if (VBOX_FAILURE(rc))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }

    /* Load the guest state */
    rc = SVMR0LoadGuestState(pVM, pCtx);
    if (rc != VINF_SUCCESS)
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }
    fGuestStateSynced = true;

    /* All done! Let's start VM execution. */
    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);

    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
    pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hwaccm.s.fNestedPaging;

    /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
    if (!pVM->hwaccm.s.svm.fResumeVM)
    {
        if (    pVM->hwaccm.s.svm.idLastCpu != pCpu->idCpu
            /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
            ||  pVM->hwaccm.s.svm.cTLBFlushes != pCpu->cTLBFlushes)
        {
            /* Force a TLB flush on VM entry. */
            pVM->hwaccm.s.svm.fForceTLBFlush = true;
        }
        pVM->hwaccm.s.svm.idLastCpu = pCpu->idCpu;
    }

    /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
    if (    pVM->hwaccm.s.svm.fForceTLBFlush
        && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
    {
        if (++pCpu->uCurrentASID >= pVM->hwaccm.s.svm.u32MaxASID)
        {
            pCpu->uCurrentASID = 1;                 /* start at 1; host uses 0 */
            pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = 1;   /* wrap around; flush TLB */
            pCpu->cTLBFlushes++;
        }
        else
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);

        pVM->hwaccm.s.svm.cTLBFlushes = pCpu->cTLBFlushes;
    }
    else
    {
        /* We never increase uCurrentASID in the fAlwaysFlushTLB (erratum 170) case. */
        if (!pCpu->uCurrentASID)
            pCpu->uCurrentASID = 1;

        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVM->hwaccm.s.svm.fForceTLBFlush;
    }

    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.svm.u32MaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    pVMCB->ctrl.TLBCtrl.n.u32ASID = pCpu->uCurrentASID;
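    /* Note: TLB entries are tagged with the ASID, so switching this guest to a
       previously unused ASID makes its stale translations unreachable, which is
       as effective as a flush; a real flush is only needed once the ASID space
       wraps around. */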

#ifdef VBOX_WITH_STATISTICS
    if (pVMCB->ctrl.TLBCtrl.n.u1TLBFlush)
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBWorldSwitch);
    else
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatNoFlushTLBWorldSwitch);
#endif

    /* In case we execute a goto ResumeExecution later on. */
    pVM->hwaccm.s.svm.fResumeVM      = true;
    pVM->hwaccm.s.svm.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;

    Assert(sizeof(pVM->hwaccm.s.svm.pVMCBPhys) == 8);
    Assert(pVMCB->ctrl.u32InterceptCtrl2 == (  SVM_CTRL2_INTERCEPT_VMRUN         /* required */
                                             | SVM_CTRL2_INTERCEPT_VMMCALL
                                             | SVM_CTRL2_INTERCEPT_VMLOAD
                                             | SVM_CTRL2_INTERCEPT_VMSAVE
                                             | SVM_CTRL2_INTERCEPT_STGI
                                             | SVM_CTRL2_INTERCEPT_CLGI
                                             | SVM_CTRL2_INTERCEPT_SKINIT
                                             | SVM_CTRL2_INTERCEPT_RDTSCP        /* AMD only; we don't support this one */
                                             | SVM_CTRL2_INTERCEPT_WBINVD
                                             | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND  /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
                                            ));
    Assert(pVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
    Assert(pVMCB->ctrl.u64IOPMPhysAddr  == pVM->hwaccm.s.svm.pIOBitmapPhys);
    Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVM->hwaccm.s.svm.pMSRBitmapPhys);
    Assert(pVMCB->ctrl.u64LBRVirt == 0);

    SVMVMRun(pVM->hwaccm.s.svm.pVMCBHostPhys, pVM->hwaccm.s.svm.pVMCBPhys, pCtx);
    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatInGC, x);
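    /* Note: SVMVMRun is an assembly helper; broadly, it saves the host context,
       loads the guest registers that the VMCB doesn't cover, executes VMRUN and
       reverses the process on #VMEXIT before returning here. */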

    /**
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING 3 *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     */

    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatExit, x);

    /* Reason for the VM exit */
    exitCode = pVMCB->ctrl.u64ExitCode;

    if (exitCode == (uint64_t)SVM_EXIT_INVALID)     /* Invalid guest state. */
    {
        HWACCMDumpRegs(pCtx);
#ifdef DEBUG
        Log(("ctrl.u16InterceptRdCRx %x\n", pVMCB->ctrl.u16InterceptRdCRx));
        Log(("ctrl.u16InterceptWrCRx %x\n", pVMCB->ctrl.u16InterceptWrCRx));
        Log(("ctrl.u16InterceptRdDRx %x\n", pVMCB->ctrl.u16InterceptRdDRx));
        Log(("ctrl.u16InterceptWrDRx %x\n", pVMCB->ctrl.u16InterceptWrDRx));
        Log(("ctrl.u32InterceptException %x\n", pVMCB->ctrl.u32InterceptException));
        Log(("ctrl.u32InterceptCtrl1 %x\n", pVMCB->ctrl.u32InterceptCtrl1));
        Log(("ctrl.u32InterceptCtrl2 %x\n", pVMCB->ctrl.u32InterceptCtrl2));
        Log(("ctrl.u64IOPMPhysAddr %VX64\n", pVMCB->ctrl.u64IOPMPhysAddr));
        Log(("ctrl.u64MSRPMPhysAddr %VX64\n", pVMCB->ctrl.u64MSRPMPhysAddr));
        Log(("ctrl.u64TSCOffset %VX64\n", pVMCB->ctrl.u64TSCOffset));

        Log(("ctrl.TLBCtrl.u32ASID %x\n", pVMCB->ctrl.TLBCtrl.n.u32ASID));
        Log(("ctrl.TLBCtrl.u1TLBFlush %x\n", pVMCB->ctrl.TLBCtrl.n.u1TLBFlush));
        Log(("ctrl.TLBCtrl.u7Reserved %x\n", pVMCB->ctrl.TLBCtrl.n.u7Reserved));
        Log(("ctrl.TLBCtrl.u24Reserved %x\n", pVMCB->ctrl.TLBCtrl.n.u24Reserved));

        Log(("ctrl.IntCtrl.u8VTPR %x\n", pVMCB->ctrl.IntCtrl.n.u8VTPR));
        Log(("ctrl.IntCtrl.u1VIrqValid %x\n", pVMCB->ctrl.IntCtrl.n.u1VIrqValid));
        Log(("ctrl.IntCtrl.u7Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u7Reserved));
        Log(("ctrl.IntCtrl.u4VIrqPriority %x\n", pVMCB->ctrl.IntCtrl.n.u4VIrqPriority));
        Log(("ctrl.IntCtrl.u1IgnoreTPR %x\n", pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR));
        Log(("ctrl.IntCtrl.u3Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u3Reserved));
        Log(("ctrl.IntCtrl.u1VIrqMasking %x\n", pVMCB->ctrl.IntCtrl.n.u1VIrqMasking));
        Log(("ctrl.IntCtrl.u7Reserved2 %x\n", pVMCB->ctrl.IntCtrl.n.u7Reserved2));
        Log(("ctrl.IntCtrl.u8VIrqVector %x\n", pVMCB->ctrl.IntCtrl.n.u8VIrqVector));
        Log(("ctrl.IntCtrl.u24Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u24Reserved));

        Log(("ctrl.u64IntShadow %VX64\n", pVMCB->ctrl.u64IntShadow));
        Log(("ctrl.u64ExitCode %VX64\n", pVMCB->ctrl.u64ExitCode));
        Log(("ctrl.u64ExitInfo1 %VX64\n", pVMCB->ctrl.u64ExitInfo1));
        Log(("ctrl.u64ExitInfo2 %VX64\n", pVMCB->ctrl.u64ExitInfo2));
        Log(("ctrl.ExitIntInfo.u8Vector %x\n", pVMCB->ctrl.ExitIntInfo.n.u8Vector));
        Log(("ctrl.ExitIntInfo.u3Type %x\n", pVMCB->ctrl.ExitIntInfo.n.u3Type));
        Log(("ctrl.ExitIntInfo.u1ErrorCodeValid %x\n", pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
        Log(("ctrl.ExitIntInfo.u19Reserved %x\n", pVMCB->ctrl.ExitIntInfo.n.u19Reserved));
        Log(("ctrl.ExitIntInfo.u1Valid %x\n", pVMCB->ctrl.ExitIntInfo.n.u1Valid));
        Log(("ctrl.ExitIntInfo.u32ErrorCode %x\n", pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode));
        Log(("ctrl.NestedPaging %VX64\n", pVMCB->ctrl.NestedPaging.au64));
        Log(("ctrl.EventInject.u8Vector %x\n", pVMCB->ctrl.EventInject.n.u8Vector));
        Log(("ctrl.EventInject.u3Type %x\n", pVMCB->ctrl.EventInject.n.u3Type));
        Log(("ctrl.EventInject.u1ErrorCodeValid %x\n", pVMCB->ctrl.EventInject.n.u1ErrorCodeValid));
        Log(("ctrl.EventInject.u19Reserved %x\n", pVMCB->ctrl.EventInject.n.u19Reserved));
        Log(("ctrl.EventInject.u1Valid %x\n", pVMCB->ctrl.EventInject.n.u1Valid));
        Log(("ctrl.EventInject.u32ErrorCode %x\n", pVMCB->ctrl.EventInject.n.u32ErrorCode));

        Log(("ctrl.u64NestedPagingCR3 %VX64\n", pVMCB->ctrl.u64NestedPagingCR3));
        Log(("ctrl.u64LBRVirt %VX64\n", pVMCB->ctrl.u64LBRVirt));

        Log(("guest.CS.u16Sel %04X\n", pVMCB->guest.CS.u16Sel));
        Log(("guest.CS.u16Attr %04X\n", pVMCB->guest.CS.u16Attr));
        Log(("guest.CS.u32Limit %X\n", pVMCB->guest.CS.u32Limit));
        Log(("guest.CS.u64Base %VX64\n", pVMCB->guest.CS.u64Base));
        Log(("guest.DS.u16Sel %04X\n", pVMCB->guest.DS.u16Sel));
        Log(("guest.DS.u16Attr %04X\n", pVMCB->guest.DS.u16Attr));
        Log(("guest.DS.u32Limit %X\n", pVMCB->guest.DS.u32Limit));
        Log(("guest.DS.u64Base %VX64\n", pVMCB->guest.DS.u64Base));
        Log(("guest.ES.u16Sel %04X\n", pVMCB->guest.ES.u16Sel));
        Log(("guest.ES.u16Attr %04X\n", pVMCB->guest.ES.u16Attr));
        Log(("guest.ES.u32Limit %X\n", pVMCB->guest.ES.u32Limit));
        Log(("guest.ES.u64Base %VX64\n", pVMCB->guest.ES.u64Base));
        Log(("guest.FS.u16Sel %04X\n", pVMCB->guest.FS.u16Sel));
        Log(("guest.FS.u16Attr %04X\n", pVMCB->guest.FS.u16Attr));
        Log(("guest.FS.u32Limit %X\n", pVMCB->guest.FS.u32Limit));
        Log(("guest.FS.u64Base %VX64\n", pVMCB->guest.FS.u64Base));
        Log(("guest.GS.u16Sel %04X\n", pVMCB->guest.GS.u16Sel));
        Log(("guest.GS.u16Attr %04X\n", pVMCB->guest.GS.u16Attr));
        Log(("guest.GS.u32Limit %X\n", pVMCB->guest.GS.u32Limit));
        Log(("guest.GS.u64Base %VX64\n", pVMCB->guest.GS.u64Base));

        Log(("guest.GDTR.u32Limit %X\n", pVMCB->guest.GDTR.u32Limit));
        Log(("guest.GDTR.u64Base %VX64\n", pVMCB->guest.GDTR.u64Base));

        Log(("guest.LDTR.u16Sel %04X\n", pVMCB->guest.LDTR.u16Sel));
        Log(("guest.LDTR.u16Attr %04X\n", pVMCB->guest.LDTR.u16Attr));
        Log(("guest.LDTR.u32Limit %X\n", pVMCB->guest.LDTR.u32Limit));
        Log(("guest.LDTR.u64Base %VX64\n", pVMCB->guest.LDTR.u64Base));

        Log(("guest.IDTR.u32Limit %X\n", pVMCB->guest.IDTR.u32Limit));
        Log(("guest.IDTR.u64Base %VX64\n", pVMCB->guest.IDTR.u64Base));

        Log(("guest.TR.u16Sel %04X\n", pVMCB->guest.TR.u16Sel));
        Log(("guest.TR.u16Attr %04X\n", pVMCB->guest.TR.u16Attr));
        Log(("guest.TR.u32Limit %X\n", pVMCB->guest.TR.u32Limit));
        Log(("guest.TR.u64Base %VX64\n", pVMCB->guest.TR.u64Base));

        Log(("guest.u8CPL %X\n", pVMCB->guest.u8CPL));
        Log(("guest.u64CR0 %VX64\n", pVMCB->guest.u64CR0));
        Log(("guest.u64CR2 %VX64\n", pVMCB->guest.u64CR2));
        Log(("guest.u64CR3 %VX64\n", pVMCB->guest.u64CR3));
        Log(("guest.u64CR4 %VX64\n", pVMCB->guest.u64CR4));
        Log(("guest.u64DR6 %VX64\n", pVMCB->guest.u64DR6));
        Log(("guest.u64DR7 %VX64\n", pVMCB->guest.u64DR7));

        Log(("guest.u64RIP %VX64\n", pVMCB->guest.u64RIP));
        Log(("guest.u64RSP %VX64\n", pVMCB->guest.u64RSP));
        Log(("guest.u64RAX %VX64\n", pVMCB->guest.u64RAX));
        Log(("guest.u64RFlags %VX64\n", pVMCB->guest.u64RFlags));

        Log(("guest.u64SysEnterCS %VX64\n", pVMCB->guest.u64SysEnterCS));
        Log(("guest.u64SysEnterEIP %VX64\n", pVMCB->guest.u64SysEnterEIP));
        Log(("guest.u64SysEnterESP %VX64\n", pVMCB->guest.u64SysEnterESP));

        Log(("guest.u64EFER %VX64\n", pVMCB->guest.u64EFER));
        Log(("guest.u64STAR %VX64\n", pVMCB->guest.u64STAR));
        Log(("guest.u64LSTAR %VX64\n", pVMCB->guest.u64LSTAR));
        Log(("guest.u64CSTAR %VX64\n", pVMCB->guest.u64CSTAR));
        Log(("guest.u64SFMASK %VX64\n", pVMCB->guest.u64SFMASK));
        Log(("guest.u64KernelGSBase %VX64\n", pVMCB->guest.u64KernelGSBase));
        Log(("guest.u64GPAT %VX64\n", pVMCB->guest.u64GPAT));
        Log(("guest.u64DBGCTL %VX64\n", pVMCB->guest.u64DBGCTL));
        Log(("guest.u64BR_FROM %VX64\n", pVMCB->guest.u64BR_FROM));
        Log(("guest.u64BR_TO %VX64\n", pVMCB->guest.u64BR_TO));
        Log(("guest.u64LASTEXCPFROM %VX64\n", pVMCB->guest.u64LASTEXCPFROM));
        Log(("guest.u64LASTEXCPTO %VX64\n", pVMCB->guest.u64LASTEXCPTO));

#endif
        rc = VERR_SVM_UNABLE_TO_START_VM;
        goto end;
    }

    /* Let's first sync back eip, esp, and eflags. */
    pCtx->eip        = pVMCB->guest.u64RIP;
    pCtx->esp        = pVMCB->guest.u64RSP;
    pCtx->eflags.u32 = pVMCB->guest.u64RFlags;
    /* eax is saved/restored across the vmrun instruction */
    pCtx->eax        = pVMCB->guest.u64RAX;
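    /* Note: RAX, RSP and RIP are the only general purpose registers the VMCB
       holds; all other GPRs survive the world switch in the SVMVMRun wrapper,
       which is why only eax needs syncing here. */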

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    SVM_READ_SELREG(SS, ss);
    SVM_READ_SELREG(CS, cs);
    SVM_READ_SELREG(DS, ds);
    SVM_READ_SELREG(ES, es);
    SVM_READ_SELREG(FS, fs);
    SVM_READ_SELREG(GS, gs);

    /* Note: no reason to sync back the CRx and DRx registers. They can't be changed by the guest. */
    /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */
    if (    pVM->hwaccm.s.fNestedPaging
        &&  pCtx->cr3 != pVMCB->guest.u64CR3)
    {
        CPUMSetGuestCR3(pVM, pVMCB->guest.u64CR3);
        PGMUpdateCR3(pVM, pVMCB->guest.u64CR3);
    }

    /** @note NOW IT'S SAFE FOR LOGGING! */

    /* Take care of instruction fusing (sti, mov ss) */
    if (pVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
    {
        Log(("uInterruptState %x eip=%VGv\n", pVMCB->ctrl.u64IntShadow, pCtx->eip));
        EMSetInhibitInterruptsPC(pVM, pCtx->eip);
    }
    else
        VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);

    Log2(("exitCode = %x\n", exitCode));

    /* Sync back the debug registers. */
    /** @todo Implement debug registers correctly. */
    pCtx->dr6 = pVMCB->guest.u64DR6;
    pCtx->dr7 = pVMCB->guest.u64DR7;

    /* Check if an injected event was interrupted prematurely. */
    pVM->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
    if (    pVMCB->ctrl.ExitIntInfo.n.u1Valid
        &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */)
    {
        Log(("Pending inject %VX64 at %08x exit=%08x\n", pVM->hwaccm.s.Event.intInfo, pCtx->eip, exitCode));
        pVM->hwaccm.s.Event.fPending = true;
        /* Error code present? (redundant) */
        if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
        {
            pVM->hwaccm.s.Event.errCode = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
        }
        else
            pVM->hwaccm.s.Event.errCode = 0;
    }
#ifdef VBOX_WITH_STATISTICS
    if (exitCode == SVM_EXIT_NPF)
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitReasonNPF);
    else
        STAM_COUNTER_INC(&pVM->hwaccm.s.pStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);
#endif

    /* Deal with the reason of the VM-exit. */
    switch (exitCode)
    {
        case SVM_EXIT_EXCEPTION_0:  case SVM_EXIT_EXCEPTION_1:  case SVM_EXIT_EXCEPTION_2:  case SVM_EXIT_EXCEPTION_3:
        case SVM_EXIT_EXCEPTION_4:  case SVM_EXIT_EXCEPTION_5:  case SVM_EXIT_EXCEPTION_6:  case SVM_EXIT_EXCEPTION_7:
        case SVM_EXIT_EXCEPTION_8:  case SVM_EXIT_EXCEPTION_9:  case SVM_EXIT_EXCEPTION_A:  case SVM_EXIT_EXCEPTION_B:
        case SVM_EXIT_EXCEPTION_C:  case SVM_EXIT_EXCEPTION_D:  case SVM_EXIT_EXCEPTION_E:  case SVM_EXIT_EXCEPTION_F:
        case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11: case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13:
        case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17:
        case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19: case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B:
        case SVM_EXIT_EXCEPTION_1C: case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
        {
            /* Pending trap. */
            SVM_EVENT   Event;
            uint32_t    vector = exitCode - SVM_EXIT_EXCEPTION_0;
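            /* Note: exception intercepts use exit codes SVM_EXIT_EXCEPTION_0 + N
               (0x40..0x5f), so the original vector falls out of a simple
               subtraction. */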

            Log2(("Hardware/software interrupt %d\n", vector));
            switch (vector)
            {
#ifdef DEBUG
                case X86_XCPT_DB:
                    rc = DBGFR0Trap01Handler(pVM, CPUMCTX2CORE(pCtx), pVMCB->guest.u64DR6);
                    Assert(rc != VINF_EM_RAW_GUEST_TRAP);
                    break;
#endif

                case X86_XCPT_NM:
                {
                    uint32_t oldCR0;

                    Log(("#NM fault at %VGv\n", pCtx->eip));

                    /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
                    oldCR0 = ASMGetCR0();
                    /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
                    rc = CPUMHandleLazyFPU(pVM);
                    if (rc == VINF_SUCCESS)
                    {
                        Assert(CPUMIsGuestFPUStateActive(pVM));

                        /* CPUMHandleLazyFPU could have changed CR0; restore it. */
                        ASMSetCR0(oldCR0);

                        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowNM);

                        /* Continue execution. */
                        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;

                        goto ResumeExecution;
                    }

                    Log(("Forward #NM fault to the guest\n"));
                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNM);

                    Event.au64[0]    = 0;
                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
                    Event.n.u1Valid  = 1;
                    Event.n.u8Vector = X86_XCPT_NM;

                    SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
                    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                    goto ResumeExecution;
                }

                case X86_XCPT_PF: /* Page fault */
                {
                    uint32_t    errCode       = pVMCB->ctrl.u64ExitInfo1;   /* EXITINFO1 = error code */
                    RTGCUINTPTR uFaultAddress = pVMCB->ctrl.u64ExitInfo2;   /* EXITINFO2 = fault address */

#ifdef DEBUG
                    if (pVM->hwaccm.s.fNestedPaging)
                    {   /* A genuine pagefault.
                         * Forward the trap to the guest by injecting the exception and resuming execution.
                         */
                        Log(("Page fault at %VGv cr2=%VGv error code %x\n", pCtx->eip, uFaultAddress, errCode));
                        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);

                        /* Now we must update CR2. */
                        pCtx->cr2 = uFaultAddress;

                        Event.au64[0]            = 0;
                        Event.n.u3Type           = SVM_EVENT_EXCEPTION;
                        Event.n.u1Valid          = 1;
                        Event.n.u8Vector         = X86_XCPT_PF;
                        Event.n.u1ErrorCodeValid = 1;
                        Event.n.u32ErrorCode     = errCode;

                        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

                        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                        goto ResumeExecution;
                    }
#endif
                    Assert(!pVM->hwaccm.s.fNestedPaging);

                    Log2(("Page fault at %VGv cr2=%VGv error code %x\n", pCtx->eip, uFaultAddress, errCode));
                    /* Exit qualification contains the linear address of the page fault. */
                    TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
                    TRPMSetErrorCode(pVM, errCode);
                    TRPMSetFaultAddress(pVM, uFaultAddress);

                    /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
                    rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
                    Log2(("PGMTrap0eHandler %VGv returned %Vrc\n", pCtx->eip, rc));
                    if (rc == VINF_SUCCESS)
                    {   /* We've successfully synced our shadow pages, so let's just continue execution. */
                        Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", pCtx->eip, uFaultAddress, errCode));
                        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);

                        TRPMResetTrap(pVM);

                        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                        goto ResumeExecution;
                    }
                    else
                    if (rc == VINF_EM_RAW_GUEST_TRAP)
                    {   /* A genuine pagefault.
                         * Forward the trap to the guest by injecting the exception and resuming execution.
                         */
                        Log2(("Forward page fault to the guest\n"));
                        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
                        /* The error code might have been changed. */
                        errCode = TRPMGetErrorCode(pVM);

                        TRPMResetTrap(pVM);

                        /* Now we must update CR2. */
                        pCtx->cr2 = uFaultAddress;

                        Event.au64[0]            = 0;
                        Event.n.u3Type           = SVM_EVENT_EXCEPTION;
                        Event.n.u1Valid          = 1;
                        Event.n.u8Vector         = X86_XCPT_PF;
                        Event.n.u1ErrorCodeValid = 1;
                        Event.n.u32ErrorCode     = errCode;

                        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

                        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                        goto ResumeExecution;
                    }
#ifdef VBOX_STRICT
                    if (rc != VINF_EM_RAW_EMULATE_INSTR)
                        LogFlow(("PGMTrap0eHandler failed with %d\n", rc));
#endif
                    /* Need to go back to the recompiler to emulate the instruction. */
                    TRPMResetTrap(pVM);
                    break;
                }

                case X86_XCPT_MF: /* Floating point exception. */
                {
                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestMF);
                    if (!(pCtx->cr0 & X86_CR0_NE))
                    {
                        /* old style FPU error reporting needs some extra work. */
                        /** @todo don't fall back to the recompiler, but do it manually. */
                        rc = VINF_EM_RAW_EMULATE_INSTR;
                        break;
                    }
                    Log(("Trap %x at %VGv\n", vector, pCtx->eip));

                    Event.au64[0]    = 0;
                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
                    Event.n.u1Valid  = 1;
                    Event.n.u8Vector = X86_XCPT_MF;

                    SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

                    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                    goto ResumeExecution;
                }

#ifdef VBOX_STRICT
                case X86_XCPT_GP: /* General protection failure exception. */
                case X86_XCPT_UD: /* Invalid opcode exception. */
                case X86_XCPT_DE: /* Divide error exception. */
                case X86_XCPT_SS: /* Stack segment exception. */
                case X86_XCPT_NP: /* Segment not present exception. */
                {
                    Event.au64[0]    = 0;
                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
                    Event.n.u1Valid  = 1;
                    Event.n.u8Vector = vector;

                    switch (vector)
                    {
                        case X86_XCPT_GP:
                            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestGP);
                            Event.n.u1ErrorCodeValid = 1;
                            Event.n.u32ErrorCode     = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
                            break;
                        case X86_XCPT_DE:
                            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestDE);
                            break;
                        case X86_XCPT_UD:
                            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestUD);
                            break;
                        case X86_XCPT_SS:
                            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestSS);
                            Event.n.u1ErrorCodeValid = 1;
                            Event.n.u32ErrorCode     = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
                            break;
                        case X86_XCPT_NP:
                            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNP);
                            Event.n.u1ErrorCodeValid = 1;
                            Event.n.u32ErrorCode     = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
                            break;
                    }
                    Log(("Trap %x at %VGv esi=%x\n", vector, pCtx->eip, pCtx->esi));
                    SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

                    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                    goto ResumeExecution;
                }
#endif
                default:
                    AssertMsgFailed(("Unexpected vm-exit caused by exception %x\n", vector));
                    rc = VERR_EM_INTERNAL_ERROR;
                    break;

            } /* switch (vector) */
            break;
        }

        case SVM_EXIT_NPF:
        {
            /* EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault. */
            uint32_t errCode       = pVMCB->ctrl.u64ExitInfo1;  /* EXITINFO1 = error code */
            RTGCPHYS uFaultAddress = pVMCB->ctrl.u64ExitInfo2;  /* EXITINFO2 = fault address */

            Assert(pVM->hwaccm.s.fNestedPaging);

            Log(("Nested page fault at %VGv cr2=%VGp error code %x\n", pCtx->eip, uFaultAddress, errCode));
            /* Exit qualification contains the linear address of the page fault. */
            TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
            TRPMSetErrorCode(pVM, errCode);
            TRPMSetFaultAddress(pVM, uFaultAddress);

            /* Handle the pagefault trap for the nested shadow table. */
            rc = PGMR0Trap0eHandlerNestedPaging(pVM, PGMGetHostMode(pVM), errCode, CPUMCTX2CORE(pCtx), uFaultAddress);
            Log2(("PGMR0Trap0eHandlerNestedPaging %VGv returned %Vrc\n", pCtx->eip, rc));
            if (rc == VINF_SUCCESS)
            {   /* We've successfully synced our shadow pages, so let's just continue execution. */
                Log2(("Shadow page fault at %VGv cr2=%VGp error code %x\n", pCtx->eip, uFaultAddress, errCode));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);

                TRPMResetTrap(pVM);

                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }

#ifdef VBOX_STRICT
            if (rc != VINF_EM_RAW_EMULATE_INSTR)
                LogFlow(("PGMTrap0eHandlerNestedPaging failed with %d\n", rc));
#endif
            /* Need to go back to the recompiler to emulate the instruction. */
            TRPMResetTrap(pVM);
            break;
        }

        case SVM_EXIT_VINTR:
            /* A virtual interrupt is about to be delivered, which means IF=1. */
            Log(("SVM_EXIT_VINTR IF=%d\n", pCtx->eflags.Bits.u1IF));
            pVMCB->ctrl.IntCtrl.n.u1VIrqValid  = 0;
            pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR  = 0;
            pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0;
            goto ResumeExecution;

        case SVM_EXIT_FERR_FREEZE:
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_SMI:
        case SVM_EXIT_INIT:
            /* External interrupt; leave to allow it to be dispatched again. */
            rc = VINF_EM_RAW_INTERRUPT;
            break;

        case SVM_EXIT_WBINVD:
        case SVM_EXIT_INVD:     /* Guest software attempted to execute INVD. */
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvd);
            /* Skip instruction and continue directly. */
            pCtx->eip += 2;     /** @note hardcoded opcode size! */
            /* Continue execution.*/
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;

        case SVM_EXIT_CPUID:    /* Guest software attempted to execute CPUID. */
        {
            Log2(("SVM: Cpuid %x\n", pCtx->eax));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCpuid);
            rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
            if (rc == VINF_SUCCESS)
            {
                /* Update EIP and continue execution. */
                pCtx->eip += 2;     /** @note hardcoded opcode size! */
                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
            AssertMsgFailed(("EMU: cpuid failed with %Vrc\n", rc));
            rc = VINF_EM_RAW_EMULATE_INSTR;
            break;
        }

        case SVM_EXIT_RDTSC:    /* Guest software attempted to execute RDTSC. */
        {
            Log2(("SVM: Rdtsc\n"));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitRdtsc);
            rc = EMInterpretRdtsc(pVM, CPUMCTX2CORE(pCtx));
            if (rc == VINF_SUCCESS)
            {
                /* Update EIP and continue execution. */
                pCtx->eip += 2;     /** @note hardcoded opcode size! */
                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
            AssertMsgFailed(("EMU: rdtsc failed with %Vrc\n", rc));
            rc = VINF_EM_RAW_EMULATE_INSTR;
            break;
        }

        case SVM_EXIT_INVLPG:   /* Guest software attempted to execute INVLPG. */
        {
            Log2(("SVM: invlpg\n"));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvpg);

            Assert(!pVM->hwaccm.s.fNestedPaging);

            /* Truly a pita. Why can't SVM give the same information as VT-x? */
            rc = SVMR0InterpretInvpg(pVM, CPUMCTX2CORE(pCtx), pVMCB->ctrl.TLBCtrl.n.u32ASID);
            if (rc == VINF_SUCCESS)
            {
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushPageInvlpg);
                goto ResumeExecution;   /* eip already updated */
            }
            break;
        }

        case SVM_EXIT_WRITE_CR0:  case SVM_EXIT_WRITE_CR1:  case SVM_EXIT_WRITE_CR2:  case SVM_EXIT_WRITE_CR3:
        case SVM_EXIT_WRITE_CR4:  case SVM_EXIT_WRITE_CR5:  case SVM_EXIT_WRITE_CR6:  case SVM_EXIT_WRITE_CR7:
        case SVM_EXIT_WRITE_CR8:  case SVM_EXIT_WRITE_CR9:  case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
        case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
        {
            uint32_t cbSize;

            Log2(("SVM: %VGv mov cr%d, x\n", pCtx->eip, exitCode - SVM_EXIT_WRITE_CR0));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxWrite);
            rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);

            switch (exitCode - SVM_EXIT_WRITE_CR0)
            {
                case 0:
                    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
                    break;
                case 2:
                    break;
                case 3:
                    Assert(!pVM->hwaccm.s.fNestedPaging);
                    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
                    break;
                case 4:
                    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
                    break;
                default:
                    AssertFailed();
            }
            /* Check if a sync operation is pending. */
            if (    rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
                &&  VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
            {
                rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
                AssertRC(rc);

                STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBCRxChange);

                /** @note Force a TLB flush. SVM requires us to do it manually. */
                pVM->hwaccm.s.svm.fForceTLBFlush = true;
            }
            if (rc == VINF_SUCCESS)
            {
                /* EIP has been updated already. */

                /* Only resume if successful. */
                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
            Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
            break;
        }

        case SVM_EXIT_READ_CR0:  case SVM_EXIT_READ_CR1:  case SVM_EXIT_READ_CR2:  case SVM_EXIT_READ_CR3:
        case SVM_EXIT_READ_CR4:  case SVM_EXIT_READ_CR5:  case SVM_EXIT_READ_CR6:  case SVM_EXIT_READ_CR7:
        case SVM_EXIT_READ_CR8:  case SVM_EXIT_READ_CR9:  case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
        case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
        {
            uint32_t cbSize;

            Log2(("SVM: %VGv mov x, cr%d\n", pCtx->eip, exitCode - SVM_EXIT_READ_CR0));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
            rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
            if (rc == VINF_SUCCESS)
            {
                /* EIP has been updated already. */

                /* Only resume if successful. */
                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
            Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
            break;
        }

        case SVM_EXIT_WRITE_DR0:  case SVM_EXIT_WRITE_DR1:  case SVM_EXIT_WRITE_DR2:  case SVM_EXIT_WRITE_DR3:
        case SVM_EXIT_WRITE_DR4:  case SVM_EXIT_WRITE_DR5:  case SVM_EXIT_WRITE_DR6:  case SVM_EXIT_WRITE_DR7:
        case SVM_EXIT_WRITE_DR8:  case SVM_EXIT_WRITE_DR9:  case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
        case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
        {
            uint32_t cbSize;

            Log2(("SVM: %VGv mov dr%d, x\n", pCtx->eip, exitCode - SVM_EXIT_WRITE_DR0));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxWrite);
            rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
            if (rc == VINF_SUCCESS)
            {
                /* EIP has been updated already. */

                /* Only resume if successful. */
                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
            Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
            break;
        }

    case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:
    case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:
    case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
    case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
    {
        uint32_t cbSize;

        Log2(("SVM: %VGv mov x, dr%d\n", pCtx->eip, exitCode - SVM_EXIT_READ_DR0));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
        rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
        if (rc == VINF_SUCCESS)
        {
            /* EIP has been updated already. */

            /* Only resume if successful. */
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
        break;
    }

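    /*
     * I/O instruction intercepts. EXITINFO1 describes the access: operand
     * size (OP8/OP16/OP32), string instruction (STR), REP prefix, direction
     * (Type 0 = OUT/write, 1 = IN/read) and the port number. EXITINFO2 holds
     * the RIP of the following instruction, which is used below to skip the
     * instruction once the access has been handled.
     */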
    /* Note: We'll get a #GP if the IO instruction isn't allowed (IOPL or TSS bitmap); no need to double check. */
    case SVM_EXIT_IOIO:             /* I/O instruction. */
    {
        SVM_IOIO_EXIT IoExitInfo;
        uint32_t      uIOSize, uAndVal;

        IoExitInfo.au32[0] = pVMCB->ctrl.u64ExitInfo1;

        /** @todo could use a lookup table here */
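        /* One possible shape for such a table (a sketch only, not compiled in):
         *     static const struct { uint32_t cb, fAnd; } s_aIOSize[] =
         *     { { 0, 0 }, { 1, 0xff }, { 2, 0xffff }, { 0, 0 }, { 4, 0xffffffff } };
         * indexed by the one-hot OP8/OP16/OP32 bit field (1, 2 or 4). The
         * if/else chain below is what actually runs. */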
        if (IoExitInfo.n.u1OP8)
        {
            uIOSize = 1;
            uAndVal = 0xff;
        }
        else if (IoExitInfo.n.u1OP16)
        {
            uIOSize = 2;
            uAndVal = 0xffff;
        }
        else if (IoExitInfo.n.u1OP32)
        {
            uIOSize = 4;
            uAndVal = 0xffffffff;
        }
        else
        {
            AssertFailed(); /* should be fatal. */
            rc = VINF_EM_RAW_EMULATE_INSTR;
            break;
        }

        if (IoExitInfo.n.u1STR)
        {
            /* ins/outs */
            uint32_t prefix = 0;
            if (IoExitInfo.n.u1REP)
                prefix |= PREFIX_REP;

            if (IoExitInfo.n.u1Type == 0)
            {
                Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringWrite);
                rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
            }
            else
            {
                Log2(("IOMInterpretINSEx %VGv %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringRead);
                rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
            }
        }
        else
        {
            /* normal in/out */
            Assert(!IoExitInfo.n.u1REP);

            if (IoExitInfo.n.u1Type == 0)
            {
                Log2(("IOMIOPortWrite %VGv %x %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOWrite);
                rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
            }
            else
            {
                uint32_t u32Val = 0;

                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIORead);
                rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize);
                if (IOM_SUCCESS(rc))
                {
                    /* Write back to the EAX register. */
                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
                    Log2(("IOMIOPortRead %VGv %x %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, u32Val & uAndVal, uIOSize));
                }
            }
        }
        /*
         * Handle the I/O return codes.
         * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
         */
        if (IOM_SUCCESS(rc))
        {
            /* Update EIP and continue execution. */
            pCtx->eip = pVMCB->ctrl.u64ExitInfo2;   /* RIP/EIP of the next instruction is saved in EXITINFO2. */
            if (RT_LIKELY(rc == VINF_SUCCESS))
            {
                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
            Log2(("EM status from IO at %VGv %x size %d: %Vrc\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize, rc));
            break;
        }

#ifdef VBOX_STRICT
        if (rc == VINF_IOM_HC_IOPORT_READ)
            Assert(IoExitInfo.n.u1Type != 0);
        else if (rc == VINF_IOM_HC_IOPORT_WRITE)
            Assert(IoExitInfo.n.u1Type == 0);
        else
            AssertMsg(VBOX_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Vrc\n", rc));
#endif
        Log2(("Failed IO at %VGv %x size %d\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize));
        break;
    }

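    /*
     * HLT intercept: if an external interrupt is already pending we can
     * simply skip the one-byte HLT and continue; otherwise drop back to
     * ring 3 and let EM halt until something wakes the VM up.
     */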
    case SVM_EXIT_HLT:
        /* Check if external interrupts are pending; if so, don't switch back. */
        if (VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
        {
            pCtx->eip++;    /* skip hlt */
            goto ResumeExecution;
        }

        rc = VINF_EM_RAW_EMULATE_INSTR_HLT;
        break;

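    /*
     * SVM and other privileged instructions we don't let the guest execute:
     * inject a #UD, which matches what real hardware raises when the
     * corresponding feature isn't available to it.
     */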
    case SVM_EXIT_RSM:
    case SVM_EXIT_INVLPGA:
    case SVM_EXIT_VMRUN:
    case SVM_EXIT_VMMCALL:
    case SVM_EXIT_VMLOAD:
    case SVM_EXIT_VMSAVE:
    case SVM_EXIT_STGI:
    case SVM_EXIT_CLGI:
    case SVM_EXIT_SKINIT:
    case SVM_EXIT_RDTSCP:
    {
        /* Unsupported instructions. */
        SVM_EVENT Event;

        Event.au64[0]    = 0;
        Event.n.u3Type   = SVM_EVENT_EXCEPTION;
        Event.n.u1Valid  = 1;
        Event.n.u8Vector = X86_XCPT_UD;

        Log(("Forced #UD trap at %VGv\n", pCtx->eip));
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
        goto ResumeExecution;
    }

    /* Emulate in ring 3. */
    case SVM_EXIT_MONITOR:
    case SVM_EXIT_RDPMC:
    case SVM_EXIT_PAUSE:
    case SVM_EXIT_MWAIT_UNCOND:
    case SVM_EXIT_MWAIT_ARMED:
    case SVM_EXIT_MSR:
    case SVM_EXIT_TASK_SWITCH:          /* can change CR3; emulate */
        rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
        break;

    case SVM_EXIT_SHUTDOWN:
        rc = VINF_EM_RESET;             /* Triple fault equals a reset. */
        break;

    case SVM_EXIT_IDTR_READ:
    case SVM_EXIT_GDTR_READ:
    case SVM_EXIT_LDTR_READ:
    case SVM_EXIT_TR_READ:
    case SVM_EXIT_IDTR_WRITE:
    case SVM_EXIT_GDTR_WRITE:
    case SVM_EXIT_LDTR_WRITE:
    case SVM_EXIT_TR_WRITE:
    case SVM_EXIT_CR0_SEL_WRITE:
    default:
        /* Unexpected exit codes. */
        rc = VERR_EM_INTERNAL_ERROR;
        AssertMsgFailed(("Unexpected exit code %x\n", exitCode));   /* Can't happen. */
        break;
    }

end:
    if (fGuestStateSynced)
    {
        /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR. */
        SVM_READ_SELREG(LDTR, ldtr);
        SVM_READ_SELREG(TR, tr);

        pCtx->gdtr.cbGdt    = pVMCB->guest.GDTR.u32Limit;
        pCtx->gdtr.pGdt     = pVMCB->guest.GDTR.u64Base;

        pCtx->idtr.cbIdt    = pVMCB->guest.IDTR.u32Limit;
        pCtx->idtr.pIdt     = pVMCB->guest.IDTR.u64Base;

        /*
         * System MSRs
         */
        pCtx->SysEnter.cs   = pVMCB->guest.u64SysEnterCS;
        pCtx->SysEnter.eip  = pVMCB->guest.u64SysEnterEIP;
        pCtx->SysEnter.esp  = pVMCB->guest.u64SysEnterESP;
    }

    /* Signal changes for the recompiler. */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);

    /* If we executed VMRUN and an external interrupt was pending, then we don't have to do a full sync the next time. */
    if (exitCode == SVM_EXIT_INTR)
    {
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
        /* On the next entry we'll only sync the host context. */
        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
    }
    else
    {
        /* On the next entry we'll sync everything. */
        /** @todo we can do better than this */
        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
    }

    /* Translate into a less severe return code. */
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;

    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
    return rc;
}

/**
 * Enters the AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCpu        CPU info struct
 */
HWACCMR0DECL(int) SVMR0Enter(PVM pVM, PHWACCM_CPUINFO pCpu)
{
    Assert(pVM->hwaccm.s.svm.fSupported);

    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVM->hwaccm.s.svm.idLastCpu, pCpu->uCurrentASID));
    /* Fresh session: the next VMRUN must not rely on state left behind by a previous one. */
    pVM->hwaccm.s.svm.fResumeVM = false;

    /* Force a reload of LDTR, so we'll execute VMLOAD to load additional guest state. */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;

    return VINF_SUCCESS;
}


/**
 * Leaves the AMD-V session
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0Leave(PVM pVM)
{
    Assert(pVM->hwaccm.s.svm.fSupported);
    return VINF_SUCCESS;
}


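/**
 * Worker for SVMR0InterpretInvpg; interprets the INVLPG operand and
 * invalidates the page via PGMInvalidatePage as well as in the guest TLB
 * for the given ASID (SVMInvlpgA).
 */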
static int svmInterpretInvlPg(PVM pVM, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
{
    OP_PARAMVAL param1;
    RTGCPTR     addr;

    int rc = DISQueryParamVal(pRegFrame, pCpu, &pCpu->param1, &param1, PARAM_SOURCE);
    if (VBOX_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    switch (param1.type)
    {
    case PARMTYPE_IMMEDIATE:
    case PARMTYPE_ADDRESS:
        if (!(param1.flags & PARAM_VAL32))
            return VERR_EM_INTERPRETER;
        addr = (RTGCPTR)param1.val.val32;
        break;

    default:
        return VERR_EM_INTERPRETER;
    }

    /** @todo Is addr always a flat linear address, or is it ds-based
     *        (in the absence of segment override prefixes)? */
    rc = PGMInvalidatePage(pVM, addr);
    if (VBOX_SUCCESS(rc))
    {
        /* Manually invalidate the page for the VM's TLB. */
        Log(("SVMInvlpgA %VGv ASID=%d\n", addr, uASID));
        SVMInvlpgA(addr, uASID);
        return VINF_SUCCESS;
    }
    Assert(rc == VERR_REM_FLUSHED_PAGES_OVERFLOW);
    return rc;
}

/**
 * Interprets INVLPG
 *
 * @returns VBox status code.
 * @retval  VINF_*                  Scheduling instructions.
 * @retval  VERR_EM_INTERPRETER     Something we can't cope with.
 * @retval  VERR_*                  Fatal errors.
 *
 * @param   pVM         The VM handle.
 * @param   pRegFrame   The register frame.
 * @param   uASID       Tagged TLB id for the guest
 *
 * Updates the EIP if an instruction was executed successfully.
 */
static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID)
{
    /*
     * Only allow 32-bit code, since the disassembly below is done in 32-bit
     * mode; everything else falls back to VERR_EM_INTERPRETER.
     */
    if (SELMIsSelector32Bit(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid))
    {
        RTGCPTR pbCode;
        int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &pbCode);
        if (VBOX_SUCCESS(rc))
        {
            uint32_t    cbOp;
            DISCPUSTATE Cpu;

            Cpu.mode = CPUMODE_32BIT;
            rc = EMInterpretDisasOneEx(pVM, pbCode, pRegFrame, &Cpu, &cbOp);
            Assert(VBOX_FAILURE(rc) || Cpu.pCurInstr->opcode == OP_INVLPG);
            if (VBOX_SUCCESS(rc) && Cpu.pCurInstr->opcode == OP_INVLPG)
            {
                Assert(cbOp == Cpu.opsize);
                rc = svmInterpretInvlPg(pVM, &Cpu, pRegFrame, uASID);
                if (VBOX_SUCCESS(rc))
                {
                    pRegFrame->eip += cbOp; /* Move on to the next instruction. */
                }
                return rc;
            }
        }
    }
    return VERR_EM_INTERPRETER;
}


/**
 * Invalidates a guest page
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   GCVirt      Page to invalidate
 */
HWACCMR0DECL(int) SVMR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
{
    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVM->hwaccm.s.svm.fForceTLBFlush;

    /* Skip it if a TLB flush is already pending. */
    if (!fFlushPending)
    {
        SVM_VMCB *pVMCB;

        Log2(("SVMR0InvalidatePage %VGv\n", GCVirt));
        AssertReturn(pVM, VERR_INVALID_PARAMETER);
        Assert(pVM->hwaccm.s.svm.fSupported);

        pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
        AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

        STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushPageManual);
        SVMInvlpgA(GCVirt, pVMCB->ctrl.TLBCtrl.n.u32ASID);
    }
    return VINF_SUCCESS;
}