VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp@8848

Last change on this file since 8848 was 8848, checked in by vboxsync on 2008-05-15:

Backed out 30862; redundant

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 66.7 KB
/* $Id: HWSVMR0.cpp 8848 2008-05-15 13:08:01Z vboxsync $ */
/** @file
 * HWACCM SVM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/hwacc_svm.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/dis.h>
#include <VBox/dbgf.h>
#include <VBox/disopcode.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include "HWSVMR0.h"

static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID);

/**
 * Sets up and activates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   idCpu           The identifier for the CPU the function is called on.
 * @param   pVM             The VM to operate on.
 * @param   pvPageCpu       Pointer to the global CPU page.
 * @param   pPageCpuPhys    Physical address of the global CPU page.
 */
HWACCMR0DECL(int) SVMR0EnableCpu(RTCPUID idCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

    /* We must turn on AMD-V and set up the host state physical address, as those MSRs are per-cpu/core. */

    /* Turn on AMD-V in the EFER MSR. */
    uint64_t val = ASMRdMsr(MSR_K6_EFER);
    if (!(val & MSR_K6_EFER_SVME))
        ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);

    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, pPageCpuPhys);
    return VINF_SUCCESS;
}
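
/* A minimal sketch of how ring-0 code might verify that AMD-V is present and not
 * disabled by the BIOS before calling SVMR0EnableCpu. The CPUID leaf and the
 * VM_CR MSR layout are per the AMD APM; the helper and function names below are
 * illustrative assumptions, not definitions from this file.
 */
#if 0
static bool svmIsUsable(void)
{
    /* CPUID Fn8000_0001: ECX bit 2 reports SVM support. */
    if (!(ASMCpuId_ECX(0x80000001) & RT_BIT(2)))
        return false;

    /* VM_CR MSR (0xC0010114): bit 4 (SVMDIS) set means SVM is locked off. */
    if (ASMRdMsr(0xC0010114) & RT_BIT(4))
        return false;

    return true;
}
#endif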

/**
 * Deactivates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   idCpu           The identifier for the CPU the function is called on.
 * @param   pvPageCpu       Pointer to the global CPU page.
 * @param   pPageCpuPhys    Physical address of the global CPU page.
 */
HWACCMR0DECL(int) SVMR0DisableCpu(RTCPUID idCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

    /* Turn off AMD-V in the EFER MSR. */
    uint64_t val = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, val & ~MSR_K6_EFER_SVME);

    /* Invalidate host state physical address. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per VM AMD-V init.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0InitVM(PVM pVM)
{
    int rc;

    /* Allocate one page for the VM control block (VMCB). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjVMCB, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pVMCB = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjVMCB);
    pVM->hwaccm.s.svm.pVMCBPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjVMCB, 0);
    ASMMemZero32(pVM->hwaccm.s.svm.pVMCB, PAGE_SIZE);

    /* Allocate one page for the host context. */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjVMCBHost, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pVMCBHost = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjVMCBHost);
    pVM->hwaccm.s.svm.pVMCBHostPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjVMCBHost, 0);
    ASMMemZero32(pVM->hwaccm.s.svm.pVMCBHost, PAGE_SIZE);

    /* Allocate 12 KB for the IO bitmap (doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjIOBitmap, 3 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pIOBitmap = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjIOBitmap);
    pVM->hwaccm.s.svm.pIOBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjIOBitmap, 0);
    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(pVM->hwaccm.s.svm.pIOBitmap, PAGE_SIZE*3, 0xffffffff);

    /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.svm.pMSRBitmap = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjMSRBitmap);
    pVM->hwaccm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjMSRBitmap, 0);
    /* Set all bits to intercept all MSR accesses. */
    ASMMemFill32(pVM->hwaccm.s.svm.pMSRBitmap, PAGE_SIZE*2, 0xffffffff);

    /* Erratum 170 requires a forced TLB flush for each world switch:
     * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     *  Athlon X2:   0x6b    1/2
     *               0x68    1/2
     *  Athlon 64:   0x7f    1
     *               0x6f    2
     *  Sempron:     0x7f    1/2
     *               0x6f    2
     *               0x6c    2
     *               0x7c    2
     *  Turion 64:   0x68    2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32Family = ((u32Version >> 8) & 0xf) + (((u32Version >> 8) & 0xf) == 0xf ? (u32Version >> 20) & 0x7f : 0);
    u32Model  = ((u32Version >> 4) & 0xf);
    /* The extended model nibble is appended whenever the base family is 0xf (AMD CPUID spec),
     * so combined models like 0x68/0x6b decode correctly. */
    u32Model  = u32Model | ((((u32Version >> 8) & 0xf) == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping = u32Version & 0xf;
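    /* Example decode, assuming CPUID(1).EAX = 0x00060FB1 (an Athlon 64 X2 Brisbane part):
     *   stepping = 0x1, base model = 0xb, base family = 0xf, extended model = 0x6,
     *   so u32Model = 0x6b and u32Family = 0xf, which matches the fixed-model list
     *   above and therefore does not force a TLB flush below. */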
    if (    u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
        pVM->hwaccm.s.svm.fForceTLBFlush = true;
    }

    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per VM AMD-V termination.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0TermVM(PVM pVM)
{
    if (pVM->hwaccm.s.svm.pMemObjVMCB)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjVMCB, false);
        pVM->hwaccm.s.svm.pVMCB = 0;
        pVM->hwaccm.s.svm.pVMCBPhys = 0;
        pVM->hwaccm.s.svm.pMemObjVMCB = 0;
    }
    if (pVM->hwaccm.s.svm.pMemObjVMCBHost)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjVMCBHost, false);
        pVM->hwaccm.s.svm.pVMCBHost = 0;
        pVM->hwaccm.s.svm.pVMCBHostPhys = 0;
        pVM->hwaccm.s.svm.pMemObjVMCBHost = 0;
    }
    if (pVM->hwaccm.s.svm.pMemObjIOBitmap)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjIOBitmap, false);
        pVM->hwaccm.s.svm.pIOBitmap = 0;
        pVM->hwaccm.s.svm.pIOBitmapPhys = 0;
        pVM->hwaccm.s.svm.pMemObjIOBitmap = 0;
    }
    if (pVM->hwaccm.s.svm.pMemObjMSRBitmap)
    {
        RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjMSRBitmap, false);
        pVM->hwaccm.s.svm.pMSRBitmap = 0;
        pVM->hwaccm.s.svm.pMSRBitmapPhys = 0;
        pVM->hwaccm.s.svm.pMemObjMSRBitmap = 0;
    }
    return VINF_SUCCESS;
}

/**
 * Sets up AMD-V for the specified VM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;
    SVM_VMCB *pVMCB;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);

    Assert(pVM->hwaccm.s.svm.fSupported);

    pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* Program the control fields. Most of them never have to be changed again. */
    /* CR0/3/4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
    /** @note CR0 & CR4 can be safely read when guest and shadow copies are identical. */
    pVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4) | RT_BIT(8);

    /*
     * CR0/3/4 writes must be intercepted for obvious reasons.
     */
    pVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(3) | RT_BIT(4) | RT_BIT(8);

    /* Intercept all DRx reads and writes. */
    pVMCB->ctrl.u16InterceptRdDRx = RT_BIT(0) | RT_BIT(1) | RT_BIT(2) | RT_BIT(3) | RT_BIT(4) | RT_BIT(5) | RT_BIT(6) | RT_BIT(7);
    pVMCB->ctrl.u16InterceptWrDRx = RT_BIT(0) | RT_BIT(1) | RT_BIT(2) | RT_BIT(3) | RT_BIT(4) | RT_BIT(5) | RT_BIT(6) | RT_BIT(7);

    /* Currently we don't care about DRx reads or writes. DRx registers are trashed.
     * All breakpoints are automatically cleared when the VM exits.
     */

    /** @todo nested paging */
    /* Intercept #NM only; #PF is not relevant due to nested paging (we get a separate exit code (SVM_EXIT_NPF) for
     * page faults that need our attention).
     */
    pVMCB->ctrl.u32InterceptException = HWACCM_SVM_TRAP_MASK;

    pVMCB->ctrl.u32InterceptCtrl1 =   SVM_CTRL1_INTERCEPT_INTR
                                    | SVM_CTRL1_INTERCEPT_VINTR
                                    | SVM_CTRL1_INTERCEPT_NMI
                                    | SVM_CTRL1_INTERCEPT_SMI
                                    | SVM_CTRL1_INTERCEPT_INIT
                                    | SVM_CTRL1_INTERCEPT_CR0   /** @todo redundant? */
                                    | SVM_CTRL1_INTERCEPT_RDPMC
                                    | SVM_CTRL1_INTERCEPT_CPUID
                                    | SVM_CTRL1_INTERCEPT_RSM
                                    | SVM_CTRL1_INTERCEPT_HLT
                                    | SVM_CTRL1_INTERCEPT_INOUT_BITMAP
                                    | SVM_CTRL1_INTERCEPT_MSR_SHADOW
                                    | SVM_CTRL1_INTERCEPT_INVLPG
                                    | SVM_CTRL1_INTERCEPT_INVLPGA       /* AMD only */
                                    | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* fatal */
                                    | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Legacy FPU FERR handling. */
    pVMCB->ctrl.u32InterceptCtrl2 =   SVM_CTRL2_INTERCEPT_VMRUN         /* required */
                                    | SVM_CTRL2_INTERCEPT_VMMCALL
                                    | SVM_CTRL2_INTERCEPT_VMLOAD
                                    | SVM_CTRL2_INTERCEPT_VMSAVE
                                    | SVM_CTRL2_INTERCEPT_STGI
                                    | SVM_CTRL2_INTERCEPT_CLGI
                                    | SVM_CTRL2_INTERCEPT_SKINIT
                                    | SVM_CTRL2_INTERCEPT_RDTSCP        /* AMD only; we don't support this one */
                                    ;
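
    /* Note: SVM requires the VMRUN intercept bit to be set at all times; if it is
     * clear the CPU refuses to enter the guest and exits immediately with
     * SVM_EXIT_INVALID. That is why the assertion just before VMRUN in
     * SVMR0RunGuestCode re-checks this exact intercept mask. */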
    Log(("pVMCB->ctrl.u32InterceptException = %x\n", pVMCB->ctrl.u32InterceptException));
    Log(("pVMCB->ctrl.u32InterceptCtrl1 = %x\n", pVMCB->ctrl.u32InterceptCtrl1));
    Log(("pVMCB->ctrl.u32InterceptCtrl2 = %x\n", pVMCB->ctrl.u32InterceptCtrl2));

    /* Virtualize masking of INTR interrupts. */
    pVMCB->ctrl.IntCtrl.n.u1VIrqMasking = 1;

    /* Set IO and MSR bitmap addresses. */
    pVMCB->ctrl.u64IOPMPhysAddr = pVM->hwaccm.s.svm.pIOBitmapPhys;
    pVMCB->ctrl.u64MSRPMPhysAddr = pVM->hwaccm.s.svm.pMSRBitmapPhys;

    /* Enable nested paging. */
    /** @todo how to detect support for this?? */
    pVMCB->ctrl.u64NestedPaging = 0; /** @todo SVM_NESTED_PAGING_ENABLE; */

    /* No LBR virtualization. */
    pVMCB->ctrl.u64LBRVirt = 0;

    return rc;
}


/**
 * Injects an event (trap or external interrupt).
 *
 * @param   pVM         The VM to operate on.
 * @param   pVMCB       SVM control block
 * @param   pCtx        CPU Context
 * @param   pEvent      SVM event to inject
 */
inline void SVMR0InjectEvent(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx, SVM_EVENT *pEvent)
{
#ifdef VBOX_STRICT
    if (pEvent->n.u8Vector == 0xE)
        Log(("SVM: Inject int %d at %VGv error code=%08x CR2=%08x intInfo=%08x\n", pEvent->n.u8Vector, pCtx->eip, pEvent->n.u32ErrorCode, pCtx->cr2, pEvent->au64[0]));
    else
    if (pEvent->n.u8Vector < 0x20)
        Log(("SVM: Inject int %d at %VGv error code=%08x\n", pEvent->n.u8Vector, pCtx->eip, pEvent->n.u32ErrorCode));
    else
    {
        Log(("INJ-EI: %x at %VGv\n", pEvent->n.u8Vector, pCtx->eip));
        Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
        Assert(pCtx->eflags.u32 & X86_EFL_IF);
    }
#endif

    /* Set event injection state. */
    pVMCB->ctrl.EventInject.au64[0] = pEvent->au64[0];
}
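
/* For reference (AMD APM vol. 2), the EVENTINJ/EXITINTINFO quadword copied by
 * SVMR0InjectEvent above is laid out as:
 *   bits  7:0   vector
 *   bits 10:8   type (0 = external interrupt, 2 = NMI, 3 = exception, 4 = software interrupt)
 *   bit  11     error code valid
 *   bit  31     valid
 *   bits 63:32  error code
 * e.g. injecting #GP with error code 0 would use au64[0] = 0x80000B0D. */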

/**
 * Checks for pending guest interrupts and injects them.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pVMCB       SVM control block
 * @param   pCtx        CPU Context
 */
static int SVMR0CheckPendingInterrupt(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
{
    int rc;

    /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
    if (pVM->hwaccm.s.Event.fPending)
    {
        SVM_EVENT Event;

        Log(("Reinjecting event %08x %08x at %VGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, pCtx->eip));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
        Event.au64[0] = pVM->hwaccm.s.Event.intInfo;
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

        pVM->hwaccm.s.Event.fPending = false;
        return VINF_SUCCESS;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    if (    !TRPMHasTrap(pVM)
        &&  VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
    {
        if (!(pCtx->eflags.u32 & X86_EFL_IF))
        {
            Log2(("Enable irq window exit!\n"));
            /** @todo use virtual interrupt method to inject a pending irq; dispatched as soon as guest.IF is set. */
            pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
            pVMCB->ctrl.IntCtrl.n.u1VIrqValid = 1;
            pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR = 1;  /* ignore the priority in the TPR; just deliver it */
            pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0; /* don't care */
        }
        else
        if (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        {
            uint8_t u8Interrupt;

            rc = PDMGetInterrupt(pVM, &u8Interrupt);
            Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Vrc\n", u8Interrupt, u8Interrupt, rc));
            if (VBOX_SUCCESS(rc))
            {
                rc = TRPMAssertTrap(pVM, u8Interrupt, TRPM_HARDWARE_INT);
                AssertRC(rc);
            }
            else
            {
                /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
                Assert(!VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchGuestIrq);
                /* Just continue */
            }
        }
        else
            Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS!!\n", pCtx->eip));
    }
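
    /* The block above implements an "interrupt window": with V_IRQ set and the
     * VINTR intercept enabled, the CPU takes a #VMEXIT(SVM_EXIT_VINTR) the moment
     * the guest sets EFLAGS.IF, at which point the real pending interrupt can be
     * injected (see the SVM_EXIT_VINTR case in SVMR0RunGuestCode). */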

#ifdef VBOX_STRICT
    if (TRPMHasTrap(pVM))
    {
        uint8_t u8Vector;
        rc = TRPMQueryTrapAll(pVM, &u8Vector, 0, 0, 0);
        AssertRC(rc);
    }
#endif

    if (    pCtx->eflags.u32 & X86_EFL_IF
        && (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        && TRPMHasTrap(pVM)
       )
    {
        uint8_t     u8Vector;
        int         rc;
        TRPMEVENT   enmType;
        SVM_EVENT   Event;
        uint32_t    u32ErrorCode;

        Event.au64[0] = 0;

        /* If a new event is pending, then dispatch it now. */
        rc = TRPMQueryTrapAll(pVM, &u8Vector, &enmType, &u32ErrorCode, 0);
        AssertRC(rc);
        Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
        Assert(enmType != TRPM_SOFTWARE_INT);

        /* Clear the pending trap. */
        rc = TRPMResetTrap(pVM);
        AssertRC(rc);

        Event.n.u8Vector = u8Vector;
        Event.n.u1Valid  = 1;
        Event.n.u32ErrorCode = u32ErrorCode;

        if (enmType == TRPM_TRAP)
        {
            switch (u8Vector)
            {
            case 8:
            case 10:
            case 11:
            case 12:
            case 13:
            case 14:
            case 17:
                /* Valid error codes. */
                Event.n.u1ErrorCodeValid = 1;
                break;
            default:
                break;
            }
            if (u8Vector == X86_XCPT_NMI)
                Event.n.u3Type = SVM_EVENT_NMI;
            else
                Event.n.u3Type = SVM_EVENT_EXCEPTION;
        }
        else
            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;

        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntInject);
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
    } /* if (interrupts can be dispatched) */

    return VINF_SUCCESS;
}


/**
 * Loads the guest state.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Guest context
 */
HWACCMR0DECL(int) SVMR0LoadGuestState(PVM pVM, CPUMCTX *pCtx)
{
    RTGCUINTPTR val;
    SVM_VMCB *pVMCB;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Setup AMD SVM. */
    Assert(pVM->hwaccm.s.svm.fSupported);

    pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    {
        SVM_WRITE_SELREG(CS, cs);
        SVM_WRITE_SELREG(SS, ss);
        SVM_WRITE_SELREG(DS, ds);
        SVM_WRITE_SELREG(ES, es);
        SVM_WRITE_SELREG(FS, fs);
        SVM_WRITE_SELREG(GS, gs);
    }
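
    /* SVM_WRITE_SELREG / SVM_READ_SELREG (from HWSVMR0.h) copy a segment register
     * between pCtx and the VMCB state-save area: the selector plus the hidden
     * attributes, limit and base, e.g. SVM_WRITE_SELREG(CS, cs) fills
     * pVMCB->guest.CS from pCtx->cs and pCtx->csHid. */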

    /* Guest CPU context: LDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
    {
        SVM_WRITE_SELREG(LDTR, ldtr);
    }

    /* Guest CPU context: TR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
    {
        SVM_WRITE_SELREG(TR, tr);
    }

    /* Guest CPU context: GDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
    {
        pVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
        pVMCB->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
    }

    /* Guest CPU context: IDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
    {
        pVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
        pVMCB->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
    }

    /*
     * Sysenter MSRs
     */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SYSENTER_MSR)
    {
        pVMCB->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
        pVMCB->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
        pVMCB->guest.u64SysEnterESP = pCtx->SysEnter.esp;
    }

    /* Control registers */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
    {
        val = pCtx->cr0;
        if (CPUMIsGuestFPUStateActive(pVM) == false)
        {
            /* Always use #NM exceptions to load the FPU/XMM state on demand. */
            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
        }
        else
        {
            Assert(pVM->hwaccm.s.svm.fResumeVM == true);
            /** @todo check if we support the old style mess correctly. */
            if (!(val & X86_CR0_NE))
            {
                Log(("Forcing X86_CR0_NE!!!\n"));

                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
                if (!pVM->hwaccm.s.fFPUOldStyleOverride)
                {
                    pVMCB->ctrl.u32InterceptException |= RT_BIT(16);
                    pVM->hwaccm.s.fFPUOldStyleOverride = true;
                }
            }
            val |= X86_CR0_NE;  /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
        }
        if (!(val & X86_CR0_CD))
            val &= ~X86_CR0_NW; /* Illegal when cache is turned on. */

        val |= X86_CR0_PG;  /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
        val |= X86_CR0_WP;  /* Must set this, as we rely on write-protecting various pages, and supervisor writes must be caught. */
        pVMCB->guest.u64CR0 = val;
    }
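
    /* Lazy FPU loading: while the guest FPU state is not active, CR0.TS+CR0.MP are
     * forced on above, so the guest's first FPU/XMM instruction raises #NM. That
     * exception is intercepted (see HWACCM_SVM_TRAP_MASK) and handled in
     * SVMR0RunGuestCode by CPUMHandleLazyFPU(), after which execution resumes
     * without the guest ever noticing. */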
    /* CR2 as well */
    pVMCB->guest.u64CR2 = pCtx->cr2;

    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    {
        /* Save our shadow CR3 register. */
        pVMCB->guest.u64CR3 = PGMGetHyperCR3(pVM);
    }

    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
    {
        val = pCtx->cr4;
        switch (pVM->hwaccm.s.enmShadowMode)
        {
        case PGMMODE_REAL:
        case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
            AssertFailed();
            return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;

        case PGMMODE_32_BIT:        /* 32-bit paging. */
            break;

        case PGMMODE_PAE:           /* PAE paging. */
        case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
            /** @todo use normal 32-bit paging */
            val |= X86_CR4_PAE;
            break;

        case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
        case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
            AssertFailed();
            return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;

        default:                    /* shut up gcc */
            AssertFailed();
            return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
        }
        pVMCB->guest.u64CR4 = val;
    }

    /* Debug registers. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
    {
        /** @todo DR0-6 */
        val  = pCtx->dr7;
        val &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));    /* must be zero */
        val |= 0x400;                                                   /* must be one */
#ifdef VBOX_STRICT
        val = 0x400;
#endif
        pVMCB->guest.u64DR7 = val;

        pVMCB->guest.u64DR6 = pCtx->dr6;
    }

    /* EIP, ESP and EFLAGS */
    pVMCB->guest.u64RIP    = pCtx->eip;
    pVMCB->guest.u64RSP    = pCtx->esp;
    pVMCB->guest.u64RFlags = pCtx->eflags.u32;

    /* Set CPL */
    pVMCB->guest.u8CPL = pCtx->ssHid.Attr.n.u2Dpl;

    /* RAX/EAX too, as VMRUN uses RAX as an implicit parameter. */
    pVMCB->guest.u64RAX = pCtx->eax;

    /* vmrun will fail otherwise. */
    pVMCB->guest.u64EFER = MSR_K6_EFER_SVME;

    /** @note We can do more complex things with tagged TLBs. */
    pVMCB->ctrl.TLBCtrl.n.u32ASID = 1;

    /* TSC offset. */
    if (TMCpuTickCanUseRealTSC(pVM, &pVMCB->ctrl.u64TSCOffset))
        pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
    else
        pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;

    /** @todo 64 bits stuff (?):
     * - STAR
     * - LSTAR
     * - CSTAR
     * - SFMASK
     * - KernelGSBase
     */

#ifdef DEBUG
    /* Intercept X86_XCPT_DB if stepping is enabled */
    if (DBGFIsStepping(pVM))
        pVMCB->ctrl.u32InterceptException |=  RT_BIT(1);
    else
        pVMCB->ctrl.u32InterceptException &= ~RT_BIT(1);
#endif

    /* Done. */
    pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;

    return VINF_SUCCESS;
}


/**
 * Runs guest code in an SVM VM.
 *
 * @todo This can be made much more efficient by syncing only the state that has actually changed (this is the first attempt only).
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Guest context
 */
HWACCMR0DECL(int) SVMR0RunGuestCode(PVM pVM, CPUMCTX *pCtx)
{
    int         rc = VINF_SUCCESS;
    uint64_t    exitCode = (uint64_t)SVM_EXIT_INVALID;
    SVM_VMCB   *pVMCB;
    bool        fForceTLBFlush = false;
    bool        fGuestStateSynced = false;
    unsigned    cResume = 0;

    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatEntry, x);

    pVMCB = (SVM_VMCB *)pVM->hwaccm.s.svm.pVMCB;
    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    /* We can jump to this point to resume execution after determining that a VM exit is innocent.
     */
ResumeExecution:
    /* Safety precaution; looping for too long here can have a very bad effect on the host. */
    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
    {
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitMaxResume);
        rc = VINF_EM_RAW_INTERRUPT;
        goto end;
    }

    /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
    if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
    {
        Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", pCtx->eip, EMGetInhibitInterruptsPC(pVM)));
        if (pCtx->eip != EMGetInhibitInterruptsPC(pVM))
        {
            /** @note we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
             * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
             * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
             * break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
             */
            VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
            /* Irq inhibition is no longer active; clear the corresponding SVM state. */
            pVMCB->ctrl.u64IntShadow = 0;
        }
    }
    else
    {
        /* Irq inhibition is no longer active; clear the corresponding SVM state. */
        pVMCB->ctrl.u64IntShadow = 0;
    }

    /* Check for pending actions that force us to go back to ring 3. */
#ifdef DEBUG
    /* Intercept X86_XCPT_DB if stepping is enabled */
    if (!DBGFIsStepping(pVM))
#endif
    {
        if (VM_FF_ISPENDING(pVM, VM_FF_TO_R3 | VM_FF_TIMER))
        {
            VM_FF_CLEAR(pVM, VM_FF_TO_R3);
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchToR3);
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
            rc = VINF_EM_RAW_TO_R3;
            goto end;
        }
    }

    /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
    if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        rc = VINF_EM_PENDING_REQUEST;
        goto end;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    /** @note *after* VM_FF_INHIBIT_INTERRUPTS check!!! */
    rc = SVMR0CheckPendingInterrupt(pVM, pVMCB, pCtx);
    if (VBOX_FAILURE(rc))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }

    /* Load the guest state */
    rc = SVMR0LoadGuestState(pVM, pCtx);
    if (rc != VINF_SUCCESS)
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }
    fGuestStateSynced = true;

    /* All done! Let's start VM execution. */
    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);

    if (    pVM->hwaccm.s.svm.fResumeVM == false
        ||  pVM->hwaccm.s.svm.fForceTLBFlush
        ||  fForceTLBFlush)
    {
        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = 1;
    }
    else
        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = 0;
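
    /* A full TLB flush is forced on the first VMRUN after SVMR0Enter (fResumeVM
     * is false then), on erratum-170 parts (fForceTLBFlush set in SVMR0InitVM),
     * and after CR3/CR4 writes handled below. Since every guest runs with ASID 1
     * here, stale translations from an earlier CR3 or another VM would otherwise
     * survive in the TLB. */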

    /* In case we execute a goto ResumeExecution later on. */
    pVM->hwaccm.s.svm.fResumeVM = true;
    fForceTLBFlush = false;

    Assert(sizeof(pVM->hwaccm.s.svm.pVMCBPhys) == 8);
    Assert(pVMCB->ctrl.u32InterceptCtrl2 == (  SVM_CTRL2_INTERCEPT_VMRUN          /* required */
                                             | SVM_CTRL2_INTERCEPT_VMMCALL
                                             | SVM_CTRL2_INTERCEPT_VMLOAD
                                             | SVM_CTRL2_INTERCEPT_VMSAVE
                                             | SVM_CTRL2_INTERCEPT_STGI
                                             | SVM_CTRL2_INTERCEPT_CLGI
                                             | SVM_CTRL2_INTERCEPT_SKINIT
                                             | SVM_CTRL2_INTERCEPT_RDTSCP         /* AMD only; we don't support this one */
                                             ));
    Assert(pVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
    Assert(pVMCB->ctrl.u64IOPMPhysAddr == pVM->hwaccm.s.svm.pIOBitmapPhys);
    Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVM->hwaccm.s.svm.pMSRBitmapPhys);
    Assert(pVMCB->ctrl.u64NestedPaging == 0);
    Assert(pVMCB->ctrl.u64LBRVirt == 0);

    SVMVMRun(pVM->hwaccm.s.svm.pVMCBHostPhys, pVM->hwaccm.s.svm.pVMCBPhys, pCtx);
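    /* SVMVMRun is the assembly world-switch helper: it is expected to save the
     * host state to the host VMCB page, load the guest registers from pCtx,
     * execute VMRUN with the guest VMCB physical address, and write the guest
     * registers back to pCtx on #VMEXIT. */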
    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatInGC, x);

    /**
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING 3
     *            *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE.
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     */

    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatExit, x);

    /* Reason for the VM exit */
    exitCode = pVMCB->ctrl.u64ExitCode;

    if (exitCode == (uint64_t)SVM_EXIT_INVALID)     /* Invalid guest state. */
    {
        HWACCMDumpRegs(pCtx);
#ifdef DEBUG
        Log(("ctrl.u16InterceptRdCRx %x\n", pVMCB->ctrl.u16InterceptRdCRx));
        Log(("ctrl.u16InterceptWrCRx %x\n", pVMCB->ctrl.u16InterceptWrCRx));
        Log(("ctrl.u16InterceptRdDRx %x\n", pVMCB->ctrl.u16InterceptRdDRx));
        Log(("ctrl.u16InterceptWrDRx %x\n", pVMCB->ctrl.u16InterceptWrDRx));
        Log(("ctrl.u32InterceptException %x\n", pVMCB->ctrl.u32InterceptException));
        Log(("ctrl.u32InterceptCtrl1 %x\n", pVMCB->ctrl.u32InterceptCtrl1));
        Log(("ctrl.u32InterceptCtrl2 %x\n", pVMCB->ctrl.u32InterceptCtrl2));
        Log(("ctrl.u64IOPMPhysAddr %VX64\n", pVMCB->ctrl.u64IOPMPhysAddr));
        Log(("ctrl.u64MSRPMPhysAddr %VX64\n", pVMCB->ctrl.u64MSRPMPhysAddr));
        Log(("ctrl.u64TSCOffset %VX64\n", pVMCB->ctrl.u64TSCOffset));

        Log(("ctrl.TLBCtrl.u32ASID %x\n", pVMCB->ctrl.TLBCtrl.n.u32ASID));
        Log(("ctrl.TLBCtrl.u1TLBFlush %x\n", pVMCB->ctrl.TLBCtrl.n.u1TLBFlush));
        Log(("ctrl.TLBCtrl.u7Reserved %x\n", pVMCB->ctrl.TLBCtrl.n.u7Reserved));
        Log(("ctrl.TLBCtrl.u24Reserved %x\n", pVMCB->ctrl.TLBCtrl.n.u24Reserved));

        Log(("ctrl.IntCtrl.u8VTPR %x\n", pVMCB->ctrl.IntCtrl.n.u8VTPR));
        Log(("ctrl.IntCtrl.u1VIrqValid %x\n", pVMCB->ctrl.IntCtrl.n.u1VIrqValid));
        Log(("ctrl.IntCtrl.u7Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u7Reserved));
        Log(("ctrl.IntCtrl.u4VIrqPriority %x\n", pVMCB->ctrl.IntCtrl.n.u4VIrqPriority));
        Log(("ctrl.IntCtrl.u1IgnoreTPR %x\n", pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR));
        Log(("ctrl.IntCtrl.u3Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u3Reserved));
        Log(("ctrl.IntCtrl.u1VIrqMasking %x\n", pVMCB->ctrl.IntCtrl.n.u1VIrqMasking));
        Log(("ctrl.IntCtrl.u7Reserved2 %x\n", pVMCB->ctrl.IntCtrl.n.u7Reserved2));
        Log(("ctrl.IntCtrl.u8VIrqVector %x\n", pVMCB->ctrl.IntCtrl.n.u8VIrqVector));
        Log(("ctrl.IntCtrl.u24Reserved %x\n", pVMCB->ctrl.IntCtrl.n.u24Reserved));

        Log(("ctrl.u64IntShadow %VX64\n", pVMCB->ctrl.u64IntShadow));
        Log(("ctrl.u64ExitCode %VX64\n", pVMCB->ctrl.u64ExitCode));
        Log(("ctrl.u64ExitInfo1 %VX64\n", pVMCB->ctrl.u64ExitInfo1));
        Log(("ctrl.u64ExitInfo2 %VX64\n", pVMCB->ctrl.u64ExitInfo2));
        Log(("ctrl.ExitIntInfo.u8Vector %x\n", pVMCB->ctrl.ExitIntInfo.n.u8Vector));
        Log(("ctrl.ExitIntInfo.u3Type %x\n", pVMCB->ctrl.ExitIntInfo.n.u3Type));
        Log(("ctrl.ExitIntInfo.u1ErrorCodeValid %x\n", pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
        Log(("ctrl.ExitIntInfo.u19Reserved %x\n", pVMCB->ctrl.ExitIntInfo.n.u19Reserved));
        Log(("ctrl.ExitIntInfo.u1Valid %x\n", pVMCB->ctrl.ExitIntInfo.n.u1Valid));
        Log(("ctrl.ExitIntInfo.u32ErrorCode %x\n", pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode));
        Log(("ctrl.u64NestedPaging %VX64\n", pVMCB->ctrl.u64NestedPaging));
        Log(("ctrl.EventInject.u8Vector %x\n", pVMCB->ctrl.EventInject.n.u8Vector));
        Log(("ctrl.EventInject.u3Type %x\n", pVMCB->ctrl.EventInject.n.u3Type));
        Log(("ctrl.EventInject.u1ErrorCodeValid %x\n", pVMCB->ctrl.EventInject.n.u1ErrorCodeValid));
        Log(("ctrl.EventInject.u19Reserved %x\n", pVMCB->ctrl.EventInject.n.u19Reserved));
        Log(("ctrl.EventInject.u1Valid %x\n", pVMCB->ctrl.EventInject.n.u1Valid));
        Log(("ctrl.EventInject.u32ErrorCode %x\n", pVMCB->ctrl.EventInject.n.u32ErrorCode));

        Log(("ctrl.u64HostCR3 %VX64\n", pVMCB->ctrl.u64HostCR3));
        Log(("ctrl.u64LBRVirt %VX64\n", pVMCB->ctrl.u64LBRVirt));

        Log(("guest.CS.u16Sel %04X\n", pVMCB->guest.CS.u16Sel));
        Log(("guest.CS.u16Attr %04X\n", pVMCB->guest.CS.u16Attr));
        Log(("guest.CS.u32Limit %X\n", pVMCB->guest.CS.u32Limit));
        Log(("guest.CS.u64Base %VX64\n", pVMCB->guest.CS.u64Base));
        Log(("guest.DS.u16Sel %04X\n", pVMCB->guest.DS.u16Sel));
        Log(("guest.DS.u16Attr %04X\n", pVMCB->guest.DS.u16Attr));
        Log(("guest.DS.u32Limit %X\n", pVMCB->guest.DS.u32Limit));
        Log(("guest.DS.u64Base %VX64\n", pVMCB->guest.DS.u64Base));
        Log(("guest.ES.u16Sel %04X\n", pVMCB->guest.ES.u16Sel));
        Log(("guest.ES.u16Attr %04X\n", pVMCB->guest.ES.u16Attr));
        Log(("guest.ES.u32Limit %X\n", pVMCB->guest.ES.u32Limit));
        Log(("guest.ES.u64Base %VX64\n", pVMCB->guest.ES.u64Base));
        Log(("guest.FS.u16Sel %04X\n", pVMCB->guest.FS.u16Sel));
        Log(("guest.FS.u16Attr %04X\n", pVMCB->guest.FS.u16Attr));
        Log(("guest.FS.u32Limit %X\n", pVMCB->guest.FS.u32Limit));
        Log(("guest.FS.u64Base %VX64\n", pVMCB->guest.FS.u64Base));
        Log(("guest.GS.u16Sel %04X\n", pVMCB->guest.GS.u16Sel));
        Log(("guest.GS.u16Attr %04X\n", pVMCB->guest.GS.u16Attr));
        Log(("guest.GS.u32Limit %X\n", pVMCB->guest.GS.u32Limit));
        Log(("guest.GS.u64Base %VX64\n", pVMCB->guest.GS.u64Base));

        Log(("guest.GDTR.u32Limit %X\n", pVMCB->guest.GDTR.u32Limit));
        Log(("guest.GDTR.u64Base %VX64\n", pVMCB->guest.GDTR.u64Base));

        Log(("guest.LDTR.u16Sel %04X\n", pVMCB->guest.LDTR.u16Sel));
        Log(("guest.LDTR.u16Attr %04X\n", pVMCB->guest.LDTR.u16Attr));
        Log(("guest.LDTR.u32Limit %X\n", pVMCB->guest.LDTR.u32Limit));
        Log(("guest.LDTR.u64Base %VX64\n", pVMCB->guest.LDTR.u64Base));

        Log(("guest.IDTR.u32Limit %X\n", pVMCB->guest.IDTR.u32Limit));
        Log(("guest.IDTR.u64Base %VX64\n", pVMCB->guest.IDTR.u64Base));

        Log(("guest.TR.u16Sel %04X\n", pVMCB->guest.TR.u16Sel));
        Log(("guest.TR.u16Attr %04X\n", pVMCB->guest.TR.u16Attr));
        Log(("guest.TR.u32Limit %X\n", pVMCB->guest.TR.u32Limit));
        Log(("guest.TR.u64Base %VX64\n", pVMCB->guest.TR.u64Base));

        Log(("guest.u8CPL %X\n", pVMCB->guest.u8CPL));
        Log(("guest.u64CR0 %VX64\n", pVMCB->guest.u64CR0));
        Log(("guest.u64CR2 %VX64\n", pVMCB->guest.u64CR2));
        Log(("guest.u64CR3 %VX64\n", pVMCB->guest.u64CR3));
        Log(("guest.u64CR4 %VX64\n", pVMCB->guest.u64CR4));
        Log(("guest.u64DR6 %VX64\n", pVMCB->guest.u64DR6));
        Log(("guest.u64DR7 %VX64\n", pVMCB->guest.u64DR7));

        Log(("guest.u64RIP %VX64\n", pVMCB->guest.u64RIP));
        Log(("guest.u64RSP %VX64\n", pVMCB->guest.u64RSP));
        Log(("guest.u64RAX %VX64\n", pVMCB->guest.u64RAX));
        Log(("guest.u64RFlags %VX64\n", pVMCB->guest.u64RFlags));

        Log(("guest.u64SysEnterCS %VX64\n", pVMCB->guest.u64SysEnterCS));
        Log(("guest.u64SysEnterEIP %VX64\n", pVMCB->guest.u64SysEnterEIP));
        Log(("guest.u64SysEnterESP %VX64\n", pVMCB->guest.u64SysEnterESP));

        Log(("guest.u64EFER %VX64\n", pVMCB->guest.u64EFER));
        Log(("guest.u64STAR %VX64\n", pVMCB->guest.u64STAR));
        Log(("guest.u64LSTAR %VX64\n", pVMCB->guest.u64LSTAR));
        Log(("guest.u64CSTAR %VX64\n", pVMCB->guest.u64CSTAR));
        Log(("guest.u64SFMASK %VX64\n", pVMCB->guest.u64SFMASK));
        Log(("guest.u64KernelGSBase %VX64\n", pVMCB->guest.u64KernelGSBase));
        Log(("guest.u64GPAT %VX64\n", pVMCB->guest.u64GPAT));
        Log(("guest.u64DBGCTL %VX64\n", pVMCB->guest.u64DBGCTL));
        Log(("guest.u64BR_FROM %VX64\n", pVMCB->guest.u64BR_FROM));
        Log(("guest.u64BR_TO %VX64\n", pVMCB->guest.u64BR_TO));
        Log(("guest.u64LASTEXCPFROM %VX64\n", pVMCB->guest.u64LASTEXCPFROM));
        Log(("guest.u64LASTEXCPTO %VX64\n", pVMCB->guest.u64LASTEXCPTO));
#endif
        rc = VERR_SVM_UNABLE_TO_START_VM;
        goto end;
    }

    /* Let's first sync back eip, esp, and eflags. */
    pCtx->eip        = pVMCB->guest.u64RIP;
    pCtx->esp        = pVMCB->guest.u64RSP;
    pCtx->eflags.u32 = pVMCB->guest.u64RFlags;
    /* eax is saved/restored across the vmrun instruction */
    pCtx->eax        = pVMCB->guest.u64RAX;

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    SVM_READ_SELREG(SS, ss);
    SVM_READ_SELREG(CS, cs);
    SVM_READ_SELREG(DS, ds);
    SVM_READ_SELREG(ES, es);
    SVM_READ_SELREG(FS, fs);
    SVM_READ_SELREG(GS, gs);

    /** @note no reason to sync back the CRx and DRx registers. They can't be changed by the guest. */

    /** @note NOW IT'S SAFE FOR LOGGING! */

    /* Take care of instruction fusing (sti, mov ss) */
    if (pVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
    {
        Log(("uInterruptState %x eip=%VGv\n", pVMCB->ctrl.u64IntShadow, pCtx->eip));
        EMSetInhibitInterruptsPC(pVM, pCtx->eip);
    }
    else
        VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);

    Log2(("exitCode = %x\n", exitCode));

    /* Check if an injected event was interrupted prematurely. */
    pVM->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
    if (    pVMCB->ctrl.ExitIntInfo.n.u1Valid
        &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */)
    {
        Log(("Pending inject %VX64 at %08x exit=%08x\n", pVM->hwaccm.s.Event.intInfo, pCtx->eip, exitCode));
        pVM->hwaccm.s.Event.fPending = true;
        /* Error code present? (redundant) */
        if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
        {
            pVM->hwaccm.s.Event.errCode = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
        }
        else
            pVM->hwaccm.s.Event.errCode = 0;
    }
    STAM_COUNTER_INC(&pVM->hwaccm.s.pStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);

    /* Deal with the reason of the VM-exit. */
    switch (exitCode)
    {
    case SVM_EXIT_EXCEPTION_0:  case SVM_EXIT_EXCEPTION_1:  case SVM_EXIT_EXCEPTION_2:  case SVM_EXIT_EXCEPTION_3:
    case SVM_EXIT_EXCEPTION_4:  case SVM_EXIT_EXCEPTION_5:  case SVM_EXIT_EXCEPTION_6:  case SVM_EXIT_EXCEPTION_7:
    case SVM_EXIT_EXCEPTION_8:  case SVM_EXIT_EXCEPTION_9:  case SVM_EXIT_EXCEPTION_A:  case SVM_EXIT_EXCEPTION_B:
    case SVM_EXIT_EXCEPTION_C:  case SVM_EXIT_EXCEPTION_D:  case SVM_EXIT_EXCEPTION_E:  case SVM_EXIT_EXCEPTION_F:
    case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11: case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13:
    case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17:
    case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19: case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B:
    case SVM_EXIT_EXCEPTION_1C: case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
    {
        /* Pending trap. */
        SVM_EVENT   Event;
        uint32_t    vector = exitCode - SVM_EXIT_EXCEPTION_0;

        Log2(("Hardware/software interrupt %d\n", vector));
        switch (vector)
        {
#ifdef DEBUG
        case X86_XCPT_DB:
            rc = DBGFR0Trap01Handler(pVM, CPUMCTX2CORE(pCtx), pVMCB->guest.u64DR6);
            Assert(rc != VINF_EM_RAW_GUEST_TRAP);
            break;
#endif

        case X86_XCPT_NM:
        {
            uint32_t oldCR0;

            Log(("#NM fault at %VGv\n", pCtx->eip));

            /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
            oldCR0 = ASMGetCR0();
            /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
            rc = CPUMHandleLazyFPU(pVM);
            if (rc == VINF_SUCCESS)
            {
                Assert(CPUMIsGuestFPUStateActive(pVM));

                /* CPUMHandleLazyFPU could have changed CR0; restore it. */
                ASMSetCR0(oldCR0);

                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowNM);

                /* Continue execution. */
                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;

                goto ResumeExecution;
            }

            Log(("Forward #NM fault to the guest\n"));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNM);

            Event.au64[0]    = 0;
            Event.n.u3Type   = SVM_EVENT_EXCEPTION;
            Event.n.u1Valid  = 1;
            Event.n.u8Vector = X86_XCPT_NM;

            SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }

        case X86_XCPT_PF: /* Page fault */
        {
            uint32_t    errCode       = pVMCB->ctrl.u64ExitInfo1;   /* EXITINFO1 = error code */
            RTGCUINTPTR uFaultAddress = pVMCB->ctrl.u64ExitInfo2;   /* EXITINFO2 = fault address */

            Log2(("Page fault at %VGv cr2=%VGv error code %x\n", pCtx->eip, uFaultAddress, errCode));
            /* Exit qualification contains the linear address of the page fault. */
            TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
            TRPMSetErrorCode(pVM, errCode);
            TRPMSetFaultAddress(pVM, uFaultAddress);

            /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
            rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
            Log2(("PGMTrap0eHandler %VGv returned %Vrc\n", pCtx->eip, rc));
            if (rc == VINF_SUCCESS)
            {   /* We've successfully synced our shadow pages, so let's just continue execution. */
                Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", pCtx->eip, uFaultAddress, errCode));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);

                TRPMResetTrap(pVM);

                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
            else
            if (rc == VINF_EM_RAW_GUEST_TRAP)
            {   /* A genuine page fault.
                 * Forward the trap to the guest by injecting the exception and resuming execution.
                 */
                Log2(("Forward page fault to the guest\n"));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
                /* The error code might have been changed. */
                errCode = TRPMGetErrorCode(pVM);

                TRPMResetTrap(pVM);

                /* Now we must update CR2. */
                pCtx->cr2 = uFaultAddress;

                Event.au64[0]            = 0;
                Event.n.u3Type           = SVM_EVENT_EXCEPTION;
                Event.n.u1Valid          = 1;
                Event.n.u8Vector         = X86_XCPT_PF;
                Event.n.u1ErrorCodeValid = 1;
                Event.n.u32ErrorCode     = errCode;

                SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
#ifdef VBOX_STRICT
            if (rc != VINF_EM_RAW_EMULATE_INSTR)
                Log(("PGMTrap0eHandler failed with %d\n", rc));
#endif
            /* Need to go back to the recompiler to emulate the instruction. */
            TRPMResetTrap(pVM);
            break;
        }

        case X86_XCPT_MF: /* Floating point exception. */
        {
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestMF);
            if (!(pCtx->cr0 & X86_CR0_NE))
            {
                /* old style FPU error reporting needs some extra work. */
                /** @todo don't fall back to the recompiler, but do it manually. */
                rc = VINF_EM_RAW_EMULATE_INSTR;
                break;
            }
            Log(("Trap %x at %VGv\n", vector, pCtx->eip));

            Event.au64[0]    = 0;
            Event.n.u3Type   = SVM_EVENT_EXCEPTION;
            Event.n.u1Valid  = 1;
            Event.n.u8Vector = X86_XCPT_MF;

            SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }

#ifdef VBOX_STRICT
        case X86_XCPT_GP:   /* General protection fault exception. */
        case X86_XCPT_UD:   /* Invalid opcode exception. */
        case X86_XCPT_DE:   /* Divide error exception. */
        case X86_XCPT_SS:   /* Stack segment exception. */
        case X86_XCPT_NP:   /* Segment not present exception. */
        {
            Event.au64[0]    = 0;
            Event.n.u3Type   = SVM_EVENT_EXCEPTION;
            Event.n.u1Valid  = 1;
            Event.n.u8Vector = vector;

            switch (vector)
            {
            case X86_XCPT_GP:
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestGP);
                Event.n.u1ErrorCodeValid = 1;
                Event.n.u32ErrorCode     = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
                break;
            case X86_XCPT_DE:
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestDE);
                break;
            case X86_XCPT_UD:
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestUD);
                break;
            case X86_XCPT_SS:
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestSS);
                Event.n.u1ErrorCodeValid = 1;
                Event.n.u32ErrorCode     = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
                break;
            case X86_XCPT_NP:
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNP);
                Event.n.u1ErrorCodeValid = 1;
                Event.n.u32ErrorCode     = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
                break;
            }
            Log(("Trap %x at %VGv\n", vector, pCtx->eip));
            SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
#endif
        default:
            AssertMsgFailed(("Unexpected vm-exit caused by exception %x\n", vector));
            rc = VERR_EM_INTERNAL_ERROR;
            break;

        } /* switch (vector) */
        break;
    }

    case SVM_EXIT_VINTR:
        /* A virtual interrupt is about to be delivered, which means IF=1. */
        pVMCB->ctrl.IntCtrl.n.u1VIrqValid  = 0;
        pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR  = 0;
        pVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0;
        goto ResumeExecution;

    case SVM_EXIT_FERR_FREEZE:
    case SVM_EXIT_INTR:
    case SVM_EXIT_NMI:
    case SVM_EXIT_SMI:
    case SVM_EXIT_INIT:
        /* External interrupt; leave to allow it to be dispatched again. */
        rc = VINF_EM_RAW_INTERRUPT;
        break;

    case SVM_EXIT_INVD:             /* Guest software attempted to execute INVD. */
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvd);
        /* Skip instruction and continue directly. */
        pCtx->eip += 2;             /** @note hardcoded opcode size! */
        /* Continue execution.*/
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
        goto ResumeExecution;

    case SVM_EXIT_CPUID:            /* Guest software attempted to execute CPUID. */
    {
        Log2(("SVM: Cpuid %x\n", pCtx->eax));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCpuid);
        rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
        if (rc == VINF_SUCCESS)
        {
            /* Update EIP and continue execution. */
            pCtx->eip += 2;         /** @note hardcoded opcode size! */
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        AssertMsgFailed(("EMU: cpuid failed with %Vrc\n", rc));
        rc = VINF_EM_RAW_EMULATE_INSTR;
        break;
    }

    case SVM_EXIT_RDTSC:            /* Guest software attempted to execute RDTSC. */
    {
        Log2(("SVM: Rdtsc\n"));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitRdtsc);
        rc = EMInterpretRdtsc(pVM, CPUMCTX2CORE(pCtx));
        if (rc == VINF_SUCCESS)
        {
            /* Update EIP and continue execution. */
            pCtx->eip += 2;         /** @note hardcoded opcode size! */
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        AssertMsgFailed(("EMU: rdtsc failed with %Vrc\n", rc));
        rc = VINF_EM_RAW_EMULATE_INSTR;
        break;
    }

    case SVM_EXIT_INVLPG:           /* Guest software attempted to execute INVLPG. */
    {
        Log2(("SVM: invlpg\n"));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvpg);

        /* Truly a pita. Why can't SVM give the same information as VMX? */
        rc = SVMR0InterpretInvpg(pVM, CPUMCTX2CORE(pCtx), pVMCB->ctrl.TLBCtrl.n.u32ASID);
        if (rc == VINF_SUCCESS)
            goto ResumeExecution;   /* eip already updated */
        break;
    }

    case SVM_EXIT_WRITE_CR0:  case SVM_EXIT_WRITE_CR1:  case SVM_EXIT_WRITE_CR2:  case SVM_EXIT_WRITE_CR3:
    case SVM_EXIT_WRITE_CR4:  case SVM_EXIT_WRITE_CR5:  case SVM_EXIT_WRITE_CR6:  case SVM_EXIT_WRITE_CR7:
    case SVM_EXIT_WRITE_CR8:  case SVM_EXIT_WRITE_CR9:  case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
    case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
    {
        uint32_t cbSize;

        Log2(("SVM: %VGv mov cr%d, x\n", pCtx->eip, exitCode - SVM_EXIT_WRITE_CR0));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxWrite);
        rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);

        switch (exitCode - SVM_EXIT_WRITE_CR0)
        {
        case 0:
            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
            break;
        case 2:
            break;
        case 3:
            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
            break;
        case 4:
            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
            break;
        default:
            AssertFailed();
        }
        /* Check if a sync operation is pending. */
        if (    rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
            &&  VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
        {
            rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
            AssertRC(rc);

            /** @note Force a TLB flush. SVM requires us to do it manually. */
            fForceTLBFlush = true;
        }
        if (rc == VINF_SUCCESS)
        {
            /* EIP has been updated already. */

            /* Only resume if successful. */
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
        break;
    }

    case SVM_EXIT_READ_CR0:  case SVM_EXIT_READ_CR1:  case SVM_EXIT_READ_CR2:  case SVM_EXIT_READ_CR3:
    case SVM_EXIT_READ_CR4:  case SVM_EXIT_READ_CR5:  case SVM_EXIT_READ_CR6:  case SVM_EXIT_READ_CR7:
    case SVM_EXIT_READ_CR8:  case SVM_EXIT_READ_CR9:  case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
    case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
    {
        uint32_t cbSize;

        Log2(("SVM: %VGv mov x, cr%d\n", pCtx->eip, exitCode - SVM_EXIT_READ_CR0));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
        rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
        if (rc == VINF_SUCCESS)
        {
            /* EIP has been updated already. */

            /* Only resume if successful. */
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
        break;
    }

    case SVM_EXIT_WRITE_DR0:  case SVM_EXIT_WRITE_DR1:  case SVM_EXIT_WRITE_DR2:  case SVM_EXIT_WRITE_DR3:
    case SVM_EXIT_WRITE_DR4:  case SVM_EXIT_WRITE_DR5:  case SVM_EXIT_WRITE_DR6:  case SVM_EXIT_WRITE_DR7:
    case SVM_EXIT_WRITE_DR8:  case SVM_EXIT_WRITE_DR9:  case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
    case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
    {
        uint32_t cbSize;

        Log2(("SVM: %VGv mov dr%d, x\n", pCtx->eip, exitCode - SVM_EXIT_WRITE_DR0));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead); /** @todo this counts DRx writes with the read counter. */
        rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
        if (rc == VINF_SUCCESS)
        {
            /* EIP has been updated already. */

            /* Only resume if successful. */
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
        break;
    }

    case SVM_EXIT_READ_DR0:  case SVM_EXIT_READ_DR1:  case SVM_EXIT_READ_DR2:  case SVM_EXIT_READ_DR3:
    case SVM_EXIT_READ_DR4:  case SVM_EXIT_READ_DR5:  case SVM_EXIT_READ_DR6:  case SVM_EXIT_READ_DR7:
    case SVM_EXIT_READ_DR8:  case SVM_EXIT_READ_DR9:  case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
    case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
    {
        uint32_t cbSize;

        Log2(("SVM: %VGv mov x, dr%d\n", pCtx->eip, exitCode - SVM_EXIT_READ_DR0));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
        rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
        if (rc == VINF_SUCCESS)
        {
            /* EIP has been updated already. */

            /* Only resume if successful. */
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
        break;
    }

    /* Note: We'll get a #GP if the IO instruction isn't allowed (IOPL or TSS bitmap); no need to double check. */
    case SVM_EXIT_IOIO:             /* I/O instruction. */
    {
        SVM_IOIO_EXIT   IoExitInfo;
        uint32_t        uIOSize, uAndVal;

        IoExitInfo.au32[0] = pVMCB->ctrl.u64ExitInfo1;
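
        /* EXITINFO1 layout for IOIO intercepts (AMD APM vol. 2):
         *   bit  0      type (0 = OUT, 1 = IN)
         *   bit  2      string instruction (INS/OUTS)
         *   bit  3      REP prefix
         *   bits 4/5/6  operand size 8/16/32
         *   bits 31:16  port number
         * EXITINFO2 holds the RIP of the following instruction. */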

        /** @todo could use a lookup table here */
        if (IoExitInfo.n.u1OP8)
        {
            uIOSize = 1;
            uAndVal = 0xff;
        }
        else
        if (IoExitInfo.n.u1OP16)
        {
            uIOSize = 2;
            uAndVal = 0xffff;
        }
        else
        if (IoExitInfo.n.u1OP32)
        {
            uIOSize = 4;
            uAndVal = 0xffffffff;
        }
        else
        {
            AssertFailed(); /* should be fatal. */
            rc = VINF_EM_RAW_EMULATE_INSTR;
            break;
        }

        if (IoExitInfo.n.u1STR)
        {
            /* ins/outs */
            uint32_t prefix = 0;
            if (IoExitInfo.n.u1REP)
                prefix |= PREFIX_REP;

            if (IoExitInfo.n.u1Type == 0)
            {
                Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringWrite);
                rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
            }
            else
            {
                Log2(("IOMInterpretINSEx %VGv %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringRead);
                rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
            }
        }
        else
        {
            /* normal in/out */
            Assert(!IoExitInfo.n.u1REP);

            if (IoExitInfo.n.u1Type == 0)
            {
                Log2(("IOMIOPortWrite %VGv %x %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOWrite);
                rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
            }
            else
            {
                uint32_t u32Val = 0;

                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIORead);
                rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize);
                if (IOM_SUCCESS(rc))
                {
                    /* Write back to the EAX register. */
                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
                    Log2(("IOMIOPortRead %VGv %x %x size=%d\n", pCtx->eip, IoExitInfo.n.u16Port, u32Val & uAndVal, uIOSize));
                }
            }
        }
        /*
         * Handle the I/O return codes.
         * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
         */
        if (IOM_SUCCESS(rc))
        {
            /* Update EIP and continue execution. */
            pCtx->eip = pVMCB->ctrl.u64ExitInfo2;   /* RIP/EIP of the next instruction is saved in EXITINFO2. */
            if (RT_LIKELY(rc == VINF_SUCCESS))
            {
                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
            Log2(("EM status from IO at %VGv %x size %d: %Vrc\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize, rc));
            break;
        }

#ifdef VBOX_STRICT
        if (rc == VINF_IOM_HC_IOPORT_READ)
            Assert(IoExitInfo.n.u1Type != 0);
        else if (rc == VINF_IOM_HC_IOPORT_WRITE)
            Assert(IoExitInfo.n.u1Type == 0);
        else
            AssertMsg(VBOX_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Vrc\n", rc));
#endif
        Log2(("Failed IO at %VGv %x size %d\n", pCtx->eip, IoExitInfo.n.u16Port, uIOSize));
        break;
    }

    case SVM_EXIT_HLT:
        /* Check if external interrupts are pending; if so, don't switch back. */
        if (VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
        {
            pCtx->eip++;    /* skip hlt */
            goto ResumeExecution;
        }

        rc = VINF_EM_RAW_EMULATE_INSTR_HLT;
        break;

    case SVM_EXIT_RDPMC:
    case SVM_EXIT_RSM:
    case SVM_EXIT_INVLPGA:
    case SVM_EXIT_VMRUN:
    case SVM_EXIT_VMMCALL:
    case SVM_EXIT_VMLOAD:
    case SVM_EXIT_VMSAVE:
    case SVM_EXIT_STGI:
    case SVM_EXIT_CLGI:
    case SVM_EXIT_SKINIT:
    case SVM_EXIT_RDTSCP:
    {
        /* Unsupported instructions. */
        SVM_EVENT Event;

        Event.au64[0]    = 0;
        Event.n.u3Type   = SVM_EVENT_EXCEPTION;
        Event.n.u1Valid  = 1;
        Event.n.u8Vector = X86_XCPT_UD;

        Log(("Forced #UD trap at %VGv\n", pCtx->eip));
        SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);

        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
        goto ResumeExecution;
    }

    /* Emulate RDMSR & WRMSR in ring 3. */
    case SVM_EXIT_MSR:
        rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
        break;

    case SVM_EXIT_NPF:
        AssertFailed(); /* unexpected */
        break;

    case SVM_EXIT_SHUTDOWN:
        rc = VINF_EM_RESET;     /* Triple fault equals a reset. */
        break;

    case SVM_EXIT_PAUSE:
    case SVM_EXIT_IDTR_READ:
    case SVM_EXIT_GDTR_READ:
    case SVM_EXIT_LDTR_READ:
    case SVM_EXIT_TR_READ:
    case SVM_EXIT_IDTR_WRITE:
    case SVM_EXIT_GDTR_WRITE:
    case SVM_EXIT_LDTR_WRITE:
    case SVM_EXIT_TR_WRITE:
    case SVM_EXIT_CR0_SEL_WRITE:
    default:
        /* Unexpected exit codes. */
        rc = VERR_EM_INTERNAL_ERROR;
        AssertMsgFailed(("Unexpected exit code %x\n", exitCode));   /* Can't happen. */
        break;
    }

end:
    if (fGuestStateSynced)
    {
        /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR. */
        SVM_READ_SELREG(LDTR, ldtr);
        SVM_READ_SELREG(TR, tr);

        pCtx->gdtr.cbGdt = pVMCB->guest.GDTR.u32Limit;
        pCtx->gdtr.pGdt  = pVMCB->guest.GDTR.u64Base;

        pCtx->idtr.cbIdt = pVMCB->guest.IDTR.u32Limit;
        pCtx->idtr.pIdt  = pVMCB->guest.IDTR.u64Base;

        /*
         * System MSRs
         */
        pCtx->SysEnter.cs  = pVMCB->guest.u64SysEnterCS;
        pCtx->SysEnter.eip = pVMCB->guest.u64SysEnterEIP;
        pCtx->SysEnter.esp = pVMCB->guest.u64SysEnterESP;
    }

    /* Signal changes for the recompiler. */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);

    /* If we executed vmrun and an external irq was pending, then we don't have to do a full sync the next time. */
    if (exitCode == SVM_EXIT_INTR)
    {
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
        /* On the next entry we'll only sync the host context. */
        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
    }
    else
    {
        /* On the next entry we'll sync everything. */
        /** @todo we can do better than this */
        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
    }

    /* Translate into a less severe return code. */
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;

    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
    return rc;
}

/**
 * Enters the AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0Enter(PVM pVM)
{
    Assert(pVM->hwaccm.s.svm.fSupported);

    /* Force a TLB flush on VM entry. */
    pVM->hwaccm.s.svm.fResumeVM = false;

    /* Force to reload LDTR, so we'll execute VMLoad to load additional guest state. */
    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;

    return VINF_SUCCESS;
}


/**
 * Leaves the AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) SVMR0Leave(PVM pVM)
{
    Assert(pVM->hwaccm.s.svm.fSupported);
    return VINF_SUCCESS;
}


static int svmInterpretInvlPg(PVM pVM, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
{
    OP_PARAMVAL param1;
    RTGCPTR     addr;

    int rc = DISQueryParamVal(pRegFrame, pCpu, &pCpu->param1, &param1, PARAM_SOURCE);
    if (VBOX_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    switch (param1.type)
    {
    case PARMTYPE_IMMEDIATE:
    case PARMTYPE_ADDRESS:
        if (!(param1.flags & PARAM_VAL32))
            return VERR_EM_INTERPRETER;
        addr = (RTGCPTR)param1.val.val32;
        break;

    default:
        return VERR_EM_INTERPRETER;
    }

    /** @todo is addr always a flat linear address, or ds-based
     * (in the absence of segment override prefixes)?
     */
    rc = PGMInvalidatePage(pVM, addr);
    if (VBOX_SUCCESS(rc))
    {
        /* Manually invalidate the page for the VM's TLB. */
        SVMInvlpgA(addr, uASID);
        return VINF_SUCCESS;
    }
    Assert(rc == VERR_REM_FLUSHED_PAGES_OVERFLOW);
    return (rc == VERR_REM_FLUSHED_PAGES_OVERFLOW) ? VERR_EM_INTERPRETER : rc;
}

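/* Note: INVLPGA (wrapped by SVMInvlpgA above) invalidates the TLB entry for the
 * virtual address in rAX within the ASID given in ECX only, so the guest page
 * mapping can be dropped without touching host translations. */
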
/**
 * Interprets INVLPG.
 *
 * @returns VBox status code.
 * @retval  VINF_*                  Scheduling instructions.
 * @retval  VERR_EM_INTERPRETER     Something we can't cope with.
 * @retval  VERR_*                  Fatal errors.
 *
 * @param   pVM         The VM handle.
 * @param   pRegFrame   The register frame.
 * @param   uASID       Tagged TLB id for the guest.
 *
 * Updates the EIP if an instruction was executed successfully.
 */
static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID)
{
    /*
     * Only allow 32-bit code.
     */
    if (SELMIsSelector32Bit(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid))
    {
        RTGCPTR pbCode;
        int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &pbCode);
        if (VBOX_SUCCESS(rc))
        {
            uint32_t    cbOp;
            DISCPUSTATE Cpu;

            Cpu.mode = CPUMODE_32BIT;
            rc = EMInterpretDisasOneEx(pVM, pbCode, pRegFrame, &Cpu, &cbOp);
            Assert(VBOX_FAILURE(rc) || Cpu.pCurInstr->opcode == OP_INVLPG);
            if (VBOX_SUCCESS(rc) && Cpu.pCurInstr->opcode == OP_INVLPG)
            {
                Assert(cbOp == Cpu.opsize);
                rc = svmInterpretInvlPg(pVM, &Cpu, pRegFrame, uASID);
                if (VBOX_SUCCESS(rc))
                {
                    pRegFrame->eip += cbOp; /* Move on to the next instruction. */
                }
                return rc;
            }
        }
    }
    return VERR_EM_INTERPRETER;
}
