/* $Id: HWSVMR0.cpp 43494 2012-10-01 14:29:11Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - Host Context Ring-0.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/pdmapi.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/cpuset.h>
#include <iprt/mp.h>
#include <iprt/time.h>
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
# include <iprt/thread.h>
#endif
#include <iprt/x86.h>
#include "HWSVMR0.h"

#include "dtrace/VBoxVMM.h"


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
static int hmR0SvmEmulateTprVMMCall(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Convert hidden selector attribute word between VMX and SVM formats. */
#define SVM_HIDSEGATTR_VMX2SVM(a)   (((a) & 0xFF) | (((a) & 0xF000) >> 4))
#define SVM_HIDSEGATTR_SVM2VMX(a)   (((a) & 0xFF) | (((a) & 0x0F00) << 4))
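/* Note: in the VMX access-rights format the G/D/L/AVL bits live in bits 15:12
   with bits 11:8 unused, while SVM packs those same four bits into bits 11:8
   directly above the P/DPL/S/type byte. The macros above merely shift that
   nibble between the two layouts; the low byte is identical in both. */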

#define SVM_WRITE_SELREG(REG, reg) \
    do \
    { \
        Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
        Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \
        pvVMCB->guest.REG.u16Sel   = pCtx->reg.Sel; \
        pvVMCB->guest.REG.u32Limit = pCtx->reg.u32Limit; \
        pvVMCB->guest.REG.u64Base  = pCtx->reg.u64Base; \
        pvVMCB->guest.REG.u16Attr  = SVM_HIDSEGATTR_VMX2SVM(pCtx->reg.Attr.u); \
    } while (0)

#define SVM_READ_SELREG(REG, reg) \
    do \
    { \
        pCtx->reg.Sel      = pvVMCB->guest.REG.u16Sel; \
        pCtx->reg.ValidSel = pvVMCB->guest.REG.u16Sel; \
        pCtx->reg.fFlags   = CPUMSELREG_FLAGS_VALID; \
        pCtx->reg.u32Limit = pvVMCB->guest.REG.u32Limit; \
        pCtx->reg.u64Base  = pvVMCB->guest.REG.u64Base; \
        pCtx->reg.Attr.u   = SVM_HIDSEGATTR_SVM2VMX(pvVMCB->guest.REG.u16Attr); \
    } while (0)
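/* Note: both macros rely on locals named pCtx (guest CPU context) and pvVMCB
   (current VMCB) being in scope at the expansion site. */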

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/* IO operation lookup arrays. */
static uint32_t const g_aIOSize[8]  = {0, 1, 2, 0, 4, 0, 0, 0};
static uint32_t const g_aIOOpAnd[8] = {0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0};
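/* Note: these tables are indexed by the one-hot SZ8/SZ16/SZ32 operand-size
   field of the IOIO intercept exit information (values 1, 2 or 4), mapping it
   to the access width in bytes and the corresponding operand mask. */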


/**
 * Sets up and activates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the CPU info struct.
 * @param   pVM             Pointer to the VM (can be NULL after a resume!).
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 * @param   fEnabledByHost  Whether the host OS has already enabled AMD-V (must be false).
 */
VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
{
    AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER);
    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);

    /*
     * We must turn on AMD-V and set up the host state physical address, as those MSRs are per cpu/core.
     */
    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
    if (fEfer & MSR_K6_EFER_SVME)
    {
        /*
         * If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active, we blindly use AMD-V anyway.
         */
        if (   pVM
            && pVM->hm.s.svm.fIgnoreInUseError)
        {
            pCpu->fIgnoreAMDVInUseError = true;
        }

        if (!pCpu->fIgnoreAMDVInUseError)
            return VERR_SVM_IN_USE;
    }

    /* Turn on AMD-V in the EFER MSR. */
    ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);

    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);

    /*
     * Theoretically, other hypervisors may have used ASIDs, so ideally we should flush all non-zero ASIDs
     * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
     * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
     * to flush the TLB before using a new ASID.
     */
    pCpu->fFlushAsidBeforeUse = true;

    /*
     * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
     */
    ++pCpu->cTlbFlushes;

    return VINF_SUCCESS;
}


/**
 * Deactivates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the CPU info struct.
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 */
VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
{
    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    NOREF(pCpu);

    /* Turn off AMD-V in the EFER MSR. */
    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);

    /* Invalidate host state physical address. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);

    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per VM AMD-V init.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR0DECL(int) SVMR0InitVM(PVM pVM)
{
    int rc;

    pVM->hm.s.svm.hMemObjIOBitmap = NIL_RTR0MEMOBJ;

    /* Allocate 12 KB (3 pages) for the IO bitmap (doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVM->hm.s.svm.hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hm.s.svm.pvIOBitmap     = RTR0MemObjAddress(pVM->hm.s.svm.hMemObjIOBitmap);
    pVM->hm.s.svm.HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(pVM->hm.s.svm.hMemObjIOBitmap, 0);
    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(pVM->hm.s.svm.pvIOBitmap, 3 << PAGE_SHIFT, 0xffffffff);

    /*
     * Erratum 170 requires a forced TLB flush for each world switch:
     * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     * Athlon X2:   0x6b 1/2
     *              0x68 1/2
     * Athlon 64:   0x7f 1
     *              0x6f 2
     * Sempron:     0x7f 1/2
     *              0x6f 2
     *              0x6c 2
     *              0x7c 2
     * Turion 64:   0x68 2
     */
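    /* CPUID leaf 0x00000001 EAX layout: stepping in bits 3:0, base model in
       bits 7:4, base family in bits 11:8, extended model in bits 19:16 and
       extended family in bits 27:20; the extended fields only apply when the
       base family is 0xF, which is what the decode below implements. */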
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;
    if (    u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
        pVM->hm.s.svm.fAlwaysFlushTLB = true;
    }

    /* Allocate VMCBs for all guest CPUs. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->hm.s.svm.hMemObjVMCBHost  = NIL_RTR0MEMOBJ;
        pVCpu->hm.s.svm.hMemObjVMCB      = NIL_RTR0MEMOBJ;
        pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;

        /* Allocate one page for the host context. */
        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVMCBHost, 1 << PAGE_SHIFT, false /* fExecutable */);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->hm.s.svm.pvVMCBHost     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCBHost);
        pVCpu->hm.s.svm.HCPhysVMCBHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCBHost, 0);
        Assert(pVCpu->hm.s.svm.HCPhysVMCBHost < _4G);
        ASMMemZeroPage(pVCpu->hm.s.svm.pvVMCBHost);

        /* Allocate one page for the VM control block (VMCB). */
        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVMCB, 1 << PAGE_SHIFT, false /* fExecutable */);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->hm.s.svm.pvVMCB     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCB);
        pVCpu->hm.s.svm.HCPhysVMCB = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCB, 0);
        Assert(pVCpu->hm.s.svm.HCPhysVMCB < _4G);
        ASMMemZeroPage(pVCpu->hm.s.svm.pvVMCB);

        /* Allocate 8 KB (2 pages) for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it). */
        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->hm.s.svm.pvMsrBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
        pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0);
        /* Set all bits to intercept all MSR accesses. */
        ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
    }

    return VINF_SUCCESS;
}


/**
 * Does Ring-0 per VM AMD-V termination.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR0DECL(int) SVMR0TermVM(PVM pVM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        if (pVCpu->hm.s.svm.hMemObjVMCBHost != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVMCBHost, false);
            pVCpu->hm.s.svm.pvVMCBHost      = 0;
            pVCpu->hm.s.svm.HCPhysVMCBHost  = 0;
            pVCpu->hm.s.svm.hMemObjVMCBHost = NIL_RTR0MEMOBJ;
        }

        if (pVCpu->hm.s.svm.hMemObjVMCB != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVMCB, false);
            pVCpu->hm.s.svm.pvVMCB      = 0;
            pVCpu->hm.s.svm.HCPhysVMCB  = 0;
            pVCpu->hm.s.svm.hMemObjVMCB = NIL_RTR0MEMOBJ;
        }
        if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
            pVCpu->hm.s.svm.pvMsrBitmap      = 0;
            pVCpu->hm.s.svm.HCPhysMsrBitmap  = 0;
            pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
        }
    }
    if (pVM->hm.s.svm.hMemObjIOBitmap != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(pVM->hm.s.svm.hMemObjIOBitmap, false);
        pVM->hm.s.svm.pvIOBitmap      = 0;
        pVM->hm.s.svm.HCPhysIOBitmap  = 0;
        pVM->hm.s.svm.hMemObjIOBitmap = NIL_RTR0MEMOBJ;
    }
    return VINF_SUCCESS;
}


/**
 * Sets up AMD-V for the specified VM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    Assert(pVM->hm.s.svm.fSupported);

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        SVM_VMCB *pvVMCB = (SVM_VMCB *)pVM->aCpus[i].hm.s.svm.pvVMCB;

        AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_HMSVM_INVALID_PVMCB);

        /*
         * Program the control fields. Most of them never have to be changed again.
         * CR0/4 reads must be intercepted, our shadow values are not necessarily the same as the guest's.
         * Note: CR0 & CR4 can be safely read when guest and shadow copies are identical.
         */
        pvVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);

        /* CR0/4 writes must be intercepted for obvious reasons. */
        pvVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);

        /* Intercept all DRx reads and writes by default. Changed later on. */
        pvVMCB->ctrl.u16InterceptRdDRx = 0xFFFF;
        pvVMCB->ctrl.u16InterceptWrDRx = 0xFFFF;

        /* Intercept traps; only #NM is always intercepted. */
        pvVMCB->ctrl.u32InterceptException = RT_BIT(X86_XCPT_NM);
#ifdef VBOX_ALWAYS_TRAP_PF
        pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
#endif
#ifdef VBOX_STRICT
        pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_BP)
                                            | RT_BIT(X86_XCPT_DB)
                                            | RT_BIT(X86_XCPT_DE)
                                            | RT_BIT(X86_XCPT_UD)
                                            | RT_BIT(X86_XCPT_NP)
                                            | RT_BIT(X86_XCPT_SS)
                                            | RT_BIT(X86_XCPT_GP)
                                            | RT_BIT(X86_XCPT_MF)
                                            ;
#endif

        /* Set up instruction and miscellaneous intercepts. */
        pvVMCB->ctrl.u32InterceptCtrl1 =   SVM_CTRL1_INTERCEPT_INTR
                                         | SVM_CTRL1_INTERCEPT_VINTR
                                         | SVM_CTRL1_INTERCEPT_NMI
                                         | SVM_CTRL1_INTERCEPT_SMI
                                         | SVM_CTRL1_INTERCEPT_INIT
                                         | SVM_CTRL1_INTERCEPT_RDPMC
                                         | SVM_CTRL1_INTERCEPT_CPUID
                                         | SVM_CTRL1_INTERCEPT_RSM
                                         | SVM_CTRL1_INTERCEPT_HLT
                                         | SVM_CTRL1_INTERCEPT_INOUT_BITMAP
                                         | SVM_CTRL1_INTERCEPT_MSR_SHADOW
                                         | SVM_CTRL1_INTERCEPT_INVLPGA       /* AMD only */
                                         | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* fatal */
                                         | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Legacy FPU FERR handling. */
        pvVMCB->ctrl.u32InterceptCtrl2 =   SVM_CTRL2_INTERCEPT_VMRUN         /* required */
                                         | SVM_CTRL2_INTERCEPT_VMMCALL
                                         | SVM_CTRL2_INTERCEPT_VMLOAD
                                         | SVM_CTRL2_INTERCEPT_VMSAVE
                                         | SVM_CTRL2_INTERCEPT_STGI
                                         | SVM_CTRL2_INTERCEPT_CLGI
                                         | SVM_CTRL2_INTERCEPT_SKINIT
                                         | SVM_CTRL2_INTERCEPT_WBINVD
                                         | SVM_CTRL2_INTERCEPT_MONITOR
                                         | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside
                                                                                the guest (host thinks the cpu load is high) */

        Log(("pvVMCB->ctrl.u32InterceptException = %x\n", pvVMCB->ctrl.u32InterceptException));
        Log(("pvVMCB->ctrl.u32InterceptCtrl1     = %x\n", pvVMCB->ctrl.u32InterceptCtrl1));
        Log(("pvVMCB->ctrl.u32InterceptCtrl2     = %x\n", pvVMCB->ctrl.u32InterceptCtrl2));

        /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
        pvVMCB->ctrl.IntCtrl.n.u1VIrqMasking = 1;

        /* Ignore the priority in the TPR; just deliver it when we tell it to. */
        pvVMCB->ctrl.IntCtrl.n.u1IgnoreTPR   = 1;

        /* Set IO and MSR bitmap addresses. */
        pvVMCB->ctrl.u64IOPMPhysAddr  = pVM->hm.s.svm.HCPhysIOBitmap;
        pvVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;

        /* No LBR virtualization. */
        pvVMCB->ctrl.u64LBRVirt = 0;

        /* The ASID must start at 1; the host uses 0. */
        pvVMCB->ctrl.TLBCtrl.n.u32ASID = 1;

        /*
         * Setup the PAT MSR (nested paging only).
         * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
         * so choose type 6 (WB) for all PAT slots.
         */
        pvVMCB->guest.u64GPAT = 0x0006060606060606ULL;

        /* If nested paging is not in use, additional intercepts have to be set up. */
        if (!pVM->hm.s.fNestedPaging)
        {
            /* CR3 reads/writes must be intercepted; our shadow values are different from the guest's. */
            pvVMCB->ctrl.u16InterceptRdCRx |= RT_BIT(3);
            pvVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(3);

            /*
             * We must also intercept:
             * - INVLPG (must go through shadow paging)
             * - task switches (may change CR3/EFLAGS/LDT)
             */
            pvVMCB->ctrl.u32InterceptCtrl1 |=   SVM_CTRL1_INTERCEPT_INVLPG
                                              | SVM_CTRL1_INTERCEPT_TASK_SWITCH;

            /* Page faults must be intercepted to implement shadow paging. */
            pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
        }

        /*
         * The following MSRs are saved/restored automatically by vmload/vmsave, so we allow the guest
         * to modify them directly.
         */
        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_CSTAR, true, true);
        hmR0SvmSetMSRPermission(pVCpu, MSR_K6_STAR, true, true);
        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_SF_MASK, true, true);
        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, true, true);
        hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
        hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
        hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, true, true);
    }

    return rc;
}


/**
 * Sets the permission bits for the specified MSR.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   ulMSR       The MSR number.
 * @param   fRead       Whether reading is allowed.
 * @param   fWrite      Whether writing is allowed.
 */
static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
{
    unsigned ulBit;
    uint8_t *pvMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;

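    /*
     * The MSR permission bitmap uses two bits per MSR (the even bit intercepts
     * reads, the odd bit intercepts writes; a set bit means intercept). It is
     * split into three 2 KB regions covering MSRs 0x00000000-0x00001FFF,
     * 0xC0000000-0xC0001FFF and 0xC0010000-0xC0011FFF at byte offsets 0x0,
     * 0x800 and 0x1000 respectively.
     */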
    if (ulMSR <= 0x00001FFF)
    {
        /* Pentium-compatible MSRs */
        ulBit = ulMSR * 2;
    }
    else if (   ulMSR >= 0xC0000000
             && ulMSR <= 0xC0001FFF)
    {
        /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
        ulBit = (ulMSR - 0xC0000000) * 2;
        pvMsrBitmap += 0x800;
    }
    else if (   ulMSR >= 0xC0010000
             && ulMSR <= 0xC0011FFF)
    {
        /* AMD Seventh and Eighth Generation Processor MSRs */
        ulBit = (ulMSR - 0xC0010000) * 2;
        pvMsrBitmap += 0x1000;
    }
    else
    {
        AssertFailed();
        return;
    }
    Assert(ulBit < 16 * 1024 - 1);
    if (fRead)
        ASMBitClear(pvMsrBitmap, ulBit);
    else
        ASMBitSet(pvMsrBitmap, ulBit);

    if (fWrite)
        ASMBitClear(pvMsrBitmap, ulBit + 1);
    else
        ASMBitSet(pvMsrBitmap, ulBit + 1);
}


/**
 * Injects an event (trap or external interrupt).
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pvVMCB      Pointer to the VMCB.
 * @param   pCtx        Pointer to the guest CPU context.
 * @param   pEvent      Pointer to the SVM event to inject.
 */
DECLINLINE(void) hmR0SvmInjectEvent(PVMCPU pVCpu, SVM_VMCB *pvVMCB, CPUMCTX *pCtx, SVM_EVENT *pEvent)
{
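    /*
     * The event is written into the VMCB EVENTINJ field: bits 7:0 hold the
     * vector, bits 10:8 the type (external interrupt, NMI, exception or
     * software interrupt), bit 11 the error-code-valid flag, bit 31 the
     * valid bit and bits 63:32 the error code.
     */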
#ifdef VBOX_WITH_STATISTICS
    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
#endif

#ifdef VBOX_STRICT
    if (pEvent->n.u8Vector == 0xE)
    {
        Log(("SVM: Inject int %d at %RGv error code=%02x CR2=%RGv intInfo=%08x\n", pEvent->n.u8Vector,
             (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode, (RTGCPTR)pCtx->cr2, pEvent->au64[0]));
    }
    else if (pEvent->n.u8Vector < 0x20)
        Log(("SVM: Inject int %d at %RGv error code=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode));
    else
    {
        Log(("INJ-EI: %x at %RGv\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip));
        Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
        Assert(pCtx->eflags.u32 & X86_EFL_IF);
    }
#endif

    /* Set event injection state. */
    pvVMCB->ctrl.EventInject.au64[0] = pEvent->au64[0];
}


/**
 * Checks for pending guest interrupts and injects them.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pvVMCB      Pointer to the VMCB.
 * @param   pCtx        Pointer to the guest CPU Context.
 */
static int hmR0SvmCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, SVM_VMCB *pvVMCB, CPUMCTX *pCtx)
{
    int rc;
    NOREF(pVM);

    /*
     * Dispatch any pending interrupts (injected before, but a VM-exit occurred prematurely).
     */
    if (pVCpu->hm.s.Event.fPending)
    {
        SVM_EVENT Event;

        Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hm.s.Event.intInfo, pVCpu->hm.s.Event.errCode,
             (RTGCPTR)pCtx->rip));
        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
        Event.au64[0] = pVCpu->hm.s.Event.intInfo;
        hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);

        pVCpu->hm.s.Event.fPending = false;
        return VINF_SUCCESS;
    }

    /*
     * If an active trap is already pending, it must be forwarded first; only then may new events be generated.
     */
    if (!TRPMHasTrap(pVCpu))
    {
        if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
        {
            SVM_EVENT Event;

            Log(("CPU%d: injecting #NMI\n", pVCpu->idCpu));
            Event.n.u8Vector     = X86_XCPT_NMI;
            Event.n.u1Valid      = 1;
            Event.n.u32ErrorCode = 0;
            Event.n.u3Type       = SVM_EVENT_NMI;

            hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
            return VINF_SUCCESS;
        }

        /** @todo SMI interrupts. */

        /*
         * When external interrupts are pending, we should exit the VM when IF is set.
         */
        if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
        {
            if (   !(pCtx->eflags.u32 & X86_EFL_IF)
                || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
            {
                if (!pvVMCB->ctrl.IntCtrl.n.u1VIrqValid)
                {
                    if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
                        LogFlow(("Enable irq window exit!\n"));
                    else
                    {
                        Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n",
                             (RTGCPTR)pCtx->rip));
                    }

                    /** @todo Use virtual interrupt method to inject a pending IRQ; dispatched as
                     *        soon as guest.IF is set. */
                    pvVMCB->ctrl.u32InterceptCtrl1     |= SVM_CTRL1_INTERCEPT_VINTR;
                    pvVMCB->ctrl.IntCtrl.n.u1VIrqValid  = 1;
                    pvVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0; /* don't care */
                }
            }
            else
            {
                uint8_t u8Interrupt;

                rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
                Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
                if (RT_SUCCESS(rc))
                {
                    rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
                    AssertRC(rc);
                }
                else
                {
                    /* Can only happen in rare cases where a pending interrupt is cleared behind our back. */
                    Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)));
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
                    /* Just continue */
                }
            }
        }
    }

#ifdef VBOX_STRICT
    if (TRPMHasTrap(pVCpu))
    {
        uint8_t u8Vector;
        rc = TRPMQueryTrapAll(pVCpu, &u8Vector, 0, 0, 0);
        AssertRC(rc);
    }
#endif

    if (   (pCtx->eflags.u32 & X86_EFL_IF)
        && (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        && TRPMHasTrap(pVCpu)
       )
    {
        uint8_t     u8Vector;
        TRPMEVENT   enmType;
        SVM_EVENT   Event;
        RTGCUINT    u32ErrorCode;

        Event.au64[0] = 0;

        /* If a new event is pending, then dispatch it now. */
        rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &u32ErrorCode, 0);
        AssertRC(rc);
        Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
        Assert(enmType != TRPM_SOFTWARE_INT);

        /* Clear the pending trap. */
        rc = TRPMResetTrap(pVCpu);
        AssertRC(rc);

        Event.n.u8Vector     = u8Vector;
        Event.n.u1Valid      = 1;
        Event.n.u32ErrorCode = u32ErrorCode;

        if (enmType == TRPM_TRAP)
        {
            switch (u8Vector)
            {
                case X86_XCPT_DF:
                case X86_XCPT_TS:
                case X86_XCPT_NP:
                case X86_XCPT_SS:
                case X86_XCPT_GP:
                case X86_XCPT_PF:
                case X86_XCPT_AC:
                    /* Valid error codes. */
                    Event.n.u1ErrorCodeValid = 1;
                    break;
                default:
                    break;
            }
            if (u8Vector == X86_XCPT_NMI)
                Event.n.u3Type = SVM_EVENT_NMI;
            else
                Event.n.u3Type = SVM_EVENT_EXCEPTION;
        }
        else
            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;

        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
        hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
    } /* if (interrupts can be dispatched) */

    return VINF_SUCCESS;
}


/**
 * Saves the host state.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
{
    NOREF(pVM);
    NOREF(pVCpu);
    /* Nothing to do here. */
    return VINF_SUCCESS;
}


/**
 * Loads the guest state.
 *
 * NOTE: Don't do anything here that can cause a jump back to ring-3!!!
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest CPU context.
 */
VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    RTGCUINTPTR val;
    SVM_VMCB *pvVMCB;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Setup AMD SVM. */
    Assert(pVM->hm.s.svm.fSupported);

    pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;
    AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_HMSVM_INVALID_PVMCB);

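    /* Note: only state marked dirty in fContextUseFlags since the last world
       switch is written back to the VMCB below; clean fields keep their
       previous VMCB values. */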
    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
    {
        SVM_WRITE_SELREG(CS, cs);
        SVM_WRITE_SELREG(SS, ss);
        SVM_WRITE_SELREG(DS, ds);
        SVM_WRITE_SELREG(ES, es);
        SVM_WRITE_SELREG(FS, fs);
        SVM_WRITE_SELREG(GS, gs);
    }

    /* Guest CPU context: LDTR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
    {
        SVM_WRITE_SELREG(LDTR, ldtr);
    }

    /* Guest CPU context: TR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
    {
        SVM_WRITE_SELREG(TR, tr);
    }

    /* Guest CPU context: GDTR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
    {
        pvVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
        pvVMCB->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
    }

    /* Guest CPU context: IDTR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
    {
        pvVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
        pvVMCB->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
    }

    /*
     * Sysenter MSRs (unconditional)
     */
    pvVMCB->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
    pvVMCB->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
    pvVMCB->guest.u64SysEnterESP = pCtx->SysEnter.esp;

    /* Control registers */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
    {
        val = pCtx->cr0;
        if (!CPUMIsGuestFPUStateActive(pVCpu))
        {
            /* Always use #NM exceptions to load the FPU/XMM state on demand. */
            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
        }
        else
        {
            /** @todo check if we support the old style mess correctly. */
            if (!(val & X86_CR0_NE))
            {
                Log(("Forcing X86_CR0_NE!!!\n"));

                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
                if (!pVCpu->hm.s.fFPUOldStyleOverride)
                {
                    pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
                    pVCpu->hm.s.fFPUOldStyleOverride = true;
                }
            }
            val |= X86_CR0_NE;  /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
        }
        /* Always enable caching. */
        val &= ~(X86_CR0_CD|X86_CR0_NW);

        /*
         * Note: WP is not relevant in nested paging mode as we catch accesses on the (guest) physical level.
         * Note: In nested paging mode, the guest is allowed to run with paging disabled; the guest-physical to host-physical
         *       translation will remain active.
         */
        if (!pVM->hm.s.fNestedPaging)
        {
            val |= X86_CR0_PG;  /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
            val |= X86_CR0_WP;  /* Must set this as we rely on protecting various pages and supervisor writes must be caught. */
        }
        pvVMCB->guest.u64CR0 = val;
    }
    /* CR2 as well */
    pvVMCB->guest.u64CR2 = pCtx->cr2;

    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
    {
        /* Save our shadow CR3 register. */
        if (pVM->hm.s.fNestedPaging)
        {
            PGMMODE enmShwPagingMode;

#if HC_ARCH_BITS == 32
            if (CPUMIsGuestInLongModeEx(pCtx))
                enmShwPagingMode = PGMMODE_AMD64_NX;
            else
#endif
                enmShwPagingMode = PGMGetHostMode(pVM);

            pvVMCB->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
            Assert(pvVMCB->ctrl.u64NestedPagingCR3);
            pvVMCB->guest.u64CR3 = pCtx->cr3;
        }
        else
        {
            pvVMCB->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
            Assert(pvVMCB->guest.u64CR3 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
        }
    }

    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
    {
        val = pCtx->cr4;
        if (!pVM->hm.s.fNestedPaging)
        {
            switch (pVCpu->hm.s.enmShadowMode)
            {
                case PGMMODE_REAL:
                case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
                    AssertFailed();
                    return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;

                case PGMMODE_32_BIT:        /* 32-bit paging. */
                    val &= ~X86_CR4_PAE;
                    break;

                case PGMMODE_PAE:           /* PAE paging. */
                case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
                    /* Must use PAE paging as we could use physical memory > 4 GB. */
                    val |= X86_CR4_PAE;
                    break;

                case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
                case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
#ifdef VBOX_ENABLE_64_BITS_GUESTS
                    break;
#else
                    AssertFailed();
                    return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#endif

                default:                    /* shut up gcc */
                    AssertFailed();
                    return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
            }
        }
        pvVMCB->guest.u64CR4 = val;
    }

    /* Debug registers. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
    {
        pCtx->dr[6] |= X86_DR6_INIT_VAL;                                      /* set all reserved bits to 1. */
        pCtx->dr[6] &= ~RT_BIT(12);                                           /* must be zero. */

        pCtx->dr[7] &= 0xffffffff;                                            /* upper 32 bits reserved */
        pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));  /* must be zero */
        pCtx->dr[7] |= 0x400;                                                 /* must be one */

        pvVMCB->guest.u64DR7 = pCtx->dr[7];
        pvVMCB->guest.u64DR6 = pCtx->dr[6];

#ifdef DEBUG
        /* Sync the hypervisor debug state now if any breakpoint is armed. */
        if (    CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK|X86_DR7_GD)
            && !CPUMIsHyperDebugStateActive(pVCpu)
            && !DBGFIsStepping(pVCpu))
        {
            /* Save the host and load the hypervisor debug state. */
            int rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, false /* exclude DR6 */);
            AssertRC(rc);

            /* DRx intercepts remain enabled. */

            /* Override dr6 & dr7 with the hypervisor values. */
            pvVMCB->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
            pvVMCB->guest.u64DR6 = CPUMGetHyperDR6(pVCpu);
        }
        else
#endif
        /* Sync the debug state now if any breakpoint is armed. */
        if (    (pCtx->dr[7] & (X86_DR7_ENABLED_MASK|X86_DR7_GD))
            && !CPUMIsGuestDebugStateActive(pVCpu)
            && !DBGFIsStepping(pVCpu))
        {
            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);

            /* Disable drx move intercepts. */
            pvVMCB->ctrl.u16InterceptRdDRx = 0;
            pvVMCB->ctrl.u16InterceptWrDRx = 0;

            /* Save the host and load the guest debug state. */
            int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, false /* exclude DR6 */);
            AssertRC(rc);
        }
    }

    /* RIP, RSP and RFLAGS */
    pvVMCB->guest.u64RIP    = pCtx->rip;
    pvVMCB->guest.u64RSP    = pCtx->rsp;
    pvVMCB->guest.u64RFlags = pCtx->eflags.u32;

    /* Set CPL */
    pvVMCB->guest.u8CPL     = pCtx->ss.Attr.n.u2Dpl;

    /* RAX/EAX too, as VMRUN uses RAX as an implicit parameter. */
    pvVMCB->guest.u64RAX    = pCtx->rax;

    /* vmrun will fail without MSR_K6_EFER_SVME. */
    pvVMCB->guest.u64EFER   = pCtx->msrEFER | MSR_K6_EFER_SVME;

    /* 64-bit guest mode? */
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
#if !defined(VBOX_ENABLE_64_BITS_GUESTS)
        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
#else
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        if (!pVM->hm.s.fAllow64BitGuests)
            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
# endif
        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
#endif
        /* Unconditionally update these as wrmsr might have changed them. (HM_CHANGED_GUEST_SEGMENT_REGS will not be set) */
        pvVMCB->guest.FS.u64Base = pCtx->fs.u64Base;
        pvVMCB->guest.GS.u64Base = pCtx->gs.u64Base;
    }
    else
    {
        /* Filter out the MSR_K6_EFER_LME bit or else AMD-V expects amd64 shadow paging. */
        pvVMCB->guest.u64EFER &= ~MSR_K6_EFER_LME;

        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
    }

    /* TSC offset. */
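    /* Note: with TSC offsetting the guest observes host_tsc + u64TSCOffset on
       RDTSC/RDTSCP, so the offset may only be used when it cannot make the
       guest TSC appear to jump backwards; otherwise we intercept and emulate. */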
    if (TMCpuTickCanUseRealTSC(pVCpu, &pvVMCB->ctrl.u64TSCOffset))
    {
        uint64_t u64CurTSC = ASMReadTSC();
        if (u64CurTSC + pvVMCB->ctrl.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
        {
            pvVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
            pvVMCB->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCOffset);
        }
        else
        {
            /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */
            LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC,
                     pvVMCB->ctrl.u64TSCOffset, u64CurTSC + pvVMCB->ctrl.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu),
                     TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pvVMCB->ctrl.u64TSCOffset, TMCpuTickGet(pVCpu)));
            pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
            pvVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow);
        }
    }
    else
    {
        pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
        pvVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCIntercept);
    }

    /* Sync the various MSRs for 64-bit mode. */
    pvVMCB->guest.u64STAR         = pCtx->msrSTAR;          /* legacy syscall eip, cs & ss */
    pvVMCB->guest.u64LSTAR        = pCtx->msrLSTAR;         /* 64-bit mode syscall rip */
    pvVMCB->guest.u64CSTAR        = pCtx->msrCSTAR;         /* compatibility mode syscall rip */
    pvVMCB->guest.u64SFMASK       = pCtx->msrSFMASK;        /* syscall flag mask */
    pvVMCB->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;  /* SWAPGS exchange value */

#ifdef DEBUG
    /* Intercept X86_XCPT_DB if stepping is enabled */
    if (   DBGFIsStepping(pVCpu)
        || CPUMIsHyperDebugStateActive(pVCpu))
        pvVMCB->ctrl.u32InterceptException |=  RT_BIT(X86_XCPT_DB);
    else
        pvVMCB->ctrl.u32InterceptException &= ~RT_BIT(X86_XCPT_DB);
#endif

    /* Done. */
    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;

    return VINF_SUCCESS;
}


/**
 * Sets up the TLB for the current ASID.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static void hmR0SvmSetupTLB(PVM pVM, PVMCPU pVCpu)
{
    PHMGLOBLCPUINFO pCpu;

    AssertPtr(pVM);
    AssertPtr(pVCpu);

    SVM_VMCB *pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;
    pCpu = HMR0GetCurrentCpu();

    /*
     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
     * This can happen both for start & resume due to long jumps back to ring-3.
     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
     * so we cannot reuse the ASIDs without flushing.
     */
    bool fNewAsid = false;
    if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    {
        pVCpu->hm.s.fForceTLBFlush = true;
        fNewAsid = true;
    }

    /*
     * Set TLB flush state as checked until we return from the world switch.
     */
    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);

    /*
     * Check for TLB shootdown flushes.
     */
    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
        pVCpu->hm.s.fForceTLBFlush = true;

    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;

    if (RT_UNLIKELY(pVM->hm.s.svm.fAlwaysFlushTLB))
    {
        /*
         * This is AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
         */
        pCpu->uCurrentAsid                = 1;
        pVCpu->hm.s.uCurrentAsid          = 1;
        pVCpu->hm.s.cTlbFlushes           = pCpu->cTlbFlushes;
        pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    }
    else if (pVCpu->hm.s.fForceTLBFlush)
    {
        if (fNewAsid)
        {
            ++pCpu->uCurrentAsid;
            bool fHitASIDLimit = false;
            if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
            {
                pCpu->uCurrentAsid = 1;     /* start at 1; host uses 0 */
                pCpu->cTlbFlushes++;
                fHitASIDLimit      = true;

                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                {
                    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
                    pCpu->fFlushAsidBeforeUse = true;
                }
                else
                {
                    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
                    pCpu->fFlushAsidBeforeUse = false;
                }
            }

            if (   !fHitASIDLimit
                && pCpu->fFlushAsidBeforeUse)
            {
                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
                else
                {
                    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
                    pCpu->fFlushAsidBeforeUse = false;
                }
            }

            pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
            pVCpu->hm.s.cTlbFlushes  = pCpu->cTlbFlushes;
        }
        else
        {
            if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
            else
                pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
        }

        pVCpu->hm.s.fForceTLBFlush = false;
    }
    else
    {
        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
         *        not be executed. See hmQueueInvlPage() where it is commented
         *        out. Support individual entry flushing someday. */
        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
        {
            /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
            for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
                SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pvVMCB->ctrl.TLBCtrl.n.u32ASID);
        }
    }

    pVCpu->hm.s.TlbShootdown.cPages = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

    /* Update VMCB with the ASID. */
    pvVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;

    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

#ifdef VBOX_WITH_STATISTICS
    if (pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
    else if (   pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
             || pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID);
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch);
#endif
}
1191 |
|
---|
1192 |
|
---|
1193 | /**
|
---|
1194 | * Runs guest code in an AMD-V VM.
|
---|
1195 | *
|
---|
1196 | * @returns VBox status code.
|
---|
1197 | * @param pVM Pointer to the VM.
|
---|
1198 | * @param pVCpu Pointer to the VMCPU.
|
---|
1199 | * @param pCtx Pointer to the guest CPU context.
|
---|
1200 | */
|
---|
1201 | VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
|
---|
1202 | {
|
---|
1203 | STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
|
---|
1204 | STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
|
---|
1205 | STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
|
---|
1206 |
|
---|
1207 | VBOXSTRICTRC rc = VINF_SUCCESS;
|
---|
1208 | int rc2;
|
---|
1209 | uint64_t exitCode = (uint64_t)SVM_EXIT_INVALID;
|
---|
1210 | SVM_VMCB *pvVMCB = NULL;
|
---|
1211 | bool fSyncTPR = false;
|
---|
1212 | unsigned cResume = 0;
|
---|
1213 | uint8_t u8LastTPR = 0; /* Initialized for potentially stupid compilers. */
|
---|
1214 | uint32_t u32HostExtFeatures = 0;
|
---|
1215 | PHMGLOBLCPUINFO pCpu = 0;
|
---|
1216 | RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
|
---|
1217 | #ifdef VBOX_STRICT
|
---|
1218 | RTCPUID idCpuCheck;
|
---|
1219 | #endif
|
---|
1220 | #ifdef VBOX_HIGH_RES_TIMERS_HACK_IN_RING0
|
---|
1221 | uint64_t u64LastTime = RTTimeMilliTS();
|
---|
1222 | #endif
|
---|
1223 |
|
---|
1224 | pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;
|
---|
1225 | AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
|
---|
1226 |
|
---|
1227 | /*
|
---|
1228 | * We can jump to this point to resume execution after determining that a VM-exit is innocent.
|
---|
1229 | */
|
---|
1230 | ResumeExecution:
|
---|
1231 | if (!STAM_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry))
|
---|
1232 | STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x);
|
---|
1233 | Assert(!HMR0SuspendPending());
|
---|
1234 |
|
---|
1235 | /*
|
---|
1236 | * Safety precaution; looping for too long here can have a very bad effect on the host.
|
---|
1237 | */
|
---|
1238 | if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops))
|
---|
1239 | {
|
---|
1240 | STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
|
---|
1241 | rc = VINF_EM_RAW_INTERRUPT;
|
---|
1242 | goto end;
|
---|
1243 | }
|
---|
1244 |
|
---|
1245 | /*
|
---|
1246 | * Check for IRQ inhibition due to instruction fusing (sti, mov ss).
|
---|
1247 | */
|
---|
1248 | if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
|
---|
1249 | {
|
---|
1250 | Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
|
---|
1251 | if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
|
---|
1252 | {
|
---|
1253 | /*
|
---|
1254 | * Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
|
---|
1255 | * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
|
---|
1256 | * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
|
---|
1257 | * break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
|
---|
1258 | */
|
---|
1259 | VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
|
---|
1260 | /* Irq inhibition is no longer active; clear the corresponding SVM state. */
|
---|
1261 | pvVMCB->ctrl.u64IntShadow = 0;
|
---|
1262 | }
|
---|
1263 | }
|
---|
1264 | else
|
---|
1265 | {
|
---|
1266 | /* Irq inhibition is no longer active; clear the corresponding SVM state. */
|
---|
1267 | pvVMCB->ctrl.u64IntShadow = 0;
|
---|
1268 | }
|
---|
1269 |
|
---|
1270 | #ifdef VBOX_HIGH_RES_TIMERS_HACK_IN_RING0
|
---|
1271 | if (RT_UNLIKELY((cResume & 0xf) == 0))
|
---|
1272 | {
|
---|
1273 | uint64_t u64CurTime = RTTimeMilliTS();
|
---|
1274 |
|
---|
1275 | if (RT_UNLIKELY(u64CurTime > u64LastTime))
|
---|
1276 | {
|
---|
1277 | u64LastTime = u64CurTime;
|
---|
1278 | TMTimerPollVoid(pVM, pVCpu);
|
---|
1279 | }
|
---|
1280 | }
|
---|
1281 | #endif
|
---|

    /*
     * Check for pending actions that force us to go back to ring-3.
     */
    if (   VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
        || VMCPU_FF_ISPENDING(pVCpu,
                                VMCPU_FF_HM_TO_R3_MASK
                              | VMCPU_FF_PGM_SYNC_CR3
                              | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                              | VMCPU_FF_REQUEST))
    {
        /* Check if a sync operation is pending. */
        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        {
            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
            AssertRC(VBOXSTRICTRC_VAL(rc));
            if (rc != VINF_SUCCESS)
            {
                Log(("Pending pool sync is forcing us back to ring 3; rc=%d\n", VBOXSTRICTRC_VAL(rc)));
                goto end;
            }
        }

#ifdef DEBUG
        /* Intercept X86_XCPT_DB if stepping is enabled */
        if (!DBGFIsStepping(pVCpu))
#endif
        {
            if (   VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK)
                || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
            {
                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchToR3);
                rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
                goto end;
            }
        }

        /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
        if (   VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
            || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
        {
            rc = VINF_EM_PENDING_REQUEST;
            goto end;
        }

        /* Check if a pgm pool flush is in progress. */
        if (VM_FF_ISPENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
        {
            rc = VINF_PGM_POOL_FLUSH_PENDING;
            goto end;
        }

        /* Check if DMA work is pending (2nd+ run). */
        if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA) && cResume > 1)
        {
            rc = VINF_EM_RAW_TO_R3;
            goto end;
        }
    }

#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    /*
     * Exit to ring-3 if preemption or other work is pending.
     *
     * Interrupts are disabled before the call to make sure we don't miss any interrupt
     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
     * further down, but hmR0SvmCheckPendingInterrupt makes that impossible.)
     *
     * Note! Interrupts must be disabled *before* we check for TLB flushes; TLB
     *       shootdowns rely on this.
     */
    uOldEFlags = ASMIntDisableFlags();
    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);
        rc = VINF_EM_RAW_INTERRUPT;
        goto end;
    }
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
#endif

    /*
     * When external interrupts are pending, we should exit the VM when IF is set.
     * Note: *After* VM_FF_INHIBIT_INTERRUPTS check!!
     */
    rc = hmR0SvmCheckPendingInterrupt(pVM, pVCpu, pvVMCB, pCtx);
    if (RT_FAILURE(rc))
        goto end;

    /*
     * TPR caching using CR8 is only available in 64-bit mode or with 32-bit guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is
     * supported.
     * Note: we can't do this in SVMR0LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)! (no longer true)
     */
    /** @todo query and update the TPR only when it could have been changed (mmio access).
     */
    if (pVM->hm.s.fHasIoApic)
    {
        /* TPR caching in CR8 */
        bool fPending;
        rc2 = PDMApicGetTPR(pVCpu, &u8LastTPR, &fPending);
        AssertRC(rc2);

        if (pVM->hm.s.fTPRPatchingActive)
        {
            /* Our patch code uses LSTAR for TPR caching. */
            pCtx->msrLSTAR = u8LastTPR;

            if (fPending)
            {
                /* A TPR change could activate a pending interrupt, so catch lstar writes. */
                hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, false);
            }
            else
            {
                /*
                 * No interrupts are pending, so we don't need to be explicitly notified.
                 * There are enough world switches for detecting pending interrupts.
                 */
                hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
            }
        }
        else
        {
            /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
            pvVMCB->ctrl.IntCtrl.n.u8VTPR = (u8LastTPR >> 4);

            if (fPending)
            {
                /* A TPR change could activate a pending interrupt, so catch cr8 writes. */
                pvVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(8);
            }
            else
            {
                /*
                 * No interrupts are pending, so we don't need to be explicitly notified.
                 * There are enough world switches for detecting pending interrupts.
                 */
                pvVMCB->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
            }
        }
        fSyncTPR = !fPending;
    }
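
    /*
     * Worked example of the mapping above (sketch): the APIC task-priority MMIO register keeps the priority
     * class in bits 7:4, while CR8/V_TPR keeps the same class in bits 3:0. A guest TPR of 0x40 (class 4) is
     * therefore cached as u8VTPR = 0x40 >> 4 = 0x4, and the sync-back after the VM-exit shifts it left again.
     */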

    /* All done! Let's start VM execution. */

    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
    pvVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;

#ifdef LOG_ENABLED
    pCpu = HMR0GetCurrentCpu();
    if (pVCpu->hm.s.idLastCpu != pCpu->idCpu)
        LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
    else if (pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
        LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
        LogFlow(("Manual TLB flush\n"));
#endif

    /*
     * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!
     *       (until the actual world switch)
     */
#ifdef VBOX_STRICT
    idCpuCheck = RTMpCpuId();
#endif
    VMMR0LogFlushDisable(pVCpu);

    /*
     * Load the guest state; *must* be here as it sets up the shadow CR0 for lazy FPU syncing!
     */
    rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
    if (RT_UNLIKELY(rc != VINF_SUCCESS))
    {
        VMMR0LogFlushEnable(pVCpu);
        goto end;
    }

#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    /*
     * Disable interrupts to make sure a poke will interrupt execution.
     * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
     */
    uOldEFlags = ASMIntDisableFlags();
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
#endif
    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);

    /* Setup TLB control and ASID in the VMCB. */
    hmR0SvmSetupTLB(pVM, pVCpu);

    /* In case we execute a goto ResumeExecution later on. */
    pVCpu->hm.s.fResumeVM      = true;
    pVCpu->hm.s.fForceTLBFlush = pVM->hm.s.svm.fAlwaysFlushTLB;

    Assert(sizeof(pVCpu->hm.s.svm.HCPhysVMCB) == 8);
    Assert(pvVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
    Assert(pvVMCB->ctrl.u64IOPMPhysAddr == pVM->hm.s.svm.HCPhysIOBitmap);
    Assert(pvVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.HCPhysMsrBitmap);
    Assert(pvVMCB->ctrl.u64LBRVirt == 0);

#ifdef VBOX_STRICT
    Assert(idCpuCheck == RTMpCpuId());
#endif
    TMNotifyStartOfExecution(pVCpu);

    /*
     * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
     * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
     */
    u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
    if (    (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
        && !(pvVMCB->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
    {
        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
        uint64_t u64GuestTscAux = 0;
        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
        AssertRC(rc2);
        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
    }
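
    /*
     * Rationale (sketch): RDTSCP returns TSC_AUX in ECX in a single instruction, and when it is not
     * intercepted the guest simply reads whatever the physical MSR holds. Swapping in the guest value
     * here and restoring the host value after the exit (see below) keeps both sides consistent without
     * paying for an exit on every RDTSCP.
     */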

#ifdef VBOX_WITH_KERNEL_USING_XMM
    hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVMCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu,
                      pVCpu->hm.s.svm.pfnVMRun);
#else
    pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVMCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu);
#endif

    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);
    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);
    /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
    if (!(pvVMCB->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
    {
        /* Restore host's TSC_AUX. */
        if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);

        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() +
                             pvVMCB->ctrl.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
    }
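
    /*
     * Offset arithmetic (sketch): with TSC offsetting, every guest RDTSC returns host_tsc + u64TSCOffset.
     * Reading the host TSC right after the exit and adding the offset therefore gives an upper bound on
     * the last value the guest can have seen; the 0x400 ticks subtracted above are a rough allowance for
     * the world-switch path executed since the guest last ran.
     */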
    TMNotifyEndOfExecution(pVCpu);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
    ASMSetFlags(uOldEFlags);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    uOldEFlags = ~(RTCCUINTREG)0;
#endif

    /*
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING-3 *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     */

    /* Reason for the VM exit */
    exitCode = pvVMCB->ctrl.u64ExitCode;

    if (RT_UNLIKELY(exitCode == (uint64_t)SVM_EXIT_INVALID))    /* Invalid guest state. */
    {
        HMDumpRegs(pVM, pVCpu, pCtx);
#ifdef DEBUG
        Log(("ctrl.u16InterceptRdCRx %x\n", pvVMCB->ctrl.u16InterceptRdCRx));
        Log(("ctrl.u16InterceptWrCRx %x\n", pvVMCB->ctrl.u16InterceptWrCRx));
        Log(("ctrl.u16InterceptRdDRx %x\n", pvVMCB->ctrl.u16InterceptRdDRx));
        Log(("ctrl.u16InterceptWrDRx %x\n", pvVMCB->ctrl.u16InterceptWrDRx));
        Log(("ctrl.u32InterceptException %x\n", pvVMCB->ctrl.u32InterceptException));
        Log(("ctrl.u32InterceptCtrl1 %x\n", pvVMCB->ctrl.u32InterceptCtrl1));
        Log(("ctrl.u32InterceptCtrl2 %x\n", pvVMCB->ctrl.u32InterceptCtrl2));
        Log(("ctrl.u64IOPMPhysAddr %RX64\n", pvVMCB->ctrl.u64IOPMPhysAddr));
        Log(("ctrl.u64MSRPMPhysAddr %RX64\n", pvVMCB->ctrl.u64MSRPMPhysAddr));
        Log(("ctrl.u64TSCOffset %RX64\n", pvVMCB->ctrl.u64TSCOffset));

        Log(("ctrl.TLBCtrl.u32ASID %x\n", pvVMCB->ctrl.TLBCtrl.n.u32ASID));
        Log(("ctrl.TLBCtrl.u8TLBFlush %x\n", pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush));
        Log(("ctrl.TLBCtrl.u24Reserved %x\n", pvVMCB->ctrl.TLBCtrl.n.u24Reserved));

        Log(("ctrl.IntCtrl.u8VTPR %x\n", pvVMCB->ctrl.IntCtrl.n.u8VTPR));
        Log(("ctrl.IntCtrl.u1VIrqValid %x\n", pvVMCB->ctrl.IntCtrl.n.u1VIrqValid));
        Log(("ctrl.IntCtrl.u7Reserved %x\n", pvVMCB->ctrl.IntCtrl.n.u7Reserved));
        Log(("ctrl.IntCtrl.u4VIrqPriority %x\n", pvVMCB->ctrl.IntCtrl.n.u4VIrqPriority));
        Log(("ctrl.IntCtrl.u1IgnoreTPR %x\n", pvVMCB->ctrl.IntCtrl.n.u1IgnoreTPR));
        Log(("ctrl.IntCtrl.u3Reserved %x\n", pvVMCB->ctrl.IntCtrl.n.u3Reserved));
        Log(("ctrl.IntCtrl.u1VIrqMasking %x\n", pvVMCB->ctrl.IntCtrl.n.u1VIrqMasking));
        Log(("ctrl.IntCtrl.u7Reserved2 %x\n", pvVMCB->ctrl.IntCtrl.n.u7Reserved2));
        Log(("ctrl.IntCtrl.u8VIrqVector %x\n", pvVMCB->ctrl.IntCtrl.n.u8VIrqVector));
        Log(("ctrl.IntCtrl.u24Reserved %x\n", pvVMCB->ctrl.IntCtrl.n.u24Reserved));

        Log(("ctrl.u64IntShadow %RX64\n", pvVMCB->ctrl.u64IntShadow));
        Log(("ctrl.u64ExitCode %RX64\n", pvVMCB->ctrl.u64ExitCode));
        Log(("ctrl.u64ExitInfo1 %RX64\n", pvVMCB->ctrl.u64ExitInfo1));
        Log(("ctrl.u64ExitInfo2 %RX64\n", pvVMCB->ctrl.u64ExitInfo2));
        Log(("ctrl.ExitIntInfo.u8Vector %x\n", pvVMCB->ctrl.ExitIntInfo.n.u8Vector));
        Log(("ctrl.ExitIntInfo.u3Type %x\n", pvVMCB->ctrl.ExitIntInfo.n.u3Type));
        Log(("ctrl.ExitIntInfo.u1ErrorCodeValid %x\n", pvVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
        Log(("ctrl.ExitIntInfo.u19Reserved %x\n", pvVMCB->ctrl.ExitIntInfo.n.u19Reserved));
        Log(("ctrl.ExitIntInfo.u1Valid %x\n", pvVMCB->ctrl.ExitIntInfo.n.u1Valid));
        Log(("ctrl.ExitIntInfo.u32ErrorCode %x\n", pvVMCB->ctrl.ExitIntInfo.n.u32ErrorCode));
        Log(("ctrl.NestedPaging %RX64\n", pvVMCB->ctrl.NestedPaging.au64));
        Log(("ctrl.EventInject.u8Vector %x\n", pvVMCB->ctrl.EventInject.n.u8Vector));
        Log(("ctrl.EventInject.u3Type %x\n", pvVMCB->ctrl.EventInject.n.u3Type));
        Log(("ctrl.EventInject.u1ErrorCodeValid %x\n", pvVMCB->ctrl.EventInject.n.u1ErrorCodeValid));
        Log(("ctrl.EventInject.u19Reserved %x\n", pvVMCB->ctrl.EventInject.n.u19Reserved));
        Log(("ctrl.EventInject.u1Valid %x\n", pvVMCB->ctrl.EventInject.n.u1Valid));
        Log(("ctrl.EventInject.u32ErrorCode %x\n", pvVMCB->ctrl.EventInject.n.u32ErrorCode));

        Log(("ctrl.u64NestedPagingCR3 %RX64\n", pvVMCB->ctrl.u64NestedPagingCR3));
        Log(("ctrl.u64LBRVirt %RX64\n", pvVMCB->ctrl.u64LBRVirt));

        Log(("guest.CS.u16Sel %04X\n", pvVMCB->guest.CS.u16Sel));
        Log(("guest.CS.u16Attr %04X\n", pvVMCB->guest.CS.u16Attr));
        Log(("guest.CS.u32Limit %X\n", pvVMCB->guest.CS.u32Limit));
        Log(("guest.CS.u64Base %RX64\n", pvVMCB->guest.CS.u64Base));
        Log(("guest.DS.u16Sel %04X\n", pvVMCB->guest.DS.u16Sel));
        Log(("guest.DS.u16Attr %04X\n", pvVMCB->guest.DS.u16Attr));
        Log(("guest.DS.u32Limit %X\n", pvVMCB->guest.DS.u32Limit));
        Log(("guest.DS.u64Base %RX64\n", pvVMCB->guest.DS.u64Base));
        Log(("guest.ES.u16Sel %04X\n", pvVMCB->guest.ES.u16Sel));
        Log(("guest.ES.u16Attr %04X\n", pvVMCB->guest.ES.u16Attr));
        Log(("guest.ES.u32Limit %X\n", pvVMCB->guest.ES.u32Limit));
        Log(("guest.ES.u64Base %RX64\n", pvVMCB->guest.ES.u64Base));
        Log(("guest.FS.u16Sel %04X\n", pvVMCB->guest.FS.u16Sel));
        Log(("guest.FS.u16Attr %04X\n", pvVMCB->guest.FS.u16Attr));
        Log(("guest.FS.u32Limit %X\n", pvVMCB->guest.FS.u32Limit));
        Log(("guest.FS.u64Base %RX64\n", pvVMCB->guest.FS.u64Base));
        Log(("guest.GS.u16Sel %04X\n", pvVMCB->guest.GS.u16Sel));
        Log(("guest.GS.u16Attr %04X\n", pvVMCB->guest.GS.u16Attr));
        Log(("guest.GS.u32Limit %X\n", pvVMCB->guest.GS.u32Limit));
        Log(("guest.GS.u64Base %RX64\n", pvVMCB->guest.GS.u64Base));

        Log(("guest.GDTR.u32Limit %X\n", pvVMCB->guest.GDTR.u32Limit));
        Log(("guest.GDTR.u64Base %RX64\n", pvVMCB->guest.GDTR.u64Base));

        Log(("guest.LDTR.u16Sel %04X\n", pvVMCB->guest.LDTR.u16Sel));
        Log(("guest.LDTR.u16Attr %04X\n", pvVMCB->guest.LDTR.u16Attr));
        Log(("guest.LDTR.u32Limit %X\n", pvVMCB->guest.LDTR.u32Limit));
        Log(("guest.LDTR.u64Base %RX64\n", pvVMCB->guest.LDTR.u64Base));

        Log(("guest.IDTR.u32Limit %X\n", pvVMCB->guest.IDTR.u32Limit));
        Log(("guest.IDTR.u64Base %RX64\n", pvVMCB->guest.IDTR.u64Base));

        Log(("guest.TR.u16Sel %04X\n", pvVMCB->guest.TR.u16Sel));
        Log(("guest.TR.u16Attr %04X\n", pvVMCB->guest.TR.u16Attr));
        Log(("guest.TR.u32Limit %X\n", pvVMCB->guest.TR.u32Limit));
        Log(("guest.TR.u64Base %RX64\n", pvVMCB->guest.TR.u64Base));

        Log(("guest.u8CPL %X\n", pvVMCB->guest.u8CPL));
        Log(("guest.u64CR0 %RX64\n", pvVMCB->guest.u64CR0));
        Log(("guest.u64CR2 %RX64\n", pvVMCB->guest.u64CR2));
        Log(("guest.u64CR3 %RX64\n", pvVMCB->guest.u64CR3));
        Log(("guest.u64CR4 %RX64\n", pvVMCB->guest.u64CR4));
        Log(("guest.u64DR6 %RX64\n", pvVMCB->guest.u64DR6));
        Log(("guest.u64DR7 %RX64\n", pvVMCB->guest.u64DR7));

        Log(("guest.u64RIP %RX64\n", pvVMCB->guest.u64RIP));
        Log(("guest.u64RSP %RX64\n", pvVMCB->guest.u64RSP));
        Log(("guest.u64RAX %RX64\n", pvVMCB->guest.u64RAX));
        Log(("guest.u64RFlags %RX64\n", pvVMCB->guest.u64RFlags));

        Log(("guest.u64SysEnterCS %RX64\n", pvVMCB->guest.u64SysEnterCS));
        Log(("guest.u64SysEnterEIP %RX64\n", pvVMCB->guest.u64SysEnterEIP));
        Log(("guest.u64SysEnterESP %RX64\n", pvVMCB->guest.u64SysEnterESP));

        Log(("guest.u64EFER %RX64\n", pvVMCB->guest.u64EFER));
        Log(("guest.u64STAR %RX64\n", pvVMCB->guest.u64STAR));
        Log(("guest.u64LSTAR %RX64\n", pvVMCB->guest.u64LSTAR));
        Log(("guest.u64CSTAR %RX64\n", pvVMCB->guest.u64CSTAR));
        Log(("guest.u64SFMASK %RX64\n", pvVMCB->guest.u64SFMASK));
        Log(("guest.u64KernelGSBase %RX64\n", pvVMCB->guest.u64KernelGSBase));
        Log(("guest.u64GPAT %RX64\n", pvVMCB->guest.u64GPAT));
        Log(("guest.u64DBGCTL %RX64\n", pvVMCB->guest.u64DBGCTL));
        Log(("guest.u64BR_FROM %RX64\n", pvVMCB->guest.u64BR_FROM));
        Log(("guest.u64BR_TO %RX64\n", pvVMCB->guest.u64BR_TO));
        Log(("guest.u64LASTEXCPFROM %RX64\n", pvVMCB->guest.u64LASTEXCPFROM));
        Log(("guest.u64LASTEXCPTO %RX64\n", pvVMCB->guest.u64LASTEXCPTO));
#endif
        rc = VERR_SVM_UNABLE_TO_START_VM;
        VMMR0LogFlushEnable(pVCpu);
        goto end;
    }

    /* Let's first sync back RIP, RSP and RFLAGS. */
    pCtx->rip        = pvVMCB->guest.u64RIP;
    pCtx->rsp        = pvVMCB->guest.u64RSP;
    pCtx->eflags.u32 = pvVMCB->guest.u64RFlags;
    /* eax is saved/restored across the vmrun instruction */
    pCtx->rax        = pvVMCB->guest.u64RAX;

    /*
     * Save all the MSRs that can be changed by the guest without causing a world switch.
     * FS & GS base are saved with SVM_READ_SELREG.
     */
    pCtx->msrSTAR         = pvVMCB->guest.u64STAR;           /* legacy syscall eip, cs & ss */
    pCtx->msrLSTAR        = pvVMCB->guest.u64LSTAR;          /* 64-bit mode syscall rip */
    pCtx->msrCSTAR        = pvVMCB->guest.u64CSTAR;          /* compatibility mode syscall rip */
    pCtx->msrSFMASK       = pvVMCB->guest.u64SFMASK;         /* syscall flag mask */
    pCtx->msrKERNELGSBASE = pvVMCB->guest.u64KernelGSBase;   /* swapgs exchange value */
    pCtx->SysEnter.cs     = pvVMCB->guest.u64SysEnterCS;
    pCtx->SysEnter.eip    = pvVMCB->guest.u64SysEnterEIP;
    pCtx->SysEnter.esp    = pvVMCB->guest.u64SysEnterESP;

    /* Can be updated behind our back in the nested paging case. */
    pCtx->cr2 = pvVMCB->guest.u64CR2;

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    SVM_READ_SELREG(SS, ss);
    SVM_READ_SELREG(CS, cs);
    SVM_READ_SELREG(DS, ds);
    SVM_READ_SELREG(ES, es);
    SVM_READ_SELREG(FS, fs);
    SVM_READ_SELREG(GS, gs);

    /*
     * Correct the hidden CS granularity flag. Haven't seen it being wrong in any other
     * register (yet).
     */
    if (   !pCtx->cs.Attr.n.u1Granularity
        &&  pCtx->cs.Attr.n.u1Present
        &&  pCtx->cs.u32Limit > UINT32_C(0xfffff))
    {
        Assert((pCtx->cs.u32Limit & 0xfff) == 0xfff);
        pCtx->cs.Attr.n.u1Granularity = 1;
    }
#define SVM_ASSERT_SEL_GRANULARITY(reg) \
    AssertMsg(   !pCtx->reg.Attr.n.u1Present \
              || (   pCtx->reg.Attr.n.u1Granularity \
                  ? (pCtx->reg.u32Limit & 0xfff) == 0xfff \
                  :  pCtx->reg.u32Limit <= 0xfffff), \
              ("%#x %#x %#llx\n", pCtx->reg.u32Limit, pCtx->reg.Attr.u, pCtx->reg.u64Base))
    SVM_ASSERT_SEL_GRANULARITY(ss);
    SVM_ASSERT_SEL_GRANULARITY(cs);
    SVM_ASSERT_SEL_GRANULARITY(ds);
    SVM_ASSERT_SEL_GRANULARITY(es);
    SVM_ASSERT_SEL_GRANULARITY(fs);
    SVM_ASSERT_SEL_GRANULARITY(gs);
#undef SVM_ASSERT_SEL_GRANULARITY
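
    /*
     * Granularity arithmetic (sketch): with G=0 the 20-bit segment limit is in bytes, so the maximum is
     * 0xfffff; with G=1 it is in 4 KiB pages and the expanded byte limit is (limit << 12) | 0xfff. Hence
     * any expanded limit above 0xfffff implies G=1 and low bits of 0xfff, which is exactly what the
     * assertion macro above checks and what the CS fix-up restores.
     */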

    /*
     * Correct the hidden SS DPL field. It can be wrong on certain CPUs
     * sometimes (seen it on AMD Fusion CPUs with 64-bit guests). The CPU
     * always uses the CPL field in the VMCB instead of the DPL in the hidden
     * SS (AMD spec. chapter 15.5.1, "Basic Operation").
     */
    Assert(!(pvVMCB->guest.u8CPL & ~0x3));
    pCtx->ss.Attr.n.u2Dpl = pvVMCB->guest.u8CPL & 0x3;

    /*
     * Remaining guest CPU context: TR, IDTR, GDTR, LDTR;
     * must sync everything otherwise we can get out of sync when jumping back to ring-3.
     */
    SVM_READ_SELREG(LDTR, ldtr);
    SVM_READ_SELREG(TR, tr);

    pCtx->gdtr.cbGdt = pvVMCB->guest.GDTR.u32Limit;
    pCtx->gdtr.pGdt  = pvVMCB->guest.GDTR.u64Base;

    pCtx->idtr.cbIdt = pvVMCB->guest.IDTR.u32Limit;
    pCtx->idtr.pIdt  = pvVMCB->guest.IDTR.u64Base;

    /*
     * No reason to sync back the CRx and DRx registers as they cannot be changed by the guest,
     * except in the nested paging case where CR3 can be changed by the guest.
     */
    if (   pVM->hm.s.fNestedPaging
        && pCtx->cr3 != pvVMCB->guest.u64CR3)
    {
        CPUMSetGuestCR3(pVCpu, pvVMCB->guest.u64CR3);
        PGMUpdateCR3(pVCpu, pvVMCB->guest.u64CR3);
    }

    /* Note! NOW IT'S SAFE FOR LOGGING! */
    VMMR0LogFlushEnable(pVCpu);

    /* Take care of instruction fusing (sti, mov ss) (see AMD spec. 15.20.5 Interrupt Shadows). */
    if (pvVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
    {
        Log(("uInterruptState %x rip=%RGv\n", pvVMCB->ctrl.u64IntShadow, (RTGCPTR)pCtx->rip));
        EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
    }
    else
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

    Log2(("exitCode = %x\n", exitCode));

    /* Sync back DR6 as it could have been changed by hitting breakpoints. */
    pCtx->dr[6] = pvVMCB->guest.u64DR6;
    /* DR7.GD can be cleared by debug exceptions, so sync it back as well. */
    pCtx->dr[7] = pvVMCB->guest.u64DR7;

    /* Check if an injected event was interrupted prematurely. */
    pVCpu->hm.s.Event.intInfo = pvVMCB->ctrl.ExitIntInfo.au64[0];
    if (    pvVMCB->ctrl.ExitIntInfo.n.u1Valid
            /* we don't care about 'int xx' as the instruction will be restarted. */
        &&  pvVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT)
    {
        Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));

#ifdef LOG_ENABLED
        SVM_EVENT Event;
        Event.au64[0] = pVCpu->hm.s.Event.intInfo;

        if (    exitCode == SVM_EXIT_EXCEPTION_E
            &&  Event.n.u8Vector == 0xE)
        {
            Log(("Double fault!\n"));
        }
#endif

        pVCpu->hm.s.Event.fPending = true;
        /* Error code present? (redundant) */
        if (pvVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
            pVCpu->hm.s.Event.errCode = pvVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
        else
            pVCpu->hm.s.Event.errCode = 0;
    }
#ifdef VBOX_WITH_STATISTICS
    if (exitCode == SVM_EXIT_NPF)
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNPF);
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);
#endif

    /* Sync back the TPR if it was changed. */
    if (fSyncTPR)
    {
        if (pVM->hm.s.fTPRPatchingActive)
        {
            if ((pCtx->msrLSTAR & 0xff) != u8LastTPR)
            {
                /* Our patch code uses LSTAR for TPR caching. */
                rc2 = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff);
                AssertRC(rc2);
            }
        }
        else
        {
            if ((uint8_t)(u8LastTPR >> 4) != pvVMCB->ctrl.IntCtrl.n.u8VTPR)
            {
                /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
                rc2 = PDMApicSetTPR(pVCpu, pvVMCB->ctrl.IntCtrl.n.u8VTPR << 4);
                AssertRC(rc2);
            }
        }
    }
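
    /*
     * Note (sketch): this is the inverse of the pre-run caching above. If the guest raised its priority by
     * writing CR8 (u8VTPR now 0x5, say), the APIC TPR is updated to 0x5 << 4 = 0x50. The fSyncTPR guard
     * means this only happens when no interrupt was pending at entry, i.e. when TPR writes were not being
     * intercepted and could thus have changed behind our back.
     */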

#ifdef DBGFTRACE_ENABLED /** @todo DTrace */
    RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "vmexit %08x at %04:%08RX64 %RX64 %RX64 %RX64",
                      exitCode, pCtx->cs.Sel, pCtx->rip,
                      pvVMCB->ctrl.u64ExitInfo1, pvVMCB->ctrl.u64ExitInfo2, pvVMCB->ctrl.ExitIntInfo.au64[0]);
#endif
#if ARCH_BITS == 64 /* for the time being */
    VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, exitCode, pvVMCB->ctrl.u64ExitInfo1, pvVMCB->ctrl.u64ExitInfo2,
                            pvVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX);
#endif
    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);

    /* Deal with the reason of the VM-exit. */
    switch (exitCode)
    {
        case SVM_EXIT_EXCEPTION_0:  case SVM_EXIT_EXCEPTION_1:  case SVM_EXIT_EXCEPTION_2:  case SVM_EXIT_EXCEPTION_3:
        case SVM_EXIT_EXCEPTION_4:  case SVM_EXIT_EXCEPTION_5:  case SVM_EXIT_EXCEPTION_6:  case SVM_EXIT_EXCEPTION_7:
        case SVM_EXIT_EXCEPTION_8:  case SVM_EXIT_EXCEPTION_9:  case SVM_EXIT_EXCEPTION_A:  case SVM_EXIT_EXCEPTION_B:
        case SVM_EXIT_EXCEPTION_C:  case SVM_EXIT_EXCEPTION_D:  case SVM_EXIT_EXCEPTION_E:  case SVM_EXIT_EXCEPTION_F:
        case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11: case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13:
        case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17:
        case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19: case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B:
        case SVM_EXIT_EXCEPTION_1C: case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
        {
            /* Pending trap. */
            SVM_EVENT Event;
            uint32_t  vector = exitCode - SVM_EXIT_EXCEPTION_0;

            Log2(("Hardware/software interrupt %d\n", vector));
            switch (vector)
            {
                case X86_XCPT_DB:
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);

                    /* Note that we don't support guest and host-initiated debugging at the same time. */
                    Assert(DBGFIsStepping(pVCpu) || CPUMIsHyperDebugStateActive(pVCpu));

                    rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pCtx->dr[6]);
                    if (rc == VINF_EM_RAW_GUEST_TRAP)
                    {
                        Log(("Trap %x (debug) at %016RX64\n", vector, pCtx->rip));

                        /* Reinject the exception. */
                        Event.au64[0]    = 0;
                        Event.n.u3Type   = SVM_EVENT_EXCEPTION; /* trap or fault */
                        Event.n.u1Valid  = 1;
                        Event.n.u8Vector = X86_XCPT_DB;

                        hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
                        goto ResumeExecution;
                    }
                    /* Return to ring 3 to deal with the debug exit code. */
                    Log(("Debugger hardware BP at %04x:%RGv (rc=%Rrc)\n", pCtx->cs.Sel, pCtx->rip, VBOXSTRICTRC_VAL(rc)));
                    break;
                }

                case X86_XCPT_NM:
                {
                    Log(("#NM fault at %RGv\n", (RTGCPTR)pCtx->rip));

                    /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
                    /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
                    rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
                    if (rc == VINF_SUCCESS)
                    {
                        Assert(CPUMIsGuestFPUStateActive(pVCpu));
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);

                        /* Continue execution. */
                        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;

                        goto ResumeExecution;
                    }

                    Log(("Forward #NM fault to the guest\n"));
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);

                    Event.au64[0]    = 0;
                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
                    Event.n.u1Valid  = 1;
                    Event.n.u8Vector = X86_XCPT_NM;

                    hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
                    goto ResumeExecution;
                }

                case X86_XCPT_PF: /* Page fault */
                {
                    uint32_t    errCode       = pvVMCB->ctrl.u64ExitInfo1;  /* EXITINFO1 = error code */
                    RTGCUINTPTR uFaultAddress = pvVMCB->ctrl.u64ExitInfo2;  /* EXITINFO2 = fault address */

#ifdef VBOX_ALWAYS_TRAP_PF
                    if (pVM->hm.s.fNestedPaging)
                    {
                        /*
                         * A genuine page fault. Forward the trap to the guest by injecting the exception and resuming execution.
                         */
                        Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,
                             uFaultAddress, errCode, (RTGCPTR)pCtx->rsp));
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);

                        /* Now we must update CR2. */
                        pCtx->cr2 = uFaultAddress;

                        Event.au64[0]            = 0;
                        Event.n.u3Type           = SVM_EVENT_EXCEPTION;
                        Event.n.u1Valid          = 1;
                        Event.n.u8Vector         = X86_XCPT_PF;
                        Event.n.u1ErrorCodeValid = 1;
                        Event.n.u32ErrorCode     = errCode;

                        hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
                        goto ResumeExecution;
                    }
#endif
                    Assert(!pVM->hm.s.fNestedPaging);

#ifdef VBOX_HM_WITH_GUEST_PATCHING
                    /* Shortcut for APIC TPR reads and writes; 32-bit guests only */
                    if (   pVM->hm.s.fTRPPatchingAllowed
                        && (uFaultAddress & 0xfff) == 0x080
                        && !(errCode & X86_TRAP_PF_P)  /* not present */
                        && CPUMGetGuestCPL(pVCpu) == 0
                        && !CPUMIsGuestInLongModeEx(pCtx)
                        && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
                    {
                        RTGCPHYS GCPhysApicBase, GCPhys;
                        PDMApicGetBase(pVM, &GCPhysApicBase);   /** @todo cache this */
                        GCPhysApicBase &= PAGE_BASE_GC_MASK;

                        rc = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL, &GCPhys);
                        if (   rc == VINF_SUCCESS
                            && GCPhys == GCPhysApicBase)
                        {
                            /* Only attempt to patch the instruction once. */
                            PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
                            if (!pPatch)
                            {
                                rc = VINF_EM_HM_PATCH_TPR_INSTR;
                                break;
                            }
                        }
                    }
#endif

                    Log2(("Page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
                    /* EXITINFO2 contains the linear address of the page fault. */
                    TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
                    TRPMSetErrorCode(pVCpu, errCode);
                    TRPMSetFaultAddress(pVCpu, uFaultAddress);

                    /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
                    rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
                    Log2(("PGMTrap0eHandler %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, VBOXSTRICTRC_VAL(rc)));
                    if (rc == VINF_SUCCESS)
                    {
                        /* We've successfully synced our shadow pages, so let's just continue execution. */
                        Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);

                        TRPMResetTrap(pVCpu);
                        goto ResumeExecution;
                    }
                    else if (rc == VINF_EM_RAW_GUEST_TRAP)
                    {
                        /*
                         * A genuine page fault. Forward the trap to the guest by injecting the exception and resuming execution.
                         */
                        Log2(("Forward page fault to the guest\n"));
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
                        /* The error code might have been changed. */
                        errCode = TRPMGetErrorCode(pVCpu);

                        TRPMResetTrap(pVCpu);

                        /* Now we must update CR2. */
                        pCtx->cr2 = uFaultAddress;

                        Event.au64[0]            = 0;
                        Event.n.u3Type           = SVM_EVENT_EXCEPTION;
                        Event.n.u1Valid          = 1;
                        Event.n.u8Vector         = X86_XCPT_PF;
                        Event.n.u1ErrorCodeValid = 1;
                        Event.n.u32ErrorCode     = errCode;

                        hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
                        goto ResumeExecution;
                    }
#ifdef VBOX_STRICT
                    if (rc != VINF_EM_RAW_EMULATE_INSTR && rc != VINF_EM_RAW_EMULATE_IO_BLOCK)
                        LogFlow(("PGMTrap0eHandler failed with %d\n", VBOXSTRICTRC_VAL(rc)));
#endif
                    /* Need to go back to the recompiler to emulate the instruction. */
                    TRPMResetTrap(pVCpu);
                    break;
                }

                case X86_XCPT_MF: /* Floating point exception. */
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
                    if (!(pCtx->cr0 & X86_CR0_NE))
                    {
                        /* old style FPU error reporting needs some extra work. */
                        /** @todo don't fall back to the recompiler, but do it manually. */
                        rc = VINF_EM_RAW_EMULATE_INSTR;
                        break;
                    }
                    Log(("Trap %x at %RGv\n", vector, (RTGCPTR)pCtx->rip));

                    Event.au64[0]    = 0;
                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
                    Event.n.u1Valid  = 1;
                    Event.n.u8Vector = X86_XCPT_MF;

                    hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
                    goto ResumeExecution;
                }

#ifdef VBOX_STRICT
                case X86_XCPT_BP: /* Breakpoint. */
                case X86_XCPT_GP: /* General protection fault exception. */
                case X86_XCPT_UD: /* Undefined opcode exception. */
                case X86_XCPT_DE: /* Divide error. */
                case X86_XCPT_SS: /* Stack segment exception. */
                case X86_XCPT_NP: /* Segment not present exception. */
                {
                    Event.au64[0]    = 0;
                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
                    Event.n.u1Valid  = 1;
                    Event.n.u8Vector = vector;

                    switch (vector)
                    {
                        case X86_XCPT_GP:
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
                            Event.n.u1ErrorCodeValid = 1;
                            Event.n.u32ErrorCode     = pvVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
                            break;
                        case X86_XCPT_BP:
                            /* Saves the wrong EIP on the stack (pointing to the int3 instead of the next instruction). */
                            break;
                        case X86_XCPT_DE:
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
                            break;
                        case X86_XCPT_UD:
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
                            break;
                        case X86_XCPT_SS:
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
                            Event.n.u1ErrorCodeValid = 1;
                            Event.n.u32ErrorCode     = pvVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
                            break;
                        case X86_XCPT_NP:
                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
                            Event.n.u1ErrorCodeValid = 1;
                            Event.n.u32ErrorCode     = pvVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
                            break;
                    }
                    Log(("Trap %x at %04x:%RGv esi=%x\n", vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->esi));
                    hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
                    goto ResumeExecution;
                }
#endif
                default:
                    AssertMsgFailed(("Unexpected vm-exit caused by exception %x\n", vector));
                    rc = VERR_HMSVM_UNEXPECTED_XCPT_EXIT;
                    break;

            } /* switch (vector) */
            break;
        }

        case SVM_EXIT_NPF:
        {
            /* EXITINFO1 contains the fault error code; EXITINFO2 contains the guest physical address causing the fault. */
            uint32_t errCode     = pvVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
            RTGCPHYS GCPhysFault = pvVMCB->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */
            PGMMODE  enmShwPagingMode;

            Assert(pVM->hm.s.fNestedPaging);
            LogFlow(("Nested page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));

#ifdef VBOX_HM_WITH_GUEST_PATCHING
            /* Shortcut for APIC TPR reads and writes; 32-bit guests only */
            if (   pVM->hm.s.fTRPPatchingAllowed
                && (GCPhysFault & PAGE_OFFSET_MASK) == 0x080
                && (   !(errCode & X86_TRAP_PF_P)  /* not present */
                    || (errCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD) /* mmio optimization */)
                && CPUMGetGuestCPL(pVCpu) == 0
                && !CPUMIsGuestInLongModeEx(pCtx)
                && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
            {
                RTGCPHYS GCPhysApicBase;
                PDMApicGetBase(pVM, &GCPhysApicBase);   /** @todo cache this */
                GCPhysApicBase &= PAGE_BASE_GC_MASK;

                if (GCPhysFault == GCPhysApicBase + 0x80)
                {
                    /* Only attempt to patch the instruction once. */
                    PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
                    if (!pPatch)
                    {
                        rc = VINF_EM_HM_PATCH_TPR_INSTR;
                        break;
                    }
                }
            }
#endif

            /* Handle the pagefault trap for the nested shadow table. */
#if HC_ARCH_BITS == 32 /** @todo shadow this in a variable. */
            if (CPUMIsGuestInLongModeEx(pCtx))
                enmShwPagingMode = PGMMODE_AMD64_NX;
            else
#endif
                enmShwPagingMode = PGMGetHostMode(pVM);

            /* MMIO optimization */
            Assert((errCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
            if ((errCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
            {
                rc = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmShwPagingMode, CPUMCTX2CORE(pCtx), GCPhysFault, errCode);

                /*
                 * If the handler succeeds, resume execution. Also resume if it fails to interpret the
                 * instruction because we couldn't get the guest physical address of the page containing
                 * it via the guest's page tables (we would invalidate the guest page in the host TLB):
                 * resuming then causes a guest page fault, letting the guest handle this weird case
                 * itself. See @bugref{6043}.
                 */
                if (   rc == VINF_SUCCESS
                    || rc == VERR_PAGE_TABLE_NOT_PRESENT
                    || rc == VERR_PAGE_NOT_PRESENT)
                {
                    Log2(("PGMR0Trap0eHandlerNPMisconfig(,,,%RGp) at %RGv -> resume\n", GCPhysFault, (RTGCPTR)pCtx->rip));
                    goto ResumeExecution;
                }
                Log2(("PGMR0Trap0eHandlerNPMisconfig(,,,%RGp) at %RGv -> emulate\n", GCPhysFault, (RTGCPTR)pCtx->rip));
                break;
            }
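
            /*
             * Note (sketch of the error-code pattern): PGM marks MMIO pages in the nested page tables with
             * reserved bits set, so an access to them faults with P=1 and RSVD=1 simultaneously. That
             * signature cannot occur for an ordinary nested page fault, which is why it can be used above to
             * route the exit straight to the MMIO misconfiguration handler instead of the generic handler below.
             */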

            /* EXITINFO2 contains the guest-physical address of the page fault. */
            TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
            TRPMSetErrorCode(pVCpu, errCode);
            TRPMSetFaultAddress(pVCpu, GCPhysFault);

            rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmShwPagingMode, errCode, CPUMCTX2CORE(pCtx), GCPhysFault);
            Log2(("PGMR0Trap0eHandlerNestedPaging %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, VBOXSTRICTRC_VAL(rc)));

            /*
             * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
             */
            if (   rc == VINF_SUCCESS
                || rc == VERR_PAGE_TABLE_NOT_PRESENT
                || rc == VERR_PAGE_NOT_PRESENT)
            {
                /* We've successfully synced our shadow pages, so let's just continue execution. */
                Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);

                TRPMResetTrap(pVCpu);
                goto ResumeExecution;
            }

#ifdef VBOX_STRICT
            if (rc != VINF_EM_RAW_EMULATE_INSTR)
                LogFlow(("PGMTrap0eHandlerNestedPaging failed with %d\n", VBOXSTRICTRC_VAL(rc)));
#endif
            /* Need to go back to the recompiler to emulate the instruction. */
            TRPMResetTrap(pVCpu);
            break;
        }

        case SVM_EXIT_VINTR:
            /* A virtual interrupt is about to be delivered, which means IF=1. */
            Log(("SVM_EXIT_VINTR IF=%d\n", pCtx->eflags.Bits.u1IF));
            pvVMCB->ctrl.IntCtrl.n.u1VIrqValid  = 0;
            pvVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0;
            goto ResumeExecution;
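
        /*
         * Note (sketch of the V_IRQ trick): when a real interrupt is pending while the guest cannot take it
         * (IF=0 or an interrupt shadow), the VMM requests this VINTR exit via the virtual interrupt fields in
         * the VMCB, so the exit fires the moment the guest becomes interruptible. The request is cleared
         * above and the resume path re-runs the pending-interrupt check, which can then inject the real one.
         */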

        case SVM_EXIT_FERR_FREEZE:
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_SMI:
        case SVM_EXIT_INIT:
            /* External interrupt; leave to allow it to be dispatched again. */
            rc = VINF_EM_RAW_INTERRUPT;
            break;

        case SVM_EXIT_WBINVD:
        case SVM_EXIT_INVD:     /* Guest software attempted to execute INVD. */
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
            /* Skip instruction and continue directly. */
            pCtx->rip += 2;     /* Note! hardcoded opcode size! */
            /* Continue execution. */
            goto ResumeExecution;

        case SVM_EXIT_CPUID:    /* Guest software attempted to execute CPUID. */
        {
            Log2(("SVM: Cpuid at %RGv for %x\n", (RTGCPTR)pCtx->rip, pCtx->eax));
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
            rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
            if (rc == VINF_SUCCESS)
            {
                /* Update EIP and continue execution. */
                pCtx->rip += 2; /* Note! hardcoded opcode size! */
                goto ResumeExecution;
            }
            AssertMsgFailed(("EMU: cpuid failed with %Rrc\n", VBOXSTRICTRC_VAL(rc)));
            rc = VINF_EM_RAW_EMULATE_INSTR;
            break;
        }
|
---|
2240 | {
|
---|
2241 | Log2(("SVM: Rdtsc\n"));
|
---|
2242 | STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
|
---|
2243 | rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
|
---|
2244 | if (rc == VINF_SUCCESS)
|
---|
2245 | {
|
---|
2246 | /* Update EIP and continue execution. */
|
---|
2247 | pCtx->rip += 2; /* Note! hardcoded opcode size! */
|
---|
2248 | goto ResumeExecution;
|
---|
2249 | }
|
---|
2250 | rc = VINF_EM_RAW_EMULATE_INSTR;
|
---|
2251 | break;
|
---|
2252 | }
|
---|
2253 |
|
---|
2254 | case SVM_EXIT_RDPMC: /* Guest software attempted to execute RDPMC. */
|
---|
2255 | {
|
---|
2256 | Log2(("SVM: Rdpmc %x\n", pCtx->ecx));
|
---|
2257 | STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
|
---|
2258 | rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
|
---|
2259 | if (rc == VINF_SUCCESS)
|
---|
2260 | {
|
---|
2261 | /* Update EIP and continue execution. */
|
---|
2262 | pCtx->rip += 2; /* Note! hardcoded opcode size! */
|
---|
2263 | goto ResumeExecution;
|
---|
2264 | }
|
---|
2265 | rc = VINF_EM_RAW_EMULATE_INSTR;
|
---|
2266 | break;
|
---|
2267 | }
|
---|
2268 |
|
---|
2269 | case SVM_EXIT_RDTSCP: /* Guest software attempted to execute RDTSCP. */
|
---|
2270 | {
|
---|
2271 | Log2(("SVM: Rdtscp\n"));
|
---|
2272 | STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
|
---|
2273 | rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
|
---|
2274 | if (rc == VINF_SUCCESS)
|
---|
2275 | {
|
---|
2276 | /* Update EIP and continue execution. */
|
---|
2277 | pCtx->rip += 3; /* Note! hardcoded opcode size! */
|
---|
2278 | goto ResumeExecution;
|
---|
2279 | }
|
---|
2280 | AssertMsgFailed(("EMU: rdtscp failed with %Rrc\n", VBOXSTRICTRC_VAL(rc)));
|
---|
2281 | rc = VINF_EM_RAW_EMULATE_INSTR;
|
---|
2282 | break;
|
---|
2283 | }
|
---|
2284 |
|
---|
2285 | case SVM_EXIT_INVLPG: /* Guest software attempted to execute INVLPG. */
|
---|
2286 | {
|
---|
2287 | Log2(("SVM: invlpg\n"));
|
---|
2288 | STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
|
---|
2289 |
|
---|
2290 | Assert(!pVM->hm.s.fNestedPaging);
|
---|
2291 |
|
---|
2292 | /* Truly a pita. Why can't SVM give the same information as VT-x? */
|
---|
2293 | rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx));
|
---|
2294 | if (rc == VINF_SUCCESS)
|
---|
2295 | {
|
---|
2296 | STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageInvlpg);
|
---|
2297 | goto ResumeExecution; /* eip already updated */
|
---|
2298 | }
|
---|
2299 | break;
|
---|
2300 | }
|
---|

        case SVM_EXIT_WRITE_CR0:  case SVM_EXIT_WRITE_CR1:  case SVM_EXIT_WRITE_CR2:  case SVM_EXIT_WRITE_CR3:
        case SVM_EXIT_WRITE_CR4:  case SVM_EXIT_WRITE_CR5:  case SVM_EXIT_WRITE_CR6:  case SVM_EXIT_WRITE_CR7:
        case SVM_EXIT_WRITE_CR8:  case SVM_EXIT_WRITE_CR9:  case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
        case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
        {
            Log2(("SVM: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_CR0));
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]);
            rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);

            switch (exitCode - SVM_EXIT_WRITE_CR0)
            {
                case 0:
                    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
                    break;
                case 2:
                    break;
                case 3:
                    Assert(!pVM->hm.s.fNestedPaging);
                    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
                    break;
                case 4:
                    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
                    break;
                case 8:
                    break;
                default:
                    AssertFailed();
            }
            if (rc == VINF_SUCCESS)
            {
                /* EIP has been updated already. */
                /* Only resume if successful. */
                goto ResumeExecution;
            }
            Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
            break;
        }

        case SVM_EXIT_READ_CR0:  case SVM_EXIT_READ_CR1:  case SVM_EXIT_READ_CR2:  case SVM_EXIT_READ_CR3:
        case SVM_EXIT_READ_CR4:  case SVM_EXIT_READ_CR5:  case SVM_EXIT_READ_CR6:  case SVM_EXIT_READ_CR7:
        case SVM_EXIT_READ_CR8:  case SVM_EXIT_READ_CR9:  case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
        case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
        {
            Log2(("SVM: %RGv mov x, cr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_CR0));
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]);
            rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
            if (rc == VINF_SUCCESS)
            {
                /* EIP has been updated already. */
                /* Only resume if successful. */
                goto ResumeExecution;
            }
            Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
            break;
        }

        case SVM_EXIT_WRITE_DR0:  case SVM_EXIT_WRITE_DR1:  case SVM_EXIT_WRITE_DR2:  case SVM_EXIT_WRITE_DR3:
        case SVM_EXIT_WRITE_DR4:  case SVM_EXIT_WRITE_DR5:  case SVM_EXIT_WRITE_DR6:  case SVM_EXIT_WRITE_DR7:
        case SVM_EXIT_WRITE_DR8:  case SVM_EXIT_WRITE_DR9:  case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
        case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
        {
            Log2(("SVM: %RGv mov dr%d, x\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_DR0));
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);

            if (   !DBGFIsStepping(pVCpu)
                && !CPUMIsHyperDebugStateActive(pVCpu))
            {
                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);

                /* Disable drx move intercepts. */
                pvVMCB->ctrl.u16InterceptRdDRx = 0;
                pvVMCB->ctrl.u16InterceptWrDRx = 0;

                /* Save the host and load the guest debug state. */
                rc2 = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, false /* exclude DR6 */);
                AssertRC(rc2);
                goto ResumeExecution;
            }

            rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
            if (rc == VINF_SUCCESS)
            {
                /* EIP has been updated already. */
                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;

                /* Only resume if successful. */
                goto ResumeExecution;
            }
            Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
            break;
        }

        case SVM_EXIT_READ_DR0:  case SVM_EXIT_READ_DR1:  case SVM_EXIT_READ_DR2:  case SVM_EXIT_READ_DR3:
        case SVM_EXIT_READ_DR4:  case SVM_EXIT_READ_DR5:  case SVM_EXIT_READ_DR6:  case SVM_EXIT_READ_DR7:
        case SVM_EXIT_READ_DR8:  case SVM_EXIT_READ_DR9:  case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
        case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
        {
            Log2(("SVM: %RGv mov x, dr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_DR0));
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);

            if (!DBGFIsStepping(pVCpu))
            {
                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);

                /* Disable DRx move intercepts. */
                pvVMCB->ctrl.u16InterceptRdDRx = 0;
                pvVMCB->ctrl.u16InterceptWrDRx = 0;

                /* Save the host and load the guest debug state. */
                rc2 = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, false /* exclude DR6 */);
                AssertRC(rc2);
                goto ResumeExecution;
            }

            rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
            if (rc == VINF_SUCCESS)
            {
                /* EIP has been updated already. */
                /* Only resume if successful. */
                goto ResumeExecution;
            }
            Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
            break;
        }

        /* Note: We'll get a #GP if the IO instruction isn't allowed (IOPL or TSS bitmap); no need to double check. */
        case SVM_EXIT_IOIO:     /* I/O instruction. */
        {
            SVM_IOIO_EXIT IoExitInfo;

            IoExitInfo.au32[0] = pvVMCB->ctrl.u64ExitInfo1;
            unsigned uIdx      = (IoExitInfo.au32[0] >> 4) & 0x7;
            uint32_t uIOSize   = g_aIOSize[uIdx];
            uint32_t uAndVal   = g_aIOOpAnd[uIdx];
            if (RT_UNLIKELY(!uIOSize))
            {
                AssertFailed(); /* should be fatal. */
                rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo r=ramshankar: would this really fall back to the recompiler and work? */
                break;
            }
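
            /*
             * Decode sketch: bits 6:4 of EXITINFO1 are the SZ8/SZ16/SZ32 operand-size flags, so uIdx selects
             * the access width via the lookup tables declared earlier in this file. For example, a 16-bit
             * "out dx, ax" yields uIOSize = 2 and uAndVal = 0xffff, which is later used to mask EAX down to AX.
             */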

            if (IoExitInfo.n.u1STR)
            {
                /* ins/outs */
                PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;

                /* Disassemble manually to deal with segment prefixes. */
                rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
                if (rc == VINF_SUCCESS)
                {
                    if (IoExitInfo.n.u1Type == 0)
                    {
                        Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
                        rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
                                                (DISCPUMODE)pDis->uAddrMode, uIOSize);
                    }
                    else
                    {
                        Log2(("IOMInterpretINSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
                        rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
                                               (DISCPUMODE)pDis->uAddrMode, uIOSize);
                    }
                }
                else
                    rc = VINF_EM_RAW_EMULATE_INSTR;
            }
            else
            {
                /* Normal in/out */
                Assert(!IoExitInfo.n.u1REP);

                if (IoExitInfo.n.u1Type == 0)
                {
                    Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal,
                          uIOSize));
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
                    rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
                    if (rc == VINF_IOM_R3_IOPORT_WRITE)
                    {
                        HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pvVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
                                                   uAndVal, uIOSize);
                    }
                }
                else
                {
                    uint32_t u32Val = 0;

                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
                    rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize);
                    if (IOM_SUCCESS(rc))
                    {
                        /* Write back to the EAX register. */
                        pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
                        Log2(("IOMIOPortRead %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal,
                              uIOSize));
                    }
                    else if (rc == VINF_IOM_R3_IOPORT_READ)
                    {
                        HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pvVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
                                                  uAndVal, uIOSize);
                    }
                }
            }
            /*
             * Handle the I/O return codes.
             * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
             */
            if (IOM_SUCCESS(rc))
            {
                /* Update EIP and continue execution. */
                pCtx->rip = pvVMCB->ctrl.u64ExitInfo2;      /* RIP/EIP of the next instruction is saved in EXITINFO2. */
                if (RT_LIKELY(rc == VINF_SUCCESS))
                {
                    /* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
                    if (pCtx->dr[7] & X86_DR7_ENABLED_MASK)
                    {
                        /* IO operation lookup arrays. */
                        static uint32_t const aIOSize[4] = { 1, 2, 0, 4 };
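                        /* The table maps the 2-bit DR7 LEN field to a byte count: 00b = 1, 01b = 2,
                           11b = 4. The 10b encoding is undefined for I/O breakpoints, hence the 0
                           entry, which makes the range check below never match. */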

                        STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIOCheck);
                        for (unsigned i = 0; i < 4; i++)
                        {
                            unsigned uBPLen = aIOSize[X86_DR7_GET_LEN(pCtx->dr[7], i)];

                            if (   (IoExitInfo.n.u16Port >= pCtx->dr[i] && IoExitInfo.n.u16Port < pCtx->dr[i] + uBPLen)
                                && (pCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
                                && (pCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
                            {
                                SVM_EVENT Event;

                                Assert(CPUMIsGuestDebugStateActive(pVCpu));

                                /* Clear all breakpoint status flags and set the one we just hit. */
                                pCtx->dr[6] &= ~(X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3);
                                pCtx->dr[6] |= (uint64_t)RT_BIT(i);

                                /*
                                 * Note: AMD64 Architecture Programmer's Manual 13.1:
                                 * Bits 15:13 of the DR6 register are never cleared by the processor and must be cleared
                                 * by software after the contents have been read.
                                 */
                                pvVMCB->guest.u64DR6 = pCtx->dr[6];

                                /* X86_DR7_GD will be cleared if drx accesses should be trapped inside the guest. */
                                pCtx->dr[7] &= ~X86_DR7_GD;

                                /* Paranoia. */
                                pCtx->dr[7] &= 0xffffffff;                                           /* upper 32 bits reserved */
                                pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
                                pCtx->dr[7] |= 0x400;                                                /* must be one */

                                pvVMCB->guest.u64DR7 = pCtx->dr[7];

                                /* Inject the exception. */
                                Log(("Inject IO debug trap at %RGv\n", (RTGCPTR)pCtx->rip));

                                Event.au64[0]    = 0;
                                Event.n.u3Type   = SVM_EVENT_EXCEPTION; /* trap or fault */
                                Event.n.u1Valid  = 1;
                                Event.n.u8Vector = X86_XCPT_DB;

                                hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
                                goto ResumeExecution;
                            }
                        }
                    }
                    goto ResumeExecution;
                }
                Log2(("EM status from IO at %RGv %x size %d: %Rrc\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize,
                      VBOXSTRICTRC_VAL(rc)));
                break;
            }

#ifdef VBOX_STRICT
            if (rc == VINF_IOM_R3_IOPORT_READ)
                Assert(IoExitInfo.n.u1Type != 0);
            else if (rc == VINF_IOM_R3_IOPORT_WRITE)
                Assert(IoExitInfo.n.u1Type == 0);
            else
            {
                AssertMsg(   RT_FAILURE(rc)
                          || rc == VINF_EM_RAW_EMULATE_INSTR
                          || rc == VINF_EM_RAW_GUEST_TRAP
                          || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rc)));
            }
#endif
            Log2(("Failed IO at %RGv %x size %d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
            break;
        }

        case SVM_EXIT_HLT:
            /* Check if external interrupts are pending; if so, don't switch back. */
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
            pCtx->rip++;    /* skip hlt */
            if (EMShouldContinueAfterHalt(pVCpu, pCtx))
                goto ResumeExecution;

            rc = VINF_EM_HALT;
            break;

        case SVM_EXIT_MWAIT_UNCOND:
            Log2(("SVM: mwait\n"));
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
            rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
            if (   rc == VINF_EM_HALT
                || rc == VINF_SUCCESS)
            {
                /* Update EIP and continue execution. */
                pCtx->rip += 3;     /* Note: hardcoded opcode size assumption! */
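                /* (mwait is always encoded as 0F 01 C9, and monitor as 0F 01 C8, so three bytes is
                   correct unless the guest added redundant prefixes.) */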

                /* Check if external interrupts are pending; if so, don't switch back. */
                if (   rc == VINF_SUCCESS
                    || (   rc == VINF_EM_HALT
                        && EMShouldContinueAfterHalt(pVCpu, pCtx))
                   )
                    goto ResumeExecution;
            }
            AssertMsg(rc == VERR_EM_INTERPRETER || rc == VINF_EM_HALT, ("EMU: mwait failed with %Rrc\n", VBOXSTRICTRC_VAL(rc)));
            break;

        case SVM_EXIT_MONITOR:
        {
            Log2(("SVM: monitor\n"));

            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
            rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
            if (rc == VINF_SUCCESS)
            {
                /* Update EIP and continue execution. */
                pCtx->rip += 3;     /* Note: hardcoded opcode size assumption! */
                goto ResumeExecution;
            }
            AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: monitor failed with %Rrc\n", VBOXSTRICTRC_VAL(rc)));
            break;
        }

        case SVM_EXIT_VMMCALL:
            rc = hmR0SvmEmulateTprVMMCall(pVM, pVCpu, pCtx);
            if (rc == VINF_SUCCESS)
            {
                goto ResumeExecution;   /* rip already updated. */
            }
            /* no break */

        case SVM_EXIT_RSM:
        case SVM_EXIT_INVLPGA:
        case SVM_EXIT_VMRUN:
        case SVM_EXIT_VMLOAD:
        case SVM_EXIT_VMSAVE:
        case SVM_EXIT_STGI:
        case SVM_EXIT_CLGI:
        case SVM_EXIT_SKINIT:
        {
            /* Unsupported instructions. */
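            /* (We don't expose SVM to the guest, so its SVM instruction set must fault; raising #UD
               is what real hardware does for most of these when EFER.SVME is clear.) */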
            SVM_EVENT Event;

            Event.au64[0]    = 0;
            Event.n.u3Type   = SVM_EVENT_EXCEPTION;
            Event.n.u1Valid  = 1;
            Event.n.u8Vector = X86_XCPT_UD;

            Log(("Forced #UD trap at %RGv\n", (RTGCPTR)pCtx->rip));
            hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
            goto ResumeExecution;
        }

        /* Emulate in ring-3. */
        case SVM_EXIT_MSR:
        {
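            /* For MSR intercepts, EXITINFO1 gives the direction: 0 = rdmsr, 1 = wrmsr (AMD APM
               vol. 2); the MSR index is in ecx as usual. */
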
            /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */
            if (   pVM->hm.s.fTPRPatchingActive
                && pCtx->ecx == MSR_K8_LSTAR
                && pvVMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */)
            {
                if ((pCtx->eax & 0xff) != u8LastTPR)
                {
                    Log(("SVM: Faulting MSR_K8_LSTAR write with new TPR value %x\n", pCtx->eax & 0xff));

                    /* Our patch code uses LSTAR for TPR caching. */
                    rc2 = PDMApicSetTPR(pVCpu, pCtx->eax & 0xff);
                    AssertRC(rc2);
                }

                /* Skip the instruction and continue. */
                pCtx->rip += 2;     /* wrmsr = [0F 30] */

                /* Only resume if successful. */
                goto ResumeExecution;
            }

            /*
             * The Intel spec. claims there's an REX version of RDMSR that's slightly different,
             * so we play safe by completely disassembling the instruction.
             */
            STAM_COUNTER_INC((pvVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
            Log(("SVM: %s\n", (pvVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));
            rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
            if (rc == VINF_SUCCESS)
            {
                /* EIP has been updated already. */
                /* Only resume if successful. */
                goto ResumeExecution;
            }
            AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", (pvVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr",
                                                  VBOXSTRICTRC_VAL(rc)));
            break;
        }

        case SVM_EXIT_TASK_SWITCH:          /* too complicated to emulate, so fall back to the recompiler */
            Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pvVMCB->ctrl.u64ExitInfo2));
            if (   !(pvVMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
                && pVCpu->hm.s.Event.fPending)
            {
                SVM_EVENT Event;
                Event.au64[0] = pVCpu->hm.s.Event.intInfo;

                /* Caused by an injected interrupt. */
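                /* (The event being injected was consumed by the intercepted task switch before it was
                   delivered; re-queue external interrupts and NMIs via TRPM so they are redelivered
                   when the task switch is restarted in ring-3.) */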
                pVCpu->hm.s.Event.fPending = false;
                switch (Event.n.u3Type)
                {
                    case SVM_EVENT_EXTERNAL_IRQ:
                    case SVM_EVENT_NMI:
                        Log(("SVM_EXIT_TASK_SWITCH: reassert trap %d\n", Event.n.u8Vector));
                        Assert(!Event.n.u1ErrorCodeValid);
                        rc2 = TRPMAssertTrap(pVCpu, Event.n.u8Vector, TRPM_HARDWARE_INT);
                        AssertRC(rc2);
                        break;

                    default:
                        /* Exceptions and software interrupts can just be restarted. */
                        break;
                }
            }
            rc = VERR_EM_INTERPRETER;
            break;

        case SVM_EXIT_PAUSE:
        case SVM_EXIT_MWAIT_ARMED:
            rc = VERR_EM_INTERPRETER;
            break;

        case SVM_EXIT_SHUTDOWN:
            rc = VINF_EM_RESET;             /* Triple fault equals a reset. */
            break;

        case SVM_EXIT_IDTR_READ:
        case SVM_EXIT_GDTR_READ:
        case SVM_EXIT_LDTR_READ:
        case SVM_EXIT_TR_READ:
        case SVM_EXIT_IDTR_WRITE:
        case SVM_EXIT_GDTR_WRITE:
        case SVM_EXIT_LDTR_WRITE:
        case SVM_EXIT_TR_WRITE:
        case SVM_EXIT_CR0_SEL_WRITE:
        default:
            /* Unexpected exit codes. */
            rc = VERR_HMSVM_UNEXPECTED_EXIT;
            AssertMsgFailed(("Unexpected exit code %x\n", exitCode)); /* Can't happen. */
            break;
    }

end:

    /*
     * We are now going back to ring-3, so clear the forced action flag.
     */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);

    /*
     * Signal changes to the recompiler.
     */
    CPUMSetChangedFlags(pVCpu,
                          CPUM_CHANGED_SYSENTER_MSR
                        | CPUM_CHANGED_LDTR
                        | CPUM_CHANGED_GDTR
                        | CPUM_CHANGED_IDTR
                        | CPUM_CHANGED_TR
                        | CPUM_CHANGED_HIDDEN_SEL_REGS);

    /*
     * If we executed vmrun and an external IRQ was pending, then we don't have to do a full sync the next time.
     */
    if (exitCode == SVM_EXIT_INTR)
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
        /* On the next entry we'll only sync the host context. */
        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
    }
    else
    {
        /* On the next entry we'll sync everything. */
        /** @todo we can do better than this */
        /* Not in the VINF_PGM_CHANGE_MODE though! */
        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL;
    }

    /* Translate into a less severe return code */
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;

    /* Just set the correct state here instead of trying to catch every goto above. */
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);

#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    /* Restore interrupts if we exited after disabling them. */
    if (uOldEFlags != ~(RTCCUINTREG)0)
        ASMSetFlags(uOldEFlags);
#endif

    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    return VBOXSTRICTRC_TODO(rc);
}


/**
 * Emulates a simple MOV TPR instruction that was replaced by our TPR patch code.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest CPU context.
 */
static int hmR0SvmEmulateTprVMMCall(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    int rc;

    LogFlow(("Emulated VMMCall TPR access replacement at %RGv\n", pCtx->rip));

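    /*
     * The guest's TPR accesses have been patched into VMMCALL instructions; the patch tree maps the
     * guest instruction pointer to a description of the replaced instruction (type, operands and
     * size). Emulate it here and keep iterating, since several patched instructions may directly
     * follow each other.
     */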
    for (;;)
    {
        bool    fPending;
        uint8_t u8Tpr;

        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
        if (!pPatch)
            break;

        switch (pPatch->enmType)
        {
            case HMTPRINSTR_READ:
                /* TPR caching in CR8 */
                rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending);
                AssertRC(rc);

                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
                AssertRC(rc);

                LogFlow(("Emulated read successfully\n"));
                pCtx->rip += pPatch->cbOp;
                break;

            case HMTPRINSTR_WRITE_REG:
            case HMTPRINSTR_WRITE_IMM:
                /* Fetch the new TPR value */
                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
                {
                    uint32_t val;

                    rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &val);
                    AssertRC(rc);
                    u8Tpr = val;
                }
                else
                    u8Tpr = (uint8_t)pPatch->uSrcOperand;

                rc = PDMApicSetTPR(pVCpu, u8Tpr);
                AssertRC(rc);
                LogFlow(("Emulated write successfully\n"));
                pCtx->rip += pPatch->cbOp;
                break;

            default:
                AssertMsgFailedReturn(("Unexpected type %d\n", pPatch->enmType), VERR_HMSVM_UNEXPECTED_PATCH_TYPE);
        }
    }
    return VINF_SUCCESS;
}


/**
 * Enters the AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCpu        Pointer to the CPU info struct.
 */
VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
{
    Assert(pVM->hm.s.svm.fSupported);

    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentAsid));
    pVCpu->hm.s.fResumeVM = false;

    /* Force a reload of the LDTR, so we'll execute VMLOAD to load additional guest state. */
    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_LDTR;

    return VINF_SUCCESS;
}


/**
 * Leaves the AMD-V session.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest CPU context.
 */
VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    SVM_VMCB *pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;

    Assert(pVM->hm.s.svm.fSupported);

#ifdef DEBUG
    if (CPUMIsHyperDebugStateActive(pVCpu))
    {
        CPUMR0LoadHostDebugState(pVM, pVCpu);
    }
    else
#endif
    /* Save the guest debug state if necessary. */
    if (CPUMIsGuestDebugStateActive(pVCpu))
    {
        CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, false /* skip DR6 */);

        /* Intercept all DRx reads and writes again. Changed later on. */
        pvVMCB->ctrl.u16InterceptRdDRx = 0xFFFF;
        pvVMCB->ctrl.u16InterceptWrDRx = 0xFFFF;

        /* Resync the debug registers the next time. */
        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    }
    else
        Assert(pvVMCB->ctrl.u16InterceptRdDRx == 0xFFFF && pvVMCB->ctrl.u16InterceptWrDRx == 0xFFFF);

    return VINF_SUCCESS;
}


/**
 * Worker for interpreting INVLPG.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCpu        Pointer to the disassembler state.
 * @param   pRegFrame   Pointer to the register frame.
 */
static int hmR0svmInterpretInvlPgEx(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame)
{
    DISQPVPARAMVAL param1;
    RTGCPTR        addr;

    int rc = DISQueryParamVal(pRegFrame, pCpu, &pCpu->Param1, &param1, DISQPVWHICH_SRC);
    if (RT_FAILURE(rc))
        return VERR_EM_INTERPRETER;

    switch (param1.type)
    {
        case DISQPV_TYPE_IMMEDIATE:
        case DISQPV_TYPE_ADDRESS:
            if (!(param1.flags & (DISQPV_FLAG_32 | DISQPV_FLAG_64)))
                return VERR_EM_INTERPRETER;
            addr = param1.val.val64;
            break;

        default:
            return VERR_EM_INTERPRETER;
    }

    /** @todo is addr always a flat linear address or ds based
     *        (in absence of segment override prefixes)????
     */
    rc = PGMInvalidatePage(pVCpu, addr);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    AssertRC(rc);
    return rc;
}


/**
 * Interprets INVLPG.
 *
 * @returns VBox status code.
 * @retval  VINF_*                  Scheduling instructions.
 * @retval  VERR_EM_INTERPRETER     Something we can't cope with.
 * @retval  VERR_*                  Fatal errors.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pRegFrame   Pointer to the register frame.
 *
 * @remarks Updates the EIP if an instruction was executed successfully.
 */
static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
{
    /*
     * Only allow 32 & 64 bit code.
     */
    if (CPUMGetGuestCodeBits(pVCpu) != 16)
    {
        PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
        int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
        if (RT_SUCCESS(rc) && pDis->pCurInstr->uOpcode == OP_INVLPG)
        {
            rc = hmR0svmInterpretInvlPgEx(pVCpu, pDis, pRegFrame);
            if (RT_SUCCESS(rc))
                pRegFrame->rip += pDis->cbInstr; /* Move on to the next instruction. */
            return rc;
        }
    }
    return VERR_EM_INTERPRETER;
}


/**
 * Invalidates a guest page by guest virtual address.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCVirt      Guest virtual address of the page to invalidate.
 */
VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
{
    bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB | VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH);

    /* Skip it if a TLB flush is already pending. */
    if (!fFlushPending)
    {
        SVM_VMCB *pvVMCB;

        Log2(("SVMR0InvalidatePage %RGv\n", GCVirt));
        AssertReturn(pVM, VERR_INVALID_PARAMETER);
        Assert(pVM->hm.s.svm.fSupported);

        pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;
        AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_HMSVM_INVALID_PVMCB);

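        /* invlpga invalidates the TLB entries of a single guest virtual address within the VMCB's
           ASID, which lets us avoid a full TLB flush here. */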
#if HC_ARCH_BITS == 32
        /* If we get a flush in 64-bit guest mode, then force a full TLB flush. invlpga only takes 32-bit addresses here. */
        if (CPUMIsGuestInLongMode(pVCpu))
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
        else
#endif
            SVMR0InvlpgA(GCVirt, pvVMCB->ctrl.TLBCtrl.n.u32ASID);
    }
    return VINF_SUCCESS;
}


#if 0 /* obsolete, but left here for clarification. */
/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPhys      Guest physical address of the page to invalidate.
 */
VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
    Assert(pVM->hm.s.fNestedPaging);
    /* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBInvlpga);
    return VINF_SUCCESS;
}
#endif


#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/**
 * Prepares for and executes VMRUN (64-bit guests from a 32-bit host).
 *
 * @returns VBox status code.
 * @param   HCPhysVMCBHost  Physical address of host VMCB.
 * @param   HCPhysVMCB      Physical address of the VMCB.
 * @param   pCtx            Pointer to the guest CPU context.
 * @param   pVM             Pointer to the VM.
 * @param   pVCpu           Pointer to the VMCPU.
 */
DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVMCBHost, RTHCPHYS HCPhysVMCB, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
{
    uint32_t aParam[4];

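    /* The 32-bit to 64-bit switcher can only pass 32-bit parameters, so each 64-bit physical
       address is split into a low and a high half. */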
    aParam[0] = (uint32_t)(HCPhysVMCBHost);         /* Param 1: HCPhysVMCBHost - Lo. */
    aParam[1] = (uint32_t)(HCPhysVMCBHost >> 32);   /* Param 1: HCPhysVMCBHost - Hi. */
    aParam[2] = (uint32_t)(HCPhysVMCB);             /* Param 2: HCPhysVMCB - Lo. */
    aParam[3] = (uint32_t)(HCPhysVMCB >> 32);       /* Param 2: HCPhysVMCB - Hi. */

    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSVMGCVMRun64, 4, &aParam[0]);
}


/**
 * Executes the specified handler in 64-bit mode.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest CPU context.
 * @param   pfnHandler  Pointer to the RC handler function.
 * @param   cbParam     Number of parameters.
 * @param   paParam     Array of 32-bit parameters.
 */
VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
                                         uint32_t *paParam)
{
    int             rc;
    RTHCUINTREG     uOldEFlags;

    Assert(pfnHandler);

    /* Disable interrupts. */
    uOldEFlags = ASMIntDisableFlags();

#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    RTCPUID idHostCpu = RTMpCpuId();
    CPUMR0SetLApic(pVM, idHostCpu);
#endif

    CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
    CPUMSetHyperEIP(pVCpu, pfnHandler);
    for (int i = (int)cbParam - 1; i >= 0; i--)
        CPUMPushHyper(pVCpu, paParam[i]);
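    /* The parameters were pushed in reverse order, so the handler sees them in declaration order
       on the hypervisor stack. (Note that despite its 'cb' prefix, cbParam is a parameter count,
       not a byte count.) */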

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    /* Call switcher. */
    rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);

    ASMSetFlags(uOldEFlags);
    return rc;
}

#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
