VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp@ 46502

Last change on this file since 46502 was 46500, checked in by vboxsync, 12 years ago

VMM/HMSVMR0: AMD-V bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 69.2 KB
1/* $Id: HMSVMR0.cpp 46500 2013-06-11 16:00:10Z vboxsync $ */
2/** @file
3 * HM SVM (AMD-V) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21
22#ifdef DEBUG_ramshankar
23# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
24# define HMSVM_ALWAYS_TRAP_PF
25#endif
26
27
28/*******************************************************************************
29* Defined Constants And Macros *
30*******************************************************************************/
31/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
32 *
33 * The CPU format of the segment attribute is described in X86DESCATTRBITS
34 * which is 16-bits (i.e. includes 4 bits of the segment limit).
35 *
36 * In the AMD-V VMCB format the segment attribute is a compact 12 bits (strictly
37 * only the attribute bits and nothing else). The upper 4 bits are unused.
38 *
39 * @{ */
40#define HMSVM_CPU_2_VMCB_SEG_ATTR(a) (a & 0xff) | ((a & 0xf000) >> 4)
41#define HMSVM_VMCB_2_CPU_SEG_ATTR(a) (a & 0xff) | ((a & 0x0f00) << 4)
42/** @} */
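/*
 * Worked example (illustrative only): a flat 32-bit code segment typically has the
 * CPU attribute word 0xcf9b (type=0xb, S=1, DPL=0, P=1, limit[19:16]=0xf, AVL=0,
 * L=0, D/B=1, G=1). HMSVM_CPU_2_VMCB_SEG_ATTR(0xcf9b) yields (0x9b) | (0xc000 >> 4)
 * = 0xc9b, i.e. the 4 segment-limit bits are dropped and the upper attribute nibble
 * is packed down. HMSVM_VMCB_2_CPU_SEG_ATTR(0xc9b) gives 0xc09b back; the limit bits
 * are not restored here since they come from the separate u32Limit field.
 */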
43
44/** @name Macros for loading, storing segment registers to/from the VMCB.
45 * @{ */
46#define HMSVM_LOAD_SEG_REG(REG, reg) \
47 do \
48 { \
49 Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
50 Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \
51 pVmcb->guest.REG.u16Sel = pCtx->reg.Sel; \
52 pVmcb->guest.REG.u32Limit = pCtx->reg.u32Limit; \
53 pVmcb->guest.REG.u64Base = pCtx->reg.u64Base; \
54 pVmcb->guest.REG.u16Attr = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
55 } while (0)
56
57#define HMSVM_SAVE_SEG_REG(REG, reg) \
58 do \
59 { \
60 pCtx->reg.Sel = pVmcb->guest.REG.u16Sel; \
61 pCtx->reg.ValidSel = pVmcb->guest.REG.u16Sel; \
62 pCtx->reg.fFlags = CPUMSELREG_FLAGS_VALID; \
63 pCtx->reg.u32Limit = pVmcb->guest.REG.u32Limit; \
64 pCtx->reg.u64Base = pVmcb->guest.REG.u64Base; \
65 pCtx->reg.Attr.u = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
66 } while (0)
67/** @} */
68
69/** @name VMCB Clean Bits used for VMCB-state caching. */
70/** All intercepts vectors, TSC offset, PAUSE filter counter. */
71#define HMSVM_VMCB_CLEAN_INTERCEPTS RT_BIT(0)
72/** I/O permission bitmap, MSR permission bitmap. */
73#define HMSVM_VMCB_CLEAN_IOPM_MSRPM RT_BIT(1)
74/** ASID. */
75#define HMSVM_VMCB_CLEAN_ASID RT_BIT(2)
76/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
77V_INTR_VECTOR. */
78#define HMSVM_VMCB_CLEAN_TPR RT_BIT(3)
79/** Nested Paging: Nested CR3 (nCR3), PAT. */
80#define HMSVM_VMCB_CLEAN_NP RT_BIT(4)
81/** Control registers (CR0, CR3, CR4, EFER). */
82#define HMSVM_VMCB_CLEAN_CRX RT_BIT(5)
83/** Debug registers (DR6, DR7). */
84#define HMSVM_VMCB_CLEAN_DRX RT_BIT(6)
85/** GDT, IDT limit and base. */
86#define HMSVM_VMCB_CLEAN_DT RT_BIT(7)
87/** Segment register: CS, SS, DS, ES limit and base. */
88#define HMSVM_VMCB_CLEAN_SEG RT_BIT(8)
89/** CR2.*/
90#define HMSVM_VMCB_CLEAN_CR2 RT_BIT(9)
91/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to) */
92#define HMSVM_VMCB_CLEAN_LBR RT_BIT(10)
93/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
94PHYSICAL_TABLE and AVIC LOGICAL_TABLE Pointers). */
95#define HMSVM_VMCB_CLEAN_AVIC RT_BIT(11)
96/** @} */
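/*
 * Usage note (descriptive, based on the code below): whenever a VMCB field covered
 * by one of these clean bits is modified, the code clears the corresponding bit in
 * pVmcb->u64VmcbCleanBits (e.g. &= ~HMSVM_VMCB_CLEAN_ASID after changing the ASID)
 * so the CPU re-reads that portion of the VMCB on the next VMRUN instead of relying
 * on its cached copy.
 */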
97
98/**
99 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
100 */
101typedef enum SVMMSREXITREAD
102{
103 /** Reading this MSR causes a VM-exit. */
104 SVMMSREXIT_INTERCEPT_READ = 0xb,
105 /** Reading this MSR does not cause a VM-exit. */
106 SVMMSREXIT_PASSTHRU_READ
107} SVMMSREXITREAD;
108
109/**
110 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
111 */
112typedef enum SVMMSREXITWRITE
113{
114 /** Writing to this MSR causes a VM-exit. */
115 SVMMSREXIT_INTERCEPT_WRITE = 0xd,
116 /** Writing to this MSR does not cause a VM-exit. */
117 SVMMSREXIT_PASSTHRU_WRITE
118} SVMMSREXITWRITE;
119
120
121/*******************************************************************************
122* Internal Functions *
123*******************************************************************************/
124static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
125
126
127/*******************************************************************************
128* Global Variables *
129*******************************************************************************/
130/** Ring-0 memory object for the IO bitmap. */
131RTR0MEMOBJ g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
132/** Physical address of the IO bitmap. */
133RTHCPHYS g_HCPhysIOBitmap = 0;
134/** Virtual address of the IO bitmap. */
135R0PTRTYPE(void *) g_pvIOBitmap = NULL;
136
137
138/**
139 * Sets up and activates AMD-V on the current CPU.
140 *
141 * @returns VBox status code.
142 * @param pCpu Pointer to the CPU info struct.
143 * @param pVM Pointer to the VM (can be NULL after a resume!).
144 * @param pvCpuPage Pointer to the global CPU page.
145 * @param HCPhysCpuPage Physical address of the global CPU page.
146 */
147VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
148{
149 AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER);
150 AssertReturn( HCPhysCpuPage
151 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
152 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
153
154 /*
155 * We must turn on AMD-V and setup the host state physical address, as those MSRs are per CPU.
156 */
157 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
158 if (u64HostEfer & MSR_K6_EFER_SVME)
159 {
160 /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active, then we blindly use AMD-V. */
161 if ( pVM
162 && pVM->hm.s.svm.fIgnoreInUseError)
163 {
164 pCpu->fIgnoreAMDVInUseError = true;
165 }
166
167 if (!pCpu->fIgnoreAMDVInUseError)
168 return VERR_SVM_IN_USE;
169 }
170
171 /* Turn on AMD-V in the EFER MSR. */
172 ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
173
174 /* Write the physical page address where the CPU will store the host state while executing the VM. */
175 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
176
177 /*
178 * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all non-zero ASIDs
179 * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
180 * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
181 * to flush the TLB before using a new ASID.
182 */
183 pCpu->fFlushAsidBeforeUse = true;
184
185 /*
186 * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
187 */
188 ++pCpu->cTlbFlushes;
189
190 return VINF_SUCCESS;
191}
192
193
194/**
195 * Deactivates AMD-V on the current CPU.
196 *
197 * @returns VBox status code.
198 * @param pCpu Pointer to the CPU info struct.
199 * @param pvCpuPage Pointer to the global CPU page.
200 * @param HCPhysCpuPage Physical address of the global CPU page.
201 */
202VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
203{
204 AssertReturn( HCPhysCpuPage
205 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
206 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
207 NOREF(pCpu);
208
209 /* Turn off AMD-V in the EFER MSR if AMD-V is active. */
210 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
211 if (u64HostEfer & MSR_K6_EFER_SVME)
212 {
213 ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);
214
215 /* Invalidate host state physical address. */
216 ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
217 }
218
219 return VINF_SUCCESS;
220}
221
222
223/**
224 * Does global AMD-V initialization (called during module initialization).
225 *
226 * @returns VBox status code.
227 */
228VMMR0DECL(int) SVMR0GlobalInit(void)
229{
230 /*
231 * Allocate 12 KB for the IO bitmap. Since this is non-optional and we always intercept all IO accesses, it's done
232 * once globally here instead of per-VM.
233 */
234 int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
235 if (RT_FAILURE(rc))
236 return rc;
237
238 g_pvIOBitmap = RTR0MemObjAddress(g_hMemObjIOBitmap);
239 g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);
240
241 /* Set all bits to intercept all IO accesses. */
242 ASMMemFill32(g_pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
 return VINF_SUCCESS;
243}
244
245
246/**
247 * Does global AMD-V termination (called during module termination).
248 */
249VMMR0DECL(void) SVMR0GlobalTerm(void)
250{
251 if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
252 {
253 RTR0MemObjFree(g_hMemObjIOBitmap, false /* fFreeMappings */);
254 g_pvIOBitmap = NULL;
255 g_HCPhysIOBitmap = 0;
256 g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
257 }
258}
259
260
261/**
262 * Frees any allocated per-VCPU structures for a VM.
263 *
264 * @param pVM Pointer to the VM.
265 */
266DECLINLINE(void) hmR0SvmFreeStructs(PVM pVM)
267{
268 for (uint32_t i = 0; i < pVM->cCpus; i++)
269 {
270 PVMCPU pVCpu = &pVM->aCpus[i];
271 AssertPtr(pVCpu);
272
273 if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
274 {
275 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
276 pVCpu->hm.s.svm.pvVmcbHost = 0;
277 pVCpu->hm.s.svm.HCPhysVmcbHost = 0;
278 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
279 }
280
281 if (pVCpu->hm.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
282 {
283 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
284 pVCpu->hm.s.svm.pvVmcb = 0;
285 pVCpu->hm.s.svm.HCPhysVmcb = 0;
286 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
287 }
288
289 if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
290 {
291 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
292 pVCpu->hm.s.svm.pvMsrBitmap = 0;
293 pVCpu->hm.s.svm.HCPhysMsrBitmap = 0;
294 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
295 }
296 }
297}
298
299
300/**
301 * Does per-VM AMD-V initialization.
302 *
303 * @returns VBox status code.
304 * @param pVM Pointer to the VM.
305 */
306VMMR0DECL(int) SVMR0InitVM(PVM pVM)
307{
308 int rc = VERR_INTERNAL_ERROR_5;
309
310 /*
311 * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
312 */
313 uint32_t u32Family;
314 uint32_t u32Model;
315 uint32_t u32Stepping;
316 if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
317 {
318 Log4(("SVMR0InitVM: AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
319 pVM->hm.s.svm.fAlwaysFlushTLB = true;
320 }
321
322 /*
323 * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
324 */
325 for (VMCPUID i = 0; i < pVM->cCpus; i++)
326 {
327 PVMCPU pVCpu = &pVM->aCpus[i];
328 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
329 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
330 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
331 }
332
333 for (VMCPUID i = 0; i < pVM->cCpus; i++)
334 {
 PVMCPU pVCpu = &pVM->aCpus[i];
335 /*
336 * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
337 * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
338 */
339 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
340 if (RT_FAILURE(rc))
341 goto failure_cleanup;
342
343 pVCpu->hm.s.svm.pvVmcbHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
344 pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
345 Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G);
346 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost);
347
348 /*
349 * Allocate one page for the guest-state VMCB.
350 */
351 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */);
352 if (RT_FAILURE(rc))
353 goto failure_cleanup;
354
355 pVCpu->hm.s.svm.pvVmcb = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
356 pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
357 Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
358 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);
359
360 /*
361 * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
362 * SVM to not require one.
363 */
364 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
365 if (RT_FAILURE(rc))
366 goto failure_cleanup;
367
368 pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
369 pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
370 /* Set all bits to intercept all MSR accesses (changed later on). */
371 ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
372 }
373
374 return VINF_SUCCESS;
375
376failure_cleanup:
377 hmR0SvmFreeStructs(pVM);
378 return rc;
379}
380
381
382/**
383 * Does per-VM AMD-V termination.
384 *
385 * @returns VBox status code.
386 * @param pVM Pointer to the VM.
387 */
388VMMR0DECL(int) SVMR0TermVM(PVM pVM)
389{
390 hmR0SvmFreeStructs(pVM);
391 return VINF_SUCCESS;
392}
393
394
395/**
396 * Sets the permission bits for the specified MSR in the MSRPM.
397 *
398 * @param pVCpu Pointer to the VMCPU.
399 * @param uMsr The MSR.
400 * @param enmRead MSR read permissions (intercept or pass-through).
401 * @param enmWrite MSR write permissions (intercept or pass-through).
402 */
403static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
404{
405 unsigned ulBit;
406 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; /* Needed for updating the VMCB clean bits below. */
407
408 /*
409 * Layout:
410 * Byte offset MSR range
411 * 0x000 - 0x7ff 0x00000000 - 0x00001fff
412 * 0x800 - 0xfff 0xc0000000 - 0xc0001fff
413 * 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
414 * 0x1800 - 0x1fff Reserved
415 */
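 /*
 * Worked example (illustrative only): MSR_K8_LSTAR (0xc0000082) falls into the
 * second range, so ulBit = (0xc0000082 - 0xc0000000) * 2 = 0x104 and pbMsrBitmap
 * is advanced by 0x800 bytes; the read-intercept bit is then bit 0x104 of that
 * region (byte 0x820 of the MSRPM, bit 4) and the write-intercept bit is bit 0x105.
 */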
416 if (uMsr <= 0x00001FFF)
417 {
418 /* Pentium-compatible MSRs */
419 ulBit = uMsr * 2;
420 }
421 else if ( uMsr >= 0xC0000000
422 && uMsr <= 0xC0001FFF)
423 {
424 /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
425 ulBit = (uMsr - 0xC0000000) * 2;
426 pbMsrBitmap += 0x800;
427 }
428 else if ( uMsr >= 0xC0010000
429 && uMsr <= 0xC0011FFF)
430 {
431 /* AMD Seventh and Eighth Generation Processor MSRs */
432 ulBit = (uMsr - 0xC0010000) * 2;
433 pbMsrBitmap += 0x1000;
434 }
435 else
436 {
437 AssertFailed();
438 return;
439 }
440
441 Assert(ulBit < 0x3fff /* 16 * 1024 - 1 */);
442 if (enmRead == SVMMSREXIT_INTERCEPT_READ)
443 ASMBitSet(pbMsrBitmap, ulBit);
444 else
445 ASMBitClear(pbMsrBitmap, ulBit);
446
447 if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
448 ASMBitSet(pbMsrBitmap, ulBit + 1);
449 else
450 ASMBitClear(pbMsrBitmap, ulBit + 1);
451
452 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
453}
454
455
456/**
457 * Sets up AMD-V for the specified VM.
458 * This function is only called once per-VM during initialization.
459 *
460 * @returns VBox status code.
461 * @param pVM Pointer to the VM.
462 */
463VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
464{
465 int rc = VINF_SUCCESS;
466
467 AssertReturn(pVM, VERR_INVALID_PARAMETER);
468 Assert(pVM->hm.s.svm.fSupported);
469
470 for (VMCPUID i = 0; i < pVM->cCpus; i++)
471 {
472 PVMCPU pVCpu = &pVM->aCpus[i];
473 PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;
474
475 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
476
477 /* Trap exceptions unconditionally (debug purposes). */
478#ifdef HMSVM_ALWAYS_TRAP_PF
479 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
480#endif
481#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
482 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_BP)
483 | RT_BIT(X86_XCPT_DB)
484 | RT_BIT(X86_XCPT_DE)
485 | RT_BIT(X86_XCPT_NM)
486 | RT_BIT(X86_XCPT_UD)
487 | RT_BIT(X86_XCPT_NP)
488 | RT_BIT(X86_XCPT_SS)
489 | RT_BIT(X86_XCPT_GP)
490 | RT_BIT(X86_XCPT_PF)
491 | RT_BIT(X86_XCPT_MF);
492#endif
493
494 /* Set up unconditional intercepts and conditions. */
495 pVmcb->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR /* External interrupt causes a VM-exit. */
496 | SVM_CTRL1_INTERCEPT_VINTR /* When guest enables interrupts cause a VM-exit. */
497 | SVM_CTRL1_INTERCEPT_NMI /* Non-Maskable Interrupts causes a VM-exit. */
498 | SVM_CTRL1_INTERCEPT_SMI /* System Management Interrupt cause a VM-exit. */
499 | SVM_CTRL1_INTERCEPT_INIT /* INIT signal causes a VM-exit. */
500 | SVM_CTRL1_INTERCEPT_RDPMC /* RDPMC causes a VM-exit. */
501 | SVM_CTRL1_INTERCEPT_CPUID /* CPUID causes a VM-exit. */
502 | SVM_CTRL1_INTERCEPT_RSM /* RSM causes a VM-exit. */
503 | SVM_CTRL1_INTERCEPT_HLT /* HLT causes a VM-exit. */
504 | SVM_CTRL1_INTERCEPT_INOUT_BITMAP /* Use the IOPM to cause IOIO VM-exits. */
505 | SVM_CTRL1_INTERCEPT_MSR_SHADOW /* MSR access not covered by MSRPM causes a VM-exit.*/
506 | SVM_CTRL1_INTERCEPT_INVLPGA /* INVLPGA causes a VM-exit. */
507 | SVM_CTRL1_INTERCEPT_SHUTDOWN /* Shutdown events causes a VM-exit. */
508 | SVM_CTRL1_INTERCEPT_FERR_FREEZE; /* Intercept "freezing" during legacy FPU handling. */
509
510 pVmcb->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN /* VMRUN causes a VM-exit. */
511 | SVM_CTRL2_INTERCEPT_VMMCALL /* VMMCALL causes a VM-exit. */
512 | SVM_CTRL2_INTERCEPT_VMLOAD /* VMLOAD causes a VM-exit. */
513 | SVM_CTRL2_INTERCEPT_VMSAVE /* VMSAVE causes a VM-exit. */
514 | SVM_CTRL2_INTERCEPT_STGI /* STGI causes a VM-exit. */
515 | SVM_CTRL2_INTERCEPT_CLGI /* CLGI causes a VM-exit. */
516 | SVM_CTRL2_INTERCEPT_SKINIT /* SKINIT causes a VM-exit. */
517 | SVM_CTRL2_INTERCEPT_WBINVD /* WBINVD causes a VM-exit. */
518 | SVM_CTRL2_INTERCEPT_MONITOR /* MONITOR causes a VM-exit. */
519 | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* MWAIT causes a VM-exit. */
520
521 /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
522 pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
523
524 /* CR0, CR4 writes must be intercepted for the same reasons as above. */
525 pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);
526
527 /* Intercept all DRx reads and writes by default. Changed later on. */
528 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
529 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
530
531 /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
532 pVmcb->ctrl.IntCtrl.n.u1VIrqMasking = 1;
533
534 /* Ignore the priority in the TPR; we take into account the guest TPR anyway while delivering interrupts. */
535 pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR = 1;
536
537 /* Set IO and MSR bitmap permission bitmap physical addresses. */
538 pVmcb->ctrl.u64IOPMPhysAddr = g_HCPhysIOBitmap;
539 pVmcb->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;
540
541 /* No LBR virtualization. */
542 pVmcb->ctrl.u64LBRVirt = 0;
543
544 /* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from memory. */
545 pVmcb->u64VmcbCleanBits = 0;
546
547 /* The guest ASID MBNZ, set it to 1. The host uses 0. */
548 pVmcb->ctrl.TLBCtrl.n.u32ASID = 1;
549
550 /*
551 * Setup the PAT MSR (applicable for Nested Paging only).
552 * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
553 * so choose type 6 for all PAT slots.
554 */
555 pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);
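 /* Each PAT slot occupies one byte of the MSR and encoding 0x06 is write-back,
 so the value above maps all of PA0..PA7 to WB as described above. */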
556
557 /* Without Nested Paging, we need additional intercepts. */
558 if (!pVM->hm.s.fNestedPaging)
559 {
560 /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
561 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(3);
562 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(3);
563
564 /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
565 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG
566 | SVM_CTRL1_INTERCEPT_TASK_SWITCH;
567
568 /* Page faults must be intercepted to implement shadow paging. */
569 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
570 }
571
572 /*
573 * The following MSRs are saved/restored automatically during the world-switch.
574 * Don't intercept guest read/write accesses to these MSRs.
575 */
576 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
577 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
578 hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
579 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
580 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
581 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
582 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
583 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
584 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
585 hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
586 }
587
588 return rc;
589}
590
591
592/**
593 * Flushes the appropriate tagged-TLB entries.
594 *
595 * @param pVM Pointer to the VM.
596 * @param pVCpu Pointer to the VMCPU.
597 */
598static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu)
599{
600 PVM pVM = pVCpu->CTX_SUFF(pVM);
601 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
602 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
603
604 /*
605 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
606 * This can happen both for start & resume due to long jumps back to ring-3.
607 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
608 * so we cannot reuse the ASIDs without flushing.
609 */
610 bool fNewAsid = false;
611 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
612 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
613 {
614 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
615 pVCpu->hm.s.fForceTLBFlush = true;
616 fNewAsid = true;
617 }
618
619 /* Set TLB flush state as checked until we return from the world switch. */
620 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
621
622 /* Check for explicit TLB shootdowns. */
623 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
624 {
625 pVCpu->hm.s.fForceTLBFlush = true;
626 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
627 }
628
629 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
630 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
631
632 if (pVM->hm.s.svm.fAlwaysFlushTLB)
633 {
634 /*
635 * This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
636 */
637 pCpu->uCurrentAsid = 1;
638 pVCpu->hm.s.uCurrentAsid = 1;
639 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
640 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
641 }
642 else if (pVCpu->hm.s.fForceTLBFlush)
643 {
644 if (fNewAsid)
645 {
646 ++pCpu->uCurrentAsid;
647 bool fHitASIDLimit = false;
648 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
649 {
650 pCpu->uCurrentAsid = 1; /* Wraparound at 1; host uses 0 */
651 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new ASID. */
652 fHitASIDLimit = true;
653
654 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
655 {
656 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
657 pCpu->fFlushAsidBeforeUse = true;
658 }
659 else
660 {
661 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
662 pCpu->fFlushAsidBeforeUse = false;
663 }
664 }
665
666 if ( !fHitASIDLimit
667 && pCpu->fFlushAsidBeforeUse)
668 {
669 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
670 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
671 else
672 {
673 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
674 pCpu->fFlushAsidBeforeUse = false;
675 }
676 }
677
678 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
679 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
680 }
681 else
682 {
683 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
684 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
685 else
686 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
687 }
688
689 pVCpu->hm.s.fForceTLBFlush = false;
690 }
691 else
692 {
693 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
694 * not be executed. See hmQueueInvlPage() where it is commented
695 * out. Support individual entry flushing someday. */
696 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
697 {
698 /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
699 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
700 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
701 SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVmcb->ctrl.TLBCtrl.n.u32ASID);
702 }
703 }
704
705 pVCpu->hm.s.TlbShootdown.cPages = 0;
706 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
707
708 /* Update VMCB with the ASID. */
709 if (pVmcb->ctrl.TLBCtrl.n.u32ASID != pVCpu->hm.s.uCurrentAsid)
710 {
711 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
712 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_ASID;
713 }
714
715 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
716 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
717 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
718 ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
719 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
720 ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
721
722#ifdef VBOX_WITH_STATISTICS
723 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
724 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
725 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
726 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
727 {
728 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
729 }
730 else
731 Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
732#endif
733}
734
735
736/** @name 64-bit guest on 32-bit host OS helper functions.
737 *
738 * The host CPU is still 64-bit capable but the host OS is running in 32-bit
739 * mode (code segment, paging). These wrappers/helpers perform the necessary
740 * bits for the 32->64 switcher.
741 *
742 * @{ */
743#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
744/**
745 * Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
746 *
747 * @returns VBox status code.
748 * @param HCPhysVmcbHost Physical address of host VMCB.
749 * @param HCPhysVmcb Physical address of the VMCB.
750 * @param pCtx Pointer to the guest-CPU context.
751 * @param pVM Pointer to the VM.
752 * @param pVCpu Pointer to the VMCPU.
753 */
754DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
755{
756 uint32_t aParam[4];
757 aParam[0] = (uint32_t)(HCPhysVmcbHost); /* Param 1: HCPhysVmcbHost - Lo. */
758 aParam[1] = (uint32_t)(HCPhysVmcbHost >> 32); /* Param 1: HCPhysVmcbHost - Hi. */
759 aParam[2] = (uint32_t)(HCPhysVmcb); /* Param 2: HCPhysVmcb - Lo. */
760 aParam[3] = (uint32_t)(HCPhysVmcb >> 32); /* Param 2: HCPhysVmcb - Hi. */
761
762 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, 4, &aParam[0]);
763}
764
765
766/**
767 * Executes the specified VMRUN handler in 64-bit mode.
768 *
769 * @returns VBox status code.
770 * @param pVM Pointer to the VM.
771 * @param pVCpu Pointer to the VMCPU.
772 * @param pCtx Pointer to the guest-CPU context.
773 * @param enmOp The operation to perform.
774 * @param cbParam Number of parameters.
775 * @param paParam Array of 32-bit parameters.
776 */
777VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
778 uint32_t *paParam)
779{
780 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
781 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
782
783 /* Disable interrupts. */
784 RTHCUINTREG uOldEFlags = ASMIntDisableFlags();
785
786#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
787 RTCPUID idHostCpu = RTMpCpuId();
788 CPUMR0SetLApic(pVM, idHostCpu);
789#endif
790
791 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
792 CPUMSetHyperEIP(pVCpu, enmOp);
793 for (int i = (int)cbParam - 1; i >= 0; i--)
794 CPUMPushHyper(pVCpu, paParam[i]);
795
796 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
797 /* Call the switcher. */
798 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
799 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
800
801 /* Restore interrupts. */
802 ASMSetFlags(uOldEFlags);
803 return rc;
804}
805
806#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
807/** @} */
808
809
810/**
811 * Saves the host state.
812 *
813 * @returns VBox status code.
814 * @param pVM Pointer to the VM.
815 * @param pVCpu Pointer to the VMCPU.
816 *
817 * @remarks No-long-jump zone!!!
818 */
819VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
820{
821 NOREF(pVM);
822 NOREF(pVCpu);
823 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
824 return VINF_SUCCESS;
825}
826
827
828DECLINLINE(void) hmR0SvmAddXcptIntercept(uint32_t u32Xcpt)
829{
830 if (!(pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt)))
831 {
832 pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
833 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
834 }
835}
836
837DECLINLINE(void) hmR0SvmRemoveXcptIntercept(uint32_t u32Xcpt)
838{
839#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
840 if (pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
841 {
842 pVmcb->ctrl.u32InterceptException &= ~RT_BIT(u32Xcpt);
843 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
844 }
845#endif
846}
847
848
849/**
850 * Loads the guest control registers (CR0, CR2, CR3, CR4) into the VMCB.
851 *
852 * @returns VBox status code.
853 * @param pVCpu Pointer to the VMCPU.
854 * @param pCtx Pointer the guest-CPU context.
855 *
856 * @remarks No-long-jump zone!!!
857 */
858static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
859{
860 /*
861 * Guest CR0.
862 */
863 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
864 {
865 uint64_t u64GuestCR0 = pCtx->cr0;
866
867 /* Always enable caching. */
868 u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
869
870 /*
871 * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
872 */
873 if (!pVM->hm.s.fNestedPaging)
874 {
875 u64GuestCR0 |= X86_CR0_PG; /* When Nested Paging is not available, use shadow page tables. */
876 u64GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
877 }
878
879 /*
880 * Guest FPU bits.
881 */
882 bool fInterceptNM = false;
883 bool fInterceptMF = false;
884 u64GuestCR0 |= X86_CR0_NE; /* Use internal x87 FPU exceptions handling rather than external interrupts. */
885 if (CPUMIsGuestFPUStateActive(pVCpu))
886 {
887 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
888 if (!(u64GuestCR0 & X86_CR0_NE))
889 {
890 Log4(("hmR0SvmLoadGuestControlRegs: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
891 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
892 fInterceptMF = true;
893 }
894 }
895 else
896 {
897 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
898 u64GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
899 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
900 }
901
902 /*
903 * Update the exception intercept bitmap.
904 */
905 if (fInterceptNM)
906 hmR0SvmAddXcptIntercept(X86_XCPT_NM);
907 else
908 hmR0SvmRemoveXcptIntercept(X86_XCPT_NM);
909
910 if (fInterceptMF)
911 hmR0SvmAddXcptIntercept(X86_XCPT_MF);
912 else
913 hmR0SvmRemoveXcptIntercept(X86_XCPT_MF);
914
915 pVmcb->guest.u64CR0 = u64GuestCR0;
916 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
917 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
918 }
919
920 /*
921 * Guest CR2.
922 */
923 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR2)
924 {
925 pVmcb->guest.u64CR2 = pCtx->cr2;
926 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
927 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
928 }
929
930 /*
931 * Guest CR3.
932 */
933 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
934 {
935 if (pVM->hm.s.fNestedPaging)
936 {
937 PGMMODE enmShwPagingMode;
938#if HC_ARCH_BITS == 32
939 if (CPUMIsGuestInLongModeEx(pCtx))
940 enmShwPagingMode = PGMMODE_AMD64_NX;
941 else
942#endif
943 enmShwPagingMode = PGMGetHostMode(pVM);
944
945 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
946 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;
947 Assert(pVmcb->ctrl.u64NestedPagingCR3);
948 pVmcb->guest.u64CR3 = pCtx->cr3;
949 }
950 else
951 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
952
953 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
954 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
955 }
956
957 /*
958 * Guest CR4.
959 */
960 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
961 {
962 uint64_t u64GuestCR4 = pCtx->cr4;
963 if (!pVM->hm.s.fNestedPaging)
964 {
965 switch (pVCpu->hm.s.enmShadowMode)
966 {
967 case PGMMODE_REAL:
968 case PGMMODE_PROTECTED: /* Protected mode, no paging. */
969 AssertFailed();
970 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
971
972 case PGMMODE_32_BIT: /* 32-bit paging. */
973 u64GuestCR4 &= ~X86_CR4_PAE;
974 break;
975
976 case PGMMODE_PAE: /* PAE paging. */
977 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
978 /** Must use PAE paging as we could use physical memory > 4 GB */
979 u64GuestCR4 |= X86_CR4_PAE;
980 break;
981
982 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
983 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
984#ifdef VBOX_ENABLE_64_BITS_GUESTS
985 break;
986#else
987 AssertFailed();
988 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
989#endif
990
991 default: /* shut up gcc */
992 AssertFailed();
993 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
994 }
995 }
996
997 pVmcb->guest.u64CR4 = u64GuestCR4;
998 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
999 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
1000 }
1001
1002 return VINF_SUCCESS;
1003}
1004
1005/**
1006 * Loads the guest segment registers into the VMCB.
1007 *
1008 * @returns VBox status code.
1009 * @param pVCpu Pointer to the VMCPU.
1010 * @param pCtx Pointer to the guest-CPU context.
1011 *
1012 * @remarks No-long-jump zone!!!
1013 */
1014static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
1015{
1016 /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
1017 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
1018 {
1019 HMSVM_LOAD_SEG_REG(CS, cs);
1020 HMSVM_LOAD_SEG_REG(SS, ss);
1021 HMSVM_LOAD_SEG_REG(DS, ds);
1022 HMSVM_LOAD_SEG_REG(ES, es);
1023 HMSVM_LOAD_SEG_REG(FS, fs);
1024 HMSVM_LOAD_SEG_REG(GS, gs);
1025
1026 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
1027 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
1028 }
1029
1030 /* Guest TR. */
1031 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
1032 {
1033 HMSVM_LOAD_SEG_REG(TR, tr);
1034 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
1035 }
1036
1037 /* Guest LDTR. */
1038 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
1039 {
1040 HMSVM_LOAD_SEG_REG(LDTR, ldtr);
1041 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
1042 }
1043
1044 /* Guest GDTR. */
1045 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
1046 {
1047 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1048 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1049 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1050 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
1051 }
1052
1053 /* Guest IDTR. */
1054 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
1055 {
1056 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1057 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1058 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
1059 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
1060 }
1061}
1062
1063
1064/**
1065 * Loads the guest MSRs into the VMCB.
1066 *
1067 * @param pVCpu Pointer to the VMCPU.
1068 * @param pCtx Pointer to the guest-CPU context.
1069 *
1070 * @remarks No-long-jump zone!!!
1071 */
1072static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pCtx)
1073{
1074 /* Guest Sysenter MSRs. */
1075 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1076 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1077 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1078
1079 /*
1080 * Guest EFER MSR.
1081 * AMD-V requires guest EFER.SVME to be set. Weird.
1082 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
1083 */
1084 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1085
1086 /* 64-bit MSRs. */
1087 if (CPUMIsGuestInLongModeEx(pCtx))
1088 {
1089 pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
1090 pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
1091 }
1092 else
1093 {
1094 /* If the guest isn't in 64-bit mode, clear MSR_K6_LME bit from guest EFER otherwise AMD-V expects amd64 shadow paging. */
1095 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
1096 }
1097
1098 /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might
1099 * be writable in 32-bit mode. Clarify with AMD spec. */
1100 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1101 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1102 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1103 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1104 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1105}
1106
1107
1108/**
1109 * Loads the guest debug registers into the VMCB.
1110 *
1111 * @param pVCpu Pointer to the VMCPU.
1112 * @param pCtx Pointer to the guest-CPU context.
1113 *
1114 * @remarks No-long-jump zone!!!
1115 */
1116static void hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
1117{
1118 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
1119 return;
1120
1121 /** @todo Turn these into assertions if possible. */
1122 pCtx->dr[6] |= X86_DR6_INIT_VAL; /* Set reserved bits to 1. */
1123 pCtx->dr[6] &= ~RT_BIT(12); /* MBZ. */
1124
1125 pCtx->dr[7] &= 0xffffffff; /* Upper 32 bits MBZ. */
1126 pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
1127 pCtx->dr[7] |= 0x400; /* MB1. */
1128
1129 /* Update DR6, DR7 with the guest values. */
1130 pVmcb->guest.u64DR7 = pCtx->dr[7];
1131 pVmcb->guest.u64DR6 = pCtx->dr[6];
1132 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1133
1134 bool fInterceptDB = false;
1135 bool fInterceptMovDRx = false;
1136 if (DBGFIsStepping(pVCpu))
1137 {
1138 /* AMD-V doesn't have any monitor-trap flag equivalent. Instead, enable tracing in the guest and trap #DB. */
1139 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1140 fInterceptDB = true;
1141 }
1142
1143 if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
1144 {
1145 if (!CPUMIsHyperDebugStateActive(pVCpu))
1146 {
1147 rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
1148 AssertRC(rc);
1149
1150 /* Update DR6, DR7 with the hypervisor values. */
1151 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1152 pVmcb->guest.u64DR6 = CPUMGetHyperDR6(pVCpu);
1153 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
1154 }
1155 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1156 fInterceptMovDRx = true;
1157 }
1158 else if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
1159 {
1160 if (!CPUMIsGuestDebugStateActive(pVCpu))
1161 {
1162 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
1163 AssertRC(rc);
1164 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
1165 }
1166 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1167 Assert(fInterceptMovDRx == false);
1168 }
1169 else if (!CPUMIsGuestDebugStateActive(pVCpu))
1170 {
1171 /* For the first time we would need to intercept MOV DRx accesses even when the guest debug registers aren't loaded. */
1172 fInterceptMovDRx = true;
1173 }
1174
1175 if (fInterceptDB)
1176 hmR0SvmAddXcptIntercept(X86_XCPT_DB);
1177 else
1178 hmR0SvmRemoveXcptIntercept(X86_XCPT_DB);
1179
1180 if (fInterceptMovDRx)
1181 {
1182 if ( pVmcb->ctrl.u16InterceptRdDRx != 0xffff
1183 || pVmcb->ctrl.u16InterceptWrDRx != 0xffff)
1184 {
1185 pVmcb->ctrl.u16InterceptRdDRx = 0xffff;
1186 pVmcb->ctrl.u16InterceptWrDRx = 0xffff;
1187 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1188 }
1189 }
1190 else
1191 {
1192 if ( pVmcb->ctrl.u16InterceptRdDRx
1193 || pVmcb->ctrl.u16InterceptWrDRx)
1194 {
1195 pVmcb->ctrl.u16InterceptRdDRx = 0;
1196 pVmcb->ctrl.u16InterceptWrDRx = 0;
1197 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1198 }
1199 }
1200
1201 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
1202}
1203
1204/**
1205 * Sets up the appropriate function to run guest code.
1206 *
1207 * @returns VBox status code.
1208 * @param pVCpu Pointer to the VMCPU.
1209 * @param pCtx Pointer to the guest-CPU context.
1210 *
1211 * @remarks No-long-jump zone!!!
1212 */
1213static int hmR0SvmSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pCtx)
1214{
1215 if (CPUMIsGuestInLongModeEx(pCtx))
1216 {
1217#ifndef VBOX_ENABLE_64_BITS_GUESTS
1218 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1219#endif
1220 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
1221#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1222 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
1223 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
1224#else
1225 /* 64-bit host or hybrid host. */
1226 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
1227#endif
1228 }
1229 else
1230 {
1231 /* Guest is not in long mode, use the 32-bit handler. */
1232 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
1233 }
1234 return VINF_SUCCESS;
1235}
1236
1237
1238/**
1239 * Loads the guest state.
1240 *
1241 * @returns VBox status code.
1242 * @param pVM Pointer to the VM.
1243 * @param pVCpu Pointer to the VMCPU.
1244 * @param pCtx Pointer to the guest-CPU context.
1245 *
1246 * @remarks No-long-jump zone!!!
1247 */
1248VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1249{
1250 AssertPtr(pVM);
1251 AssertPtr(pVCpu);
1252 AssertPtr(pCtx);
1253 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1254
1255 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1256 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
1257
1258 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
1259
1260 int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pCtx);
1261 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1262
1263 hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
1264 hmR0SvmLoadGuestMsrs(pVCpu, pCtx);
1265
1266 pVmcb->guest.u64RIP = pCtx->rip;
1267 pVmcb->guest.u64RSP = pCtx->rsp;
1268 pVmcb->guest.u64RFlags = pCtx->eflags.u32;
1269 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1270 pVmcb->guest.u64RAX = pCtx->rax;
1271
1272 /* hmR0SvmLoadGuestDebugRegs() must be called -after- updating guest RFLAGS as the RFLAGS may need to be changed. */
1273 hmR0SvmLoadGuestDebugRegs(pVCpu, pCtx);
1274
1275 rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
1276 AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
1277
1278 /* Clear any unused and reserved bits. */
1279 pVCpu->hm.s.fContextUseFlags &= ~( HM_CHANGED_GUEST_SYSENTER_CS_MSR
1280 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
1281 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
1282
1283 AssertMsg(!pVCpu->hm.s.fContextUseFlags,
1284 ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
1285 pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
1286
1287 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
1288
1289 return rc;
1290}
1291
1292
1293/**
1294 * Sets up the usage of TSC offsetting for the VCPU.
1295 *
1296 * @param pVCpu Pointer to the VMCPU.
1297 *
1298 * @remarks No-long-jump zone!!!
1299 */
1300static void hmR0SvmSetupTscOffsetting(PVMCPU pVCpu)
1301{
1302 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1303 if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset))
1304 {
1305 uint64_t u64CurTSC = ASMReadTSC();
1306 if (u64CurTSC + pVmcb->ctrl.u64TSCOffset > TMCpuTickGetLastSeen(pVCpu))
1307 {
1308 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
1309 pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
1310 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
1311 }
1312 else
1313 {
1314 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
1315 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
1316 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
1317 }
1318 }
1319 else
1320 {
1321 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
1322 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
1323 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
1324 }
1325
1326 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1327}
1328
1329
1330/**
1331 * Sets an event as a pending event to be injected into the guest.
1332 *
1333 * @param pVCpu Pointer to the VMCPU.
1334 * @param pEvent Pointer to the SVM event.
1335 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
1336 * page-fault.
1337 */
1338DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
1339{
1340 Assert(!pVCpu->hm.s.Event.fPending);
1341
1342 pVCpu->hm.s.Event.u64IntrInfo = pEvent->u;
1343 pVCpu->hm.s.Event.fPending = true;
1344 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
1345
1346#ifdef VBOX_STRICT
1347 if (GCPtrFaultAddress)
1348 {
1349 AssertMsg( pEvent->n.u8Vector == X86_XCPT_PF
1350 && pEvent->n.u3Type == SVM_EVENT_EXCEPTION,
1351 ("hmR0SvmSetPendingEvent: Setting fault-address for non-#PF. u8Vector=%#x Type=%#RX32 GCPtrFaultAddr=%#RGx\n",
1352 pEvent->n.u8Vector, (uint32_t)pEvent->n.u3Type, GCPtrFaultAddress));
1353 Assert(GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
1354 }
1355#endif
1356
1357 Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x ErrorCodeValid=%#x ErrorCode=%#RX32\n", pEvent->u,
1358 pEvent->n.u8Vector, pEvent->n.u3Type, (uint8_t)pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
1359}
1360
1361
1362/**
1363 * Injects an event into the guest upon VMRUN by updating the relevant field
1364 * in the VMCB.
1365 *
1366 * @param pVCpu Pointer to the VMCPU.
1367 * @param pVmcb Pointer to the guest VMCB.
1368 * @param pCtx Pointer to the guest-CPU context.
1369 * @param pEvent Pointer to the event.
1370 *
1371 * @remarks No-long-jump zone!!!
1372 * @remarks Requires CR0!
1373 */
1374DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
1375{
1376 pVmcb->ctrl.EventInject.u = pEvent->u;
1377 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
1378}
1379
1380
1381/**
1382 * Converts any TRPM trap into a pending SVM event. This is typically used when
1383 * entering from ring-3 (not longjmp returns).
1384 *
1385 * @param pVCpu Pointer to the VMCPU.
1386 */
1387static void hmR0SvmTrpmTrapToPendingEvent(PVMCPU pVCpu)
1388{
1389 Assert(TRPMHasTrap(pVCpu));
1390 Assert(!pVCpu->hm.s.Event.fPending);
1391
1392 uint8_t uVector;
1393 TRPMEVENT enmTrpmEvent;
1394 RTGCUINT uErrCode;
1395 RTGCUINTPTR GCPtrFaultAddress;
1396 uint8_t cbInstr;
1397
1398 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
1399 AssertRC(rc);
1400
1401 PSVMEVENT pEvent = &pVCpu->hm.s.Event;
1402 pEvent->u = 0;
1403 pEvent->n.u1Valid = 1;
1404
1405 /* Refer AMD spec. 15.20 "Event Injection" for the format. */
1406 pEvent->n.u8Vector = uVector;
1407 if (enmTrpmEvent == TRPM_TRAP)
1408 {
1409 pEvent->n.u3Type = SVM_EVENT_EXCEPTION;
1410 switch (uVector)
1411 {
1412 case X86_XCPT_PF:
1413 case X86_XCPT_DF:
1414 case X86_XCPT_TS:
1415 case X86_XCPT_NP:
1416 case X86_XCPT_SS:
1417 case X86_XCPT_GP:
1418 case X86_XCPT_AC:
1419 {
1420 pEvent->n.u32ErrorCode = uErrCode;
1421 pEvent->n.u1ErrorCodeValid = 1;
1422 break;
1423 }
1424 }
1425 }
1426 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
1427 {
1428 if (uVector == X86_XCPT_NMI)
1429 pEvent->n.u3Type = SVM_EVENT_NMI;
1430 else
1431 pEvent->n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
1432 }
1433 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
1434 pEvent->n.u3Type = SVM_EVENT_SOFTWARE_INT;
1435 else
1436 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
1437
1438 rc = TRPMResetTrap(pVCpu);
1439 AssertRC(rc);
1440
1441 Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%#x uErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
1442 pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
1443}
1444
1445
1446/**
1447 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
1448 * AMD-V to execute any instruction.
1449 *
1450 * @param pVCpu Pointer to the VMCPU.
1451 */
1452static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
1453{
1454 Assert(pVCpu->hm.s.Event.fPending);
1455 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
1456
1457 PSVMEVENT pEvent = &pVCpu->hm.s.Event;
1458 uint8_t uVector = pEvent->n.u8Vector;
1459 uint8_t uVectorType = pEvent->n.u3Type;
1460
1461 TRPMEVENT enmTrapType;
1462 switch (uVectorType)
1463 {
1464 case SVM_EVENT_EXTERNAL_IRQ:
1465 case SVM_EVENT_NMI:
1466 enmTrapType = TRPM_HARDWARE_INT;
1467 break;
1468 case SVM_EVENT_SOFTWARE_INT:
1469 enmTrapType = TRPM_SOFTWARE_INT;
1470 break;
1471 case SVM_EVENT_EXCEPTION:
1472 enmTrapType = TRPM_TRAP;
1473 break;
1474 default:
1475 AssertMsgFailed(("Invalid pending-event type %#x\n", uVectorType));
1476 enmTrapType = TRPM_32BIT_HACK;
1477 break;
1478 }
1479
1480 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, uVectorType));
1481
1482 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
1483 AssertRC(rc);
1484
1485 if (pEvent->n.u1ErrorCodeValid)
1486 TRPMSetErrorCode(pVCpu, pEvent->n.u32ErrorCode);
1487
1488 if ( uVectorType == SVM_EVENT_EXCEPTION
1489 && uVector == X86_XCPT_PF)
1490 {
1491 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
1492 Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
1493 }
1494 else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
1495 {
1496 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
1497 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
1498 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
1499 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
1500 }
1501 pVCpu->hm.s.Event.fPending = false;
1502}
1503
1504
1505/**
1506 * Gets the guest's interrupt-shadow.
1507 *
1508 * @returns The guest's interrupt-shadow.
1509 * @param pVCpu Pointer to the VMCPU.
1510 * @param pCtx Pointer to the guest-CPU context.
1511 *
1512 * @remarks No-long-jump zone!!!
1513 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
1514 */
1515DECLINLINE(uint32_t) hmR0SvmGetGuestIntrShadow(PVMCPU pVCpu, PCPUMCTX pCtx)
1516{
1517 /*
1518 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
1519 * inhibit interrupts or clear any existing interrupt-inhibition.
1520 */
1521 uint32_t uIntrState = 0;
1522 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1523 {
1524 if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
1525 {
1526 /*
1527 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
1528 * AMD-V, the flag's condition to be cleared is met and thus the cleared state is correct.
1529 */
1530 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1531 }
1532 else
1533 uIntrState = SVM_INTERRUPT_SHADOW_ACTIVE;
1534 }
1535 return uIntrState;
1536}
1537
1538
1539/**
1540 * Sets the virtual interrupt intercept control in the VMCB which
1541 * instructs AMD-V to cause a #VMEXIT as soon as the guest is in a state to
1542 * receive interrupts.
1543 *
1544 * @param pVmcb Pointer to the VMCB.
1545 */
1546DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
1547{
1548 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_VINTR))
1549 {
1550 pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 1; /* A virtual interrupt is pending. */
1551 pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0; /* Not necessary as we #VMEXIT for delivering the interrupt. */
1552 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
1553 pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
1554 }
1555}
1556
1557
1558/**
1559 * Injects any pending events into the guest if the guest is in a state to
1560 * receive them.
1561 *
1562 * @param pVCpu Pointer to the VMCPU.
1563 * @param pCtx Pointer to the guest-CPU context.
1564 */
1565static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
1566{
1567 Assert(!TRPMHasTrap(pVCpu));
1568
1569 const bool fIntShadow = !!hmR0SvmGetGuestIntrShadow(pVCpu, pCtx);
1570 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1571
1572 SVMEVENT Event;
1573 Event.u = 0;
1574 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
1575 {
1576 Event.u = pVCpu->hm.s.Event.u64IntrInfo;
1577 bool fInject = true;
1578 if ( fIntShadow
1579 && ( Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ
1580 || Event.n.u3Type == SVM_EVENT_NMI))
1581 {
1582 fInject = false;
1583 }
1584
1585 if ( fInject
1586 && Event.n.u1Valid)
1587 {
1588 pVCpu->hm.s.Event.fPending = false;
1589 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
1590 }
1591 else
1592 hmR0SvmSetVirtIntrIntercept(pVmcb);
1593 } /** @todo SMI. SMIs take priority over NMIs. */
1594 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts . */
1595 {
1596 if (!fIntShadow)
1597 {
1598 Log4(("Injecting NMI\n"));
1599 Event.n.u1Valid = 1;
1600 Event.n.u8Vector = X86_XCPT_NMI;
1601 Event.n.u3Type = SVM_EVENT_NMI;
1602
1603 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
1604 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1605 }
1606 else
1607 hmR0SvmSetVirtIntrIntercept(pVmcb);
1608 }
1609 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
1610 {
1611 /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
1612 const bool fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
1613 if ( !fBlockInt
1614 && !fIntShadow)
1615 {
1616 uint8_t u8Interrupt;
1617 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
1618 if (RT_SUCCESS(rc))
1619 {
1620 Log4(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
1621
1622 Event.n.u1Valid = 1;
1623 Event.n.u8Vector = u8Interrupt;
1624 Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
1625
1626 hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
1627 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
1628 }
1629 else
1630 {
1631 /** @todo Does this actually happen? If not turn it into an assertion. */
1632 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
1633 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
1634 }
1635 }
1636 else
1637 hmR0SvmSetVirtIntrIntercept(pVmcb);
1638 }
1639
1640 /* Update the guest interrupt shadow in the VMCB. */
1641 pVmcb->ctrl.u64IntShadow = !!fIntShadow;
1642}
1643
1644
1645/**
1646 * Check per-VM and per-VCPU force flag actions that require us to go back to
1647 * ring-3 for one reason or another.
1648 *
1649 * @returns VBox status code (information status code included).
1650 * @retval VINF_SUCCESS if we don't have any actions that require going back to
1651 * ring-3.
1652 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
1653 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
1654 * interrupts).
1655 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
1656 * all EMTs to be in ring-3.
1657 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
1658 * @retval VINF_EM_NO_MEMORY if PGM is out of memory and we need to return to
1659 * the EM loop.
1660 *
1661 * @param pVM Pointer to the VM.
1662 * @param pVCpu Pointer to the VMCPU.
1663 * @param pCtx Pointer to the guest-CPU context.
1664 */
1665static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1666{
1667 Assert(VMMRZCallRing3IsEnabled(pVCpu));
1668 int rc = VINF_SUCCESS;

1669 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
1670 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
1671 | VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3))
1672 {
1673 /* Pending HM CR3 sync. No PAE PDPEs (VMCPU_FF_HM_UPDATE_PAE_PDPES) on AMD-V. */
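 /* (The VMCB, unlike the VMCS, has no guest PDPE fields, so there is nothing extra to refresh on AMD-V.) */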
1674 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1675 {
1676 rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
1677 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
1678 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1679 }
1680
1681 /* Pending PGM CR3 sync. */
1682 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
1683 {
1684 rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
1685 if (rc != VINF_SUCCESS)
1686 {
1687 AssertRC(rc);
1688 Log4(("hmR0SvmCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
1689 return rc;
1690 }
1691 }
1692
1693 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
1694 /* -XXX- what was that about single stepping? */
1695 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
1696 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
1697 {
1698 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
1699 rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
1700 Log4(("hmR0SvmCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
1701 return rc;
1702 }
1703
1704 /* Pending VM request packets, such as hardware interrupts. */
1705 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
1706 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1707 {
1708 Log4(("hmR0SvmCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
1709 return VINF_EM_PENDING_REQUEST;
1710 }
1711
1712 /* Pending PGM pool flushes. */
1713 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
1714 {
1715 Log4(("hmR0SvmCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
1716 return VINF_PGM_POOL_FLUSH_PENDING;
1717 }
1718
1719 /* Pending DMA requests. */
1720 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
1721 {
1722 Log4(("hmR0SvmCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
1723 return VINF_EM_RAW_TO_R3;
1724 }
1725 }
1726
1727 /* Paranoia. */
1728 Assert(rc != VERR_EM_INTERPRETER);
1729 return VINF_SUCCESS;
1730}
1731
1732
1733/**
1734 * Does the preparations before executing guest code in AMD-V.
1735 *
1736 * This may cause longjmps to ring-3 and may even result in rescheduling to the
1737 * recompiler. We must be cautious about what we do here regarding committing
1738 * guest-state information into the VMCB, assuming we assuredly execute the
1739 * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
1740 * clearing the common-state (TRPM/force-flags), we must undo those changes so
1741 * that the recompiler can (and should) use them when it resumes guest
1742 * execution. Otherwise, such operations must be deferred until we can no longer
1743 * exit to ring-3.
1744 *
1745 * @returns VBox status code (informational status codes included).
1746 * @retval VINF_SUCCESS if we can proceed with running the guest.
1747 * @retval VINF_* scheduling changes, we have to go back to ring-3.
1748 *
1749 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
1750 * @param pCtx Pointer to the guest-CPU context.
1751 */
1752DECLINLINE(int) hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1753{
1754 /* Check force flag actions that might require us to go back to ring-3. */
1755 int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
1756 if (rc != VINF_SUCCESS)
1757 return rc;
1758
1759#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
1760 /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
 /** @todo Keep the saved flags in an SVM equivalent of the VMX-transient structure so that they can
  *        be restored after executing the guest; for now they only live in this local. */
1761 RTCCUINTREG uEflags = ASMIntDisableFlags();
1762 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
1763 {
1764 ASMSetFlags(uEflags);
1765 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
1766 /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
1767 return VINF_EM_RAW_INTERRUPT;
1768 }
1769 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1770 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1771#endif
1772
1773 /** @todo -XXX- TPR patching. */
1774
1775 /* Convert any pending TRPM traps to HM events for injection. */
1776 if (TRPMHasTrap(pVCpu))
1777 hmR0SvmTrpmTrapToPendingEvent(pVCpu);
1778
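 /* Inject any pending event, or set up the VINTR intercept so that we get a #VMEXIT once the guest
    is in a state to receive it. */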
1779 hmR0SvmInjectPendingEvent(pVCpu, pCtx);
1780 return VINF_SUCCESS;
1781}
1782
1783
1784/**
1785 * Prepares to run guest code in AMD-V once we've committed to doing so. This
1786 * means there is no backing out to ring-3 or anywhere else at this
1787 * point.
1788 *
1789 * @param pVM Pointer to the VM.
1790 * @param pVCpu Pointer to the VMCPU.
1791 * @param pCtx Pointer to the guest-CPU context.
1792 *
1793 * @remarks Called with preemption disabled.
1794 * @remarks No-long-jump zone!!!
1795 */
1796DECLINLINE(void) hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1797{
1798 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1799 Assert(VMMR0IsLogFlushDisabled(pVCpu));
1800
1801#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
1802 /** @todo I don't see the point of this; VMMR0EntryFast() already disables interrupts for the entire period. */
1803 RTCCUINTREG uEflags = ASMIntDisableFlags(); NOREF(uEflags);
1804 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1805#endif
1806 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;

1807 /*
1808 * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
1809 * We leave the corresponding VMCB Clean Bit untouched as the value has not changed since the previous run.
1810 */
1811 /** @todo The above assumption could be wrong. It's not documented what
1812 * should be done wrt to the VMCB Clean Bit, but we'll find out the
1813 * hard way. */
1814 pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
1815
1816 /* Load the guest state. */
1817 int rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
1818 AssertRC(rc);
1819 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
1821}
1822
1823
1824/**
1825 * Wrapper for running the guest code in AMD-V.
1826 *
1827 * @returns VBox strict status code.
1828 * @param pVM Pointer to the VM.
1829 * @param pVCpu Pointer to the VMCPU.
1830 * @param pCtx Pointer to the guest-CPU context.
1831 *
1832 * @remarks No-long-jump zone!!!
1833 */
1834DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1835{
1836 /*
1837 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
1838 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
1839 * See the MSDN docs on "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
1840 */
1841#ifdef VBOX_WITH_KERNEL_USING_XMM
1842 return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
1843 pVCpu->hm.s.svm.pfnVMRun);
1844#else
1845 return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
1846#endif
1847}
1848
1849
1850/**
1851 * Runs the guest code using AMD-V.
1852 *
1853 * @returns VBox status code.
1854 * @param pVM Pointer to the VM.
1855 * @param pVCpu Pointer to the VMCPU.
1856 * @param pCtx Pointer to the guest CPU context.
1857 */
1858VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1859{
1860 Assert(VMMRZCallRing3IsEnabled(pVCpu));
1861 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1862
1863 uint32_t cLoops = 0;
1864 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
1865 int rc = VERR_INTERNAL_ERROR_5;
1866
1867 for (;; cLoops++)
1868 {
1869 Assert(!HMR0SuspendPending());
1870 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
1871 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
1872 (unsigned)RTMpCpuId(), cLoops));
1873
1874 /* Preparatory work for running guest code; this may return to ring-3 for some last-minute updates. */
1875 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
1876 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx);
1877 if (rc != VINF_SUCCESS)
1878 break;
1879
1880 /*
1881 * No longjmps to ring-3 from this point on!!!
1882 * Asserts() will still longjmp to ring-3 (but won't return); this is intentional and better than a kernel panic.
1883 * This also disables flushing of the R0-logger instance (if any).
1884 */
1885 VMMRZCallRing3Disable(pVCpu);
1886 VMMRZCallRing3RemoveNotification(pVCpu);
1887 hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx);
1888
1889 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
1890
1891 /** -XXX- todo */
1892 }
1893
1894 return rc;
1895}