VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp@ 66230

Last change on this file since 66230 was 66230, checked in by vboxsync, 8 years ago

VMM: Nested Hw.virt: Build fix for Windows.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 28.2 KB
/* $Id: HMSVMAll.cpp 66230 2017-03-23 15:00:51Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - All contexts.
 */

/*
 * Copyright (C) 2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include "HMInternal.h"
#include <VBox/vmm/apic.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_svm.h>


#ifndef IN_RC
/**
 * Emulates a simple MOV TPR (CR8) instruction, used for TPR patching on 32-bit
 * guests. This simply looks up the patch record at EIP and performs the
 * required modification.
 *
 * This VMMCALL is used as a fallback mechanism when the mov to/from cr8 isn't
 * exactly like how we want it to be (e.g. not followed by shr 4 as is usually
 * done for TPR). See hmR3ReplaceTprInstr() for the details.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if the access was handled successfully.
 * @retval  VERR_NOT_FOUND if no patch record for this RIP could be found.
 * @retval  VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pCtx                Pointer to the guest-CPU context.
 * @param   pfUpdateRipAndRF    Where to return whether the guest RIP/EIP has
 *                              been updated as part of the TPR patch operation.
 */
static int hmSvmEmulateMovTpr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdateRipAndRF)
{
    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));

    /*
     * We do this in a loop as we increment the RIP after a successful emulation
     * and the new RIP may be a patched instruction which needs emulation as well.
     */
    bool fUpdateRipAndRF = false;
    bool fPatchFound = false;
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    for (;;)
    {
        bool fPending;
        uint8_t u8Tpr;

        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
        if (!pPatch)
            break;

        fPatchFound = true;
        switch (pPatch->enmType)
        {
            case HMTPRINSTR_READ:
            {
                int rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
                AssertRC(rc);

                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
                AssertRC(rc);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                fUpdateRipAndRF = true;
                break;
            }

            case HMTPRINSTR_WRITE_REG:
            case HMTPRINSTR_WRITE_IMM:
            {
                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
                {
                    uint32_t u32Val;
                    int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
                    AssertRC(rc);
                    u8Tpr = u32Val;
                }
                else
                    u8Tpr = (uint8_t)pPatch->uSrcOperand;

                int rc2 = APICSetTpr(pVCpu, u8Tpr);
                AssertRC(rc2);
                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);

                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                fUpdateRipAndRF = true;
                break;
            }

            default:
            {
                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
                pVCpu->hm.s.u32HMError = pPatch->enmType;
                *pfUpdateRipAndRF = fUpdateRipAndRF;
                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
            }
        }
    }

    *pfUpdateRipAndRF = fUpdateRipAndRF;
    if (fPatchFound)
        return VINF_SUCCESS;
    return VERR_NOT_FOUND;
}
#endif /* !IN_RC */


/**
 * Performs the operations necessary as part of the vmmcall instruction
 * execution in the guest.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS on successful handling, no \#UD needs to be thrown,
 *          update RIP and eflags.RF depending on @a pfUpdatedRipAndRF and
 *          continue guest execution.
 * @retval  VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
 *          RIP.
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pCtx                Pointer to the guest-CPU context.
 * @param   pfUpdatedRipAndRF   Where to return whether the guest RIP/EIP has
 *                              been updated as part of handling the VMMCALL
 *                              operation.
 */
VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdatedRipAndRF)
{
#ifndef IN_RC
    /*
     * TPR patched instruction emulation for 32-bit guests.
     */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.fTprPatchingAllowed)
    {
        int rc = hmSvmEmulateMovTpr(pVCpu, pCtx, pfUpdatedRipAndRF);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;

        if (rc != VERR_NOT_FOUND)
        {
            Log(("hmSvmExitVmmCall: hmSvmEmulateMovTpr returns %Rrc\n", rc));
            return rc;
        }
    }
#endif

    /*
     * Paravirtualized hypercalls.
     */
    *pfUpdatedRipAndRF = false;
    if (pVCpu->hm.s.fHypercallsEnabled)
        return GIMHypercall(pVCpu, pCtx);

    return VERR_NOT_AVAILABLE;
}


/**
 * Performs the operations necessary as part of the vmrun instruction execution
 * in the guest.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS successfully executed VMRUN and entered nested-guest
 *          code execution.
 * @retval  VINF_SVM_VMEXIT when executing VMRUN causes a \#VMEXIT
 *          (SVM_EXIT_INVALID most likely).
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   GCPhysVmcb  Guest physical address of the VMCB to run.
 */
/** @todo move this to IEM and make the VMRUN version that can execute under
 *        hardware SVM here instead. */
VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb)
{
    Assert(pVCpu);
    Assert(pCtx);
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * Cache the physical address of the VMCB for #VMEXIT exceptions.
     */
    pCtx->hwvirt.svm.GCPhysVmcb = GCPhysVmcb;

    /*
     * Save host state.
     */
    SVMVMCBSTATESAVE VmcbNstGst;
    int rc = PGMPhysSimpleReadGCPhys(pVM, &VmcbNstGst, GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest), sizeof(SVMVMCBSTATESAVE));
    if (RT_SUCCESS(rc))
    {
        PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
        pHostState->es = pCtx->es;
        pHostState->cs = pCtx->cs;
        pHostState->ss = pCtx->ss;
        pHostState->ds = pCtx->ds;
        pHostState->gdtr = pCtx->gdtr;
        pHostState->idtr = pCtx->idtr;
        pHostState->uEferMsr = pCtx->msrEFER;
        pHostState->uCr0 = pCtx->cr0;
        pHostState->uCr3 = pCtx->cr3;
        pHostState->uCr4 = pCtx->cr4;
        pHostState->rflags = pCtx->rflags;
        pHostState->uRip = pCtx->rip;
        pHostState->uRsp = pCtx->rsp;
        pHostState->uRax = pCtx->rax;

        /*
         * Load the VMCB controls.
         */
        rc = PGMPhysSimpleReadGCPhys(pVM, &pCtx->hwvirt.svm.VmcbCtrl, GCPhysVmcb, sizeof(pCtx->hwvirt.svm.VmcbCtrl));
        if (RT_SUCCESS(rc))
        {
            PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;

            /*
             * Validate guest-state and controls.
             */
            /* VMRUN must always be intercepted. */
            if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))
            {
                Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n"));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }

            /* Nested paging. */
            if (   pVmcbCtrl->NestedPaging.n.u1NestedPaging
                && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging)
            {
                Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n"));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }

            /* AVIC. */
            if (   pVmcbCtrl->IntCtrl.n.u1AvicEnable
                && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fAvic)
            {
                Log(("HMSvmVmRun: AVIC not supported -> #VMEXIT\n"));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }

            /* Last branch record (LBR) virtualization. */
            if (   (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)
                && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fLbrVirt)
            {
                Log(("HMSvmVmRun: LBR virtualization not supported -> #VMEXIT\n"));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }

            /* Guest ASID. */
            if (!pVmcbCtrl->TLBCtrl.n.u32ASID)
            {
                Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n"));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }

            /* IO permission bitmap. */
            RTGCPHYS GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
            if (   (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
                || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap))
            {
                Log(("HMSvmVmRun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }

            /* MSR permission bitmap. */
            RTGCPHYS GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
            if (   (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
                || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap))
            {
                Log(("HMSvmVmRun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }

            /* CR0. */
            if (   !(VmcbNstGst.u64CR0 & X86_CR0_CD)
                && (VmcbNstGst.u64CR0 & X86_CR0_NW))
            {
                Log(("HMSvmVmRun: CR0 no-write through (NW) set with caching enabled (CD clear). CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }
            if (VmcbNstGst.u64CR0 >> 32)
            {
                Log(("HMSvmVmRun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }
            /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */

            /* DR6 and DR7. */
            if (   VmcbNstGst.u64DR6 >> 32
                || VmcbNstGst.u64DR7 >> 32)
            {
                Log(("HMSvmVmRun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64DR6,
                     VmcbNstGst.u64DR7));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }

            /*
             * Copy segments from nested-guest VMCB state to the guest-CPU state.
             *
             * We do this here as we need to use the CS attributes and it's easier this way
             * than using the VMCB format selectors. It doesn't really matter where we copy
             * the state, as we restore the guest-CPU context state on the \#VMEXIT anyway.
             */
            HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, ES, es);
            HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, CS, cs);
            HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, SS, ss);
            HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, DS, ds);

            /** @todo Segment attribute overrides by VMRUN. */

            /*
             * CPL adjustments and overrides.
             *
             * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().
             * We shall thus adjust both CS.DPL and SS.DPL here.
             */
            pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = VmcbNstGst.u8CPL;
            if (CPUMIsGuestInV86ModeEx(pCtx))
                pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 3;
            if (CPUMIsGuestInRealModeEx(pCtx))
                pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 0;

            /*
             * Continue validating guest-state and controls.
             */
            /* EFER, CR0 and CR4. */
            uint64_t uValidEfer;
            rc = CPUMGetValidateEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);
            if (RT_FAILURE(rc))
            {
                Log(("HMSvmVmRun: EFER invalid uOldEfer=%#RX64 uValidEfer=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64EFER, uValidEfer));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }
            Assert(   !(uValidEfer & MSR_K6_EFER_LME)
                   || VmcbNstGst.u64CR0 & X86_CR0_PG);
            bool const fSvm = (uValidEfer & MSR_K6_EFER_SVME);
            bool const fLongModeSupported = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
            bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
            bool const fLongModeEnabled = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
            bool const fPaging = RT_BOOL(VmcbNstGst.u64CR0 & X86_CR0_PG);
            bool const fPae = RT_BOOL(VmcbNstGst.u64CR4 & X86_CR4_PAE);
            bool const fProtMode = RT_BOOL(VmcbNstGst.u64CR0 & X86_CR0_PE);
            bool const fLongModeWithPaging = fLongModeEnabled && fPaging;
            bool const fLongModeConformCS = pCtx->cs.Attr.n.u1Long && pCtx->cs.Attr.n.u1DefBig;
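            /* Reject invalid combinations: EFER.SVME must be set; long mode must not be
               enabled or active when the guest CPU profile doesn't support it; long-mode
               paging requires both PAE and protected mode; and CS.L and CS.D must not
               both be set while long-mode paging is in effect. */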
            if (   !fSvm
                || (!fLongModeSupported && fLongModeActiveOrEnabled)
                || (fLongModeWithPaging && !fPae)
                || (fLongModeWithPaging && !fProtMode)
                || (   fLongModeEnabled
                    && fPaging
                    && fPae
                    && fLongModeConformCS))
            {
                Log(("HMSvmVmRun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));
                return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }

            /*
             * Preserve the required force-flags.
             *
             * We only preserve the force-flags that would affect the execution of the
             * nested-guest (or the guest).
             *
             * - VMCPU_FF_INHIBIT_INTERRUPTS needn't be preserved as it's for a single
             *   instruction which is this VMRUN instruction itself.
             *
             * - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
             *   execution of a subsequent IRET instruction in the guest.
             *
             * - The remaining FFs (e.g. timers) can stay in place so that we will be
             *   able to generate interrupts that should cause #VMEXITs for the
             *   nested-guest.
             */
            /** @todo Anything else missed here? */
            pCtx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;

            /*
             * Interrupt shadow.
             */
            if (pVmcbCtrl->u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
                EMSetInhibitInterruptsPC(pVCpu, VmcbNstGst.u64RIP);

            /*
             * TLB flush control.
             */
            /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
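            /* Until ASID-based flushing is implemented (see the todo above), every
               requested flush type below is handled as a full, global TLB flush. */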
            if (   pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
                || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
                || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
                PGMFlushTLB(pVCpu, VmcbNstGst.u64CR3, true /* fGlobal */);

            /** @todo @bugref{7243}: SVM TSC offset, see tmCpuTickGetInternal. */

            /*
             * Copy the remaining guest state from the VMCB to the guest-CPU context.
             */
            pCtx->gdtr.cbGdt = VmcbNstGst.GDTR.u32Limit;
            pCtx->gdtr.pGdt = VmcbNstGst.GDTR.u64Base;
            pCtx->idtr.cbIdt = VmcbNstGst.IDTR.u32Limit;
            pCtx->idtr.pIdt = VmcbNstGst.IDTR.u64Base;
            pCtx->cr0 = VmcbNstGst.u64CR0;
            pCtx->cr4 = VmcbNstGst.u64CR4;
            pCtx->cr3 = VmcbNstGst.u64CR3;
            pCtx->cr2 = VmcbNstGst.u64CR2;
            pCtx->dr[6] = VmcbNstGst.u64DR6;
            pCtx->dr[7] = VmcbNstGst.u64DR7;
            pCtx->rflags.u = VmcbNstGst.u64RFlags;
            pCtx->rax = VmcbNstGst.u64RAX;
            pCtx->rsp = VmcbNstGst.u64RSP;
            pCtx->rip = VmcbNstGst.u64RIP;

            /* Apply the DR6 and DR7 mandatory set/clear bit masks. */
            pCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
            pCtx->dr[6] |= X86_DR6_RA1_MASK;
            pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
            pCtx->dr[7] |= X86_DR7_RA1_MASK;

            /*
             * Check for pending virtual interrupts.
             */
            if (pVmcbCtrl->IntCtrl.n.u1VIrqValid)
                VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);

            /*
             * Set the global interrupt flag to allow interrupts in the nested-guest.
             */
            pCtx->hwvirt.svm.fGif = 1;

            /*
             * Event injection.
             */
            PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;
            if (pEventInject->n.u1Valid)
            {
                uint8_t const uVector = pEventInject->n.u8Vector;
                TRPMEVENT const enmType = HMSvmEventToTrpmEventType(pEventInject);
                uint16_t const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;

                /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */
                if (enmType == TRPM_32BIT_HACK)
                {
                    Log(("HMSvmVmRun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));
                    return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
                }
                if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)
                {
                    if (   uVector == X86_XCPT_NMI
                        || uVector > 31 /* X86_XCPT_MAX */)
                    {
                        Log(("HMSvmVmRun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));
                        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
                    }
                    if (   uVector == X86_XCPT_BR
                        && CPUMIsGuestInLongModeEx(pCtx))
                    {
                        Log(("HMSvmVmRun: Cannot inject #BR when in long mode -> #VMEXIT\n"));
                        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
                    }
                    /** @todo any others? */
                }

                /** @todo NRIP: Software interrupts can only be pushed properly if we support
                 *        NRIP for the nested-guest to calculate the instruction length
                 *        below. */
                IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);
            }

            return VINF_SUCCESS;
        }

        /* Shouldn't really happen as the caller should've validated the physical address already. */
        Log(("HMSvmVmRun: Failed to read nested-guest VMCB control area at %#RGp -> #VMEXIT\n",
             GCPhysVmcb));
        return VERR_SVM_IPE_4;
    }

    /* Shouldn't really happen as the caller should've validated the physical address already. */
    Log(("HMSvmVmRun: Failed to read nested-guest VMCB save-state area at %#RGp -> #VMEXIT\n",
         GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest)));
    return VERR_SVM_IPE_5;
}


/**
 * SVM nested-guest \#VMEXIT handler.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SVM_VMEXIT when the \#VMEXIT is successful.
 * @retval  VERR_SVM_VMEXIT_FAILED when the \#VMEXIT failed restoring the guest's
 *          "host state" and a shutdown is required.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        The guest-CPU context.
 * @param   uExitCode   The exit code.
 * @param   uExitInfo1  The exit info. 1 field.
 * @param   uExitInfo2  The exit info. 2 field.
 */
VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
                                             uint64_t uExitInfo2)
{
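    /* Note: SVM_EXIT_INVALID is accepted even when not in nested hw.virt mode, so that
       the VMRUN consistency checks above can raise it before the nested-guest state is
       fully active. */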
    if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
        || uExitCode == SVM_EXIT_INVALID)
    {
        RT_NOREF(pVCpu);

        pCtx->hwvirt.svm.fGif = 0;
#ifdef VBOX_STRICT
        RT_ZERO(pCtx->hwvirt.svm.VmcbCtrl);
        RT_ZERO(pCtx->hwvirt.svm.HostState);
        pCtx->hwvirt.svm.GCPhysVmcb = NIL_RTGCPHYS;
#endif

        /*
         * Save the nested-guest state into the VMCB state-save area.
         */
        SVMVMCBSTATESAVE VmcbNstGst;
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, ES, es);
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, CS, cs);
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, SS, ss);
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, DS, ds);
        VmcbNstGst.GDTR.u32Limit = pCtx->gdtr.cbGdt;
        VmcbNstGst.GDTR.u64Base = pCtx->gdtr.pGdt;
        VmcbNstGst.IDTR.u32Limit = pCtx->idtr.cbIdt;
        VmcbNstGst.IDTR.u64Base = pCtx->idtr.pIdt;
        VmcbNstGst.u64EFER = pCtx->msrEFER;
        VmcbNstGst.u64CR4 = pCtx->cr4;
        VmcbNstGst.u64CR3 = pCtx->cr3;
        VmcbNstGst.u64CR2 = pCtx->cr2;
        VmcbNstGst.u64CR0 = pCtx->cr0;
        /** @todo Nested paging. */
        VmcbNstGst.u64RFlags = pCtx->rflags.u64;
        VmcbNstGst.u64RIP = pCtx->rip;
        VmcbNstGst.u64RSP = pCtx->rsp;
        VmcbNstGst.u64RAX = pCtx->rax;
        VmcbNstGst.u64DR7 = pCtx->dr[7];
        VmcbNstGst.u64DR6 = pCtx->dr[6];
        VmcbNstGst.u8CPL = pCtx->ss.Attr.n.u2Dpl;   /* See comment in CPUMGetGuestCPL(). */

        /* Save interrupt shadow of the nested-guest instruction if any. */
        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
        {
            RT_ZERO(pCtx->hwvirt.svm.VmcbCtrl);
            pCtx->hwvirt.svm.VmcbCtrl.u64IntShadow |= SVM_INTERRUPT_SHADOW_ACTIVE;
        }

        /*
         * Save additional state and intercept information.
         */
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
        {
            Assert(pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIrqValid);
            Assert(pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u8VIrqVector);
        }
        /* Save V_TPR. */

        /** @todo NRIP. */

        /* Save exit information. */
        pCtx->hwvirt.svm.VmcbCtrl.u64ExitCode = uExitCode;
        pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo1 = uExitInfo1;
        pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo2 = uExitInfo2;

        /*
         * Clear event injection.
         */
        pCtx->hwvirt.svm.VmcbCtrl.EventInject.n.u1Valid = 0;

        /*
         * Write back the VMCB controls to the guest VMCB in guest physical memory.
         */
        int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->hwvirt.svm.GCPhysVmcb, &pCtx->hwvirt.svm.VmcbCtrl,
                                          sizeof(pCtx->hwvirt.svm.VmcbCtrl));
        if (RT_SUCCESS(rc))
        {
            /*
             * Prepare for guest's "host mode" by clearing internal processor state bits.
             *
             * Some of these like TSC offset can then be used unconditionally in our TM code
             * but the offset in the guest's VMCB will remain as it should as we've written
             * back the VMCB controls above.
             */
            RT_ZERO(pCtx->hwvirt.svm.VmcbCtrl);
#if 0
            /* Clear TSC offset. */
            pCtx->hwvirt.svm.VmcbCtrl.u64TSCOffset = 0;
            pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIrqValid = 0;
#endif
            /* Restore guest's force-flags. */
            if (pCtx->hwvirt.fLocalForcedActions)
                VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);

            /* Clear nested-guest's interrupt pending. */
            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);

            /** @todo Nested paging. */
            /** @todo ASID. */

            /*
             * Reload the guest's "host state".
             */
            PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
            pCtx->es = pHostState->es;
            pCtx->cs = pHostState->cs;
            pCtx->ss = pHostState->ss;
            pCtx->ds = pHostState->ds;
            pCtx->gdtr = pHostState->gdtr;
            pCtx->idtr = pHostState->idtr;
            pCtx->msrEFER = pHostState->uEferMsr;
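            /* Per the AMD spec, #VMEXIT always resumes the host in protected mode:
               CR0.PE is forced set below and EFLAGS.VM is cleared. */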
            pCtx->cr0 = pHostState->uCr0 | X86_CR0_PE;
            pCtx->cr3 = pHostState->uCr3;
            pCtx->cr4 = pHostState->uCr4;
            pCtx->rflags = pHostState->rflags;
            pCtx->rflags.Bits.u1VM = 0;
            pCtx->rip = pHostState->uRip;
            pCtx->rsp = pHostState->uRsp;
            pCtx->rax = pHostState->uRax;
            /* The spec says "Disables all hardware breakpoints in DR7"... */
            pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
            pCtx->dr[7] |= X86_DR7_RA1_MASK;

            rc = VINF_SVM_VMEXIT;
        }
        else
        {
            Log(("HMNstGstSvmVmExit: Writing VMCB at %#RGp failed\n", pCtx->hwvirt.svm.GCPhysVmcb));
            rc = VERR_SVM_VMEXIT_FAILED;
        }

        return rc;
    }

    Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode,
         uExitInfo1, uExitInfo2));
    RT_NOREF2(uExitInfo1, uExitInfo2);
    return VERR_SVM_IPE_5;
}


/**
 * Converts an SVM event type to a TRPM event type.
 *
 * @returns The TRPM event type.
 * @retval  TRPM_32BIT_HACK if the specified event type isn't among the set of
 *          recognized trap types.
 *
 * @param   pEvent  Pointer to the SVM event (see SVM_EVENT_XXX for the type).
 */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent)
{
    uint8_t const uType = pEvent->n.u3Type;
    switch (uType)
    {
        case SVM_EVENT_EXTERNAL_IRQ: return TRPM_HARDWARE_INT;
        case SVM_EVENT_SOFTWARE_INT: return TRPM_SOFTWARE_INT;
        case SVM_EVENT_EXCEPTION:
        case SVM_EVENT_NMI: return TRPM_TRAP;
        default:
            break;
    }
    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
    return TRPM_32BIT_HACK;
}
