source: vbox/trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp@66319

Last change on this file since 66319 was 66319, checked in by vboxsync, 8 years ago

VMM: Nested Hw.virt: Todos.

1/* $Id: HMSVMAll.cpp 66319 2017-03-29 06:33:37Z vboxsync $ */
2/** @file
3 * HM SVM (AMD-V) - All contexts.
4 */
5
6/*
7 * Copyright (C) 2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include "HMInternal.h"
25#include <VBox/vmm/apic.h>
26#include <VBox/vmm/gim.h>
27#include <VBox/vmm/hm.h>
28#include <VBox/vmm/iem.h>
29#include <VBox/vmm/vm.h>
30#include <VBox/vmm/hm_svm.h>
31
32
33#ifndef IN_RC
34/**
35 * Emulates a simple MOV TPR (CR8) instruction, used for TPR patching on 32-bit
36 * guests. This simply looks up the patch record at EIP and performs the required
37 * emulation.
38 * This VMMCALL is used as a fallback mechanism when the mov to/from cr8 instruction
39 * isn't exactly in the form we expect (e.g. not followed by shr 4 as is usually
40 * done for TPR). See hmR3ReplaceTprInstr() for the details.
41 *
42 * @returns VBox status code.
43 * @retval VINF_SUCCESS if the access was handled successfully.
44 * @retval VERR_NOT_FOUND if no patch record for this RIP could be found.
45 * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
46 *
47 * @param pVCpu The cross context virtual CPU structure.
48 * @param pCtx Pointer to the guest-CPU context.
49 * @param pfUpdateRipAndRF Whether the guest RIP/EIP has been updated as
50 * part of the TPR patch operation.
51 */
52static int hmSvmEmulateMovTpr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdateRipAndRF)
53{
54 Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));
55
56 /*
57 * We do this in a loop as we increment the RIP after a successful emulation
58 * and the new RIP may be a patched instruction which needs emulation as well.
59 */
60 bool fUpdateRipAndRF = false;
61 bool fPatchFound = false;
62 PVM pVM = pVCpu->CTX_SUFF(pVM);
63 for (;;)
64 {
65 bool fPending;
66 uint8_t u8Tpr;
67
68 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
69 if (!pPatch)
70 break;
71
72 fPatchFound = true;
73 switch (pPatch->enmType)
74 {
75 case HMTPRINSTR_READ:
76 {
77 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
78 AssertRC(rc);
79
80 rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
81 AssertRC(rc);
82 pCtx->rip += pPatch->cbOp;
83 pCtx->eflags.Bits.u1RF = 0;
84 fUpdateRipAndRF = true;
85 break;
86 }
87
88 case HMTPRINSTR_WRITE_REG:
89 case HMTPRINSTR_WRITE_IMM:
90 {
91 if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
92 {
93 uint32_t u32Val;
94 int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
95 AssertRC(rc);
96 u8Tpr = u32Val;
97 }
98 else
99 u8Tpr = (uint8_t)pPatch->uSrcOperand;
100
101 int rc2 = APICSetTpr(pVCpu, u8Tpr);
102 AssertRC(rc2);
103 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
104
105 pCtx->rip += pPatch->cbOp;
106 pCtx->eflags.Bits.u1RF = 0;
107 fUpdateRipAndRF = true;
108 break;
109 }
110
111 default:
112 {
113 AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
114 pVCpu->hm.s.u32HMError = pPatch->enmType;
115 *pfUpdateRipAndRF = fUpdateRipAndRF;
116 return VERR_SVM_UNEXPECTED_PATCH_TYPE;
117 }
118 }
119 }
120
121 *pfUpdateRipAndRF = fUpdateRipAndRF;
122 if (fPatchFound)
123 return VINF_SUCCESS;
124 return VERR_NOT_FOUND;
125}
126#endif /* !IN_RC */
127
128
129/**
130 * Performs the operations necessary as part of the vmmcall instruction
131 * execution in the guest.
132 *
133 * @returns Strict VBox status code (i.e. informational status codes too).
134 * @retval VINF_SUCCESS on successful handling, no \#UD needs to be thrown,
135 * update RIP and eflags.RF depending on @a pfUpdatedRipAndRF and
136 * continue guest execution.
137 * @retval VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
138 * RIP.
139 * @retval VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
140 *
141 * @param pVCpu The cross context virtual CPU structure.
142 * @param pCtx Pointer to the guest-CPU context.
143 * @param pfUpdatedRipAndRF Whether the guest RIP/EIP has been updated as
144 * part of handling the VMMCALL operation.
145 */
146VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdatedRipAndRF)
147{
148#ifndef IN_RC
149 /*
150 * TPR patched instruction emulation for 32-bit guests.
151 */
152 PVM pVM = pVCpu->CTX_SUFF(pVM);
153 if (pVM->hm.s.fTprPatchingAllowed)
154 {
155 int rc = hmSvmEmulateMovTpr(pVCpu, pCtx, pfUpdatedRipAndRF);
156 if (RT_SUCCESS(rc))
157 return VINF_SUCCESS;
158
159 if (rc != VERR_NOT_FOUND)
160 {
161 Log(("HMSvmVmmcall: hmSvmEmulateMovTpr returns %Rrc\n", rc));
162 return rc;
163 }
164 }
165#endif
166
167 /*
168 * Paravirtualized hypercalls.
169 */
170 *pfUpdatedRipAndRF = false;
171 if (pVCpu->hm.s.fHypercallsEnabled)
172 return GIMHypercall(pVCpu, pCtx);
173
174 return VERR_NOT_AVAILABLE;
175}
176
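/*
 * Illustrative only, not part of the original file: a minimal sketch of how a
 * VMMCALL intercept handler might consume the return contract documented above.
 * The enclosing exit handler (with pVCpu and pCtx in scope) is assumed; only
 * identifiers that appear in this file are referenced.
 */
#if 0
    bool fUpdatedRipAndRF = false;
    VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
    if (   rcStrict == VINF_SUCCESS
        && !fUpdatedRipAndRF)
    {
        /* Neither TPR patching nor GIM advanced the guest RIP, so skip the instruction here. */
        pCtx->rip += 3;                     /* VMMCALL is a 3-byte instruction (0F 01 D9). */
        pCtx->eflags.Bits.u1RF = 0;
    }
    /* VINF_GIM_HYPERCALL_CONTINUING and VINF_GIM_R3_HYPERCALL are propagated as-is;
       a failure status (e.g. VERR_NOT_AVAILABLE) would typically lead to raising #UD. */
    return rcStrict;
#endif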
177
178/**
179 * Performs the operations necessary as part of the vmrun instruction
180 * execution in the guest.
181 *
182 * @returns Strict VBox status code (i.e. informational status codes too).
183 * @retval VINF_SUCCESS successfully executed VMRUN and entered nested-guest
184 * code execution.
185 * @retval VINF_SVM_VMEXIT when executing VMRUN causes a \#VMEXIT
186 * (SVM_EXIT_INVALID most likely).
187 *
188 * @param pVCpu The cross context virtual CPU structure.
189 * @param pCtx Pointer to the guest-CPU context.
190 * @param GCPhysVmcb Guest physical address of the VMCB to run.
191 */
192/** @todo move this to IEM and make the VMRUN version that can execute under
193 * hardware SVM here instead. */
194VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb)
195{
196 Assert(pVCpu);
197 Assert(pCtx);
198 PVM pVM = pVCpu->CTX_SUFF(pVM);
199
200 /*
201 * Cache the physical address of the VMCB for #VMEXIT exceptions.
202 */
203 pCtx->hwvirt.svm.GCPhysVmcb = GCPhysVmcb;
204
205 /*
206 * Read the nested-guest VMCB state-save area and save the host state.
207 */
208 SVMVMCBSTATESAVE VmcbNstGst;
209 int rc = PGMPhysSimpleReadGCPhys(pVM, &VmcbNstGst, GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest), sizeof(SVMVMCBSTATESAVE));
210 if (RT_SUCCESS(rc))
211 {
212 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
213 pHostState->es = pCtx->es;
214 pHostState->cs = pCtx->cs;
215 pHostState->ss = pCtx->ss;
216 pHostState->ds = pCtx->ds;
217 pHostState->gdtr = pCtx->gdtr;
218 pHostState->idtr = pCtx->idtr;
219 pHostState->uEferMsr = pCtx->msrEFER;
220 pHostState->uCr0 = pCtx->cr0;
221 pHostState->uCr3 = pCtx->cr3;
222 pHostState->uCr4 = pCtx->cr4;
223 pHostState->rflags = pCtx->rflags;
224 pHostState->uRip = pCtx->rip;
225 pHostState->uRsp = pCtx->rsp;
226 pHostState->uRax = pCtx->rax;
227
228 /*
229 * Load the VMCB controls.
230 */
231 rc = PGMPhysSimpleReadGCPhys(pVM, &pCtx->hwvirt.svm.VmcbCtrl, GCPhysVmcb, sizeof(pCtx->hwvirt.svm.VmcbCtrl));
232 if (RT_SUCCESS(rc))
233 {
234 PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;
235
236 /*
237 * Validate guest-state and controls.
238 */
239 /* VMRUN must always be intercepted. */
240 if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))
241 {
242 Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n"));
243 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
244 }
245
246 /* Nested paging. */
247 if ( pVmcbCtrl->NestedPaging.n.u1NestedPaging
248 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging)
249 {
250 Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n"));
251 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
252 }
253
254 /* AVIC. */
255 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable
256 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fAvic)
257 {
258 Log(("HMSvmVmRun: AVIC not supported -> #VMEXIT\n"));
259 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
260 }
261
262 /* Last branch record (LBR) virtualization. */
263 if ( (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)
264 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fLbrVirt)
265 {
266 Log(("HMSvmVmRun: LBR virtualization not supported -> #VMEXIT\n"));
267 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
268 }
269
270 /* Guest ASID. */
271 if (!pVmcbCtrl->TLBCtrl.n.u32ASID)
272 {
273 Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n"));
274 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
275 }
276
277 /* IO permission bitmap. */
278 RTGCPHYS GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
279 if ( (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
280 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap))
281 {
282 Log(("HMSvmVmRun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
283 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
284 }
285
286 /* MSR permission bitmap. */
287 RTGCPHYS GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
288 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
289 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap))
290 {
291 Log(("HMSvmVmRun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
292 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
293 }
294
295 /* CR0. */
296 if ( !(VmcbNstGst.u64CR0 & X86_CR0_CD)
297 && (VmcbNstGst.u64CR0 & X86_CR0_NW))
298 {
299 Log(("HMSvmVmRun: CR0.NW set while CR0.CD is clear. CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0));
300 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
301 }
302 if (VmcbNstGst.u64CR0 >> 32)
303 {
304 Log(("HMSvmVmRun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0));
305 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
306 }
307 /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */
308
309 /* DR6 and DR7. */
310 if ( VmcbNstGst.u64DR6 >> 32
311 || VmcbNstGst.u64DR7 >> 32)
312 {
313 Log(("HMSvmVmRun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64DR6,
314 VmcbNstGst.u64DR7));
315 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
316 }
317
318 /** @todo gPAT MSR validation? */
319
320 /*
321 * Copy segments from nested-guest VMCB state to the guest-CPU state.
322 *
323 * We do this here as we need to use the CS attributes and it's easier this way
324 * than using the VMCB format selectors. It doesn't really matter where we copy
325 * the state, we restore the guest-CPU context state on the \#VMEXIT anyway.
326 */
327 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, ES, es);
328 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, CS, cs);
329 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, SS, ss);
330 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, DS, ds);
331
332 /** @todo Segment attribute overrides by VMRUN. */
333
334 /*
335 * CPL adjustments and overrides.
336 *
337 * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().
338 * We shall thus adjust both CS.DPL and SS.DPL here.
339 */
340 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = VmcbNstGst.u8CPL;
341 if (CPUMIsGuestInV86ModeEx(pCtx))
342 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 3;
343 if (CPUMIsGuestInRealModeEx(pCtx))
344 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 0;
345
346 /*
347 * Continue validating guest-state and controls.
348 */
349 /* EFER, CR0 and CR4. */
350 uint64_t uValidEfer;
351 rc = CPUMGetValidateEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);
352 if (RT_FAILURE(rc))
353 {
354 Log(("HMSvmVmRun: EFER invalid. uEfer=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64EFER));
355 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
356 }
357 bool const fSvm = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
358 bool const fLongModeSupported = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
359 bool const fLongModeEnabled = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
360 bool const fPaging = RT_BOOL(VmcbNstGst.u64CR0 & X86_CR0_PG);
361 bool const fPae = RT_BOOL(VmcbNstGst.u64CR4 & X86_CR4_PAE);
362 bool const fProtMode = RT_BOOL(VmcbNstGst.u64CR0 & X86_CR0_PE);
363 bool const fLongModeWithPaging = fLongModeEnabled && fPaging;
364 bool const fLongModeConformCS = pCtx->cs.Attr.n.u1Long && pCtx->cs.Attr.n.u1DefBig;
365 /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */
366 if (fLongModeWithPaging)
367 uValidEfer |= MSR_K6_EFER_LMA;
368 bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
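 /* The checks below mirror the AMD spec's canonicalization and consistency checks
 for VMRUN: EFER.SVME must be set; long mode must not be enabled on a CPU
 without long-mode support; enabling long mode with paging requires both PAE
 and protected mode; and CS.L together with CS.D is illegal while long mode,
 paging and PAE are all in effect. Any violation yields SVM_EXIT_INVALID. */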
369 if ( !fSvm
370 || (!fLongModeSupported && fLongModeActiveOrEnabled)
371 || (fLongModeWithPaging && !fPae)
372 || (fLongModeWithPaging && !fProtMode)
373 || ( fLongModeEnabled
374 && fPaging
375 && fPae
376 && fLongModeConformCS))
377 {
378 Log(("HMSvmVmRun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));
379 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
380 }
381
382 /*
383 * Preserve the required force-flags.
384 *
385 * We only preserve the force-flags that would affect the execution of the
386 * nested-guest (or the guest).
387 *
388 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be preserved as it's for a single
389 * instruction which is this VMRUN instruction itself.
390 *
391 * - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
392 * execution of a subsequent IRET instruction in the guest.
393 *
394 * - The remaining FFs (e.g. timers) can stay in place so that we will be
395 * able to generate interrupts that should cause #VMEXITs for the
396 * nested-guest.
397 */
398 /** @todo anything else missing here? */
399 pCtx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
400
401 /*
402 * Interrupt shadow.
403 */
404 if (pVmcbCtrl->u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
405 EMSetInhibitInterruptsPC(pVCpu, VmcbNstGst.u64RIP);
406
407 /*
408 * TLB flush control.
409 */
410 /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
411 if ( pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
412 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
413 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
414 PGMFlushTLB(pVCpu, VmcbNstGst.u64CR3, true /* fGlobal */);
415
416 /** @todo @bugref{7243}: SVM TSC offset, see tmCpuTickGetInternal. */
417
418 /*
419 * Copy the remaining guest state from the VMCB to the guest-CPU context.
420 */
421 pCtx->gdtr.cbGdt = VmcbNstGst.GDTR.u32Limit;
422 pCtx->gdtr.pGdt = VmcbNstGst.GDTR.u64Base;
423 pCtx->idtr.cbIdt = VmcbNstGst.IDTR.u32Limit;
424 pCtx->idtr.pIdt = VmcbNstGst.IDTR.u64Base;
425 pCtx->cr0 = VmcbNstGst.u64CR0; /** @todo What about informing PGM about CR0.WP? */
426 pCtx->cr4 = VmcbNstGst.u64CR4;
427 pCtx->cr3 = VmcbNstGst.u64CR3;
428 pCtx->cr2 = VmcbNstGst.u64CR2;
429 pCtx->dr[6] = VmcbNstGst.u64DR6;
430 pCtx->dr[7] = VmcbNstGst.u64DR7;
431 pCtx->rflags.u = VmcbNstGst.u64RFlags;
432 pCtx->rax = VmcbNstGst.u64RAX;
433 pCtx->rsp = VmcbNstGst.u64RSP;
434 pCtx->rip = VmcbNstGst.u64RIP;
435 pCtx->msrEFER = uValidEfer;
436
437 /* Apply the mandatory must-be-zero and must-be-one masks to DR6 and DR7. */
438 pCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
439 pCtx->dr[6] |= X86_DR6_RA1_MASK;
440 pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
441 pCtx->dr[7] |= X86_DR7_RA1_MASK;
442
443 /*
444 * Check for pending virtual interrupts.
445 */
446 if (pVmcbCtrl->IntCtrl.n.u1VIrqPending)
447 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
448
449 /*
450 * Set the global interrupt flag (GIF) to allow interrupts in the nested-guest.
451 */
452 pCtx->hwvirt.svm.fGif = 1;
453
454 /*
455 * Event injection.
456 */
457 PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;
458 if (pEventInject->n.u1Valid)
459 {
460 uint8_t const uVector = pEventInject->n.u8Vector;
461 TRPMEVENT const enmType = hmSvmEventToTrpmEventType(pEventInject);
462 uint16_t const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;
463
464 /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */
465 if (enmType == TRPM_32BIT_HACK)
466 {
467 Log(("HMSvmVmRun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));
468 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
469 }
470 if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)
471 {
472 if ( uVector == X86_XCPT_NMI
473 || uVector > 31 /* X86_XCPT_MAX */)
474 {
475 Log(("HMSvmVmRun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));
476 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
477 }
478 if ( uVector == X86_XCPT_BR
479 && CPUMIsGuestInLongModeEx(pCtx))
480 {
481 Log(("HMSvmVmRun: Cannot inject #BR in long mode -> #VMEXIT\n"));
482 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
483 }
484 /** @todo any others? */
485 }
486
487 /*
488 * Update the exit interruption info field so that if an exception occurs
489 * while delivering the event causing a #VMEXIT, we only need to update
490 * the valid bit while the rest is already in place.
491 */
492 pVmcbCtrl->ExitIntInfo.u = pVmcbCtrl->EventInject.u;
493 pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;
494
495 /** @todo NRIP: Software interrupts can only be pushed properly if we support
496 * NRIP for the nested-guest to calculate the instruction length
497 * below. */
498 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);
499 if (rcStrict == VINF_SVM_VMEXIT)
500 return rcStrict;
501 }
502
503 return VINF_SUCCESS;
504 }
505
506 /* Shouldn't really happen as the caller should've validated the physical address already. */
507 Log(("HMSvmVmRun: Failed to read nested-guest VMCB control area at %#RGp -> #VMEXIT\n",
508 GCPhysVmcb));
509 return VERR_SVM_IPE_4;
510 }
511
512 /* Shouldn't really happen as the caller should've validated the physical address already. */
513 Log(("HMSvmVmRun: Failed to read nested-guest VMCB save-state area at %#RGp -> #VMEXIT\n",
514 GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest)));
515 return VERR_SVM_IPE_5;
516}
517
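/*
 * Illustrative only, not part of the original file: a minimal sketch of the
 * VMRUN return contract documented above, as a hypothetical instruction
 * intercept handler might use it. VMRUN takes the physical address of the
 * VMCB in RAX; any further validation of that address is omitted here.
 */
#if 0
    VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, pCtx->rax /* GCPhysVmcb */);
    if (rcStrict == VINF_SUCCESS)
    {
        /* The nested-guest state is now loaded into pCtx; resume execution there. */
    }
    else if (rcStrict == VINF_SVM_VMEXIT)
    {
        /* A consistency check failed; the SVM_EXIT_INVALID #VMEXIT state has already
           been written back and the guest's "host state" reloaded. */
    }
#endif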
518
519/**
520 * SVM nested-guest \#VMEXIT handler.
521 *
522 * @returns Strict VBox status code.
523 * @retval VINF_SVM_VMEXIT when the \#VMEXIT is successful.
524 * @retval VERR_SVM_VMEXIT_FAILED when the \#VMEXIT failed restoring the guest's
525 * "host state" and a shutdown is required.
526 *
527 * @param pVCpu The cross context virtual CPU structure.
528 * @param pCtx The guest-CPU context.
529 * @param uExitCode The exit code.
530 * @param uExitInfo1 The exit info. 1 field.
531 * @param uExitInfo2 The exit info. 2 field.
532 */
533VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
534 uint64_t uExitInfo2)
535{
536 if ( CPUMIsGuestInNestedHwVirtMode(pCtx)
537 || uExitCode == SVM_EXIT_INVALID)
538 {
539 RT_NOREF(pVCpu);
540
541 pCtx->hwvirt.svm.fGif = 0;
542
543 /* Note: The saved host state, the cached VMCB controls and the cached VMCB
544 address (GCPhysVmcb) are all consumed below for the VMCB write-back and
545 the host-state reload, so they must still be valid at this point. */
546
547
548 /*
549 * Save the nested-guest state into the VMCB state-save area.
550 */
551 SVMVMCBSTATESAVE VmcbNstGst;
552 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, ES, es);
553 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, CS, cs);
554 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, SS, ss);
555 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, DS, ds);
556 VmcbNstGst.GDTR.u32Limit = pCtx->gdtr.cbGdt;
557 VmcbNstGst.GDTR.u64Base = pCtx->gdtr.pGdt;
558 VmcbNstGst.IDTR.u32Limit = pCtx->idtr.cbIdt;
559 VmcbNstGst.IDTR.u64Base = pCtx->idtr.pIdt;
560 VmcbNstGst.u64EFER = pCtx->msrEFER;
561 VmcbNstGst.u64CR4 = pCtx->cr4;
562 VmcbNstGst.u64CR3 = pCtx->cr3;
563 VmcbNstGst.u64CR2 = pCtx->cr2;
564 VmcbNstGst.u64CR0 = pCtx->cr0;
565 /** @todo Nested paging. */
566 VmcbNstGst.u64RFlags = pCtx->rflags.u64;
567 VmcbNstGst.u64RIP = pCtx->rip;
568 VmcbNstGst.u64RSP = pCtx->rsp;
569 VmcbNstGst.u64RAX = pCtx->rax;
570 VmcbNstGst.u64DR7 = pCtx->dr[7];
571 VmcbNstGst.u64DR6 = pCtx->dr[6];
572 VmcbNstGst.u8CPL = pCtx->ss.Attr.n.u2Dpl; /* See comment in CPUMGetGuestCPL(). */
573
574 /* Save interrupt shadow of the nested-guest instruction if any. */
575 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
576 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
577 {
578 /* The remaining VMCB controls are left intact; they are written back to the guest VMCB below. */
579 pCtx->hwvirt.svm.VmcbCtrl.u64IntShadow |= SVM_INTERRUPT_SHADOW_ACTIVE;
580 }
581
582 /*
583 * Save additional state and intercept information.
584 */
585 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
586 {
587 Assert(pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIrqPending);
588 Assert(pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u8VIntrVector);
589 }
590 /** @todo Save V_TPR, V_IRQ. */
591 /** @todo NRIP. */
592
593 /* Save exit information. */
594 pCtx->hwvirt.svm.VmcbCtrl.u64ExitCode = uExitCode;
595 pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo1 = uExitInfo1;
596 pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo2 = uExitInfo2;
597
598 /*
599 * Clear event injection in the VMCB.
600 */
601 pCtx->hwvirt.svm.VmcbCtrl.EventInject.n.u1Valid = 0;
602
603 /*
604 * Write back the VMCB controls to the guest VMCB in guest physical memory.
605 */
606 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->hwvirt.svm.GCPhysVmcb, &pCtx->hwvirt.svm.VmcbCtrl,
607 sizeof(pCtx->hwvirt.svm.VmcbCtrl));
608 if (RT_SUCCESS(rc))
609 {
610 /*
611 * Prepare for guest's "host mode" by clearing internal processor state bits.
612 *
613 * Some of these like TSC offset can then be used unconditionally in our TM code
614 * but the offset in the guest's VMCB will remain as it should as we've written
615 * back the VMCB controls above.
616 */
617 RT_ZERO(pCtx->hwvirt.svm.VmcbCtrl);
618#if 0
619 /* Clear TSC offset. */
620 pCtx->hwvirt.svm.VmcbCtrl.u64TSCOffset = 0;
621 pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIrqValid = 0;
622 pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIntrMasking = 0;
623#endif
624 /* Restore guest's force-flags. */
625 if (pCtx->hwvirt.fLocalForcedActions)
626 VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);
627
628 /* Clear nested-guest's interrupt pending. */
629 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
630 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
631
632 /** @todo Nested paging. */
633 /** @todo ASID. */
634
635 /*
636 * Reload the guest's "host state".
637 */
638 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
639 pCtx->es = pHostState->es;
640 pCtx->cs = pHostState->cs;
641 pCtx->ss = pHostState->ss;
642 pCtx->ds = pHostState->ds;
643 pCtx->gdtr = pHostState->gdtr;
644 pCtx->idtr = pHostState->idtr;
645 pCtx->msrEFER = pHostState->uEferMsr;
646 pCtx->cr0 = pHostState->uCr0 | X86_CR0_PE;
647 pCtx->cr3 = pHostState->uCr3;
648 pCtx->cr4 = pHostState->uCr4;
649 pCtx->rflags = pHostState->rflags;
650 pCtx->rflags.Bits.u1VM = 0;
651 pCtx->rip = pHostState->uRip;
652 pCtx->rsp = pHostState->uRsp;
653 pCtx->rax = pHostState->uRax;
654 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
655 pCtx->dr[7] |= X86_DR7_RA1_MASK;
656
657 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
658 * raise #GP(0) in the guest. */
659
660 /** @todo check the loaded host-state for consistency. Figure out what
661 * exactly this involves? */
662
663 rc = VINF_SVM_VMEXIT;
664 }
665 else
666 {
667 Log(("HMNstGstSvmVmExit: Writing VMCB at %#RGp failed\n", pCtx->hwvirt.svm.GCPhysVmcb));
668 rc = VERR_SVM_VMEXIT_FAILED;
669 }
670
671 return rc;
672 }
673
674 Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode,
675 uExitInfo1, uExitInfo2));
676 RT_NOREF2(uExitInfo1, uExitInfo2);
677 return VERR_SVM_IPE_5;
678}
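/*
 * Illustrative only, not part of the original file: how an intercepted event in
 * the nested-guest would typically be reflected to the guest hypervisor as a
 * #VMEXIT. Shown for a nested VMRUN intercept, assuming the usual SVM_EXIT_VMRUN
 * exit code; a real handler picks the exit code and exit-info fields matching
 * the intercepted event.
 */
#if 0
    if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
        && CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))
        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
#endif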
679
680
681/**
682 * Converts an SVM event type to a TRPM event type.
683 *
684 * @returns The TRPM event type.
685 * @retval TRPM_32BIT_HACK if the specified type of event isn't among the set
686 * of recognized trap types.
687 *
688 * @param pEvent Pointer to the SVM event.
689 */
690VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent)
691{
692 uint8_t const uType = pEvent->n.u3Type;
693 switch (uType)
694 {
695 case SVM_EVENT_EXTERNAL_IRQ: return TRPM_HARDWARE_INT;
696 case SVM_EVENT_SOFTWARE_INT: return TRPM_SOFTWARE_INT;
697 case SVM_EVENT_EXCEPTION:
698 case SVM_EVENT_NMI: return TRPM_TRAP;
699 default:
700 break;
701 }
702 AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
703 return TRPM_32BIT_HACK;
704}
705
706
707/**
708 * Checks whether an interrupt is pending for the nested-guest.
709 *
710 * @returns true if there is an interrupt pending for the nested-guest,
711 * false otherwise.
712 *
713 * @param pCtx The guest-CPU context.
714 */
715VMM_INT_DECL(bool) HMSvmNstGstIsInterruptPending(PCCPUMCTX pCtx)
716{
717 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;
718 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
719 return false;
720
721 X86RFLAGS RFlags;
722 if (pVmcbCtrl->IntCtrl.n.u1VIntrMasking)
723 RFlags.u = pCtx->rflags.u;
724 else
725 RFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
726
727 if (!RFlags.Bits.u1IF)
728 return false;
729
730 return RT_BOOL(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
731}
732
733
734/**
735 * Gets the pending nested-guest interrupt.
736 *
737 * @returns VBox status code.
738 * @retval VINF_SUCCESS on success.
739 * @retval VERR_APIC_INTR_MASKED_BY_TPR when an APIC interrupt is pending but
740 * can't be delivered due to TPR priority.
741 * @retval VERR_NO_DATA if there is no interrupt to be delivered (either APIC
742 * has been software-disabled since it flagged something was pending,
743 * or other reasons).
744 *
745 * @param pCtx The guest-CPU context.
746 * @param pu8Interrupt Where to store the interrupt.
747 */
748VMM_INT_DECL(int) HMSvmNstGstGetInterrupt(PCCPUMCTX pCtx, uint8_t *pu8Interrupt)
749{
750 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;
751 /** @todo remove later, paranoia for now. */
752#ifdef DEBUG_ramshankar
753 Assert(HMSvmNstGstIsInterruptPending(pCtx));
754#endif
755
756 *pu8Interrupt = pVmcbCtrl->IntCtrl.n.u8VIntrVector;
757 if ( pVmcbCtrl->IntCtrl.n.u1IgnoreTPR
758 || pVmcbCtrl->IntCtrl.n.u4VIntrPrio > pVmcbCtrl->IntCtrl.n.u8VTPR)
759 return VINF_SUCCESS;
760
761 return VERR_APIC_INTR_MASKED_BY_TPR;
762}
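/*
 * Illustrative only, not part of the original file: the intended pairing of the
 * two interrupt queries above when deciding whether to deliver a virtual
 * interrupt to the nested-guest. The actual delivery step is omitted.
 */
#if 0
    if (HMSvmNstGstIsInterruptPending(pCtx))
    {
        uint8_t u8Interrupt;
        int rc = HMSvmNstGstGetInterrupt(pCtx, &u8Interrupt);
        if (RT_SUCCESS(rc))
        {
            /* Deliver vector u8Interrupt to the nested-guest (e.g. as a TRPM_HARDWARE_INT event). */
        }
        else
        {
            Assert(rc == VERR_APIC_INTR_MASKED_BY_TPR);
            /* Leave the interrupt pending; V_TPR currently masks this priority. */
        }
    }
#endif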
763