VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h@70464

Last change on this file since 70464 was 70464, checked in by vboxsync, 7 years ago

VMM/IEM: Nested Hw.virt: Only update EXTINTINFO on #VMEXIT when intercepting event injection through IEM, otherwise real hw should have updated it and we shouldn't overwrite it. Also fix only-in-IEM execution when not intercepting CLGI/STGI when the VGIF feature is being used.

/* $Id: IEMAllCImplSvmInstr.cpp.h 70464 2018-01-05 09:27:16Z vboxsync $ */
/** @file
 * IEM - AMD-V (Secure Virtual Machine) instruction implementation.
 */

/*
 * Copyright (C) 2011-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/**
 * Converts an IEM exception event type to an SVM event type.
 *
 * @returns The SVM event type.
 * @retval  UINT8_MAX if the specified type of event isn't among the set
 *          of recognized IEM event types.
 *
 * @param   uVector         The vector of the event.
 * @param   fIemXcptFlags   The IEM exception / interrupt flags.
 */
IEM_STATIC uint8_t iemGetSvmEventType(uint32_t uVector, uint32_t fIemXcptFlags)
{
    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
    {
        if (uVector != X86_XCPT_NMI)
            return SVM_EVENT_EXCEPTION;
        return SVM_EVENT_NMI;
    }

    /* See AMD spec. Table 15-1. "Guest Exception or Interrupt Types". */
    if (fIemXcptFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
        return SVM_EVENT_EXCEPTION;

    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT)
        return SVM_EVENT_EXTERNAL_IRQ;

    if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
        return SVM_EVENT_SOFTWARE_INT;

    AssertMsgFailed(("iemGetSvmEventType: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector));
    return UINT8_MAX;
}
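
/* For reference: per AMD spec. Table 15-1, the event type values used above are
   expected to encode as follows (assuming the SVM_EVENT_XXX constants follow the
   spec): SVM_EVENT_EXTERNAL_IRQ = 0 (physical/virtual INTR), SVM_EVENT_NMI = 2,
   SVM_EVENT_EXCEPTION = 3 (hardware exception, incl. INT3/INTO/ICEBP) and
   SVM_EVENT_SOFTWARE_INT = 4 (INTn instruction). */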


/**
 * Performs an SVM world-switch (VMRUN, \#VMEXIT) updating PGM and IEM internals.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    The guest-CPU context.
 */
DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Flush the TLB with new CR3. This is required in case the PGM mode change
     * below doesn't actually change anything.
     */
    PGMFlushTLB(pVCpu, pCtx->cr3, true);

    /*
     * Inform PGM about paging mode changes.
     * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
     * see comment in iemMemPageTranslateAndCheckAccess().
     */
    int rc = PGMChangeMode(pVCpu, pCtx->cr0 | X86_CR0_PE, pCtx->cr4, pCtx->msrEFER);
#ifdef IN_RING3
    Assert(rc != VINF_PGM_CHANGE_MODE);
#endif
    AssertRCReturn(rc, rc);

    /* Inform CPUM (recompiler), can later be removed. */
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);

    /* Re-initialize IEM cache/state after the drastic mode switch. */
    iemReInitExec(pVCpu);
    return rc;
}


/**
 * SVM \#VMEXIT handler.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SVM_VMEXIT when the \#VMEXIT is successful.
 * @retval  VERR_SVM_VMEXIT_FAILED when the \#VMEXIT failed restoring the guest's
 *          "host state" and a shutdown is required.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        The guest-CPU context.
 * @param   uExitCode   The exit code.
 * @param   uExitInfo1  The exit info. 1 field.
 * @param   uExitInfo2  The exit info. 2 field.
 */
IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
{
    VBOXSTRICTRC rcStrict;
    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
        || uExitCode == SVM_EXIT_INVALID)
    {
        LogFlow(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pCtx->cs.Sel,
                 pCtx->rip, uExitCode, uExitInfo1, uExitInfo2));

        /*
         * Disable the global interrupt flag to prevent interrupts during the 'atomic' world switch.
         */
        pCtx->hwvirt.svm.fGif = false;

        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));

        /*
         * Save the nested-guest state into the VMCB state-save area.
         */
        PSVMVMCB          pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
        PSVMVMCBCTRL      pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
        PSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;

        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbNstGstState, ES, es);
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbNstGstState, CS, cs);
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbNstGstState, SS, ss);
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcbNstGstState, DS, ds);
        pVmcbNstGstState->GDTR.u32Limit = pCtx->gdtr.cbGdt;
        pVmcbNstGstState->GDTR.u64Base  = pCtx->gdtr.pGdt;
        pVmcbNstGstState->IDTR.u32Limit = pCtx->idtr.cbIdt;
        pVmcbNstGstState->IDTR.u64Base  = pCtx->idtr.pIdt;
        pVmcbNstGstState->u64EFER       = pCtx->msrEFER;
        pVmcbNstGstState->u64CR4        = pCtx->cr4;
        pVmcbNstGstState->u64CR3        = pCtx->cr3;
        pVmcbNstGstState->u64CR2        = pCtx->cr2;
        pVmcbNstGstState->u64CR0        = pCtx->cr0;
        /** @todo Nested paging. */
        pVmcbNstGstState->u64RFlags     = pCtx->rflags.u64;
        pVmcbNstGstState->u64RIP        = pCtx->rip;
        pVmcbNstGstState->u64RSP        = pCtx->rsp;
        pVmcbNstGstState->u64RAX        = pCtx->rax;
        pVmcbNstGstState->u64DR7        = pCtx->dr[7];
        pVmcbNstGstState->u64DR6        = pCtx->dr[6];
        pVmcbNstGstState->u8CPL         = pCtx->ss.Attr.n.u2Dpl;   /* See comment in CPUMGetGuestCPL(). */
        Assert(CPUMGetGuestCPL(pVCpu) == pCtx->ss.Attr.n.u2Dpl);

        PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        /* Record any interrupt shadow of the nested-guest instruction into the nested-guest VMCB. */
        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
        {
            pVmcbCtrl->IntShadow.n.u1IntShadow = 1;

            /* Clear the inhibit-interrupt force-flag so as to not affect the outer guest. */
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
            LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pCtx->rip));
        }

        /*
         * Save additional state and intercept information.
         */
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
        {
            Assert(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
        }
        else
            pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;

        /** @todo Save V_TPR, V_IRQ. */
        /** @todo NRIP. */

        /* Save exit information. */
        pVmcbCtrl->u64ExitCode  = uExitCode;
        pVmcbCtrl->u64ExitInfo1 = uExitInfo1;
        pVmcbCtrl->u64ExitInfo2 = uExitInfo2;

        /*
         * Update the exit interrupt-information field if this #VMEXIT happened as a result
         * of delivering an event through IEM.
         *
         * Don't update the exit interrupt-information field if the event wasn't being injected
         * through IEM, as it may have been updated by real hardware if the nested-guest was
         * executed using hardware-assisted SVM.
         */
        {
            uint8_t  uExitIntVector;
            uint32_t uExitIntErr;
            uint32_t fExitIntFlags;
            bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
                                                         NULL /* uExitIntCr2 */);
            if (fRaisingEvent)
            {
                pVmcbCtrl->ExitIntInfo.n.u1Valid  = 1;
                pVmcbCtrl->ExitIntInfo.n.u8Vector = uExitIntVector;
                pVmcbCtrl->ExitIntInfo.n.u3Type   = iemGetSvmEventType(uExitIntVector, fExitIntFlags);
                if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
                {
                    pVmcbCtrl->ExitIntInfo.n.u1ErrorCodeValid = true;
                    pVmcbCtrl->ExitIntInfo.n.u32ErrorCode     = uExitIntErr;
                }
            }
        }

        /*
         * Clear event injection in the VMCB.
         */
        pVmcbCtrl->EventInject.n.u1Valid = 0;

        /*
         * Notify HM in case the nested-guest was executed using hardware-assisted SVM
         * (which may have modified some VMCB state) that needs to be restored on
         * #VMEXIT before writing the VMCB back to guest memory.
         */
        HMSvmNstGstVmExitNotify(pVCpu, pCtx);

        /*
         * Write back the nested-guest's VMCB to its guest physical memory location.
         */
        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->hwvirt.svm.GCPhysVmcb, pVmcbNstGst, sizeof(*pVmcbNstGst));

        /*
         * Prepare for guest's "host mode" by clearing internal processor state bits.
         *
         * We don't need to zero out the entire state-save area; clearing the controls
         * is sufficient as they contain the critical bit indicating whether we're
         * inside the nested-guest or not.
         */
        memset(pVmcbNstGstCtrl, 0, sizeof(*pVmcbNstGstCtrl));
        Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));

        /*
         * Restore the subset of force-flags that were preserved.
         */
        if (pCtx->hwvirt.fLocalForcedActions)
        {
            VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);
            pCtx->hwvirt.fLocalForcedActions = 0;
        }

        if (RT_SUCCESS(rcStrict))
        {
            /** @todo Nested paging. */
            /** @todo ASID. */

            /*
             * Reload the guest's "host state".
             */
            CPUMSvmVmExitRestoreHostState(pVCpu, pCtx);

            /*
             * Update PGM, IEM and others of a world-switch.
             */
            rcStrict = iemSvmWorldSwitch(pVCpu, pCtx);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = VINF_SVM_VMEXIT;
            else if (RT_SUCCESS(rcStrict))
            {
                LogFlow(("iemSvmVmexit: Setting passup status from iemSvmWorldSwitch %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                iemSetPassUpStatus(pVCpu, rcStrict);
                rcStrict = VINF_SVM_VMEXIT;
            }
            else
                LogFlow(("iemSvmVmexit: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }
        else
        {
            LogFlow(("iemSvmVmexit: Writing VMCB at %#RGp failed. rc=%Rrc\n", pCtx->hwvirt.svm.GCPhysVmcb,
                     VBOXSTRICTRC_VAL(rcStrict)));
            rcStrict = VERR_SVM_VMEXIT_FAILED;
        }
    }
    else
    {
        Log(("iemSvmVmexit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode,
             uExitInfo1, uExitInfo2));
        AssertMsgFailed(("iemSvmVmexit: Unexpected SVM-exit failure uExitCode=%#RX64\n", uExitCode));
        rcStrict = VERR_SVM_IPE_5;
    }

# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    /* CLGI/STGI may not have been intercepted and thus not executed in IEM. */
    if (HMSvmIsVGifActive(pVCpu->CTX_SUFF(pVM)))
        return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
# endif
    return rcStrict;
}
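
/* Note: the intercept handlers further down typically reach iemSvmVmexit() through
   the IEM_RETURN_SVM_VMEXIT() convenience macro used throughout this file, e.g.
       IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_NMI, 0, 0);
   which performs the #VMEXIT and returns the resulting strict status code. */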


/**
 * Performs the operations necessary as part of VMRUN instruction execution in
 * the guest.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS successfully executed VMRUN and entered nested-guest
 *          code execution.
 * @retval  VINF_SVM_VMEXIT when executing VMRUN causes a \#VMEXIT
 *          (SVM_EXIT_INVALID most likely).
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   cbInstr     The length of the VMRUN instruction.
 * @param   GCPhysVmcb  Guest physical address of the VMCB to run.
 */
IEM_STATIC VBOXSTRICTRC iemSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb)
{
    LogFlow(("iemSvmVmrun\n"));

#ifdef IN_RING0
    /*
     * Until PGM can handle switching the guest paging mode in ring-0,
     * there's no point in trying to emulate VMRUN in ring-0 as we have
     * to go back to ring-3 anyway, see @bugref{7243#c48}.
     */
    RT_NOREF(pVCpu, pCtx, cbInstr, GCPhysVmcb);
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
#else

    /*
     * Cache the physical address of the VMCB for #VMEXIT exceptions.
     */
    pCtx->hwvirt.svm.GCPhysVmcb = GCPhysVmcb;

    /*
     * Save the host state.
     */
    CPUMSvmVmRunSaveHostState(pCtx, cbInstr);

    /*
     * Read the guest VMCB state.
     */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    int rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pVmcb), GCPhysVmcb, sizeof(SVMVMCB));
    if (RT_SUCCESS(rc))
    {
        PSVMVMCBCTRL      pVmcbCtrl   = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        PSVMVMCBSTATESAVE pVmcbNstGst = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->guest;

        /*
         * Validate guest-state and controls.
         */
        /* VMRUN must always be intercepted. */
        if (!CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))
        {
            Log(("iemSvmVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* Nested paging. */
        if (    pVmcbCtrl->NestedPaging.n.u1NestedPaging
            && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
        {
            Log(("iemSvmVmrun: Nested paging not supported -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* AVIC. */
        if (    pVmcbCtrl->IntCtrl.n.u1AvicEnable
            && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
        {
            Log(("iemSvmVmrun: AVIC not supported -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* Last branch record (LBR) virtualization. */
        if (    pVmcbCtrl->LbrVirt.n.u1LbrVirt
            && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
        {
            Log(("iemSvmVmrun: LBR virtualization not supported -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* Virtualized VMSAVE/VMLOAD. */
        if (    pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload
            && !pVM->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
        {
            Log(("iemSvmVmrun: Virtualized VMSAVE/VMLOAD not supported -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* Virtual GIF. */
        if (    pVmcbCtrl->IntCtrl.n.u1VGifEnable
            && !pVM->cpum.ro.GuestFeatures.fSvmVGif)
        {
            Log(("iemSvmVmrun: Virtual GIF not supported -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* Guest ASID. */
        if (!pVmcbCtrl->TLBCtrl.n.u32ASID)
        {
            Log(("iemSvmVmrun: Guest ASID is invalid -> #VMEXIT\n"));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* IO permission bitmap. */
        RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
        if (   (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))
        {
            Log(("iemSvmVmrun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* MSR permission bitmap. */
        RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
        if (   (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)
            || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))
        {
            Log(("iemSvmVmrun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* CR0. */
        if (   !(pVmcbNstGst->u64CR0 & X86_CR0_CD)
            &&  (pVmcbNstGst->u64CR0 & X86_CR0_NW))
        {
            Log(("iemSvmVmrun: CR0.NW set without CR0.CD. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }
        if (pVmcbNstGst->u64CR0 >> 32)
        {
            Log(("iemSvmVmrun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }
        /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */

        /* DR6 and DR7. */
        if (   pVmcbNstGst->u64DR6 >> 32
            || pVmcbNstGst->u64DR7 >> 32)
        {
            Log(("iemSvmVmrun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64DR6,
                 pVmcbNstGst->u64DR7));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /** @todo gPAT MSR validation? */

        /*
         * Copy the IO permission bitmap into the cache.
         */
        Assert(pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap));
        rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,
                                     SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
        if (RT_FAILURE(rc))
        {
            Log(("iemSvmVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /*
         * Copy the MSR permission bitmap into the cache.
         */
        Assert(pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
        rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,
                                     SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
        if (RT_FAILURE(rc))
        {
            Log(("iemSvmVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /*
         * Copy segments from nested-guest VMCB state to the guest-CPU state.
         *
         * We do this here as we need to use the CS attributes and it's easier this way
         * than using the VMCB format selectors. It doesn't really matter where we copy
         * the state; we restore the guest-CPU context state on the \#VMEXIT anyway.
         */
        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGst, ES, es);
        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGst, CS, cs);
        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGst, SS, ss);
        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGst, DS, ds);

        /** @todo Segment attribute overrides by VMRUN. */

        /*
         * CPL adjustments and overrides.
         *
         * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().
         * We shall thus adjust both CS.DPL and SS.DPL here.
         */
        pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = pVmcbNstGst->u8CPL;
        if (CPUMIsGuestInV86ModeEx(pCtx))
            pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 3;
        if (CPUMIsGuestInRealModeEx(pCtx))
            pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 0;
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));

        /*
         * Continue validating guest-state and controls.
         *
         * We pass CR0 as 0 to CPUMQueryValidatedGuestEfer below to skip the illegal
         * EFER.LME bit transition check. We pass the nested-guest's EFER as both the
         * old and new EFER value to not have any guest EFER bits influence the new
         * nested-guest EFER.
         */
        uint64_t uValidEfer;
        rc = CPUMQueryValidatedGuestEfer(pVM, 0 /* CR0 */, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
        if (RT_FAILURE(rc))
        {
            Log(("iemSvmVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64EFER));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }

        /* Validate paging and CPU mode bits. */
        bool const fSvm                     = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
        bool const fLongModeSupported       = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
        bool const fLongModeEnabled         = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
        bool const fPaging                  = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PG);
        bool const fPae                     = RT_BOOL(pVmcbNstGst->u64CR4 & X86_CR4_PAE);
        bool const fProtMode                = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PE);
        bool const fLongModeWithPaging      = fLongModeEnabled && fPaging;
        bool const fLongModeConformCS       = pCtx->cs.Attr.n.u1Long && pCtx->cs.Attr.n.u1DefBig;
        /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */
        if (fLongModeWithPaging)
            uValidEfer |= MSR_K6_EFER_LMA;
        bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
        if (   !fSvm
            || (!fLongModeSupported && fLongModeActiveOrEnabled)
            || (fLongModeWithPaging && !fPae)
            || (fLongModeWithPaging && !fProtMode)
            || (   fLongModeEnabled
                && fPaging
                && fPae
                && fLongModeConformCS))
        {
            Log(("iemSvmVmrun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }
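
        /* The checks above mirror the canonicalization and consistency checks the
           CPU itself performs on VMRUN (see AMD spec. 15.5.1 "Basic Operation");
           any illegal combination lands in the #VMEXIT with SVM_EXIT_INVALID. */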

        /*
         * Preserve the required force-flags.
         *
         * We only preserve the force-flags that would affect the execution of the
         * nested-guest (or the guest).
         *
         * - VMCPU_FF_INHIBIT_INTERRUPTS need -not- be preserved as it's for a single
         *   instruction which is this VMRUN instruction itself.
         *
         * - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
         *   execution of a subsequent IRET instruction in the guest.
         *
         * - The remaining FFs (e.g. timers) can stay in place so that we will be
         *   able to generate interrupts that should cause #VMEXITs for the
         *   nested-guest.
         */
        pCtx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);

        /*
         * Interrupt shadow.
         */
        if (pVmcbCtrl->IntShadow.n.u1IntShadow)
        {
            LogFlow(("iemSvmVmrun: setting interrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGst->u64RIP));
            /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
            EMSetInhibitInterruptsPC(pVCpu, pVmcbNstGst->u64RIP);
        }

        /*
         * TLB flush control.
         * Currently disabled since it's redundant as we unconditionally flush the TLB
         * in iemSvmWorldSwitch() below.
         */
#if 0
        /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
        if (   pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
            || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
            || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
            PGMFlushTLB(pVCpu, pVmcbNstGst->u64CR3, true /* fGlobal */);
#endif

        /*
         * Copy the remaining guest state from the VMCB to the guest-CPU context.
         */
        pCtx->gdtr.cbGdt = pVmcbNstGst->GDTR.u32Limit;
        pCtx->gdtr.pGdt  = pVmcbNstGst->GDTR.u64Base;
        pCtx->idtr.cbIdt = pVmcbNstGst->IDTR.u32Limit;
        pCtx->idtr.pIdt  = pVmcbNstGst->IDTR.u64Base;
        CPUMSetGuestCR0(pVCpu, pVmcbNstGst->u64CR0);
        CPUMSetGuestCR4(pVCpu, pVmcbNstGst->u64CR4);
        pCtx->cr3        = pVmcbNstGst->u64CR3;
        pCtx->cr2        = pVmcbNstGst->u64CR2;
        pCtx->dr[6]      = pVmcbNstGst->u64DR6;
        pCtx->dr[7]      = pVmcbNstGst->u64DR7;
        pCtx->rflags.u64 = pVmcbNstGst->u64RFlags;
        pCtx->rax        = pVmcbNstGst->u64RAX;
        pCtx->rsp        = pVmcbNstGst->u64RSP;
        pCtx->rip        = pVmcbNstGst->u64RIP;
        CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, uValidEfer);

        /* Apply the DR6 and DR7 mandatory set/clear (RA1/RAZ/MBZ) bits. */
        pCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
        pCtx->dr[6] |= X86_DR6_RA1_MASK;
        pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
        pCtx->dr[7] |= X86_DR7_RA1_MASK;

        /*
         * Check for pending virtual interrupts.
         */
        if (pVmcbCtrl->IntCtrl.n.u1VIrqPending)
            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
        else
            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));

        /*
         * Update PGM, IEM and others of a world-switch.
         */
        VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu, pCtx);
        if (rcStrict == VINF_SUCCESS)
        { /* likely */ }
        else if (RT_SUCCESS(rcStrict))
        {
            LogFlow(("iemSvmVmrun: iemSvmWorldSwitch returned %Rrc, setting passup status\n", VBOXSTRICTRC_VAL(rcStrict)));
            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
        }
        else
        {
            LogFlow(("iemSvmVmrun: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        /*
         * Set the global interrupt flag to allow interrupts in the guest.
         */
        pCtx->hwvirt.svm.fGif = true;

        /*
         * Event injection.
         */
        PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;
        pCtx->hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid;
        if (pEventInject->n.u1Valid)
        {
            uint8_t   const uVector    = pEventInject->n.u8Vector;
            TRPMEVENT const enmType    = HMSvmEventToTrpmEventType(pEventInject);
            uint16_t  const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;

            /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */
            if (RT_UNLIKELY(enmType == TRPM_32BIT_HACK))
            {
                Log(("iemSvmVmrun: Invalid event type %#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));
                return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
            }
            if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)
            {
                if (   uVector == X86_XCPT_NMI
                    || uVector > X86_XCPT_LAST)
                {
                    Log(("iemSvmVmrun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));
                    return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
                }
                if (   uVector == X86_XCPT_BR
                    && CPUMIsGuestInLongModeEx(pCtx))
                {
                    Log(("iemSvmVmrun: Cannot inject #BR when in long mode -> #VMEXIT\n"));
                    return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
                }
                /** @todo any others? */
            }

            /*
             * Invalidate the exit interrupt-information field here. This field is fully updated
             * on #VMEXIT as events other than the one below can also cause intercepts during
             * their injection (e.g. exceptions).
             */
            pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;

            /** @todo NRIP: Software interrupts can only be pushed properly if we support
             *        NRIP for the nested-guest to calculate the instruction length
             *        below. */
            LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 uVector=%#x enmType=%d uErrorCode=%u cr2=%#RX64 efer=%#RX64\n",
                     pCtx->cs.Sel, pCtx->rip, uVector, enmType, uErrorCode, pCtx->cr2, pCtx->msrEFER));
            rcStrict = IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);
        }
        else
            LogFlow(("iemSvmVmrun: Entering nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n",
                     pCtx->cs.Sel, pCtx->rip, pCtx->cr0, pCtx->cr3, pCtx->cr4, pCtx->msrEFER, pCtx->rflags.u64));

        LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict)));

# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
        /* If CLGI/STGI isn't intercepted we force IEM-only nested-guest execution here. */
        if (HMSvmIsVGifActive(pVM))
            return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
# endif

        return rcStrict;
    }

    /* Shouldn't really happen as the caller should've validated the physical address already. */
    Log(("iemSvmVmrun: Failed to read nested-guest VMCB at %#RGp (rc=%Rrc) -> #VMEXIT\n", GCPhysVmcb, rc));
    return rc;
#endif
}
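
/* Recap of the VMRUN emulation above (summary, not normative): 1) cache the VMCB
   address and save the host state, 2) read and validate the VMCB controls and
   guest state (failures yield a #VMEXIT with SVM_EXIT_INVALID), 3) cache the IO
   and MSR permission bitmaps, 4) load the nested-guest state and perform the
   world switch, and 5) inject any event pending in the VMCB EVENTINJ field. */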


#if 0
/**
 * Handles nested-guest SVM control intercepts and performs the \#VMEXIT if the
 * intercept is active.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_HM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
 *          we're not executing a nested-guest.
 * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
 *          successfully.
 * @retval  VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
 *          failed and a shutdown needs to be initiated for the guest.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        The guest-CPU context.
 * @param   uExitCode   The SVM exit code (see SVM_EXIT_XXX).
 * @param   uExitInfo1  The exit info. 1 field.
 * @param   uExitInfo2  The exit info. 2 field.
 */
VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstHandleCtrlIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
                                                          uint64_t uExitInfo2)
{
#define HMSVM_CTRL_INTERCEPT_VMEXIT(a_Intercept) \
    do { \
        if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, (a_Intercept))) \
            return iemSvmVmexit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); \
        break; \
    } while (0)

    if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
        return VINF_HM_INTERCEPT_NOT_ACTIVE;

    switch (uExitCode)
    {
        case SVM_EXIT_EXCEPTION_0:  case SVM_EXIT_EXCEPTION_1:  case SVM_EXIT_EXCEPTION_2:  case SVM_EXIT_EXCEPTION_3:
        case SVM_EXIT_EXCEPTION_4:  case SVM_EXIT_EXCEPTION_5:  case SVM_EXIT_EXCEPTION_6:  case SVM_EXIT_EXCEPTION_7:
        case SVM_EXIT_EXCEPTION_8:  case SVM_EXIT_EXCEPTION_9:  case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11:
        case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13: case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15:
        case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17: case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19:
        case SVM_EXIT_EXCEPTION_20: case SVM_EXIT_EXCEPTION_21: case SVM_EXIT_EXCEPTION_22: case SVM_EXIT_EXCEPTION_23:
        case SVM_EXIT_EXCEPTION_24: case SVM_EXIT_EXCEPTION_25: case SVM_EXIT_EXCEPTION_26: case SVM_EXIT_EXCEPTION_27:
        case SVM_EXIT_EXCEPTION_28: case SVM_EXIT_EXCEPTION_29: case SVM_EXIT_EXCEPTION_30: case SVM_EXIT_EXCEPTION_31:
        {
            if (CPUMIsGuestSvmXcptInterceptSet(pCtx, (X86XCPT)(uExitCode - SVM_EXIT_EXCEPTION_0)))
                return iemSvmVmexit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
            break;
        }

        case SVM_EXIT_WRITE_CR0:  case SVM_EXIT_WRITE_CR1:  case SVM_EXIT_WRITE_CR2:  case SVM_EXIT_WRITE_CR3:
        case SVM_EXIT_WRITE_CR4:  case SVM_EXIT_WRITE_CR5:  case SVM_EXIT_WRITE_CR6:  case SVM_EXIT_WRITE_CR7:
        case SVM_EXIT_WRITE_CR8:  case SVM_EXIT_WRITE_CR9:  case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
        case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
        {
            if (CPUMIsGuestSvmWriteCRxInterceptSet(pCtx, uExitCode - SVM_EXIT_WRITE_CR0))
                return iemSvmVmexit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
            break;
        }

        case SVM_EXIT_READ_CR0:  case SVM_EXIT_READ_CR1:  case SVM_EXIT_READ_CR2:  case SVM_EXIT_READ_CR3:
        case SVM_EXIT_READ_CR4:  case SVM_EXIT_READ_CR5:  case SVM_EXIT_READ_CR6:  case SVM_EXIT_READ_CR7:
        case SVM_EXIT_READ_CR8:  case SVM_EXIT_READ_CR9:  case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:
        case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case SVM_EXIT_READ_CR15:
        {
            if (CPUMIsGuestSvmReadCRxInterceptSet(pCtx, uExitCode - SVM_EXIT_READ_CR0))
                return iemSvmVmexit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
            break;
        }

        case SVM_EXIT_READ_DR0:  case SVM_EXIT_READ_DR1:  case SVM_EXIT_READ_DR2:  case SVM_EXIT_READ_DR3:
        case SVM_EXIT_READ_DR4:  case SVM_EXIT_READ_DR5:  case SVM_EXIT_READ_DR6:  case SVM_EXIT_READ_DR7:
        case SVM_EXIT_READ_DR8:  case SVM_EXIT_READ_DR9:  case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:
        case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
        {
            if (CPUMIsGuestSvmReadDRxInterceptSet(pCtx, uExitCode - SVM_EXIT_READ_DR0))
                return iemSvmVmexit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
            break;
        }

        case SVM_EXIT_WRITE_DR0:  case SVM_EXIT_WRITE_DR1:  case SVM_EXIT_WRITE_DR2:  case SVM_EXIT_WRITE_DR3:
        case SVM_EXIT_WRITE_DR4:  case SVM_EXIT_WRITE_DR5:  case SVM_EXIT_WRITE_DR6:  case SVM_EXIT_WRITE_DR7:
        case SVM_EXIT_WRITE_DR8:  case SVM_EXIT_WRITE_DR9:  case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
        case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
        {
            if (CPUMIsGuestSvmWriteDRxInterceptSet(pCtx, uExitCode - SVM_EXIT_WRITE_DR0))
                return iemSvmVmexit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
            break;
        }

        case SVM_EXIT_INTR:          HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INTR);
        case SVM_EXIT_NMI:           HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_NMI);
        case SVM_EXIT_SMI:           HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_SMI);
        case SVM_EXIT_INIT:          HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INIT);
        case SVM_EXIT_VINTR:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VINTR);
        case SVM_EXIT_CR0_SEL_WRITE: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_CR0_SEL_WRITES);
        case SVM_EXIT_IDTR_READ:     HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_IDTR_READS);
        case SVM_EXIT_GDTR_READ:     HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_GDTR_READS);
        case SVM_EXIT_LDTR_READ:     HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_LDTR_READS);
        case SVM_EXIT_TR_READ:       HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_TR_READS);
        case SVM_EXIT_IDTR_WRITE:    HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_IDTR_WRITES);
        case SVM_EXIT_GDTR_WRITE:    HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_GDTR_WRITES);
        case SVM_EXIT_LDTR_WRITE:    HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_LDTR_WRITES);
        case SVM_EXIT_TR_WRITE:      HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_TR_WRITES);
        case SVM_EXIT_RDTSC:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_RDTSC);
        case SVM_EXIT_RDPMC:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_RDPMC);
        case SVM_EXIT_PUSHF:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_PUSHF);
        case SVM_EXIT_POPF:          HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_POPF);
        case SVM_EXIT_CPUID:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_CPUID);
        case SVM_EXIT_RSM:           HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_RSM);
        case SVM_EXIT_IRET:          HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_IRET);
        case SVM_EXIT_SWINT:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INTN);
        case SVM_EXIT_INVD:          HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INVD);
        case SVM_EXIT_PAUSE:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_PAUSE);
        case SVM_EXIT_HLT:           HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_HLT);
        case SVM_EXIT_INVLPG:        HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INVLPG);
        case SVM_EXIT_INVLPGA:       HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INVLPGA);
        case SVM_EXIT_TASK_SWITCH:   HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_TASK_SWITCH);
        case SVM_EXIT_FERR_FREEZE:   HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_FERR_FREEZE);
        case SVM_EXIT_SHUTDOWN:      HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_SHUTDOWN);
        case SVM_EXIT_VMRUN:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VMRUN);
        case SVM_EXIT_VMMCALL:       HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VMMCALL);
        case SVM_EXIT_VMLOAD:        HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VMLOAD);
        case SVM_EXIT_VMSAVE:        HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VMSAVE);
        case SVM_EXIT_STGI:          HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_STGI);
        case SVM_EXIT_CLGI:          HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_CLGI);
        case SVM_EXIT_SKINIT:        HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_SKINIT);
        case SVM_EXIT_RDTSCP:        HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_RDTSCP);
        case SVM_EXIT_ICEBP:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_ICEBP);
        case SVM_EXIT_WBINVD:        HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_WBINVD);
        case SVM_EXIT_MONITOR:       HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_MONITOR);
        case SVM_EXIT_MWAIT:         HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_MWAIT);
        case SVM_EXIT_MWAIT_ARMED:   HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_MWAIT_ARMED);
        case SVM_EXIT_XSETBV:        HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_XSETBV);

        case SVM_EXIT_IOIO:
            AssertMsgFailed(("Use HMSvmNstGstHandleIOIntercept!\n"));
            return VERR_SVM_IPE_1;

        case SVM_EXIT_MSR:
            AssertMsgFailed(("Use HMSvmNstGstHandleMsrIntercept!\n"));
            return VERR_SVM_IPE_1;

        case SVM_EXIT_NPF:
        case SVM_EXIT_AVIC_INCOMPLETE_IPI:
        case SVM_EXIT_AVIC_NOACCEL:
            AssertMsgFailed(("Todo Implement.\n"));
            return VERR_SVM_IPE_1;

        default:
            AssertMsgFailed(("Unsupported SVM exit code %#RX64\n", uExitCode));
            return VERR_SVM_IPE_1;
    }

    return VINF_HM_INTERCEPT_NOT_ACTIVE;

#undef HMSVM_CTRL_INTERCEPT_VMEXIT
}
#endif


/**
 * Checks the event intercepts and performs the \#VMEXIT if the corresponding
 * intercept is active.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_HM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
 *          we're not executing a nested-guest.
 * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
 *          successfully.
 * @retval  VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
 *          failed and a shutdown needs to be initiated for the guest.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        The guest-CPU context.
 * @param   u8Vector    The vector of the event being delivered.
 * @param   fFlags      The IEM exception / interrupt flags (IEM_XCPT_FLAGS_XXX).
 * @param   uErr        The error code, if applicable for the event.
 * @param   uCr2        The CR2 value, if applicable for a \#PF.
 */
IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr,
                                                   uint64_t uCr2)
{
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));

    /*
     * Handle SVM exception and software interrupt intercepts, see AMD spec. 15.12 "Exception Intercepts".
     *
     *   - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
     *   - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
     *     even when they use a vector in the range 0 to 31.
     *   - ICEBP should not trigger #DB intercept, but its own intercept.
     *   - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
     */
    /* Check NMI intercept. */
    if (   u8Vector == X86_XCPT_NMI
        && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
        && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
    {
        Log2(("iemHandleSvmEventIntercept: NMI intercept -> #VMEXIT\n"));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    /* Check ICEBP intercept. */
    if (   (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
        && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
    {
        Log2(("iemHandleSvmEventIntercept: ICEBP intercept -> #VMEXIT\n"));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    /* Check CPU exception intercepts. */
    if (   (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
        && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
    {
        Assert(u8Vector <= X86_XCPT_LAST);
        uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
        uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
        if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists
            && u8Vector == X86_XCPT_PF
            && !(uErr & X86_TRAP_PF_ID))
        {
            /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
#ifdef IEM_WITH_CODE_TLB
            AssertReleaseFailedReturn(VERR_IEM_IPE_5);
#else
            PSVMVMCBCTRL  pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
            uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
            uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
            if (   cbCurrent > 0
                && cbCurrent < sizeof(pVmcbCtrl->abInstr))
            {
                Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
                memcpy(&pVmcbCtrl->abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
            }
#endif
        }
        Log2(("iemHandleSvmEventIntercept: Xcpt intercept u32InterceptXcpt=%#RX32 u8Vector=%#x "
              "uExitInfo1=%#RX64 uExitInfo2=%#RX64 -> #VMEXIT\n", pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u32InterceptXcpt,
              u8Vector, uExitInfo1, uExitInfo2));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
    }

    /* Check software interrupt (INTn) intercepts. */
    if (   (fFlags & (  IEM_XCPT_FLAGS_T_SOFT_INT
                      | IEM_XCPT_FLAGS_BP_INSTR
                      | IEM_XCPT_FLAGS_ICEBP_INSTR
                      | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
        && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
    {
        uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? u8Vector : 0;
        Log2(("iemHandleSvmEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
    }

    return VINF_HM_INTERCEPT_NOT_ACTIVE;
}


/**
 * Checks the SVM IO permission bitmap and performs the \#VMEXIT if the
 * corresponding intercept is active.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_HM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
 *          we're not executing a nested-guest.
 * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
 *          successfully.
 * @retval  VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
 *          failed and a shutdown needs to be initiated for the guest.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   u16Port         The IO port being accessed.
 * @param   enmIoType       The type of IO access.
 * @param   cbReg           The IO operand size in bytes.
 * @param   cAddrSizeBits   The address size bits (for 16, 32 or 64).
 * @param   iEffSeg         The effective segment number.
 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
 * @param   fStrIo          Whether this is a string IO instruction.
 * @param   cbInstr         The length of the IO instruction in bytes.
 */
IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                                uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
{
    Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);

    Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u)\n", u16Port, u16Port));

    SVMIOIOEXITINFO IoExitInfo;
    PCPUMCTX        pCtx       = IEM_GET_CTX(pVCpu);
    void           *pvIoBitmap = pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
    bool const fIntercept = HMSvmIsIOInterceptActive(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo,
                                                     &IoExitInfo);
    if (fIntercept)
    {
        Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u) -> #VMEXIT\n", u16Port, u16Port));
        return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_IOIO, IoExitInfo.u, pCtx->rip + cbInstr);
    }

    /** @todo remove later (for debugging as VirtualBox always traps all IO
     *        intercepts). */
    AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
    return VINF_HM_INTERCEPT_NOT_ACTIVE;
}
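
/* Rough sketch of the IO permission map consulted above (AMD spec. 15.10.1): one
   bit per I/O port with the bit index equal to the port number, so ports 0..65535
   occupy 8 KB and an access of cbReg bytes at u16Port tests bits u16Port through
   u16Port + cbReg - 1. The extra third page (SVM_IOPM_PAGES is presumed 3) covers
   multi-byte accesses that extend beyond port 0xFFFF. */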


/**
 * Checks the SVM MSR permission bitmap and performs the \#VMEXIT if the
 * corresponding intercept is active.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_HM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not
 *          specify interception of the accessed MSR @a idMsr.
 * @retval  VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
 *          successfully.
 * @retval  VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
 *          failed and a shutdown needs to be initiated for the guest.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        The guest-CPU context.
 * @param   idMsr       The MSR being accessed in the nested-guest.
 * @param   fWrite      Whether this is an MSR write access, @c false implies an
 *                      MSR read.
 */
IEM_STATIC VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t idMsr, bool fWrite)
{
    /*
     * Check if any MSRs are being intercepted.
     */
    Assert(CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT));
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));

    uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;

    /*
     * Get the byte and bit offset of the permission bits corresponding to the MSR.
     */
    uint16_t offMsrpm;
    uint32_t uMsrpmBit;
    int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
    if (RT_SUCCESS(rc))
    {
        Assert(uMsrpmBit < 0x3fff);
        Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
        if (fWrite)
            ++uMsrpmBit;

        /*
         * Check if the bit is set, if so, trigger a #VMEXIT.
         */
        uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
        pbMsrpm += offMsrpm;
        if (ASMBitTest(pbMsrpm, uMsrpmBit))
            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    }
    else
    {
        /*
         * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it.
         */
        Log(("iemSvmHandleMsrIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool -> #VMEXIT\n", idMsr, fWrite));
        return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    }
    return VINF_HM_INTERCEPT_NOT_ACTIVE;
}
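
/* Illustrative example for the MSR permission map lookup above (layout per AMD
   spec. 15.11, two bits per MSR, read bit first): for idMsr=0xC0000080 (EFER),
   which falls in the 0xC0000000..0xC0001FFF group, HMSvmGetMsrpmOffsetAndBit()
   would be expected to yield offMsrpm=0x800 (the group's base offset) and
   uMsrpmBit=0x80*2=0x100 (the read bit); ++uMsrpmBit then selects the write bit
   when fWrite is true. */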



/**
 * Implements 'VMRUN'.
 */
IEM_CIMPL_DEF_0(iemCImpl_vmrun)
{
#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    RT_NOREF2(pVCpu, cbInstr);
    return VINF_EM_RAW_EMULATE_INSTR;
#else
    LogFlow(("iemCImpl_vmrun\n"));
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);

    /** @todo Check effective address size using address size prefix. */
    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
    {
        Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
    {
        Log(("vmrun: Guest intercept -> #VMEXIT\n"));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    VBOXSTRICTRC rcStrict = iemSvmVmrun(pVCpu, pCtx, cbInstr, GCPhysVmcb);
    if (rcStrict == VERR_SVM_VMEXIT_FAILED)
    {
        Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
        rcStrict = VINF_EM_TRIPLE_FAULT;
    }
    return rcStrict;
#endif
}
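
/* For orientation, a hypothetical guest hypervisor reaches the emulation above by
   executing VMRUN with the 4K-aligned guest-physical VMCB address in RAX (EAX
   outside 64-bit mode):
       mov rax, 0x12345000   ; VMCB guest-physical address (example value)
       vmrun                 ; RAX is an implicit operand
   A misaligned or non-normal address raises #GP(0) per the checks above. */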


/**
 * Implements 'VMMCALL'.
 */
IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
    {
        Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    bool fUpdatedRipAndRF;
    VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
    if (RT_SUCCESS(rcStrict))
    {
        if (!fUpdatedRipAndRF)
            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return rcStrict;
    }

    return iemRaiseUndefinedOpcode(pVCpu);
}


/**
 * Implements 'VMLOAD'.
 */
IEM_CIMPL_DEF_0(iemCImpl_vmload)
{
#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    RT_NOREF2(pVCpu, cbInstr);
    return VINF_EM_RAW_EMULATE_INSTR;
#else
    LogFlow(("iemCImpl_vmload\n"));
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);

    /** @todo Check effective address size using address size prefix. */
    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
    {
        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
    {
        Log(("vmload: Guest intercept -> #VMEXIT\n"));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    SVMVMCBSTATESAVE VmcbNstGst;
    VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest),
                                                    sizeof(SVMVMCBSTATESAVE));
    if (rcStrict == VINF_SUCCESS)
    {
        LogFlow(("vmload: Loading VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, FS, fs);
        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, GS, gs);
        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, TR, tr);
        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, LDTR, ldtr);

        pCtx->msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
        pCtx->msrSTAR         = VmcbNstGst.u64STAR;
        pCtx->msrLSTAR        = VmcbNstGst.u64LSTAR;
        pCtx->msrCSTAR        = VmcbNstGst.u64CSTAR;
        pCtx->msrSFMASK       = VmcbNstGst.u64SFMASK;

        pCtx->SysEnter.cs     = VmcbNstGst.u64SysEnterCS;
        pCtx->SysEnter.esp    = VmcbNstGst.u64SysEnterESP;
        pCtx->SysEnter.eip    = VmcbNstGst.u64SysEnterEIP;

        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    }
    return rcStrict;
#endif
}


/**
 * Implements 'VMSAVE'.
 */
IEM_CIMPL_DEF_0(iemCImpl_vmsave)
{
#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    RT_NOREF2(pVCpu, cbInstr);
    return VINF_EM_RAW_EMULATE_INSTR;
#else
    LogFlow(("iemCImpl_vmsave\n"));
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);

    /** @todo Check effective address size using address size prefix. */
    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
    {
        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
    {
        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    SVMVMCBSTATESAVE VmcbNstGst;
    VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest),
                                                    sizeof(SVMVMCBSTATESAVE));
    if (rcStrict == VINF_SUCCESS)
    {
        LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, FS, fs);
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, GS, gs);
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, TR, tr);
        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, LDTR, ldtr);

        VmcbNstGst.u64KernelGSBase = pCtx->msrKERNELGSBASE;
        VmcbNstGst.u64STAR         = pCtx->msrSTAR;
        VmcbNstGst.u64LSTAR        = pCtx->msrLSTAR;
        VmcbNstGst.u64CSTAR        = pCtx->msrCSTAR;
        VmcbNstGst.u64SFMASK       = pCtx->msrSFMASK;

        VmcbNstGst.u64SysEnterCS   = pCtx->SysEnter.cs;
        VmcbNstGst.u64SysEnterESP  = pCtx->SysEnter.esp;
        VmcbNstGst.u64SysEnterEIP  = pCtx->SysEnter.eip;

        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest), &VmcbNstGst,
                                            sizeof(SVMVMCBSTATESAVE));
        if (rcStrict == VINF_SUCCESS)
            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    }
    return rcStrict;
#endif
}


/**
 * Implements 'CLGI'.
 */
IEM_CIMPL_DEF_0(iemCImpl_clgi)
{
#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    RT_NOREF2(pVCpu, cbInstr);
    return VINF_EM_RAW_EMULATE_INSTR;
#else
    LogFlow(("iemCImpl_clgi\n"));
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
    {
        Log(("clgi: Guest intercept -> #VMEXIT\n"));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    pCtx->hwvirt.svm.fGif = false;
    iemRegAddToRipAndClearRF(pVCpu, cbInstr);

# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
# else
    return VINF_SUCCESS;
# endif
#endif
}


/**
 * Implements 'STGI'.
 */
IEM_CIMPL_DEF_0(iemCImpl_stgi)
{
#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    RT_NOREF2(pVCpu, cbInstr);
    return VINF_EM_RAW_EMULATE_INSTR;
#else
    LogFlow(("iemCImpl_stgi\n"));
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
    {
        Log2(("stgi: Guest intercept -> #VMEXIT\n"));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    pCtx->hwvirt.svm.fGif = true;
    iemRegAddToRipAndClearRF(pVCpu, cbInstr);

# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
# else
    return VINF_SUCCESS;
# endif
#endif
}
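
/* Side note on the VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM handling above: CLGI and
   STGI merely toggle pCtx->hwvirt.svm.fGif, but as only IEM models the GIF, the
   EM execution policy is switched to IEM-only while the GIF is clear (CLGI) and
   restored when it is set again (STGI), so the GIF-clear window never runs under
   hardware-assisted SVM or the recompiler. */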


/**
 * Implements 'INVLPGA'.
 */
IEM_CIMPL_DEF_0(iemCImpl_invlpga)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    /** @todo Check effective address size using address size prefix. */
    RTGCPTR const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    /** @todo PGM needs virtual ASID support. */
#if 0
    uint32_t const uAsid = pCtx->ecx;
#endif

    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
    {
        Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    PGMInvalidatePage(pVCpu, GCPtrPage);
    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Implements 'SKINIT'.
 */
IEM_CIMPL_DEF_0(iemCImpl_skinit)
{
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);

    uint32_t uIgnore;
    uint32_t fFeaturesECX;
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0 /* iSubLeaf */, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
    if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
        return iemRaiseUndefinedOpcode(pVCpu);

    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
    {
        Log2(("skinit: Guest intercept -> #VMEXIT\n"));
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    }

    RT_NOREF(cbInstr);
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
}