VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h@ 75632

Last change on this file since 75632 was 75611, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 Move the VMX APIC-access guest-physical page registration into IEM and get rid of the CPUM all-context code; it does not quite fit there because we still have to declare the prototypes in the HM headers anyway, so just keep it in HM all-context code for now.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 60.3 KB
1/* $Id: IEMAllCImplSvmInstr.cpp.h 75611 2018-11-20 11:20:25Z vboxsync $ */
2/** @file
3 * IEM - AMD-V (Secure Virtual Machine) instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
20/**
21 * Check the common SVM instruction preconditions.
22 */
23# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
24 do { \
25 if (!CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu))) \
26 { \
27 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
28 return iemRaiseUndefinedOpcode(a_pVCpu); \
29 } \
30 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
31 { \
32 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
33 return iemRaiseUndefinedOpcode(a_pVCpu); \
34 } \
35 if ((a_pVCpu)->iem.s.uCpl != 0) \
36 { \
37 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
38 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
39 } \
40 } while (0)
41
42
43/**
44 * Converts an IEM exception event type to an SVM event type.
45 *
46 * @returns The SVM event type.
47 * @retval UINT8_MAX if the specified type of event isn't among the set
48 * of recognized IEM event types.
49 *
50 * @param uVector The vector of the event.
51 * @param fIemXcptFlags The IEM exception / interrupt flags.
52 */
53IEM_STATIC uint8_t iemGetSvmEventType(uint32_t uVector, uint32_t fIemXcptFlags)
54{
55 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
56 {
57 if (uVector != X86_XCPT_NMI)
58 return SVM_EVENT_EXCEPTION;
59 return SVM_EVENT_NMI;
60 }
61
62 /* See AMD spec. Table 15-1. "Guest Exception or Interrupt Types". */
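/* For reference, the event type encodings in that table are: 0 = external or virtual
   interrupt, 2 = NMI, 3 = hardware exception, 4 = software interrupt (INTn); the
   remaining encodings are reserved. */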
63 if (fIemXcptFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
64 return SVM_EVENT_EXCEPTION;
65
66 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT)
67 return SVM_EVENT_EXTERNAL_IRQ;
68
69 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
70 return SVM_EVENT_SOFTWARE_INT;
71
72 AssertMsgFailed(("iemGetSvmEventType: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector));
73 return UINT8_MAX;
74}
75
76
77/**
78 * Performs an SVM world-switch (VMRUN, \#VMEXIT) updating PGM and IEM internals.
79 *
80 * @returns Strict VBox status code.
81 * @param pVCpu The cross context virtual CPU structure.
82 */
83DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPU pVCpu)
84{
85 /*
86 * Inform PGM about paging mode changes.
87 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
88 * see comment in iemMemPageTranslateAndCheckAccess().
89 */
90 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
91# ifdef IN_RING3
92 Assert(rc != VINF_PGM_CHANGE_MODE);
93# endif
94 AssertRCReturn(rc, rc);
95
96 /* Inform CPUM (recompiler), can later be removed. */
97 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
98
99 /*
100 * Flush the TLB with new CR3. This is required in case the PGM mode change
101 * above doesn't actually change anything.
102 */
103 if (rc == VINF_SUCCESS)
104 {
105 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
106 AssertRCReturn(rc, rc);
107 }
108
109 /* Re-initialize IEM cache/state after the drastic mode switch. */
110 iemReInitExec(pVCpu);
111 return rc;
112}
113
114
115/**
116 * SVM \#VMEXIT handler.
117 *
118 * @returns Strict VBox status code.
119 * @retval VINF_SVM_VMEXIT when the \#VMEXIT is successful.
120 * @retval VERR_SVM_VMEXIT_FAILED when the \#VMEXIT failed restoring the guest's
121 * "host state" and a shutdown is required.
122 *
123 * @param pVCpu The cross context virtual CPU structure.
124 * @param uExitCode The exit code.
125 * @param uExitInfo1 The exit info. 1 field.
126 * @param uExitInfo2 The exit info. 2 field.
127 */
128IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
129{
130 VBOXSTRICTRC rcStrict;
131 if ( CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
132 || uExitCode == SVM_EXIT_INVALID)
133 {
134 LogFlow(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n",
135 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, uExitInfo1, uExitInfo2));
136
137 /*
138 * Disable the global interrupt flag to prevent interrupts during the 'atomic' world switch.
139 */
140 pVCpu->cpum.GstCtx.hwvirt.fGif = false;
141
142 /*
143 * Map the nested-guest VMCB from its location in guest memory.
144 * Write exactly what the CPU does on #VMEXIT thereby preserving most other bits in the
145 * guest's VMCB in memory, see @bugref{7243#c113} and related comment on iemSvmVmrun().
146 */
147 PSVMVMCB pVmcbMem;
148 PGMPAGEMAPLOCK PgLockMem;
149 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
150 rcStrict = iemMemPageMap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, (void **)&pVmcbMem,
151 &PgLockMem);
152 if (rcStrict == VINF_SUCCESS)
153 {
154 /*
155 * Notify HM in case the nested-guest was executed using hardware-assisted SVM (which
156 * would have modified some VMCB state) that might need to be restored on #VMEXIT before
157 * writing the VMCB back to guest memory.
158 */
159 HMSvmNstGstVmExitNotify(pVCpu, IEM_GET_CTX(pVCpu));
160
161 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
162 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
163 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
164 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
165
166 /*
167 * Save the nested-guest state into the VMCB state-save area.
168 */
169 PSVMVMCBSTATESAVE pVmcbMemState = &pVmcbMem->guest;
170 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, ES, es);
171 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, CS, cs);
172 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, SS, ss);
173 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, DS, ds);
174 pVmcbMemState->GDTR.u32Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
175 pVmcbMemState->GDTR.u64Base = pVCpu->cpum.GstCtx.gdtr.pGdt;
176 pVmcbMemState->IDTR.u32Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
177 pVmcbMemState->IDTR.u64Base = pVCpu->cpum.GstCtx.idtr.pIdt;
178 pVmcbMemState->u64EFER = pVCpu->cpum.GstCtx.msrEFER;
179 pVmcbMemState->u64CR4 = pVCpu->cpum.GstCtx.cr4;
180 pVmcbMemState->u64CR3 = pVCpu->cpum.GstCtx.cr3;
181 pVmcbMemState->u64CR2 = pVCpu->cpum.GstCtx.cr2;
182 pVmcbMemState->u64CR0 = pVCpu->cpum.GstCtx.cr0;
183 /** @todo Nested paging. */
184 pVmcbMemState->u64RFlags = pVCpu->cpum.GstCtx.rflags.u64;
185 pVmcbMemState->u64RIP = pVCpu->cpum.GstCtx.rip;
186 pVmcbMemState->u64RSP = pVCpu->cpum.GstCtx.rsp;
187 pVmcbMemState->u64RAX = pVCpu->cpum.GstCtx.rax;
188 pVmcbMemState->u64DR7 = pVCpu->cpum.GstCtx.dr[7];
189 pVmcbMemState->u64DR6 = pVCpu->cpum.GstCtx.dr[6];
190 pVmcbMemState->u8CPL = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl; /* See comment in CPUMGetGuestCPL(). */
191 Assert(CPUMGetGuestCPL(pVCpu) == pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl);
192 if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, IEM_GET_CTX(pVCpu)))
193 pVmcbMemState->u64PAT = pVCpu->cpum.GstCtx.msrPAT;
194
195 /*
196 * Save additional state and intercept information.
197 *
198 * - V_IRQ: Tracked using VMCPU_FF_INTERRUPT_NESTED_GUEST force-flag and updated below.
199 * - V_TPR: Updated by iemCImpl_load_CrX or by the physical CPU for hardware-assisted
200 * SVM execution.
201 * - Interrupt shadow: Tracked using VMCPU_FF_INHIBIT_INTERRUPTS and RIP.
202 */
203 PSVMVMCBCTRL pVmcbMemCtrl = &pVmcbMem->ctrl;
204 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)) /* V_IRQ. */
205 pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;
206 else
207 {
208 Assert(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
209 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
210 }
211
212 pVmcbMemCtrl->IntCtrl.n.u8VTPR = pVmcbCtrl->IntCtrl.n.u8VTPR; /* V_TPR. */
213
214 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadow. */
215 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
216 {
217 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 1;
218 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
219 LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pVCpu->cpum.GstCtx.rip));
220 }
221 else
222 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 0;
223
224 /*
225 * Save nRIP, instruction length and byte fields.
226 */
227 pVmcbMemCtrl->u64NextRIP = pVmcbCtrl->u64NextRIP;
228 pVmcbMemCtrl->cbInstrFetched = pVmcbCtrl->cbInstrFetched;
229 memcpy(&pVmcbMemCtrl->abInstr[0], &pVmcbCtrl->abInstr[0], sizeof(pVmcbMemCtrl->abInstr));
230
231 /*
232 * Save exit information.
233 */
234 pVmcbMemCtrl->u64ExitCode = uExitCode;
235 pVmcbMemCtrl->u64ExitInfo1 = uExitInfo1;
236 pVmcbMemCtrl->u64ExitInfo2 = uExitInfo2;
237
238 /*
239 * Update the exit interrupt-information field if this #VMEXIT happened as a result
240 * of delivering an event through IEM.
241 *
242 * Don't update the exit interrupt-information field if the event wasn't being injected
243 * through IEM, as it would have been updated by real hardware if the nested-guest was
244 * executed using hardware-assisted SVM.
245 */
246 {
247 uint8_t uExitIntVector;
248 uint32_t uExitIntErr;
249 uint32_t fExitIntFlags;
250 bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,
251 NULL /* uExitIntCr2 */);
252 if (fRaisingEvent)
253 {
254 pVmcbCtrl->ExitIntInfo.n.u1Valid = 1;
255 pVmcbCtrl->ExitIntInfo.n.u8Vector = uExitIntVector;
256 pVmcbCtrl->ExitIntInfo.n.u3Type = iemGetSvmEventType(uExitIntVector, fExitIntFlags);
257 if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)
258 {
259 pVmcbCtrl->ExitIntInfo.n.u1ErrorCodeValid = true;
260 pVmcbCtrl->ExitIntInfo.n.u32ErrorCode = uExitIntErr;
261 }
262 }
263 }
264
265 /*
266 * Save the exit interrupt-information field.
267 *
268 * We write the whole field including overwriting reserved bits as it was observed on an
269 * AMD Ryzen 5 Pro 1500 that the CPU does not preserve reserved bits in EXITINTINFO.
270 */
271 pVmcbMemCtrl->ExitIntInfo = pVmcbCtrl->ExitIntInfo;
272
273 /*
274 * Clear event injection.
275 */
276 pVmcbMemCtrl->EventInject.n.u1Valid = 0;
277
278 iemMemPageUnmap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, pVmcbMem, &PgLockMem);
279 }
280
281 /*
282 * Prepare for guest's "host mode" by clearing internal processor state bits.
283 *
284 * We don't need to zero out the state-save area; clearing just the controls is
285 * sufficient because they hold the critical bit indicating whether we're inside
286 * the nested-guest or not.
287 */
288 memset(pVmcbCtrl, 0, sizeof(*pVmcbCtrl));
289 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
290
291 /*
292 * Restore the subset of force-flags that were preserved.
293 */
294 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
295 {
296 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
297 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
298 }
299
300 if (rcStrict == VINF_SUCCESS)
301 {
302 /** @todo Nested paging. */
303 /** @todo ASID. */
304
305 /*
306 * Reload the guest's "host state".
307 */
308 CPUMSvmVmExitRestoreHostState(pVCpu, IEM_GET_CTX(pVCpu));
309
310 /*
311 * Update PGM, IEM and others of a world-switch.
312 */
313 rcStrict = iemSvmWorldSwitch(pVCpu);
314 if (rcStrict == VINF_SUCCESS)
315 rcStrict = VINF_SVM_VMEXIT;
316 else if (RT_SUCCESS(rcStrict))
317 {
318 LogFlow(("iemSvmVmexit: Setting passup status from iemSvmWorldSwitch %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
319 iemSetPassUpStatus(pVCpu, rcStrict);
320 rcStrict = VINF_SVM_VMEXIT;
321 }
322 else
323 LogFlow(("iemSvmVmexit: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
324 }
325 else
326 {
327 AssertMsgFailed(("iemSvmVmexit: Mapping VMCB at %#RGp failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, VBOXSTRICTRC_VAL(rcStrict)));
328 rcStrict = VERR_SVM_VMEXIT_FAILED;
329 }
330 }
331 else
332 {
333 AssertMsgFailed(("iemSvmVmexit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode, uExitInfo1, uExitInfo2));
334 rcStrict = VERR_SVM_IPE_3;
335 }
336
337# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
338 /* CLGI/STGI may not have been intercepted and thus not executed in IEM. */
339 if (HMSvmIsVGifActive(pVCpu->CTX_SUFF(pVM)))
340 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
341# endif
342 return rcStrict;
343}
344
345
346/**
347 * Performs the operations that are part of VMRUN instruction execution
348 * in the guest.
349 *
350 * @returns Strict VBox status code (i.e. informational status codes too).
351 * @retval VINF_SUCCESS successfully executed VMRUN and entered nested-guest
352 * code execution.
353 * @retval VINF_SVM_VMEXIT when executing VMRUN causes a \#VMEXIT
354 * (SVM_EXIT_INVALID most likely).
355 *
356 * @param pVCpu The cross context virtual CPU structure.
357 * @param cbInstr The length of the VMRUN instruction.
358 * @param GCPhysVmcb Guest physical address of the VMCB to run.
359 */
360IEM_STATIC VBOXSTRICTRC iemSvmVmrun(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPhysVmcb)
361{
362 LogFlow(("iemSvmVmrun\n"));
363
364 /*
365 * Cache the physical address of the VMCB for #VMEXIT exceptions.
366 */
367 pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb = GCPhysVmcb;
368
369 /*
370 * Save the host state.
371 */
372 CPUMSvmVmRunSaveHostState(IEM_GET_CTX(pVCpu), cbInstr);
373
374 /*
375 * Read the guest VMCB.
376 */
377 PVM pVM = pVCpu->CTX_SUFF(pVM);
378 int rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb), GCPhysVmcb, sizeof(SVMVMCB));
379 if (RT_SUCCESS(rc))
380 {
381 /*
382 * AMD-V seems to preserve reserved fields and only writes back selected, recognized
383 * fields on #VMEXIT. However, not all reserved bits are preserved (e.g., EXITINTINFO)
384 * but in our implementation we try to preserve as much as we possibly can.
385 *
386 * We could read the entire page here and only write back the relevant fields on
387 * #VMEXIT but since our internal VMCB is also being used by HM during hardware-assisted
388 * SVM execution, it creates a potential for a nested-hypervisor to set bits that are
389 * currently reserved but may be recognized as feature bits in future CPUs, causing
390 * unexpected & undesired results. Hence, we zero out unrecognized fields here as we
391 * typically enter hardware-assisted SVM soon anyway, see @bugref{7243#c113}.
392 */
393 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
394 PSVMVMCBSTATESAVE pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->guest;
395
396 RT_ZERO(pVmcbCtrl->u8Reserved0);
397 RT_ZERO(pVmcbCtrl->u8Reserved1);
398 RT_ZERO(pVmcbCtrl->u8Reserved2);
399 RT_ZERO(pVmcbNstGst->u8Reserved0);
400 RT_ZERO(pVmcbNstGst->u8Reserved1);
401 RT_ZERO(pVmcbNstGst->u8Reserved2);
402 RT_ZERO(pVmcbNstGst->u8Reserved3);
403 RT_ZERO(pVmcbNstGst->u8Reserved4);
404 RT_ZERO(pVmcbNstGst->u8Reserved5);
405 pVmcbCtrl->u32Reserved0 = 0;
406 pVmcbCtrl->TLBCtrl.n.u24Reserved = 0;
407 pVmcbCtrl->IntCtrl.n.u6Reserved = 0;
408 pVmcbCtrl->IntCtrl.n.u3Reserved = 0;
409 pVmcbCtrl->IntCtrl.n.u5Reserved = 0;
410 pVmcbCtrl->IntCtrl.n.u24Reserved = 0;
411 pVmcbCtrl->IntShadow.n.u30Reserved = 0;
412 pVmcbCtrl->ExitIntInfo.n.u19Reserved = 0;
413 pVmcbCtrl->NestedPagingCtrl.n.u29Reserved = 0;
414 pVmcbCtrl->EventInject.n.u19Reserved = 0;
415 pVmcbCtrl->LbrVirt.n.u30Reserved = 0;
416
417 /*
418 * Validate guest-state and controls.
419 */
420 /* VMRUN must always be intercepted. */
421 if (!CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_VMRUN))
422 {
423 Log(("iemSvmVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
424 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
425 }
426
427 /* Nested paging. */
428 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
429 && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
430 {
431 Log(("iemSvmVmrun: Nested paging not supported -> Disabling\n"));
432 pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging = 0;
433 }
434
435 /* AVIC. */
436 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable
437 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
438 {
439 Log(("iemSvmVmrun: AVIC not supported -> Disabling\n"));
440 pVmcbCtrl->IntCtrl.n.u1AvicEnable = 0;
441 }
442
443 /* Last branch record (LBR) virtualization. */
444 if ( pVmcbCtrl->LbrVirt.n.u1LbrVirt
445 && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
446 {
447 Log(("iemSvmVmrun: LBR virtualization not supported -> Disabling\n"));
448 pVmcbCtrl->LbrVirt.n.u1LbrVirt = 0;
449 }
450
451 /* Virtualized VMSAVE/VMLOAD. */
452 if ( pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload
453 && !pVM->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
454 {
455 Log(("iemSvmVmrun: Virtualized VMSAVE/VMLOAD not supported -> Disabling\n"));
456 pVmcbCtrl->LbrVirt.n.u1VirtVmsaveVmload = 0;
457 }
458
459 /* Virtual GIF. */
460 if ( pVmcbCtrl->IntCtrl.n.u1VGifEnable
461 && !pVM->cpum.ro.GuestFeatures.fSvmVGif)
462 {
463 Log(("iemSvmVmrun: Virtual GIF not supported -> Disabling\n"));
464 pVmcbCtrl->IntCtrl.n.u1VGifEnable = 0;
465 }
466
467 /* Guest ASID. */
468 if (!pVmcbCtrl->TLBCtrl.n.u32ASID)
469 {
470 Log(("iemSvmVmrun: Guest ASID is invalid -> #VMEXIT\n"));
471 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
472 }
473
474 /* Guest AVIC. */
475 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable
476 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
477 {
478 Log(("iemSvmVmrun: AVIC not supported -> Disabling\n"));
479 pVmcbCtrl->IntCtrl.n.u1AvicEnable = 0;
480 }
481
482 /* Guest Secure Encrypted Virtualization. */
483 if ( ( pVmcbCtrl->NestedPagingCtrl.n.u1Sev
484 || pVmcbCtrl->NestedPagingCtrl.n.u1SevEs)
485 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
486 {
487 Log(("iemSvmVmrun: SEV not supported -> Disabling\n"));
488 pVmcbCtrl->NestedPagingCtrl.n.u1Sev = 0;
489 pVmcbCtrl->NestedPagingCtrl.n.u1SevEs = 0;
490 }
491
492 /* Flush by ASID. */
493 if ( !pVM->cpum.ro.GuestFeatures.fSvmFlusbByAsid
494 && pVmcbCtrl->TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_NOTHING
495 && pVmcbCtrl->TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_ENTIRE)
496 {
497 Log(("iemSvmVmrun: Flush-by-ASID not supported -> #VMEXIT\n"));
498 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
499 }
500
501 /* IO permission bitmap. */
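/* The IOPM is 12 KB (three contiguous 4K pages) holding one intercept bit per I/O port,
   with the third page covering accesses that straddle the end of the port range; hence
   the three page validations below. */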
502 RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
503 if ( (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
504 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)
505 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)
506 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))
507 {
508 Log(("iemSvmVmrun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
509 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
510 }
511
512 /* MSR permission bitmap. */
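/* The MSRPM is 8 KB (two contiguous 4K pages) holding two intercept bits (read/write)
   per covered MSR; hence both pages are validated below. */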
513 RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
514 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
515 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)
516 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))
517 {
518 Log(("iemSvmVmrun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
519 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
520 }
521
522 /* CR0. */
523 if ( !(pVmcbNstGst->u64CR0 & X86_CR0_CD)
524 && (pVmcbNstGst->u64CR0 & X86_CR0_NW))
525 {
526 Log(("iemSvmVmrun: CR0 no-write through with cache disabled. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
527 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
528 }
529 if (pVmcbNstGst->u64CR0 >> 32)
530 {
531 Log(("iemSvmVmrun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
532 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
533 }
534 /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */
535
536 /* DR6 and DR7. */
537 if ( pVmcbNstGst->u64DR6 >> 32
538 || pVmcbNstGst->u64DR7 >> 32)
539 {
540 Log(("iemSvmVmrun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64DR6,
541 pVmcbNstGst->u64DR7));
542 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
543 }
544
545 /*
546 * PAT (Page Attribute Table) MSR.
547 *
548 * The CPU only validates and loads it when nested-paging is enabled.
549 * See AMD spec. "15.25.4 Nested Paging and VMRUN/#VMEXIT".
550 */
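/* CPUMIsPatMsrValid() is expected to reject any PAT entry that is not one of the
   architecturally defined memory types (UC, WC, WT, WP, WB, UC-). */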
551 if ( pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
552 && !CPUMIsPatMsrValid(pVmcbNstGst->u64PAT))
553 {
554 Log(("iemSvmVmrun: PAT invalid. u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT));
555 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
556 }
557
558 /*
559 * Copy the IO permission bitmap into the cache.
560 */
561 Assert(pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap));
562 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,
563 SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
564 if (RT_FAILURE(rc))
565 {
566 Log(("iemSvmVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
567 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
568 }
569
570 /*
571 * Copy the MSR permission bitmap into the cache.
572 */
573 Assert(pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap));
574 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,
575 SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
576 if (RT_FAILURE(rc))
577 {
578 Log(("iemSvmVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
579 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
580 }
581
582 /*
583 * Copy segments from nested-guest VMCB state to the guest-CPU state.
584 *
585 * We do this here as we need to use the CS attributes and it's easier this way
586 * than using the VMCB format selectors. It doesn't really matter where we copy
587 * the state, we restore the guest-CPU context state on the \#VMEXIT anyway.
588 */
589 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, ES, es);
590 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, CS, cs);
591 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, SS, ss);
592 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, DS, ds);
593
594 /** @todo Segment attribute overrides by VMRUN. */
595
596 /*
597 * CPL adjustments and overrides.
598 *
599 * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().
600 * We shall thus adjust both CS.DPL and SS.DPL here.
601 */
602 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = pVmcbNstGst->u8CPL;
603 if (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(pVCpu)))
604 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 3;
605 if (CPUMIsGuestInRealModeEx(IEM_GET_CTX(pVCpu)))
606 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 0;
607 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
608
609 /*
610 * Continue validating guest-state and controls.
611 *
612 * We pass CR0 as 0 to CPUMIsGuestEferMsrWriteValid() below to skip the illegal
613 * EFER.LME bit transition check. We pass the nested-guest's EFER as both the
614 * old and new EFER value to not have any guest EFER bits influence the new
615 * nested-guest EFER.
616 */
617 uint64_t uValidEfer;
618 rc = CPUMIsGuestEferMsrWriteValid(pVM, 0 /* CR0 */, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
619 if (RT_FAILURE(rc))
620 {
621 Log(("iemSvmVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64EFER));
622 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
623 }
624
625 /* Validate paging and CPU mode bits. */
626 bool const fSvm = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
627 bool const fLongModeSupported = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
628 bool const fLongModeEnabled = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
629 bool const fPaging = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PG);
630 bool const fPae = RT_BOOL(pVmcbNstGst->u64CR4 & X86_CR4_PAE);
631 bool const fProtMode = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PE);
632 bool const fLongModeWithPaging = fLongModeEnabled && fPaging;
633 bool const fLongModeConformCS = pVCpu->cpum.GstCtx.cs.Attr.n.u1Long && pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig;
634 /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */
635 if (fLongModeWithPaging)
636 uValidEfer |= MSR_K6_EFER_LMA;
637 bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
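/* The checks below mirror the AMD spec's VMRUN canonicalization and consistency checks:
   EFER.SVME must be set, long mode must not be enabled on a CPU without it, EFER.LME
   with CR0.PG requires CR4.PAE and CR0.PE, and CS.L and CS.D must not both be set while
   long mode is active with paging. */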
638 if ( !fSvm
639 || (!fLongModeSupported && fLongModeActiveOrEnabled)
640 || (fLongModeWithPaging && !fPae)
641 || (fLongModeWithPaging && !fProtMode)
642 || ( fLongModeEnabled
643 && fPaging
644 && fPae
645 && fLongModeConformCS))
646 {
647 Log(("iemSvmVmrun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));
648 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
649 }
650
651 /*
652 * Preserve the required force-flags.
653 *
654 * We only preserve the force-flags that would affect the execution of the
655 * nested-guest (or the guest).
656 *
657 * - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
658 * execution of a subsequent IRET instruction in the guest.
659 *
660 * The remaining FFs (e.g. timers) can stay in place so that we will be able to
661 * generate interrupts that should cause #VMEXITs for the nested-guest.
662 *
663 * VMRUN has implicit GIF (Global Interrupt Flag) handling, we don't need to
664 * preserve VMCPU_FF_INHIBIT_INTERRUPTS.
665 */
666 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
667 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
668
669 /*
670 * Pause filter.
671 */
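/* Rough sketch of the intended semantics (see iemCImpl_svm_pause below): each guest
   PAUSE decrements cPauseFilter and only a zero count triggers the PAUSE intercept;
   with the threshold feature the count is reloaded whenever consecutive PAUSEs are
   spaced further apart (in TSC ticks) than the threshold. */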
672 if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilter)
673 {
674 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = pVmcbCtrl->u16PauseFilterCount;
675 if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilterThreshold)
676 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold = pVmcbCtrl->u16PauseFilterThreshold;
677 }
678
679 /*
680 * Interrupt shadow.
681 */
682 if (pVmcbCtrl->IntShadow.n.u1IntShadow)
683 {
684 LogFlow(("iemSvmVmrun: setting interrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGst->u64RIP));
685 /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
686 EMSetInhibitInterruptsPC(pVCpu, pVmcbNstGst->u64RIP);
687 }
688
689 /*
690 * TLB flush control.
691 * Currently disabled since it's redundant as we unconditionally flush the TLB
692 * in iemSvmWorldSwitch() below.
693 */
694# if 0
695 /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
696 if ( pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
697 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
698 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
699 PGMFlushTLB(pVCpu, pVmcbNstGst->u64CR3, true /* fGlobal */);
700# endif
701
702 /*
703 * Copy the remaining guest state from the VMCB to the guest-CPU context.
704 */
705 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcbNstGst->GDTR.u32Limit;
706 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcbNstGst->GDTR.u64Base;
707 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcbNstGst->IDTR.u32Limit;
708 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcbNstGst->IDTR.u64Base;
709 CPUMSetGuestCR0(pVCpu, pVmcbNstGst->u64CR0);
710 CPUMSetGuestCR4(pVCpu, pVmcbNstGst->u64CR4);
711 pVCpu->cpum.GstCtx.cr3 = pVmcbNstGst->u64CR3;
712 pVCpu->cpum.GstCtx.cr2 = pVmcbNstGst->u64CR2;
713 pVCpu->cpum.GstCtx.dr[6] = pVmcbNstGst->u64DR6;
714 pVCpu->cpum.GstCtx.dr[7] = pVmcbNstGst->u64DR7;
715 pVCpu->cpum.GstCtx.rflags.u64 = pVmcbNstGst->u64RFlags;
716 pVCpu->cpum.GstCtx.rax = pVmcbNstGst->u64RAX;
717 pVCpu->cpum.GstCtx.rsp = pVmcbNstGst->u64RSP;
718 pVCpu->cpum.GstCtx.rip = pVmcbNstGst->u64RIP;
719 CPUMSetGuestEferMsrNoChecks(pVCpu, pVCpu->cpum.GstCtx.msrEFER, uValidEfer);
720 if (pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging)
721 pVCpu->cpum.GstCtx.msrPAT = pVmcbNstGst->u64PAT;
722
723 /* Mask DR6, DR7 bits mandatory set/clear bits. */
724 pVCpu->cpum.GstCtx.dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
725 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_RA1_MASK;
726 pVCpu->cpum.GstCtx.dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
727 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK;
728
729 /*
730 * Check for pending virtual interrupts.
731 */
732 if (pVmcbCtrl->IntCtrl.n.u1VIrqPending)
733 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
734 else
735 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
736
737 /*
738 * Update PGM, IEM and others of a world-switch.
739 */
740 VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu);
741 if (rcStrict == VINF_SUCCESS)
742 { /* likely */ }
743 else if (RT_SUCCESS(rcStrict))
744 {
745 LogFlow(("iemSvmVmrun: iemSvmWorldSwitch returned %Rrc, setting passup status\n", VBOXSTRICTRC_VAL(rcStrict)));
746 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
747 }
748 else
749 {
750 LogFlow(("iemSvmVmrun: iemSvmWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
751 return rcStrict;
752 }
753
754 /*
755 * Set the global interrupt flag (GIF) to allow interrupts in the nested-guest.
756 */
757 pVCpu->cpum.GstCtx.hwvirt.fGif = true;
758
759 /*
760 * Event injection.
761 */
762 PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;
763 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid;
764 if (pEventInject->n.u1Valid)
765 {
766 uint8_t const uVector = pEventInject->n.u8Vector;
767 TRPMEVENT const enmType = HMSvmEventToTrpmEventType(pEventInject);
768 uint16_t const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;
769
770 /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */
771 if (RT_UNLIKELY(enmType == TRPM_32BIT_HACK))
772 {
773 Log(("iemSvmVmrun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));
774 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
775 }
776 if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)
777 {
778 if ( uVector == X86_XCPT_NMI
779 || uVector > X86_XCPT_LAST)
780 {
781 Log(("iemSvmVmrun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));
782 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
783 }
784 if ( uVector == X86_XCPT_BR
785 && CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
786 {
787 Log(("iemSvmVmrun: Cannot inject #BR when not in long mode -> #VMEXIT\n"));
788 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
789 }
790 /** @todo any others? */
791 }
792
793 /*
794 * Invalidate the exit interrupt-information field here. This field is fully updated
795 * on #VMEXIT as events other than the one below can also cause intercepts during
796 * their injection (e.g. exceptions).
797 */
798 pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;
799
800 /*
801 * Clear the event injection valid bit here. While the AMD spec. mentions that the CPU
802 * clears this bit from the VMCB unconditionally on #VMEXIT, internally the CPU could be
803 * clearing it at any time, most likely before/after injecting the event. Since VirtualBox
804 * doesn't have any virtual-CPU internal representation of this bit, we clear/update the
805 * VMCB here. This also has the added benefit that we avoid the risk of injecting the event
806 * twice if we fallback to executing the nested-guest using hardware-assisted SVM after
807 * injecting the event through IEM here.
808 */
809 pVmcbCtrl->EventInject.n.u1Valid = 0;
810
811 /** @todo NRIP: Software interrupts can only be pushed properly if we support
812 * NRIP for the nested-guest to calculate the instruction length
813 * below. */
814 LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 vec=%#x type=%d uErr=%u cr2=%#RX64 cr3=%#RX64 efer=%#RX64\n",
815 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uVector, enmType, uErrorCode, pVCpu->cpum.GstCtx.cr2,
816 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.msrEFER));
817
818 /*
819 * We shall not inject the event here right away. There may be paging mode related updates
820 * as a result of the world-switch above that are yet to be honored. Instead flag the event
821 * as pending for injection.
822 */
823 TRPMAssertTrap(pVCpu, uVector, enmType);
824 if (pEventInject->n.u1ErrorCodeValid)
825 TRPMSetErrorCode(pVCpu, uErrorCode);
826 if ( enmType == TRPM_TRAP
827 && uVector == X86_XCPT_PF)
828 TRPMSetFaultAddress(pVCpu, pVCpu->cpum.GstCtx.cr2);
829 }
830 else
831 LogFlow(("iemSvmVmrun: Entering nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n",
832 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3,
833 pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER, pVCpu->cpum.GstCtx.rflags.u64));
834
835 LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict)));
836
837# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
838 /* If CLGI/STGI isn't intercepted we force IEM-only nested-guest execution here. */
839 if (HMSvmIsVGifActive(pVM))
840 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
841# endif
842
843 return rcStrict;
844 }
845
846 /* Shouldn't really happen as the caller should've validated the physical address already. */
847 Log(("iemSvmVmrun: Failed to read nested-guest VMCB at %#RGp (rc=%Rrc) -> #VMEXIT\n", GCPhysVmcb, rc));
848 return rc;
849}
850
851
852/**
853 * Checks if the event intercepts and performs the \#VMEXIT if the corresponding
854 * intercept is active.
855 *
856 * @returns Strict VBox status code.
857 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
858 * we're not executing a nested-guest.
859 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
860 * successfully.
861 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
862 * failed and a shutdown needs to be initiated for the guest.
863 *
864 * @returns VBox strict status code.
865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
866 * @param u8Vector The interrupt or exception vector.
867 * @param fFlags The exception flags (see IEM_XCPT_FLAGS_XXX).
868 * @param uErr The error-code associated with the exception.
869 * @param uCr2 The CR2 value in case of a \#PF exception.
870 */
871IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2)
872{
873 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
874
875 /*
876 * Handle SVM exception and software interrupt intercepts, see AMD spec. 15.12 "Exception Intercepts".
877 *
878 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_XCPT_2 #VMEXITs.
879 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
880 * even when they use a vector in the range 0 to 31.
881 * - ICEBP should not trigger #DB intercept, but its own intercept.
882 * - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
883 */
884 /* Check NMI intercept */
885 if ( u8Vector == X86_XCPT_NMI
886 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
887 && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
888 {
889 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
890 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
891 }
892
893 /* Check ICEBP intercept. */
894 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
895 && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
896 {
897 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
898 IEM_SVM_UPDATE_NRIP(pVCpu);
899 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
900 }
901
902 /* Check CPU exception intercepts. */
903 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
904 && IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
905 {
906 Assert(u8Vector <= X86_XCPT_LAST);
907 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
908 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
909 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists
910 && u8Vector == X86_XCPT_PF
911 && !(uErr & X86_TRAP_PF_ID))
912 {
913 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
914# ifdef IEM_WITH_CODE_TLB
915 uint8_t const *pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
916 uint8_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
917 pVmcbCtrl->cbInstrFetched = RT_MIN(cbInstrBuf, SVM_CTRL_GUEST_INSTR_BYTES_MAX);
918 if ( pbInstrBuf
919 && cbInstrBuf > 0)
920 memcpy(&pVmcbCtrl->abInstr[0], pbInstrBuf, pVmcbCtrl->cbInstrFetched);
921# else
922 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
923 pVmcbCtrl->cbInstrFetched = RT_MIN(cbOpcode, SVM_CTRL_GUEST_INSTR_BYTES_MAX);
924 if (cbOpcode > 0)
925 memcpy(&pVmcbCtrl->abInstr[0], &pVCpu->iem.s.abOpcode[0], pVmcbCtrl->cbInstrFetched);
926# endif
927 }
928 if (u8Vector == X86_XCPT_BR)
929 IEM_SVM_UPDATE_NRIP(pVCpu);
930 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept u32InterceptXcpt=%#RX32 u8Vector=%#x "
931 "uExitInfo1=%#RX64 uExitInfo2=%#RX64 -> #VMEXIT\n", pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u32InterceptXcpt,
932 u8Vector, uExitInfo1, uExitInfo2));
933 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_0 + u8Vector, uExitInfo1, uExitInfo2);
934 }
935
936 /* Check software interrupt (INTn) intercepts. */
937 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
938 | IEM_XCPT_FLAGS_BP_INSTR
939 | IEM_XCPT_FLAGS_ICEBP_INSTR
940 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
941 && IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
942 {
943 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? u8Vector : 0;
944 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
945 IEM_SVM_UPDATE_NRIP(pVCpu);
946 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
947 }
948
949 return VINF_SVM_INTERCEPT_NOT_ACTIVE;
950}
951
952
953/**
954 * Checks the SVM IO permission bitmap and performs the \#VMEXIT if the
955 * corresponding intercept is active.
956 *
957 * @returns Strict VBox status code.
958 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or
959 * we're not executing a nested-guest.
960 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
961 * successfully.
962 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
963 * failed and a shutdown needs to be initiated for the guest.
964 *
965 * @returns VBox strict status code.
966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
967 * @param u16Port The IO port being accessed.
968 * @param enmIoType The type of IO access.
969 * @param cbReg The IO operand size in bytes.
970 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
971 * @param iEffSeg The effective segment number.
972 * @param fRep Whether this is a repeating IO instruction (REP prefix).
973 * @param fStrIo Whether this is a string IO instruction.
974 * @param cbInstr The length of the IO instruction in bytes.
975 */
976IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
977 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
978{
979 Assert(IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
980 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
981 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
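/* Per the AMD spec the IOPM is consulted for each byte of the access, i.e. the bits for
   ports u16Port through u16Port + cbReg - 1; the access is intercepted if any of them
   is set, which is what HMSvmIsIOInterceptActive() is expected to implement. */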
982
983 Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u)\n", u16Port, u16Port));
984
985 SVMIOIOEXITINFO IoExitInfo;
986 void *pvIoBitmap = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap);
987 bool const fIntercept = HMSvmIsIOInterceptActive(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep,
988 fStrIo, &IoExitInfo);
989 if (fIntercept)
990 {
991 Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u) -> #VMEXIT\n", u16Port, u16Port));
992 IEM_SVM_UPDATE_NRIP(pVCpu);
993 return iemSvmVmexit(pVCpu, SVM_EXIT_IOIO, IoExitInfo.u, pVCpu->cpum.GstCtx.rip + cbInstr);
994 }
995
996 /** @todo remove later (for debugging as VirtualBox always traps all IO
997 * intercepts). */
998 AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
999 return VINF_SVM_INTERCEPT_NOT_ACTIVE;
1000}
1001
1002
1003/**
1004 * Checks the SVM MSR permission bitmap and performs the \#VMEXIT if the
1005 * corresponding intercept is active.
1006 *
1007 * @returns Strict VBox status code.
1008 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not
1009 * specify interception of the accessed MSR @a idMsr.
1010 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred
1011 * successfully.
1012 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT
1013 * failed and a shutdown needs to be initiated for the guest.
1014 *
1015 * @param pVCpu The cross context virtual CPU structure.
1016 * @param idMsr The MSR being accessed in the nested-guest.
1017 * @param fWrite Whether this is an MSR write access, @c false implies an
1018 * MSR read.
1020 */
1021IEM_STATIC VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPU pVCpu, uint32_t idMsr, bool fWrite)
1022{
1023 /*
1024 * Check if any MSRs are being intercepted.
1025 */
1026 Assert(CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_MSR_PROT));
1027 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
1028
1029 uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;
1030
1031 /*
1032 * Get the byte and bit offset of the permission bits corresponding to the MSR.
1033 */
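/*
 * Illustrative sketch only: the MSRPM uses two intercept bits per MSR (the lower bit
 * for reads, the next for writes). For the low MSR range 0x00000000..0x00001FFF the
 * lookup reduces to roughly:
 *
 *     offMsrpm  = (idMsr * 2) / 8;    // four MSRs per byte
 *     uMsrpmBit = (idMsr * 2) % 8;    // 0, 2, 4 or 6, matching the Assert below
 *
 * HMSvmGetMsrpmOffsetAndBit() additionally maps the 0xC0000000 and 0xC0010000 ranges
 * onto the second and third 2K chunks of the bitmap.
 */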
1034 uint16_t offMsrpm;
1035 uint8_t uMsrpmBit;
1036 int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
1037 if (RT_SUCCESS(rc))
1038 {
1039 Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
1040 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
1041 if (fWrite)
1042 ++uMsrpmBit;
1043
1044 /*
1045 * Check if the bit is set, if so, trigger a #VMEXIT.
1046 */
1047 uint8_t *pbMsrpm = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
1048 pbMsrpm += offMsrpm;
1049 if (*pbMsrpm & RT_BIT(uMsrpmBit))
1050 {
1051 IEM_SVM_UPDATE_NRIP(pVCpu);
1052 return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
1053 }
1054 }
1055 else
1056 {
1057 /*
1058 * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it.
1059 */
1060 Log(("iemSvmHandleMsrIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool -> #VMEXIT\n", idMsr, fWrite));
1061 return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
1062 }
1063 return VINF_SVM_INTERCEPT_NOT_ACTIVE;
1064}
1065
1066
1067
1068/**
1069 * Implements 'VMRUN'.
1070 */
1071IEM_CIMPL_DEF_0(iemCImpl_vmrun)
1072{
1073# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1074 RT_NOREF2(pVCpu, cbInstr);
1075 return VINF_EM_RAW_EMULATE_INSTR;
1076# else
1077 LogFlow(("iemCImpl_vmrun\n"));
1078 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
1079
1080 /** @todo Check effective address size using address size prefix. */
1081 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1082 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1083 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1084 {
1085 Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1086 return iemRaiseGeneralProtectionFault0(pVCpu);
1087 }
1088
1089 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
1090 {
1091 Log(("vmrun: Guest intercept -> #VMEXIT\n"));
1092 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1093 }
1094
1095 VBOXSTRICTRC rcStrict = iemSvmVmrun(pVCpu, cbInstr, GCPhysVmcb);
1096 if (rcStrict == VERR_SVM_VMEXIT_FAILED)
1097 {
1098 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
1099 rcStrict = VINF_EM_TRIPLE_FAULT;
1100 }
1101 return rcStrict;
1102# endif
1103}
1104
1105
1106/**
1107 * Implements 'VMLOAD'.
1108 */
1109IEM_CIMPL_DEF_0(iemCImpl_vmload)
1110{
1111# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1112 RT_NOREF2(pVCpu, cbInstr);
1113 return VINF_EM_RAW_EMULATE_INSTR;
1114# else
1115 LogFlow(("iemCImpl_vmload\n"));
1116 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
1117
1118 /** @todo Check effective address size using address size prefix. */
1119 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1120 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1121 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1122 {
1123 Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1124 return iemRaiseGeneralProtectionFault0(pVCpu);
1125 }
1126
1127 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
1128 {
1129 Log(("vmload: Guest intercept -> #VMEXIT\n"));
1130 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1131 }
1132
1133 SVMVMCBSTATESAVE VmcbNstGst;
1134 VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
1135 sizeof(SVMVMCBSTATESAVE));
1136 if (rcStrict == VINF_SUCCESS)
1137 {
1138 LogFlow(("vmload: Loading VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
1139 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
1140 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
1141 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
1142 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
1143
1144 pVCpu->cpum.GstCtx.msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
1145 pVCpu->cpum.GstCtx.msrSTAR = VmcbNstGst.u64STAR;
1146 pVCpu->cpum.GstCtx.msrLSTAR = VmcbNstGst.u64LSTAR;
1147 pVCpu->cpum.GstCtx.msrCSTAR = VmcbNstGst.u64CSTAR;
1148 pVCpu->cpum.GstCtx.msrSFMASK = VmcbNstGst.u64SFMASK;
1149
1150 pVCpu->cpum.GstCtx.SysEnter.cs = VmcbNstGst.u64SysEnterCS;
1151 pVCpu->cpum.GstCtx.SysEnter.esp = VmcbNstGst.u64SysEnterESP;
1152 pVCpu->cpum.GstCtx.SysEnter.eip = VmcbNstGst.u64SysEnterEIP;
1153
1154 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1155 }
1156 return rcStrict;
1157# endif
1158}
1159
1160
1161/**
1162 * Implements 'VMSAVE'.
1163 */
1164IEM_CIMPL_DEF_0(iemCImpl_vmsave)
1165{
1166# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1167 RT_NOREF2(pVCpu, cbInstr);
1168 return VINF_EM_RAW_EMULATE_INSTR;
1169# else
1170 LogFlow(("iemCImpl_vmsave\n"));
1171 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
1172
1173 /** @todo Check effective address size using address size prefix. */
1174 RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1175 if ( (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
1176 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
1177 {
1178 Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
1179 return iemRaiseGeneralProtectionFault0(pVCpu);
1180 }
1181
1182 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
1183 {
1184 Log(("vmsave: Guest intercept -> #VMEXIT\n"));
1185 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1186 }
1187
1188 SVMVMCBSTATESAVE VmcbNstGst;
1189 VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
1190 sizeof(SVMVMCBSTATESAVE));
1191 if (rcStrict == VINF_SUCCESS)
1192 {
1193 LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
1194 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR
1195 | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS);
1196
1197 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
1198 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
1199 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
1200 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
1201
1202 VmcbNstGst.u64KernelGSBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
1203 VmcbNstGst.u64STAR = pVCpu->cpum.GstCtx.msrSTAR;
1204 VmcbNstGst.u64LSTAR = pVCpu->cpum.GstCtx.msrLSTAR;
1205 VmcbNstGst.u64CSTAR = pVCpu->cpum.GstCtx.msrCSTAR;
1206 VmcbNstGst.u64SFMASK = pVCpu->cpum.GstCtx.msrSFMASK;
1207
1208 VmcbNstGst.u64SysEnterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1209 VmcbNstGst.u64SysEnterESP = pVCpu->cpum.GstCtx.SysEnter.esp;
1210 VmcbNstGst.u64SysEnterEIP = pVCpu->cpum.GstCtx.SysEnter.eip;
1211
1212 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest), &VmcbNstGst,
1213 sizeof(SVMVMCBSTATESAVE));
1214 if (rcStrict == VINF_SUCCESS)
1215 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1216 }
1217 return rcStrict;
1218# endif
1219}
1220
1221
1222/**
1223 * Implements 'CLGI'.
1224 */
1225IEM_CIMPL_DEF_0(iemCImpl_clgi)
1226{
1227# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1228 RT_NOREF2(pVCpu, cbInstr);
1229 return VINF_EM_RAW_EMULATE_INSTR;
1230# else
1231 LogFlow(("iemCImpl_clgi\n"));
1232 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
1233 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
1234 {
1235 Log(("clgi: Guest intercept -> #VMEXIT\n"));
1236 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1237 }
1238
1239 pVCpu->cpum.GstCtx.hwvirt.fGif = false;
1240 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1241
1242# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1243 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
1244# else
1245 return VINF_SUCCESS;
1246# endif
1247# endif
1248}
1249
1250
1251/**
1252 * Implements 'STGI'.
1253 */
1254IEM_CIMPL_DEF_0(iemCImpl_stgi)
1255{
1256# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1257 RT_NOREF2(pVCpu, cbInstr);
1258 return VINF_EM_RAW_EMULATE_INSTR;
1259# else
1260 LogFlow(("iemCImpl_stgi\n"));
1261 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
1262 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
1263 {
1264 Log2(("stgi: Guest intercept -> #VMEXIT\n"));
1265 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1266 }
1267
1268 pVCpu->cpum.GstCtx.hwvirt.fGif = true;
1269 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1270
1271# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1272 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
1273# else
1274 return VINF_SUCCESS;
1275# endif
1276# endif
1277}
1278
1279
1280/**
1281 * Implements 'INVLPGA'.
1282 */
1283IEM_CIMPL_DEF_0(iemCImpl_invlpga)
1284{
1285 /** @todo Check effective address size using address size prefix. */
1286 RTGCPTR const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
1287 /** @todo PGM needs virtual ASID support. */
1288# if 0
1289 uint32_t const uAsid = pVCpu->cpum.GstCtx.ecx;
1290# endif
1291
1292 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
1293 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
1294 {
1295 Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
1296 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1297 }
1298
1299 PGMInvalidatePage(pVCpu, GCPtrPage);
1300 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1301 return VINF_SUCCESS;
1302}
1303
1304
1305/**
1306 * Implements 'SKINIT'.
1307 */
1308IEM_CIMPL_DEF_0(iemCImpl_skinit)
1309{
1310 IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, skinit);
1311
1312 uint32_t uIgnore;
1313 uint32_t fFeaturesECX;
1314 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0 /* iSubLeaf */, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
1315 if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
1316 return iemRaiseUndefinedOpcode(pVCpu);
1317
1318 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
1319 {
1320 Log2(("skinit: Guest intercept -> #VMEXIT\n"));
1321 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1322 }
1323
1324 RT_NOREF(cbInstr);
1325 return VERR_IEM_INSTR_NOT_IMPLEMENTED;
1326}
1327
1328
1329/**
1330 * Implements SVM's implementation of PAUSE.
1331 */
1332IEM_CIMPL_DEF_0(iemCImpl_svm_pause)
1333{
1334 bool fCheckIntercept = true;
1335 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter)
1336 {
1337 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
1338
1339 /* TSC based pause-filter thresholding. */
1340 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold
1341 && pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold > 0)
1342 {
1343 uint64_t const uTick = TMCpuTickGet(pVCpu);
1344 if (uTick - pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick > pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold)
1345 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = CPUMGetGuestSvmPauseFilterCount(pVCpu, IEM_GET_CTX(pVCpu));
1346 pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick = uTick;
1347 }
1348
1349 /* Simple pause-filter counter. */
1350 if (pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter > 0)
1351 {
1352 --pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter;
1353 fCheckIntercept = false;
1354 }
1355 }
1356
1357 if (fCheckIntercept)
1358 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
1359
1360 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1361 return VINF_SUCCESS;
1362}
1363
1364#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
1365
1366/**
1367 * Common code for iemCImpl_vmmcall and iemCImpl_vmcall (latter in IEMAllCImplVmxInstr.cpp.h).
1368 */
1369IEM_CIMPL_DEF_1(iemCImpl_Hypercall, uint16_t, uDisOpcode)
1370{
1371 if (EMAreHypercallInstructionsEnabled(pVCpu))
1372 {
1373 NOREF(uDisOpcode);
1374 VBOXSTRICTRC rcStrict = GIMHypercallEx(pVCpu, IEM_GET_CTX(pVCpu), uDisOpcode, cbInstr);
1375 if (RT_SUCCESS(rcStrict))
1376 {
1377 if (rcStrict == VINF_SUCCESS)
1378 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1379 if ( rcStrict == VINF_SUCCESS
1380 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
1381 return VINF_SUCCESS;
1382 AssertMsgReturn(rcStrict == VINF_GIM_R3_HYPERCALL, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IEM_IPE_4);
1383 return rcStrict;
1384 }
1385 AssertMsgReturn( rcStrict == VERR_GIM_HYPERCALL_ACCESS_DENIED
1386 || rcStrict == VERR_GIM_HYPERCALLS_NOT_AVAILABLE
1387 || rcStrict == VERR_GIM_NOT_ENABLED
1388 || rcStrict == VERR_GIM_HYPERCALL_MEMORY_READ_FAILED
1389 || rcStrict == VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED,
1390 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IEM_IPE_4);
1391
1392 /* Raise #UD on all failures. */
1393 }
1394 return iemRaiseUndefinedOpcode(pVCpu);
1395}
1396
1397
1398/**
1399 * Implements 'VMMCALL'.
1400 */
1401IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
1402{
1403 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
1404 {
1405 Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
1406 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1407 }
1408
1409#ifndef IN_RC
1410 /* This is a little bit more complicated than the VT-x version because HM/SVM may
1411 patch MOV CR8 instructions to speed up APIC.TPR access for 32-bit Windows guests. */
1412 if (VM_IS_HM_ENABLED(pVCpu->CTX_SUFF(pVM)))
1413 {
1414 int rc = HMHCSvmMaybeMovTprHypercall(pVCpu);
1415 if (RT_SUCCESS(rc))
1416 {
1417 Log(("vmmcall: MovTrp\n"));
1418 return VINF_SUCCESS;
1419 }
1420 }
1421#endif
1422
1423 /* Join forces with vmcall. */
1424 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMMCALL);
1425}
1426