VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 73787

Last change on this file since 73787 was 73756, checked in by vboxsync, 7 years ago

VMM/IEM: Nested VMX: bugref:9180 VMCLEAR skeleton.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 38.8 KB
1/* $Id: IEMAllCImplVmxInstr.cpp.h 73756 2018-08-18 05:13:26Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/**
20 * Implements 'VMCALL'.
21 */
22IEM_CIMPL_DEF_0(iemCImpl_vmcall)
23{
24 /** @todo NSTVMX: intercept. */
25
26 /* Join forces with vmmcall. */
27 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
28}
29
30#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
31
32/**
33 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
34 * relative offsets.
35 */
36# ifdef IEM_WITH_CODE_TLB
37# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
38# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
39# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
40# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
41# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
42# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
43# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
44# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
45# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
46# else /* !IEM_WITH_CODE_TLB */
47# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
48 do \
49 { \
50 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
51 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
52 } while (0)
53
54# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
55
56# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
57 do \
58 { \
59 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
60 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
61 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
62 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
63 } while (0)
64
65# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
66 do \
67 { \
68 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
69 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
70 } while (0)
71
72# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
73 do \
74 { \
75 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
76 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
77 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
78 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
79 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
80 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
81 } while (0)
82
83# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
84 do \
85 { \
86 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
87 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
88 } while (0)
89
90# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
91 do \
92 { \
93 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
94 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
95 } while (0)
96
97# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
98 do \
99 { \
100 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
101 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
102 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
103 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
104 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
105 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
106 } while (0)
107# endif /* !IEM_WITH_CODE_TLB */
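
The non-TLB variants above simply pull bytes out of the decoded-opcode buffer and reassemble them little-endian (via RT_MAKE_U16 / RT_MAKE_U32_FROM_U8), with the S8/S32 variants adding sign extension. A minimal standalone sketch of that byte assembly, in plain C and not part of this file:

#include <stdint.h>
#include <stdio.h>

/* Assemble 16-bit and 32-bit little-endian displacements from raw opcode bytes. */
static uint16_t MakeU16(uint8_t bLo, uint8_t bHi)
{
    return (uint16_t)(bLo | ((uint16_t)bHi << 8));
}

static uint32_t MakeU32(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3)
{
    return (uint32_t)b0 | ((uint32_t)b1 << 8) | ((uint32_t)b2 << 16) | ((uint32_t)b3 << 24);
}

int main(void)
{
    uint8_t const abOpcode[] = { 0x34, 0x12, 0xf8, 0xff, 0xff, 0xff };
    printf("disp16=%#x\n", (unsigned)MakeU16(abOpcode[0], abOpcode[1]));             /* 0x1234 */
    /* An 8-bit displacement byte of 0xf8 sign-extends to 0xfffffff8, as IEM_DISP_GET_S8_SX_U32 does. */
    printf("disp8 sign-extended=%#x\n", (unsigned)(uint32_t)(int8_t)abOpcode[2]);    /* 0xfffffff8 */
    printf("disp32=%#x\n", (unsigned)MakeU32(abOpcode[2], abOpcode[3], abOpcode[4], abOpcode[5]));
    return 0;
}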
108
109/**
110 * Gets VM-exit instruction information along with any displacement for an
111 * instruction VM-exit.
112 *
113 * @returns The VM-exit instruction information.
114 * @param pVCpu The cross context virtual CPU structure.
115 * @param uExitReason The VM-exit reason.
116 * @param InstrId The VM-exit instruction identity (VMX_INSTR_ID_XXX) if
117 * any. Pass VMX_INSTR_ID_NONE otherwise.
118 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
119 * NULL.
120 */
121IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID InstrId, PRTGCPTR pGCPtrDisp)
122{
123 RTGCPTR GCPtrDisp;
124 VMXEXITINSTRINFO ExitInstrInfo;
125 ExitInstrInfo.u = 0;
126
127 /*
128 * Get and parse the ModR/M byte from our decoded opcodes.
129 */
130 uint8_t bRm;
131 uint8_t const offModRm = pVCpu->iem.s.offModRm;
132 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
133 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
134 {
135 /*
136 * ModR/M indicates register addressing.
137 */
138 ExitInstrInfo.All.u2Scaling = 0;
139 ExitInstrInfo.All.iReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
140 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
141 ExitInstrInfo.All.fIsRegOperand = 1;
142 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
143 ExitInstrInfo.All.iSegReg = 0;
144 ExitInstrInfo.All.iIdxReg = 0;
145 ExitInstrInfo.All.fIdxRegInvalid = 1;
146 ExitInstrInfo.All.iBaseReg = 0;
147 ExitInstrInfo.All.fBaseRegInvalid = 1;
148 ExitInstrInfo.All.iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
149
150 /* Displacement not applicable for register addressing. */
151 GCPtrDisp = 0;
152 }
153 else
154 {
155 /*
156 * ModR/M indicates memory addressing.
157 */
158 uint8_t uScale = 0;
159 bool fBaseRegValid = false;
160 bool fIdxRegValid = false;
161 uint8_t iBaseReg = 0;
162 uint8_t iIdxReg = 0;
163 uint8_t iReg2 = 0;
164 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
165 {
166 /*
167 * Parse the ModR/M, displacement for 16-bit addressing mode.
168 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
169 */
170 uint16_t u16Disp = 0;
171 uint8_t const offDisp = offModRm + sizeof(bRm);
172 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
173 {
174 /* Displacement without any registers. */
175 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
176 }
177 else
178 {
179 /* Register (index and base). */
180 switch (bRm & X86_MODRM_RM_MASK)
181 {
182 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
183 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
184 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
185 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
186 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
187 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
188 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
189 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
190 }
191
192 /* Register + displacement. */
193 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
194 {
195 case 0: break;
196 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
197 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
198 default:
199 {
200 /* Register addressing, handled at the beginning. */
201 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
202 break;
203 }
204 }
205 }
206
207 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
208 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
209 iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
210 }
211 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
212 {
213 /*
214 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
215 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
216 */
217 uint32_t u32Disp = 0;
218 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
219 {
220 /* Displacement without any registers. */
221 uint8_t const offDisp = offModRm + sizeof(bRm);
222 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
223 }
224 else
225 {
226 /* Register (and perhaps scale, index and base). */
227 uint8_t offDisp = offModRm + sizeof(bRm);
228 iBaseReg = (bRm & X86_MODRM_RM_MASK);
229 if (iBaseReg == 4)
230 {
231 /* An SIB byte follows the ModR/M byte, parse it. */
232 uint8_t bSib;
233 uint8_t const offSib = offModRm + sizeof(bRm);
234 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
235
236 /* A displacement may follow SIB, update its offset. */
237 offDisp += sizeof(bSib);
238
239 /* Get the scale. */
240 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
241
242 /* Get the index register. */
243 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
244 fIdxRegValid = RT_BOOL(iIdxReg != 4);
245
246 /* Get the base register. */
247 iBaseReg = bSib & X86_SIB_BASE_MASK;
248 fBaseRegValid = true;
249 if (iBaseReg == 5)
250 {
251 if ((bRm & X86_MODRM_MOD_MASK) == 0)
252 {
253 /* Mod being 0 implies a 32-bit displacement with no base. */
254 fBaseRegValid = false;
255 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
256 }
257 else
258 {
259 /* A non-zero Mod implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
260 iBaseReg = X86_GREG_xBP;
261 }
262 }
263 }
264
265 /* Register + displacement. */
266 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
267 {
268 case 0: /* Handled above */ break;
269 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
270 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
271 default:
272 {
273 /* Register addressing, handled at the beginning. */
274 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
275 break;
276 }
277 }
278 }
279
280 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
281 iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
282 }
283 else
284 {
285 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
286
287 /*
288 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
289 * See Intel instruction spec. 2.2 "IA-32e Mode".
290 */
291 uint64_t u64Disp = 0;
292 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
293 if (fRipRelativeAddr)
294 {
295 /*
296 * RIP-relative addressing mode.
297 *
298 * The displacement is 32-bit signed, implying an offset range of +/-2G.
299 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
300 */
301 uint8_t const offDisp = offModRm + sizeof(bRm);
302 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
303 }
304 else
305 {
306 uint8_t offDisp = offModRm + sizeof(bRm);
307
308 /*
309 * Register (and perhaps scale, index and base).
310 *
311 * REX.B extends the most-significant bit of the base register. However, REX.B
312 * is ignored while determining whether an SIB follows the opcode. Hence, we
313 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
314 *
315 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
316 */
317 iBaseReg = (bRm & X86_MODRM_RM_MASK);
318 if (iBaseReg == 4)
319 {
320 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
321 uint8_t bSib;
322 uint8_t const offSib = offModRm + sizeof(bRm);
323 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
324
325 /* Displacement may follow SIB, update its offset. */
326 offDisp += sizeof(bSib);
327
328 /* Get the scale. */
329 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
330
331 /* Get the index. */
332 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
333 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
334
335 /* Get the base. */
336 iBaseReg = (bSib & X86_SIB_BASE_MASK);
337 fBaseRegValid = true;
338 if (iBaseReg == 5)
339 {
340 if ((bRm & X86_MODRM_MOD_MASK) == 0)
341 {
342 /* Mod being 0 implies a signed 32-bit displacement with no base. */
343 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
344 }
345 else
346 {
347 /* A non-zero Mod implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
348 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
349 }
350 }
351 }
352 iBaseReg |= pVCpu->iem.s.uRexB;
353
354 /* Register + displacement. */
355 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
356 {
357 case 0: /* Handled above */ break;
358 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
359 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
360 default:
361 {
362 /* Register addressing, handled at the beginning. */
363 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
364 break;
365 }
366 }
367 }
368
369 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
370 iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
371 }
372
373 ExitInstrInfo.All.u2Scaling = uScale;
374 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory instructions. */
375 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
376 ExitInstrInfo.All.fIsRegOperand = 0;
377 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
378 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
379 ExitInstrInfo.All.iIdxReg = iIdxReg;
380 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
381 ExitInstrInfo.All.iBaseReg = iBaseReg;
382 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
383 ExitInstrInfo.All.iReg2 = iReg2;
384 }
385
386 /*
387 * Handle exceptions for certain instructions.
388 * (e.g. some instructions convey an instruction identity).
389 */
390 switch (uExitReason)
391 {
392 case VMX_EXIT_XDTR_ACCESS:
393 {
394 Assert(VMX_INSTR_ID_IS_VALID(InstrId));
395 ExitInstrInfo.GdtIdt.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
396 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
397 break;
398 }
399
400 case VMX_EXIT_TR_ACCESS:
401 {
402 Assert(VMX_INSTR_ID_IS_VALID(InstrId));
403 ExitInstrInfo.LdtTr.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
404 ExitInstrInfo.LdtTr.u2Undef0 = 0;
405 break;
406 }
407
408 case VMX_EXIT_RDRAND:
409 case VMX_EXIT_RDSEED:
410 {
411 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
412 break;
413 }
414 }
415
416 /* Update displacement and return the constructed VM-exit instruction information field. */
417 if (pGCPtrDisp)
418 *pGCPtrDisp = GCPtrDisp;
419 return ExitInstrInfo.u;
420}
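
For reference, the X86_MODRM_* and X86_SIB_* masks used by the function above slice out the standard x86 field layouts: ModR/M is mod[7:6] reg[5:3] rm[2:0], and SIB is scale[7:6] index[5:3] base[2:0]. A small standalone sketch with literal shifts instead of the VBox macros (plain C, not part of this file):

#include <stdio.h>

int main(void)
{
    unsigned const bRm  = 0x94;  /* mod=10b, reg=010b, rm=100b -> memory operand, SIB byte follows */
    unsigned const bSib = 0x88;  /* scale=10b (x4), index=001b, base=000b */

    unsigned const uMod    = (bRm  >> 6) & 0x3;
    unsigned const iReg    = (bRm  >> 3) & 0x7;
    unsigned const iRm     =  bRm        & 0x7;
    unsigned const uScale  = (bSib >> 6) & 0x3;
    unsigned const iIdxReg = (bSib >> 3) & 0x7;
    unsigned const iBase   =  bSib       & 0x7;

    printf("mod=%u reg=%u rm=%u | scale=x%u index=%u base=%u\n",
           uMod, iReg, iRm, 1u << uScale, iIdxReg, iBase);
    return 0;
}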
421
422
423/**
424 * Implements VMSucceed for VMX instruction success.
425 *
426 * @param pVCpu The cross context virtual CPU structure.
427 */
428DECLINLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
429{
430 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
431}
432
433
434/**
435 * Implements VMFailInvalid for VMX instruction failure.
436 *
437 * @param pVCpu The cross context virtual CPU structure.
438 */
439DECLINLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
440{
441 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
442 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
443}
444
445
446/**
447 * Implements VMFailValid for VMX instruction failure.
448 *
449 * @param pVCpu The cross context virtual CPU structure.
450 * @param enmInsErr The VM instruction error.
451 */
452DECLINLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
453{
454 if (pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
455 {
456 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
457 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
458 /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
459 RT_NOREF(enmInsErr);
460 }
461}
462
463
464/**
465 * Implements VMFail for VMX instruction failure.
466 *
467 * @param pVCpu The cross context virtual CPU structure.
468 * @param enmInsErr The VM instruction error.
469 */
470DECLINLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
471{
472 if (pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
473 {
474 iemVmxVmFailValid(pVCpu, enmInsErr);
475 /** @todo Set VM-instruction error field in the current virtual-VMCS. */
476 }
477 else
478 iemVmxVmFailInvalid(pVCpu);
479}
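
The four helpers above implement the standard VMX status conventions: VMsucceed clears all six arithmetic flags, VMfailInvalid sets only CF, and VMfailValid sets only ZF (with the error number going into the VM-instruction error field, still a todo above). A standalone sketch of just the flag arithmetic, using the architectural bit positions (plain C, not part of this file):

#include <stdint.h>
#include <stdio.h>

#define EFL_CF (1u << 0)
#define EFL_PF (1u << 2)
#define EFL_AF (1u << 4)
#define EFL_ZF (1u << 6)
#define EFL_SF (1u << 7)
#define EFL_OF (1u << 11)
#define EFL_STATUS_MASK (EFL_CF | EFL_PF | EFL_AF | EFL_ZF | EFL_SF | EFL_OF)

static uint32_t VmSucceed(uint32_t fEfl)     { return  fEfl & ~EFL_STATUS_MASK; }            /* all six cleared */
static uint32_t VmFailInvalid(uint32_t fEfl) { return (fEfl & ~EFL_STATUS_MASK) | EFL_CF; }  /* only CF set */
static uint32_t VmFailValid(uint32_t fEfl)   { return (fEfl & ~EFL_STATUS_MASK) | EFL_ZF; }  /* only ZF set */

int main(void)
{
    uint32_t const fEfl = 0x8d7; /* arbitrary incoming RFLAGS value */
    printf("succeed=%#x failInvalid=%#x failValid=%#x\n",
           VmSucceed(fEfl), VmFailInvalid(fEfl), VmFailValid(fEfl));
    return 0;
}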
480
481
482/**
483 * VMCLEAR instruction execution worker.
484 *
485 * @param pVCpu The cross context virtual CPU structure.
486 * @param cbInstr The instruction length.
487 * @param GCPtrVmcs The linear address of the VMCS pointer.
488 * @param pExitInstrInfo Pointer to the VM-exit instruction information field.
489 * @param GCPtrDisp The displacement field for @a GCPtrVmcs if any.
490 *
491 * @remarks Common VMX instruction checks are already expected to be done by the caller,
492 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
493 */
494IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmcs, PCVMXEXITINSTRINFO pExitInstrInfo,
495 RTGCPTR GCPtrDisp)
496{
497 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
498 {
499 RT_NOREF(GCPtrDisp);
500 /** @todo NSTVMX: intercept. */
501 }
502 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
503
504 /* CPL. */
505 if (CPUMGetGuestCPL(pVCpu) > 0)
506 {
507 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
508 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_Cpl;
509 return iemRaiseGeneralProtectionFault0(pVCpu);
510 }
511
512 /** @todo NSTVMX: VMCLEAR impl. */
513 RT_NOREF(GCPtrVmcs); RT_NOREF(pExitInstrInfo); RT_NOREF(cbInstr);
514 return VINF_SUCCESS;
515}
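
The "common VMX instruction checks" the remarks above expect the caller to perform are the #UD-level conditions from the SDM: being in VMX operation (except for VMXON), CR4.VMXE set, not real or virtual-8086 mode, and in IA-32e mode the instruction must execute from a 64-bit code segment (CS.L). A standalone sketch of that predicate (plain C, not part of this file; the struct and names are purely illustrative):

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    bool fCr4Vmxe;       /* CR4.VMXE */
    bool fRealOrV86Mode; /* CR0.PE clear or EFLAGS.VM set */
    bool fLongMode;      /* EFER.LMA */
    bool fCs64Bit;       /* CS.L (64-bit code segment) */
} CPUSTATE;

/* Returns true if the common VMX-instruction checks pass; otherwise the instruction raises #UD. */
static bool PassesCommonVmxChecks(CPUSTATE const *pState)
{
    if (!pState->fCr4Vmxe)
        return false;
    if (pState->fRealOrV86Mode)
        return false;
    if (pState->fLongMode && !pState->fCs64Bit)
        return false;   /* compatibility mode is not allowed */
    return true;
}

int main(void)
{
    CPUSTATE const Ok  = { true, false, true, true  };
    CPUSTATE const Bad = { true, false, true, false }; /* compatibility mode */
    printf("%d %d\n", PassesCommonVmxChecks(&Ok), PassesCommonVmxChecks(&Bad));
    return 0;
}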
516
517
518/**
519 * VMPTRST instruction execution worker.
520 *
521 * @param pVCpu The cross context virtual CPU structure.
522 * @param cbInstr The instruction length.
523 * @param GCPtrVmcs The linear address of where to store the current VMCS
524 * pointer.
525 * @param pExitInstrInfo Pointer to the VM-exit instruction information field.
526 * @param GCPtrDisp The displacement field for @a GCPtrVmcs if any.
527 *
528 * @remarks Common VMX instruction checks are already expected to be done by the caller,
529 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
530 */
531IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmcs, PCVMXEXITINSTRINFO pExitInstrInfo,
532 RTGCPTR GCPtrDisp)
533{
534 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
535 {
536 RT_NOREF(GCPtrDisp);
537 /** @todo NSTVMX: intercept. */
538 }
539 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
540
541 /* CPL. */
542 if (CPUMGetGuestCPL(pVCpu) > 0)
543 {
544 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
545 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_Cpl;
546 return iemRaiseGeneralProtectionFault0(pVCpu);
547 }
548
549 /* Set the VMCS pointer to the location specified by the destination memory operand. */
550 Assert(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
551 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, pExitInstrInfo->VmxXsave.iSegReg, GCPtrVmcs,
552 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs);
553 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
554 {
555 iemVmxVmSucceed(pVCpu);
556 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
557 return rcStrict;
558 }
559
560 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand, rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
561 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_PtrMap;
562 return rcStrict;
563}
564
565
566/**
567 * VMPTRLD instruction execution worker.
568 *
569 * @param pVCpu The cross context virtual CPU structure.
570 * @param cbInstr The instruction length.
571 * @param GCPtrVmcs The linear address of the current VMCS pointer.
572 * @param pExitInstrInfo Pointer to the VM-exit instruction information field.
573 * @param GCPtrDisp The displacement field for @a GCPtrVmcs if any.
574 *
575 * @remarks Common VMX instruction checks are already expected to be done by the caller,
576 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
577 */
578IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmcs, PCVMXEXITINSTRINFO pExitInstrInfo,
579 RTGCPTR GCPtrDisp)
580{
581 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
582 {
583 RT_NOREF(GCPtrDisp);
584 /** @todo NSTVMX: intercept. */
585 }
586 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
587
588 /* CPL. */
589 if (CPUMGetGuestCPL(pVCpu) > 0)
590 {
591 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
592 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Cpl;
593 return iemRaiseGeneralProtectionFault0(pVCpu);
594 }
595
596 /* Get the VMCS pointer from the location specified by the source memory operand. */
597 RTGCPHYS GCPhysVmcs;
598 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, pExitInstrInfo->VmxXsave.iSegReg, GCPtrVmcs);
599 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
600 {
601 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
602 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrMap;
603 return rcStrict;
604 }
605
606 /* VMCS pointer alignment. */
607 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
608 {
609 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
610 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAlign;
611 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
612 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
613 return VINF_SUCCESS;
614 }
615
616 /* VMCS physical-address width limits. */
617 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
618 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
619 {
620 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
621 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrWidth;
622 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
623 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
624 return VINF_SUCCESS;
625 }
626
627 /* VMCS is not the VMXON region. */
628 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
629 {
630 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
631 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrVmxon;
632 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
633 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
634 return VINF_SUCCESS;
635 }
636
637 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
638 restriction imposed by our implementation. */
639 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
640 {
641 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
642 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAbnormal;
643 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
644 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
645 return VINF_SUCCESS;
646 }
647
648 /* Read the VMCS revision ID from the VMCS. */
649 VMXVMCSREVID VmcsRevId;
650 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
651 if (RT_FAILURE(rc))
652 {
653 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
654 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrReadPhys;
655 return rc;
656 }
657
658 /* Verify the VMCS revision specified by the guest matches what we reported to the guest,
659 also check VMCS shadowing feature. */
660 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
661 || ( VmcsRevId.n.fIsShadowVmcs
662 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
663 {
664 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
665 {
666 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
667 VmcsRevId.n.u31RevisionId));
668 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_VmcsRevId;
669 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
670 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
671 return VINF_SUCCESS;
672 }
673
674 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
675 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_ShadowVmcs;
676 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
677 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
678 return VINF_SUCCESS;
679 }
680
681 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = GCPhysVmcs;
682 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Success;
683 iemVmxVmSucceed(pVCpu);
684 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
685 return VINF_SUCCESS;
686}
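
The pointer sanity checks VMPTRLD applies above (4 KiB alignment, the guest's physical-address width, and not being the VMXON region) boil down to a simple predicate. A standalone sketch (plain C, not part of this file; cMaxPhysAddrWidth stands in for the CPUID-reported width):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool IsVmcsPtrValid(uint64_t GCPhysVmcs, uint64_t GCPhysVmxon, unsigned cMaxPhysAddrWidth)
{
    if (GCPhysVmcs & 0xfff)               /* must be 4 KiB aligned */
        return false;
    if (GCPhysVmcs >> cMaxPhysAddrWidth)  /* must not exceed the physical-address width */
        return false;
    if (GCPhysVmcs == GCPhysVmxon)        /* must not be the VMXON region itself */
        return false;
    return true;
}

int main(void)
{
    printf("%d %d %d\n",
           IsVmcsPtrValid(0x12345000, 0x10000000, 36),   /* 1: aligned, in range, distinct */
           IsVmcsPtrValid(0x12345800, 0x10000000, 36),   /* 0: not page-aligned */
           IsVmcsPtrValid(0x10000000, 0x10000000, 36));  /* 0: same as the VMXON region */
    return 0;
}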
687
688
689/**
690 * VMXON instruction execution worker.
691 *
692 * @param pVCpu The cross context virtual CPU structure.
693 * @param cbInstr The instruction length.
694 * @param GCPtrVmxon The linear address of the VMXON pointer.
695 * @param pExitInstrInfo Pointer to the VM-exit instruction information field.
696 * @param GCPtrDisp The displacement field for @a GCPtrVmxon if any.
697 *
698 * @remarks Common VMX instruction checks are already expected to be done by the caller,
699 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
700 */
701IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, PCVMXEXITINSTRINFO pExitInstrInfo,
702 RTGCPTR GCPtrDisp)
703{
704#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
705 RT_NOREF5(pVCpu, cbInstr, GCPtrVmxon, pExitInstrInfo, GCPtrDisp);
706 return VINF_EM_RAW_EMULATE_INSTR;
707#else
708 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
709 {
710 /* CPL. */
711 if (pVCpu->iem.s.uCpl > 0)
712 {
713 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
714 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cpl;
715 return iemRaiseGeneralProtectionFault0(pVCpu);
716 }
717
718 /* A20M (A20 Masked) mode. */
719 if (!PGMPhysIsA20Enabled(pVCpu))
720 {
721 Log(("vmxon: A20M mode -> #GP(0)\n"));
722 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_A20M;
723 return iemRaiseGeneralProtectionFault0(pVCpu);
724 }
725
726 /* CR0 fixed bits. */
727 bool const fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
728 uint64_t const uCr0Fixed0 = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
729 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
730 {
731 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
732 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr0Fixed0;
733 return iemRaiseGeneralProtectionFault0(pVCpu);
734 }
735
736 /* CR4 fixed bits. */
737 if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
738 {
739 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
740 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr4Fixed0;
741 return iemRaiseGeneralProtectionFault0(pVCpu);
742 }
743
744 /* Feature control MSR's LOCK and VMXON bits. */
745 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
746 if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
747 {
748 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
749 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_MsrFeatCtl;
750 return iemRaiseGeneralProtectionFault0(pVCpu);
751 }
752
753 /* Get the VMXON pointer from the location specified by the source memory operand. */
754 RTGCPHYS GCPhysVmxon;
755 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, pExitInstrInfo->VmxXsave.iSegReg, GCPtrVmxon);
756 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
757 {
758 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
759 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrMap;
760 return rcStrict;
761 }
762
763 /* VMXON region pointer alignment. */
764 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
765 {
766 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
767 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAlign;
768 iemVmxVmFailInvalid(pVCpu);
769 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
770 return VINF_SUCCESS;
771 }
772
773 /* VMXON physical-address width limits. */
774 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
775 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
776 {
777 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
778 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrWidth;
779 iemVmxVmFailInvalid(pVCpu);
780 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
781 return VINF_SUCCESS;
782 }
783
784 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
785 restriction imposed by our implementation. */
786 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
787 {
788 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
789 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAbnormal;
790 iemVmxVmFailInvalid(pVCpu);
791 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
792 return VINF_SUCCESS;
793 }
794
795 /* Read the VMCS revision ID from the VMXON region. */
796 VMXVMCSREVID VmcsRevId;
797 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
798 if (RT_FAILURE(rc))
799 {
800 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
801 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrReadPhys;
802 return rc;
803 }
804
805 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
806 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
807 {
808 /* Revision ID mismatch. */
809 if (!VmcsRevId.n.fIsShadowVmcs)
810 {
811 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
812 VmcsRevId.n.u31RevisionId));
813 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmcsRevId;
814 iemVmxVmFailInvalid(pVCpu);
815 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
816 return VINF_SUCCESS;
817 }
818
819 /* Shadow VMCS disallowed. */
820 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
821 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_ShadowVmcs;
822 iemVmxVmFailInvalid(pVCpu);
823 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
824 return VINF_SUCCESS;
825 }
826
827 /*
828 * Record that we're in VMX operation, block INIT, block and disable A20M.
829 */
830 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
831 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS;
832 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
833 /** @todo NSTVMX: clear address-range monitoring. */
834 /** @todo NSTVMX: Intel PT. */
835 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Success;
836 iemVmxVmSucceed(pVCpu);
837 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
838# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
839 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
840# else
841 return VINF_SUCCESS;
842# endif
843 }
844 else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
845 {
846 RT_NOREF(GCPtrDisp);
847 /** @todo NSTVMX: intercept. */
848 }
849
850 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
851
852 /* CPL. */
853 if (pVCpu->iem.s.uCpl > 0)
854 {
855 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
856 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRootCpl;
857 return iemRaiseGeneralProtectionFault0(pVCpu);
858 }
859
860 /* VMXON when already in VMX root mode. */
861 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
862 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRoot;
863 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
864 return VINF_SUCCESS;
865#endif
866}
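
The CR0/CR4 checks in the VMXON path above test only the FIXED0 ("must be 1") half; architecturally there is also a FIXED1 ("must be 0 where FIXED1 is 0") half reported via the IA32_VMX_CR0_FIXED1/CR4_FIXED1 MSRs. A standalone sketch of the full rule (plain C, not part of this file; the MSR values below are examples only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Every bit set in uFixed0 must be set in uCr; every bit clear in uFixed1 must be clear in uCr. */
static bool IsCrValidForVmx(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
{
    return (uCr &  uFixed0) == uFixed0
        && (uCr & ~uFixed1) == 0;
}

int main(void)
{
    uint64_t const uCr0Fixed0 = UINT64_C(0x80000021);  /* example: PG, NE, PE required to be 1 */
    uint64_t const uCr0Fixed1 = UINT64_C(0xffffffff);  /* example: all low 32 bits allowed to be 1 */
    printf("CR0=%#x -> %d\n", 0x80000031, IsCrValidForVmx(UINT64_C(0x80000031), uCr0Fixed0, uCr0Fixed1)); /* 1 */
    printf("CR0=%#x -> %d\n", 0x00000031, IsCrValidForVmx(UINT64_C(0x00000031), uCr0Fixed0, uCr0Fixed1)); /* 0: PG not set */
    return 0;
}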
867
868
869/**
870 * Implements 'VMXON'.
871 */
872IEM_CIMPL_DEF_1(iemCImpl_vmxon, RTGCPTR, GCPtrVmxon)
873{
874 RTGCPTR GCPtrDisp;
875 VMXEXITINSTRINFO ExitInstrInfo;
876 ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMXON, VMX_INSTR_ID_NONE, &GCPtrDisp);
877 return iemVmxVmxon(pVCpu, cbInstr, GCPtrVmxon, &ExitInstrInfo, GCPtrDisp);
878}
879
880
881/**
882 * Implements 'VMXOFF'.
883 */
884IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
885{
886# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
887 RT_NOREF2(pVCpu, cbInstr);
888 return VINF_EM_RAW_EMULATE_INSTR;
889# else
890 IEM_VMX_INSTR_COMMON_CHECKS(pVCpu, "vmxoff", kVmxVInstrDiag_Vmxoff);
891 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
892 {
893 Log(("vmxoff: Not in VMX root mode -> #GP(0)\n"));
894 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_VmxRoot;
895 return iemRaiseUndefinedOpcode(pVCpu);
896 }
897
898 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
899 {
900 /** @todo NSTVMX: intercept. */
901 }
902
903 /* CPL. */
904 if (pVCpu->iem.s.uCpl > 0)
905 {
906 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
907 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Cpl;
908 return iemRaiseGeneralProtectionFault0(pVCpu);
909 }
910
911 /* Dual monitor treatment of SMIs and SMM. */
912 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
913 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
914 {
915 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
916 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
917 return VINF_SUCCESS;
918 }
919
920 /*
921 * Record that we're no longer in VMX root operation, unblock INIT, unblock and enable A20M.
922 */
923 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
924 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
925
926 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
927 { /** @todo NSTVMX: Unblock SMI. */ }
928 /** @todo NSTVMX: Unblock and enable A20M. */
929 /** @todo NSTVMX: Clear address-range monitoring. */
930
931 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Success;
932 iemVmxVmSucceed(pVCpu);
933 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
934# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
935 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
936# else
937 return VINF_SUCCESS;
938# endif
939# endif
940}
941
942
943/**
944 * Implements 'VMPTRLD'.
945 */
946IEM_CIMPL_DEF_1(iemCImpl_vmptrld, RTGCPTR, GCPtrVmcs)
947{
948 RTGCPTR GCPtrDisp;
949 VMXEXITINSTRINFO ExitInstrInfo;
950 ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMPTRLD, VMX_INSTR_ID_NONE, &GCPtrDisp);
951 return iemVmxVmptrld(pVCpu, cbInstr, GCPtrVmcs, &ExitInstrInfo, GCPtrDisp);
952}
953
954
955/**
956 * Implements 'VMPTRST'.
957 */
958IEM_CIMPL_DEF_1(iemCImpl_vmptrst, RTGCPTR, GCPtrVmcs)
959{
960 RTGCPTR GCPtrDisp;
961 VMXEXITINSTRINFO ExitInstrInfo;
962 ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMPTRST, VMX_INSTR_ID_NONE, &GCPtrDisp);
963 return iemVmxVmptrst(pVCpu, cbInstr, GCPtrVmcs, &ExitInstrInfo, GCPtrDisp);
964}
965
966
967/**
968 * Implements 'VMCLEAR'.
969 */
970IEM_CIMPL_DEF_1(iemCImpl_vmclear, RTGCPTR, GCPtrVmcs)
971{
972 RTGCPTR GCPtrDisp;
973 VMXEXITINSTRINFO ExitInstrInfo;
974 ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMCLEAR, VMX_INSTR_ID_NONE, &GCPtrDisp);
975 return iemVmxVmclear(pVCpu, cbInstr, GCPtrVmcs, &ExitInstrInfo, GCPtrDisp);
976}
977
978#endif
979