VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@73798

Last change on this file since 73798 was 73798, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 space nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 50.1 KB
1/* $Id: IEMAllCImplVmxInstr.cpp.h 73798 2018-08-21 06:38:18Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/**
20 * Implements 'VMCALL'.
21 */
22IEM_CIMPL_DEF_0(iemCImpl_vmcall)
23{
24 /** @todo NSTVMX: intercept. */
25
26 /* Join forces with vmmcall. */
27 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
28}
29
30#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
31
32/**
33 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
34 * relative offsets.
35 */
36# ifdef IEM_WITH_CODE_TLB
37# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
38# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
39# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
40# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
41# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
42# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
43# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
44# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
45# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
46# else /* !IEM_WITH_CODE_TLB */
47# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
48 do \
49 { \
50 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
51 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
52 } while (0)
53
54# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
55
56# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
57 do \
58 { \
59 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
60 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
61 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
62 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
63 } while (0)
64
65# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
66 do \
67 { \
68 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
69 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
70 } while (0)
71
72# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
73 do \
74 { \
75 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
76 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
77 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
78 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
79 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
80 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
81 } while (0)
82
83# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
84 do \
85 { \
86 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
87 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
88 } while (0)
89
90# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
91 do \
92 { \
93 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
94 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
95 } while (0)
96
97# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
98 do \
99 { \
100 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
101 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
102 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
103 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
104 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
105 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
106 } while (0)
107# endif /* !IEM_WITH_CODE_TLB */
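/*
 * Example (hypothetical encoding): when decoding "VMPTRLD [0x1234]" in 16-bit
 * address mode (opcode bytes 0f c7 36 34 12), the ModR/M byte 0x36 sits at
 * offModRm and the word displacement follows it, so IEM_DISP_GET_U16 combines
 * the bytes 0x34 (low) and 0x12 (high) into u16Disp = 0x1234.
 */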
108
109
110#if 0 /* Disabled static fn until we use it with VMREAD/VMWRITE instruction implementation. */
111/**
112 * Returns whether the given VMCS field is valid and supported by our emulation.
113 *
114 * @param pVCpu The cross context virtual CPU structure.
115 * @param encField The VMCS field encoding.
116 *
117 * @remarks This takes into account the CPU features exposed to the guest.
118 */
119IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, VMXVMCSFIELDENC encField)
120{
121 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
122 switch (encField.u)
123 {
124 /*
125 * 16-bit fields.
126 */
127 /* Control fields. */
128 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
129 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
130 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
131
132 /* Guest-state fields. */
133 case VMX_VMCS16_GUEST_ES_SEL:
134 case VMX_VMCS16_GUEST_CS_SEL:
135 case VMX_VMCS16_GUEST_SS_SEL:
136 case VMX_VMCS16_GUEST_DS_SEL:
137 case VMX_VMCS16_GUEST_FS_SEL:
138 case VMX_VMCS16_GUEST_GS_SEL:
139 case VMX_VMCS16_GUEST_LDTR_SEL:
140 case VMX_VMCS16_GUEST_TR_SEL:
141 case VMX_VMCS16_GUEST_INTR_STATUS: return true;
142 case VMX_VMCS16_GUEST_PML_INDEX: return false;
143
144 /* Host-state fields. */
145 case VMX_VMCS16_HOST_ES_SEL:
146 case VMX_VMCS16_HOST_CS_SEL:
147 case VMX_VMCS16_HOST_SS_SEL:
148 case VMX_VMCS16_HOST_DS_SEL:
149 case VMX_VMCS16_HOST_FS_SEL:
150 case VMX_VMCS16_HOST_GS_SEL:
151 case VMX_VMCS16_HOST_TR_SEL: return true;
152
153 /*
154 * 64-bit fields.
155 */
156 /* Control fields. */
157 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
158 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
159 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
160 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
161 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
162 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
163 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
164 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
165 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
166 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
167 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
168 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
169 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
170 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
171 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
172 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return false;
173 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
174 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
175 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
176 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
177 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
178 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
179 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
180 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
181 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
182 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
183 case VMX_VMCS64_CTRL_EPTP_FULL:
184 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
185 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
186 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
187 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
188 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
189 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
190 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
191 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
192 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
193 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
194 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
195 {
196 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
197 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
198 }
199 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
200 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
201 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
202 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
203 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
204 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
205 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
206 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
207 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
208 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
209 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
210 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
211
212 /* Read-only data fields. */
213 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
214 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
215
216 /* Guest-state fields. */
217 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
218 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
219 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
220 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
221 case VMX_VMCS64_GUEST_PAT_FULL:
222 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
223 case VMX_VMCS64_GUEST_EFER_FULL:
224 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
225 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
226 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
227 case VMX_VMCS64_GUEST_PDPTE0_FULL:
228 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
229 case VMX_VMCS64_GUEST_PDPTE1_FULL:
230 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
231 case VMX_VMCS64_GUEST_PDPTE2_FULL:
232 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
233 case VMX_VMCS64_GUEST_PDPTE3_FULL:
234 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
235 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
236 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
237
238 /* Host-state fields. */
239 case VMX_VMCS64_HOST_PAT_FULL:
240 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
241 case VMX_VMCS64_HOST_EFER_FULL:
242 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
243 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
244 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
245
246 /*
247 * 32-bit fields.
248 */
249 /* Control fields. */
250 case VMX_VMCS32_CTRL_PIN_EXEC:
251 case VMX_VMCS32_CTRL_PROC_EXEC:
252 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
253 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
254 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
255 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
256 case VMX_VMCS32_CTRL_EXIT:
257 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
258 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
259 case VMX_VMCS32_CTRL_ENTRY:
260 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
261 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
262 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
263 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
264 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
265 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
266 case VMX_VMCS32_CTRL_PLE_GAP:
267 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
268
269 /* Read-only data fields. */
270 case VMX_VMCS32_RO_VM_INSTR_ERROR:
271 case VMX_VMCS32_RO_EXIT_REASON:
272 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
273 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
274 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
275 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
276 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
277 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
278
279 /* Guest-state fields. */
280 case VMX_VMCS32_GUEST_ES_LIMIT:
281 case VMX_VMCS32_GUEST_CS_LIMIT:
282 case VMX_VMCS32_GUEST_SS_LIMIT:
283 case VMX_VMCS32_GUEST_DS_LIMIT:
284 case VMX_VMCS32_GUEST_FS_LIMIT:
285 case VMX_VMCS32_GUEST_GS_LIMIT:
286 case VMX_VMCS32_GUEST_LDTR_LIMIT:
287 case VMX_VMCS32_GUEST_TR_LIMIT:
288 case VMX_VMCS32_GUEST_GDTR_LIMIT:
289 case VMX_VMCS32_GUEST_IDTR_LIMIT:
290 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
291 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
292 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
293 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
294 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
295 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
296 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
297 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
298 case VMX_VMCS32_GUEST_INT_STATE:
299 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
300 case VMX_VMCS32_GUEST_SMBASE:
301 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
302 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
303
304 /* Host-state fields. */
305 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
306
307 /*
308 * Natural-width fields.
309 */
310 /* Control fields. */
311 case VMX_VMCS_CTRL_CR0_MASK:
312 case VMX_VMCS_CTRL_CR4_MASK:
313 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
314 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
315 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
316 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
317 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
318 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
319
320 /* Read-only data fields. */
321 case VMX_VMCS_RO_EXIT_QUALIFICATION:
322 case VMX_VMCS_RO_IO_RCX:
323 case VMX_VMCS_RO_IO_RSX:
324 case VMX_VMCS_RO_IO_RDI:
325 case VMX_VMCS_RO_IO_RIP:
326 case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR: return true;
327
328 /* Guest-state fields. */
329 case VMX_VMCS_GUEST_CR0:
330 case VMX_VMCS_GUEST_CR3:
331 case VMX_VMCS_GUEST_CR4:
332 case VMX_VMCS_GUEST_ES_BASE:
333 case VMX_VMCS_GUEST_CS_BASE:
334 case VMX_VMCS_GUEST_SS_BASE:
335 case VMX_VMCS_GUEST_DS_BASE:
336 case VMX_VMCS_GUEST_FS_BASE:
337 case VMX_VMCS_GUEST_GS_BASE:
338 case VMX_VMCS_GUEST_LDTR_BASE:
339 case VMX_VMCS_GUEST_TR_BASE:
340 case VMX_VMCS_GUEST_GDTR_BASE:
341 case VMX_VMCS_GUEST_IDTR_BASE:
342 case VMX_VMCS_GUEST_DR7:
343 case VMX_VMCS_GUEST_RSP:
344 case VMX_VMCS_GUEST_RIP:
345 case VMX_VMCS_GUEST_RFLAGS:
346 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
347 case VMX_VMCS_GUEST_SYSENTER_ESP:
348 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
349
350 /* Host-state fields. */
351 case VMX_VMCS_HOST_CR0:
352 case VMX_VMCS_HOST_CR3:
353 case VMX_VMCS_HOST_CR4:
354 case VMX_VMCS_HOST_FS_BASE:
355 case VMX_VMCS_HOST_GS_BASE:
356 case VMX_VMCS_HOST_TR_BASE:
357 case VMX_VMCS_HOST_GDTR_BASE:
358 case VMX_VMCS_HOST_IDTR_BASE:
359 case VMX_VMCS_HOST_SYSENTER_ESP:
360 case VMX_VMCS_HOST_SYSENTER_EIP:
361 case VMX_VMCS_HOST_RSP:
362 case VMX_VMCS_HOST_RIP: return true;
363 }
364
365 return false;
366}
367#endif
368
369/**
370 * Gets VM-exit instruction information along with any displacement for an
371 * instruction VM-exit.
372 *
373 * @returns The VM-exit instruction information.
374 * @param pVCpu The cross context virtual CPU structure.
375 * @param uExitReason The VM-exit reason.
376 * @param InstrId The VM-exit instruction identity (VMX_INSTR_ID_XXX) if
377 * any. Pass VMX_INSTR_ID_NONE otherwise.
378 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
379 * NULL.
380 */
381IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID InstrId, PRTGCPTR pGCPtrDisp)
382{
383 RTGCPTR GCPtrDisp;
384 VMXEXITINSTRINFO ExitInstrInfo;
385 ExitInstrInfo.u = 0;
386
387 /*
388 * Get and parse the ModR/M byte from our decoded opcodes.
389 */
390 uint8_t bRm;
391 uint8_t const offModRm = pVCpu->iem.s.offModRm;
392 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
393 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
394 {
395 /*
396 * ModR/M indicates register addressing.
397 */
398 ExitInstrInfo.All.u2Scaling = 0;
399 ExitInstrInfo.All.iReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
400 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
401 ExitInstrInfo.All.fIsRegOperand = 1;
402 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
403 ExitInstrInfo.All.iSegReg = 0;
404 ExitInstrInfo.All.iIdxReg = 0;
405 ExitInstrInfo.All.fIdxRegInvalid = 1;
406 ExitInstrInfo.All.iBaseReg = 0;
407 ExitInstrInfo.All.fBaseRegInvalid = 1;
408 ExitInstrInfo.All.iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
409
410 /* Displacement not applicable for register addressing. */
411 GCPtrDisp = 0;
412 }
413 else
414 {
415 /*
416 * ModR/M indicates memory addressing.
417 */
418 uint8_t uScale = 0;
419 bool fBaseRegValid = false;
420 bool fIdxRegValid = false;
421 uint8_t iBaseReg = 0;
422 uint8_t iIdxReg = 0;
423 uint8_t iReg2 = 0;
424 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
425 {
426 /*
427 * Parse the ModR/M, displacement for 16-bit addressing mode.
428 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
429 */
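/*
 * Example (hypothetical encodings): bRm=0x06 (mod=00, rm=110) selects a pure
 * disp16 operand with no registers, while bRm=0x47 (mod=01, rm=111) selects
 * [BX + disp8] with BX as the base register and no index.
 */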
430 uint16_t u16Disp = 0;
431 uint8_t const offDisp = offModRm + sizeof(bRm);
432 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
433 {
434 /* Displacement without any registers. */
435 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
436 }
437 else
438 {
439 /* Register (index and base). */
440 switch (bRm & X86_MODRM_RM_MASK)
441 {
442 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
443 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
444 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
445 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
446 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
447 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
448 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
449 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
450 }
451
452 /* Register + displacement. */
453 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
454 {
455 case 0: break;
456 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
457 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
458 default:
459 {
460 /* Register addressing, handled at the beginning. */
461 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
462 break;
463 }
464 }
465 }
466
467 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
468 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
469 iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
470 }
471 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
472 {
473 /*
474 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
475 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
476 */
477 uint32_t u32Disp = 0;
478 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
479 {
480 /* Displacement without any registers. */
481 uint8_t const offDisp = offModRm + sizeof(bRm);
482 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
483 }
484 else
485 {
486 /* Register (and perhaps scale, index and base). */
487 uint8_t offDisp = offModRm + sizeof(bRm);
488 iBaseReg = (bRm & X86_MODRM_RM_MASK);
489 if (iBaseReg == 4)
490 {
491 /* An SIB byte follows the ModR/M byte, parse it. */
492 uint8_t bSib;
493 uint8_t const offSib = offModRm + sizeof(bRm);
494 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
495
496 /* A displacement may follow SIB, update its offset. */
497 offDisp += sizeof(bSib);
498
499 /* Get the scale. */
500 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
501
502 /* Get the index register. */
503 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
504 fIdxRegValid = RT_BOOL(iIdxReg != 4);
505
506 /* Get the base register. */
507 iBaseReg = bSib & X86_SIB_BASE_MASK;
508 fBaseRegValid = true;
509 if (iBaseReg == 5)
510 {
511 if ((bRm & X86_MODRM_MOD_MASK) == 0)
512 {
513 /* Mod is 0 implies a 32-bit displacement with no base. */
514 fBaseRegValid = false;
515 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
516 }
517 else
518 {
519 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
520 iBaseReg = X86_GREG_xBP;
521 }
522 }
523 }
524
525 /* Register + displacement. */
526 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
527 {
528 case 0: /* Handled above */ break;
529 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
530 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
531 default:
532 {
533 /* Register addressing, handled at the beginning. */
534 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
535 break;
536 }
537 }
538 }
539
540 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
541 iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
542 }
543 else
544 {
545 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
546
547 /*
548 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
549 * See Intel instruction spec. 2.2 "IA-32e Mode".
550 */
551 uint64_t u64Disp = 0;
552 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
553 if (fRipRelativeAddr)
554 {
555 /*
556 * RIP-relative addressing mode.
557 *
558 * The displacement is 32-bit signed implying an offset range of +/-2G.
559 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
560 */
561 uint8_t const offDisp = offModRm + sizeof(bRm);
562 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
563 }
564 else
565 {
566 uint8_t offDisp = offModRm + sizeof(bRm);
567
568 /*
569 * Register (and perhaps scale, index and base).
570 *
571 * REX.B extends the most-significant bit of the base register. However, REX.B
572 * is ignored while determining whether an SIB follows the opcode. Hence, we
573 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
574 *
575 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
576 */
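/*
 * Example (hypothetical encoding): with REX.B=1 and ModR/M rm=100b, an SIB
 * byte still follows (the iBaseReg == 4 check below ignores REX.B); only
 * after parsing the SIB does OR'ing in REX.B turn an SIB base of 100b into
 * R12.
 */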
577 iBaseReg = (bRm & X86_MODRM_RM_MASK);
578 if (iBaseReg == 4)
579 {
580 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
581 uint8_t bSib;
582 uint8_t const offSib = offModRm + sizeof(bRm);
583 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
584
585 /* Displacement may follow SIB, update its offset. */
586 offDisp += sizeof(bSib);
587
588 /* Get the scale. */
589 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
590
591 /* Get the index. */
592 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
593 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
594
595 /* Get the base. */
596 iBaseReg = (bSib & X86_SIB_BASE_MASK);
597 fBaseRegValid = true;
598 if (iBaseReg == 5)
599 {
600 if ((bRm & X86_MODRM_MOD_MASK) == 0)
601 {
602 /* Mod is 0 implies a signed 32-bit displacement with no base. */
603 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
604 }
605 else
606 {
607 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
608 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
609 }
610 }
611 }
612 iBaseReg |= pVCpu->iem.s.uRexB;
613
614 /* Register + displacement. */
615 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
616 {
617 case 0: /* Handled above */ break;
618 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
619 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
620 default:
621 {
622 /* Register addressing, handled at the beginning. */
623 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
624 break;
625 }
626 }
627 }
628
629 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
630 iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
631 }
632
633 ExitInstrInfo.All.u2Scaling = uScale;
634 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory instructions. */
635 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
636 ExitInstrInfo.All.fIsRegOperand = 0;
637 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
638 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
639 ExitInstrInfo.All.iIdxReg = iIdxReg;
640 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
641 ExitInstrInfo.All.iBaseReg = iBaseReg;
642 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
643 ExitInstrInfo.All.iReg2 = iReg2;
644 }
645
646 /*
647 * Handle special cases for certain instructions
648 * (e.g. some instructions convey an instruction identity).
649 */
650 switch (uExitReason)
651 {
652 case VMX_EXIT_XDTR_ACCESS:
653 {
654 Assert(VMX_INSTR_ID_IS_VALID(InstrId));
655 ExitInstrInfo.GdtIdt.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
656 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
657 break;
658 }
659
660 case VMX_EXIT_TR_ACCESS:
661 {
662 Assert(VMX_INSTR_ID_IS_VALID(InstrId));
663 ExitInstrInfo.LdtTr.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
664 ExitInstrInfo.LdtTr.u2Undef0 = 0;
665 break;
666 }
667
668 case VMX_EXIT_RDRAND:
669 case VMX_EXIT_RDSEED:
670 {
671 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
672 break;
673 }
674 }
675
676 /* Update displacement and return the constructed VM-exit instruction information field. */
677 if (pGCPtrDisp)
678 *pGCPtrDisp = GCPtrDisp;
679 return ExitInstrInfo.u;
680}
681
682
683/**
684 * Implements VMSucceed for VMX instruction success.
685 *
686 * @param pVCpu The cross context virtual CPU structure.
687 */
688DECLINLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
689{
690 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
691}
692
693
694/**
695 * Implements VMFailInvalid for VMX instruction failure.
696 *
697 * @param pVCpu The cross context virtual CPU structure.
698 */
699DECLINLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
700{
701 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
702 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
703}
704
705
706/**
707 * Implements VMFailValid for VMX instruction failure.
708 *
709 * @param pVCpu The cross context virtual CPU structure.
710 * @param enmInsErr The VM instruction error.
711 */
712DECLINLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
713{
714 if (pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
715 {
716 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
717 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
718 /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
719 RT_NOREF(enmInsErr);
720 }
721}
722
723
724/**
725 * Implements VMFail for VMX instruction failure.
726 *
727 * @param pVCpu The cross context virtual CPU structure.
728 * @param enmInsErr The VM instruction error.
729 */
730DECLINLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
731{
732 if (pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
733 {
734 iemVmxVmFailValid(pVCpu, enmInsErr);
735 /** @todo Set VM-instruction error field in the current virtual-VMCS. */
736 }
737 else
738 iemVmxVmFailInvalid(pVCpu);
739}
740
741
742/**
743 * VMCLEAR instruction execution worker.
744 *
745 * @param pVCpu The cross context virtual CPU structure.
746 * @param cbInstr The instruction length.
747 * @param GCPtrVmcs The linear address of the VMCS pointer.
748 * @param pExitInstrInfo Pointer to the VM-exit instruction information field.
749 * @param GCPtrDisp The displacement field for @a GCPtrVmcs if any.
750 *
751 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
752 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
753 */
754IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmcs, PCVMXEXITINSTRINFO pExitInstrInfo,
755 RTGCPTR GCPtrDisp)
756{
757 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
758 {
759 RT_NOREF(GCPtrDisp);
760 /** @todo NSTVMX: intercept. */
761 }
762 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
763
764 /* CPL. */
765 if (CPUMGetGuestCPL(pVCpu) > 0)
766 {
767 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
768 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_Cpl;
769 return iemRaiseGeneralProtectionFault0(pVCpu);
770 }
771
772 /** @todo NSTVMX: VMCLEAR impl. */
773 RT_NOREF(GCPtrVmcs); RT_NOREF(pExitInstrInfo); RT_NOREF(cbInstr);
774 return VINF_SUCCESS;
775}
776
777
778/**
779 * VMPTRST instruction execution worker.
780 *
781 * @param pVCpu The cross context virtual CPU structure.
782 * @param cbInstr The instruction length.
783 * @param GCPtrVmcs The linear address of where to store the current VMCS
784 * pointer.
785 * @param pExitInstrInfo Pointer to the VM-exit instruction information field.
786 * @param GCPtrDisp The displacement field for @a GCPtrVmcs if any.
787 *
788 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
789 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
790 */
791IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmcs, PCVMXEXITINSTRINFO pExitInstrInfo,
792 RTGCPTR GCPtrDisp)
793{
794 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
795 {
796 RT_NOREF(GCPtrDisp);
797 /** @todo NSTVMX: intercept. */
798 }
799 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
800
801 /* CPL. */
802 if (CPUMGetGuestCPL(pVCpu) > 0)
803 {
804 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
805 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_Cpl;
806 return iemRaiseGeneralProtectionFault0(pVCpu);
807 }
808
809 /* Set the VMCS pointer to the location specified by the destination memory operand. */
810 Assert(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
811 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, pExitInstrInfo->VmxXsave.iSegReg, GCPtrVmcs,
812 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs);
813 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
814 {
815 iemVmxVmSucceed(pVCpu);
816 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
817 return rcStrict;
818 }
819
820 Log(("vmptrld: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
821 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_PtrMap;
822 return rcStrict;
823}
824
825
826/**
827 * VMPTRLD instruction execution worker.
828 *
829 * @param pVCpu The cross context virtual CPU structure.
830 * @param cbInstr The instruction length.
831 * @param GCPtrVmcs The linear address of the current VMCS pointer.
832 * @param pExitInstrInfo Pointer to the VM-exit instruction information field.
833 * @param GCPtrDisp The displacement field for @a GCPtrVmcs if any.
834 *
835 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
836 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
837 */
838IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmcs, PCVMXEXITINSTRINFO pExitInstrInfo,
839 RTGCPTR GCPtrDisp)
840{
841 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
842 {
843 RT_NOREF(GCPtrDisp);
844 /** @todo NSTVMX: intercept. */
845 }
846 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
847
848 /* CPL. */
849 if (CPUMGetGuestCPL(pVCpu) > 0)
850 {
851 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
852 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Cpl;
853 return iemRaiseGeneralProtectionFault0(pVCpu);
854 }
855
856 /* Get the VMCS pointer from the location specified by the source memory operand. */
857 RTGCPHYS GCPhysVmcs;
858 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, pExitInstrInfo->VmxXsave.iSegReg, GCPtrVmcs);
859 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
860 {
861 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
862 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrMap;
863 return rcStrict;
864 }
865
866 /* VMCS pointer alignment. */
867 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
868 {
869 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
870 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAlign;
871 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
872 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
873 return VINF_SUCCESS;
874 }
875
876 /* VMCS physical-address width limits. */
877 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
878 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
879 {
880 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
881 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrWidth;
882 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
883 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
884 return VINF_SUCCESS;
885 }
886
887 /* VMCS is not the VMXON region. */
888 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
889 {
890 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
891 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrVmxon;
892 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
893 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
894 return VINF_SUCCESS;
895 }
896
897 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
898 restriction imposed by our implementation. */
899 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
900 {
901 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
902 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAbnormal;
903 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
904 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
905 return VINF_SUCCESS;
906 }
907
908 /* Read the VMCS revision ID from the VMCS. */
909 VMXVMCSREVID VmcsRevId;
910 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
911 if (RT_FAILURE(rc))
912 {
913 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
914 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrReadPhys;
915 return rc;
916 }
917
918 /* Verify that the VMCS revision specified by the guest matches what we reported to the
919 guest, and also check the VMCS-shadowing feature. */
920 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
921 || ( VmcsRevId.n.fIsShadowVmcs
922 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
923 {
924 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
925 {
926 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
927 VmcsRevId.n.u31RevisionId));
928 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_VmcsRevId;
929 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
930 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
931 return VINF_SUCCESS;
932 }
933
934 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
935 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_ShadowVmcs;
936 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
937 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
938 return VINF_SUCCESS;
939 }
940
941 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = GCPhysVmcs;
942 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Success;
943 iemVmxVmSucceed(pVCpu);
944 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
945 return VINF_SUCCESS;
946}
947
948
949/**
950 * VMXON instruction execution worker.
951 *
952 * @param pVCpu The cross context virtual CPU structure.
953 * @param cbInstr The instruction length.
954 * @param GCPtrVmxon The linear address of the VMXON pointer.
955 * @param pExitInstrInfo Pointer to the VM-exit instruction information field.
956 * @param GCPtrDisp The displacement field for @a GCPtrVmxon if any.
957 *
958 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
959 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
960 */
961IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, PCVMXEXITINSTRINFO pExitInstrInfo,
962 RTGCPTR GCPtrDisp)
963{
964#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
965 RT_NOREF5(pVCpu, cbInstr, GCPtrVmxon, pExitInstrInfo, GCPtrDisp);
966 return VINF_EM_RAW_EMULATE_INSTR;
967#else
968 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
969 {
970 /* CPL. */
971 if (pVCpu->iem.s.uCpl > 0)
972 {
973 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
974 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cpl;
975 return iemRaiseGeneralProtectionFault0(pVCpu);
976 }
977
978 /* A20M (A20 Masked) mode. */
979 if (!PGMPhysIsA20Enabled(pVCpu))
980 {
981 Log(("vmxon: A20M mode -> #GP(0)\n"));
982 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_A20M;
983 return iemRaiseGeneralProtectionFault0(pVCpu);
984 }
985
986 /* CR0 fixed bits. */
987 bool const fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
988 uint64_t const uCr0Fixed0 = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
989 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
990 {
991 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
992 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr0Fixed0;
993 return iemRaiseGeneralProtectionFault0(pVCpu);
994 }
995
996 /* CR4 fixed bits. */
997 if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
998 {
999 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
1000 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr4Fixed0;
1001 return iemRaiseGeneralProtectionFault0(pVCpu);
1002 }
1003
1004 /* Feature control MSR's LOCK and VMXON bits. */
1005 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
1006 if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
1007 {
1008 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
1009 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_MsrFeatCtl;
1010 return iemRaiseGeneralProtectionFault0(pVCpu);
1011 }
1012
1013 /* Get the VMXON pointer from the location specified by the source memory operand. */
1014 RTGCPHYS GCPhysVmxon;
1015 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, pExitInstrInfo->VmxXsave.iSegReg, GCPtrVmxon);
1016 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1017 {
1018 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
1019 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrMap;
1020 return rcStrict;
1021 }
1022
1023 /* VMXON region pointer alignment. */
1024 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
1025 {
1026 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
1027 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAlign;
1028 iemVmxVmFailInvalid(pVCpu);
1029 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1030 return VINF_SUCCESS;
1031 }
1032
1033 /* VMXON physical-address width limits. */
1034 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
1035 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1036 {
1037 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
1038 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrWidth;
1039 iemVmxVmFailInvalid(pVCpu);
1040 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1041 return VINF_SUCCESS;
1042 }
1043
1044 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
1045 restriction imposed by our implementation. */
1046 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
1047 {
1048 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
1049 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAbnormal;
1050 iemVmxVmFailInvalid(pVCpu);
1051 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1052 return VINF_SUCCESS;
1053 }
1054
1055 /* Read the VMCS revision ID from the VMXON region. */
1056 VMXVMCSREVID VmcsRevId;
1057 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
1058 if (RT_FAILURE(rc))
1059 {
1060 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
1061 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrReadPhys;
1062 return rc;
1063 }
1064
1065 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
1066 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
1067 {
1068 /* Revision ID mismatch. */
1069 if (!VmcsRevId.n.fIsShadowVmcs)
1070 {
1071 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
1072 VmcsRevId.n.u31RevisionId));
1073 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmcsRevId;
1074 iemVmxVmFailInvalid(pVCpu);
1075 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1076 return VINF_SUCCESS;
1077 }
1078
1079 /* Shadow VMCS disallowed. */
1080 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
1081 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_ShadowVmcs;
1082 iemVmxVmFailInvalid(pVCpu);
1083 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1084 return VINF_SUCCESS;
1085 }
1086
1087 /*
1088 * Record that we're in VMX operation, block INIT, block and disable A20M.
1089 */
1090 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
1091 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS;
1092 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
1093 /** @todo NSTVMX: clear address-range monitoring. */
1094 /** @todo NSTVMX: Intel PT. */
1095 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Success;
1096 iemVmxVmSucceed(pVCpu);
1097 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1098# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1099 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
1100# else
1101 return VINF_SUCCESS;
1102# endif
1103 }
1104 else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1105 {
1106 RT_NOREF(GCPtrDisp);
1107 /** @todo NSTVMX: intercept. */
1108 }
1109
1110 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1111
1112 /* CPL. */
1113 if (pVCpu->iem.s.uCpl > 0)
1114 {
1115 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1116 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRootCpl;
1117 return iemRaiseGeneralProtectionFault0(pVCpu);
1118 }
1119
1120 /* VMXON when already in VMX root mode. */
1121 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
1122 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRoot;
1123 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1124 return VINF_SUCCESS;
1125#endif
1126}
1127
1128
1129/**
1130 * Implements 'VMXON'.
1131 */
1132IEM_CIMPL_DEF_1(iemCImpl_vmxon, RTGCPTR, GCPtrVmxon)
1133{
1134 RTGCPTR GCPtrDisp;
1135 VMXEXITINSTRINFO ExitInstrInfo;
1136 ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMXON, VMX_INSTR_ID_NONE, &GCPtrDisp);
1137 return iemVmxVmxon(pVCpu, cbInstr, GCPtrVmxon, &ExitInstrInfo, GCPtrDisp);
1138}
1139
1140
1141/**
1142 * Implements 'VMXOFF'.
1143 */
1144IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
1145{
1146# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1147 RT_NOREF2(pVCpu, cbInstr);
1148 return VINF_EM_RAW_EMULATE_INSTR;
1149# else
1150 IEM_VMX_INSTR_COMMON_CHECKS(pVCpu, "vmxoff", kVmxVInstrDiag_Vmxoff);
1151 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
1152 {
1153 Log(("vmxoff: Not in VMX root mode -> #GP(0)\n"));
1154 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_VmxRoot;
1155 return iemRaiseUndefinedOpcode(pVCpu);
1156 }
1157
1158 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1159 {
1160 /** @todo NSTVMX: intercept. */
1161 }
1162
1163 /* CPL. */
1164 if (pVCpu->iem.s.uCpl > 0)
1165 {
1166 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1167 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Cpl;
1168 return iemRaiseGeneralProtectionFault0(pVCpu);
1169 }
1170
1171 /* Dual monitor treatment of SMIs and SMM. */
1172 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
1173 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
1174 {
1175 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
1176 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1177 return VINF_SUCCESS;
1178 }
1179
1180 /*
1181 * Record that we're no longer in VMX root operation, unblock INIT, unblock and enable A20M.
1182 */
1183 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
1184 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
1185
1186 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
1187 { /** @todo NSTVMX: Unblock SMI. */ }
1188 /** @todo NSTVMX: Unblock and enable A20M. */
1189 /** @todo NSTVMX: Clear address-range monitoring. */
1190
1191 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Success;
1192 iemVmxVmSucceed(pVCpu);
1193 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1194# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1195 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
1196# else
1197 return VINF_SUCCESS;
1198# endif
1199# endif
1200}
1201
1202
1203/**
1204 * Implements 'VMPTRLD'.
1205 */
1206IEM_CIMPL_DEF_1(iemCImpl_vmptrld, RTGCPTR, GCPtrVmcs)
1207{
1208 RTGCPTR GCPtrDisp;
1209 VMXEXITINSTRINFO ExitInstrInfo;
1210 ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMPTRLD, VMX_INSTR_ID_NONE, &GCPtrDisp);
1211 return iemVmxVmptrld(pVCpu, cbInstr, GCPtrVmcs, &ExitInstrInfo, GCPtrDisp);
1212}
1213
1214
1215/**
1216 * Implements 'VMPTRST'.
1217 */
1218IEM_CIMPL_DEF_1(iemCImpl_vmptrst, RTGCPTR, GCPtrVmcs)
1219{
1220 RTGCPTR GCPtrDisp;
1221 VMXEXITINSTRINFO ExitInstrInfo;
1222 ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMPTRST, VMX_INSTR_ID_NONE, &GCPtrDisp);
1223 return iemVmxVmptrst(pVCpu, cbInstr, GCPtrVmcs, &ExitInstrInfo, GCPtrDisp);
1224}
1225
1226
1227/**
1228 * Implements 'VMCLEAR'.
1229 */
1230IEM_CIMPL_DEF_1(iemCImpl_vmclear, RTGCPTR, GCPtrVmcs)
1231{
1232 RTGCPTR GCPtrDisp;
1233 VMXEXITINSTRINFO ExitInstrInfo;
1234 ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMCLEAR, VMX_INSTR_ID_NONE, &GCPtrDisp);
1235 return iemVmxVmclear(pVCpu, cbInstr, GCPtrVmcs, &ExitInstrInfo, GCPtrDisp);
1236}
1237
1238#endif
1239