VirtualBox

source: vbox/trunk/include/VBox/vmm/hmvmxinline.h@ 76398

Last change on this file since 76398 was 76397, checked in by vboxsync, 6 years ago

VBox/vmm/hm_svm.h,hm_vmx.h: Try to avoid including VBox/err.h in widely used headers, so split out the inline stuff from hm_vmx.h into hmvmxinline.h. bugref:9344

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.7 KB
Line 
1/** @file
2 * HM - VMX Structures and Definitions. (VMM)
3 */
4
5/*
6 * Copyright (C) 2006-2017 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_hmvmxinline_h
27#define ___VBox_vmm_hmvmxinline_h
28
29#include <VBox/vmm/hm_vmx.h>
30#include <VBox/err.h>
31
32/* In Visual C++ versions prior to 2012, the vmx intrinsics are only available
33 when targeting AMD64. */
34#if RT_INLINE_ASM_USES_INTRIN >= 16 && defined(RT_ARCH_AMD64)
35# pragma warning(push)
36# pragma warning(disable:4668) /* Several incorrect __cplusplus uses. */
37# pragma warning(disable:4255) /* Incorrect __slwpcb prototype. */
38# include <intrin.h>
39# pragma warning(pop)
40/* We always want them as intrinsics, no functions. */
41# pragma intrinsic(__vmx_on)
42# pragma intrinsic(__vmx_off)
43# pragma intrinsic(__vmx_vmclear)
44# pragma intrinsic(__vmx_vmptrld)
45# pragma intrinsic(__vmx_vmread)
46# pragma intrinsic(__vmx_vmwrite)
47# define VMX_USE_MSC_INTRINSICS 1
48#else
49# define VMX_USE_MSC_INTRINSICS 0
50#endif
51
52
53/** @defgroup grp_hm_vmx_inline VMX Inline Helpers
54 * @ingroup grp_hm_vmx
55 * @{
56 */
57/**
58 * Gets the effective width of a VMCS field given it's encoding adjusted for
59 * HIGH/FULL access for 64-bit fields.
60 *
61 * @returns The effective VMCS field width.
62 * @param uFieldEnc The VMCS field encoding.
63 *
64 * @remarks Warning! This function does not verify the encoding is for a valid and
65 * supported VMCS field.
66 */
67DECLINLINE(uint8_t) HMVmxGetVmcsFieldWidthEff(uint32_t uFieldEnc)
68{
69 /* Only the "HIGH" parts of all 64-bit fields have bit 0 set. */
70 if (uFieldEnc & RT_BIT(0))
71 return VMXVMCSFIELDWIDTH_32BIT;
72
73 /* Bits 13:14 contains the width of the VMCS field, see VMXVMCSFIELDWIDTH_XXX. */
74 return (uFieldEnc >> 13) & 0x3;
75}
76
77/**
78 * Returns whether the given VMCS field is a read-only VMCS field or not.
79 *
80 * @returns @c true if it's a read-only field, @c false otherwise.
81 * @param uFieldEnc The VMCS field encoding.
82 *
83 * @remarks Warning! This function does not verify the encoding is for a valid and
84 * supported VMCS field.
85 */
86DECLINLINE(bool) HMVmxIsVmcsFieldReadOnly(uint32_t uFieldEnc)
87{
88 /* See Intel spec. B.4.2 "Natural-Width Read-Only Data Fields". */
89 return (RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_TYPE) == VMXVMCSFIELDTYPE_VMEXIT_INFO);
90}
91
92/**
93 * Returns whether the given VM-entry interruption-information type is valid or not.
94 *
95 * @returns @c true if it's a valid type, @c false otherwise.
96 * @param fSupportsMTF Whether the Monitor-Trap Flag CPU feature is supported.
97 * @param uType The VM-entry interruption-information type.
98 */
99DECLINLINE(bool) HMVmxIsEntryIntInfoTypeValid(bool fSupportsMTF, uint8_t uType)
100{
101 /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
102 switch (uType)
103 {
104 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
105 case VMX_ENTRY_INT_INFO_TYPE_NMI:
106 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
107 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
108 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
109 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT: return true;
110 case VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT: return fSupportsMTF;
111 default:
112 return false;
113 }
114}
115
116/**
117 * Returns whether the given VM-entry interruption-information vector and type
118 * combination is valid or not.
119 *
120 * @returns @c true if it's a valid vector/type combination, @c false otherwise.
121 * @param uVector The VM-entry interruption-information vector.
122 * @param uType The VM-entry interruption-information type.
123 *
124 * @remarks Warning! This function does not validate the type field individually.
125 * Use it after verifying type is valid using HMVmxIsEntryIntInfoTypeValid.
126 */
127DECLINLINE(bool) HMVmxIsEntryIntInfoVectorValid(uint8_t uVector, uint8_t uType)
128{
129 /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
130 if ( uType == VMX_ENTRY_INT_INFO_TYPE_NMI
131 && uVector != X86_XCPT_NMI)
132 return false;
133 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
134 && uVector > X86_XCPT_LAST)
135 return false;
136 if ( uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
137 && uVector != VMX_ENTRY_INT_INFO_VECTOR_MTF)
138 return false;
139 return true;
140}
141
142
143/**
144 * Returns whether or not the VM-exit is trap-like or fault-like.
145 *
146 * @returns @c true if it's a trap-like VM-exit, @c false otherwise.
147 * @param uExitReason The VM-exit reason.
148 *
149 * @remarks Warning! This does not validate the VM-exit reason.
150 */
151DECLINLINE(bool) HMVmxIsVmexitTrapLike(uint32_t uExitReason)
152{
153 /*
154 * Trap-like VM-exits - The instruction causing the VM-exit completes before the
155 * VM-exit occurs.
156 *
157 * Fault-like VM-exits - The instruction causing the VM-exit is not completed before
158 * the VM-exit occurs.
159 *
160 * See Intel spec. 25.5.2 "Monitor Trap Flag".
161 * See Intel spec. 29.1.4 "EOI Virtualization".
162 * See Intel spec. 29.4.3.3 "APIC-Write VM Exits".
163 * See Intel spec. 29.1.2 "TPR Virtualization".
164 */
165 /** @todo NSTVMX: r=ramshankar: What about VM-exits due to debug traps (single-step,
166 * I/O breakpoints, data breakpoints), debug exceptions (data breakpoint)
167 * delayed by MovSS blocking, machine-check exceptions. */
168 switch (uExitReason)
169 {
170 case VMX_EXIT_MTF:
171 case VMX_EXIT_VIRTUALIZED_EOI:
172 case VMX_EXIT_APIC_WRITE:
173 case VMX_EXIT_TPR_BELOW_THRESHOLD:
174 return true;
175 }
176 return false;
177}
178
179
180/**
181 * Returns whether the VM-entry is vectoring or not given the VM-entry interruption
182 * information field.
183 *
184 * @returns @c true if the VM-entry is vectoring, @c false otherwise.
185 * @param uEntryIntInfo The VM-entry interruption information field.
186 * @param pEntryIntInfoType The VM-entry interruption information type field.
187 * Optional, can be NULL. Only updated when this
188 * function returns @c true.
189 */
190DECLINLINE(bool) HMVmxIsVmentryVectoring(uint32_t uEntryIntInfo, uint8_t *pEntryIntInfoType)
191{
192 /*
193 * The definition of what is a vectoring VM-entry is taken
194 * from Intel spec. 26.6 "Special Features of VM Entry".
195 */
196 if (!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
197 return false;
198
199 /* Scope and keep variable defines on top to satisy archaic c89 nonsense. */
200 {
201 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
202 switch (uType)
203 {
204 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
205 case VMX_ENTRY_INT_INFO_TYPE_NMI:
206 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
207 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
208 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
209 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
210 {
211 if (pEntryIntInfoType)
212 *pEntryIntInfoType = uType;
213 return true;
214 }
215 }
216 }
217 return false;
218}
219/** @} */
220
221
222/** @defgroup grp_hm_vmx_asm VMX Assembly Helpers
223 * @{
224 */
225
/**
 * Restores some host-state fields that need not be done on every VM-exit.
 *
 * Implemented in assembly (DECLASM), see the corresponding .asm source.
 *
 * @returns VBox status code.
 * @param   fRestoreHostFlags   Flags of which host registers needs to be
 *                              restored.
 * @param   pRestoreHost        Pointer to the host-restore structure.
 */
DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);


/**
 * Dispatches an NMI to the host.
 *
 * Implemented in assembly (DECLASM).
 *
 * @returns VBox status code.
 */
DECLASM(int) VMXDispatchHostNmi(void);
241
242
/**
 * Executes VMXON (enters VMX root operation).
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_VMX_INVALID_VMXON_PTR if the CPU signalled VMfailInvalid (CF=1).
 * @retval  VERR_VMX_VMXON_FAILED if the CPU signalled VMfailValid (ZF=1).
 *
 * @param   HCPhysVmxOn      Physical address of VMXON structure.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXEnable(RTHCPHYS HCPhysVmxOn);
#else
DECLINLINE(int) VMXEnable(RTHCPHYS HCPhysVmxOn)
{
# if RT_INLINE_ASM_GNU_STYLE
    /* 32-bit GCC-style inline assembly: push the 64-bit physical address onto the
       stack and execute VMXON with an [esp] memory operand (raw opcode bytes, so
       no VMX-aware assembler is needed).  VMX reports status via EFLAGS:
       CF=1 -> VMfailInvalid, ZF=1 -> VMfailValid, both clear -> success. */
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push %3 \n\t"
       "push %2 \n\t"
       ".byte 0xf3, 0x0f, 0xc7, 0x34, 0x24 # VMXON [esp] \n\t"
       "ja 2f \n\t"                                              /* CF=0 and ZF=0: success, keep VINF_SUCCESS. */
       "je 1f \n\t"                                              /* ZF=1: VMfailValid. */
       "movl $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"   /* CF=1: VMfailInvalid. */
       "jmp 2f \n\t"
       "1: \n\t"
       "movl $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
       "2: \n\t"
       "add $8, %%esp \n\t"                                      /* Drop the pushed 64-bit address again. */
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)HCPhysVmxOn), /* don't allow direct memory reference here, */
        "ir"((uint32_t)(HCPhysVmxOn >> 32)) /* this would not work with -fomit-frame-pointer */
       :"memory"
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    /* The intrinsic returns 0 on success, 1 on VMfailValid and 2 on VMfailInvalid. */
    unsigned char rcMsc = __vmx_on(&HCPhysVmxOn);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMXON_PTR : VERR_VMX_VMXON_FAILED;

# else
    /* Old MSC 32-bit inline assembly, raw opcode bytes again; same EFLAGS
       conventions as the GCC variant above. */
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [HCPhysVmxOn + 4]
        push    dword ptr [HCPhysVmxOn]
        _emit   0xf3
        _emit   0x0f
        _emit   0xc7
        _emit   0x34
        _emit   0x24     /* VMXON [esp] */
        jnc     vmxon_good
        mov     dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
        jmp     the_end

vmxon_good:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_VMXON_FAILED
the_end:
        add     esp, 8
    }
    return rc;
# endif
}
#endif
307
308
/**
 * Executes VMXOFF (leaves VMX root operation).
 *
 * @remarks No status is returned; failure is reported via CPU exception
 *          (e.g. \#UD when not in VMX operation).
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(void) VMXDisable(void);
#else
DECLINLINE(void) VMXDisable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    /* Raw opcode bytes, presumably to avoid requiring a VMX-aware assembler. */
    __asm__ __volatile__ (
       ".byte 0x0f, 0x01, 0xc4 # VMXOFF \n\t"
       );

# elif VMX_USE_MSC_INTRINSICS
    __vmx_off();

# else
    /* Old MSC 32-bit inline assembly, raw opcode bytes again. */
    __asm
    {
        _emit   0x0f
        _emit   0x01
        _emit   0xc4   /* VMXOFF */
    }
# endif
}
#endif
335
336
/**
 * Executes VMCLEAR.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_VMX_INVALID_VMCS_PTR on failure (CF=1, VMfailInvalid).
 *
 * @param   HCPhysVmcs       Physical address of VM control structure.
 *
 * @remarks Only CF is inspected; a VMfailValid (ZF=1) outcome is not
 *          distinguished here and would be reported as success - presumably
 *          acceptable for the way callers use this; verify if repurposing.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
#else
DECLINLINE(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs)
{
# if RT_INLINE_ASM_GNU_STYLE
    /* 32-bit GCC-style inline assembly: push the 64-bit physical address and
       execute VMCLEAR with an [esp] memory operand (raw opcode bytes). */
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push %3 \n\t"
       "push %2 \n\t"
       ".byte 0x66, 0x0f, 0xc7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
       "jnc 1f \n\t"                                             /* CF=0: treat as success. */
       "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
       "1: \n\t"
       "add $8, %%esp \n\t"                                      /* Drop the pushed address. */
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
        "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this would not work with -fomit-frame-pointer */
       :"memory"
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    /* The intrinsic returns 0 on success; any non-zero value is mapped to an
       invalid VMCS pointer here. */
    unsigned char rcMsc = __vmx_vmclear(&HCPhysVmcs);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return VERR_VMX_INVALID_VMCS_PTR;

# else
    /* Old MSC 32-bit inline assembly, raw opcode bytes again. */
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [HCPhysVmcs + 4]
        push    dword ptr [HCPhysVmcs]
        _emit   0x66
        _emit   0x0f
        _emit   0xc7
        _emit   0x34
        _emit   0x24     /* VMCLEAR [esp] */
        jnc     success
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
success:
        add     esp, 8
    }
    return rc;
# endif
}
#endif
392
393
/**
 * Executes VMPTRLD (makes the given VMCS current and active).
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_VMX_INVALID_VMCS_PTR on failure (CF=1, VMfailInvalid).
 *
 * @param   HCPhysVmcs       Physical address of VMCS structure.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
#else
DECLINLINE(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs)
{
# if RT_INLINE_ASM_GNU_STYLE
    /* 32-bit GCC-style inline assembly: push the 64-bit physical address and
       execute VMPTRLD with an [esp] memory operand (raw opcode bytes).
       NOTE(review): unlike the VMXClearVmcs asm there is no "memory" clobber
       here - presumably fine since VMPTRLD only reads the pointed-to VMCS,
       but worth confirming. */
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push %3 \n\t"
       "push %2 \n\t"
       ".byte 0x0f, 0xc7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
       "jnc 1f \n\t"                                             /* CF=0: treat as success. */
       "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
       "1: \n\t"
       "add $8, %%esp \n\t"                                      /* Drop the pushed address. */
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
        "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this will not work with -fomit-frame-pointer */
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    /* The intrinsic returns 0 on success; any non-zero value is mapped to an
       invalid VMCS pointer here. */
    unsigned char rcMsc = __vmx_vmptrld(&HCPhysVmcs);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return VERR_VMX_INVALID_VMCS_PTR;

# else
    /* Old MSC 32-bit inline assembly, raw opcode bytes again. */
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [HCPhysVmcs + 4]
        push    dword ptr [HCPhysVmcs]
        _emit   0x0f
        _emit   0xc7
        _emit   0x34
        _emit   0x24     /* VMPTRLD [esp] */
        jnc     success
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR

success:
        add     esp, 8
    }
    return rc;
# endif
}
#endif
448
449
/**
 * Executes VMPTRST (stores the current VMCS pointer).
 *
 * Implemented in assembly (DECLASM).
 *
 * @returns VBox status code.
 * @param   pHCPhysVmcs    Where to store the physical address of the current
 *                         VMCS.
 */
DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pHCPhysVmcs);
458
459
/**
 * Executes VMWRITE for a 32-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR on VMfailInvalid (CF=1).
 * @retval  VERR_VMX_INVALID_VMCS_FIELD on VMfailValid (ZF=1).
 *
 * @param   uFieldEnc       VMCS field encoding.
 * @param   u32Val          The 32-bit value to set.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val);
#else
DECLINLINE(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val)
{
# if RT_INLINE_ASM_GNU_STYLE
    /* GCC-style inline assembly: VMWRITE with the field encoding in EAX and the
       value in EDX (raw opcode bytes).  EFLAGS report the outcome:
       CF=1 -> VMfailInvalid, ZF=1 -> VMfailValid, both clear -> success. */
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
       "ja 2f \n\t"                                              /* CF=0 and ZF=0: success. */
       "je 1f \n\t"                                              /* ZF=1: invalid field. */
       "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"    /* CF=1: invalid VMCS pointer. */
       "jmp 2f \n\t"
       "1: \n\t"
       "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
       "2: \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "a"(uFieldEnc),
        "d"(u32Val)
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    /* The intrinsic returns 0 on success, 1 on VMfailValid and 2 on VMfailInvalid. */
    unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u32Val);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

#else
    /* Old MSC 32-bit inline assembly: value goes via the stack, VMWRITE
       eax, [esp] emitted as raw opcode bytes. */
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [u32Val]
        mov     eax, [uFieldEnc]
        _emit   0x0f
        _emit   0x79
        _emit   0x04
        _emit   0x24     /* VMWRITE eax, [esp] */
        jnc     valid_vmcs
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
        jmp     the_end

valid_vmcs:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
the_end:
        add     esp, 4
    }
    return rc;
# endif
}
#endif
527
/**
 * Executes VMWRITE for a 64-bit (or narrower) field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       The VMCS field encoding.
 * @param   u64Val          The 16, 32 or 64-bit value to set.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if !defined(RT_ARCH_X86)
# if !VMX_USE_MSC_INTRINSICS || ARCH_BITS != 64
DECLASM(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val);
# else /* VMX_USE_MSC_INTRINSICS */
DECLINLINE(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val)
{
    /* The intrinsic returns 0 on success, 1 on VMfailValid and 2 on VMfailInvalid. */
    unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u64Val);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
}
# endif /* VMX_USE_MSC_INTRINSICS */
#else
/* 32-bit x86 host: route through the ring-0 worker which needs the VCPU;
   the macro picks up pVCpu from the caller's scope. */
# define VMXWriteVmcs64(uFieldEnc, u64Val) VMXWriteVmcs64Ex(pVCpu, uFieldEnc, u64Val) /** @todo dead ugly, picking up pVCpu like this */
VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t uFieldEnc, uint64_t u64Val);
#endif
558
/* Natural-width VMCS write helpers: host-state fields match the host word size,
   guest-state fields always go through the 64-bit writer. */
#if ARCH_BITS == 32
# define VMXWriteVmcsHstN                       VMXWriteVmcs32
# define VMXWriteVmcsGstN(uFieldEnc, u64Val)    VMXWriteVmcs64Ex(pVCpu, uFieldEnc, u64Val)
#else /* ARCH_BITS == 64 */
# define VMXWriteVmcsHstN                       VMXWriteVmcs64
# define VMXWriteVmcsGstN                       VMXWriteVmcs64
#endif
566
567
/**
 * Invalidate a page using INVEPT.
 *
 * Implemented in assembly (DECLASM).
 *
 * @returns VBox status code.
 * @param   enmFlush        Type of flush.
 * @param   pDescriptor     Pointer to the descriptor.
 */
DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmFlush, uint64_t *pDescriptor);


/**
 * Invalidate a page using INVVPID.
 *
 * Implemented in assembly (DECLASM).
 *
 * @returns VBox status code.
 * @param   enmFlush        Type of flush.
 * @param   pDescriptor     Pointer to the descriptor.
 */
DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmFlush, uint64_t *pDescriptor);
586
587
/**
 * Executes VMREAD for a 32-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR on VMfailInvalid (CF=1).
 * @retval  VERR_VMX_INVALID_VMCS_FIELD on VMfailValid (ZF=1).
 *
 * @param   uFieldEnc       The VMCS field encoding.
 * @param   pData           Where to store VMCS field value.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData);
#else
DECLINLINE(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData)
{
# if RT_INLINE_ASM_GNU_STYLE
    /* GCC-style inline assembly: VMREAD with the field encoding in EAX and the
       result delivered in EDX (raw opcode bytes).  EFLAGS report the outcome:
       CF=1 -> VMfailInvalid, ZF=1 -> VMfailValid, both clear -> success. */
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "movl $" RT_XSTR(VINF_SUCCESS)", %0 \n\t"
       ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
       "ja 2f \n\t"                                              /* CF=0 and ZF=0: success. */
       "je 1f \n\t"                                              /* ZF=1: invalid field. */
       "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"    /* CF=1: invalid VMCS pointer. */
       "jmp 2f \n\t"
       "1: \n\t"
       "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
       "2: \n\t"
       :"=&r"(rc),
        "=d"(*pData)
       :"a"(uFieldEnc),
        "d"(0)                                                   /* Pre-zero EDX; VMREAD overwrites it on success. */
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    /* The intrinsic reads into a pointer-sized variable, so on 64-bit hosts
       read into a temporary and truncate to 32 bits. */
    unsigned char rcMsc;
# if ARCH_BITS == 32
    rcMsc = __vmx_vmread(uFieldEnc, pData);
# else
    uint64_t u64Tmp;
    rcMsc = __vmx_vmread(uFieldEnc, &u64Tmp);
    *pData = (uint32_t)u64Tmp;
# endif
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

#else
    /* Old MSC 32-bit inline assembly: read into a stack slot via VMREAD
       eax, [esp] (raw opcode bytes), then pop into *pData. */
    int rc = VINF_SUCCESS;
    __asm
    {
        sub     esp, 4
        mov     dword ptr [esp], 0
        mov     eax, [uFieldEnc]
        _emit   0x0f
        _emit   0x78
        _emit   0x04
        _emit   0x24     /* VMREAD eax, [esp] */
        mov     edx, pData
        pop     dword ptr [edx]
        jnc     valid_vmcs
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
        jmp     the_end

valid_vmcs:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
the_end:
    }
    return rc;
# endif
}
#endif
665
/**
 * Executes VMREAD for a 64-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       The VMCS field encoding.
 * @param   pData           Where to store VMCS field value.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if (!defined(RT_ARCH_X86) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData);
#else
DECLINLINE(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData)
{
# if VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc;
# if ARCH_BITS == 32
    /* 32-bit host: read the FULL and HIGH halves separately (the HIGH part has
       the encoding with bit 0 set, hence uFieldEnc + 1) and combine them. */
    size_t uLow;
    size_t uHigh;
    rcMsc = __vmx_vmread(uFieldEnc, &uLow);
    rcMsc |= __vmx_vmread(uFieldEnc + 1, &uHigh);
    *pData = RT_MAKE_U64(uLow, uHigh);
# else
    rcMsc = __vmx_vmread(uFieldEnc, pData);
# endif
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

# elif ARCH_BITS == 32
    /* 32-bit host without intrinsics: two 32-bit VMREADs, low then high half. */
    int rc;
    uint32_t val_hi, val;
    rc = VMXReadVmcs32(uFieldEnc, &val);
    rc |= VMXReadVmcs32(uFieldEnc + 1, &val_hi);
    AssertRC(rc);
    *pData = RT_MAKE_U64(val, val_hi);
    return rc;

# else
#  error "Shouldn't be here..."
# endif
}
#endif
714
715
716/**
717 * Gets the last instruction error value from the current VMCS.
718 *
719 * @returns VBox status code.
720 */
721DECLINLINE(uint32_t) VMXGetLastError(void)
722{
723#if ARCH_BITS == 64
724 uint64_t uLastError = 0;
725 int rc = VMXReadVmcs64(VMX_VMCS32_RO_VM_INSTR_ERROR, &uLastError);
726 AssertRC(rc);
727 return (uint32_t)uLastError;
728
729#else /* 32-bit host: */
730 uint32_t uLastError = 0;
731 int rc = VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &uLastError);
732 AssertRC(rc);
733 return uLastError;
734#endif
735}
736
737/** @} */
738
739#endif
740
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette