source: vbox/trunk/include/VBox/vmm/hmvmxinline.h @ 78235

Last change on this file since 78235 was 78220, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 Hardware-assisted nested VT-x infrastructure changes and VM-entry implementation.

/** @file
 * HM - VMX Structures and Definitions. (VMM)
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef VBOX_INCLUDED_vmm_hmvmxinline_h
#define VBOX_INCLUDED_vmm_hmvmxinline_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/vmm/hm_vmx.h>
#include <VBox/err.h>

/* In Visual C++ versions prior to 2012, the vmx intrinsics are only available
   when targeting AMD64. */
#if RT_INLINE_ASM_USES_INTRIN >= 16 && defined(RT_ARCH_AMD64)
# pragma warning(push)
# pragma warning(disable:4668) /* Several incorrect __cplusplus uses. */
# pragma warning(disable:4255) /* Incorrect __slwpcb prototype. */
# include <intrin.h>
# pragma warning(pop)
/* We always want them as intrinsics, no functions. */
# pragma intrinsic(__vmx_on)
# pragma intrinsic(__vmx_off)
# pragma intrinsic(__vmx_vmclear)
# pragma intrinsic(__vmx_vmptrld)
# pragma intrinsic(__vmx_vmread)
# pragma intrinsic(__vmx_vmwrite)
# define VMX_USE_MSC_INTRINSICS 1
#else
# define VMX_USE_MSC_INTRINSICS 0
#endif


/** @defgroup grp_hm_vmx_inline    VMX Inline Helpers
 * @ingroup grp_hm_vmx
 * @{
 */
/**
 * Gets the effective width of a VMCS field given its encoding adjusted for
 * HIGH/FULL access for 64-bit fields.
 *
 * @returns The effective VMCS field width.
 * @param   uFieldEnc   The VMCS field encoding.
 *
 * @remarks Warning! This function does not verify the encoding is for a valid and
 *          supported VMCS field.
 */
DECLINLINE(uint8_t) HMVmxGetVmcsFieldWidthEff(uint32_t uFieldEnc)
{
    /* Only the "HIGH" parts of all 64-bit fields have bit 0 set. */
    if (uFieldEnc & RT_BIT(0))
        return VMXVMCSFIELDWIDTH_32BIT;

    /* Bits 13:14 contain the width of the VMCS field, see VMXVMCSFIELDWIDTH_XXX. */
    return (uFieldEnc >> 13) & 0x3;
}

/**
 * Returns whether the given VMCS field is a read-only VMCS field or not.
 *
 * @returns @c true if it's a read-only field, @c false otherwise.
 * @param   uFieldEnc   The VMCS field encoding.
 *
 * @remarks Warning! This function does not verify the encoding is for a valid and
 *          supported VMCS field.
 */
DECLINLINE(bool) HMVmxIsVmcsFieldReadOnly(uint32_t uFieldEnc)
{
    /* See Intel spec. B.4.2 "Natural-Width Read-Only Data Fields". */
    return (RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_TYPE) == VMXVMCSFIELDTYPE_VMEXIT_INFO);
}
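
/*
 * Illustrative usage sketch (not part of the original header): decoding a VMCS
 * field encoding with the two helpers above.  VMX_VMCS32_RO_VM_INSTR_ERROR is a
 * field encoding from hm_vmx.h already used elsewhere in this header; the
 * example function itself is hypothetical.
 */
#if 0
static void hmVmxExampleInspectField(void)
{
    uint32_t const uFieldEnc = VMX_VMCS32_RO_VM_INSTR_ERROR;
    uint8_t  const uWidth    = HMVmxGetVmcsFieldWidthEff(uFieldEnc); /* VMXVMCSFIELDWIDTH_32BIT for this field. */
    bool     const fReadOnly = HMVmxIsVmcsFieldReadOnly(uFieldEnc);  /* true: it is VM-exit information. */
    NOREF(uWidth); NOREF(fReadOnly);
}
#endif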

/**
 * Returns whether the given VM-entry interruption-information type is valid or not.
 *
 * @returns @c true if it's a valid type, @c false otherwise.
 * @param   fSupportsMTF    Whether the Monitor-Trap Flag CPU feature is supported.
 * @param   uType           The VM-entry interruption-information type.
 */
DECLINLINE(bool) HMVmxIsEntryIntInfoTypeValid(bool fSupportsMTF, uint8_t uType)
{
    /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
    switch (uType)
    {
        case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
        case VMX_ENTRY_INT_INFO_TYPE_NMI:
        case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
        case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
        case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
        case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:       return true;
        case VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT:   return fSupportsMTF;
        default:
            return false;
    }
}

/**
 * Returns whether the given VM-entry interruption-information vector and type
 * combination is valid or not.
 *
 * @returns @c true if it's a valid vector/type combination, @c false otherwise.
 * @param   uVector     The VM-entry interruption-information vector.
 * @param   uType       The VM-entry interruption-information type.
 *
 * @remarks Warning! This function does not validate the type field individually.
 *          Use it after verifying type is valid using HMVmxIsEntryIntInfoTypeValid.
 */
DECLINLINE(bool) HMVmxIsEntryIntInfoVectorValid(uint8_t uVector, uint8_t uType)
{
    /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
    if (   uType == VMX_ENTRY_INT_INFO_TYPE_NMI
        && uVector != X86_XCPT_NMI)
        return false;
    if (   uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
        && uVector > X86_XCPT_LAST)
        return false;
    if (   uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
        && uVector != VMX_ENTRY_INT_INFO_VECTOR_MTF)
        return false;
    return true;
}
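
/*
 * Illustrative usage sketch (not part of the original header): the vector check
 * is only meaningful after the type check, as the remark above notes.  The
 * example function and the use of VMX_ENTRY_INT_INFO_VECTOR are assumptions for
 * illustration; fSupportsMTF is expected to come from CPU feature discovery.
 */
#if 0
static bool hmVmxExampleIsEntryIntInfoSane(uint32_t uEntryIntInfo, bool fSupportsMTF)
{
    uint8_t const uType   = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
    uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo);
    if (!HMVmxIsEntryIntInfoTypeValid(fSupportsMTF, uType))
        return false;
    return HMVmxIsEntryIntInfoVectorValid(uVector, uType);
}
#endif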


/**
 * Returns whether the VM-exit is trap-like or fault-like.
 *
 * @returns @c true if it's a trap-like VM-exit, @c false otherwise.
 * @param   uExitReason     The VM-exit reason.
 *
 * @remarks Warning! This does not validate the VM-exit reason.
 */
DECLINLINE(bool) HMVmxIsVmexitTrapLike(uint32_t uExitReason)
{
    /*
     * Trap-like VM-exits - The instruction causing the VM-exit completes before the
     * VM-exit occurs.
     *
     * Fault-like VM-exits - The instruction causing the VM-exit is not completed before
     * the VM-exit occurs.
     *
     * See Intel spec. 25.5.2 "Monitor Trap Flag".
     * See Intel spec. 29.1.4 "EOI Virtualization".
     * See Intel spec. 29.4.3.3 "APIC-Write VM Exits".
     * See Intel spec. 29.1.2 "TPR Virtualization".
     */
    /** @todo NSTVMX: r=ramshankar: What about VM-exits due to debug traps (single-step,
     *        I/O breakpoints, data breakpoints), debug exceptions (data breakpoint)
     *        delayed by MovSS blocking, machine-check exceptions. */
    switch (uExitReason)
    {
        case VMX_EXIT_MTF:
        case VMX_EXIT_VIRTUALIZED_EOI:
        case VMX_EXIT_APIC_WRITE:
        case VMX_EXIT_TPR_BELOW_THRESHOLD:
            return true;
    }
    return false;
}
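
/*
 * Illustrative usage sketch (not part of the original header): deciding whether
 * the guest RIP needs to be advanced after handling a VM-exit.  VMX_EXIT_MTF and
 * friends are exit-reason constants already referenced above; the surrounding
 * logic is hypothetical.
 */
#if 0
static bool hmVmxExampleShouldAdvanceRip(uint32_t uExitReason)
{
    /* For trap-like VM-exits the instruction has already completed, so the saved
       guest RIP already points past it and must not be advanced again. */
    return !HMVmxIsVmexitTrapLike(uExitReason);
}
#endif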


/**
 * Returns whether the VM-entry is vectoring or not given the VM-entry interruption
 * information field.
 *
 * @returns @c true if the VM-entry is vectoring, @c false otherwise.
 * @param   uEntryIntInfo       The VM-entry interruption information field.
 * @param   pEntryIntInfoType   The VM-entry interruption information type field.
 *                              Optional, can be NULL. Only updated when this
 *                              function returns @c true.
 */
DECLINLINE(bool) HMVmxIsVmentryVectoring(uint32_t uEntryIntInfo, uint8_t *pEntryIntInfoType)
{
    /*
     * The definition of what is a vectoring VM-entry is taken
     * from Intel spec. 26.6 "Special Features of VM Entry".
     */
    if (!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
        return false;

    /* Scope and keep variable defines on top to satisfy archaic C89 nonsense. */
    {
        uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
        switch (uType)
        {
            case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
            case VMX_ENTRY_INT_INFO_TYPE_NMI:
            case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
            case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
            case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
            case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
            {
                if (pEntryIntInfoType)
                    *pEntryIntInfoType = uType;
                return true;
            }
        }
    }
    return false;
}
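
/*
 * Illustrative usage sketch (not part of the original header): checking whether
 * a VM-entry is vectoring an event and, if so, retrieving its type.  The example
 * function and variable names are hypothetical.
 */
#if 0
static void hmVmxExampleCheckVectoringEntry(uint32_t uEntryIntInfo)
{
    uint8_t uEntryIntInfoType = 0;
    if (HMVmxIsVmentryVectoring(uEntryIntInfo, &uEntryIntInfoType))
    {
        /* uEntryIntInfoType now holds one of the VMX_ENTRY_INT_INFO_TYPE_XXX values. */
    }
}
#endif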
/** @} */


/** @defgroup grp_hm_vmx_asm    VMX Assembly Helpers
 * @{
 */

/**
 * Restores some host-state fields that need not be restored on every VM-exit.
 *
 * @returns VBox status code.
 * @param   fRestoreHostFlags   Flags of which host registers need to be
 *                              restored.
 * @param   pRestoreHost        Pointer to the host-restore structure.
 */
DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);


/**
 * Dispatches an NMI to the host.
 */
DECLASM(int) VMXDispatchHostNmi(void);


/**
 * Executes VMXON.
 *
 * @returns VBox status code.
 * @param   HCPhysVmxOn     Physical address of VMXON structure.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXEnable(RTHCPHYS HCPhysVmxOn);
#else
DECLINLINE(int) VMXEnable(RTHCPHYS HCPhysVmxOn)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push    %3                                          \n\t"
       "push    %2                                          \n\t"
       ".byte   0xf3, 0x0f, 0xc7, 0x34, 0x24  # VMXON [esp] \n\t"
       "ja      2f                                          \n\t"
       "je      1f                                          \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
       "jmp     2f                                          \n\t"
       "1:                                                  \n\t"
       "movl    $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0      \n\t"
       "2:                                                  \n\t"
       "add     $8, %%esp                                   \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)HCPhysVmxOn),        /* don't allow direct memory reference here, */
        "ir"((uint32_t)(HCPhysVmxOn >> 32)) /* this would not work with -fomit-frame-pointer */
       :"memory"
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_on(&HCPhysVmxOn);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMXON_PTR : VERR_VMX_VMXON_FAILED;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [HCPhysVmxOn + 4]
        push    dword ptr [HCPhysVmxOn]
        _emit   0xf3
        _emit   0x0f
        _emit   0xc7
        _emit   0x34
        _emit   0x24     /* VMXON [esp] */
        jnc     vmxon_good
        mov     dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
        jmp     the_end

vmxon_good:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_VMXON_FAILED
the_end:
        add     esp, 8
    }
    return rc;
# endif
}
#endif
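
/*
 * Illustrative usage sketch (not part of the original header): minimal error
 * handling around VMXEnable/VMXDisable.  Setting up the VMXON region contents,
 * CR4.VMXE and the IA32_FEATURE_CONTROL MSR is the caller's job and is omitted;
 * HCPhysVmxOnRegion and the function itself are hypothetical.
 */
#if 0
static int hmVmxExampleEnterVmxOperation(RTHCPHYS HCPhysVmxOnRegion)
{
    int rc = VMXEnable(HCPhysVmxOnRegion);
    if (RT_FAILURE(rc))
        return rc; /* VERR_VMX_INVALID_VMXON_PTR or VERR_VMX_VMXON_FAILED. */
    /* ... load a VMCS with VMXLoadVmcs, run the guest ... */
    VMXDisable();
    return VINF_SUCCESS;
}
#endif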


/**
 * Executes VMXOFF.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(void) VMXDisable(void);
#else
DECLINLINE(void) VMXDisable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (
       ".byte 0x0f, 0x01, 0xc4  # VMXOFF \n\t"
       );

# elif VMX_USE_MSC_INTRINSICS
    __vmx_off();

# else
    __asm
    {
        _emit   0x0f
        _emit   0x01
        _emit   0xc4   /* VMXOFF */
    }
# endif
}
#endif


/**
 * Executes VMCLEAR.
 *
 * @returns VBox status code.
 * @param   HCPhysVmcs      Physical address of VM control structure.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
#else
DECLINLINE(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push    %3                                            \n\t"
       "push    %2                                            \n\t"
       ".byte   0x66, 0x0f, 0xc7, 0x34, 0x24  # VMCLEAR [esp] \n\t"
       "jnc     1f                                            \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0    \n\t"
       "1:                                                    \n\t"
       "add     $8, %%esp                                     \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)HCPhysVmcs),        /* don't allow direct memory reference here, */
        "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this would not work with -fomit-frame-pointer */
       :"memory"
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_vmclear(&HCPhysVmcs);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return VERR_VMX_INVALID_VMCS_PTR;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [HCPhysVmcs + 4]
        push    dword ptr [HCPhysVmcs]
        _emit   0x66
        _emit   0x0f
        _emit   0xc7
        _emit   0x34
        _emit   0x24     /* VMCLEAR [esp] */
        jnc     success
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
success:
        add     esp, 8
    }
    return rc;
# endif
}
#endif


/**
 * Executes VMPTRLD.
 *
 * @returns VBox status code.
 * @param   HCPhysVmcs      Physical address of VMCS structure.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
#else
DECLINLINE(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push    %3                                          \n\t"
       "push    %2                                          \n\t"
       ".byte   0x0f, 0xc7, 0x34, 0x24  # VMPTRLD [esp]     \n\t"
       "jnc     1f                                          \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0  \n\t"
       "1:                                                  \n\t"
       "add     $8, %%esp                                   \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)HCPhysVmcs),        /* don't allow direct memory reference here, */
        "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this will not work with -fomit-frame-pointer */
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_vmptrld(&HCPhysVmcs);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return VERR_VMX_INVALID_VMCS_PTR;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [HCPhysVmcs + 4]
        push    dword ptr [HCPhysVmcs]
        _emit   0x0f
        _emit   0xc7
        _emit   0x34
        _emit   0x24     /* VMPTRLD [esp] */
        jnc     success
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR

success:
        add     esp, 8
    }
    return rc;
# endif
}
#endif


/**
 * Executes VMPTRST.
 *
 * @returns VBox status code.
 * @param   pHCPhysVmcs     Where to store the physical address of the current
 *                          VMCS.
 */
DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pHCPhysVmcs);


/**
 * Executes VMWRITE.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       VMCS field encoding.
 * @param   u32Val          The 32-bit value to set.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val);
#else
DECLINLINE(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       ".byte   0x0f, 0x79, 0xc2        # VMWRITE eax, edx    \n\t"
       "ja      2f                                            \n\t"
       "je      1f                                            \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0    \n\t"
       "jmp     2f                                            \n\t"
       "1:                                                    \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0  \n\t"
       "2:                                                    \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "a"(uFieldEnc),
        "d"(u32Val)
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u32Val);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [u32Val]
        mov     eax, [uFieldEnc]
        _emit   0x0f
        _emit   0x79
        _emit   0x04
        _emit   0x24     /* VMWRITE eax, [esp] */
        jnc     valid_vmcs
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
        jmp     the_end

valid_vmcs:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
the_end:
        add     esp, 4
    }
    return rc;
# endif
}
#endif
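
/*
 * Illustrative usage sketch (not part of the original header): writing a 32-bit
 * control field and propagating the status code.  VMX_VMCS32_CTRL_EXCEPTION_BITMAP
 * is assumed here to be the exception-bitmap field encoding from hm_vmx.h, and a
 * current VMCS must already have been loaded with VMXLoadVmcs.
 */
#if 0
static int hmVmxExampleSetXcptBitmap(uint32_t uXcptBitmap)
{
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    AssertRC(rc); /* VERR_VMX_INVALID_VMCS_PTR / VERR_VMX_INVALID_VMCS_FIELD on failure. */
    return rc;
}
#endif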

/**
 * Executes VMWRITE.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       The VMCS field encoding.
 * @param   u64Val          The 16, 32 or 64-bit value to set.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if !defined(RT_ARCH_X86)
# if !VMX_USE_MSC_INTRINSICS || ARCH_BITS != 64
DECLASM(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val);
# else  /* VMX_USE_MSC_INTRINSICS */
DECLINLINE(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val)
{
    unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u64Val);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
}
# endif /* VMX_USE_MSC_INTRINSICS */
#else
# define VMXWriteVmcs64(uFieldEnc, u64Val)    VMXWriteVmcs64Ex(pVCpu, uFieldEnc, u64Val) /** @todo dead ugly, picking up pVCpu like this */
VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t uFieldEnc, uint64_t u64Val);
#endif

#if ARCH_BITS == 32
# define VMXWriteVmcsHstN                     VMXWriteVmcs32
# define VMXWriteVmcsGstN(uFieldEnc, u64Val)  VMXWriteVmcs64Ex(pVCpu, uFieldEnc, u64Val)
#else  /* ARCH_BITS == 64 */
# define VMXWriteVmcsHstN                     VMXWriteVmcs64
# define VMXWriteVmcsGstN                     VMXWriteVmcs64
#endif


/**
 * Invalidates a page using INVEPT.
 *
 * @returns VBox status code.
 * @param   enmFlush        Type of flush.
 * @param   pDescriptor     Pointer to the descriptor.
 */
DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmFlush, uint64_t *pDescriptor);


/**
 * Invalidates a page using INVVPID.
 *
 * @returns VBox status code.
 * @param   enmFlush        Type of flush.
 * @param   pDescriptor     Pointer to the descriptor.
 */
DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmFlush, uint64_t *pDescriptor);


/**
 * Executes VMREAD for a 32-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       The VMCS field encoding.
 * @param   pData           Where to store VMCS field value.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData);
#else
DECLINLINE(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "movl    $" RT_XSTR(VINF_SUCCESS)", %0                 \n\t"
       ".byte   0x0f, 0x78, 0xc2        # VMREAD eax, edx     \n\t"
       "ja      2f                                            \n\t"
       "je      1f                                            \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0    \n\t"
       "jmp     2f                                            \n\t"
       "1:                                                    \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0  \n\t"
       "2:                                                    \n\t"
       :"=&r"(rc),
        "=d"(*pData)
       :"a"(uFieldEnc),
        "d"(0)
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc;
#  if ARCH_BITS == 32
    rcMsc = __vmx_vmread(uFieldEnc, pData);
#  else
    uint64_t u64Tmp;
    rcMsc = __vmx_vmread(uFieldEnc, &u64Tmp);
    *pData = (uint32_t)u64Tmp;
#  endif
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        sub     esp, 4
        mov     dword ptr [esp], 0
        mov     eax, [uFieldEnc]
        _emit   0x0f
        _emit   0x78
        _emit   0x04
        _emit   0x24     /* VMREAD eax, [esp] */
        mov     edx, pData
        pop     dword ptr [edx]
        jnc     valid_vmcs
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
        jmp     the_end

valid_vmcs:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
the_end:
    }
    return rc;
# endif
}
#endif

/**
 * Executes VMREAD for a 64-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       The VMCS field encoding.
 * @param   pData           Where to store VMCS field value.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if (!defined(RT_ARCH_X86) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData);
#else
DECLINLINE(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData)
{
# if VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc;
#  if ARCH_BITS == 32
    size_t uLow;
    size_t uHigh;
    rcMsc  = __vmx_vmread(uFieldEnc, &uLow);
    rcMsc |= __vmx_vmread(uFieldEnc + 1, &uHigh);
    *pData = RT_MAKE_U64(uLow, uHigh);
#  else
    rcMsc = __vmx_vmread(uFieldEnc, pData);
#  endif
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

# elif ARCH_BITS == 32
    int rc;
    uint32_t val_hi, val;
    rc  = VMXReadVmcs32(uFieldEnc, &val);
    rc |= VMXReadVmcs32(uFieldEnc + 1, &val_hi);
    AssertRC(rc);
    *pData = RT_MAKE_U64(val, val_hi);
    return rc;

# else
#  error "Shouldn't be here..."
# endif
}
#endif
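
/*
 * Illustrative usage sketch (not part of the original header): reading a 64-bit
 * read-only field after a VM-exit.  VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL is assumed
 * to be the guest-physical-address field encoding from hm_vmx.h; the example
 * function simply propagates any failure.
 */
#if 0
static int hmVmxExampleGetGuestPhysAddr(uint64_t *pGCPhys)
{
    int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, pGCPhys);
    AssertRC(rc);
    return rc;
}
#endif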


/**
 * Gets the last instruction error value from the current VMCS.
 *
 * @returns The VM-instruction error value (VMX_VMCS32_RO_VM_INSTR_ERROR).
 */
DECLINLINE(uint32_t) VMXGetLastError(void)
{
#if ARCH_BITS == 64
    uint64_t uLastError = 0;
    int rc = VMXReadVmcs64(VMX_VMCS32_RO_VM_INSTR_ERROR, &uLastError);
    AssertRC(rc);
    return (uint32_t)uLastError;

#else  /* 32-bit host: */
    uint32_t uLastError = 0;
    int rc = VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &uLastError);
    AssertRC(rc);
    return uLastError;
#endif
}
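
/*
 * Illustrative usage sketch (not part of the original header): the typical
 * pattern of consulting VMXGetLastError after a failed VMWRITE to obtain the
 * VM-instruction error number defined in the Intel spec.  The example function
 * and the LogRel statement are assumptions for illustration.
 */
#if 0
static void hmVmxExampleLogVmwriteFailure(uint32_t uFieldEnc, uint32_t u32Val)
{
    int rc = VMXWriteVmcs32(uFieldEnc, u32Val);
    if (RT_FAILURE(rc))
        LogRel(("VMWRITE failed: field=%#x rc=%Rrc instr-error=%u\n", uFieldEnc, rc, VMXGetLastError()));
}
#endif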

/** @} */

#endif /* !VBOX_INCLUDED_vmm_hmvmxinline_h */
