VirtualBox

source: vbox/trunk/include/VBox/vmm/hmvmxinline.h@80147

Last change on this file was 80147, checked in by vboxsync, 6 years ago

hmvmxinline.h: Nested VMX: bugref:9180 newline nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 39.6 KB
1/** @file
2 * HM - VMX Structures and Definitions. (VMM)
3 */
4
5/*
6 * Copyright (C) 2006-2019 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef VBOX_INCLUDED_vmm_hmvmxinline_h
27#define VBOX_INCLUDED_vmm_hmvmxinline_h
28#ifndef RT_WITHOUT_PRAGMA_ONCE
29# pragma once
30#endif
31
32#include <VBox/vmm/hm_vmx.h>
33#include <VBox/err.h>
34
35/* In Visual C++ versions prior to 2012, the vmx intrinsics are only available
36 when targeting AMD64. */
37#if RT_INLINE_ASM_USES_INTRIN >= 16 && defined(RT_ARCH_AMD64)
38# pragma warning(push)
39# pragma warning(disable:4668) /* Several incorrect __cplusplus uses. */
40# pragma warning(disable:4255) /* Incorrect __slwpcb prototype. */
41# include <intrin.h>
42# pragma warning(pop)
43/* We always want them as intrinsics, no functions. */
44# pragma intrinsic(__vmx_on)
45# pragma intrinsic(__vmx_off)
46# pragma intrinsic(__vmx_vmclear)
47# pragma intrinsic(__vmx_vmptrld)
48# pragma intrinsic(__vmx_vmread)
49# pragma intrinsic(__vmx_vmwrite)
50# define VMX_USE_MSC_INTRINSICS 1
51#else
52# define VMX_USE_MSC_INTRINSICS 0
53#endif
54
55/* Skip checking VMREAD/VMWRITE failures on non-strict builds. */
56#ifndef VBOX_STRICT
57# define VBOX_WITH_VMREAD_VMWRITE_NOCHECK
58#endif
59
60
61/** @defgroup grp_hm_vmx_inline VMX Inline Helpers
62 * @ingroup grp_hm_vmx
63 * @{
64 */
65/**
66 * Gets the effective width of a VMCS field given its encoding, adjusted for
67 * HIGH/FULL access for 64-bit fields.
68 *
69 * @returns The effective VMCS field width.
70 * @param uFieldEnc The VMCS field encoding.
71 *
72 * @remarks Warning! This function does not verify the encoding is for a valid and
73 * supported VMCS field.
74 */
75DECLINLINE(uint8_t) VMXGetVmcsFieldWidthEff(uint32_t uFieldEnc)
76{
77 /* Only the "HIGH" parts of all 64-bit fields have bit 0 set. */
78 if (uFieldEnc & RT_BIT(0))
79 return VMXVMCSFIELDWIDTH_32BIT;
80
81 /* Bits 14:13 contain the width of the VMCS field, see VMXVMCSFIELDWIDTH_XXX. */
82 return (uFieldEnc >> 13) & 0x3;
83}
84
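/*
 * Usage sketch (illustrative, not part of the original header): dispatching a
 * VMCS write on the effective field width.  VMXWriteVmcs32/VMXWriteVmcs64 are
 * declared further down in this header; the VMXVMCSFIELDWIDTH_64BIT constant
 * is assumed to be the usual one from hm_vmx.h.
 *
 *     static int vmxWriteVmcsByWidth(uint32_t uFieldEnc, uint64_t u64Val)
 *     {
 *         // A 64-bit field accessed via its "HIGH" encoding behaves as 32-bit;
 *         // natural-width fields are ignored here for brevity.
 *         if (VMXGetVmcsFieldWidthEff(uFieldEnc) == VMXVMCSFIELDWIDTH_64BIT)
 *             return VMXWriteVmcs64(uFieldEnc, u64Val);
 *         return VMXWriteVmcs32(uFieldEnc, (uint32_t)u64Val);
 *     }
 */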
85
86/**
87 * Returns whether the given VMCS field is a read-only VMCS field or not.
88 *
89 * @returns @c true if it's a read-only field, @c false otherwise.
90 * @param uFieldEnc The VMCS field encoding.
91 *
92 * @remarks Warning! This function does not verify that the encoding is for a valid
93 * and/or supported VMCS field.
94 */
95DECLINLINE(bool) VMXIsVmcsFieldReadOnly(uint32_t uFieldEnc)
96{
97 /* See Intel spec. B.4.2 "Natural-Width Read-Only Data Fields". */
98 return (RT_BF_GET(uFieldEnc, VMX_BF_VMCSFIELD_TYPE) == VMXVMCSFIELDTYPE_VMEXIT_INFO);
99}
100
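/*
 * Usage sketch (illustrative): guarding a VMCS write against read-only
 * (VM-exit information) fields, e.g. while emulating a guest VMWRITE.  The
 * status code returned below is only an example choice.
 *
 *     if (VMXIsVmcsFieldReadOnly(uFieldEnc))
 *         return VERR_VMX_INVALID_VMCS_FIELD;   // illustrative status code
 *     return VMXWriteVmcs32(uFieldEnc, u32Val);
 */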
101
102/**
103 * Returns whether the given VM-entry interruption-information type is valid or not.
104 *
105 * @returns @c true if it's a valid type, @c false otherwise.
106 * @param fSupportsMTF Whether the Monitor-Trap Flag CPU feature is supported.
107 * @param uType The VM-entry interruption-information type.
108 */
109DECLINLINE(bool) VMXIsEntryIntInfoTypeValid(bool fSupportsMTF, uint8_t uType)
110{
111 /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
112 switch (uType)
113 {
114 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
115 case VMX_ENTRY_INT_INFO_TYPE_NMI:
116 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
117 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
118 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
119 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT: return true;
120 case VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT: return fSupportsMTF;
121 default:
122 return false;
123 }
124}
125
126
127/**
128 * Returns whether the given VM-entry interruption-information vector and type
129 * combination is valid or not.
130 *
131 * @returns @c true if it's a valid vector/type combination, @c false otherwise.
132 * @param uVector The VM-entry interruption-information vector.
133 * @param uType The VM-entry interruption-information type.
134 *
135 * @remarks Warning! This function does not validate the type field individually.
136 * Use it only after verifying that the type is valid using VMXIsEntryIntInfoTypeValid.
137 */
138DECLINLINE(bool) VMXIsEntryIntInfoVectorValid(uint8_t uVector, uint8_t uType)
139{
140 /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
141 if ( uType == VMX_ENTRY_INT_INFO_TYPE_NMI
142 && uVector != X86_XCPT_NMI)
143 return false;
144 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
145 && uVector > X86_XCPT_LAST)
146 return false;
147 if ( uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
148 && uVector != VMX_ENTRY_INT_INFO_VECTOR_MTF)
149 return false;
150 return true;
151}
152
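/*
 * Usage sketch (illustrative): validating an event-injection field in the order
 * the remarks above require - type first, then the vector/type combination.
 * The wrapper name is made up; VMX_ENTRY_INT_INFO_VECTOR is assumed to be the
 * usual accessor macro from hm_vmx.h.
 *
 *     static bool vmxIsEntryIntInfoValid(bool fSupportsMtf, uint32_t uEntryIntInfo)
 *     {
 *         uint8_t uType;
 *         uint8_t uVector;
 *         if (!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
 *             return true;    // Nothing is being injected, nothing to check.
 *         uType   = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
 *         uVector = VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo);
 *         return VMXIsEntryIntInfoTypeValid(fSupportsMtf, uType)
 *             && VMXIsEntryIntInfoVectorValid(uVector, uType);
 *     }
 */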
153
154/**
155 * Returns whether the VM-exit is trap-like (as opposed to fault-like).
156 *
157 * @returns @c true if it's a trap-like VM-exit, @c false otherwise.
158 * @param uExitReason The VM-exit reason.
159 *
160 * @remarks Warning! This does not validate the VM-exit reason.
161 */
162DECLINLINE(bool) VMXIsVmexitTrapLike(uint32_t uExitReason)
163{
164 /*
165 * Trap-like VM-exits - The instruction causing the VM-exit completes before the
166 * VM-exit occurs.
167 *
168 * Fault-like VM-exits - The instruction causing the VM-exit is not completed before
169 * the VM-exit occurs.
170 *
171 * See Intel spec. 25.5.2 "Monitor Trap Flag".
172 * See Intel spec. 29.1.4 "EOI Virtualization".
173 * See Intel spec. 29.4.3.3 "APIC-Write VM Exits".
174 * See Intel spec. 29.1.2 "TPR Virtualization".
175 */
176 /** @todo NSTVMX: r=ramshankar: What about VM-exits due to debug traps (single-step,
177 * I/O breakpoints, data breakpoints), debug exceptions (data breakpoint)
178 * delayed by MovSS blocking, and machine-check exceptions? */
179 switch (uExitReason)
180 {
181 case VMX_EXIT_MTF:
182 case VMX_EXIT_VIRTUALIZED_EOI:
183 case VMX_EXIT_APIC_WRITE:
184 case VMX_EXIT_TPR_BELOW_THRESHOLD:
185 return true;
186 }
187 return false;
188}
189
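/*
 * Usage sketch (illustrative): when reflecting a VM-exit to a nested
 * hypervisor, trap-like exits occur after the instruction has completed, so
 * the saved guest RIP must not be rewound or advanced again; fault-like exits
 * are delivered with the original RIP.
 *
 *     if (VMXIsVmexitTrapLike(uExitReason))
 *     {
 *         // MTF, virtualized EOI, APIC write, TPR below threshold: the guest
 *         // RIP in the VMCS already points past the completed instruction.
 *     }
 *     else
 *     {
 *         // Fault-like: the instruction did not complete and may be
 *         // re-executed after the nested hypervisor handles the exit.
 *     }
 */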
190
191/**
192 * Returns whether the VM-entry is vectoring or not given the VM-entry interruption
193 * information field.
194 *
195 * @returns @c true if the VM-entry is vectoring, @c false otherwise.
196 * @param uEntryIntInfo The VM-entry interruption information field.
197 * @param pEntryIntInfoType The VM-entry interruption information type field.
198 * Optional, can be NULL. Only updated when this
199 * function returns @c true.
200 */
201DECLINLINE(bool) VMXIsVmentryVectoring(uint32_t uEntryIntInfo, uint8_t *pEntryIntInfoType)
202{
203 /*
204 * The definition of what is a vectoring VM-entry is taken
205 * from Intel spec. 26.6 "Special Features of VM Entry".
206 */
207 if (!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
208 return false;
209
210 /* Scope and keep variable definitions on top to satisfy archaic C89 nonsense. */
211 {
212 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
213 switch (uType)
214 {
215 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
216 case VMX_ENTRY_INT_INFO_TYPE_NMI:
217 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
218 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
219 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
220 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
221 {
222 if (pEntryIntInfoType)
223 *pEntryIntInfoType = uType;
224 return true;
225 }
226 }
227 }
228 return false;
229}
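
/*
 * Usage sketch (illustrative): distinguishing vectoring from non-vectoring
 * VM-entries, e.g. to decide whether event delivery during VM-entry needs to
 * be emulated (see the reference to Intel spec. 26.6 above).
 *
 *     uint8_t uEntryIntInfoType;
 *     if (VMXIsVmentryVectoring(uEntryIntInfo, &uEntryIntInfoType))
 *     {
 *         // Vectoring: an event of type uEntryIntInfoType is being injected.
 *     }
 *     else
 *     {
 *         // Non-vectoring: no event injection (an MTF "other event" also
 *         // lands here, as it is not considered vectoring).
 *     }
 */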
230/** @} */
231
232
233/** @defgroup grp_hm_vmx_asm VMX Assembly Helpers
234 * @{
235 */
236#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
237
238/**
239 * Restores some host-state fields that need not be done on every VM-exit.
240 *
241 * @returns VBox status code.
242 * @param fRestoreHostFlags Flags indicating which host registers need to be
243 * restored.
244 * @param pRestoreHost Pointer to the host-restore structure.
245 */
246DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);
247
248
249/**
250 * Dispatches an NMI to the host.
251 */
252DECLASM(int) VMXDispatchHostNmi(void);
253
254
255/**
256 * Executes VMXON.
257 *
258 * @returns VBox status code.
259 * @param HCPhysVmxOn Physical address of VMXON structure.
260 */
261#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
262DECLASM(int) VMXEnable(RTHCPHYS HCPhysVmxOn);
263#else
264DECLINLINE(int) VMXEnable(RTHCPHYS HCPhysVmxOn)
265{
266# if VMX_USE_MSC_INTRINSICS
267 unsigned char rcMsc = __vmx_on(&HCPhysVmxOn);
268 if (RT_LIKELY(rcMsc == 0))
269 return VINF_SUCCESS;
270 return rcMsc == 2 ? VERR_VMX_INVALID_VMXON_PTR : VERR_VMX_VMXON_FAILED;
271
272# elif RT_INLINE_ASM_GNU_STYLE
273# ifdef RT_ARCH_AMD64
274 int rc;
275 __asm__ __volatile__ (
276 "pushq %2 \n\t"
277 ".byte 0xf3, 0x0f, 0xc7, 0x34, 0x24 # VMXON [esp] \n\t"
278 "ja 2f \n\t"
279 "je 1f \n\t"
280 "movl $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
281 "jmp 2f \n\t"
282 "1: \n\t"
283 "movl $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
284 "2: \n\t"
285 "add $8, %%rsp \n\t"
286 :"=rm"(rc)
287 :"0"(VINF_SUCCESS),
288 "ir"(HCPhysVmxOn) /* don't allow direct memory reference here, */
289 /* this would not work with -fomit-frame-pointer */
290 :"memory"
291 );
292 return rc;
293# else
294 int rc;
295 __asm__ __volatile__ (
296 "push %3 \n\t"
297 "push %2 \n\t"
298 ".byte 0xf3, 0x0f, 0xc7, 0x34, 0x24 # VMXON [esp] \n\t"
299 "ja 2f \n\t"
300 "je 1f \n\t"
301 "movl $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
302 "jmp 2f \n\t"
303 "1: \n\t"
304 "movl $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
305 "2: \n\t"
306 "add $8, %%esp \n\t"
307 :"=rm"(rc)
308 :"0"(VINF_SUCCESS),
309 "ir"((uint32_t)HCPhysVmxOn), /* don't allow direct memory reference here, */
310 "ir"((uint32_t)(HCPhysVmxOn >> 32)) /* this would not work with -fomit-frame-pointer */
311 :"memory"
312 );
313 return rc;
314# endif
315
316# elif defined(RT_ARCH_X86)
317 int rc = VINF_SUCCESS;
318 __asm
319 {
320 push dword ptr [HCPhysVmxOn + 4]
321 push dword ptr [HCPhysVmxOn]
322 _emit 0xf3
323 _emit 0x0f
324 _emit 0xc7
325 _emit 0x34
326 _emit 0x24 /* VMXON [esp] */
327 jnc vmxon_good
328 mov dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
329 jmp the_end
330
331vmxon_good:
332 jnz the_end
333 mov dword ptr [rc], VERR_VMX_VMXON_FAILED
334the_end:
335 add esp, 8
336 }
337 return rc;
338
339# else
340# error "Shouldn't be here..."
341# endif
342}
343#endif
344
345
346#if 0
347#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
348DECLASM(int) VMXEnable(RTHCPHYS HCPhysVmxOn);
349#else
350DECLINLINE(int) VMXEnable(RTHCPHYS HCPhysVmxOn)
351{
352# if RT_INLINE_ASM_GNU_STYLE
353 int rc = VINF_SUCCESS;
354 __asm__ __volatile__ (
355 "push %3 \n\t"
356 "push %2 \n\t"
357 ".byte 0xf3, 0x0f, 0xc7, 0x34, 0x24 # VMXON [esp] \n\t"
358 "ja 2f \n\t"
359 "je 1f \n\t"
360 "movl $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
361 "jmp 2f \n\t"
362 "1: \n\t"
363 "movl $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
364 "2: \n\t"
365 "add $8, %%esp \n\t"
366 :"=rm"(rc)
367 :"0"(VINF_SUCCESS),
368 "ir"((uint32_t)HCPhysVmxOn), /* don't allow direct memory reference here, */
369 "ir"((uint32_t)(HCPhysVmxOn >> 32)) /* this would not work with -fomit-frame-pointer */
370 :"memory"
371 );
372 return rc;
373
374# elif VMX_USE_MSC_INTRINSICS
375 unsigned char rcMsc = __vmx_on(&HCPhysVmxOn);
376 if (RT_LIKELY(rcMsc == 0))
377 return VINF_SUCCESS;
378 return rcMsc == 2 ? VERR_VMX_INVALID_VMXON_PTR : VERR_VMX_VMXON_FAILED;
379
380# else
381 int rc = VINF_SUCCESS;
382 __asm
383 {
384 push dword ptr [HCPhysVmxOn + 4]
385 push dword ptr [HCPhysVmxOn]
386 _emit 0xf3
387 _emit 0x0f
388 _emit 0xc7
389 _emit 0x34
390 _emit 0x24 /* VMXON [esp] */
391 jnc vmxon_good
392 mov dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
393 jmp the_end
394
395vmxon_good:
396 jnz the_end
397 mov dword ptr [rc], VERR_VMX_VMXON_FAILED
398the_end:
399 add esp, 8
400 }
401 return rc;
402# endif
403}
404#endif
405#endif
406
407
408/**
409 * Executes VMXOFF.
410 */
411#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
412DECLASM(void) VMXDisable(void);
413#else
414DECLINLINE(void) VMXDisable(void)
415{
416# if VMX_USE_MSC_INTRINSICS
417 __vmx_off();
418
419# elif RT_INLINE_ASM_GNU_STYLE
420 __asm__ __volatile__ (
421 ".byte 0x0f, 0x01, 0xc4 # VMXOFF \n\t"
422 );
423
424# elif defined(RT_ARCH_X86)
425 __asm
426 {
427 _emit 0x0f
428 _emit 0x01
429 _emit 0xc4 /* VMXOFF */
430 }
431
432# else
433# error "Shouldn't be here..."
434# endif
435}
436#endif
437
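/*
 * Usage sketch (illustrative): minimal VMX root-mode bring-up and tear-down on
 * the current CPU.  Allocating the page-aligned VMXON region, stamping it with
 * the VMCS revision identifier and setting CR4.VMXE beforehand are assumed to
 * have been done elsewhere; only calls into this header are shown.
 *
 *     int rc = VMXEnable(HCPhysVmxOnRegion);    // VMXON
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... load a VMCS, VMREAD/VMWRITE, run guests ...
 *         VMXDisable();                         // VMXOFF
 *     }
 *     // On failure rc is VERR_VMX_INVALID_VMXON_PTR or VERR_VMX_VMXON_FAILED.
 */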
438
439#if 0
440#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
441DECLASM(void) VMXDisable(void);
442#else
443DECLINLINE(void) VMXDisable(void)
444{
445# if RT_INLINE_ASM_GNU_STYLE
446 __asm__ __volatile__ (
447 ".byte 0x0f, 0x01, 0xc4 # VMXOFF \n\t"
448 );
449
450# elif VMX_USE_MSC_INTRINSICS
451 __vmx_off();
452
453# else
454 __asm
455 {
456 _emit 0x0f
457 _emit 0x01
458 _emit 0xc4 /* VMXOFF */
459 }
460# endif
461}
462#endif
463#endif
464
465
466/**
467 * Executes VMCLEAR.
468 *
469 * @returns VBox status code.
470 * @param HCPhysVmcs Physical address of VM control structure.
471 */
472#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
473DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
474#else
475DECLINLINE(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs)
476{
477# if VMX_USE_MSC_INTRINSICS
478 unsigned char rcMsc = __vmx_vmclear(&HCPhysVmcs);
479 if (RT_LIKELY(rcMsc == 0))
480 return VINF_SUCCESS;
481 return VERR_VMX_INVALID_VMCS_PTR;
482
483# elif RT_INLINE_ASM_GNU_STYLE
484# ifdef RT_ARCH_AMD64
485 int rc;
486 __asm__ __volatile__ (
487 "pushq %2 \n\t"
488 ".byte 0x66, 0x0f, 0xc7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
489 "jnc 1f \n\t"
490 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
491 "1: \n\t"
492 "add $8, %%rsp \n\t"
493 :"=rm"(rc)
494 :"0"(VINF_SUCCESS),
495 "ir"(HCPhysVmcs) /* don't allow direct memory reference here, */
496 /* this would not work with -fomit-frame-pointer */
497 :"memory"
498 );
499 return rc;
500# else
501 int rc;
502 __asm__ __volatile__ (
503 "push %3 \n\t"
504 "push %2 \n\t"
505 ".byte 0x66, 0x0f, 0xc7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
506 "jnc 1f \n\t"
507 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
508 "1: \n\t"
509 "add $8, %%esp \n\t"
510 :"=rm"(rc)
511 :"0"(VINF_SUCCESS),
512 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
513 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this would not work with -fomit-frame-pointer */
514 :"memory"
515 );
516 return rc;
517# endif
518
519# elif defined(RT_ARCH_X86)
520 int rc = VINF_SUCCESS;
521 __asm
522 {
523 push dword ptr [HCPhysVmcs + 4]
524 push dword ptr [HCPhysVmcs]
525 _emit 0x66
526 _emit 0x0f
527 _emit 0xc7
528 _emit 0x34
529 _emit 0x24 /* VMCLEAR [esp] */
530 jnc success
531 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
532success:
533 add esp, 8
534 }
535 return rc;
536
537# else
538# error "Shouldn't be here..."
539# endif
540}
541#endif
542
543
544#if 0
545#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
546DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
547#else
548DECLINLINE(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs)
549{
550# if RT_INLINE_ASM_GNU_STYLE
551 int rc = VINF_SUCCESS;
552 __asm__ __volatile__ (
553 "push %3 \n\t"
554 "push %2 \n\t"
555 ".byte 0x66, 0x0f, 0xc7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
556 "jnc 1f \n\t"
557 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
558 "1: \n\t"
559 "add $8, %%esp \n\t"
560 :"=rm"(rc)
561 :"0"(VINF_SUCCESS),
562 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
563 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this would not work with -fomit-frame-pointer */
564 :"memory"
565 );
566 return rc;
567
568# elif VMX_USE_MSC_INTRINSICS
569 unsigned char rcMsc = __vmx_vmclear(&HCPhysVmcs);
570 if (RT_LIKELY(rcMsc == 0))
571 return VINF_SUCCESS;
572 return VERR_VMX_INVALID_VMCS_PTR;
573
574# else
575 int rc = VINF_SUCCESS;
576 __asm
577 {
578 push dword ptr [HCPhysVmcs + 4]
579 push dword ptr [HCPhysVmcs]
580 _emit 0x66
581 _emit 0x0f
582 _emit 0xc7
583 _emit 0x34
584 _emit 0x24 /* VMCLEAR [esp] */
585 jnc success
586 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
587success:
588 add esp, 8
589 }
590 return rc;
591# endif
592}
593#endif
594#endif
595
596
597/**
598 * Executes VMPTRLD.
599 *
600 * @returns VBox status code.
601 * @param HCPhysVmcs Physical address of VMCS structure.
602 */
603#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
604DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
605#else
606DECLINLINE(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs)
607{
608# if VMX_USE_MSC_INTRINSICS
609 unsigned char rcMsc = __vmx_vmptrld(&HCPhysVmcs);
610 if (RT_LIKELY(rcMsc == 0))
611 return VINF_SUCCESS;
612 return VERR_VMX_INVALID_VMCS_PTR;
613
614# elif RT_INLINE_ASM_GNU_STYLE
615# ifdef RT_ARCH_AMD64
616 int rc;
617 __asm__ __volatile__ (
618 "pushq %2 \n\t"
619 ".byte 0x0f, 0xc7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
620 "jnc 1f \n\t"
621 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
622 "1: \n\t"
623 "add $8, %%rsp \n\t"
624 :"=rm"(rc)
625 :"0"(VINF_SUCCESS),
626 "ir"(HCPhysVmcs) /* don't allow direct memory reference here, */
627 /* this will not work with -fomit-frame-pointer */
628 :"memory"
629 );
630 return rc;
631# else
632 int rc;
633 __asm__ __volatile__ (
634 "push %3 \n\t"
635 "push %2 \n\t"
636 ".byte 0x0f, 0xc7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
637 "jnc 1f \n\t"
638 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
639 "1: \n\t"
640 "add $8, %%esp \n\t"
641 :"=rm"(rc)
642 :"0"(VINF_SUCCESS),
643 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
644 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this will not work with -fomit-frame-pointer */
645 :"memory"
646 );
647 return rc;
648# endif
649
650# elif defined(RT_ARCH_X86)
651 int rc = VINF_SUCCESS;
652 __asm
653 {
654 push dword ptr [HCPhysVmcs + 4]
655 push dword ptr [HCPhysVmcs]
656 _emit 0x0f
657 _emit 0xc7
658 _emit 0x34
659 _emit 0x24 /* VMPTRLD [esp] */
660 jnc success
661 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
662success:
663 add esp, 8
664 }
665 return rc;
666
667# else
668# error "Shouldn't be here..."
669# endif
670}
671#endif
672
673#if 0
674#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
675DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
676#else
677DECLINLINE(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs)
678{
679# if RT_INLINE_ASM_GNU_STYLE
680 int rc = VINF_SUCCESS;
681 __asm__ __volatile__ (
682 "push %3 \n\t"
683 "push %2 \n\t"
684 ".byte 0x0f, 0xc7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
685 "jnc 1f \n\t"
686 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
687 "1: \n\t"
688 "add $8, %%esp \n\t"
689 :"=rm"(rc)
690 :"0"(VINF_SUCCESS),
691 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
692 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this will not work with -fomit-frame-pointer */
693 );
694 return rc;
695
696# elif VMX_USE_MSC_INTRINSICS
697 unsigned char rcMsc = __vmx_vmptrld(&HCPhysVmcs);
698 if (RT_LIKELY(rcMsc == 0))
699 return VINF_SUCCESS;
700 return VERR_VMX_INVALID_VMCS_PTR;
701
702# else
703 int rc = VINF_SUCCESS;
704 __asm
705 {
706 push dword ptr [HCPhysVmcs + 4]
707 push dword ptr [HCPhysVmcs]
708 _emit 0x0f
709 _emit 0xc7
710 _emit 0x34
711 _emit 0x24 /* VMPTRLD [esp] */
712 jnc success
713 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
714
715success:
716 add esp, 8
717 }
718 return rc;
719# endif
720}
721#endif
722#endif
723
724
725/**
726 * Executes VMPTRST.
727 *
728 * @returns VBox status code.
729 * @param pHCPhysVmcs Where to store the physical address of the current
730 * VMCS.
731 */
732DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pHCPhysVmcs);
733
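/*
 * Usage sketch (illustrative): the usual VMCS activation sequence with the
 * helpers above - VMCLEAR to initialize the launch state, VMPTRLD to make the
 * VMCS current, and VMPTRST to read back the current-VMCS pointer.
 *
 *     int rc = VMXClearVmcs(HCPhysVmcs);            // VMCLEAR
 *     if (RT_SUCCESS(rc))
 *         rc = VMXLoadVmcs(HCPhysVmcs);             // VMPTRLD
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTHCPHYS HCPhysCur;
 *         rc = VMXGetCurrentVmcs(&HCPhysCur);       // VMPTRST
 *         Assert(HCPhysCur == HCPhysVmcs);
 *     }
 */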
734
735/**
736 * Executes VMWRITE for a 32-bit field.
737 *
738 * @returns VBox status code.
739 * @retval VINF_SUCCESS.
740 * @retval VERR_VMX_INVALID_VMCS_PTR.
741 * @retval VERR_VMX_INVALID_VMCS_FIELD.
742 *
743 * @param uFieldEnc VMCS field encoding.
744 * @param u32Val The 32-bit value to set.
745 *
746 * @remarks The values of the two status codes can be OR'ed together, the result
747 * will be VERR_VMX_INVALID_VMCS_PTR.
748 */
749#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
750DECLASM(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val);
751#else
752DECLINLINE(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val)
753{
754# if VMX_USE_MSC_INTRINSICS
755# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
756 __vmx_vmwrite(uFieldEnc, u32Val);
757 return VINF_SUCCESS;
758# else
759 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u32Val);
760 if (RT_LIKELY(rcMsc == 0))
761 return VINF_SUCCESS;
762 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
763# endif
764
765# elif RT_INLINE_ASM_GNU_STYLE
766# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
767 __asm__ __volatile__ (
768 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
769 :
770 :"a"(uFieldEnc),
771 "d"(u32Val)
772 );
773 return VINF_SUCCESS;
774# else
775 int rc;
776 __asm__ __volatile__ (
777 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
778 "ja 2f \n\t"
779 "je 1f \n\t"
780 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
781 "jmp 2f \n\t"
782 "1: \n\t"
783 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
784 "2: \n\t"
785 :"=rm"(rc)
786 :"0"(VINF_SUCCESS),
787 "a"(uFieldEnc),
788 "d"(u32Val)
789 );
790 return rc;
791# endif
792
793# elif defined(RT_ARCH_X86)
794 int rc = VINF_SUCCESS;
795 __asm
796 {
797 push dword ptr [u32Val]
798 mov eax, [uFieldEnc]
799 _emit 0x0f
800 _emit 0x79
801 _emit 0x04
802 _emit 0x24 /* VMWRITE eax, [esp] */
803 jnc valid_vmcs
804 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
805 jmp the_end
806valid_vmcs:
807 jnz the_end
808 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
809the_end:
810 add esp, 4
811 }
812 return rc;
813
814# else
815# error "Shouldn't be here..."
816# endif
817}
818#endif
819
820
821#if 0
822#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
823DECLASM(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val);
824#else
825DECLINLINE(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val)
826{
827# if RT_INLINE_ASM_GNU_STYLE
828 int rc = VINF_SUCCESS;
829 __asm__ __volatile__ (
830 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
831 "ja 2f \n\t"
832 "je 1f \n\t"
833 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
834 "jmp 2f \n\t"
835 "1: \n\t"
836 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
837 "2: \n\t"
838 :"=rm"(rc)
839 :"0"(VINF_SUCCESS),
840 "a"(uFieldEnc),
841 "d"(u32Val)
842 );
843 return rc;
844
845# elif VMX_USE_MSC_INTRINSICS
846 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u32Val);
847 if (RT_LIKELY(rcMsc == 0))
848 return VINF_SUCCESS;
849 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
850
851#else
852 int rc = VINF_SUCCESS;
853 __asm
854 {
855 push dword ptr [u32Val]
856 mov eax, [uFieldEnc]
857 _emit 0x0f
858 _emit 0x79
859 _emit 0x04
860 _emit 0x24 /* VMWRITE eax, [esp] */
861 jnc valid_vmcs
862 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
863 jmp the_end
864
865valid_vmcs:
866 jnz the_end
867 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
868the_end:
869 add esp, 4
870 }
871 return rc;
872# endif
873}
874#endif
875#endif
876
877
878/**
879 * Executes VMWRITE for a 64-bit field.
880 *
881 * @returns VBox status code.
882 * @retval VINF_SUCCESS.
883 * @retval VERR_VMX_INVALID_VMCS_PTR.
884 * @retval VERR_VMX_INVALID_VMCS_FIELD.
885 *
886 * @param uFieldEnc The VMCS field encoding.
887 * @param u64Val The 16, 32 or 64-bit value to set.
888 *
889 * @remarks The values of the two status codes can be OR'ed together, the result
890 * will be VERR_VMX_INVALID_VMCS_PTR.
891 */
892#if defined(RT_ARCH_X86) || (RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS)
893DECLASM(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val);
894#else
895DECLINLINE(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val)
896{
897# if VMX_USE_MSC_INTRINSICS
898# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
899 __vmx_vmwrite(uFieldEnc, u64Val);
900 return VINF_SUCCESS;
901# else
902 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u64Val);
903 if (RT_LIKELY(rcMsc == 0))
904 return VINF_SUCCESS;
905 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
906# endif
907
908# elif RT_INLINE_ASM_GNU_STYLE
909# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
910 __asm__ __volatile__ (
911 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
912 :
913 :"a"(uFieldEnc),
914 "d"(u64Val)
915 );
916 return VINF_SUCCESS;
917# else
918 int rc;
919 __asm__ __volatile__ (
920 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
921 "ja 2f \n\t"
922 "je 1f \n\t"
923 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
924 "jmp 2f \n\t"
925 "1: \n\t"
926 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
927 "2: \n\t"
928 :"=rm"(rc)
929 :"0"(VINF_SUCCESS),
930 "a"(uFieldEnc),
931 "d"(u64Val)
932 );
933 return rc;
934# endif
935
936# else
937# error "Shouldn't be here..."
938# endif
939}
940#endif
941
942
943#if 0
944#if (defined(RT_ARCH_AMD64) && VMX_USE_MSC_INTRINSICS)
945DECLINLINE(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val)
946{
947 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u64Val);
948 if (RT_LIKELY(rcMsc == 0))
949 return VINF_SUCCESS;
950 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
951}
952#else
953DECLASM(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val);
954#endif
955#endif
956
957
958/**
959 * Executes VMWRITE for a 16-bit VMCS field.
960 *
961 * @returns VBox status code.
962 * @retval VINF_SUCCESS.
963 * @retval VERR_VMX_INVALID_VMCS_PTR.
964 * @retval VERR_VMX_INVALID_VMCS_FIELD.
965 *
966 * @param uVmcsField The VMCS field.
967 * @param u16Val The 16-bit value to set.
968 *
969 * @remarks The values of the two status codes can be OR'ed together, the result
970 * will be VERR_VMX_INVALID_VMCS_PTR.
971 */
972DECLINLINE(int) VMXWriteVmcs16(uint32_t uVmcsField, uint16_t u16Val)
973{
974 AssertMsg(RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH) == VMX_VMCSFIELD_WIDTH_16BIT, ("%#RX32\n", uVmcsField));
975 return VMXWriteVmcs32(uVmcsField, u16Val);
976}
977
978
979/**
980 * Executes VMWRITE for a natural-width VMCS field.
981 */
982#ifdef RT_ARCH_AMD64
983# define VMXWriteVmcsNw VMXWriteVmcs64
984#else
985# define VMXWriteVmcsNw VMXWriteVmcs32
986#endif
987
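/*
 * Usage sketch (illustrative): writing a natural-width field portably; this
 * expands to VMXWriteVmcs64 on AMD64 hosts and VMXWriteVmcs32 on x86 hosts.
 * VMX_VMCS_GUEST_RIP is assumed to be the usual natural-width encoding from
 * hm_vmx.h, and uGuestRip a host-word sized value to write.
 *
 *     int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_RIP, uGuestRip);
 *     AssertRC(rc);
 */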
988
989/**
990 * Invalidate a page using INVEPT.
991 *
992 * @returns VBox status code.
993 * @param enmFlush Type of flush.
994 * @param pDescriptor Pointer to the descriptor.
995 */
996DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmFlush, uint64_t *pDescriptor);
997
998
999/**
1000 * Invalidate a page using INVVPID.
1001 *
1002 * @returns VBox status code.
1003 * @param enmFlush Type of flush.
1004 * @param pDescriptor Pointer to the descriptor.
1005 */
1006DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmFlush, uint64_t *pDescriptor);
1007
1008
1009/**
1010 * Executes VMREAD for a 32-bit field.
1011 *
1012 * @returns VBox status code.
1013 * @retval VINF_SUCCESS.
1014 * @retval VERR_VMX_INVALID_VMCS_PTR.
1015 * @retval VERR_VMX_INVALID_VMCS_FIELD.
1016 *
1017 * @param uFieldEnc The VMCS field encoding.
1018 * @param pData Where to store VMCS field value.
1019 *
1020 * @remarks The values of the two status codes can be OR'ed together, the result
1021 * will be VERR_VMX_INVALID_VMCS_PTR.
1022 */
1023#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
1024DECLASM(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData);
1025#else
1026DECLINLINE(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData)
1027{
1028# if VMX_USE_MSC_INTRINSICS
1029# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1030 uint64_t u64Tmp = 0;
1031 __vmx_vmread(uFieldEnc, &u64Tmp);
1032 *pData = (uint32_t)u64Tmp;
1033 return VINF_SUCCESS;
1034# else
1035 unsigned char rcMsc;
1036 uint64_t u64Tmp;
1037 rcMsc = __vmx_vmread(uFieldEnc, &u64Tmp);
1038 *pData = (uint32_t)u64Tmp;
1039 if (RT_LIKELY(rcMsc == 0))
1040 return VINF_SUCCESS;
1041 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
1042# endif
1043
1044# elif RT_INLINE_ASM_GNU_STYLE
1045# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1046 __asm__ __volatile__ (
1047 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1048 :"=d"(*pData)
1049 :"a"(uFieldEnc),
1050 "d"(0)
1051 );
1052 return VINF_SUCCESS;
1053# else
1054 int rc;
1055 __asm__ __volatile__ (
1056 "movl $" RT_XSTR(VINF_SUCCESS)", %0 \n\t"
1057 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1058 "ja 2f \n\t"
1059 "je 1f \n\t"
1060 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1061 "jmp 2f \n\t"
1062 "1: \n\t"
1063 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
1064 "2: \n\t"
1065 :"=&r"(rc),
1066 "=d"(*pData)
1067 :"a"(uFieldEnc),
1068 "d"(0)
1069 );
1070 return rc;
1071# endif
1072
1073# elif defined(RT_ARCH_X86)
1074 int rc = VINF_SUCCESS;
1075 __asm
1076 {
1077 sub esp, 4
1078 mov dword ptr [esp], 0
1079 mov eax, [uFieldEnc]
1080 _emit 0x0f
1081 _emit 0x78
1082 _emit 0x04
1083 _emit 0x24 /* VMREAD eax, [esp] */
1084 mov edx, pData
1085 pop dword ptr [edx]
1086 jnc valid_vmcs
1087 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
1088 jmp the_end
1089valid_vmcs:
1090 jnz the_end
1091 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
1092the_end:
1093 }
1094 return rc;
1095
1096# else
1097# error "Shouldn't be here..."
1098# endif
1099}
1100#endif
1101
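/*
 * Usage sketch (illustrative): reading the VM-exit reason and handling the
 * OR-able failure codes described above.  VMX_VMCS32_RO_EXIT_REASON is assumed
 * to be the usual read-only field encoding from hm_vmx.h.
 *
 *     uint32_t uExitReason = 0;
 *     int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // Bits 15:0 hold the basic exit reason, bit 31 the VM-entry failure
 *         // flag.  See Intel spec. 24.9.1 "Basic VM-Exit Information".
 *     }
 */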
1102#if 0
1103#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
1104DECLASM(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData);
1105#else
1106DECLINLINE(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData)
1107{
1108# if RT_INLINE_ASM_GNU_STYLE
1109 int rc = VINF_SUCCESS;
1110 __asm__ __volatile__ (
1111 "movl $" RT_XSTR(VINF_SUCCESS)", %0 \n\t"
1112 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1113 "ja 2f \n\t"
1114 "je 1f \n\t"
1115 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1116 "jmp 2f \n\t"
1117 "1: \n\t"
1118 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
1119 "2: \n\t"
1120 :"=&r"(rc),
1121 "=d"(*pData)
1122 :"a"(uFieldEnc),
1123 "d"(0)
1124 );
1125 return rc;
1126
1127# elif VMX_USE_MSC_INTRINSICS
1128 unsigned char rcMsc;
1129# ifdef RT_ARCH_X86
1130 rcMsc = __vmx_vmread(uFieldEnc, pData);
1131# else
1132 uint64_t u64Tmp;
1133 rcMsc = __vmx_vmread(uFieldEnc, &u64Tmp);
1134 *pData = (uint32_t)u64Tmp;
1135# endif
1136 if (RT_LIKELY(rcMsc == 0))
1137 return VINF_SUCCESS;
1138 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
1139
1140#else
1141 int rc = VINF_SUCCESS;
1142 __asm
1143 {
1144 sub esp, 4
1145 mov dword ptr [esp], 0
1146 mov eax, [uFieldEnc]
1147 _emit 0x0f
1148 _emit 0x78
1149 _emit 0x04
1150 _emit 0x24 /* VMREAD eax, [esp] */
1151 mov edx, pData
1152 pop dword ptr [edx]
1153 jnc valid_vmcs
1154 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
1155 jmp the_end
1156
1157valid_vmcs:
1158 jnz the_end
1159 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
1160the_end:
1161 }
1162 return rc;
1163# endif
1164}
1165#endif
1166#endif
1167
1168
1169/**
1170 * Executes VMREAD for a 64-bit field.
1171 *
1172 * @returns VBox status code.
1173 * @retval VINF_SUCCESS.
1174 * @retval VERR_VMX_INVALID_VMCS_PTR.
1175 * @retval VERR_VMX_INVALID_VMCS_FIELD.
1176 *
1177 * @param uFieldEnc The VMCS field encoding.
1178 * @param pData Where to store VMCS field value.
1179 *
1180 * @remarks The values of the two status codes can be OR'ed together, the result
1181 * will be VERR_VMX_INVALID_VMCS_PTR.
1182 */
1183#if defined(RT_ARCH_X86) || (RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS)
1184DECLASM(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData);
1185#else
1186DECLINLINE(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData)
1187{
1188# if VMX_USE_MSC_INTRINSICS
1189# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1190 __vmx_vmread(uFieldEnc, pData);
1191 return VINF_SUCCESS;
1192# else
1193 unsigned char rcMsc;
1194 rcMsc = __vmx_vmread(uFieldEnc, pData);
1195 if (RT_LIKELY(rcMsc == 0))
1196 return VINF_SUCCESS;
1197 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
1198# endif
1199
1200# elif RT_INLINE_ASM_GNU_STYLE
1201# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1202 __asm__ __volatile__ (
1203 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1204 :"=d"(*pData)
1205 :"a"(uFieldEnc),
1206 "d"(0)
1207 );
1208 return VINF_SUCCESS;
1209# else
1210 int rc;
1211 __asm__ __volatile__ (
1212 "movl $" RT_XSTR(VINF_SUCCESS)", %0 \n\t"
1213 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1214 "ja 2f \n\t"
1215 "je 1f \n\t"
1216 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1217 "jmp 2f \n\t"
1218 "1: \n\t"
1219 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
1220 "2: \n\t"
1221 :"=&r"(rc),
1222 "=d"(*pData)
1223 :"a"(uFieldEnc),
1224 "d"(0)
1225 );
1226 return rc;
1227# endif
1228# else
1229# error "Shouldn't be here..."
1230# endif
1231}
1232#endif
1233
1234
1235#if 0
1236#if (!defined(RT_ARCH_X86) && !VMX_USE_MSC_INTRINSICS)
1237DECLASM(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData);
1238#else
1239DECLINLINE(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData)
1240{
1241# if VMX_USE_MSC_INTRINSICS
1242 unsigned char rcMsc;
1243# ifdef RT_ARCH_X86
1244 size_t uLow;
1245 size_t uHigh;
1246 rcMsc = __vmx_vmread(uFieldEnc, &uLow);
1247 rcMsc |= __vmx_vmread(uFieldEnc + 1, &uHigh);
1248 *pData = RT_MAKE_U64(uLow, uHigh);
1249# else
1250 rcMsc = __vmx_vmread(uFieldEnc, pData);
1251# endif
1252 if (RT_LIKELY(rcMsc == 0))
1253 return VINF_SUCCESS;
1254 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
1255
1256# elif defined(RT_ARCH_X86)
1257 int rc;
1258 uint32_t val_hi, val;
1259 rc = VMXReadVmcs32(uFieldEnc, &val);
1260 rc |= VMXReadVmcs32(uFieldEnc + 1, &val_hi);
1261 AssertRC(rc);
1262 *pData = RT_MAKE_U64(val, val_hi);
1263 return rc;
1264
1265# else
1266# error "Shouldn't be here..."
1267# endif
1268}
1269#endif
1270#endif
1271
1272
1273/**
1274 * Executes VMREAD for a 16-bit field.
1275 *
1276 * @returns VBox status code.
1277 * @retval VINF_SUCCESS.
1278 * @retval VERR_VMX_INVALID_VMCS_PTR.
1279 * @retval VERR_VMX_INVALID_VMCS_FIELD.
1280 *
1281 * @param uVmcsField The VMCS field.
1282 * @param pData Where to store VMCS field value.
1283 *
1284 * @remarks The values of the two status codes can be OR'ed together, the result
1285 * will be VERR_VMX_INVALID_VMCS_PTR.
1286 */
1287DECLINLINE(int) VMXReadVmcs16(uint32_t uVmcsField, uint16_t *pData)
1288{
1289 uint32_t u32Tmp;
1290 int rc;
1291 AssertMsg(RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH) == VMX_VMCSFIELD_WIDTH_16BIT, ("%#RX32\n", uVmcsField));
1292 rc = VMXReadVmcs32(uVmcsField, &u32Tmp);
1293 *pData = (uint16_t)u32Tmp;
1294 return rc;
1295}
1296
1297
1298/**
1299 * Executes VMREAD for a natural-width VMCS field.
1300 */
1301#ifdef RT_ARCH_AMD64
1302# define VMXReadVmcsNw VMXReadVmcs64
1303#else
1304# define VMXReadVmcsNw VMXReadVmcs32
1305#endif
1306
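/*
 * Usage sketch (illustrative): reading a natural-width field portably; this
 * expands to VMXReadVmcs64 on AMD64 hosts and VMXReadVmcs32 on x86 hosts, so
 * the output variable must be host-word sized (RTHCUINTREG is assumed to fit).
 * VMX_VMCS_RO_EXIT_QUALIFICATION is assumed to be the usual natural-width
 * encoding from hm_vmx.h.
 *
 *     RTHCUINTREG uExitQual = 0;
 *     int rc = VMXReadVmcsNw(VMX_VMCS_RO_EXIT_QUALIFICATION, &uExitQual);
 *     AssertRC(rc);
 */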
1307#endif /* RT_ARCH_AMD64 || RT_ARCH_X86 */
1308
1309/** @} */
1310
1311#endif /* !VBOX_INCLUDED_vmm_hmvmxinline_h */
1312