VirtualBox

source: vbox/trunk/include/VBox/vmm/hmvmxinline.h@99582

Last change on this file since 99582 was 98103, checked in by vboxsync, 2 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 37.2 KB
Line 
1/** @file
2 * HM - VMX Structures and Definitions. (VMM)
3 */
4
5/*
6 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
7 *
8 * This file is part of VirtualBox base platform packages, as
9 * available from https://www.virtualbox.org.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation, in version 3 of the
14 * License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see <https://www.gnu.org/licenses>.
23 *
24 * The contents of this file may alternatively be used under the terms
25 * of the Common Development and Distribution License Version 1.0
26 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
27 * in the VirtualBox distribution, in which case the provisions of the
28 * CDDL are applicable instead of those of the GPL.
29 *
30 * You may elect to license modified versions of this file under the
31 * terms and conditions of either the GPL or the CDDL or both.
32 *
33 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
34 */
35
36#ifndef VBOX_INCLUDED_vmm_hmvmxinline_h
37#define VBOX_INCLUDED_vmm_hmvmxinline_h
38#ifndef RT_WITHOUT_PRAGMA_ONCE
39# pragma once
40#endif
41
42#include <VBox/vmm/hm_vmx.h>
43#include <VBox/err.h>
44
45/* In Visual C++ versions prior to 2012, the vmx intrinsics are only available
46 when targeting AMD64. */
47#if RT_INLINE_ASM_USES_INTRIN >= RT_MSC_VER_VS2010 && defined(RT_ARCH_AMD64)
48# include <iprt/sanitized/intrin.h>
49/* We always want them as intrinsics, no functions. */
50# pragma intrinsic(__vmx_on)
51# pragma intrinsic(__vmx_off)
52# pragma intrinsic(__vmx_vmclear)
53# pragma intrinsic(__vmx_vmptrld)
54# pragma intrinsic(__vmx_vmread)
55# pragma intrinsic(__vmx_vmwrite)
56# define VMX_USE_MSC_INTRINSICS 1
57#else
58# define VMX_USE_MSC_INTRINSICS 0
59#endif
60
61/**
62 * Whether we think the assembler supports VMX instructions.
63 *
64 * Guess that GCC 5 should come with a sufficiently recent binutils.
65 */
66#if RT_INLINE_ASM_GNU_STYLE && RT_GNUC_PREREQ(5,0)
67# define VMX_USE_GNU_STYLE_INLINE_VMX_INSTRUCTIONS 1
68#else
69# define VMX_USE_GNU_STYLE_INLINE_VMX_INSTRUCTIONS 0
70#endif
71
72/** Whether we can use the subsection trick to put error handling code
73 * elsewhere. */
74#if VMX_USE_GNU_STYLE_INLINE_VMX_INSTRUCTIONS && defined(__ELF__)
75# define VMX_USE_GNU_STYLE_INLINE_SECTION_TRICK 1
76#else
77# define VMX_USE_GNU_STYLE_INLINE_SECTION_TRICK 0
78#endif
79
80/* Skip checking VMREAD/VMWRITE failures on non-strict builds. */
81#ifndef VBOX_STRICT
82# define VBOX_WITH_VMREAD_VMWRITE_NOCHECK
83#endif
84
85
86/** @defgroup grp_hm_vmx_inline VMX Inline Helpers
87 * @ingroup grp_hm_vmx
88 * @{
89 */
90/**
91 * Gets the effective width of a VMCS field given its encoding, adjusted for
92 * HIGH/FULL access for 64-bit fields.
93 *
94 * @returns The effective VMCS field width.
95 * @param uFieldEnc The VMCS field encoding.
96 *
97 * @remarks Warning! This function does not verify the encoding is for a valid and
98 * supported VMCS field.
99 */
100DECLINLINE(uint8_t) VMXGetVmcsFieldWidthEff(uint32_t uFieldEnc)
101{
102 /* Only the "HIGH" parts of all 64-bit fields have bit 0 set. */
103 if (uFieldEnc & RT_BIT(0))
104 return VMXVMCSFIELDWIDTH_32BIT;
105
106 /* Bits 13:14 contain the width of the VMCS field; see VMXVMCSFIELDWIDTH_XXX. */
107 return (uFieldEnc >> 13) & 0x3;
108}
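
/* Editorial usage sketch (not part of the original header): the returned value is one
 * of the VMXVMCSFIELDWIDTH_XXX constants encoded in bits 13:14 of the field encoding
 * itself.  The field name below assumes the usual encoding constants from
 * VBox/vmm/hm_vmx.h.
 * @code
 *     uint8_t const uWidth = VMXGetVmcsFieldWidthEff(VMX_VMCS32_RO_EXIT_REASON);
 *     Assert(uWidth == VMXVMCSFIELDWIDTH_32BIT);
 * @endcode
 */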
109
110
111/**
112 * Returns whether the given VMCS field is a read-only VMCS field or not.
113 *
114 * @returns @c true if it's a read-only field, @c false otherwise.
115 * @param uFieldEnc The VMCS field encoding.
116 *
117 * @remarks Warning! This function does not verify that the encoding is for a valid
118 * and/or supported VMCS field.
119 */
120DECLINLINE(bool) VMXIsVmcsFieldReadOnly(uint32_t uFieldEnc)
121{
122 /* See Intel spec. B.4.2 "Natural-Width Read-Only Data Fields". */
123 return (RT_BF_GET(uFieldEnc, VMX_BF_VMCSFIELD_TYPE) == VMXVMCSFIELDTYPE_VMEXIT_INFO);
124}
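
/* Editorial usage sketch (assumption-level, not from the original header): a caller
 * could use this to reject writes to read-only (VM-exit information) fields before
 * issuing VMWRITE; the status code chosen here is only illustrative.
 * @code
 *     if (VMXIsVmcsFieldReadOnly(uVmcsField))
 *         return VERR_VMX_INVALID_VMCS_FIELD;
 *     return VMXWriteVmcs32(uVmcsField, u32Val);
 * @endcode
 */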
125
126
127/**
128 * Returns whether the given VM-entry interruption-information type is valid or not.
129 *
130 * @returns @c true if it's a valid type, @c false otherwise.
131 * @param fSupportsMTF Whether the Monitor-Trap Flag CPU feature is supported.
132 * @param uType The VM-entry interruption-information type.
133 */
134DECLINLINE(bool) VMXIsEntryIntInfoTypeValid(bool fSupportsMTF, uint8_t uType)
135{
136 /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
137 switch (uType)
138 {
139 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
140 case VMX_ENTRY_INT_INFO_TYPE_NMI:
141 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
142 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
143 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
144 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT: return true;
145 case VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT: return fSupportsMTF;
146 default:
147 return false;
148 }
149}
150
151
152/**
153 * Returns whether the given VM-entry interruption-information vector and type
154 * combination is valid or not.
155 *
156 * @returns @c true if it's a valid vector/type combination, @c false otherwise.
157 * @param uVector The VM-entry interruption-information vector.
158 * @param uType The VM-entry interruption-information type.
159 *
160 * @remarks Warning! This function does not validate the type field individually.
161 * Use it only after verifying that the type is valid using VMXIsEntryIntInfoTypeValid.
162 */
163DECLINLINE(bool) VMXIsEntryIntInfoVectorValid(uint8_t uVector, uint8_t uType)
164{
165 /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
166 if ( uType == VMX_ENTRY_INT_INFO_TYPE_NMI
167 && uVector != X86_XCPT_NMI)
168 return false;
169 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
170 && uVector > X86_XCPT_LAST)
171 return false;
172 if ( uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
173 && uVector != VMX_ENTRY_INT_INFO_VECTOR_MTF)
174 return false;
175 return true;
176}
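
/* Editorial usage sketch: the two validity checks above are meant to be combined when
 * vetting a VM-entry interruption-information field, e.g. during nested-VMX VM-entry
 * checks.  uEntryIntInfo and the VMX_ENTRY_INT_INFO_VECTOR() accessor are assumed to
 * come from VBox/vmm/hm_vmx.h; the status code returned is illustrative only.
 * @code
 *     uint8_t const uType   = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
 *     uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo);
 *     if (   !VMXIsEntryIntInfoTypeValid(fSupportsMTF, uType)
 *         || !VMXIsEntryIntInfoVectorValid(uVector, uType))
 *         return VERR_VMX_INVALID_VMCS_FIELD;
 * @endcode
 */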
177
178
179/**
180 * Returns whether the given VM-exit is trap-like or fault-like.
181 *
182 * @returns @c true if it's a trap-like VM-exit, @c false otherwise.
183 * @param uExitReason The VM-exit reason.
184 *
185 * @remarks Warning! This does not validate the VM-exit reason.
186 */
187DECLINLINE(bool) VMXIsVmexitTrapLike(uint32_t uExitReason)
188{
189 /*
190 * Trap-like VM-exits - The instruction causing the VM-exit completes before the
191 * VM-exit occurs.
192 *
193 * Fault-like VM-exits - The instruction causing the VM-exit is not completed before
194 * the VM-exit occurs.
195 *
196 * See Intel spec. 25.5.2 "Monitor Trap Flag".
197 * See Intel spec. 29.1.4 "EOI Virtualization".
198 * See Intel spec. 29.4.3.3 "APIC-Write VM Exits".
199 * See Intel spec. 29.1.2 "TPR Virtualization".
200 */
201 /** @todo NSTVMX: r=ramshankar: What about VM-exits due to debug traps (single-step,
202 * I/O breakpoints, data breakpoints), debug exceptions (data breakpoint)
203 * delayed by MovSS blocking, machine-check exceptions. */
204 switch (uExitReason)
205 {
206 case VMX_EXIT_MTF:
207 case VMX_EXIT_VIRTUALIZED_EOI:
208 case VMX_EXIT_APIC_WRITE:
209 case VMX_EXIT_TPR_BELOW_THRESHOLD:
210 return true;
211 }
212 return false;
213}
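
/* Editorial usage sketch: for trap-like VM-exits the reported guest RIP already points
 * past the completed instruction, so a handler must not skip or emulate it again;
 * fault-like exits still need that.  hmR0HandleFaultLikeExit() is a hypothetical
 * helper used purely for illustration.
 * @code
 *     if (!VMXIsVmexitTrapLike(uExitReason))
 *         rcStrict = hmR0HandleFaultLikeExit(pVCpu, uExitReason);
 * @endcode
 */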
214
215
216/**
217 * Returns whether the VM-entry is vectoring or not given the VM-entry interruption
218 * information field.
219 *
220 * @returns @c true if the VM-entry is vectoring, @c false otherwise.
221 * @param uEntryIntInfo The VM-entry interruption information field.
222 * @param pEntryIntInfoType The VM-entry interruption information type field.
223 * Optional, can be NULL. Only updated when this
224 * function returns @c true.
225 */
226DECLINLINE(bool) VMXIsVmentryVectoring(uint32_t uEntryIntInfo, uint8_t *pEntryIntInfoType)
227{
228 /*
229 * The definition of what is a vectoring VM-entry is taken
230 * from Intel spec. 26.6 "Special Features of VM Entry".
231 */
232 if (!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
233 return false;
234
235 /* Scope and keep variable definitions on top to satisfy archaic C89 nonsense. */
236 {
237 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
238 switch (uType)
239 {
240 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
241 case VMX_ENTRY_INT_INFO_TYPE_NMI:
242 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
243 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
244 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
245 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
246 {
247 if (pEntryIntInfoType)
248 *pEntryIntInfoType = uType;
249 return true;
250 }
251 }
252 }
253 return false;
254}
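
/* Editorial usage sketch: typical use is deciding whether a (nested) VM-entry is
 * injecting an event and, if so, logging or dispatching on its type.  Log() is the
 * regular VBox/log.h macro.
 * @code
 *     uint8_t uEntryIntInfoType;
 *     if (VMXIsVmentryVectoring(uEntryIntInfo, &uEntryIntInfoType))
 *         Log(("Vectoring VM-entry, type: %s\n", VMXGetEntryIntInfoTypeDesc(uEntryIntInfoType)));
 * @endcode
 */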
255
256
257/**
258 * Gets the description for a VMX abort reason.
259 *
260 * @returns The descriptive string.
261 * @param enmAbort The VMX abort reason.
262 */
263DECLINLINE(const char *) VMXGetAbortDesc(VMXABORT enmAbort)
264{
265 switch (enmAbort)
266 {
267 case VMXABORT_NONE: return "VMXABORT_NONE";
268 case VMXABORT_SAVE_GUEST_MSRS: return "VMXABORT_SAVE_GUEST_MSRS";
269 case VMXBOART_HOST_PDPTE: return "VMXBOART_HOST_PDPTE";
270 case VMXABORT_CURRENT_VMCS_CORRUPT: return "VMXABORT_CURRENT_VMCS_CORRUPT";
271 case VMXABORT_LOAD_HOST_MSR: return "VMXABORT_LOAD_HOST_MSR";
272 case VMXABORT_MACHINE_CHECK_XCPT: return "VMXABORT_MACHINE_CHECK_XCPT";
273 case VMXABORT_HOST_NOT_IN_LONG_MODE: return "VMXABORT_HOST_NOT_IN_LONG_MODE";
274 default:
275 break;
276 }
277 return "Unknown/invalid";
278}
279
280
281/**
282 * Gets the description for a virtual VMCS state.
283 *
284 * @returns The descriptive string.
285 * @param fVmcsState The virtual-VMCS state.
286 */
287DECLINLINE(const char *) VMXGetVmcsStateDesc(uint8_t fVmcsState)
288{
289 switch (fVmcsState)
290 {
291 case VMX_V_VMCS_LAUNCH_STATE_CLEAR: return "Clear";
292 case VMX_V_VMCS_LAUNCH_STATE_LAUNCHED: return "Launched";
293 default: return "Unknown";
294 }
295}
296
297
298/**
299 * Gets the description for a VM-entry interruption information event type.
300 *
301 * @returns The descriptive string.
302 * @param uType The event type.
303 */
304DECLINLINE(const char *) VMXGetEntryIntInfoTypeDesc(uint8_t uType)
305{
306 switch (uType)
307 {
308 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT: return "External Interrupt";
309 case VMX_ENTRY_INT_INFO_TYPE_NMI: return "NMI";
310 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT: return "Hardware Exception";
311 case VMX_ENTRY_INT_INFO_TYPE_SW_INT: return "Software Interrupt";
312 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT: return "Priv. Software Exception";
313 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT: return "Software Exception";
314 case VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT: return "Other Event";
315 default:
316 break;
317 }
318 return "Unknown/invalid";
319}
320
321
322/**
323 * Gets the description for a VM-exit interruption information event type.
324 *
325 * @returns The descriptive string.
326 * @param uType The event type.
327 */
328DECLINLINE(const char *) VMXGetExitIntInfoTypeDesc(uint8_t uType)
329{
330 switch (uType)
331 {
332 case VMX_EXIT_INT_INFO_TYPE_EXT_INT: return "External Interrupt";
333 case VMX_EXIT_INT_INFO_TYPE_NMI: return "NMI";
334 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT: return "Hardware Exception";
335 case VMX_EXIT_INT_INFO_TYPE_SW_INT: return "Software Interrupt";
336 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT: return "Priv. Software Exception";
337 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT: return "Software Exception";
338 default:
339 break;
340 }
341 return "Unknown/invalid";
342}
343
344
345/**
346 * Gets the description for an IDT-vectoring information event type.
347 *
348 * @returns The descriptive string.
349 * @param uType The event type.
350 */
351DECLINLINE(const char *) VMXGetIdtVectoringInfoTypeDesc(uint8_t uType)
352{
353 switch (uType)
354 {
355 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT: return "External Interrupt";
356 case VMX_IDT_VECTORING_INFO_TYPE_NMI: return "NMI";
357 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT: return "Hardware Exception";
358 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT: return "Software Interrupt";
359 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT: return "Priv. Software Exception";
360 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: return "Software Exception";
361 default:
362 break;
363 }
364 return "Unknown/invalid";
365}
366
367
368/** @} */
369
370
371/** @defgroup grp_hm_vmx_asm VMX Assembly Helpers
372 * @{
373 */
374#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
375
376/**
377 * Dispatches an NMI to the host.
378 */
379DECLASM(int) VMXDispatchHostNmi(void);
380
381
382/**
383 * Executes VMXON.
384 *
385 * @returns VBox status code.
386 * @param HCPhysVmxOn Physical address of VMXON structure.
387 */
388#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
389DECLASM(int) VMXEnable(RTHCPHYS HCPhysVmxOn);
390#else
391DECLINLINE(int) VMXEnable(RTHCPHYS HCPhysVmxOn)
392{
393# if VMX_USE_MSC_INTRINSICS
394 unsigned char rcMsc = __vmx_on(&HCPhysVmxOn);
395 if (RT_LIKELY(rcMsc == 0))
396 return VINF_SUCCESS;
397 return rcMsc == 2 ? VERR_VMX_INVALID_VMXON_PTR : VERR_VMX_VMXON_FAILED;
398
399# elif RT_INLINE_ASM_GNU_STYLE
400# ifdef RT_ARCH_AMD64
401 int rc;
402 __asm__ __volatile__ (
403 "pushq %2 \n\t"
404 ".byte 0xf3, 0x0f, 0xc7, 0x34, 0x24 # VMXON [esp] \n\t"
405 "ja 2f \n\t"
406 "je 1f \n\t"
407 "movl $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
408 "jmp 2f \n\t"
409 "1: \n\t"
410 "movl $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
411 "2: \n\t"
412 "add $8, %%rsp \n\t"
413 :"=rm"(rc)
414 :"0"(VINF_SUCCESS),
415 "ir"(HCPhysVmxOn) /* don't allow direct memory reference here, */
416 /* this would not work with -fomit-frame-pointer */
417 :"memory"
418 );
419 return rc;
420# else
421 int rc;
422 __asm__ __volatile__ (
423 "push %3 \n\t"
424 "push %2 \n\t"
425 ".byte 0xf3, 0x0f, 0xc7, 0x34, 0x24 # VMXON [esp] \n\t"
426 "ja 2f \n\t"
427 "je 1f \n\t"
428 "movl $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0 \n\t"
429 "jmp 2f \n\t"
430 "1: \n\t"
431 "movl $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0 \n\t"
432 "2: \n\t"
433 "add $8, %%esp \n\t"
434 :"=rm"(rc)
435 :"0"(VINF_SUCCESS),
436 "ir"((uint32_t)HCPhysVmxOn), /* don't allow direct memory reference here, */
437 "ir"((uint32_t)(HCPhysVmxOn >> 32)) /* this would not work with -fomit-frame-pointer */
438 :"memory"
439 );
440 return rc;
441# endif
442
443# elif defined(RT_ARCH_X86)
444 int rc = VINF_SUCCESS;
445 __asm
446 {
447 push dword ptr [HCPhysVmxOn + 4]
448 push dword ptr [HCPhysVmxOn]
449 _emit 0xf3
450 _emit 0x0f
451 _emit 0xc7
452 _emit 0x34
453 _emit 0x24 /* VMXON [esp] */
454 jnc vmxon_good
455 mov dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
456 jmp the_end
457
458vmxon_good:
459 jnz the_end
460 mov dword ptr [rc], VERR_VMX_VMXON_FAILED
461the_end:
462 add esp, 8
463 }
464 return rc;
465
466# else
467# error "Shouldn't be here..."
468# endif
469}
470#endif
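
/* Editorial call-sequence sketch (heavily simplified): VMXON additionally requires
 * CR4.VMXE to be set and HCPhysVmxOn to reference a page-aligned VMXON region whose
 * first dword holds the VMCS revision identifier; none of that setup is shown here.
 * @code
 *     int rc = VMXEnable(HCPhysVmxOn);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = VMXLoadVmcs(HCPhysVmcs);   // make a VMCS current, run the guest, ...
 *         VMXDisable();
 *     }
 * @endcode
 */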
471
472
473/**
474 * Executes VMXOFF.
475 */
476#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
477DECLASM(void) VMXDisable(void);
478#else
479DECLINLINE(void) VMXDisable(void)
480{
481# if VMX_USE_MSC_INTRINSICS
482 __vmx_off();
483
484# elif RT_INLINE_ASM_GNU_STYLE
485 __asm__ __volatile__ (
486 ".byte 0x0f, 0x01, 0xc4 # VMXOFF \n\t"
487 );
488
489# elif defined(RT_ARCH_X86)
490 __asm
491 {
492 _emit 0x0f
493 _emit 0x01
494 _emit 0xc4 /* VMXOFF */
495 }
496
497# else
498# error "Shouldn't be here..."
499# endif
500}
501#endif
502
503
504/**
505 * Executes VMCLEAR.
506 *
507 * @returns VBox status code.
508 * @param HCPhysVmcs Physical address of VM control structure.
509 */
510#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
511DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
512#else
513DECLINLINE(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs)
514{
515# if VMX_USE_MSC_INTRINSICS
516 unsigned char rcMsc = __vmx_vmclear(&HCPhysVmcs);
517 if (RT_LIKELY(rcMsc == 0))
518 return VINF_SUCCESS;
519 return VERR_VMX_INVALID_VMCS_PTR;
520
521# elif RT_INLINE_ASM_GNU_STYLE
522# ifdef RT_ARCH_AMD64
523 int rc;
524 __asm__ __volatile__ (
525 "pushq %2 \n\t"
526 ".byte 0x66, 0x0f, 0xc7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
527 "jnc 1f \n\t"
528 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
529 "1: \n\t"
530 "add $8, %%rsp \n\t"
531 :"=rm"(rc)
532 :"0"(VINF_SUCCESS),
533 "ir"(HCPhysVmcs) /* don't allow direct memory reference here, */
534 /* this would not work with -fomit-frame-pointer */
535 :"memory"
536 );
537 return rc;
538# else
539 int rc;
540 __asm__ __volatile__ (
541 "push %3 \n\t"
542 "push %2 \n\t"
543 ".byte 0x66, 0x0f, 0xc7, 0x34, 0x24 # VMCLEAR [esp] \n\t"
544 "jnc 1f \n\t"
545 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
546 "1: \n\t"
547 "add $8, %%esp \n\t"
548 :"=rm"(rc)
549 :"0"(VINF_SUCCESS),
550 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
551 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this would not work with -fomit-frame-pointer */
552 :"memory"
553 );
554 return rc;
555# endif
556
557# elif defined(RT_ARCH_X86)
558 int rc = VINF_SUCCESS;
559 __asm
560 {
561 push dword ptr [HCPhysVmcs + 4]
562 push dword ptr [HCPhysVmcs]
563 _emit 0x66
564 _emit 0x0f
565 _emit 0xc7
566 _emit 0x34
567 _emit 0x24 /* VMCLEAR [esp] */
568 jnc success
569 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
570success:
571 add esp, 8
572 }
573 return rc;
574
575# else
576# error "Shouldn't be here..."
577# endif
578}
579#endif
580
581
582/**
583 * Executes VMPTRLD.
584 *
585 * @returns VBox status code.
586 * @param HCPhysVmcs Physical address of VMCS structure.
587 */
588#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
589DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
590#else
591DECLINLINE(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs)
592{
593# if VMX_USE_MSC_INTRINSICS
594 unsigned char rcMsc = __vmx_vmptrld(&HCPhysVmcs);
595 if (RT_LIKELY(rcMsc == 0))
596 return VINF_SUCCESS;
597 return VERR_VMX_INVALID_VMCS_PTR;
598
599# elif RT_INLINE_ASM_GNU_STYLE
600# ifdef RT_ARCH_AMD64
601 int rc;
602 __asm__ __volatile__ (
603 "pushq %2 \n\t"
604 ".byte 0x0f, 0xc7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
605 "jnc 1f \n\t"
606 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
607 "1: \n\t"
608 "add $8, %%rsp \n\t"
609 :"=rm"(rc)
610 :"0"(VINF_SUCCESS),
611 "ir"(HCPhysVmcs) /* don't allow direct memory reference here, */
612 /* this will not work with -fomit-frame-pointer */
613 :"memory"
614 );
615 return rc;
616# else
617 int rc;
618 __asm__ __volatile__ (
619 "push %3 \n\t"
620 "push %2 \n\t"
621 ".byte 0x0f, 0xc7, 0x34, 0x24 # VMPTRLD [esp] \n\t"
622 "jnc 1f \n\t"
623 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
624 "1: \n\t"
625 "add $8, %%esp \n\t"
626 :"=rm"(rc)
627 :"0"(VINF_SUCCESS),
628 "ir"((uint32_t)HCPhysVmcs), /* don't allow direct memory reference here, */
629 "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this will not work with -fomit-frame-pointer */
630 :"memory"
631 );
632 return rc;
633# endif
634
635# elif defined(RT_ARCH_X86)
636 int rc = VINF_SUCCESS;
637 __asm
638 {
639 push dword ptr [HCPhysVmcs + 4]
640 push dword ptr [HCPhysVmcs]
641 _emit 0x0f
642 _emit 0xc7
643 _emit 0x34
644 _emit 0x24 /* VMPTRLD [esp] */
645 jnc success
646 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
647success:
648 add esp, 8
649 }
650 return rc;
651
652# else
653# error "Shouldn't be here..."
654# endif
655}
656#endif
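
/* Editorial usage sketch: a VMCS region is normally VMCLEARed before it is made the
 * current VMCS with VMPTRLD; both helpers return VERR_VMX_INVALID_VMCS_PTR on failure.
 * @code
 *     int rc = VMXClearVmcs(HCPhysVmcs);
 *     if (RT_SUCCESS(rc))
 *         rc = VMXLoadVmcs(HCPhysVmcs);
 * @endcode
 */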
657
658
659/**
660 * Executes VMPTRST.
661 *
662 * @returns VBox status code.
663 * @param pHCPhysVmcs Where to store the physical address of the current
664 * VMCS.
665 */
666DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pHCPhysVmcs);
667
668
669/**
670 * Executes VMWRITE for a 32-bit field.
671 *
672 * @returns VBox status code.
673 * @retval VINF_SUCCESS.
674 * @retval VERR_VMX_INVALID_VMCS_PTR.
675 * @retval VERR_VMX_INVALID_VMCS_FIELD.
676 *
677 * @param uFieldEnc VMCS field encoding.
678 * @param u32Val The 32-bit value to set.
679 *
680 * @remarks The values of the two status codes can be OR'ed together; the result
681 * will be VERR_VMX_INVALID_VMCS_PTR.
682 */
683#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
684DECLASM(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val);
685#else
686DECLINLINE(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val)
687{
688# if VMX_USE_MSC_INTRINSICS
689# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
690 __vmx_vmwrite(uFieldEnc, u32Val);
691 return VINF_SUCCESS;
692# else
693 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u32Val);
694 if (RT_LIKELY(rcMsc == 0))
695 return VINF_SUCCESS;
696 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
697# endif
698
699# elif RT_INLINE_ASM_GNU_STYLE
700# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
701 __asm__ __volatile__ (
702 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
703 :
704 :"a"(uFieldEnc),
705 "d"(u32Val)
706 );
707 return VINF_SUCCESS;
708# else
709 int rc;
710 __asm__ __volatile__ (
711 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
712 "ja 2f \n\t"
713 "je 1f \n\t"
714 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
715 "jmp 2f \n\t"
716 "1: \n\t"
717 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
718 "2: \n\t"
719 :"=rm"(rc)
720 :"0"(VINF_SUCCESS),
721 "a"(uFieldEnc),
722 "d"(u32Val)
723 );
724 return rc;
725# endif
726
727# elif defined(RT_ARCH_X86)
728 int rc = VINF_SUCCESS;
729 __asm
730 {
731 push dword ptr [u32Val]
732 mov eax, [uFieldEnc]
733 _emit 0x0f
734 _emit 0x79
735 _emit 0x04
736 _emit 0x24 /* VMWRITE eax, [esp] */
737 jnc valid_vmcs
738 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
739 jmp the_end
740valid_vmcs:
741 jnz the_end
742 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
743the_end:
744 add esp, 4
745 }
746 return rc;
747
748# else
749# error "Shouldn't be here..."
750# endif
751}
752#endif
753
754
755/**
756 * Executes VMWRITE for a 64-bit field.
757 *
758 * @returns VBox status code.
759 * @retval VINF_SUCCESS.
760 * @retval VERR_VMX_INVALID_VMCS_PTR.
761 * @retval VERR_VMX_INVALID_VMCS_FIELD.
762 *
763 * @param uFieldEnc The VMCS field encoding.
764 * @param u64Val The 16-, 32- or 64-bit value to set.
765 *
766 * @remarks The values of the two status codes can be OR'ed together; the result
767 * will be VERR_VMX_INVALID_VMCS_PTR.
768 */
769#if defined(RT_ARCH_X86) || (RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS)
770DECLASM(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val);
771#else
772DECLINLINE(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val)
773{
774# if VMX_USE_MSC_INTRINSICS
775# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
776 __vmx_vmwrite(uFieldEnc, u64Val);
777 return VINF_SUCCESS;
778# else
779 unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u64Val);
780 if (RT_LIKELY(rcMsc == 0))
781 return VINF_SUCCESS;
782 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
783# endif
784
785# elif RT_INLINE_ASM_GNU_STYLE
786# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
787 __asm__ __volatile__ (
788 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
789 :
790 :"a"(uFieldEnc),
791 "d"(u64Val)
792 );
793 return VINF_SUCCESS;
794# else
795 int rc;
796 __asm__ __volatile__ (
797 ".byte 0x0f, 0x79, 0xc2 # VMWRITE eax, edx \n\t"
798 "ja 2f \n\t"
799 "je 1f \n\t"
800 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
801 "jmp 2f \n\t"
802 "1: \n\t"
803 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
804 "2: \n\t"
805 :"=rm"(rc)
806 :"0"(VINF_SUCCESS),
807 "a"(uFieldEnc),
808 "d"(u64Val)
809 );
810 return rc;
811# endif
812
813# else
814# error "Shouldn't be here..."
815# endif
816}
817#endif
818
819
820/**
821 * Executes VMWRITE for a 16-bit VMCS field.
822 *
823 * @returns VBox status code.
824 * @retval VINF_SUCCESS.
825 * @retval VERR_VMX_INVALID_VMCS_PTR.
826 * @retval VERR_VMX_INVALID_VMCS_FIELD.
827 *
828 * @param uVmcsField The VMCS field.
829 * @param u16Val The 16-bit value to set.
830 *
831 * @remarks The values of the two status codes can be OR'ed together; the result
832 * will be VERR_VMX_INVALID_VMCS_PTR.
833 */
834DECLINLINE(int) VMXWriteVmcs16(uint32_t uVmcsField, uint16_t u16Val)
835{
836 AssertMsg(RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH) == VMX_VMCSFIELD_WIDTH_16BIT, ("%#RX32\n", uVmcsField));
837 return VMXWriteVmcs32(uVmcsField, u16Val);
838}
839
840
841/**
842 * Executes VMWRITE for a natural-width VMCS field.
843 */
844#ifdef RT_ARCH_AMD64
845# define VMXWriteVmcsNw VMXWriteVmcs64
846#else
847# define VMXWriteVmcsNw VMXWriteVmcs32
848#endif
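
/* Editorial usage sketch: pick the write helper matching the field width, with
 * VMXWriteVmcsNw resolving to the natural-width variant for the host architecture.
 * The field encodings are assumed to follow the VBox/vmm/hm_vmx.h names.
 * @code
 *     rc = VMXWriteVmcs16(VMX_VMCS16_GUEST_CS_SEL,   uCsSel);    AssertRC(rc);
 *     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fProcCtls); AssertRC(rc);
 *     rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, uEptp);     AssertRC(rc);
 *     rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_RIP,        uGuestRip); AssertRC(rc);
 * @endcode
 */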
849
850
851/**
852 * Invalidate a page using INVEPT.
853 *
854 * @returns VBox status code.
855 * @param enmFlush Type of flush.
856 * @param pDescriptor Pointer to the descriptor.
857 */
858DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmFlush, uint64_t *pDescriptor);
859
860
861/**
862 * Invalidate a page using INVVPID.
863 *
864 * @returns VBox status code.
865 * @param enmFlush Type of flush.
866 * @param pDescriptor Pointer to the descriptor.
867 */
868DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmFlush, uint64_t *pDescriptor);
869
870
871/**
872 * Executes VMREAD for a 32-bit field.
873 *
874 * @returns VBox status code.
875 * @retval VINF_SUCCESS.
876 * @retval VERR_VMX_INVALID_VMCS_PTR.
877 * @retval VERR_VMX_INVALID_VMCS_FIELD.
878 *
879 * @param uFieldEnc The VMCS field encoding.
880 * @param pData Where to store VMCS field value.
881 *
882 * @remarks The values of the two status codes can be OR'ed together; the result
883 * will be VERR_VMX_INVALID_VMCS_PTR.
884 */
885#if RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS
886DECLASM(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData);
887#else
888DECLINLINE(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData)
889{
890# if VMX_USE_MSC_INTRINSICS
891# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
892 uint64_t u64Tmp = 0;
893 __vmx_vmread(uFieldEnc, &u64Tmp);
894 *pData = (uint32_t)u64Tmp;
895 return VINF_SUCCESS;
896# else
897 unsigned char rcMsc;
898 uint64_t u64Tmp;
899 rcMsc = __vmx_vmread(uFieldEnc, &u64Tmp);
900 *pData = (uint32_t)u64Tmp;
901 if (RT_LIKELY(rcMsc == 0))
902 return VINF_SUCCESS;
903 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
904# endif
905
906# elif VMX_USE_GNU_STYLE_INLINE_VMX_INSTRUCTIONS
907 RTCCUINTREG uTmp = 0;
908# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
909 __asm__ __volatile__("vmread %[uField],%[uDst]"
910 : [uDst] "=mr" (uTmp)
911 : [uField] "r" ((RTCCUINTREG)uFieldEnc));
912 *pData = (uint32_t)uTmp;
913 return VINF_SUCCESS;
914# else
915#if 0
916 int rc;
917 __asm__ __volatile__("vmread %[uField],%[uDst]\n\t"
918 "movl %[rcSuccess],%[rc]\n\t"
919# if VMX_USE_GNU_STYLE_INLINE_SECTION_TRICK
920 "jna 1f\n\t"
921 ".section .text.vmread_failures, \"ax?\"\n\t"
922 "1:\n\t"
923 "movl %[rcInvalidVmcsPtr],%[rc]\n\t"
924 "jnz 2f\n\t"
925 "movl %[rcInvalidVmcsField],%[rc]\n\t"
926 "2:\n\t"
927 "jmp 3f\n\t"
928 ".previous\n\t"
929 "3:\n\t"
930# else
931 "ja 1f\n\t"
932 "movl %[rcInvalidVmcsPtr],%[rc]\n\t"
933 "jnz 1f\n\t"
934 "movl %[rcInvalidVmcsField],%[rc]\n\t"
935 "1:\n\t"
936# endif
937 : [uDst] "=mr" (uTmp)
938 , [rc] "=r" (rc)
939 : [uField] "r" ((RTCCUINTREG)uFieldEnc)
940 , [rcSuccess] "i" (VINF_SUCCESS)
941 , [rcInvalidVmcsPtr] "i" (VERR_VMX_INVALID_VMCS_PTR)
942 , [rcInvalidVmcsField] "i" (VERR_VMX_INVALID_VMCS_FIELD));
943 *pData = uTmp;
944 return rc;
945#else
946 int fSuccess, fFieldError;
947 __asm__ __volatile__("vmread %[uField],%[uDst]"
948 : [uDst] "=mr" (uTmp)
949 , "=@cca" (fSuccess)
950 , "=@ccnc" (fFieldError)
951 : [uField] "r" ((RTCCUINTREG)uFieldEnc));
952 *pData = uTmp;
953 return RT_LIKELY(fSuccess) ? VINF_SUCCESS : fFieldError ? VERR_VMX_INVALID_VMCS_FIELD : VERR_VMX_INVALID_VMCS_PTR;
954#endif
955# endif
956
957# elif RT_INLINE_ASM_GNU_STYLE
958# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
959 __asm__ __volatile__ (
960 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
961 :"=d"(*pData)
962 :"a"(uFieldEnc),
963 "d"(0)
964 );
965 return VINF_SUCCESS;
966# else
967 int rc;
968 __asm__ __volatile__ (
969 "movl $" RT_XSTR(VINF_SUCCESS)", %0 \n\t"
970 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
971 "ja 2f \n\t"
972 "je 1f \n\t"
973 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
974 "jmp 2f \n\t"
975 "1: \n\t"
976 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
977 "2: \n\t"
978 :"=&r"(rc),
979 "=d"(*pData)
980 :"a"(uFieldEnc),
981 "d"(0)
982 );
983 return rc;
984# endif
985
986# elif defined(RT_ARCH_X86)
987 int rc = VINF_SUCCESS;
988 __asm
989 {
990 sub esp, 4
991 mov dword ptr [esp], 0
992 mov eax, [uFieldEnc]
993 _emit 0x0f
994 _emit 0x78
995 _emit 0x04
996 _emit 0x24 /* VMREAD eax, [esp] */
997 mov edx, pData
998 pop dword ptr [edx]
999 jnc valid_vmcs
1000 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
1001 jmp the_end
1002valid_vmcs:
1003 jnz the_end
1004 mov dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
1005the_end:
1006 }
1007 return rc;
1008
1009# else
1010# error "Shouldn't be here..."
1011# endif
1012}
1013#endif
1014
1015
1016/**
1017 * Executes VMREAD for a 64-bit field.
1018 *
1019 * @returns VBox status code.
1020 * @retval VINF_SUCCESS.
1021 * @retval VERR_VMX_INVALID_VMCS_PTR.
1022 * @retval VERR_VMX_INVALID_VMCS_FIELD.
1023 *
1024 * @param uFieldEnc The VMCS field encoding.
1025 * @param pData Where to store VMCS field value.
1026 *
1027 * @remarks The values of the two status codes can be OR'ed together; the result
1028 * will be VERR_VMX_INVALID_VMCS_PTR.
1029 */
1030#if defined(RT_ARCH_X86) || (RT_INLINE_ASM_EXTERNAL && !VMX_USE_MSC_INTRINSICS)
1031DECLASM(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData);
1032#else
1033DECLINLINE(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData)
1034{
1035# if VMX_USE_MSC_INTRINSICS
1036# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1037 __vmx_vmread(uFieldEnc, pData);
1038 return VINF_SUCCESS;
1039# else
1040 unsigned char rcMsc;
1041 rcMsc = __vmx_vmread(uFieldEnc, pData);
1042 if (RT_LIKELY(rcMsc == 0))
1043 return VINF_SUCCESS;
1044 return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
1045# endif
1046
1047# elif VMX_USE_GNU_STYLE_INLINE_VMX_INSTRUCTIONS
1048 uint64_t uTmp = 0;
1049# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1050 __asm__ __volatile__("vmreadq %[uField],%[uDst]"
1051 : [uDst] "=m" (uTmp)
1052 : [uField] "r" ((uint64_t)uFieldEnc));
1053 *pData = uTmp;
1054 return VINF_SUCCESS;
1055# elif 0
1056 int rc;
1057 __asm__ __volatile__("vmreadq %[uField],%[uDst]\n\t"
1058 "movl %[rcSuccess],%[rc]\n\t"
1059# if VMX_USE_GNU_STYLE_INLINE_SECTION_TRICK
1060 "jna 1f\n\t"
1061 ".section .text.vmread_failures, \"ax?\"\n\t"
1062 "1:\n\t"
1063 "movl %[rcInvalidVmcsPtr],%[rc]\n\t"
1064 "jnz 2f\n\t"
1065 "movl %[rcInvalidVmcsField],%[rc]\n\t"
1066 "2:\n\t"
1067 "jmp 3f\n\t"
1068 ".previous\n\t"
1069 "3:\n\t"
1070# else
1071 "ja 1f\n\t"
1072 "movl %[rcInvalidVmcsPtr],%[rc]\n\t"
1073 "jnz 1f\n\t"
1074 "movl %[rcInvalidVmcsField],%[rc]\n\t"
1075 "1:\n\t"
1076# endif
1077 : [uDst] "=mr" (uTmp)
1078 , [rc] "=r" (rc)
1079 : [uField] "r" ((uint64_t)uFieldEnc)
1080 , [rcSuccess] "i" (VINF_SUCCESS)
1081 , [rcInvalidVmcsPtr] "i" (VERR_VMX_INVALID_VMCS_PTR)
1082 , [rcInvalidVmcsField] "i" (VERR_VMX_INVALID_VMCS_FIELD)
1083 );
1084 *pData = uTmp;
1085 return rc;
1086# else
1087 int fSuccess, fFieldError;
1088 __asm__ __volatile__("vmread %[uField],%[uDst]"
1089 : [uDst] "=mr" (uTmp)
1090 , "=@cca" (fSuccess)
1091 , "=@ccnc" (fFieldError)
1092 : [uField] "r" ((RTCCUINTREG)uFieldEnc));
1093 *pData = uTmp;
1094 return RT_LIKELY(fSuccess) ? VINF_SUCCESS : fFieldError ? VERR_VMX_INVALID_VMCS_FIELD : VERR_VMX_INVALID_VMCS_PTR;
1095# endif
1096
1097# elif RT_INLINE_ASM_GNU_STYLE
1098# ifdef VBOX_WITH_VMREAD_VMWRITE_NOCHECK
1099 __asm__ __volatile__ (
1100 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1101 :"=d"(*pData)
1102 :"a"(uFieldEnc),
1103 "d"(0)
1104 );
1105 return VINF_SUCCESS;
1106# else
1107 int rc;
1108 __asm__ __volatile__ (
1109 "movl $" RT_XSTR(VINF_SUCCESS)", %0 \n\t"
1110 ".byte 0x0f, 0x78, 0xc2 # VMREAD eax, edx \n\t"
1111 "ja 2f \n\t"
1112 "je 1f \n\t"
1113 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0 \n\t"
1114 "jmp 2f \n\t"
1115 "1: \n\t"
1116 "movl $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0 \n\t"
1117 "2: \n\t"
1118 :"=&r"(rc),
1119 "=d"(*pData)
1120 :"a"(uFieldEnc),
1121 "d"(0)
1122 );
1123 return rc;
1124# endif
1125
1126# else
1127# error "Shouldn't be here..."
1128# endif
1129}
1130#endif
1131
1132
1133/**
1134 * Executes VMREAD for a 16-bit field.
1135 *
1136 * @returns VBox status code.
1137 * @retval VINF_SUCCESS.
1138 * @retval VERR_VMX_INVALID_VMCS_PTR.
1139 * @retval VERR_VMX_INVALID_VMCS_FIELD.
1140 *
1141 * @param uVmcsField The VMCS field.
1142 * @param pData Where to store VMCS field value.
1143 *
1144 * @remarks The values of the two status codes can be OR'ed together; the result
1145 * will be VERR_VMX_INVALID_VMCS_PTR.
1146 */
1147DECLINLINE(int) VMXReadVmcs16(uint32_t uVmcsField, uint16_t *pData)
1148{
1149 uint32_t u32Tmp;
1150 int rc;
1151 AssertMsg(RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH) == VMX_VMCSFIELD_WIDTH_16BIT, ("%#RX32\n", uVmcsField));
1152 rc = VMXReadVmcs32(uVmcsField, &u32Tmp);
1153 *pData = (uint16_t)u32Tmp;
1154 return rc;
1155}
1156
1157
1158/**
1159 * Executes VMREAD for a natural-width VMCS field.
1160 */
1161#ifdef RT_ARCH_AMD64
1162# define VMXReadVmcsNw VMXReadVmcs64
1163#else
1164# define VMXReadVmcsNw VMXReadVmcs32
1165#endif
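
/* Editorial usage sketch: reads mirror the writes above; on strict builds a failure
 * distinguishes an invalid current VMCS from an unsupported field encoding.  The
 * field name is assumed from VBox/vmm/hm_vmx.h.
 * @code
 *     uint32_t uExitReason = 0;
 *     int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
 *     AssertRCReturn(rc, rc);
 * @endcode
 */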
1166
1167#endif /* RT_ARCH_AMD64 || RT_ARCH_X86 */
1168
1169/** @} */
1170
1171#endif /* !VBOX_INCLUDED_vmm_hmvmxinline_h */
1172