VirtualBox

source: vbox/trunk/include/VBox/vmm/hmvmxinline.h@80093

Last change on this file since 80093 was 80093, checked in by vboxsync, 5 years ago

hmvmxinline.h: Kicking out 32-bit host support [nit]. bugref:9511

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.6 KB
/** @file
 * HM - VMX Structures and Definitions. (VMM)
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef VBOX_INCLUDED_vmm_hmvmxinline_h
#define VBOX_INCLUDED_vmm_hmvmxinline_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/vmm/hm_vmx.h>
#include <VBox/err.h>

/* In Visual C++ versions prior to 2012, the vmx intrinsics are only available
   when targeting AMD64. */
#if RT_INLINE_ASM_USES_INTRIN >= 16 && defined(RT_ARCH_AMD64)
# pragma warning(push)
# pragma warning(disable:4668) /* Several incorrect __cplusplus uses. */
# pragma warning(disable:4255) /* Incorrect __slwpcb prototype. */
# include <intrin.h>
# pragma warning(pop)
/* We always want them as intrinsics, no functions. */
# pragma intrinsic(__vmx_on)
# pragma intrinsic(__vmx_off)
# pragma intrinsic(__vmx_vmclear)
# pragma intrinsic(__vmx_vmptrld)
# pragma intrinsic(__vmx_vmread)
# pragma intrinsic(__vmx_vmwrite)
# define VMX_USE_MSC_INTRINSICS 1
#else
# define VMX_USE_MSC_INTRINSICS 0
#endif


/** @defgroup grp_hm_vmx_inline VMX Inline Helpers
 * @ingroup grp_hm_vmx
 * @{
 */
/**
 * Gets the effective width of a VMCS field given its encoding, adjusted for
 * HIGH/FULL access for 64-bit fields.
 *
 * @returns The effective VMCS field width.
 * @param   uFieldEnc   The VMCS field encoding.
 *
 * @remarks Warning! This function does not verify the encoding is for a valid and
 *          supported VMCS field.
 */
DECLINLINE(uint8_t) HMVmxGetVmcsFieldWidthEff(uint32_t uFieldEnc)
{
    /* Only the "HIGH" parts of all 64-bit fields have bit 0 set. */
    if (uFieldEnc & RT_BIT(0))
        return VMXVMCSFIELDWIDTH_32BIT;

    /* Bits 13:14 contain the width of the VMCS field, see VMXVMCSFIELDWIDTH_XXX. */
    return (uFieldEnc >> 13) & 0x3;
}
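
/* Example (illustrative sketch, not part of the original header): decoding the
   effective width of a 64-bit field.  The encodings and width constants below
   are assumed to come from hm_vmx.h.

       uint8_t const uWidthFull = HMVmxGetVmcsFieldWidthEff(VMX_VMCS64_GUEST_EFER_FULL);
       uint8_t const uWidthHigh = HMVmxGetVmcsFieldWidthEff(VMX_VMCS64_GUEST_EFER_HIGH);
       Assert(uWidthFull == VMXVMCSFIELDWIDTH_64BIT);   (FULL access covers all 64 bits)
       Assert(uWidthHigh == VMXVMCSFIELDWIDTH_32BIT);   (HIGH access reads only the upper half)
*/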

/**
 * Returns whether the given VMCS field is a read-only VMCS field or not.
 *
 * @returns @c true if it's a read-only field, @c false otherwise.
 * @param   uFieldEnc   The VMCS field encoding.
 *
 * @remarks Warning! This function does not verify that the encoding is for a valid
 *          and/or supported VMCS field.
 */
DECLINLINE(bool) HMVmxIsVmcsFieldReadOnly(uint32_t uFieldEnc)
{
    /* See Intel spec. B.4.2 "Natural-Width Read-Only Data Fields". */
    return (RT_BF_GET(uFieldEnc, VMX_BF_VMCSFIELD_TYPE) == VMXVMCSFIELDTYPE_VMEXIT_INFO);
}

/**
 * Returns whether the given VM-entry interruption-information type is valid or not.
 *
 * @returns @c true if it's a valid type, @c false otherwise.
 * @param   fSupportsMTF    Whether the Monitor-Trap Flag CPU feature is supported.
 * @param   uType           The VM-entry interruption-information type.
 */
DECLINLINE(bool) HMVmxIsEntryIntInfoTypeValid(bool fSupportsMTF, uint8_t uType)
{
    /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
    switch (uType)
    {
        case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
        case VMX_ENTRY_INT_INFO_TYPE_NMI:
        case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
        case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
        case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
        case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:       return true;
        case VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT:   return fSupportsMTF;
        default:
            return false;
    }
}

/**
 * Returns whether the given VM-entry interruption-information vector and type
 * combination is valid or not.
 *
 * @returns @c true if it's a valid vector/type combination, @c false otherwise.
 * @param   uVector     The VM-entry interruption-information vector.
 * @param   uType       The VM-entry interruption-information type.
 *
 * @remarks Warning! This function does not validate the type field individually.
 *          Use it after verifying type is valid using HMVmxIsEntryIntInfoTypeValid.
 */
DECLINLINE(bool) HMVmxIsEntryIntInfoVectorValid(uint8_t uVector, uint8_t uType)
{
    /* See Intel spec. 26.2.1.3 "VM-Entry Control Fields". */
    if (   uType == VMX_ENTRY_INT_INFO_TYPE_NMI
        && uVector != X86_XCPT_NMI)
        return false;
    if (   uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
        && uVector > X86_XCPT_LAST)
        return false;
    if (   uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
        && uVector != VMX_ENTRY_INT_INFO_VECTOR_MTF)
        return false;
    return true;
}
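
/* Example (sketch, not part of the original header): validating a VM-entry
   interruption-information field before injecting an event, combining the type
   and vector checks above.  The accessor macros are assumed from hm_vmx.h.

       uint8_t const uType   = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
       uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo);
       if (   HMVmxIsEntryIntInfoTypeValid(fSupportsMTF, uType)
           && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
       {
           ... the interruption-information field is consistent and may be used ...
       }
*/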


/**
 * Returns whether the VM-exit is trap-like or fault-like.
 *
 * @returns @c true if it's a trap-like VM-exit, @c false otherwise.
 * @param   uExitReason     The VM-exit reason.
 *
 * @remarks Warning! This does not validate the VM-exit reason.
 */
DECLINLINE(bool) HMVmxIsVmexitTrapLike(uint32_t uExitReason)
{
    /*
     * Trap-like VM-exits - The instruction causing the VM-exit completes before the
     * VM-exit occurs.
     *
     * Fault-like VM-exits - The instruction causing the VM-exit is not completed before
     * the VM-exit occurs.
     *
     * See Intel spec. 25.5.2 "Monitor Trap Flag".
     * See Intel spec. 29.1.4 "EOI Virtualization".
     * See Intel spec. 29.4.3.3 "APIC-Write VM Exits".
     * See Intel spec. 29.1.2 "TPR Virtualization".
     */
    /** @todo NSTVMX: r=ramshankar: What about VM-exits due to debug traps (single-step,
     *        I/O breakpoints, data breakpoints), debug exceptions (data breakpoint)
     *        delayed by MovSS blocking, machine-check exceptions. */
    switch (uExitReason)
    {
        case VMX_EXIT_MTF:
        case VMX_EXIT_VIRTUALIZED_EOI:
        case VMX_EXIT_APIC_WRITE:
        case VMX_EXIT_TPR_BELOW_THRESHOLD:
            return true;
    }
    return false;
}


/**
 * Returns whether the VM-entry is vectoring or not given the VM-entry interruption
 * information field.
 *
 * @returns @c true if the VM-entry is vectoring, @c false otherwise.
 * @param   uEntryIntInfo       The VM-entry interruption information field.
 * @param   pEntryIntInfoType   The VM-entry interruption information type field.
 *                              Optional, can be NULL. Only updated when this
 *                              function returns @c true.
 */
DECLINLINE(bool) HMVmxIsVmentryVectoring(uint32_t uEntryIntInfo, uint8_t *pEntryIntInfoType)
{
    /*
     * The definition of what is a vectoring VM-entry is taken
     * from Intel spec. 26.6 "Special Features of VM Entry".
     */
    if (!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
        return false;

    /* Scope and keep variable defines on top to satisfy archaic c89 nonsense. */
    {
        uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
        switch (uType)
        {
            case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
            case VMX_ENTRY_INT_INFO_TYPE_NMI:
            case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
            case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
            case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
            case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
            {
                if (pEntryIntInfoType)
                    *pEntryIntInfoType = uType;
                return true;
            }
        }
    }
    return false;
}
/** @} */


/** @defgroup grp_hm_vmx_asm VMX Assembly Helpers
 * @{
 */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)

/**
 * Restores some host-state fields that need not be done on every VM-exit.
 *
 * @returns VBox status code.
 * @param   fRestoreHostFlags   Flags of which host registers need to be
 *                              restored.
 * @param   pRestoreHost        Pointer to the host-restore structure.
 */
DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);


/**
 * Dispatches an NMI to the host.
 */
DECLASM(int) VMXDispatchHostNmi(void);


/**
 * Executes VMXON.
 *
 * @returns VBox status code.
 * @param   HCPhysVmxOn     Physical address of VMXON structure.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXEnable(RTHCPHYS HCPhysVmxOn);
#else
DECLINLINE(int) VMXEnable(RTHCPHYS HCPhysVmxOn)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push    %3                                             \n\t"
       "push    %2                                             \n\t"
       ".byte   0xf3, 0x0f, 0xc7, 0x34, 0x24  # VMXON [esp]    \n\t"
       "ja      2f                                             \n\t"
       "je      1f                                             \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0    \n\t"
       "jmp     2f                                             \n\t"
       "1:                                                     \n\t"
       "movl    $" RT_XSTR(VERR_VMX_VMXON_FAILED)", %0         \n\t"
       "2:                                                     \n\t"
       "add     $8, %%esp                                      \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)HCPhysVmxOn),        /* don't allow direct memory reference here, */
        "ir"((uint32_t)(HCPhysVmxOn >> 32)) /* this would not work with -fomit-frame-pointer */
       :"memory"
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_on(&HCPhysVmxOn);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMXON_PTR : VERR_VMX_VMXON_FAILED;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [HCPhysVmxOn + 4]
        push    dword ptr [HCPhysVmxOn]
        _emit   0xf3
        _emit   0x0f
        _emit   0xc7
        _emit   0x34
        _emit   0x24     /* VMXON [esp] */
        jnc     vmxon_good
        mov     dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
        jmp     the_end

vmxon_good:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_VMXON_FAILED
the_end:
        add     esp, 8
    }
    return rc;
# endif
}
#endif

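/* Example (sketch, not part of the original header): a typical VMXON sequence.
   It assumes a zeroed, page-aligned VMXON region whose first 32 bits hold the
   VMCS revision identifier from MSR_IA32_VMX_BASIC, and that CR4.VMXE is set
   beforehand.  The variable names are illustrative; ASMRdMsr/ASMGetCR4/ASMSetCR4
   are the usual IPRT helpers.

       *(uint32_t *)pvVmxonPage = RT_LO_U32(ASMRdMsr(MSR_IA32_VMX_BASIC));
       ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
       int rc = VMXEnable(HCPhysVmxonPage);
       if (RT_FAILURE(rc))
           return rc;   (VERR_VMX_INVALID_VMXON_PTR or VERR_VMX_VMXON_FAILED)
*/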

/**
 * Executes VMXOFF.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(void) VMXDisable(void);
#else
DECLINLINE(void) VMXDisable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (
       ".byte 0x0f, 0x01, 0xc4  # VMXOFF    \n\t"
       );

# elif VMX_USE_MSC_INTRINSICS
    __vmx_off();

# else
    __asm
    {
        _emit   0x0f
        _emit   0x01
        _emit   0xc4    /* VMXOFF */
    }
# endif
}
#endif


/**
 * Executes VMCLEAR.
 *
 * @returns VBox status code.
 * @param   HCPhysVmcs      Physical address of VM control structure.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
#else
DECLINLINE(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push    %3                                              \n\t"
       "push    %2                                              \n\t"
       ".byte   0x66, 0x0f, 0xc7, 0x34, 0x24  # VMCLEAR [esp]   \n\t"
       "jnc     1f                                              \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0      \n\t"
       "1:                                                      \n\t"
       "add     $8, %%esp                                       \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)HCPhysVmcs),        /* don't allow direct memory reference here, */
        "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this would not work with -fomit-frame-pointer */
       :"memory"
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_vmclear(&HCPhysVmcs);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return VERR_VMX_INVALID_VMCS_PTR;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [HCPhysVmcs + 4]
        push    dword ptr [HCPhysVmcs]
        _emit   0x66
        _emit   0x0f
        _emit   0xc7
        _emit   0x34
        _emit   0x24     /* VMCLEAR [esp] */
        jnc     success
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
success:
        add     esp, 8
    }
    return rc;
# endif
}
#endif


/**
 * Executes VMPTRLD.
 *
 * @returns VBox status code.
 * @param   HCPhysVmcs      Physical address of VMCS structure.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
#else
DECLINLINE(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push    %3                                           \n\t"
       "push    %2                                           \n\t"
       ".byte   0x0f, 0xc7, 0x34, 0x24  # VMPTRLD [esp]      \n\t"
       "jnc     1f                                           \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0   \n\t"
       "1:                                                   \n\t"
       "add     $8, %%esp                                    \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)HCPhysVmcs),        /* don't allow direct memory reference here, */
        "ir"((uint32_t)(HCPhysVmcs >> 32)) /* this will not work with -fomit-frame-pointer */
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_vmptrld(&HCPhysVmcs);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return VERR_VMX_INVALID_VMCS_PTR;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [HCPhysVmcs + 4]
        push    dword ptr [HCPhysVmcs]
        _emit   0x0f
        _emit   0xc7
        _emit   0x34
        _emit   0x24     /* VMPTRLD [esp] */
        jnc     success
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR

success:
        add     esp, 8
    }
    return rc;
# endif
}
#endif
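
/* Example (sketch, not part of the original header): making a VMCS current.
   A VMCS region is normally cleared once with VMCLEAR (which also marks its
   launch state "clear") and then made the current VMCS with VMPTRLD; both take
   the physical address of the 4K-aligned VMCS region.

       int rc = VMXClearVmcs(HCPhysVmcs);
       if (RT_SUCCESS(rc))
           rc = VMXLoadVmcs(HCPhysVmcs);
       if (RT_FAILURE(rc))
           return rc;   (VERR_VMX_INVALID_VMCS_PTR on failure)
*/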


/**
 * Executes VMPTRST.
 *
 * @returns VBox status code.
 * @param   pHCPhysVmcs     Where to store the physical address of the current
 *                          VMCS.
 */
DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pHCPhysVmcs);


/**
 * Executes VMWRITE for a 32-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       VMCS field encoding.
 * @param   u32Val          The 32-bit value to set.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val);
#else
DECLINLINE(int) VMXWriteVmcs32(uint32_t uFieldEnc, uint32_t u32Val)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       ".byte   0x0f, 0x79, 0xc2        # VMWRITE eax, edx    \n\t"
       "ja      2f                                            \n\t"
       "je      1f                                            \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0    \n\t"
       "jmp     2f                                            \n\t"
       "1:                                                    \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0  \n\t"
       "2:                                                    \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "a"(uFieldEnc),
        "d"(u32Val)
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u32Val);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

#else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [u32Val]
        mov     eax, [uFieldEnc]
        _emit   0x0f
        _emit   0x79
        _emit   0x04
        _emit   0x24     /* VMWRITE eax, [esp] */
        jnc     valid_vmcs
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
        jmp     the_end

valid_vmcs:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
the_end:
        add     esp, 4
    }
    return rc;
# endif
}
#endif


/**
 * Executes VMWRITE for a 64-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       The VMCS field encoding.
 * @param   u64Val          The 16, 32 or 64-bit value to set.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if (defined(RT_ARCH_AMD64) && VMX_USE_MSC_INTRINSICS)
DECLINLINE(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val)
{
    unsigned char rcMsc = __vmx_vmwrite(uFieldEnc, u64Val);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
}
#else
DECLASM(int) VMXWriteVmcs64(uint32_t uFieldEnc, uint64_t u64Val);
#endif


/**
 * Executes VMWRITE for a 16-bit VMCS field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uVmcsField      The VMCS field.
 * @param   u16Val          The 16-bit value to set.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
DECLINLINE(int) VMXWriteVmcs16(uint32_t uVmcsField, uint16_t u16Val)
{
    AssertMsg(RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH) == VMX_VMCSFIELD_WIDTH_16BIT, ("%#RX32\n", uVmcsField));
    return VMXWriteVmcs32(uVmcsField, u16Val);
}


#ifdef RT_ARCH_AMD64
# define VMXWriteVmcsHstN VMXWriteVmcs64
# define VMXWriteVmcsGstN VMXWriteVmcs64
#else
# define VMXWriteVmcsHstN VMXWriteVmcs32
# define VMXWriteVmcsGstN VMXWriteVmcs32
#endif
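
/* Example (sketch, not part of the original header): picking the writer that
   matches the field width.  Natural-width fields go through the HstN/GstN
   aliases so the same code is correct on 64-bit hosts.  The field encodings
   below are assumed to come from hm_vmx.h.

       int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
       rc    |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, u64EptPtr);
       rc    |= VMXWriteVmcsHstN(VMX_VMCS_HOST_RIP, uHostRip);
       AssertRC(rc);
*/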


/**
 * Invalidates a page using INVEPT.
 *
 * @returns VBox status code.
 * @param   enmFlush        Type of flush.
 * @param   pDescriptor     Pointer to the descriptor.
 */
DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmFlush, uint64_t *pDescriptor);


/**
 * Invalidates a page using INVVPID.
 *
 * @returns VBox status code.
 * @param   enmFlush        Type of flush.
 * @param   pDescriptor     Pointer to the descriptor.
 */
DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmFlush, uint64_t *pDescriptor);


/**
 * Executes VMREAD for a 32-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       The VMCS field encoding.
 * @param   pData           Where to store VMCS field value.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData);
#else
DECLINLINE(int) VMXReadVmcs32(uint32_t uFieldEnc, uint32_t *pData)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "movl    $" RT_XSTR(VINF_SUCCESS)", %0                 \n\t"
       ".byte   0x0f, 0x78, 0xc2        # VMREAD eax, edx     \n\t"
       "ja      2f                                            \n\t"
       "je      1f                                            \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0    \n\t"
       "jmp     2f                                            \n\t"
       "1:                                                    \n\t"
       "movl    $" RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0  \n\t"
       "2:                                                    \n\t"
       :"=&r"(rc),
        "=d"(*pData)
       :"a"(uFieldEnc),
        "d"(0)
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc;
# ifdef RT_ARCH_X86
    rcMsc = __vmx_vmread(uFieldEnc, pData);
# else
    uint64_t u64Tmp;
    rcMsc = __vmx_vmread(uFieldEnc, &u64Tmp);
    *pData = (uint32_t)u64Tmp;
# endif
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

#else
    int rc = VINF_SUCCESS;
    __asm
    {
        sub     esp, 4
        mov     dword ptr [esp], 0
        mov     eax, [uFieldEnc]
        _emit   0x0f
        _emit   0x78
        _emit   0x04
        _emit   0x24     /* VMREAD eax, [esp] */
        mov     edx, pData
        pop     dword ptr [edx]
        jnc     valid_vmcs
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
        jmp     the_end

valid_vmcs:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
the_end:
    }
    return rc;
# endif
}
#endif
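
/* Example (sketch, not part of the original header): reading VM-exit information
   after a VM-exit, OR'ing the status codes as described above.  The read-only
   field encodings are assumed to come from hm_vmx.h.

       uint32_t uExitReason;
       uint32_t cbInstr;
       int rc  = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
       rc     |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &cbInstr);
       AssertRC(rc);
*/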


/**
 * Executes VMREAD for a 64-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uFieldEnc       The VMCS field encoding.
 * @param   pData           Where to store VMCS field value.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if (!defined(RT_ARCH_X86) && !VMX_USE_MSC_INTRINSICS)
DECLASM(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData);
#else
DECLINLINE(int) VMXReadVmcs64(uint32_t uFieldEnc, uint64_t *pData)
{
# if VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc;
# ifdef RT_ARCH_X86
    size_t uLow;
    size_t uHigh;
    rcMsc  = __vmx_vmread(uFieldEnc, &uLow);
    rcMsc |= __vmx_vmread(uFieldEnc + 1, &uHigh);
    *pData = RT_MAKE_U64(uLow, uHigh);
# else
    rcMsc = __vmx_vmread(uFieldEnc, pData);
# endif
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

# elif defined(RT_ARCH_X86)
    int rc;
    uint32_t val_hi, val;
    rc  = VMXReadVmcs32(uFieldEnc, &val);
    rc |= VMXReadVmcs32(uFieldEnc + 1, &val_hi);
    AssertRC(rc);
    *pData = RT_MAKE_U64(val, val_hi);
    return rc;

# else
#  error "Shouldn't be here..."
# endif
}
#endif


/**
 * Executes VMREAD for a 16-bit field.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS.
 * @retval  VERR_VMX_INVALID_VMCS_PTR.
 * @retval  VERR_VMX_INVALID_VMCS_FIELD.
 *
 * @param   uVmcsField      The VMCS field.
 * @param   pData           Where to store VMCS field value.
 *
 * @remarks The values of the two status codes can be OR'ed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
DECLINLINE(int) VMXReadVmcs16(uint32_t uVmcsField, uint16_t *pData)
{
    uint32_t u32Tmp;
    int      rc;
    AssertMsg(RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH) == VMX_VMCSFIELD_WIDTH_16BIT, ("%#RX32\n", uVmcsField));
    rc = VMXReadVmcs32(uVmcsField, &u32Tmp);
    *pData = (uint16_t)u32Tmp;
    return rc;
}


#ifdef RT_ARCH_AMD64
# define VMXReadVmcsHstN VMXReadVmcs64
# define VMXReadVmcsGstN VMXReadVmcs64
#else
# define VMXReadVmcsHstN VMXReadVmcs32
# define VMXReadVmcsGstN VMXReadVmcs32
#endif

#endif /* RT_ARCH_AMD64 || RT_ARCH_X86 */

/** @} */

#endif /* !VBOX_INCLUDED_vmm_hmvmxinline_h */
