VirtualBox

source: vbox/trunk/include/VBox/vmm/hm.h@74491

Last change on this file since 74491 was 74389, checked in by vboxsync, 6 years ago

VMM/CPUM, IEM, HM: Nested VMX: bugref:9180 VM-exit bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 12.0 KB
/** @file
 * HM - Intel/AMD VM Hardware Assisted Virtualization Manager (VMM)
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef ___VBox_vmm_hm_h
#define ___VBox_vmm_hm_h

#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/trpm.h>
#include <iprt/mp.h>


/** @defgroup grp_hm The Hardware Assisted Virtualization Manager API
 * @ingroup grp_vmm
 * @{
 */

RT_C_DECLS_BEGIN

/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval true if used.
 * @retval false if software virtualization (raw-mode) or NEM is used.
 *
 * @param a_pVM The cross context VM structure.
 * @deprecated Please use VM_IS_RAW_MODE_ENABLED, VM_IS_HM_OR_NEM_ENABLED, or
 *             VM_IS_HM_ENABLED instead.
 * @internal
 */
#if defined(VBOX_STRICT) && defined(IN_RING3)
# define HMIsEnabled(a_pVM) HMIsEnabledNotMacro(a_pVM)
#else
# define HMIsEnabled(a_pVM) ((a_pVM)->fHMEnabled)
#endif
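
/*
 * Minimal usage sketch, not part of the original header: the call site below is
 * hypothetical and only illustrates how code might gate a hardware-assisted path
 * on HMIsEnabled(); as the doc comment above notes, new code should prefer
 * VM_IS_HM_ENABLED or VM_IS_HM_OR_NEM_ENABLED over this deprecated macro.
 *
 *     if (HMIsEnabled(pVM))
 *     {
 *         // VT-x/AMD-V is in use for this VM; take the hardware-assisted path.
 *     }
 *     else
 *     {
 *         // Raw-mode or NEM is in use; take the software/NEM path instead.
 *     }
 */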

/**
 * Checks whether the raw-mode context is required for HM purposes.
 *
 * @retval true if required by HM for switching the CPU to 64-bit mode.
 * @retval false if not required by HM.
 *
 * @param a_pVM The cross context VM structure.
 * @internal
 */
#if HC_ARCH_BITS == 64
# define HMIsRawModeCtxNeeded(a_pVM) (false)
#else
# define HMIsRawModeCtxNeeded(a_pVM) ((a_pVM)->fHMNeedRawModeCtx)
#endif

/**
 * Checks whether we're in the special hardware virtualization context.
 * @returns true / false.
 * @param a_pVCpu The caller's cross context virtual CPU structure.
 * @thread EMT
 */
#ifdef IN_RING0
# define HMIsInHwVirtCtx(a_pVCpu) (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_HM)
#else
# define HMIsInHwVirtCtx(a_pVCpu) (false)
#endif

/**
 * Checks whether we're in the special hardware virtualization context and we
 * cannot perform a long jump without guru meditating and possibly messing up
 * the host and/or guest state.
 *
 * This is after we've turned interrupts off and such.
 *
 * @returns true / false.
 * @param a_pVCpu The caller's cross context virtual CPU structure.
 * @thread EMT
 */
#ifdef IN_RING0
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu) (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_EXEC)
#else
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu) (false)
#endif
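
/*
 * Minimal sketch, not part of the original header: a hypothetical ring-0 call
 * site showing how these two checks might typically be combined. The stricter
 * HMIsInHwVirtNoLongJmpCtx() means interrupts are already off and a long jump
 * back to ring-3 would corrupt state, so only lightweight work (setting flags,
 * deferring) should be done there.
 *
 *     if (HMIsInHwVirtNoLongJmpCtx(pVCpu))
 *     {
 *         // Interrupts are off: only set flags / defer work, never long jump.
 *     }
 *     else if (HMIsInHwVirtCtx(pVCpu))
 *     {
 *         // In the HM context, but a long jump back to ring-3 is still possible.
 *     }
 */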

/**
 * 64-bit raw-mode (intermediate memory context) operations.
 *
 * These are special hypervisor eip values used when running 64-bit guests on
 * 32-bit hosts. Each operation corresponds to a routine.
 *
 * @note Duplicated in the assembly code!
 */
typedef enum HM64ON32OP
{
    HM64ON32OP_INVALID = 0,
    HM64ON32OP_VMXRCStartVM64,
    HM64ON32OP_SVMRCVMRun64,
    HM64ON32OP_HMRCSaveGuestFPU64,
    HM64ON32OP_HMRCSaveGuestDebug64,
    HM64ON32OP_HMRCTestSwitcher64,
    HM64ON32OP_END,
    HM64ON32OP_32BIT_HACK = 0x7fffffff
} HM64ON32OP;
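
/*
 * Minimal sketch, not part of the original header: a hypothetical helper showing
 * how a 32-bit host side might validate one of these operation codes before
 * handing it to the 64-bit world switcher. The name hmExampleRun64BitOp and the
 * elided switcher call are illustrative only; the real dispatch lives in the
 * ring-0 and assembly switcher code.
 *
 *     static int hmExampleRun64BitOp(PVM pVM, PVMCPU pVCpu, HM64ON32OP enmOp)
 *     {
 *         AssertReturn(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END,
 *                      VERR_INVALID_PARAMETER);
 *         // ... invoke the 32->64 switcher with enmOp selecting the routine ...
 *         return VINF_SUCCESS;
 *     }
 */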

/** @name All-context HM API.
 * @{ */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM);
VMMDECL(bool) HMCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx);
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM);
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu);
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable);
VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM);
VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM);
VMM_INT_DECL(bool) HMIsVmxSupported(PVM pVM);
VMM_INT_DECL(const char *) HMVmxGetDiagDesc(VMXVDIAG enmDiag);
VMM_INT_DECL(const char *) HMVmxGetAbortDesc(VMXABORT enmAbort);
VMM_INT_DECL(void) HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
/** @} */

/** @name All-context VMX helpers.
 * These are VMX functions (based on VMX specs.) that may be used by IEM/REM and
 * not VirtualBox functions that are used for hardware-assisted VMX. Those are
 * declared below under the !IN_RC section.
 * @{ */
VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs);
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue);
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx);
VMM_INT_DECL(int) HMVmxEntryIntInfoInjectTrpmEvent(PVMCPU pVCpu, uint32_t uEntryIntInfo, uint32_t uErrCode,
                                                   uint32_t cbInstr, RTGCUINTPTR GCPtrFaultAddress);
/** @} */

/** @name All-context SVM helpers.
 *
 * These are SVM functions (based on AMD specs.) that may be used by IEM/REM and
 * not VirtualBox functions that are used for hardware-assisted SVM. Those are
 * declared below under the !IN_RC section.
 * @{ */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent);
VMM_INT_DECL(int) HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
VMM_INT_DECL(bool) HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                            uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                            PSVMIOIOEXITINFO pIoExitInfo);
/** @} */

#ifndef IN_RC

/** @name R0, R3 HM (VMX/SVM agnostic) handlers.
 * @{ */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu);
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM);
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt);
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys);
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM);
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM);
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM);
VMM_INT_DECL(bool) HMIsMsrBitmapActive(PVM pVM);
/** @} */

/** @name R0, R3 SVM handlers.
 * @{ */
VMM_INT_DECL(bool) HMSvmIsVGifActive(PVM pVM);
VMM_INT_DECL(uint64_t) HMSvmNstGstApplyTscOffset(PVMCPU pVCpu, uint64_t uTicks);
# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx);
# endif
VMM_INT_DECL(int) HMSvmIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
VMM_INT_DECL(int) HMHCSvmMaybeMovTprHypercall(PVMCPU pVCpu);
/** @} */

#else /* Nops in RC: */

/** @name RC HM (VMX/SVM agnostic) handlers.
 * @{ */
# define HMFlushTLB(pVCpu) do { } while (0)
# define HMFlushTLBOnAllVCpus(pVM) do { } while (0)
# define HMInvalidatePageOnAllVCpus(pVM, GCVirt) do { } while (0)
# define HMInvalidatePhysPage(pVM, GCPhys) do { } while (0)
# define HMAreNestedPagingAndFullGuestExecEnabled(pVM) false
# define HMIsLongModeAllowed(pVM) false
# define HMIsNestedPagingActive(pVM) false
# define HMIsMsrBitmapActive(pVM) false
/** @} */

/** @name RC SVM handlers.
 * @{ */
# define HMSvmIsVGifActive(pVM) false
# define HMSvmNstGstApplyTscOffset(pVCpu, uTicks) (uTicks)
# define HMSvmNstGstVmExitNotify(pVCpu, pCtx) do { } while (0)
# define HMSvmIsSubjectToErratum170(puFamily, puModel, puStepping) false
# define HMHCSvmMaybeMovTprHypercall(pVCpu) do { } while (0)
/** @} */

#endif

#ifdef IN_RING0
/** @defgroup grp_hm_r0 The HM ring-0 Context API
 * @{
 */
VMMR0_INT_DECL(int) HMR0Init(void);
VMMR0_INT_DECL(int) HMR0Term(void);
VMMR0_INT_DECL(int) HMR0InitVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0TermVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0EnableAllCpus(PVM pVM);
# ifdef VBOX_WITH_RAW_MODE
VMMR0_INT_DECL(int) HMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled);
VMMR0_INT_DECL(void) HMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled);
# endif

VMMR0_INT_DECL(int) HMR0SetupVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0RunGuestCode(PVM pVM, PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0Enter(PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPU pVCpu);
VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser);
VMMR0_INT_DECL(void) HMR0NotifyCpumUnloadedGuestFpuState(PVMCPU pVCpu);
VMMR0_INT_DECL(void) HMR0NotifyCpumModifiedHostCr0(PVMCPU pVCpu);
VMMR0_INT_DECL(bool) HMR0SuspendPending(void);
VMMR0_INT_DECL(int) HMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
VMMR0_INT_DECL(int) HMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);

# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
VMMR0_INT_DECL(int) HMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0_INT_DECL(int) HMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0_INT_DECL(int) HMR0TestSwitcher3264(PVM pVM);
# endif

/** @} */
#endif /* IN_RING0 */


#ifdef IN_RING3
/** @defgroup grp_hm_r3 The HM ring-3 Context API
 * @{
 */
VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVirtApicRegsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM);

VMMR3_INT_DECL(bool) HMR3IsEventPending(PVMCPU pVCpu);
VMMR3_INT_DECL(int) HMR3Init(PVM pVM);
VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM);
VMMR3_INT_DECL(int) HMR3Term(PVM pVM);
VMMR3_INT_DECL(void) HMR3Reset(PVM pVM);
VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu);
VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode);
VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM);
VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(bool) HMR3IsActive(PVMCPU pVCpu);
VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx);
VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM);
VMMR3DECL(const char *) HMR3GetVmxExitName(uint32_t uExit);
VMMR3DECL(const char *) HMR3GetSvmExitName(uint32_t uExit);
/** @} */
#endif /* IN_RING3 */

/** @} */
RT_C_DECLS_END


#endif