VirtualBox

source: vbox/trunk/include/VBox/vmm/hm.h @ revision 78454

Last change on this file since 78454 was 78254, checked in by vboxsync, 6 years ago

VMM/HM: const bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 12.1 KB
Line 
1/** @file
2 * HM - Intel/AMD VM Hardware Assisted Virtualization Manager (VMM)
3 */
4
5/*
6 * Copyright (C) 2006-2019 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef VBOX_INCLUDED_vmm_hm_h
27#define VBOX_INCLUDED_vmm_hm_h
28#ifndef RT_WITHOUT_PRAGMA_ONCE
29# pragma once
30#endif
31
32#include <VBox/vmm/pgm.h>
33#include <VBox/vmm/cpum.h>
34#include <VBox/vmm/vmm.h>
35#include <VBox/vmm/hm_svm.h>
36#include <VBox/vmm/hm_vmx.h>
37#include <VBox/vmm/trpm.h>
38#include <iprt/mp.h>
39
40
41/** @defgroup grp_hm The Hardware Assisted Virtualization Manager API
42 * @ingroup grp_vmm
43 * @{
44 */
45
46RT_C_DECLS_BEGIN
47
/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  true if used.
 * @retval  false if software virtualization (raw-mode) or NEM is used.
 *
 * @param   a_pVM   The cross context VM structure.
 * @deprecated Please use VM_IS_RAW_MODE_ENABLED, VM_IS_HM_OR_NEM_ENABLED, or
 *             VM_IS_HM_ENABLED instead.
 * @internal
 */
#if defined(VBOX_STRICT) && defined(IN_RING3)
/* Strict ring-3 builds route through a real function (HMIsEnabledNotMacro,
   declared below) instead of reading the flag directly, so extra validation
   can happen there; all other builds use the cheap direct field access. */
# define HMIsEnabled(a_pVM) HMIsEnabledNotMacro(a_pVM)
#else
# define HMIsEnabled(a_pVM) ((a_pVM)->fHMEnabled)
#endif
64
/**
 * Checks whether the raw-mode context is required for HM purposes.
 *
 * @retval  true if required by HM for switching the CPU to 64-bit mode.
 * @retval  false if not required by HM.
 *
 * @param   a_pVM   The cross context VM structure.
 * @internal
 */
#if HC_ARCH_BITS == 64
/* A 64-bit host never needs the 32->64 switcher; constant-fold to false. */
# define HMIsRawModeCtxNeeded(a_pVM) (false)
#else
# define HMIsRawModeCtxNeeded(a_pVM) ((a_pVM)->fHMNeedRawModeCtx)
#endif
79
/**
 * Checks whether we're in the special hardware virtualization context.
 *
 * Only meaningful in ring-0, where the VCPU state can actually be
 * VMCPUSTATE_STARTED_HM; everywhere else this is constant false.
 *
 * @returns true / false.
 * @param   a_pVCpu     The caller's cross context virtual CPU structure.
 * @thread  EMT
 */
#ifdef IN_RING0
# define HMIsInHwVirtCtx(a_pVCpu) (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_HM)
#else
# define HMIsInHwVirtCtx(a_pVCpu) (false)
#endif
91
/**
 * Checks whether we're in the special hardware virtualization context and we
 * cannot perform long jump without guru meditating and possibly messing up the
 * host and/or guest state.
 *
 * This is after we've turned interrupts off and such.
 *
 * Only meaningful in ring-0 (state VMCPUSTATE_STARTED_EXEC); constant false
 * in all other contexts.
 *
 * @returns true / false.
 * @param   a_pVCpu     The caller's cross context virtual CPU structure.
 * @thread  EMT
 */
#ifdef IN_RING0
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu) (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_EXEC)
#else
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu) (false)
#endif
108
/**
 * 64-bit raw-mode (intermediate memory context) operations.
 *
 * These are special hypervisor eip values used when running 64-bit guests on
 * 32-bit hosts.  Each operation corresponds to a routine (named by the
 * enumerator).
 *
 * @note Duplicated in the assembly code!  Keep values and order in sync.
 */
typedef enum HM64ON32OP
{
    HM64ON32OP_INVALID = 0,             /**< Customary invalid zero value. */
    HM64ON32OP_VMXRCStartVM64,          /**< The VMXRCStartVM64 routine. */
    HM64ON32OP_SVMRCVMRun64,            /**< The SVMRCVMRun64 routine. */
    HM64ON32OP_HMRCSaveGuestFPU64,      /**< The HMRCSaveGuestFPU64 routine. */
    HM64ON32OP_HMRCSaveGuestDebug64,    /**< The HMRCSaveGuestDebug64 routine. */
    HM64ON32OP_HMRCTestSwitcher64,      /**< The HMRCTestSwitcher64 routine. */
    HM64ON32OP_END,                     /**< End of valid values (exclusive). */
    HM64ON32OP_32BIT_HACK = 0x7fffffff  /**< Forces the enum to 32 bits. */
} HM64ON32OP;
128
/** @name All-context HM API.
 * @{ */
/* Function form of the HMIsEnabled() macro for strict ring-3 builds. */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM);
VMMDECL(bool) HMCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx);
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM);
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu);
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable);
VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM);
VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM);
/* Human-readable description helpers, mainly for diagnostics and logging. */
VMM_INT_DECL(const char *) HMGetVmxDiagDesc(VMXVDIAG enmDiag);
VMM_INT_DECL(const char *) HMGetVmxAbortDesc(VMXABORT enmAbort);
VMM_INT_DECL(const char *) HMGetVmxVmcsStateDesc(uint8_t fVmcsState);
VMM_INT_DECL(const char *) HMGetVmxIdtVectoringInfoTypeDesc(uint8_t uType);
VMM_INT_DECL(const char *) HMGetVmxExitIntInfoTypeDesc(uint8_t uType);
VMM_INT_DECL(const char *) HMGetVmxEntryIntInfoTypeDesc(uint8_t uType);
VMM_INT_DECL(const char *) HMGetVmxExitName(uint32_t uExit);
VMM_INT_DECL(const char *) HMGetSvmExitName(uint32_t uExit);
VMM_INT_DECL(void) HMDumpHwvirtVmxState(PVMCPU pVCpu);
VMM_INT_DECL(void) HMHCChangedPagingMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
/* Converters from the generic SUP hardware-virt MSR container to the
   VMX/SVM-specific MSR structures. */
VMM_INT_DECL(void) HMGetVmxMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pMsrs, PVMXMSRS pVmxMsrs);
VMM_INT_DECL(void) HMGetSvmMsrsFromHwvirtMsrs(PCSUPHWVIRTMSRS pMsrs, PSVMMSRS pSvmMsrs);
/** @} */
152
/** @name All-context VMX helpers.
 *
 * These are hardware-assisted VMX functions (used by IEM/REM/CPUM and HM).
 * Helpers based purely on the Intel VT-x specification (used by IEM/REM and
 * HM) can be found in CPUM.
 * @{ */
VMM_INT_DECL(bool) HMCanExecuteVmxGuest(PVMCPU pVCpu, PCCPUMCTX pCtx);
/* Maps a VMX interruption-information field to the TRPM event type. */
VMM_INT_DECL(TRPMEVENT) HMVmxEventToTrpmEventType(uint32_t uIntInfo);
/** @} */
162
/** @name All-context SVM helpers.
 *
 * These are hardware-assisted SVM functions (used by IEM/REM/CPUM and HM).
 * Helpers based purely on the AMD SVM specification (used by IEM/REM and HM)
 * can be found in CPUM.
 * @{ */
/* Maps an SVM event (EVENTINJ/EXITINTINFO format) to the TRPM event type. */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent, uint8_t uVector);
/** @} */
171
#ifndef IN_RC

/** @name R0, R3 HM (VMX/SVM agnostic) handlers.
 * @{ */
VMM_INT_DECL(int) HMFlushTlb(PVMCPU pVCpu);
VMM_INT_DECL(int) HMFlushTlbOnAllVCpus(PVM pVM);
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt);
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys);
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM);
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM);
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM);
VMM_INT_DECL(bool) HMIsMsrBitmapActive(PVM pVM);
# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/* NOTE(review): a VMX nested-guest notifier guarded by the *SVM* nested
   hardware-virt define looks wrong; VBOX_WITH_NESTED_HWVIRT_VMX appears to
   be the intended guard — verify against the definition site before use. */
VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx);
# endif
/** @} */

/** @name R0, R3 SVM handlers.
 * @{ */
VMM_INT_DECL(bool) HMIsSvmVGifActive(PVM pVM);
/* Apply/remove the nested-guest TSC offset to/from a raw tick value. */
VMM_INT_DECL(uint64_t) HMApplySvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks);
VMM_INT_DECL(uint64_t) HMRemoveSvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks);
# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
VMM_INT_DECL(void) HMNotifySvmNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx);
# endif
VMM_INT_DECL(int) HMIsSubjectToSvmErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
VMM_INT_DECL(int) HMHCMaybeMovTprSvmHypercall(PVMCPU pVCpu);
/** @} */
200
201#else /* Nops in RC: */
202
203/** @name RC HM (VMX/SVM agnostic) handlers.
204 * @{ */
205# define HMFlushTlb(pVCpu) do { } while (0)
206# define HMFlushTlbOnAllVCpus(pVM) do { } while (0)
207# define HMInvalidatePageOnAllVCpus(pVM, GCVirt) do { } while (0)
208# define HMInvalidatePhysPage(pVM, GCVirt) do { } while (0)
209# define HMAreNestedPagingAndFullGuestExecEnabled(pVM) false
210# define HMIsLongModeAllowed(pVM) false
211# define HMIsNestedPagingActive(pVM) false
212# define HMIsMsrBitmapsActive(pVM) false
213/** @} */
214
215/** @name RC SVM handlers.
216 * @{ */
217# define HMIsSvmVGifActive(pVM) false
218# define HMApplySvmNstGstTscOffset(pVCpu, uTicks) (uTicks)
219# define HMRemoveSvmNstGstTscOffset(pVCpu, uTicks) (uTicks)
220# define HMNotifySvmNstGstVmexit(pVCpu, pCtx) do { } while (0)
221# define HMIsSubjectToSvmErratum170(puFamily, puModel, puStepping) false
222# define HMHCMaybeMovTprSvmHypercall(pVCpu) do { } while (0)
223/** @} */
224
225#endif
226
#ifdef IN_RING0
/** @defgroup grp_hm_r0 The HM ring-0 Context API
 * @{
 */
/* Module and per-VM lifecycle. */
VMMR0_INT_DECL(int) HMR0Init(void);
VMMR0_INT_DECL(int) HMR0Term(void);
VMMR0_INT_DECL(int) HMR0InitVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0TermVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0EnableAllCpus(PVM pVM);
# ifdef VBOX_WITH_RAW_MODE
/* Temporarily disable/re-enable VT-x around raw-mode world switching. */
VMMR0_INT_DECL(int) HMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled);
VMMR0_INT_DECL(void) HMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled);
# endif

VMMR0_INT_DECL(int) HMR0SetupVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0RunGuestCode(PVM pVM, PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0Enter(PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPU pVCpu);
VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser);
/* Fix: parameter renamed VCpu -> pVCpu on the two notify declarations below
   to follow the pointer-prefix naming convention used everywhere else in
   this header (prototype parameter names are cosmetic; no ABI impact). */
VMMR0_INT_DECL(void) HMR0NotifyCpumUnloadedGuestFpuState(PVMCPU pVCpu);
VMMR0_INT_DECL(void) HMR0NotifyCpumModifiedHostCr0(PVMCPU pVCpu);
VMMR0_INT_DECL(bool) HMR0SuspendPending(void);
VMMR0_INT_DECL(int) HMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
VMMR0_INT_DECL(int) HMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);

# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
/* 32-bit host running 64-bit guests: ring-0 backends for the HM64ON32OP
   switcher operations declared earlier in this header. */
VMMR0_INT_DECL(int) HMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0_INT_DECL(int) HMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0_INT_DECL(int) HMR0TestSwitcher3264(PVM pVM);
# endif

/** @} */
#endif /* IN_RING0 */
260
261
#ifdef IN_RING3
/** @defgroup grp_hm_r3 The HM ring-3 Context API
 * @{
 */
/* Public (PUVM-based) queries, usable from external API clients. */
VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVirtApicRegsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM);

/* Internal (PVM-based) lifecycle and event notification API. */
VMMR3_INT_DECL(int) HMR3Init(PVM pVM);
VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM);
VMMR3_INT_DECL(int) HMR3Term(PVM pVM);
VMMR3_INT_DECL(void) HMR3Reset(PVM pVM);
VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu);
VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode);
VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM);
VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(bool) HMR3IsActive(PCVMCPU pVCpu);
/* TPR instruction patching control (guest code patch memory management). */
VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCCPUMCTX pCtx);
VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM);
/** @} */
#endif /* IN_RING3 */
292
293/** @} */
294RT_C_DECLS_END
295
296
297#endif /* !VBOX_INCLUDED_vmm_hm_h */
298
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette