VirtualBox

source: vbox/trunk/include/VBox/vmm/hm.h@ 73389

Last change on this file since 73389 was 73389, checked in by vboxsync, 6 years ago

VMM, SUPDrv: Nested VMX: bugref:9180 Implement some of the VMX MSRs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 10.8 KB
Line 
1/** @file
2 * HM - Intel/AMD VM Hardware Assisted Virtualization Manager (VMM)
3 */
4
5/*
6 * Copyright (C) 2006-2017 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_hm_h
27#define ___VBox_vmm_hm_h
28
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/cpum.h>
31#include <VBox/vmm/vmm.h>
32#include <VBox/vmm/hm_svm.h>
33#include <VBox/vmm/hm_vmx.h>
34#include <VBox/vmm/trpm.h>
35#include <iprt/mp.h>
36
37
38/** @defgroup grp_hm The Hardware Assisted Virtualization Manager API
39 * @ingroup grp_vmm
40 * @{
41 */
42
43RT_C_DECLS_BEGIN
44
/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval true if used.
 * @retval false if software virtualization (raw-mode) or NEM is used.
 *
 * @param a_pVM The cross context VM structure.
 * @deprecated Please use VM_IS_RAW_MODE_ENABLED, VM_IS_HM_OR_NEM_ENABLED, or
 * VM_IS_HM_ENABLED instead.
 * @internal
 */
#if defined(VBOX_STRICT) && defined(IN_RING3)
/* Strict ring-3 builds resolve to a real function (HMIsEnabledNotMacro,
   declared below); all other builds read the flag directly. */
# define HMIsEnabled(a_pVM) HMIsEnabledNotMacro(a_pVM)
#else
# define HMIsEnabled(a_pVM) ((a_pVM)->fHMEnabled)
#endif /* !(VBOX_STRICT && IN_RING3) */
61
/**
 * Checks whether raw-mode context is required for HM purposes.
 *
 * @retval true if required by HM for switching the CPU to 64-bit mode.
 * @retval false if not required by HM.
 *
 * @param a_pVM The cross context VM structure.
 * @internal
 */
#if HC_ARCH_BITS == 64
/* A 64-bit host never needs the 64-on-32 switcher context. */
# define HMIsRawModeCtxNeeded(a_pVM) (false)
#else
# define HMIsRawModeCtxNeeded(a_pVM) ((a_pVM)->fHMNeedRawModeCtx)
#endif /* HC_ARCH_BITS != 64 */
76
/**
 * Checks whether we're in the special hardware virtualization context.
 * @returns true / false.
 * @param a_pVCpu The caller's cross context virtual CPU structure.
 * @thread EMT
 */
#ifdef IN_RING0
# define HMIsInHwVirtCtx(a_pVCpu) (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_HM)
#else
/* The HM execution context only exists in ring-0; always false elsewhere. */
# define HMIsInHwVirtCtx(a_pVCpu) (false)
#endif /* !IN_RING0 */
88
/**
 * Checks whether we're in the special hardware virtualization context and we
 * cannot perform long jump without guru meditating and possibly messing up the
 * host and/or guest state.
 *
 * This is after we've turned interrupts off and such.
 *
 * @returns true / false.
 * @param a_pVCpu The caller's cross context virtual CPU structure.
 * @thread EMT
 */
#ifdef IN_RING0
/* Note: tests for STARTED_EXEC, a later stage than STARTED_HM checked by
   HMIsInHwVirtCtx above. */
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu) (VMCPU_GET_STATE(a_pVCpu) == VMCPUSTATE_STARTED_EXEC)
#else
/* Only reachable in ring-0; always false elsewhere. */
# define HMIsInHwVirtNoLongJmpCtx(a_pVCpu) (false)
#endif /* !IN_RING0 */
105
/**
 * 64-bit raw-mode (intermediate memory context) operations.
 *
 * These are special hypervisor eip values used when running 64-bit guests on
 * 32-bit hosts.  Each operation corresponds to a routine.
 *
 * @note Duplicated in the assembly code!
 */
typedef enum HM64ON32OP
{
    /** Invalid zero value, guards against uninitialized use. */
    HM64ON32OP_INVALID = 0,
    /** Corresponds to the VMXRCStartVM64 routine. */
    HM64ON32OP_VMXRCStartVM64 = 1,
    /** Corresponds to the SVMRCVMRun64 routine. */
    HM64ON32OP_SVMRCVMRun64 = 2,
    /** Corresponds to the HMRCSaveGuestFPU64 routine. */
    HM64ON32OP_HMRCSaveGuestFPU64 = 3,
    /** Corresponds to the HMRCSaveGuestDebug64 routine. */
    HM64ON32OP_HMRCSaveGuestDebug64 = 4,
    /** Corresponds to the HMRCTestSwitcher64 routine. */
    HM64ON32OP_HMRCTestSwitcher64 = 5,
    /** End of valid values (exclusive). */
    HM64ON32OP_END = 6,
    /** Forces the compiler to use a 32-bit underlying type. */
    HM64ON32OP_32BIT_HACK = 0x7fffffff
} HM64ON32OP;
125
/** @name All-context HM API.
 * @{ */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM);
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM);
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu);
VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping);
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable);
VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM);
VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM);
VMM_INT_DECL(bool) HMIsVmxSupported(PVM pVM);
VMM_INT_DECL(void) HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs);
#if 0 /* Disabled: single-MSR variant of HMVmxGetHostMsrs, not yet enabled. */
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue);
#endif
/** @} */
143
/** @name All-context SVM helpers.
 *
 * These are SVM functions (based on AMD specs.) that may be used by IEM/REM and
 * not VirtualBox functions that are used for hardware-assisted SVM. Those are
 * declared below under the !IN_RC section.
 * @{ */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent);
/* NOTE(review): 'pb' hungarian prefix on a uint16_t pointer looks off;
   consider pu16OffMsrpm -- confirm against the definition before renaming. */
VMM_INT_DECL(int) HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
VMM_INT_DECL(bool) HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                            uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                            PSVMIOIOEXITINFO pIoExitInfo);
VMM_INT_DECL(int) HMHCSvmMaybeMovTprHypercall(PVMCPU pVCpu);
/** @} */
157
158#ifndef IN_RC
159VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu);
160VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM);
161VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt);
162VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys);
163VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM);
164VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM);
165VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM);
166VMM_INT_DECL(bool) HMIsMsrBitmapActive(PVM pVM);
167VMM_INT_DECL(bool) HMSvmIsVGifActive(PVM pVM);
168VMM_INT_DECL(uint64_t) HMSvmNstGstApplyTscOffset(PVMCPU pVCpu, uint64_t uTicks);
169# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
170VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx);
171# endif
172#else /* Nops in RC: */
173# define HMFlushTLB(pVCpu) do { } while (0)
174# define HMFlushTLBOnAllVCpus(pVM) do { } while (0)
175# define HMInvalidatePageOnAllVCpus(pVM, GCVirt) do { } while (0)
176# define HMInvalidatePhysPage(pVM, GCVirt) do { } while (0)
177# define HMAreNestedPagingAndFullGuestExecEnabled(pVM) false
178# define HMIsLongModeAllowed(pVM) false
179# define HMIsNestedPagingActive(pVM) false
180# define HMIsMsrBitmapsActive(pVM) false
181# define HMSvmIsVGifActive(pVM) false
182# define HMSvmNstGstApplyTscOffset(pVCpu, uTicks) (uTicks)
183# define HMSvmNstGstVmExitNotify(pVCpu, pCtx) do { } while (0)
184#endif
185
#ifdef IN_RING0
/** @defgroup grp_hm_r0 The HM ring-0 Context API
 * @{
 */
VMMR0_INT_DECL(int) HMR0Init(void);
VMMR0_INT_DECL(int) HMR0Term(void);
VMMR0_INT_DECL(int) HMR0InitVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0TermVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0EnableAllCpus(PVM pVM);
# ifdef VBOX_WITH_RAW_MODE
VMMR0_INT_DECL(int) HMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled);
VMMR0_INT_DECL(void) HMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled);
# endif

VMMR0_INT_DECL(int) HMR0SetupVM(PVM pVM);
VMMR0_INT_DECL(int) HMR0RunGuestCode(PVM pVM, PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0Enter(PVMCPU pVCpu);
VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPU pVCpu);
VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser);
/* Fix: parameters renamed 'VCpu' -> 'pVCpu' on the next two declarations to
   follow the pointer-prefix convention used everywhere else in this header
   (prototype parameter names are caller-invisible, so this is safe). */
VMMR0_INT_DECL(void) HMR0NotifyCpumUnloadedGuestFpuState(PVMCPU pVCpu);
VMMR0_INT_DECL(void) HMR0NotifyCpumModifiedHostCr0(PVMCPU pVCpu);
VMMR0_INT_DECL(bool) HMR0SuspendPending(void);
VMMR0_INT_DECL(int) HMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
VMMR0_INT_DECL(int) HMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);

# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
/* 64-on-32 switcher helpers, only relevant on 32-bit hosts running 64-bit
   guests (see the HM64ON32OP enum above). */
VMMR0_INT_DECL(int) HMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0_INT_DECL(int) HMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0_INT_DECL(int) HMR0TestSwitcher3264(PVM pVM);
# endif

/** @} */
#endif /* IN_RING0 */
219
220
#ifdef IN_RING3
/** @defgroup grp_hm_r3 The HM ring-3 Context API
 * @{
 */
/* Status queries taking the user-mode VM handle (PUVM). */
VMMR3DECL(bool) HMR3IsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsNestedPagingActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVirtApicRegsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsPostedIntrsEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVpidActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsUXActive(PUVM pUVM);
VMMR3DECL(bool) HMR3IsSvmEnabled(PUVM pUVM);
VMMR3DECL(bool) HMR3IsVmxEnabled(PUVM pUVM);

/* Declarations taking the cross context structures (PVM / PVMCPU). */
VMMR3_INT_DECL(bool) HMR3IsEventPending(PVMCPU pVCpu);
VMMR3_INT_DECL(int) HMR3Init(PVM pVM);
VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM);
VMMR3_INT_DECL(int) HMR3Term(PVM pVM);
VMMR3_INT_DECL(void) HMR3Reset(PVM pVM);
VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu);
VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode);
VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx);
VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM);
VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(bool) HMR3IsActive(PVMCPU pVCpu);
VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx);
VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM);
/* Exit-reason to name translators for logging/debugging output. */
VMMR3DECL(const char *) HMR3GetVmxExitName(uint32_t uExit);
VMMR3DECL(const char *) HMR3GetSvmExitName(uint32_t uExit);
/** @} */
#endif /* IN_RING3 */
255
/** @} */
RT_C_DECLS_END


#endif /* !___VBox_vmm_hm_h */
261
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette