VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWVMXR0.h@43387

Last change on this file since 43387 was 43387, checked in by vboxsync, 12 years ago

VMM: HM cleanup.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.9 KB
/* $Id: HWVMXR0.h 43387 2012-09-21 09:40:25Z vboxsync $ */
/** @file
 * HM VMX (VT-x) - Internal header file.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HWVMXR0_h
#define ___HWVMXR0_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/hm_vmx.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_vmx_int Internal
 * @ingroup grp_vmx
 * @internal
 * @{
 */

/* Read cache indices. */
#define VMX_VMCS64_GUEST_RIP_CACHE_IDX 0
#define VMX_VMCS64_GUEST_RSP_CACHE_IDX 1
#define VMX_VMCS_GUEST_RFLAGS_CACHE_IDX 2
#define VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE_CACHE_IDX 3
#define VMX_VMCS_CTRL_CR0_READ_SHADOW_CACHE_IDX 4
#define VMX_VMCS64_GUEST_CR0_CACHE_IDX 5
#define VMX_VMCS_CTRL_CR4_READ_SHADOW_CACHE_IDX 6
#define VMX_VMCS64_GUEST_CR4_CACHE_IDX 7
#define VMX_VMCS64_GUEST_DR7_CACHE_IDX 8
#define VMX_VMCS32_GUEST_SYSENTER_CS_CACHE_IDX 9
#define VMX_VMCS64_GUEST_SYSENTER_EIP_CACHE_IDX 10
#define VMX_VMCS64_GUEST_SYSENTER_ESP_CACHE_IDX 11
#define VMX_VMCS32_GUEST_GDTR_LIMIT_CACHE_IDX 12
#define VMX_VMCS64_GUEST_GDTR_BASE_CACHE_IDX 13
#define VMX_VMCS32_GUEST_IDTR_LIMIT_CACHE_IDX 14
#define VMX_VMCS64_GUEST_IDTR_BASE_CACHE_IDX 15
#define VMX_VMCS16_GUEST_FIELD_CS_CACHE_IDX 16
#define VMX_VMCS32_GUEST_CS_LIMIT_CACHE_IDX 17
#define VMX_VMCS64_GUEST_CS_BASE_CACHE_IDX 18
#define VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS_CACHE_IDX 19
#define VMX_VMCS16_GUEST_FIELD_DS_CACHE_IDX 20
#define VMX_VMCS32_GUEST_DS_LIMIT_CACHE_IDX 21
#define VMX_VMCS64_GUEST_DS_BASE_CACHE_IDX 22
#define VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS_CACHE_IDX 23
#define VMX_VMCS16_GUEST_FIELD_ES_CACHE_IDX 24
#define VMX_VMCS32_GUEST_ES_LIMIT_CACHE_IDX 25
#define VMX_VMCS64_GUEST_ES_BASE_CACHE_IDX 26
#define VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS_CACHE_IDX 27
#define VMX_VMCS16_GUEST_FIELD_FS_CACHE_IDX 28
#define VMX_VMCS32_GUEST_FS_LIMIT_CACHE_IDX 29
#define VMX_VMCS64_GUEST_FS_BASE_CACHE_IDX 30
#define VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS_CACHE_IDX 31
#define VMX_VMCS16_GUEST_FIELD_GS_CACHE_IDX 32
#define VMX_VMCS32_GUEST_GS_LIMIT_CACHE_IDX 33
#define VMX_VMCS64_GUEST_GS_BASE_CACHE_IDX 34
#define VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS_CACHE_IDX 35
#define VMX_VMCS16_GUEST_FIELD_SS_CACHE_IDX 36
#define VMX_VMCS32_GUEST_SS_LIMIT_CACHE_IDX 37
#define VMX_VMCS64_GUEST_SS_BASE_CACHE_IDX 38
#define VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS_CACHE_IDX 39
#define VMX_VMCS16_GUEST_FIELD_TR_CACHE_IDX 40
#define VMX_VMCS32_GUEST_TR_LIMIT_CACHE_IDX 41
#define VMX_VMCS64_GUEST_TR_BASE_CACHE_IDX 42
#define VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS_CACHE_IDX 43
#define VMX_VMCS16_GUEST_FIELD_LDTR_CACHE_IDX 44
#define VMX_VMCS32_GUEST_LDTR_LIMIT_CACHE_IDX 45
#define VMX_VMCS64_GUEST_LDTR_BASE_CACHE_IDX 46
#define VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS_CACHE_IDX 47
#define VMX_VMCS32_RO_EXIT_REASON_CACHE_IDX 48
#define VMX_VMCS32_RO_VM_INSTR_ERROR_CACHE_IDX 49
#define VMX_VMCS32_RO_EXIT_INSTR_LENGTH_CACHE_IDX 50
#define VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE_CACHE_IDX 51
#define VMX_VMCS32_RO_EXIT_INSTR_INFO_CACHE_IDX 52
#define VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO_CACHE_IDX 53
#define VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX 54
#define VMX_VMCS32_RO_IDT_INFO_CACHE_IDX 55
#define VMX_VMCS32_RO_IDT_ERRCODE_CACHE_IDX 56
#define VMX_VMCS_MAX_CACHE_IDX (VMX_VMCS32_RO_IDT_ERRCODE_CACHE_IDX+1)
#define VMX_VMCS64_GUEST_CR3_CACHE_IDX 57
#define VMX_VMCS_EXIT_PHYS_ADDR_FULL_CACHE_IDX 58
#define VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX (VMX_VMCS_EXIT_PHYS_ADDR_FULL_CACHE_IDX+1)
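
/* Illustrative sketch (not code from this header): each *_CACHE_IDX constant above names a
 * slot in the read cache arrays of the per-VCPU VMCS cache (see the Read.aField[] and
 * Read.aFieldVal[] accesses further down in this file). The assumed relationship is roughly:
 *
 * @code
 *     // Hypothetical illustration, assuming a PVMCSCACHE pCache that has been set up and
 *     // assuming VMX_VMCS64_GUEST_RIP is the corresponding VMCS field constant from hm_vmx.h:
 *     pCache->Read.aField[VMX_VMCS64_GUEST_RIP_CACHE_IDX]    /* == VMX_VMCS64_GUEST_RIP        */
 *     pCache->Read.aFieldVal[VMX_VMCS64_GUEST_RIP_CACHE_IDX] /* == last value read from field  */
 * @endcode
 */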


#ifdef IN_RING0

/**
 * Enters the VT-x session.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VM CPU.
 * @param pCpu Pointer to the CPU info struct.
 */
VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);

/**
 * Leaves the VT-x session.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);

/**
 * Activates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param pCpu Pointer to the CPU info struct.
 * @param pVM Pointer to the VM.
 * @param pvPageCpu Pointer to the global CPU page.
 * @param pPageCpuPhys Physical address of the global CPU page.
 * @param fEnabledBySystem Whether VT-x has already been enabled by the host system.
 */
VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem);

/**
 * Deactivates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param pCpu Pointer to the CPU info struct.
 * @param pvPageCpu Pointer to the global CPU page.
 * @param pPageCpuPhys Physical address of the global CPU page.
 */
VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);

/**
 * Does Ring-0 per VM VT-x initialization.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
VMMR0DECL(int) VMXR0InitVM(PVM pVM);

/**
 * Does Ring-0 per VM VT-x termination.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
VMMR0DECL(int) VMXR0TermVM(PVM pVM);

/**
 * Sets up VT-x for the specified VM.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
VMMR0DECL(int) VMXR0SetupVM(PVM pVM);


/**
 * Saves the host state.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu);

/**
 * Loads the guest state.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);


/**
 * Runs guest code in a VT-x VM.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);

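/* Illustrative sketch (not code from this header): how the ring-0 HM code is broadly expected
 * to drive the entry points above for one virtual CPU. The actual call sites, error handling
 * and dirty-state flags live in the HM/VMMR0 sources, so treat this only as a hedged outline
 * of the intended order, not as the authoritative sequence.
 *
 * @code
 *     rc = VMXR0Enter(pVM, pVCpu, pCpu);            // bind the VCPU to VT-x on this host CPU
 *     rc = VMXR0SaveHostState(pVM, pVCpu);          // capture host state for the world switch
 *     rc = VMXR0LoadGuestState(pVM, pVCpu, pCtx);   // sync guest state into the VMCS
 *     rc = VMXR0RunGuestCode(pVM, pVCpu, pCtx);     // VMLAUNCH/VMRESUME loop, handle VM-exits
 *     rc = VMXR0Leave(pVM, pVCpu, pCtx);            // unbind before returning to ring-3
 * @endcode
 */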

# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/**
 * Executes the specified handler in 64-bit mode.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 * @param pfnHandler Pointer to the RC handler function.
 * @param cbParam Number of parameters.
 * @param paParam Array of 32-bit parameters.
 */
VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
                                         uint32_t *paParam);
# endif

# define VMX_WRITE_SELREG(REG, reg) \
    do \
    { \
        rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_##REG, pCtx->reg.Sel); \
        rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, pCtx->reg.u32Limit); \
        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_##REG##_BASE, pCtx->reg.u64Base); \
        if ((pCtx->eflags.u32 & X86_EFL_VM)) \
        { \
            /* Must override this or else VT-x will fail with invalid guest state errors. */ \
            /* DPL=3, present, code/data, r/w/accessed. */ \
            val = (pCtx->reg.Attr.u & ~0xFF) | 0xF3; \
        } \
        else \
        if (   CPUMIsGuestInRealModeEx(pCtx) \
            && !pVM->hm.s.vmx.fUnrestrictedGuest) \
        { \
            /* Must override this or else VT-x will fail with invalid guest state errors. */ \
            /* DPL=3, present, code/data, r/w/accessed. */ \
            val = 0xf3; \
        } \
        else \
        if (   (   pCtx->reg.Sel \
                || !CPUMIsGuestInPagedProtectedModeEx(pCtx) \
                || (!pCtx->cs.Attr.n.u1DefBig && !CPUMIsGuestIn64BitCodeEx(pCtx)) \
               ) \
            && pCtx->reg.Attr.n.u1Present == 1) \
        { \
            val = pCtx->reg.Attr.u | X86_SEL_TYPE_ACCESSED; \
        } \
        else \
            val = 0x10000; /* Invalid guest state error otherwise. (BIT(16) = Unusable) */ \
        \
        rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, val); \
    } while (0)
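
/* Illustrative sketch (assumption, not a call site from this header): VMX_WRITE_SELREG is
 * written to be expanded inside the guest-state loading code where local variables 'rc' and
 * 'val', plus 'pVM' and 'pCtx', are already in scope; the declarations below are assumed from
 * the macro body only.
 *
 * @code
 *     int      rc;
 *     uint32_t val;
 *     VMX_WRITE_SELREG(CS, cs);   // writes CS selector, limit, base and access rights
 *     VMX_WRITE_SELREG(SS, ss);
 *     AssertRC(rc);
 * @endcode
 */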

# define VMX_READ_SELREG(REG, reg) \
    do \
    { \
        VMXReadCachedVMCS(VMX_VMCS16_GUEST_FIELD_##REG, &val); \
        pCtx->reg.Sel = val; \
        pCtx->reg.ValidSel = val; \
        pCtx->reg.fFlags = CPUMSELREG_FLAGS_VALID; \
        VMXReadCachedVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, &val); \
        pCtx->reg.u32Limit = val; \
        VMXReadCachedVMCS(VMX_VMCS64_GUEST_##REG##_BASE, &val); \
        pCtx->reg.u64Base = val; \
        VMXReadCachedVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, &val); \
        pCtx->reg.Attr.u = val; \
    } while (0)
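
/* Illustrative sketch (assumption): the read counterpart is expanded where 'pVCpu', 'pCtx' and
 * a local 'val' are in scope; RTGCUINTREG matches the pointer type taken by the cached read
 * helpers further down.
 *
 * @code
 *     RTGCUINTREG val;
 *     VMX_READ_SELREG(ES, es);    // pulls ES selector, limit, base and attributes into pCtx
 * @endcode
 */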

/* Don't read from the cache in this macro; used only in case of failure where the cache is out of sync. */
# define VMX_LOG_SELREG(REG, szSelReg, val) \
    do \
    { \
        VMXReadVMCS(VMX_VMCS16_GUEST_FIELD_##REG, &(val)); \
        Log(("%s Selector %x\n", szSelReg, (val))); \
        VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, &(val)); \
        Log(("%s Limit %x\n", szSelReg, (val))); \
        VMXReadVMCS(VMX_VMCS64_GUEST_##REG##_BASE, &(val)); \
        Log(("%s Base %RX64\n", szSelReg, (uint64_t)(val))); \
        VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, &(val)); \
        Log(("%s Attributes %x\n", szSelReg, (val))); \
    } while (0)

/**
 * Caches VMCS writes for performance reasons (Darwin) and for running 64-bit
 * guests on 32-bit hosts.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param idxField VMCS field index.
 * @param u64Val 16, 32 or 64-bit value.
 */
VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val);

#ifdef VMX_USE_CACHED_VMCS_ACCESSES
/**
 * Returns the value of a cached VMCS read; used for performance reasons (Darwin)
 * and for running 64-bit guests on 32-bit hosts.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param idxCache VMCS cache index (not VMCS field index!)
 * @param pVal Where to store the 16, 32 or 64-bit value.
 */
DECLINLINE(int) VMXReadCachedVMCSEx(PVMCPU pVCpu, uint32_t idxCache, RTGCUINTREG *pVal)
{
    Assert(idxCache <= VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX);
    *pVal = pVCpu->hm.s.vmx.VMCSCache.Read.aFieldVal[idxCache];
    return VINF_SUCCESS;
}
#endif

/**
 * Returns the value of a cached VMCS read; used for performance reasons (Darwin)
 * and for running 64-bit guests on 32-bit hosts.
 *
 * @param idxField VMCS field index.
 * @param pVal Value pointer (out).
 */
#ifdef VMX_USE_CACHED_VMCS_ACCESSES
# define VMXReadCachedVMCS(idxField, pVal) VMXReadCachedVMCSEx(pVCpu, idxField##_CACHE_IDX, pVal)
#else
# define VMXReadCachedVMCS(idxField, pVal) VMXReadVMCS(idxField, pVal)
#endif
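
/* Illustrative sketch (assumption, not from this header): the macro above expects a local
 * 'pVCpu' in scope and takes the VMCS *field* name, pasting on _CACHE_IDX to locate the cache
 * slot, whereas VMXReadCachedVMCSEx is called with the cache index directly. The field
 * constant VMX_VMCS64_GUEST_RIP is assumed to come from hm_vmx.h.
 *
 * @code
 *     RTGCUINTREG uRip;
 *     VMXReadCachedVMCS(VMX_VMCS64_GUEST_RIP, &uRip);                       // field name
 *     VMXReadCachedVMCSEx(pVCpu, VMX_VMCS64_GUEST_RIP_CACHE_IDX, &uRip);    // cache index
 * @endcode
 */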

/**
 * Sets up a cached VMCS read for performance reasons (Darwin) and for running
 * 64-bit guests on 32-bit hosts.
 *
 * @param pCache The cache.
 * @param idxField VMCS field index.
 */
#define VMXSetupCachedReadVMCS(pCache, idxField) \
{ \
    Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
    pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
    pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
}

#define VMX_SETUP_SELREG(REG, pCache) \
{ \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS16_GUEST_FIELD_##REG); \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_GUEST_##REG##_LIMIT); \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_##REG##_BASE); \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS); \
}
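
/* Illustrative sketch (assumption): during VMCS cache initialisation each field that will be
 * read through the cache is registered once, e.g. per selector register via VMX_SETUP_SELREG.
 * 'pCache' is assumed to point at the per-VCPU VMCS cache used elsewhere in this file, and
 * VMX_VMCS_GUEST_RFLAGS is an assumed field constant from hm_vmx.h.
 *
 * @code
 *     PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
 *     VMX_SETUP_SELREG(ES, pCache);
 *     VMX_SETUP_SELREG(CS, pCache);
 *     VMXSetupCachedReadVMCS(pCache, VMX_VMCS_GUEST_RFLAGS);
 * @endcode
 */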

/**
 * Prepares for and executes VMLAUNCH (32-bit guest mode).
 *
 * @returns VBox status code.
 * @param fResume Whether to use vmlaunch or vmresume.
 * @param pCtx Pointer to the guest CPU context.
 * @param pCache Pointer to the VMCS cache.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);

/**
 * Prepares for and executes VMLAUNCH (64-bit guest mode).
 *
 * @returns VBox status code.
 * @param fResume Whether to use vmlaunch or vmresume.
 * @param pCtx Pointer to the guest CPU context.
 * @param pCache Pointer to the VMCS cache.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);

# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/**
 * Prepares for and executes VMLAUNCH (64-bit guest mode on a 32-bit host, using the switcher).
 *
 * @returns VBox status code.
 * @param fResume Whether to use vmlaunch or vmresume.
 * @param pCtx Pointer to the guest CPU context.
 * @param pCache Pointer to the VMCS cache.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif /* ___HWVMXR0_h */