VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWVMXR0.h@41728

Last change on this file since 41728 was 41330, checked in by vboxsync, 13 years ago

VMM/VMMR0/HWVMXR0: cleanup.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.6 KB
/* $Id: HWVMXR0.h 41330 2012-05-16 11:25:16Z vboxsync $ */
/** @file
 * HM VMX (VT-x) - Internal header file.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HWVMXR0_h
#define ___HWVMXR0_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/hwacc_vmx.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_vmx_int Internal
 * @ingroup grp_vmx
 * @internal
 * @{
 */

/* Read cache indices. */
#define VMX_VMCS64_GUEST_RIP_CACHE_IDX 0
#define VMX_VMCS64_GUEST_RSP_CACHE_IDX 1
#define VMX_VMCS_GUEST_RFLAGS_CACHE_IDX 2
#define VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE_CACHE_IDX 3
#define VMX_VMCS_CTRL_CR0_READ_SHADOW_CACHE_IDX 4
#define VMX_VMCS64_GUEST_CR0_CACHE_IDX 5
#define VMX_VMCS_CTRL_CR4_READ_SHADOW_CACHE_IDX 6
#define VMX_VMCS64_GUEST_CR4_CACHE_IDX 7
#define VMX_VMCS64_GUEST_DR7_CACHE_IDX 8
#define VMX_VMCS32_GUEST_SYSENTER_CS_CACHE_IDX 9
#define VMX_VMCS64_GUEST_SYSENTER_EIP_CACHE_IDX 10
#define VMX_VMCS64_GUEST_SYSENTER_ESP_CACHE_IDX 11
#define VMX_VMCS32_GUEST_GDTR_LIMIT_CACHE_IDX 12
#define VMX_VMCS64_GUEST_GDTR_BASE_CACHE_IDX 13
#define VMX_VMCS32_GUEST_IDTR_LIMIT_CACHE_IDX 14
#define VMX_VMCS64_GUEST_IDTR_BASE_CACHE_IDX 15
#define VMX_VMCS16_GUEST_FIELD_CS_CACHE_IDX 16
#define VMX_VMCS32_GUEST_CS_LIMIT_CACHE_IDX 17
#define VMX_VMCS64_GUEST_CS_BASE_CACHE_IDX 18
#define VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS_CACHE_IDX 19
#define VMX_VMCS16_GUEST_FIELD_DS_CACHE_IDX 20
#define VMX_VMCS32_GUEST_DS_LIMIT_CACHE_IDX 21
#define VMX_VMCS64_GUEST_DS_BASE_CACHE_IDX 22
#define VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS_CACHE_IDX 23
#define VMX_VMCS16_GUEST_FIELD_ES_CACHE_IDX 24
#define VMX_VMCS32_GUEST_ES_LIMIT_CACHE_IDX 25
#define VMX_VMCS64_GUEST_ES_BASE_CACHE_IDX 26
#define VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS_CACHE_IDX 27
#define VMX_VMCS16_GUEST_FIELD_FS_CACHE_IDX 28
#define VMX_VMCS32_GUEST_FS_LIMIT_CACHE_IDX 29
#define VMX_VMCS64_GUEST_FS_BASE_CACHE_IDX 30
#define VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS_CACHE_IDX 31
#define VMX_VMCS16_GUEST_FIELD_GS_CACHE_IDX 32
#define VMX_VMCS32_GUEST_GS_LIMIT_CACHE_IDX 33
#define VMX_VMCS64_GUEST_GS_BASE_CACHE_IDX 34
#define VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS_CACHE_IDX 35
#define VMX_VMCS16_GUEST_FIELD_SS_CACHE_IDX 36
#define VMX_VMCS32_GUEST_SS_LIMIT_CACHE_IDX 37
#define VMX_VMCS64_GUEST_SS_BASE_CACHE_IDX 38
#define VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS_CACHE_IDX 39
#define VMX_VMCS16_GUEST_FIELD_TR_CACHE_IDX 40
#define VMX_VMCS32_GUEST_TR_LIMIT_CACHE_IDX 41
#define VMX_VMCS64_GUEST_TR_BASE_CACHE_IDX 42
#define VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS_CACHE_IDX 43
#define VMX_VMCS16_GUEST_FIELD_LDTR_CACHE_IDX 44
#define VMX_VMCS32_GUEST_LDTR_LIMIT_CACHE_IDX 45
#define VMX_VMCS64_GUEST_LDTR_BASE_CACHE_IDX 46
#define VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS_CACHE_IDX 47
#define VMX_VMCS32_RO_EXIT_REASON_CACHE_IDX 48
#define VMX_VMCS32_RO_VM_INSTR_ERROR_CACHE_IDX 49
#define VMX_VMCS32_RO_EXIT_INSTR_LENGTH_CACHE_IDX 50
#define VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE_CACHE_IDX 51
#define VMX_VMCS32_RO_EXIT_INSTR_INFO_CACHE_IDX 52
#define VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO_CACHE_IDX 53
#define VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX 54
#define VMX_VMCS32_RO_IDT_INFO_CACHE_IDX 55
#define VMX_VMCS32_RO_IDT_ERRCODE_CACHE_IDX 56
#define VMX_VMCS_MAX_CACHE_IDX (VMX_VMCS32_RO_IDT_ERRCODE_CACHE_IDX+1)
#define VMX_VMCS64_GUEST_CR3_CACHE_IDX 57
#define VMX_VMCS_EXIT_PHYS_ADDR_FULL_CACHE_IDX 58
#define VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX (VMX_VMCS_EXIT_PHYS_ADDR_FULL_CACHE_IDX+1)

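/*
 * Illustrative note, not part of the original header: each *_CACHE_IDX constant is
 * the slot in the VMCS cache's Read.aField[] / Read.aFieldVal[] arrays that mirrors
 * the corresponding VMCS field (see VMXSetupCachedReadVMCS and VMXReadCachedVMCSEx
 * below). A minimal sketch, assuming VMX_VMCS64_GUEST_RIP is the matching field
 * constant declared in hwacc_vmx.h:
 *
 *      pCache->Read.aField[VMX_VMCS64_GUEST_RIP_CACHE_IDX]    = VMX_VMCS64_GUEST_RIP;
 *      pCache->Read.aFieldVal[VMX_VMCS64_GUEST_RIP_CACHE_IDX] = 0;
 *
 * VMX_VMCS_MAX_CACHE_IDX bounds the always-cached fields; the CR3 and exit
 * physical-address slots defined after it are presumably only cached when nested
 * paging is used, hence the separate VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX bound.
 */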

#ifdef IN_RING0

/**
 * Enters the VT-x session.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VM CPU.
 * @param pCpu Pointer to the CPU info struct.
 */
VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);

/**
 * Leaves the VT-x session.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);


/**
 * Sets up and activates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param pCpu Pointer to the CPU info struct.
 * @param pVM Pointer to the VM. (can be NULL after a resume)
 * @param pvPageCpu Pointer to the global CPU page.
 * @param pPageCpuPhys Physical address of the global CPU page.
 */
VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);

/**
 * Deactivates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param pCpu Pointer to the CPU info struct.
 * @param pvPageCpu Pointer to the global CPU page.
 * @param pPageCpuPhys Physical address of the global CPU page.
 */
VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);

/**
 * Does Ring-0 per VM VT-x initialization.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
VMMR0DECL(int) VMXR0InitVM(PVM pVM);

/**
 * Does Ring-0 per VM VT-x termination.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
VMMR0DECL(int) VMXR0TermVM(PVM pVM);

/**
 * Sets up VT-x for the specified VM.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
VMMR0DECL(int) VMXR0SetupVM(PVM pVM);


/**
 * Saves the host state.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu);

/**
 * Loads the guest state.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);


/**
 * Runs guest code in a VT-x VM.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);


# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/**
 * Executes the specified handler in 64-bit mode.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 * @param pfnHandler Pointer to the RC handler function.
 * @param cbParam Number of parameters.
 * @param paParam Array of 32-bit parameters.
 */
VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
                                         uint32_t *paParam);
# endif

# define VMX_WRITE_SELREG(REG, reg) \
{ \
    rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_##REG, pCtx->reg); \
    rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, pCtx->reg##Hid.u32Limit); \
    rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_##REG##_BASE, pCtx->reg##Hid.u64Base); \
    if ((pCtx->eflags.u32 & X86_EFL_VM)) \
    { \
        /* Must override this or else VT-x will fail with invalid guest state errors. */ \
        /* DPL=3, present, code/data, r/w/accessed. */ \
        val = (pCtx->reg##Hid.Attr.u & ~0xFF) | 0xF3; \
    } \
    else \
    if (   CPUMIsGuestInRealModeEx(pCtx) \
        && !pVM->hwaccm.s.vmx.fUnrestrictedGuest) \
    { \
        /* Must override this or else VT-x will fail with invalid guest state errors. */ \
        /* DPL=3, present, code/data, r/w/accessed. */ \
        val = 0xf3; \
    } \
    else \
    if (   (   pCtx->reg \
            || !CPUMIsGuestInPagedProtectedModeEx(pCtx) \
            || (!pCtx->csHid.Attr.n.u1DefBig && !CPUMIsGuestIn64BitCodeEx(pCtx)) \
           ) \
        && pCtx->reg##Hid.Attr.n.u1Present == 1) \
    { \
        val = pCtx->reg##Hid.Attr.u | X86_SEL_TYPE_ACCESSED; \
    } \
    else \
        val = 0x10000; /* Invalid guest state error otherwise. (BIT(16) = Unusable) */ \
    \
    rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, val); \
}
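
/*
 * Usage sketch (illustrative, not from the original file): VMX_WRITE_SELREG expects
 * 'rc', 'val', 'pVM' and 'pCtx' to be in scope at the expansion site, presumably in
 * the guest-state loading code of HWVMXR0.cpp. Assuming an RTGCUINTREG 'val':
 *
 *      int         rc  = VINF_SUCCESS;
 *      RTGCUINTREG val = 0;
 *      VMX_WRITE_SELREG(ES, es);
 *      VMX_WRITE_SELREG(CS, cs);
 *      VMX_WRITE_SELREG(SS, ss);
 *      AssertRC(rc);
 */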

# define VMX_READ_SELREG(REG, reg) \
{ \
    VMXReadCachedVMCS(VMX_VMCS16_GUEST_FIELD_##REG, &val); \
    pCtx->reg = val; \
    VMXReadCachedVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, &val); \
    pCtx->reg##Hid.u32Limit = val; \
    VMXReadCachedVMCS(VMX_VMCS64_GUEST_##REG##_BASE, &val); \
    pCtx->reg##Hid.u64Base = val; \
    VMXReadCachedVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, &val); \
    pCtx->reg##Hid.Attr.u = val; \
}
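
/*
 * Usage sketch (illustrative): like the write macro above, VMX_READ_SELREG relies on
 * a 'val' scratch variable and the guest context 'pCtx' being in scope, e.g.:
 *
 *      RTGCUINTREG val;
 *      VMX_READ_SELREG(CS, cs);
 *      VMX_READ_SELREG(SS, ss);
 *
 * Each expansion refreshes pCtx->reg plus the hidden limit, base and attributes.
 */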

/* Don't read from the cache in this macro; used only in case of failure where the cache is out of sync. */
# define VMX_LOG_SELREG(REG, szSelReg, val) \
{ \
    VMXReadVMCS(VMX_VMCS16_GUEST_FIELD_##REG, &(val)); \
    Log(("%s Selector %x\n", szSelReg, (val))); \
    VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, &(val)); \
    Log(("%s Limit %x\n", szSelReg, (val))); \
    VMXReadVMCS(VMX_VMCS64_GUEST_##REG##_BASE, &(val)); \
    Log(("%s Base %RX64\n", szSelReg, (uint64_t)(val))); \
    VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, &(val)); \
    Log(("%s Attributes %x\n", szSelReg, (val))); \
}
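
/*
 * Usage sketch (illustrative): intended for failure paths where the cache may be
 * stale, since every read goes straight to the VMCS, e.g.:
 *
 *      RTGCUINTREG val;
 *      VMX_LOG_SELREG(CS, "CS", val);
 *      VMX_LOG_SELREG(TR, "TR", val);
 */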

/**
 * Caches VMCS writes for performance reasons (Darwin) and for running 64-bit
 * guests on 32-bit hosts.
 *
 * @returns VBox status code.
 * @param pVCpu Pointer to the VMCPU.
 * @param idxField VMCS field index.
 * @param u64Val The 16-, 32- or 64-bit value to write.
 */
VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val);

#ifdef VMX_USE_CACHED_VMCS_ACCESSES
/**
 * Returns the value of a cached VMCS read, kept for performance reasons (Darwin)
 * and for running 64-bit guests on 32-bit hosts.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param pVCpu Pointer to the VMCPU.
 * @param idxCache VMCS cache index (not VMCS field index!).
 * @param pVal Where to store the 16-, 32- or 64-bit value.
 */
DECLINLINE(int) VMXReadCachedVMCSEx(PVMCPU pVCpu, uint32_t idxCache, RTGCUINTREG *pVal)
{
    Assert(idxCache <= VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX);
    *pVal = pVCpu->hwaccm.s.vmx.VMCSCache.Read.aFieldVal[idxCache];
    return VINF_SUCCESS;
}
#endif

/**
 * Reads a cached VMCS value, used for performance reasons (Darwin) and for
 * running 64-bit guests on 32-bit hosts.
 *
 * @param idxField VMCS field index.
 * @param pVal Value pointer (out).
 */
#ifdef VMX_USE_CACHED_VMCS_ACCESSES
# define VMXReadCachedVMCS(idxField, pVal) VMXReadCachedVMCSEx(pVCpu, idxField##_CACHE_IDX, pVal)
#else
# define VMXReadCachedVMCS(idxField, pVal) VMXReadVMCS(idxField, pVal)
#endif
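
/*
 * Usage sketch (illustrative): callers pass the plain VMCS field name; with caching
 * enabled the macro appends _CACHE_IDX and implicitly uses a 'pVCpu' in scope.
 * Assuming the VMX_VMCS64_GUEST_RIP/RSP field constants from hwacc_vmx.h:
 *
 *      RTGCUINTREG val;
 *      VMXReadCachedVMCS(VMX_VMCS64_GUEST_RIP, &val);
 *      pCtx->rip = val;
 *      VMXReadCachedVMCS(VMX_VMCS64_GUEST_RSP, &val);
 *      pCtx->rsp = val;
 */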

/**
 * Sets up a cached read of a VMCS field, used for performance reasons (Darwin) and
 * for running 64-bit guests on 32-bit hosts.
 *
 * @param pCache The cache.
 * @param idxField VMCS field index.
 */
#define VMXSetupCachedReadVMCS(pCache, idxField) \
{ \
    Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
    pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
    pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
}

#define VMX_SETUP_SELREG(REG, pCache) \
{ \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS16_GUEST_FIELD_##REG); \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_GUEST_##REG##_LIMIT); \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_##REG##_BASE); \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS); \
}
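
/*
 * Usage sketch (illustrative): the read cache is primed once per VCPU, presumably
 * during VM setup, by registering every field that will later be fetched through
 * VMXReadCachedVMCS, e.g.:
 *
 *      PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
 *      VMX_SETUP_SELREG(ES, pCache);
 *      VMX_SETUP_SELREG(CS, pCache);
 *      VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_RIP);
 *      VMXSetupCachedReadVMCS(pCache, VMX_VMCS_GUEST_RFLAGS);
 */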

/**
 * Prepares for and executes VMLAUNCH (32-bit guest mode).
 *
 * @returns VBox status code.
 * @param fResume Whether to VMLAUNCH or VMRESUME.
 * @param pCtx Pointer to the guest CPU context.
 * @param pCache Pointer to the VMCS cache.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);

/**
 * Prepares for and executes VMLAUNCH (64-bit guest mode).
 *
 * @returns VBox status code.
 * @param fResume Whether to VMLAUNCH or VMRESUME.
 * @param pCtx Pointer to the guest CPU context.
 * @param pCache Pointer to the VMCS cache.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);

# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/**
 * Prepares for and executes VMLAUNCH (64-bit guest mode) on a 32-bit host.
 *
 * @returns VBox status code.
 * @param fResume Whether to VMLAUNCH or VMRESUME.
 * @param pCtx Pointer to the guest CPU context.
 * @param pCache Pointer to the VMCS cache.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif /* ___HWVMXR0_h */
