VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWVMXR0.h @ 42046

Last change on this file since 42046 was 41906, checked in by vboxsync, 12 years ago

CPUM: Combined the visible and hidden selector register data into one structure. Preparing for lazily resolving+caching of hidden registers in raw-mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.3 KB
/* $Id: HWVMXR0.h 41906 2012-06-24 15:44:03Z vboxsync $ */
/** @file
 * HM VMX (VT-x) - Internal header file.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___HWVMXR0_h
#define ___HWVMXR0_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/stam.h>
#include <VBox/dis.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/hwacc_vmx.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_vmx_int Internal
 * @ingroup grp_vmx
 * @internal
 * @{
 */

/* Read cache indices. */
#define VMX_VMCS64_GUEST_RIP_CACHE_IDX 0
#define VMX_VMCS64_GUEST_RSP_CACHE_IDX 1
#define VMX_VMCS_GUEST_RFLAGS_CACHE_IDX 2
#define VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE_CACHE_IDX 3
#define VMX_VMCS_CTRL_CR0_READ_SHADOW_CACHE_IDX 4
#define VMX_VMCS64_GUEST_CR0_CACHE_IDX 5
#define VMX_VMCS_CTRL_CR4_READ_SHADOW_CACHE_IDX 6
#define VMX_VMCS64_GUEST_CR4_CACHE_IDX 7
#define VMX_VMCS64_GUEST_DR7_CACHE_IDX 8
#define VMX_VMCS32_GUEST_SYSENTER_CS_CACHE_IDX 9
#define VMX_VMCS64_GUEST_SYSENTER_EIP_CACHE_IDX 10
#define VMX_VMCS64_GUEST_SYSENTER_ESP_CACHE_IDX 11
#define VMX_VMCS32_GUEST_GDTR_LIMIT_CACHE_IDX 12
#define VMX_VMCS64_GUEST_GDTR_BASE_CACHE_IDX 13
#define VMX_VMCS32_GUEST_IDTR_LIMIT_CACHE_IDX 14
#define VMX_VMCS64_GUEST_IDTR_BASE_CACHE_IDX 15
#define VMX_VMCS16_GUEST_FIELD_CS_CACHE_IDX 16
#define VMX_VMCS32_GUEST_CS_LIMIT_CACHE_IDX 17
#define VMX_VMCS64_GUEST_CS_BASE_CACHE_IDX 18
#define VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS_CACHE_IDX 19
#define VMX_VMCS16_GUEST_FIELD_DS_CACHE_IDX 20
#define VMX_VMCS32_GUEST_DS_LIMIT_CACHE_IDX 21
#define VMX_VMCS64_GUEST_DS_BASE_CACHE_IDX 22
#define VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS_CACHE_IDX 23
#define VMX_VMCS16_GUEST_FIELD_ES_CACHE_IDX 24
#define VMX_VMCS32_GUEST_ES_LIMIT_CACHE_IDX 25
#define VMX_VMCS64_GUEST_ES_BASE_CACHE_IDX 26
#define VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS_CACHE_IDX 27
#define VMX_VMCS16_GUEST_FIELD_FS_CACHE_IDX 28
#define VMX_VMCS32_GUEST_FS_LIMIT_CACHE_IDX 29
#define VMX_VMCS64_GUEST_FS_BASE_CACHE_IDX 30
#define VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS_CACHE_IDX 31
#define VMX_VMCS16_GUEST_FIELD_GS_CACHE_IDX 32
#define VMX_VMCS32_GUEST_GS_LIMIT_CACHE_IDX 33
#define VMX_VMCS64_GUEST_GS_BASE_CACHE_IDX 34
#define VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS_CACHE_IDX 35
#define VMX_VMCS16_GUEST_FIELD_SS_CACHE_IDX 36
#define VMX_VMCS32_GUEST_SS_LIMIT_CACHE_IDX 37
#define VMX_VMCS64_GUEST_SS_BASE_CACHE_IDX 38
#define VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS_CACHE_IDX 39
#define VMX_VMCS16_GUEST_FIELD_TR_CACHE_IDX 40
#define VMX_VMCS32_GUEST_TR_LIMIT_CACHE_IDX 41
#define VMX_VMCS64_GUEST_TR_BASE_CACHE_IDX 42
#define VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS_CACHE_IDX 43
#define VMX_VMCS16_GUEST_FIELD_LDTR_CACHE_IDX 44
#define VMX_VMCS32_GUEST_LDTR_LIMIT_CACHE_IDX 45
#define VMX_VMCS64_GUEST_LDTR_BASE_CACHE_IDX 46
#define VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS_CACHE_IDX 47
#define VMX_VMCS32_RO_EXIT_REASON_CACHE_IDX 48
#define VMX_VMCS32_RO_VM_INSTR_ERROR_CACHE_IDX 49
#define VMX_VMCS32_RO_EXIT_INSTR_LENGTH_CACHE_IDX 50
#define VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE_CACHE_IDX 51
#define VMX_VMCS32_RO_EXIT_INSTR_INFO_CACHE_IDX 52
#define VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO_CACHE_IDX 53
#define VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX 54
#define VMX_VMCS32_RO_IDT_INFO_CACHE_IDX 55
#define VMX_VMCS32_RO_IDT_ERRCODE_CACHE_IDX 56
#define VMX_VMCS_MAX_CACHE_IDX (VMX_VMCS32_RO_IDT_ERRCODE_CACHE_IDX+1)
#define VMX_VMCS64_GUEST_CR3_CACHE_IDX 57
#define VMX_VMCS_EXIT_PHYS_ADDR_FULL_CACHE_IDX 58
#define VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX (VMX_VMCS_EXIT_PHYS_ADDR_FULL_CACHE_IDX+1)
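/* Editorial note: VMX_VMCS_MAX_CACHE_IDX (== 57) deliberately precedes the CR3 and
 * exit-physical-address slots above; those two entries (57 and 58) are only part of
 * the read cache when it is sized with VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX (== 59),
 * which the macro name suggests is the nested-paging configuration. */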


#ifdef IN_RING0

/**
 * Enters the VT-x session.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VM CPU.
 * @param pCpu Pointer to the CPU info struct.
 */
VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);

/**
 * Leaves the VT-x session.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);


/**
 * Sets up and activates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param pCpu Pointer to the CPU info struct.
 * @param pVM Pointer to the VM. (can be NULL after a resume)
 * @param pvPageCpu Pointer to the global CPU page.
 * @param pPageCpuPhys Physical address of the global CPU page.
 */
VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);

/**
 * Deactivates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param pCpu Pointer to the CPU info struct.
 * @param pvPageCpu Pointer to the global CPU page.
 * @param pPageCpuPhys Physical address of the global CPU page.
 */
VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);

/**
 * Does Ring-0 per VM VT-x initialization.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
VMMR0DECL(int) VMXR0InitVM(PVM pVM);

/**
 * Does Ring-0 per VM VT-x termination.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
VMMR0DECL(int) VMXR0TermVM(PVM pVM);

/**
 * Sets up VT-x for the specified VM.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
VMMR0DECL(int) VMXR0SetupVM(PVM pVM);


/**
 * Save the host state.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu);

/**
 * Loads the guest state.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);


/**
 * Runs guest code in a VT-x VM.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
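
/*
 * Editorial sketch (not part of the original header): the ring-0 HM code is
 * expected to drive the entry points above roughly in this order for a VCPU;
 * the exact call sites live in the HWACCMR0/HWVMXR0 sources.
 *
 *     rc = VMXR0Enter(pVM, pVCpu, pCpu);            // make this VCPU's VMCS current on the host CPU
 *     rc = VMXR0SaveHostState(pVM, pVCpu);          // write host state fields into the VMCS
 *     rc = VMXR0LoadGuestState(pVM, pVCpu, pCtx);   // write guest state fields into the VMCS
 *     rc = VMXR0RunGuestCode(pVM, pVCpu, pCtx);     // VMLAUNCH/VMRESUME loop and exit handling
 *     rc = VMXR0Leave(pVM, pVCpu, pCtx);            // sync state back and leave the session
 */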


# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/**
 * Executes the specified handler in 64-bit mode.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 * @param pfnHandler Pointer to the RC handler function.
 * @param cbParam Number of parameters.
 * @param paParam Array of 32-bit parameters.
 */
VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
                                         uint32_t *paParam);
# endif

# define VMX_WRITE_SELREG(REG, reg) \
    do \
    { \
        rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_##REG, pCtx->reg.Sel); \
        rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, pCtx->reg.u32Limit); \
        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_##REG##_BASE, pCtx->reg.u64Base); \
        if ((pCtx->eflags.u32 & X86_EFL_VM)) \
        { \
            /* Must override this or else VT-x will fail with invalid guest state errors. */ \
            /* DPL=3, present, code/data, r/w/accessed. */ \
            val = (pCtx->reg.Attr.u & ~0xFF) | 0xF3; \
        } \
        else \
        if (   CPUMIsGuestInRealModeEx(pCtx) \
            && !pVM->hwaccm.s.vmx.fUnrestrictedGuest) \
        { \
            /* Must override this or else VT-x will fail with invalid guest state errors. */ \
            /* DPL=3, present, code/data, r/w/accessed. */ \
            val = 0xf3; \
        } \
        else \
        if (   (   pCtx->reg.Sel \
                || !CPUMIsGuestInPagedProtectedModeEx(pCtx) \
                || (!pCtx->cs.Attr.n.u1DefBig && !CPUMIsGuestIn64BitCodeEx(pCtx)) \
               ) \
            && pCtx->reg.Attr.n.u1Present == 1) \
        { \
            val = pCtx->reg.Attr.u | X86_SEL_TYPE_ACCESSED; \
        } \
        else \
            val = 0x10000; /* Invalid guest state error otherwise. (BIT(16) = Unusable) */ \
        \
        rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, val); \
    } while (0)

# define VMX_READ_SELREG(REG, reg) \
    do \
    { \
        VMXReadCachedVMCS(VMX_VMCS16_GUEST_FIELD_##REG, &val); \
        pCtx->reg.Sel      = val; \
        pCtx->reg.ValidSel = val; \
        pCtx->reg.fFlags   = CPUMSELREG_FLAGS_VALID; \
        VMXReadCachedVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, &val); \
        pCtx->reg.u32Limit = val; \
        VMXReadCachedVMCS(VMX_VMCS64_GUEST_##REG##_BASE, &val); \
        pCtx->reg.u64Base  = val; \
        VMXReadCachedVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, &val); \
        pCtx->reg.Attr.u   = val; \
    } while (0)
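
/* Editorial sketch: the selector macros above are meant to be expanded once per
 * segment register, with the VMCS field suffix in upper case and the matching
 * CPUMCTX member in lower case. They rely on locals named rc, val, pCtx and pVM
 * (plus pVCpu when the VMCS read cache is enabled) being in scope, e.g.:
 *
 *     VMX_WRITE_SELREG(ES, es);
 *     VMX_READ_SELREG(CS, cs);
 *     AssertRC(rc);
 */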

/* Don't read from the cache in this macro; used only in case of failure where the cache is out of sync. */
# define VMX_LOG_SELREG(REG, szSelReg, val) \
    do \
    { \
        VMXReadVMCS(VMX_VMCS16_GUEST_FIELD_##REG, &(val)); \
        Log(("%s Selector %x\n", szSelReg, (val))); \
        VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, &(val)); \
        Log(("%s Limit %x\n", szSelReg, (val))); \
        VMXReadVMCS(VMX_VMCS64_GUEST_##REG##_BASE, &(val)); \
        Log(("%s Base %RX64\n", szSelReg, (uint64_t)(val))); \
        VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, &(val)); \
        Log(("%s Attributes %x\n", szSelReg, (val))); \
    } while (0)

/**
 * Caches VMCS writes for performance reasons (Darwin) and for running 64-bit
 * guests on 32-bit hosts.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param idxField VMCS field index.
 * @param u64Val 16, 32 or 64-bit value.
 */
VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val);

#ifdef VMX_USE_CACHED_VMCS_ACCESSES
/**
 * Returns the value of a cached VMCS read, done for performance reasons (Darwin)
 * and for running 64-bit guests on 32-bit hosts.
 *
 * @param pVCpu Pointer to the VMCPU.
 * @param idxCache VMCS cache index (not VMCS field index!)
 * @param pVal Where to return the 16, 32 or 64-bit value.
 */
DECLINLINE(int) VMXReadCachedVMCSEx(PVMCPU pVCpu, uint32_t idxCache, RTGCUINTREG *pVal)
{
    Assert(idxCache <= VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX);
    *pVal = pVCpu->hwaccm.s.vmx.VMCSCache.Read.aFieldVal[idxCache];
    return VINF_SUCCESS;
}
#endif

/**
 * Returns the value of a cached VMCS read, done for performance reasons (Darwin)
 * and for running 64-bit guests on 32-bit hosts.
 *
 * @param idxField VMCS field index.
 * @param pVal Value pointer (out).
 */
#ifdef VMX_USE_CACHED_VMCS_ACCESSES
# define VMXReadCachedVMCS(idxField, pVal) VMXReadCachedVMCSEx(pVCpu, idxField##_CACHE_IDX, pVal)
#else
# define VMXReadCachedVMCS(idxField, pVal) VMXReadVMCS(idxField, pVal)
#endif
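
/* Editorial note: with VMX_USE_CACHED_VMCS_ACCESSES defined, the ##_CACHE_IDX token
 * pasting maps a VMCS field name onto its read-cache slot from the table at the top
 * of this file, so
 *
 *     VMXReadCachedVMCS(VMX_VMCS64_GUEST_RIP, &val);
 *
 * expands to
 *
 *     VMXReadCachedVMCSEx(pVCpu, VMX_VMCS64_GUEST_RIP_CACHE_IDX, &val);
 *
 * and therefore needs a local pVCpu and a cache primed via VMXSetupCachedReadVMCS below.
 */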

/**
 * Sets up cached VMCS reads for performance reasons (Darwin) and for running 64-bit
 * guests on 32-bit hosts.
 *
 * @param pCache The cache.
 * @param idxField VMCS field index.
 */
#define VMXSetupCachedReadVMCS(pCache, idxField) \
{ \
    Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
    pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
    pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
}

#define VMX_SETUP_SELREG(REG, pCache) \
{ \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS16_GUEST_FIELD_##REG); \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_GUEST_##REG##_LIMIT); \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_##REG##_BASE); \
    VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS); \
}
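
/* Editorial sketch: the read cache would be primed once per VCPU (e.g. from the VM
 * setup path) along these lines; the cache location matches the one dereferenced by
 * VMXReadCachedVMCSEx above:
 *
 *     PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
 *     VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_RIP);
 *     VMX_SETUP_SELREG(CS, pCache);
 *     VMX_SETUP_SELREG(SS, pCache);
 */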

/**
 * Prepares for and executes VMLAUNCH (32-bit guest mode).
 *
 * @returns VBox status code.
 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
 * @param pCtx Pointer to the guest CPU context.
 * @param pCache Pointer to the VMCS cache.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);

/**
 * Prepares for and executes VMLAUNCH (64-bit guest mode).
 *
 * @returns VBox status code.
 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
 * @param pCtx Pointer to the guest CPU context.
 * @param pCache Pointer to the VMCS cache.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
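
/* Editorial sketch (field names assumed from this era of the code, not confirmed by
 * this header): the assembly entry points above are normally reached through a
 * per-VCPU function pointer rather than called directly, roughly as:
 *
 *     rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx,
 *                                         &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu);
 *
 * where a non-zero fResume selects VMRESUME and zero selects VMLAUNCH.
 */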

# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/**
 * Prepares for and executes VMLAUNCH (64-bit guest mode).
 *
 * @returns VBox status code.
 * @param fResume Whether to use VMRESUME (true) or VMLAUNCH (false).
 * @param pCtx Pointer to the guest CPU context.
 * @param pCache Pointer to the VMCS cache.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
# endif

#endif /* IN_RING0 */

/** @} */

RT_C_DECLS_END

#endif /* ___HWVMXR0_h */
