VirtualBox

source: vbox/trunk/src/VBox/VMM/include/SELMInternal.h@ 44528

Last change on this file since 44528 was 44528, checked in by vboxsync, 12 years ago

header (C) fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 17.9 KB
Line 
1/* $Id: SELMInternal.h 44528 2013-02-04 14:27:54Z vboxsync $ */
2/** @file
3 * SELM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___SELMInternal_h
19#define ___SELMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/vmm/stam.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/log.h>
26#include <iprt/x86.h>
27
28
29
30/** @defgroup grp_selm_int Internals
31 * @ingroup grp_selm
32 * @internal
33 * @{
34 */
35
/** The number of GDT entries allocated for our GDT. (full size) */
#define SELM_GDT_ELEMENTS                   8192

/** aHyperSel index to retrieve hypervisor selectors */
/** The Flat CS selector used by the VMM inside the GC. */
#define SELM_HYPER_SEL_CS                   0
/** The Flat DS selector used by the VMM inside the GC. */
#define SELM_HYPER_SEL_DS                   1
/** The 64-bit mode CS selector used by the VMM inside the GC. */
#define SELM_HYPER_SEL_CS64                 2
/** The TSS selector used by the VMM inside the GC. */
#define SELM_HYPER_SEL_TSS                  3
/** The TSS selector for taking trap 08 (\#DF). */
#define SELM_HYPER_SEL_TSS_TRAP08           4
/** Number of GDT entries we need for internal use. */
#define SELM_HYPER_SEL_MAX                  (SELM_HYPER_SEL_TSS_TRAP08 + 1)


/** Default GDT selectors we use for the hypervisor.
 * These sit at the very top of the GDT (selector = index << 3). */
#define SELM_HYPER_DEFAULT_SEL_CS           ((SELM_GDT_ELEMENTS - 0x1) << 3)
#define SELM_HYPER_DEFAULT_SEL_DS           ((SELM_GDT_ELEMENTS - 0x2) << 3)
#define SELM_HYPER_DEFAULT_SEL_CS64         ((SELM_GDT_ELEMENTS - 0x3) << 3)
#define SELM_HYPER_DEFAULT_SEL_TSS          ((SELM_GDT_ELEMENTS - 0x4) << 3)
#define SELM_HYPER_DEFAULT_SEL_TSS_TRAP08   ((SELM_GDT_ELEMENTS - 0x5) << 3)
/** The lowest value default we use. */
#define SELM_HYPER_DEFAULT_BASE             SELM_HYPER_DEFAULT_SEL_TSS_TRAP08
62
/**
 * Converts a SELM pointer into a VM pointer.
 *
 * @returns Pointer to the VM structure the SELM is part of.
 * @param   pSELM   Pointer to SELM instance data.
 *
 * @note    The macro argument is parenthesized in both expansions so that
 *          expression arguments (e.g. pointer arithmetic) bind correctly.
 */
#define SELM2VM(pSELM)  ( (PVM)((char *)(pSELM) - (pSELM)->offVM) )
69
70
71
/**
 * SELM Data (part of VM)
 *
 * Shadow selector/descriptor-table state: tracks the guest GDT/LDT/TSS, the
 * hypervisor's own selectors, and the monitoring/sync bookkeeping and stats.
 */
typedef struct SELM
{
    /** Offset to the VM structure.
     * See SELM2VM(). */
    RTINT                   offVM;

    /** Flat CS, DS, 64 bit mode CS, TSS & trap 8 TSS. */
    RTSEL                   aHyperSel[SELM_HYPER_SEL_MAX];

    /** Pointer to the GCs - R3 Ptr.
     * This size is governed by SELM_GDT_ELEMENTS. */
    R3PTRTYPE(PX86DESC)     paGdtR3;
    /** Pointer to the GCs - RC Ptr.
     * This is not initialized until the first relocation because it's used to
     * check if the shadow GDT virtual handler requires deregistration. */
    RCPTRTYPE(PX86DESC)     paGdtRC;
    /** Current (last) Guest's GDTR.
     * The pGdt member is set to RTRCPTR_MAX if we're not monitoring the guest GDT. */
    VBOXGDTR                GuestGdtr;
    /** The current (last) effective Guest GDT size. */
    RTUINT                  cbEffGuestGdtLimit;

    /* Structure padding for alignment; no semantic meaning. */
    uint32_t                padding0;

    /** R3 pointer to the LDT shadow area in HMA. */
    R3PTRTYPE(void *)       pvLdtR3;
    /** RC pointer to the LDT shadow area in HMA. */
    RCPTRTYPE(void *)       pvLdtRC;
#if GC_ARCH_BITS == 64
    RTRCPTR                 padding1;
#endif
    /** The address of the guest LDT.
     * RTRCPTR_MAX if not monitored. */
    RTGCPTR                 GCPtrGuestLdt;
    /** Current LDT limit, both Guest and Shadow. */
    RTUINT                  cbLdtLimit;
    /** Current LDT offset relative to pvLdtR3/pvLdtRC. */
    RTUINT                  offLdtHyper;
#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
    uint32_t                padding2[2];
#endif
    /** TSS. (This is 16 byte aligned!)
     * @todo I/O bitmap & interrupt redirection table? */
    VBOXTSS                 Tss;

    /** TSS for trap 08 (\#DF). */
    VBOXTSS                 TssTrap08;

    /** Monitored shadow TSS address. */
    RCPTRTYPE(void *)       pvMonShwTssRC;
#if GC_ARCH_BITS == 64
    RTRCPTR                 padding3;
#endif
    /** GC Pointer to the current Guest's TSS.
     * RTRCPTR_MAX if not monitored. */
    RTGCPTR                 GCPtrGuestTss;
    /** The size of the guest TSS. */
    RTUINT                  cbGuestTss;
    /** Set if it's a 32-bit TSS. */
    bool                    fGuestTss32Bit;
    /** The size of the Guest's TSS part we're monitoring. */
    RTUINT                  cbMonitoredGuestTss;
    /** The guest TSS selector at last sync (part of monitoring).
     * Contains RTSEL_MAX if not set. */
    RTSEL                   GCSelTss;
    /** The last known offset of the I/O bitmap.
     * This is only used if we monitor the bitmap. */
    uint16_t                offGuestIoBitmap;

    /** Indicates that the Guest GDT access handler has been registered. */
    bool                    fGDTRangeRegistered;

    /** Indicates whether LDT/GDT/TSS monitoring and syncing is disabled. */
    bool                    fDisableMonitoring;

    /** Indicates whether the TSS stack selector & base address need to be refreshed. */
    bool                    fSyncTSSRing0Stack;
    /* Explicit padding to keep the stats below aligned. */
    bool                    fPadding2[1+2];

    /** SELMR3UpdateFromCPUM() profiling. */
    STAMPROFILE             StatUpdateFromCPUM;
    /** SELMR3SyncTSS() profiling. */
    STAMPROFILE             StatTSSSync;

    /** GC: The number of handled writes to the Guest's GDT. */
    STAMCOUNTER             StatRCWriteGuestGDTHandled;
    /** GC: The number of unhandled writes to the Guest's GDT. */
    STAMCOUNTER             StatRCWriteGuestGDTUnhandled;
    /** GC: The number of times writes to the Guest's LDT were detected. */
    STAMCOUNTER             StatRCWriteGuestLDT;
    /** GC: The number of handled writes to the Guest's TSS. */
    STAMCOUNTER             StatRCWriteGuestTSSHandled;
    /** GC: The number of handled writes to the Guest's TSS where we detected a change. */
    STAMCOUNTER             StatRCWriteGuestTSSHandledChanged;
    /** GC: The number of handled redir writes to the Guest's TSS where we detected a change. */
    STAMCOUNTER             StatRCWriteGuestTSSRedir;
    /** GC: The number of unhandled writes to the Guest's TSS. */
    STAMCOUNTER             StatRCWriteGuestTSSUnhandled;
    /** The number of times we had to relocate our hypervisor selectors. */
    STAMCOUNTER             StatHyperSelsChanged;
    /** The number of times we had to find free hypervisor selectors. */
    STAMCOUNTER             StatScanForHyperSels;
    /** Counts the times we detected stale selectors in SELMR3UpdateFromCPUM. */
    STAMCOUNTER             aStatDetectedStaleSReg[X86_SREG_COUNT];
    /** Counts the times we were called with already stale selectors in
     * SELMR3UpdateFromCPUM. */
    STAMCOUNTER             aStatAlreadyStaleSReg[X86_SREG_COUNT];
    /** Counts the times we found a stale selector becoming valid again. */
    STAMCOUNTER             StatStaleToUnstaleSReg;
#ifdef VBOX_WITH_STATISTICS
    /** Times we updated hidden selector registers in SELMR3UpdateFromCPUM. */
    STAMCOUNTER             aStatUpdatedSReg[X86_SREG_COUNT];
    STAMCOUNTER             StatLoadHidSelGst;
    STAMCOUNTER             StatLoadHidSelShw;
#endif
    STAMCOUNTER             StatLoadHidSelReadErrors;
    STAMCOUNTER             StatLoadHidSelGstNoGood;
} SELM, *PSELM;
193
194RT_C_DECLS_BEGIN
195
196VMMRCDECL(int) selmRCGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
197VMMRCDECL(int) selmRCGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
198VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
199
200VMMRCDECL(int) selmRCShadowGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
201VMMRCDECL(int) selmRCShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
202VMMRCDECL(int) selmRCShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
203
204void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
205
206RT_C_DECLS_END
207
208
209#ifdef VBOX_WITH_RAW_MODE_NOT_R0
210
/**
 * Checks if a shadow descriptor table entry is good for the given segment
 * register.
 *
 * @returns @c true if good, @c false if not.
 * @param   pSReg       The segment register.
 * @param   pShwDesc    The shadow descriptor table entry.
 * @param   iSReg       The segment register index (X86_SREG_XXX).
 * @param   uCpl        The CPL.
 *
 * @note    In the shadow tables, DPL-0 non-conforming segments appear to be
 *          pushed down to DPL 1 with u1Available set (see
 *          selmGuestToShadowDesc), so "u2Dpl - u1Available" recovers the
 *          guest-visible DPL throughout this function.
 */
DECLINLINE(bool) selmIsShwDescGoodForSReg(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg, uint32_t uCpl)
{
    /*
     * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
     */

    /* Descriptor must be present. */
    if (!pShwDesc->Gen.u1Present)
    {
        Log(("selmIsShwDescGoodForSReg: Not present\n"));
        return false;
    }

    /* Only code/data descriptors are loadable into segment registers;
       system descriptors (u1DescType == 0) are rejected. */
    if (!pShwDesc->Gen.u1DescType)
    {
        Log(("selmIsShwDescGoodForSReg: System descriptor\n"));
        return false;
    }

    if (iSReg == X86_SREG_SS)
    {
        /* SS must be a writable data segment (write set, code clear). */
        if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
        {
            Log(("selmIsShwDescGoodForSReg: Stack must be writable\n"));
            return false;
        }
        /* CPL may not exceed the (un-pushed-down) DPL.  NOTE(review): this is
           laxer than hardware, which requires DPL == CPL for SS — presumably
           intentional for raw-mode; confirm against callers. */
        if (uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
        {
            Log(("selmIsShwDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available));
            return false;
        }
    }
    else
    {
        if (iSReg == X86_SREG_CS)
        {
            /* CS requires a code segment. */
            if (!(pShwDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
            {
                Log(("selmIsShwDescGoodForSReg: CS needs code segment\n"));
                return false;
            }
        }
        /* DS/ES/FS/GS: execute-only code segments cannot be read thru. */
        else if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
        {
            Log(("selmIsShwDescGoodForSReg: iSReg=%u execute only\n", iSReg));
            return false;
        }

        /* Privilege check, skipped for conforming code segments:
           RPL and CPL may not exceed the recovered DPL.  An RPL equal to
           u1Available (i.e. RPL 1 on a pushed-down ring-0 segment) is
           tolerated — presumably the raw-mode ring compression; verify. */
        if (   (pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
            != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
            && (   (   (pSReg->Sel & X86_SEL_RPL) > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available
                    && (pSReg->Sel & X86_SEL_RPL) != pShwDesc->Gen.u1Available )
                || uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available ) )
        {
            Log(("selmIsShwDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u\n", iSReg,
                 pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available, uCpl, pSReg->Sel & X86_SEL_RPL));
            return false;
        }
    }

    return true;
}
282
283
/**
 * Checks if a guest descriptor table entry is good for the given segment
 * register.
 *
 * @returns @c true if good, @c false if not.
 * @param   pVCpu       The current virtual CPU.
 * @param   pSReg       The segment register.
 * @param   pGstDesc    The guest descriptor table entry.
 * @param   iSReg       The segment register index (X86_SREG_XXX).
 * @param   uCpl        The CPL.
 *
 * @note    Unlike selmIsShwDescGoodForSReg, this operates on the raw guest
 *          descriptor, so the DPL is used as-is (no u1Available adjustment).
 */
DECLINLINE(bool) selmIsGstDescGoodForSReg(PVMCPU pVCpu, PCCPUMSELREG pSReg, PCX86DESC pGstDesc, uint32_t iSReg, uint32_t uCpl)
{
    /*
     * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
     */

    /* Descriptor must be present. */
    if (!pGstDesc->Gen.u1Present)
    {
        Log(("selmIsGstDescGoodForSReg: Not present\n"));
        return false;
    }

    /* Only code/data descriptors are loadable into segment registers. */
    if (!pGstDesc->Gen.u1DescType)
    {
        Log(("selmIsGstDescGoodForSReg: System descriptor\n"));
        return false;
    }

    if (iSReg == X86_SREG_SS)
    {
        /* SS must be a writable data segment (write set, code clear). */
        if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
        {
            Log(("selmIsGstDescGoodForSReg: Stack must be writable\n"));
            return false;
        }
        /* CPL may not exceed DPL. */
        if (uCpl > pGstDesc->Gen.u2Dpl)
        {
            Log(("selmIsGstDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pGstDesc->Gen.u2Dpl));
            return false;
        }
    }
    else
    {
        if (iSReg == X86_SREG_CS)
        {
            /* CS requires a code segment. */
            if (!(pGstDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
            {
                Log(("selmIsGstDescGoodForSReg: CS needs code segment\n"));
                return false;
            }
        }
        /* DS/ES/FS/GS: execute-only code segments cannot be read thru. */
        else if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
        {
            Log(("selmIsGstDescGoodForSReg: iSReg=%u execute only\n", iSReg));
            return false;
        }

        /* Privilege check, skipped for conforming code segments: RPL and CPL
           may not exceed DPL.  An RPL of 1 is tolerated while the guest is
           executing in raw mode — presumably because raw-mode runs guest
           ring 0 at ring 1; confirm with the raw-mode docs. */
        if (   (pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
            != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
            && (   (   (pSReg->Sel & X86_SEL_RPL) > pGstDesc->Gen.u2Dpl
                    && (   (pSReg->Sel & X86_SEL_RPL) != 1
                        || !CPUMIsGuestInRawMode(pVCpu) ) )
                || uCpl > (unsigned)pGstDesc->Gen.u2Dpl
               )
           )
        {
            Log(("selmIsGstDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u InRawMode=%u\n", iSReg,
                 pGstDesc->Gen.u2Dpl, uCpl, pSReg->Sel & X86_SEL_RPL, CPUMIsGuestInRawMode(pVCpu)));
            return false;
        }
    }

    return true;
}
359
360
361/**
362 * Converts a guest GDT or LDT entry to a shadow table entry.
363 *
364 * @param pDesc Guest entry on input, shadow entry on return.
365 */
366DECL_FORCE_INLINE(void) selmGuestToShadowDesc(PX86DESC pDesc)
367{
368 /*
369 * Code and data selectors are generally 1:1, with the
370 * 'little' adjustment we do for DPL 0 selectors.
371 */
372 if (pDesc->Gen.u1DescType)
373 {
374 /*
375 * Hack for A-bit against Trap E on read-only GDT.
376 */
377 /** @todo Fix this by loading ds and cs before turning off WP. */
378 pDesc->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
379
380 /*
381 * All DPL 0 code and data segments are squeezed into DPL 1.
382 *
383 * We're skipping conforming segments here because those
384 * cannot give us any trouble.
385 */
386 if ( pDesc->Gen.u2Dpl == 0
387 && (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
388 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
389 {
390 pDesc->Gen.u2Dpl = 1;
391 pDesc->Gen.u1Available = 1;
392 }
393 else
394 pDesc->Gen.u1Available = 0;
395 }
396 else
397 {
398 /*
399 * System type selectors are marked not present.
400 * Recompiler or special handling is required for these.
401 */
402 /** @todo what about interrupt gates and rawr0? */
403 pDesc->Gen.u1Present = 0;
404 }
405}
406
407
/**
 * Checks if a segment register is stale given the shadow descriptor table
 * entry.
 *
 * @returns @c true if stale, @c false if not.
 * @param   pSReg       The segment register.
 * @param   pShwDesc    The shadow descriptor entry.
 * @param   iSReg       The segment register number (X86_SREG_XXX).
 *                      (Currently unused by the checks themselves.)
 */
DECLINLINE(bool) selmIsSRegStale32(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg)
{
    /* Compare the hidden attribute fields one by one.  The DPL comparison
       undoes the raw-mode pushdown (u2Dpl - u1Available, see
       selmGuestToShadowDesc) to get the guest-visible DPL. */
    if (   pSReg->Attr.n.u1Present     != pShwDesc->Gen.u1Present
        || pSReg->Attr.n.u4Type        != pShwDesc->Gen.u4Type
        || pSReg->Attr.n.u1DescType    != pShwDesc->Gen.u1DescType
        || pSReg->Attr.n.u1DefBig      != pShwDesc->Gen.u1DefBig
        || pSReg->Attr.n.u1Granularity != pShwDesc->Gen.u1Granularity
        || pSReg->Attr.n.u2Dpl         != pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
    {
        Log(("selmIsSRegStale32: Attributes changed (%#x -> %#x)\n", pSReg->Attr.u, X86DESC_GET_HID_ATTR(pShwDesc)));
        return true;
    }

    /* Base address must match. */
    if (pSReg->u64Base != X86DESC_BASE(pShwDesc))
    {
        Log(("selmIsSRegStale32: base changed (%#llx -> %#llx)\n", pSReg->u64Base, X86DESC_BASE(pShwDesc)));
        return true;
    }

    /* Limit (granularity-expanded) must match. */
    if (pSReg->u32Limit != X86DESC_LIMIT_G(pShwDesc))
    {
        Log(("selmIsSRegStale32: limit changed (%#x -> %#x)\n", pSReg->u32Limit, X86DESC_LIMIT_G(pShwDesc)));
        return true;
    }

    return false;
}
444
445
446/**
447 * Loads the hidden bits of a selector register from a shadow descriptor table
448 * entry.
449 *
450 * @param pSReg The segment register in question.
451 * @param pShwDesc The shadow descriptor table entry.
452 */
453DECLINLINE(void) selmLoadHiddenSRegFromShadowDesc(PCPUMSELREG pSReg, PCX86DESC pShwDesc)
454{
455 pSReg->Attr.u = X86DESC_GET_HID_ATTR(pShwDesc);
456 pSReg->Attr.n.u2Dpl -= pSReg->Attr.n.u1Available;
457 Assert(pSReg->Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
458 pSReg->u32Limit = X86DESC_LIMIT_G(pShwDesc);
459 pSReg->u64Base = X86DESC_BASE(pShwDesc);
460 pSReg->ValidSel = pSReg->Sel;
461 if (pSReg->Attr.n.u1Available)
462 pSReg->ValidSel &= ~(RTSEL)1;
463 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
464}
465
466
467/**
468 * Loads the hidden bits of a selector register from a guest descriptor table
469 * entry.
470 *
471 * @param pVCpu The current virtual CPU.
472 * @param pSReg The segment register in question.
473 * @param pGstDesc The guest descriptor table entry.
474 */
475DECLINLINE(void) selmLoadHiddenSRegFromGuestDesc(PVMCPU pVCpu, PCPUMSELREG pSReg, PCX86DESC pGstDesc)
476{
477 pSReg->Attr.u = X86DESC_GET_HID_ATTR(pGstDesc);
478 pSReg->Attr.n.u4Type |= X86_SEL_TYPE_ACCESSED;
479 pSReg->u32Limit = X86DESC_LIMIT_G(pGstDesc);
480 pSReg->u64Base = X86DESC_BASE(pGstDesc);
481 pSReg->ValidSel = pSReg->Sel;
482 if ((pSReg->ValidSel & 1) && CPUMIsGuestInRawMode(pVCpu))
483 pSReg->ValidSel &= ~(RTSEL)1;
484 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
485}
486
487#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
488
489/** @} */
490
491#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette