VirtualBox

source: vbox/trunk/src/VBox/VMM/include/SELMInternal.h@ 45478

Last change on this file since 45478 was 45276, checked in by vboxsync, 12 years ago

Ring-1 compression patches, courtesy of trivirt AG:

  • main: diff to remove the hwvirt requirement for QNX
  • rem: diff for dealing with raw ring 0/1 selectors and general changes to allowed guest execution states
  • vmm: changes for using the guest's TSS selector index as our hypervisor TSS selector (makes str safe) (VBOX_WITH_SAFE_STR )
  • vmm: changes for dealing with guest ring 1 code (VBOX_WITH_RAW_RING1)
  • vmm: change to emulate smsw in RC/R0 (QNX uses this old style instruction a lot so going to qemu for emulation is very expensive)
  • vmm: change (hack) to kick out patm virtual handlers in case they conflict with guest GDT/TSS write monitors; we should allow multiple handlers per page, but that change would be rather invasive
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 18.8 KB
Line 
1/* $Id: SELMInternal.h 45276 2013-04-02 08:17:11Z vboxsync $ */
2/** @file
3 * SELM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___SELMInternal_h
19#define ___SELMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/vmm/stam.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/log.h>
26#include <iprt/x86.h>
27#include <VBox/vmm/em.h>
28
29
30
31/** @defgroup grp_selm_int Internals
32 * @ingroup grp_selm
33 * @internal
34 * @{
35 */
36
37/**
38 * Enable or disable tracking of Shadow GDT/LDT/TSS.
39 * @{
40 */
41#define SELM_TRACK_SHADOW_GDT_CHANGES
42#define SELM_TRACK_SHADOW_LDT_CHANGES
43#define SELM_TRACK_SHADOW_TSS_CHANGES
44/** @} */
45
46/**
47 * Enable or disable tracking of Guest GDT/LDT/TSS.
48 * @{
49 */
50#define SELM_TRACK_GUEST_GDT_CHANGES
51#define SELM_TRACK_GUEST_LDT_CHANGES
52#define SELM_TRACK_GUEST_TSS_CHANGES
53/** @} */
54
55
56/** The number of GDTS allocated for our GDT. (full size) */
57#define SELM_GDT_ELEMENTS 8192
58
59/** aHyperSel index to retrieve hypervisor selectors */
60/** The Flat CS selector used by the VMM inside the GC. */
61#define SELM_HYPER_SEL_CS 0
62/** The Flat DS selector used by the VMM inside the GC. */
63#define SELM_HYPER_SEL_DS 1
64/** The 64-bit mode CS selector used by the VMM inside the GC. */
65#define SELM_HYPER_SEL_CS64 2
66/** The TSS selector used by the VMM inside the GC. */
67#define SELM_HYPER_SEL_TSS 3
68/** The TSS selector for taking trap 08 (\#DF). */
69#define SELM_HYPER_SEL_TSS_TRAP08 4
70/** Number of GDTs we need for internal use */
71#define SELM_HYPER_SEL_MAX (SELM_HYPER_SEL_TSS_TRAP08 + 1)
72
73
74/** Default GDT selectors we use for the hypervisor. */
75#define SELM_HYPER_DEFAULT_SEL_CS ((SELM_GDT_ELEMENTS - 0x1) << 3)
76#define SELM_HYPER_DEFAULT_SEL_DS ((SELM_GDT_ELEMENTS - 0x2) << 3)
77#define SELM_HYPER_DEFAULT_SEL_CS64 ((SELM_GDT_ELEMENTS - 0x3) << 3)
78#define SELM_HYPER_DEFAULT_SEL_TSS ((SELM_GDT_ELEMENTS - 0x4) << 3)
79#define SELM_HYPER_DEFAULT_SEL_TSS_TRAP08 ((SELM_GDT_ELEMENTS - 0x5) << 3)
80/** The lowest value default we use. */
81#define SELM_HYPER_DEFAULT_BASE SELM_HYPER_DEFAULT_SEL_TSS_TRAP08
82
83/**
84 * Converts a SELM pointer into a VM pointer.
85 * @returns Pointer to the VM structure the SELM is part of.
86 * @param pSELM Pointer to SELM instance data.
87 */
88#define SELM2VM(pSELM) ( (PVM)((char *)pSELM - pSELM->offVM) )
89
90
91
92/**
93 * SELM Data (part of VM)
94 */
/**
 * SELM Data (part of VM)
 *
 * Shadow GDT/LDT/TSS state for raw-mode execution, together with the
 * guest descriptor table monitoring state and statistics.
 */
typedef struct SELM
{
    /** Offset to the VM structure.
     * See SELM2VM(). */
    RTINT offVM;

    /** Flat CS, DS, 64 bit mode CS, TSS & trap 8 TSS.
     * Indexed by the SELM_HYPER_SEL_XXX defines. */
    RTSEL aHyperSel[SELM_HYPER_SEL_MAX];

    /** Pointer to the shadow GDT - R3 Ptr.
     * This size is governed by SELM_GDT_ELEMENTS. */
    R3PTRTYPE(PX86DESC) paGdtR3;
    /** Pointer to the shadow GDT - RC Ptr.
     * This is not initialized until the first relocation because it's used to
     * check if the shadow GDT virtual handler requires deregistration. */
    RCPTRTYPE(PX86DESC) paGdtRC;
    /** Current (last) Guest's GDTR.
     * The pGdt member is set to RTRCPTR_MAX if we're not monitoring the guest GDT. */
    VBOXGDTR GuestGdtr;
    /** The current (last) effective Guest GDT size. */
    RTUINT cbEffGuestGdtLimit;

    /** Alignment padding. */
    uint32_t padding0;

    /** R3 pointer to the LDT shadow area in HMA. */
    R3PTRTYPE(void *) pvLdtR3;
    /** RC pointer to the LDT shadow area in HMA. */
    RCPTRTYPE(void *) pvLdtRC;
#if GC_ARCH_BITS == 64
    /** Alignment padding (64-bit RC pointers). */
    RTRCPTR padding1;
#endif
    /** The address of the guest LDT.
     * RTRCPTR_MAX if not monitored. */
    RTGCPTR GCPtrGuestLdt;
    /** Current LDT limit, both Guest and Shadow. */
    RTUINT cbLdtLimit;
    /** Current LDT offset relative to pvLdtR3/pvLdtRC. */
    RTUINT offLdtHyper;
#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
    /** Alignment padding. */
    uint32_t padding2[2];
#endif
    /** TSS. (This is 16 byte aligned!)
     * @todo I/O bitmap & interrupt redirection table? */
    VBOXTSS Tss;

    /** TSS for trap 08 (\#DF). */
    VBOXTSS TssTrap08;

    /** Monitored shadow TSS address. */
    RCPTRTYPE(void *) pvMonShwTssRC;
#if GC_ARCH_BITS == 64
    /** Alignment padding. */
    RTRCPTR padding3;
#endif
    /** GC Pointer to the current Guest's TSS.
     * RTRCPTR_MAX if not monitored. */
    RTGCPTR GCPtrGuestTss;
    /** The size of the guest TSS. */
    RTUINT cbGuestTss;
    /** Set if it's a 32-bit TSS. */
    bool fGuestTss32Bit;
    /** The size of the Guest's TSS part we're monitoring. */
    RTUINT cbMonitoredGuestTss;
    /** The guest TSS selector at last sync (part of monitoring).
     * Contains RTSEL_MAX if not set. */
    RTSEL GCSelTss;
    /** The last known offset of the I/O bitmap.
     * This is only used if we monitor the bitmap. */
    uint16_t offGuestIoBitmap;

    /** Indicates that the Guest GDT access handler has been registered. */
    bool fGDTRangeRegistered;

    /** Indicates whether LDT/GDT/TSS monitoring and syncing is disabled. */
    bool fDisableMonitoring;

    /** Indicates whether the TSS stack selector & base address need to be refreshed. */
    bool fSyncTSSRing0Stack;
    /** Explicit padding. */
    bool fPadding2[1+2];

    /** SELMR3UpdateFromCPUM() profiling. */
    STAMPROFILE StatUpdateFromCPUM;
    /** SELMR3SyncTSS() profiling. */
    STAMPROFILE StatTSSSync;

    /** GC: The number of handled writes to the Guest's GDT. */
    STAMCOUNTER StatRCWriteGuestGDTHandled;
    /** GC: The number of unhandled writes to the Guest's GDT. */
    STAMCOUNTER StatRCWriteGuestGDTUnhandled;
    /** GC: The number of times writes to the Guest's LDT were detected. */
    STAMCOUNTER StatRCWriteGuestLDT;
    /** GC: The number of handled writes to the Guest's TSS. */
    STAMCOUNTER StatRCWriteGuestTSSHandled;
    /** GC: The number of handled writes to the Guest's TSS where we detected a change. */
    STAMCOUNTER StatRCWriteGuestTSSHandledChanged;
    /** GC: The number of handled redir writes to the Guest's TSS where we detected a change. */
    STAMCOUNTER StatRCWriteGuestTSSRedir;
    /** GC: The number of unhandled writes to the Guest's TSS. */
    STAMCOUNTER StatRCWriteGuestTSSUnhandled;
    /** The number of times we had to relocate our hypervisor selectors. */
    STAMCOUNTER StatHyperSelsChanged;
    /** The number of times we had to find free hypervisor selectors. */
    STAMCOUNTER StatScanForHyperSels;
    /** Counts the times we detected stale selectors in SELMR3UpdateFromCPUM. */
    STAMCOUNTER aStatDetectedStaleSReg[X86_SREG_COUNT];
    /** Counts the times we were called with already stale selectors in
     * SELMR3UpdateFromCPUM. */
    STAMCOUNTER aStatAlreadyStaleSReg[X86_SREG_COUNT];
    /** Counts the times we found a stale selector becoming valid again. */
    STAMCOUNTER StatStaleToUnstaleSReg;
#ifdef VBOX_WITH_STATISTICS
    /** Times we updated hidden selector registers in CPUMR3UpdateFromCPUM. */
    STAMCOUNTER aStatUpdatedSReg[X86_SREG_COUNT];
    /** Times the hidden state was loaded from the guest descriptor table
     * (see selmLoadHiddenSRegFromGuestDesc). */
    STAMCOUNTER StatLoadHidSelGst;
    /** Times the hidden state was loaded from the shadow descriptor table
     * (see selmLoadHiddenSRegFromShadowDesc). */
    STAMCOUNTER StatLoadHidSelShw;
#endif
    /** Errors encountered while reading descriptors for hidden state loading
     * (NOTE(review): inferred from the name - confirm against users). */
    STAMCOUNTER StatLoadHidSelReadErrors;
    /** Times the guest descriptor was not usable for loading the hidden state
     * (NOTE(review): inferred from the name - confirm against users). */
    STAMCOUNTER StatLoadHidSelGstNoGood;
} SELM, *PSELM;
213
214RT_C_DECLS_BEGIN
215
216VMMRCDECL(int) selmRCGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
217VMMRCDECL(int) selmRCGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
218VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
219
220VMMRCDECL(int) selmRCShadowGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
221VMMRCDECL(int) selmRCShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
222VMMRCDECL(int) selmRCShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
223
224void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
225#ifdef VBOX_WITH_RAW_RING1
226void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
227#endif
228
229RT_C_DECLS_END
230
231
232#ifdef VBOX_WITH_RAW_MODE_NOT_R0
233
234/**
235 * Checks if a shadow descriptor table entry is good for the given segment
236 * register.
237 *
238 * @returns @c true if good, @c false if not.
239 * @param pSReg The segment register.
240 * @param pShwDesc The shadow descriptor table entry.
241 * @param iSReg The segment register index (X86_SREG_XXX).
242 * @param uCpl The CPL.
243 */
/**
 * Checks if a shadow descriptor table entry is good for the given segment
 * register.
 *
 * Mirrors the CPU's segment-load validation so the hidden register state may
 * safely be loaded from the shadow table entry.
 *
 * @returns @c true if good, @c false if not.
 * @param   pSReg       The segment register.
 * @param   pShwDesc    The shadow descriptor table entry.
 * @param   iSReg       The segment register index (X86_SREG_XXX).
 * @param   uCpl        The CPL.
 */
DECLINLINE(bool) selmIsShwDescGoodForSReg(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg, uint32_t uCpl)
{
    /*
     * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
     */

    /* A segment register can never hold a not-present descriptor. */
    if (!pShwDesc->Gen.u1Present)
    {
        Log(("selmIsShwDescGoodForSReg: Not present\n"));
        return false;
    }

    /* System descriptors (TSS, gates, LDT) cannot be loaded into segment registers. */
    if (!pShwDesc->Gen.u1DescType)
    {
        Log(("selmIsShwDescGoodForSReg: System descriptor\n"));
        return false;
    }

    if (iSReg == X86_SREG_SS)
    {
        /* SS requires a writable data segment. */
        if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
        {
            Log(("selmIsShwDescGoodForSReg: Stack must be writable\n"));
            return false;
        }
        /* DPL-compressed shadow entries have u1Available set (see
           selmGuestToShadowDesc), so the effective guest DPL is
           u2Dpl - u1Available. */
        if (uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
        {
            Log(("selmIsShwDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available));
            return false;
        }
    }
    else
    {
        if (iSReg == X86_SREG_CS)
        {
            /* CS must hold a code segment. */
            if (!(pShwDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
            {
                Log(("selmIsShwDescGoodForSReg: CS needs code segment\n"));
                return false;
            }
        }
        /* Execute-only code segments cannot be loaded into data selectors. */
        else if ((pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
        {
            Log(("selmIsShwDescGoodForSReg: iSReg=%u execute only\n", iSReg));
            return false;
        }

        /* Unless conforming code, RPL and CPL must not exceed the effective
           DPL (u2Dpl - u1Available); an RPL equal to u1Available is also
           accepted for compressed entries (raw-mode ring adjustment). */
        if (   (pShwDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
            != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
            && (   (   (pSReg->Sel & X86_SEL_RPL) > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available
                    && (pSReg->Sel & X86_SEL_RPL) != pShwDesc->Gen.u1Available )
                || uCpl > (unsigned)pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available ) )
        {
            Log(("selmIsShwDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u\n", iSReg,
                 pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available, uCpl, pSReg->Sel & X86_SEL_RPL));
            return false;
        }
    }

    return true;
}
305
306
307/**
308 * Checks if a guest descriptor table entry is good for the given segment
309 * register.
310 *
311 * @returns @c true if good, @c false if not.
312 * @param pVCpu The current virtual CPU.
313 * @param pSReg The segment register.
314 * @param pGstDesc The guest descriptor table entry.
315 * @param iSReg The segment register index (X86_SREG_XXX).
316 * @param uCpl The CPL.
317 */
/**
 * Checks if a guest descriptor table entry is good for the given segment
 * register.
 *
 * Same validation as selmIsShwDescGoodForSReg, but against the unmodified
 * guest descriptor, so no u1Available/DPL adjustment applies here.
 *
 * @returns @c true if good, @c false if not.
 * @param   pVCpu       The current virtual CPU.
 * @param   pSReg       The segment register.
 * @param   pGstDesc    The guest descriptor table entry.
 * @param   iSReg       The segment register index (X86_SREG_XXX).
 * @param   uCpl        The CPL.
 */
DECLINLINE(bool) selmIsGstDescGoodForSReg(PVMCPU pVCpu, PCCPUMSELREG pSReg, PCX86DESC pGstDesc, uint32_t iSReg, uint32_t uCpl)
{
    /*
     * See iemMiscValidateNewSS, iemCImpl_LoadSReg and intel+amd manuals.
     */

    /* A segment register can never hold a not-present descriptor. */
    if (!pGstDesc->Gen.u1Present)
    {
        Log(("selmIsGstDescGoodForSReg: Not present\n"));
        return false;
    }

    /* System descriptors (TSS, gates, LDT) cannot be loaded into segment registers. */
    if (!pGstDesc->Gen.u1DescType)
    {
        Log(("selmIsGstDescGoodForSReg: System descriptor\n"));
        return false;
    }

    if (iSReg == X86_SREG_SS)
    {
        /* SS requires a writable data segment. */
        if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
        {
            Log(("selmIsGstDescGoodForSReg: Stack must be writable\n"));
            return false;
        }
        if (uCpl > pGstDesc->Gen.u2Dpl)
        {
            Log(("selmIsGstDescGoodForSReg: CPL(%d) > DPL(%d)\n", uCpl, pGstDesc->Gen.u2Dpl));
            return false;
        }
    }
    else
    {
        if (iSReg == X86_SREG_CS)
        {
            /* CS must hold a code segment. */
            if (!(pGstDesc->Gen.u4Type & X86_SEL_TYPE_CODE))
            {
                Log(("selmIsGstDescGoodForSReg: CS needs code segment\n"));
                return false;
            }
        }
        /* Execute-only code segments cannot be loaded into data selectors. */
        else if ((pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
        {
            Log(("selmIsGstDescGoodForSReg: iSReg=%u execute only\n", iSReg));
            return false;
        }

        /* Unless conforming code, RPL and CPL must not exceed the DPL.
           Exception: in raw mode an RPL of 1 is tolerated, since ring
           compression runs guest ring-0 code with RPL 1. */
        if (   (pGstDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
            != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
            && (   (   (pSReg->Sel & X86_SEL_RPL) > pGstDesc->Gen.u2Dpl
                    && (   (pSReg->Sel & X86_SEL_RPL) != 1
                        || !CPUMIsGuestInRawMode(pVCpu) ) )
                || uCpl > (unsigned)pGstDesc->Gen.u2Dpl
               )
           )
        {
            Log(("selmIsGstDescGoodForSReg: iSReg=%u DPL=%u CPL=%u RPL=%u InRawMode=%u\n", iSReg,
                 pGstDesc->Gen.u2Dpl, uCpl, pSReg->Sel & X86_SEL_RPL, CPUMIsGuestInRawMode(pVCpu)));
            return false;
        }
    }

    return true;
}
382
383
384/**
385 * Converts a guest GDT or LDT entry to a shadow table entry.
386 *
387 * @param pVM The VM handle.
388 * @param pDesc Guest entry on input, shadow entry on return.
389 */
/**
 * Converts a guest GDT or LDT entry to a shadow table entry.
 *
 * Applies the raw-mode ring compression: DPL 0 (and, with
 * VBOX_WITH_RAW_RING1, DPL 1) non-conforming segments are pushed down one
 * ring, with u1Available recording the adjustment so it can be undone later
 * (see selmLoadHiddenSRegFromShadowDesc).
 *
 * @param   pVM     The VM handle (only referenced by disabled code).
 * @param   pDesc   Guest entry on input, shadow entry on return.
 */
DECL_FORCE_INLINE(void) selmGuestToShadowDesc(PVM pVM, PX86DESC pDesc)
{
    /*
     * Code and data selectors are generally 1:1, with the
     * 'little' adjustment we do for DPL 0 selectors.
     */
    if (pDesc->Gen.u1DescType)
    {
        /*
         * Hack for A-bit against Trap E on read-only GDT.
         */
        /** @todo Fix this by loading ds and cs before turning off WP. */
        pDesc->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;

        /*
         * All DPL 0 code and data segments are squeezed into DPL 1.
         *
         * We're skipping conforming segments here because those
         * cannot give us any trouble.
         */
        if (   pDesc->Gen.u2Dpl == 0
            && (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
                != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
        {
            pDesc->Gen.u2Dpl       = 1;
            /* Mark the entry as DPL-compressed. */
            pDesc->Gen.u1Available = 1;
        }
# ifdef VBOX_WITH_RAW_RING1
        /* With raw ring-1 support, guest ring 1 is likewise pushed down to ring 2. */
        else
        if (   pDesc->Gen.u2Dpl == 1
//             && EMIsRawRing1Enabled(pVM)
            && (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
                != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
        {
            pDesc->Gen.u2Dpl       = 2;
            pDesc->Gen.u1Available = 1;
        }
# endif
        else
            pDesc->Gen.u1Available = 0;
    }
    else
    {
        /*
         * System type selectors are marked not present.
         * Recompiler or special handling is required for these.
         */
        /** @todo what about interrupt gates and rawr0? */
        pDesc->Gen.u1Present = 0;
    }
}
441
442
443/**
444 * Checks if a segment register is stale given the shadow descriptor table
445 * entry.
446 *
447 * @returns @c true if stale, @c false if not.
448 * @param pSReg The segment register.
449 * @param pShwDesc The shadow descriptor entry.
450 * @param iSReg The segment register number (X86_SREG_XXX).
451 */
452DECLINLINE(bool) selmIsSRegStale32(PCCPUMSELREG pSReg, PCX86DESC pShwDesc, uint32_t iSReg)
453{
454 if ( pSReg->Attr.n.u1Present != pShwDesc->Gen.u1Present
455 || pSReg->Attr.n.u4Type != pShwDesc->Gen.u4Type
456 || pSReg->Attr.n.u1DescType != pShwDesc->Gen.u1DescType
457 || pSReg->Attr.n.u1DefBig != pShwDesc->Gen.u1DefBig
458 || pSReg->Attr.n.u1Granularity != pShwDesc->Gen.u1Granularity
459 || pSReg->Attr.n.u2Dpl != pShwDesc->Gen.u2Dpl - pShwDesc->Gen.u1Available)
460 {
461 Log(("selmIsSRegStale32: Attributes changed (%#x -> %#x)\n", pSReg->Attr.u, X86DESC_GET_HID_ATTR(pShwDesc)));
462 return true;
463 }
464
465 if (pSReg->u64Base != X86DESC_BASE(pShwDesc))
466 {
467 Log(("selmIsSRegStale32: base changed (%#llx -> %#llx)\n", pSReg->u64Base, X86DESC_BASE(pShwDesc)));
468 return true;
469 }
470
471 if (pSReg->u32Limit != X86DESC_LIMIT_G(pShwDesc))
472 {
473 Log(("selmIsSRegStale32: limit changed (%#x -> %#x)\n", pSReg->u32Limit, X86DESC_LIMIT_G(pShwDesc)));
474 return true;
475 }
476
477 return false;
478}
479
480
481/**
482 * Loads the hidden bits of a selector register from a shadow descriptor table
483 * entry.
484 *
485 * @param pSReg The segment register in question.
486 * @param pShwDesc The shadow descriptor table entry.
487 */
488DECLINLINE(void) selmLoadHiddenSRegFromShadowDesc(PCPUMSELREG pSReg, PCX86DESC pShwDesc)
489{
490 pSReg->Attr.u = X86DESC_GET_HID_ATTR(pShwDesc);
491 pSReg->Attr.n.u2Dpl -= pSReg->Attr.n.u1Available;
492 Assert(pSReg->Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
493 pSReg->u32Limit = X86DESC_LIMIT_G(pShwDesc);
494 pSReg->u64Base = X86DESC_BASE(pShwDesc);
495 pSReg->ValidSel = pSReg->Sel;
496 if (pSReg->Attr.n.u1Available)
497 pSReg->ValidSel &= ~(RTSEL)1;
498 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
499}
500
501
502/**
503 * Loads the hidden bits of a selector register from a guest descriptor table
504 * entry.
505 *
506 * @param pVCpu The current virtual CPU.
507 * @param pSReg The segment register in question.
508 * @param pGstDesc The guest descriptor table entry.
509 */
510DECLINLINE(void) selmLoadHiddenSRegFromGuestDesc(PVMCPU pVCpu, PCPUMSELREG pSReg, PCX86DESC pGstDesc)
511{
512 pSReg->Attr.u = X86DESC_GET_HID_ATTR(pGstDesc);
513 pSReg->Attr.n.u4Type |= X86_SEL_TYPE_ACCESSED;
514 pSReg->u32Limit = X86DESC_LIMIT_G(pGstDesc);
515 pSReg->u64Base = X86DESC_BASE(pGstDesc);
516 pSReg->ValidSel = pSReg->Sel;
517 if ((pSReg->ValidSel & 1) && CPUMIsGuestInRawMode(pVCpu))
518 pSReg->ValidSel &= ~(RTSEL)1;
519 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
520}
521
522#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
523
524/** @} */
525
526#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette