VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp@17119

Last change on this file since 17119 was 17035, checked in by vboxsync, 16 years ago

VMM,REM: Brushed up the TR/TSS shadowing. We're now relying on the hidden TR registers in SELM and CPUM/REM will make sure these are always in sync. Joined CPUMGetGuestTRHid and CPUMGetGuestTR. Kicked out sync_tr (unused now) and SELMGCGetRing1Stack.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 39.1 KB
1/* $Id: SELMAll.cpp 17035 2009-02-23 22:26:39Z vboxsync $ */
2/** @file
3 * SELM All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_SELM
27#include <VBox/selm.h>
28#include <VBox/stam.h>
29#include <VBox/mm.h>
30#include <VBox/pgm.h>
31#include "SELMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/x86.h>
34#include <VBox/err.h>
35#include <VBox/param.h>
36#include <iprt/assert.h>
37#include <VBox/log.h>
38
39
40
41#ifndef IN_RING0
42
43/**
44 * Converts a GC selector based address to a flat address.
45 *
46 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
47 * for that.
48 *
49 * @returns Flat address.
50 * @param pVM VM Handle.
51 * @param Sel Selector part.
52 * @param Addr Address part.
53 * @remarks Don't use when in long mode.
54 */
55VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
56{
57 Assert(!CPUMIsGuestInLongMode(pVM)); /* DON'T USE! */
58
59 /** @todo check the limit. */
60 X86DESC Desc;
61 if (!(Sel & X86_SEL_LDT))
62 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
63 else
64 {
65 /** @todo handle LDT pages not present! */
66 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
67 Desc = paLDT[Sel >> X86_SEL_SHIFT];
68 }
69
70 return (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
71}
72#endif /* !IN_RING0 */
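
/*
 * Usage sketch for SELMToFlatBySel above - a minimal illustration only.  The
 * selector uSel and offset off are assumed to have been pulled out of the
 * guest state by the caller; no limit or presence checks are made here, and
 * the API must not be used while the guest is in long mode.
 *
 *     RTGCPTR GCPtrFlat = SELMToFlatBySel(pVM, uSel, (RTGCPTR)off);
 *     Log(("flat address %RGv\n", GCPtrFlat));
 */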
73
74
75/**
76 * Converts a GC selector based address to a flat address.
77 *
78 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
79 * for that.
80 *
81 * @returns Flat address.
82 * @param pVM VM Handle.
83 * @param SelReg Selector register
84 * @param pCtxCore CPU context
85 * @param Addr Address part.
86 */
87VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DIS_SELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
88{
89 PCPUMSELREGHID pHiddenSel;
90 RTSEL Sel;
91 int rc;
92
93 rc = DISFetchRegSegEx(pCtxCore, SelReg, &Sel, &pHiddenSel); AssertRC(rc);
94
95 /*
96 * Deal with real & v86 mode first.
97 */
98 if ( CPUMIsGuestInRealMode(pVM)
99 || pCtxCore->eflags.Bits.u1VM)
100 {
101 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
102 if (CPUMAreHiddenSelRegsValid(pVM))
103 uFlat += pHiddenSel->u64Base;
104 else
105 uFlat += ((RTGCUINTPTR)Sel << 4);
106 return (RTGCPTR)uFlat;
107 }
108
109#ifdef IN_RING0
110 Assert(CPUMAreHiddenSelRegsValid(pVM));
111#else
112 /** @todo when we're in 16-bit mode, we should cut off the address as well. */
113 if (!CPUMAreHiddenSelRegsValid(pVM))
114 return SELMToFlatBySel(pVM, Sel, Addr);
115#endif
116
117 /* 64-bit mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
118 if ( CPUMIsGuestInLongMode(pVM)
119 && pCtxCore->csHid.Attr.n.u1Long)
120 {
121 switch (SelReg)
122 {
123 case DIS_SELREG_FS:
124 case DIS_SELREG_GS:
125 return (RTGCPTR)(pHiddenSel->u64Base + Addr);
126
127 default:
128 return Addr; /* base 0 */
129 }
130 }
131
132 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
133 Assert(pHiddenSel->u64Base <= 0xffffffff);
134 return ((pHiddenSel->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
135}
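
/*
 * Usage sketch for SELMToFlat above, assuming the caller holds a guest
 * register frame pRegFrame (PCPUMCTXCORE) and wants the flat address of
 * ds:esi.  Remember that no limit checking is done by this API; use
 * SELMToFlatEx when bounds and type checks matter.
 *
 *     RTGCPTR GCPtrSrc = SELMToFlat(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->esi);
 */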
136
137
138/**
139 * Converts a GC selector based address to a flat address.
140 *
141 * Some basic checking is done, but not all kinds yet.
142 *
143 * @returns VBox status
144 * @param pVM VM Handle.
145 * @param SelReg Selector register
146 * @param pCtxCore CPU context
147 * @param Addr Address part.
148 * @param fFlags SELMTOFLAT_FLAGS_*
149 * GDT entries are valid.
150 * @param ppvGC Where to store the GC flat address.
151 */
152VMMDECL(int) SELMToFlatEx(PVM pVM, DIS_SELREG SelReg, PCCPUMCTXCORE pCtxCore, RTGCPTR Addr, unsigned fFlags, PRTGCPTR ppvGC)
153{
154 /*
155 * Fetch the selector first.
156 */
157 PCPUMSELREGHID pHiddenSel;
158 RTSEL Sel;
159 int rc = DISFetchRegSegEx(pCtxCore, SelReg, &Sel, &pHiddenSel);
160 AssertRC(rc);
161
162 /*
163 * Deal with real & v86 mode first.
164 */
165 if ( CPUMIsGuestInRealMode(pVM)
166 || pCtxCore->eflags.Bits.u1VM)
167 {
168 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
169 if (ppvGC)
170 {
171 if ( pHiddenSel
172 && CPUMAreHiddenSelRegsValid(pVM))
173 *ppvGC = (RTGCPTR)(pHiddenSel->u64Base + uFlat);
174 else
175 *ppvGC = (RTGCPTR)(((RTGCUINTPTR)Sel << 4) + uFlat);
176 }
177 return VINF_SUCCESS;
178 }
179
180
181 uint32_t u32Limit;
182 RTGCPTR pvFlat;
183 uint32_t u1Present, u1DescType, u1Granularity, u4Type;
184
185 /** @todo when we're in 16-bit mode, we should cut off the address as well. */
186#ifndef IN_RC
187 if ( pHiddenSel
188 && CPUMAreHiddenSelRegsValid(pVM))
189 {
190 bool fCheckLimit = true;
191
192 u1Present = pHiddenSel->Attr.n.u1Present;
193 u1Granularity = pHiddenSel->Attr.n.u1Granularity;
194 u1DescType = pHiddenSel->Attr.n.u1DescType;
195 u4Type = pHiddenSel->Attr.n.u4Type;
196 u32Limit = pHiddenSel->u32Limit;
197
198 /* 64-bit mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
199 if ( CPUMIsGuestInLongMode(pVM)
200 && pCtxCore->csHid.Attr.n.u1Long)
201 {
202 fCheckLimit = false;
203 switch (SelReg)
204 {
205 case DIS_SELREG_FS:
206 case DIS_SELREG_GS:
207 pvFlat = (pHiddenSel->u64Base + Addr);
208 break;
209
210 default:
211 pvFlat = Addr;
212 break;
213 }
214 }
215 else
216 {
217 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
218 Assert(pHiddenSel->u64Base <= 0xffffffff);
219 pvFlat = (RTGCPTR)((pHiddenSel->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
220 }
221
222 /*
223 * Check if present.
224 */
225 if (u1Present)
226 {
227 /*
228 * Type check.
229 */
230 switch (u4Type)
231 {
232
233 /** Read only selector type. */
234 case X86_SEL_TYPE_RO:
235 case X86_SEL_TYPE_RO_ACC:
236 case X86_SEL_TYPE_RW:
237 case X86_SEL_TYPE_RW_ACC:
238 case X86_SEL_TYPE_EO:
239 case X86_SEL_TYPE_EO_ACC:
240 case X86_SEL_TYPE_ER:
241 case X86_SEL_TYPE_ER_ACC:
242 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
243 {
244 /** @todo fix this mess */
245 }
246 /* check limit. */
247 if (fCheckLimit && (RTGCUINTPTR)Addr > u32Limit)
248 return VERR_OUT_OF_SELECTOR_BOUNDS;
249 /* ok */
250 if (ppvGC)
251 *ppvGC = pvFlat;
252 return VINF_SUCCESS;
253
254 case X86_SEL_TYPE_EO_CONF:
255 case X86_SEL_TYPE_EO_CONF_ACC:
256 case X86_SEL_TYPE_ER_CONF:
257 case X86_SEL_TYPE_ER_CONF_ACC:
258 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
259 {
260 /** @todo fix this mess */
261 }
262 /* check limit. */
263 if (fCheckLimit && (RTGCUINTPTR)Addr > u32Limit)
264 return VERR_OUT_OF_SELECTOR_BOUNDS;
265 /* ok */
266 if (ppvGC)
267 *ppvGC = pvFlat;
268 return VINF_SUCCESS;
269
270 case X86_SEL_TYPE_RO_DOWN:
271 case X86_SEL_TYPE_RO_DOWN_ACC:
272 case X86_SEL_TYPE_RW_DOWN:
273 case X86_SEL_TYPE_RW_DOWN_ACC:
274 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
275 {
276 /** @todo fix this mess */
277 }
278 /* check limit. */
279 if (fCheckLimit)
280 {
281 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
282 return VERR_OUT_OF_SELECTOR_BOUNDS;
283 if ((RTGCUINTPTR)Addr <= u32Limit)
284 return VERR_OUT_OF_SELECTOR_BOUNDS;
285 }
286 /* ok */
287 if (ppvGC)
288 *ppvGC = pvFlat;
289 return VINF_SUCCESS;
290
291 default:
292 return VERR_INVALID_SELECTOR;
293
294 }
295 }
296 }
297# ifndef IN_RING0
298 else
299# endif
300#endif /* !IN_RC */
301#ifndef IN_RING0
302 {
303 X86DESC Desc;
304
305 if (!(Sel & X86_SEL_LDT))
306 {
307 if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
308 && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt)
309 return VERR_INVALID_SELECTOR;
310 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
311 }
312 else
313 {
314 if ((unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.cbLdtLimit)
315 return VERR_INVALID_SELECTOR;
316
317 /** @todo handle LDT page(s) not present! */
318 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
319 Desc = paLDT[Sel >> X86_SEL_SHIFT];
320 }
321
322 /* calc limit. */
323 u32Limit = X86DESC_LIMIT(Desc);
324 if (Desc.Gen.u1Granularity)
325 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
326
327 /* calc address assuming straight stuff. */
328 pvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
329
330 u1Present = Desc.Gen.u1Present;
331 u1Granularity = Desc.Gen.u1Granularity;
332 u1DescType = Desc.Gen.u1DescType;
333 u4Type = Desc.Gen.u4Type;
334
335 /*
336 * Check if present.
337 */
338 if (u1Present)
339 {
340 /*
341 * Type check.
342 */
343# define BOTH(a, b) ((a << 16) | b)
344 switch (BOTH(u1DescType, u4Type))
345 {
346
347 /** Read only selector type. */
348 case BOTH(1,X86_SEL_TYPE_RO):
349 case BOTH(1,X86_SEL_TYPE_RO_ACC):
350 case BOTH(1,X86_SEL_TYPE_RW):
351 case BOTH(1,X86_SEL_TYPE_RW_ACC):
352 case BOTH(1,X86_SEL_TYPE_EO):
353 case BOTH(1,X86_SEL_TYPE_EO_ACC):
354 case BOTH(1,X86_SEL_TYPE_ER):
355 case BOTH(1,X86_SEL_TYPE_ER_ACC):
356 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
357 {
358 /** @todo fix this mess */
359 }
360 /* check limit. */
361 if ((RTGCUINTPTR)Addr > u32Limit)
362 return VERR_OUT_OF_SELECTOR_BOUNDS;
363 /* ok */
364 if (ppvGC)
365 *ppvGC = pvFlat;
366 return VINF_SUCCESS;
367
368 case BOTH(1,X86_SEL_TYPE_EO_CONF):
369 case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
370 case BOTH(1,X86_SEL_TYPE_ER_CONF):
371 case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
372 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
373 {
374 /** @todo fix this mess */
375 }
376 /* check limit. */
377 if ((RTGCUINTPTR)Addr > u32Limit)
378 return VERR_OUT_OF_SELECTOR_BOUNDS;
379 /* ok */
380 if (ppvGC)
381 *ppvGC = pvFlat;
382 return VINF_SUCCESS;
383
384 case BOTH(1,X86_SEL_TYPE_RO_DOWN):
385 case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
386 case BOTH(1,X86_SEL_TYPE_RW_DOWN):
387 case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
388 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
389 {
390 /** @todo fix this mess */
391 }
392 /* check limit. */
393 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
394 return VERR_OUT_OF_SELECTOR_BOUNDS;
395 if ((RTGCUINTPTR)Addr <= u32Limit)
396 return VERR_OUT_OF_SELECTOR_BOUNDS;
397
398 /* ok */
399 if (ppvGC)
400 *ppvGC = pvFlat;
401 return VINF_SUCCESS;
402
403 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
404 case BOTH(0,X86_SEL_TYPE_SYS_LDT):
405 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
406 case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
407 case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
408 case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
409 case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
410 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
411 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
412 case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
413 case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
414 case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
415 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
416 {
417 /** @todo fix this mess */
418 }
419 /* check limit. */
420 if ((RTGCUINTPTR)Addr > u32Limit)
421 return VERR_OUT_OF_SELECTOR_BOUNDS;
422 /* ok */
423 if (ppvGC)
424 *ppvGC = pvFlat;
425 return VINF_SUCCESS;
426
427 default:
428 return VERR_INVALID_SELECTOR;
429
430 }
431# undef BOTH
432 }
433 }
434#endif /* !IN_RING0 */
435 return VERR_SELECTOR_NOT_PRESENT;
436}
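
/*
 * Usage sketch for SELMToFlatEx above: converting es:edi with the basic limit
 * and type checks applied.  pRegFrame and the choice of DIS_SELREG_ES are
 * assumptions made for the example; SELMTOFLAT_FLAGS_NO_PL skips the
 * privilege level checks (still a @todo above).
 *
 *     RTGCPTR GCPtrDst;
 *     int rc2 = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->edi,
 *                            SELMTOFLAT_FLAGS_NO_PL, &GCPtrDst);
 *     if (RT_FAILURE(rc2))
 *         return rc2; /* VERR_OUT_OF_SELECTOR_BOUNDS, VERR_INVALID_SELECTOR, VERR_SELECTOR_NOT_PRESENT, ... */
 */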
437
438
439#ifndef IN_RING0
440/**
441 * Converts a GC selector based address to a flat address.
442 *
443 * Some basic checking is done, but not all kinds yet.
444 *
445 * @returns VBox status
446 * @param pVM VM Handle.
447 * @param eflags Current eflags
448 * @param Sel Selector part.
449 * @param Addr Address part.
450 * @param pHiddenSel Hidden selector register (can be NULL)
451 * @param fFlags SELMTOFLAT_FLAGS_*
452 * GDT entries are valid.
453 * @param ppvGC Where to store the GC flat address.
454 * @param pcb Where to store the bytes from *ppvGC which can be accessed according to
455 * the selector. NULL is allowed.
456 * @remarks Don't use when in long mode.
457 */
458VMMDECL(int) SELMToFlatBySelEx(PVM pVM, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr, CPUMSELREGHID *pHiddenSel, unsigned fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
459{
460 Assert(!CPUMIsGuestInLongMode(pVM)); /* DON'T USE! */
461
462 /*
463 * Deal with real & v86 mode first.
464 */
465 if ( CPUMIsGuestInRealMode(pVM)
466 || eflags.Bits.u1VM)
467 {
468 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
469 if (ppvGC)
470 {
471 if ( pHiddenSel
472 && CPUMAreHiddenSelRegsValid(pVM))
473 *ppvGC = (RTGCPTR)(pHiddenSel->u64Base + uFlat);
474 else
475 *ppvGC = (RTGCPTR)(((RTGCUINTPTR)Sel << 4) + uFlat);
476 }
477 if (pcb)
478 *pcb = 0x10000 - uFlat;
479 return VINF_SUCCESS;
480 }
481
482
483 uint32_t u32Limit;
484 RTGCPTR pvFlat;
485 uint32_t u1Present, u1DescType, u1Granularity, u4Type;
486
487 /** @todo when we're in 16-bit mode, we should cut off the address as well. */
488 if ( pHiddenSel
489 && CPUMAreHiddenSelRegsValid(pVM))
490 {
491 u1Present = pHiddenSel->Attr.n.u1Present;
492 u1Granularity = pHiddenSel->Attr.n.u1Granularity;
493 u1DescType = pHiddenSel->Attr.n.u1DescType;
494 u4Type = pHiddenSel->Attr.n.u4Type;
495
496 u32Limit = pHiddenSel->u32Limit;
497 pvFlat = (RTGCPTR)(pHiddenSel->u64Base + (RTGCUINTPTR)Addr);
498
499 if ( !CPUMIsGuestInLongMode(pVM)
500 || !pHiddenSel->Attr.n.u1Long)
501 {
502 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
503 pvFlat &= 0xffffffff;
504 }
505 }
506 else
507 {
508 X86DESC Desc;
509
510 if (!(Sel & X86_SEL_LDT))
511 {
512 if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
513 && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt)
514 return VERR_INVALID_SELECTOR;
515 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
516 }
517 else
518 {
519 if ((unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.cbLdtLimit)
520 return VERR_INVALID_SELECTOR;
521
522 /** @todo handle LDT page(s) not present! */
523 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
524 Desc = paLDT[Sel >> X86_SEL_SHIFT];
525 }
526
527 /* calc limit. */
528 u32Limit = X86DESC_LIMIT(Desc);
529 if (Desc.Gen.u1Granularity)
530 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
531
532 /* calc address assuming straight stuff. */
533 pvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
534
535 u1Present = Desc.Gen.u1Present;
536 u1Granularity = Desc.Gen.u1Granularity;
537 u1DescType = Desc.Gen.u1DescType;
538 u4Type = Desc.Gen.u4Type;
539 }
540
541 /*
542 * Check if present.
543 */
544 if (u1Present)
545 {
546 /*
547 * Type check.
548 */
549#define BOTH(a, b) ((a << 16) | b)
550 switch (BOTH(u1DescType, u4Type))
551 {
552
553 /** Read only selector type. */
554 case BOTH(1,X86_SEL_TYPE_RO):
555 case BOTH(1,X86_SEL_TYPE_RO_ACC):
556 case BOTH(1,X86_SEL_TYPE_RW):
557 case BOTH(1,X86_SEL_TYPE_RW_ACC):
558 case BOTH(1,X86_SEL_TYPE_EO):
559 case BOTH(1,X86_SEL_TYPE_EO_ACC):
560 case BOTH(1,X86_SEL_TYPE_ER):
561 case BOTH(1,X86_SEL_TYPE_ER_ACC):
562 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
563 {
564 /** @todo fix this mess */
565 }
566 /* check limit. */
567 if ((RTGCUINTPTR)Addr > u32Limit)
568 return VERR_OUT_OF_SELECTOR_BOUNDS;
569 /* ok */
570 if (ppvGC)
571 *ppvGC = pvFlat;
572 if (pcb)
573 *pcb = u32Limit - (uint32_t)Addr + 1;
574 return VINF_SUCCESS;
575
576 case BOTH(1,X86_SEL_TYPE_EO_CONF):
577 case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
578 case BOTH(1,X86_SEL_TYPE_ER_CONF):
579 case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
580 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
581 {
582 /** @todo fix this mess */
583 }
584 /* check limit. */
585 if ((RTGCUINTPTR)Addr > u32Limit)
586 return VERR_OUT_OF_SELECTOR_BOUNDS;
587 /* ok */
588 if (ppvGC)
589 *ppvGC = pvFlat;
590 if (pcb)
591 *pcb = u32Limit - (uint32_t)Addr + 1;
592 return VINF_SUCCESS;
593
594 case BOTH(1,X86_SEL_TYPE_RO_DOWN):
595 case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
596 case BOTH(1,X86_SEL_TYPE_RW_DOWN):
597 case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
598 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
599 {
600 /** @todo fix this mess */
601 }
602 /* check limit. */
603 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
604 return VERR_OUT_OF_SELECTOR_BOUNDS;
605 if ((RTGCUINTPTR)Addr <= u32Limit)
606 return VERR_OUT_OF_SELECTOR_BOUNDS;
607
608 /* ok */
609 if (ppvGC)
610 *ppvGC = pvFlat;
611 if (pcb)
612 *pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
613 return VINF_SUCCESS;
614
615 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
616 case BOTH(0,X86_SEL_TYPE_SYS_LDT):
617 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
618 case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
619 case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
620 case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
621 case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
622 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
623 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
624 case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
625 case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
626 case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
627 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
628 {
629 /** @todo fix this mess */
630 }
631 /* check limit. */
632 if ((RTGCUINTPTR)Addr > u32Limit)
633 return VERR_OUT_OF_SELECTOR_BOUNDS;
634 /* ok */
635 if (ppvGC)
636 *ppvGC = pvFlat;
637 if (pcb)
638 *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
639 return VINF_SUCCESS;
640
641 default:
642 return VERR_INVALID_SELECTOR;
643
644 }
645#undef BOTH
646 }
647 return VERR_SELECTOR_NOT_PRESENT;
648}
649#endif /* !IN_RING0 */
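
/*
 * Usage sketch for SELMToFlatBySelEx above.  Besides the flat address it can
 * report how many bytes are accessible from that address (pcb), which helps
 * when clamping an access to the segment limit.  eflags, uSel and off are
 * assumed to come from the caller's guest context; pHiddenSel may be NULL,
 * and the API must not be used in long mode.
 *
 *     RTGCPTR  GCPtr;
 *     uint32_t cbMax;
 *     int rc2 = SELMToFlatBySelEx(pVM, eflags, uSel, (RTGCPTR)off, NULL /* pHiddenSel */,
 *                                 SELMTOFLAT_FLAGS_NO_PL, &GCPtr, &cbMax);
 */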
650
651
652/**
653 * Validates and converts a GC selector based code address to a flat
654 * address when in real or v8086 mode.
655 *
656 * @returns VINF_SUCCESS.
657 * @param pVM VM Handle.
658 * @param SelCS Selector part.
659 * @param pHidCS The hidden CS register part. Optional.
660 * @param Addr Address part.
661 * @param ppvFlat Where to store the flat address.
662 */
663DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVM pVM, RTSEL SelCS, PCPUMSELREGHID pHidCS, RTGCPTR Addr, PRTGCPTR ppvFlat)
664{
665 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
666 if (!pHidCS || !CPUMAreHiddenSelRegsValid(pVM))
667 uFlat += ((RTGCUINTPTR)SelCS << 4);
668 else
669 uFlat += pHidCS->u64Base;
670 *ppvFlat = (RTGCPTR)uFlat;
671 return VINF_SUCCESS;
672}
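
/*
 * Worked example of the real mode calculation above: with SelCS=0xf000,
 * Addr=0xfff0 (the reset vector) and no valid hidden register, the result is
 * (0xf000 << 4) + 0xfff0 = 0xffff0.  When the hidden CS register is valid its
 * base is used instead of SelCS << 4.
 */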
673
674
675#ifndef IN_RING0
676/**
677 * Validates and converts a GC selector based code address to a flat
678 * address when in protected/long mode using the standard algorithm.
679 *
680 * @returns VBox status code.
681 * @param pVM VM Handle.
682 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
683 * A full selector can be passed, we'll only use the RPL part.
684 * @param SelCS Selector part.
685 * @param Addr Address part.
686 * @param ppvFlat Where to store the flat address.
687 * @param pcBits Where to store the segment bitness (16/32/64). Optional.
688 */
689DECLINLINE(int) selmValidateAndConvertCSAddrStd(PVM pVM, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr, PRTGCPTR ppvFlat, uint32_t *pcBits)
690{
691 Assert(!CPUMAreHiddenSelRegsValid(pVM));
692
693 /** @todo validate limit! */
694 X86DESC Desc;
695 if (!(SelCS & X86_SEL_LDT))
696 Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
697 else
698 {
699 /** @todo handle LDT page(s) not present! */
700 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
701 Desc = paLDT[SelCS >> X86_SEL_SHIFT];
702 }
703
704 /*
705 * Check if present.
706 */
707 if (Desc.Gen.u1Present)
708 {
709 /*
710 * Type check.
711 */
712 if ( Desc.Gen.u1DescType == 1
713 && (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
714 {
715 /*
716 * Check level.
717 */
718 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
719 if ( !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
720 ? uLevel <= Desc.Gen.u2Dpl
721 : uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
722 )
723 {
724 /*
725 * Limit check.
726 */
727 uint32_t u32Limit = X86DESC_LIMIT(Desc);
728 if (Desc.Gen.u1Granularity)
729 u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
730 if ((RTGCUINTPTR)Addr <= u32Limit)
731 {
732 *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
733 if (pcBits)
734 *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
735 return VINF_SUCCESS;
736 }
737 return VERR_OUT_OF_SELECTOR_BOUNDS;
738 }
739 return VERR_INVALID_RPL;
740 }
741 return VERR_NOT_CODE_SELECTOR;
742 }
743 return VERR_SELECTOR_NOT_PRESENT;
744}
745#endif /* !IN_RING0 */
746
747
748/**
749 * Validates and converts a GC selector based code address to a flat
750 * address when in protected/long mode using the hidden (cached) CS register.
751 *
752 * @returns VBox status code.
753 * @param pVM VM Handle.
754 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
755 * A full selector can be passed, we'll only use the RPL part.
756 * @param SelCS Selector part.
 * @param pHidCS The hidden CS register.
757 * @param Addr Address part.
758 * @param ppvFlat Where to store the flat address.
759 */
760DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVM pVM, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREGHID pHidCS, RTGCPTR Addr, PRTGCPTR ppvFlat)
761{
762 /*
763 * Check if present.
764 */
765 if (pHidCS->Attr.n.u1Present)
766 {
767 /*
768 * Type check.
769 */
770 if ( pHidCS->Attr.n.u1DescType == 1
771 && (pHidCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
772 {
773 /*
774 * Check level.
775 */
776 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
777 if ( !(pHidCS->Attr.n.u4Type & X86_SEL_TYPE_CONF)
778 ? uLevel <= pHidCS->Attr.n.u2Dpl
779 : uLevel >= pHidCS->Attr.n.u2Dpl /* hope I got this right now... */
780 )
781 {
782 /* 64-bit mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
783 if ( CPUMIsGuestInLongMode(pVM)
784 && pHidCS->Attr.n.u1Long)
785 {
786 *ppvFlat = Addr;
787 return VINF_SUCCESS;
788 }
789
790 /*
791 * Limit check. Note that the limit in the hidden register is the
792 * final value. The granularity bit was included in its calculation.
793 */
794 uint32_t u32Limit = pHidCS->u32Limit;
795 if ((RTGCUINTPTR)Addr <= u32Limit)
796 {
797 *ppvFlat = (RTGCPTR)( (RTGCUINTPTR)Addr + pHidCS->u64Base );
798 return VINF_SUCCESS;
799 }
800 return VERR_OUT_OF_SELECTOR_BOUNDS;
801 }
802 Log(("Invalid RPL Attr.n.u4Type=%x cpl=%x dpl=%x\n", pHidCS->Attr.n.u4Type, uLevel, pHidCS->Attr.n.u2Dpl));
803 return VERR_INVALID_RPL;
804 }
805 return VERR_NOT_CODE_SELECTOR;
806 }
807 return VERR_SELECTOR_NOT_PRESENT;
808}
809
810
811#ifdef IN_RC
812/**
813 * Validates and converts a GC selector based code address to a flat address.
814 *
815 * This is like SELMValidateAndConvertCSAddr + SELMIsSelector32Bit but with
816 * invalid hidden CS data. It's customized for dealing efficiently with CS
817 * at GC trap time.
818 *
819 * @returns VBox status code.
820 * @param pVM VM Handle.
821 * @param eflags Current eflags
822 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
823 * A full selector can be passed, we'll only use the RPL part.
824 * @param SelCS Selector part.
825 * @param Addr Address part.
826 * @param ppvFlat Where to store the flat address.
827 * @param pcBits Where to store the 64-bit/32-bit/16-bit indicator.
828 */
829VMMDECL(int) SELMValidateAndConvertCSAddrGCTrap(PVM pVM, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr, PRTGCPTR ppvFlat, uint32_t *pcBits)
830{
831 if ( CPUMIsGuestInRealMode(pVM)
832 || eflags.Bits.u1VM)
833 {
834 *pcBits = 16;
835 return selmValidateAndConvertCSAddrRealMode(pVM, SelCS, NULL, Addr, ppvFlat);
836 }
837 return selmValidateAndConvertCSAddrStd(pVM, SelCPL, SelCS, Addr, ppvFlat, pcBits);
838}
839#endif /* IN_RC */
840
841
842/**
843 * Validates and converts a GC selector based code address to a flat address.
844 *
845 * @returns VBox status code.
846 * @param pVM VM Handle.
847 * @param eflags Current eflags
848 * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
849 * A full selector can be passed, we'll only use the RPL part.
850 * @param SelCS Selector part.
851 * @param pHiddenCSSel The hidden CS selector register.
852 * @param Addr Address part.
853 * @param ppvFlat Where to store the flat address.
854 */
855VMMDECL(int) SELMValidateAndConvertCSAddr(PVM pVM, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, CPUMSELREGHID *pHiddenCSSel, RTGCPTR Addr, PRTGCPTR ppvFlat)
856{
857 if ( CPUMIsGuestInRealMode(pVM)
858 || eflags.Bits.u1VM)
859 return selmValidateAndConvertCSAddrRealMode(pVM, SelCS, pHiddenCSSel, Addr, ppvFlat);
860
861#ifdef IN_RING0
862 Assert(CPUMAreHiddenSelRegsValid(pVM));
863#else
864 /** @todo when we're in 16-bit mode, we should cut off the address as well? (like in selmValidateAndConvertCSAddrRealMode) */
865 if (!CPUMAreHiddenSelRegsValid(pVM))
866 return selmValidateAndConvertCSAddrStd(pVM, SelCPL, SelCS, Addr, ppvFlat, NULL);
867#endif
868 return selmValidateAndConvertCSAddrHidden(pVM, SelCPL, SelCS, pHiddenCSSel, Addr, ppvFlat);
869}
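
/*
 * Usage sketch for SELMValidateAndConvertCSAddr above: validating the guest's
 * cs:eip before disassembling there.  pCtx is assumed to be the guest CPUMCTX
 * held by the caller; note that SS (not CS) supplies the CPL, since CS may be
 * conforming.
 *
 *     RTGCPTR GCPtrCode;
 *     int rc2 = SELMValidateAndConvertCSAddr(pVM, pCtx->eflags, pCtx->ss, pCtx->cs,
 *                                            &pCtx->csHid, (RTGCPTR)pCtx->eip, &GCPtrCode);
 *     if (RT_FAILURE(rc2))
 *         return rc2;
 */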
870
871
872#ifndef IN_RING0
873/**
874 * Return the cpu mode corresponding to the (CS) selector
875 *
876 * @returns DISCPUMODE according to the selector type (16, 32 or 64 bits)
877 * @param pVM VM Handle.
878 * @param Sel The selector.
879 */
880static DISCPUMODE selmGetCpuModeFromSelector(PVM pVM, RTSEL Sel)
881{
882 Assert(!CPUMAreHiddenSelRegsValid(pVM));
883
884 /** @todo validate limit! */
885 X86DESC Desc;
886 if (!(Sel & X86_SEL_LDT))
887 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
888 else
889 {
890 /** @todo handle LDT page(s) not present! */
891 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
892 Desc = paLDT[Sel >> X86_SEL_SHIFT];
893 }
894 return (Desc.Gen.u1DefBig) ? CPUMODE_32BIT : CPUMODE_16BIT;
895}
896#endif /* !IN_RING0 */
897
898
899/**
900 * Return the cpu mode corresponding to the (CS) selector
901 *
902 * @returns DISCPUMODE according to the selector type (16, 32 or 64 bits)
903 * @param pVM VM Handle.
904 * @param eflags Current eflags register
905 * @param Sel The selector.
906 * @param pHiddenSel The hidden selector register.
907 */
908VMMDECL(DISCPUMODE) SELMGetCpuModeFromSelector(PVM pVM, X86EFLAGS eflags, RTSEL Sel, CPUMSELREGHID *pHiddenSel)
909{
910#ifdef IN_RING0
911 Assert(CPUMAreHiddenSelRegsValid(pVM));
912#else /* !IN_RING0 */
913 if (!CPUMAreHiddenSelRegsValid(pVM))
914 {
915 /*
916 * Deal with real & v86 mode first.
917 */
918 if ( CPUMIsGuestInRealMode(pVM)
919 || eflags.Bits.u1VM)
920 return CPUMODE_16BIT;
921
922 return selmGetCpuModeFromSelector(pVM, Sel);
923 }
924#endif /* !IN_RING0 */
925 if ( CPUMIsGuestInLongMode(pVM)
926 && pHiddenSel->Attr.n.u1Long)
927 return CPUMODE_64BIT;
928
929 /* Else compatibility or 32 bits mode. */
930 return (pHiddenSel->Attr.n.u1DefBig) ? CPUMODE_32BIT : CPUMODE_16BIT;
931}
932
933
934/**
935 * Returns Hypervisor's Trap 08 (\#DF) selector.
936 *
937 * @returns Hypervisor's Trap 08 (\#DF) selector.
938 * @param pVM VM Handle.
939 */
940VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
941{
942 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
943}
944
945
946/**
947 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
948 *
949 * @param pVM VM Handle.
950 * @param u32EIP EIP of Trap 08 handler.
951 */
952VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
953{
954 pVM->selm.s.TssTrap08.eip = u32EIP;
955}
956
957
958/**
959 * Sets ss:esp for ring1 in main Hypervisor's TSS.
960 *
961 * @param pVM VM Handle.
962 * @param ss Ring1 SS register value. Pass 0 if invalid.
963 * @param esp Ring1 ESP register value.
964 */
965void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
966{
967 Assert((ss & 1) || esp == 0);
968 pVM->selm.s.Tss.ss1 = ss;
969 pVM->selm.s.Tss.esp1 = (uint32_t)esp;
970}
971
972
973#ifndef IN_RING0
974/**
975 * Gets ss:esp for ring1 in main Hypervisor's TSS.
976 *
977 * Returns SS=0 if the ring-1 stack isn't valid.
978 *
979 * @returns VBox status code.
980 * @param pVM VM Handle.
981 * @param pSS Ring1 SS register value.
982 * @param pEsp Ring1 ESP register value.
983 */
984VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
985{
986 if (pVM->selm.s.fSyncTSSRing0Stack)
987 {
988 RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
989 int rc;
990 VBOXTSS tss;
991
992 Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);
993
994# ifdef IN_RC
995 bool fTriedAlready = false;
996
997l_tryagain:
998 rc = MMGCRamRead(pVM, &tss.ss0, (RCPTRTYPE(void *))(GCPtrTss + RT_OFFSETOF(VBOXTSS, ss0)), sizeof(tss.ss0));
999 rc |= MMGCRamRead(pVM, &tss.esp0, (RCPTRTYPE(void *))(GCPtrTss + RT_OFFSETOF(VBOXTSS, esp0)), sizeof(tss.esp0));
1000# ifdef DEBUG
1001 rc |= MMGCRamRead(pVM, &tss.offIoBitmap, (RCPTRTYPE(void *))(GCPtrTss + RT_OFFSETOF(VBOXTSS, offIoBitmap)), sizeof(tss.offIoBitmap));
1002# endif
1003
1004 if (RT_FAILURE(rc))
1005 {
1006 if (!fTriedAlready)
1007 {
1008 /* Shadow page might be out of sync. Sync and try again */
1009 /** @todo might cross page boundary */
1010 fTriedAlready = true;
1011 rc = PGMPrefetchPage(pVM, (RTGCPTR)GCPtrTss);
1012 if (rc != VINF_SUCCESS)
1013 return rc;
1014 goto l_tryagain;
1015 }
1016 AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
1017 return rc;
1018 }
1019
1020# else /* !IN_RC */
1021 /* Reading too much. Could be cheaper than two separate calls though. */
1022 rc = PGMPhysSimpleReadGCPtr(pVM, &tss, GCPtrTss, sizeof(VBOXTSS));
1023 if (RT_FAILURE(rc))
1024 {
1025 AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
1026 return rc;
1027 }
1028# endif /* !IN_RC */
1029
1030# ifdef LOG_ENABLED
1031 uint32_t ssr0 = pVM->selm.s.Tss.ss1;
1032 uint32_t espr0 = pVM->selm.s.Tss.esp1;
1033 ssr0 &= ~1;
1034
1035 if (ssr0 != tss.ss0 || espr0 != tss.esp0)
1036 Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));
1037
1038 Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
1039# endif
1040 /* Update our TSS structure for the guest's ring 1 stack */
1041 selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
1042 pVM->selm.s.fSyncTSSRing0Stack = false;
1043 }
1044
1045 *pSS = pVM->selm.s.Tss.ss1;
1046 *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;
1047
1048 return VINF_SUCCESS;
1049}
1050#endif /* !IN_RING0 */
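
/*
 * Usage sketch for SELMGetRing1Stack above, assuming the caller just wants to
 * inspect the ring-1 values that shadow the guest's ring-0 stack:
 *
 *     uint32_t  ss1;
 *     RTGCPTR32 esp1;
 *     int rc2 = SELMGetRing1Stack(pVM, &ss1, &esp1);
 *     if (RT_SUCCESS(rc2))
 *         Log(("ring-1 stack %04X:%08X\n", ss1, esp1));
 */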
1051
1052
1053/**
1054 * Returns Guest TSS pointer
1055 *
1056 * @returns Pointer to the guest TSS, RTRCPTR_MAX if not being monitored.
1057 * @param pVM VM Handle.
1058 */
1059VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM)
1060{
1061 return (RTGCPTR)pVM->selm.s.GCPtrGuestTss;
1062}
1063
1064
1065/**
1066 * Validates a CS selector.
1067 *
1068 * @returns VBox status code.
1069 * @param pSelInfo Pointer to the selector information for the CS selector.
1070 * @param SelCPL The selector defining the CPL (SS).
1071 */
1072VMMDECL(int) SELMSelInfoValidateCS(PCSELMSELINFO pSelInfo, RTSEL SelCPL)
1073{
1074 /*
1075 * Check if present.
1076 */
1077 if (pSelInfo->Raw.Gen.u1Present)
1078 {
1079 /*
1080 * Type check.
1081 */
1082 if ( pSelInfo->Raw.Gen.u1DescType == 1
1083 && (pSelInfo->Raw.Gen.u4Type & X86_SEL_TYPE_CODE))
1084 {
1085 /*
1086 * Check level.
1087 */
1088 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, pSelInfo->Sel & X86_SEL_RPL);
1089 if ( !(pSelInfo->Raw.Gen.u4Type & X86_SEL_TYPE_CONF)
1090 ? uLevel <= pSelInfo->Raw.Gen.u2Dpl
1091 : uLevel >= pSelInfo->Raw.Gen.u2Dpl /* hope I got this right now... */
1092 )
1093 return VINF_SUCCESS;
1094 return VERR_INVALID_RPL;
1095 }
1096 return VERR_NOT_CODE_SELECTOR;
1097 }
1098 return VERR_SELECTOR_NOT_PRESENT;
1099}
1100
1101#ifndef IN_RING0
1102
1103/**
1104 * Gets the hypervisor code selector (CS).
1105 * @returns CS selector.
1106 * @param pVM The VM handle.
1107 */
1108VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
1109{
1110 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
1111}
1112
1113
1114/**
1115 * Gets the 64-mode hypervisor code selector (CS64).
1116 * @returns CS selector.
1117 * @param pVM The VM handle.
1118 */
1119VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
1120{
1121 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
1122}
1123
1124
1125/**
1126 * Gets the hypervisor data selector (DS).
1127 * @returns DS selector.
1128 * @param pVM The VM handle.
1129 */
1130VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
1131{
1132 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
1133}
1134
1135
1136/**
1137 * Gets the hypervisor TSS selector.
1138 * @returns TSS selector.
1139 * @param pVM The VM handle.
1140 */
1141VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
1142{
1143 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
1144}
1145
1146
1147/**
1148 * Gets the hypervisor TSS Trap 8 selector.
1149 * @returns TSS Trap 8 selector.
1150 * @param pVM The VM handle.
1151 */
1152VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
1153{
1154 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
1155}
1156
1157/**
1158 * Gets the address for the hypervisor GDT.
1159 *
1160 * @returns The GDT address.
1161 * @param pVM The VM handle.
1162 * @remark This is intended only for very special use, like in the world
1163 * switchers. Don't exploit this API!
1164 */
1165VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
1166{
1167 /*
1168 * Always convert this from the HC pointer since we can be
1169 * called before the first relocation and have to work correctly
1170 * without having dependencies on the relocation order.
1171 */
1172 return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
1173}
1174
1175#endif /* !IN_RING0 */
1176
1177/**
1178 * Gets info about the current TSS.
1179 *
1180 * @returns VBox status code.
1181 * @retval VINF_SUCCESS if we've got a TSS loaded.
1182 * @retval VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
1183 *
1184 * @param pVM The VM handle.
1185 * @param pGCPtrTss Where to store the TSS address.
1186 * @param pcbTss Where to store the TSS size limit.
1187 * @param pfCanHaveIOBitmap Where to store the can-have-I/O-bitmap indicator. (optional)
1188 */
1189VMMDECL(int) SELMGetTSSInfo(PVM pVM, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
1190{
1191 /*
1192 * The TR hidden register is always valid.
1193 */
1194 CPUMSELREGHID trHid;
1195 RTSEL tr = CPUMGetGuestTR(pVM, &trHid);
1196 if (!(tr & X86_SEL_MASK))
1197 return VERR_SELM_NO_TSS;
1198
1199 *pGCPtrTss = trHid.u64Base;
1200 *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
1201 if (pfCanHaveIOBitmap)
1202 *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1203 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1204 return VINF_SUCCESS;
1205}
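
/*
 * Usage sketch for SELMGetTSSInfo above, a minimal example assuming the caller
 * only needs to know whether a TSS is loaded and whether its type allows an
 * I/O permission bitmap (386+ TSS):
 *
 *     RTGCUINTPTR GCPtrTss, cbTss;
 *     bool        fCanHaveIOBitmap;
 *     int rc2 = SELMGetTSSInfo(pVM, &GCPtrTss, &cbTss, &fCanHaveIOBitmap);
 *     if (rc2 == VERR_SELM_NO_TSS)
 *         return rc2; /* the guest hasn't loaded TR yet */
 */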
1206