VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp@ 43030

Last change on this file since 43030 was 42427, checked in by vboxsync, 12 years ago

VMM: Fixed some selector arithmetic, introducing a new constant and renaming an old one to make things clearer. Also added CPUMGetGuestLdtrEx and made some (but not all) of SELM use this instead of the shadow GDT.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 35.1 KB
Line 
1/* $Id: SELMAll.cpp 42427 2012-07-26 23:48:01Z vboxsync $ */
2/** @file
3 * SELM All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_SELM
23#include <VBox/vmm/selm.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/pgm.h>
27#include <VBox/vmm/hwaccm.h>
28#include "SELMInternal.h"
29#include <VBox/vmm/vm.h>
30#include <VBox/err.h>
31#include <VBox/param.h>
32#include <iprt/assert.h>
33#include <VBox/vmm/vmm.h>
34#include <iprt/x86.h>
35
36
37/*******************************************************************************
38* Global Variables *
39*******************************************************************************/
40#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
41/** Segment register names. */
42static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
43#endif
44
45
46
47#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address (truncated to 32 bits, see below).
 * @param   pVM     Pointer to the VM.
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @remarks Don't use when in long mode.
 */
VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
{
    Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM)));    /* DON'T USE! */

    /** @todo check the limit. */
    X86DESC Desc;
    if (!(Sel & X86_SEL_LDT))
        /* GDT selector: index straight into SELM's (shadow) GDT copy. */
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    else
    {
        /* LDT selector: the LDT copy lives offLdtHyper bytes into the
           hypervisor LDT mapping (pvLdt). */
        /** @todo handle LDT pages not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    /* base + offset, masked to 32 bits since we're never in long mode here
       (see the assertion above). */
    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
}
77#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
78
79
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address.
 * @param   pVM      Pointer to the VM.
 * @param   SelReg   Selector register (DISSELREG_XXX).
 * @param   pCtxCore CPU context.
 * @param   Addr     Address part.
 */
VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
{
    PCPUMSELREG pSReg;
    PVMCPU      pVCpu = VMMGetCpu(pVM);

    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg); AssertRC(rc);

    /*
     * Deal with real & v86 mode first.
     */
    if (    pCtxCore->eflags.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
    {
        /* Offsets are 16-bit; use the cached base when the hidden parts are
           usable, otherwise the classic real-mode selector*16. */
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
            uFlat += pSReg->u64Base;
        else
            uFlat += (RTGCUINTPTR)pSReg->Sel << 4;
        return (RTGCPTR)uFlat;
    }

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* In raw-mode the hidden selector parts may be stale; reload lazily. */
    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
    if (    pCtxCore->cs.Attr.n.u1Long
        &&  CPUMIsGuestInLongMode(pVCpu))
    {
        switch (SelReg)
        {
            /* Only FS and GS keep a non-zero base in 64-bit mode. */
            case DISSELREG_FS:
            case DISSELREG_GS:
                return (RTGCPTR)(pSReg->u64Base + Addr);

            default:
                return Addr;    /* base 0 */
        }
    }

    /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
    Assert(pSReg->u64Base <= 0xffffffff);
    return ((pSReg->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
}
144
145
/**
 * Converts a GC selector based address to a flat address.
 *
 * Some basic checking is done, but not all kinds yet.
 *
 * @returns VBox status
 * @param   pVCpu    Pointer to the VMCPU.
 * @param   SelReg   Selector register (DISSELREG_XXX).
 * @param   pCtxCore CPU context.
 * @param   Addr     Address part.
 * @param   fFlags   SELMTOFLAT_FLAGS_* flags controlling privilege checking.
 * @param   ppvGC    Where to store the GC flat address. Optional.
 */
VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags, PRTGCPTR ppvGC)
{
    /*
     * Fetch the selector first.
     */
    PCPUMSELREG pSReg;
    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg);
    AssertRCReturn(rc, rc); AssertPtr(pSReg);

    /*
     * Deal with real & v86 mode first.
     */
    if (    pCtxCore->eflags.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
    {
        /* No limit/type checking in real/v86 mode; just form base + 16-bit offset. */
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (ppvGC)
        {
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
                *ppvGC = pSReg->u64Base + uFlat;
            else
                *ppvGC = ((RTGCUINTPTR)pSReg->Sel << 4) + uFlat;
        }
        return VINF_SUCCESS;
    }


#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* In raw-mode the hidden selector parts may be stale; reload lazily. */
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
    RTGCPTR pvFlat;
    bool    fCheckLimit = true;
    if (    pCtxCore->cs.Attr.n.u1Long
        &&  CPUMIsGuestInLongMode(pVCpu))
    {
        fCheckLimit = false;    /* no limit checking in 64-bit mode */
        switch (SelReg)
        {
            /* Only FS and GS keep a non-zero base in 64-bit mode. */
            case DISSELREG_FS:
            case DISSELREG_GS:
                pvFlat = pSReg->u64Base + Addr;
                break;

            default:
                pvFlat = Addr;
                break;
        }
    }
    else
    {
        /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
        Assert(pSReg->u64Base <= UINT32_C(0xffffffff));
        pvFlat  = pSReg->u64Base + Addr;
        pvFlat &= UINT32_C(0xffffffff);
    }

    /*
     * Check type if present.
     */
    if (pSReg->Attr.n.u1Present)
    {
        switch (pSReg->Attr.n.u4Type)
        {
            /* Read only selector type. */
            case X86_SEL_TYPE_RO:
            case X86_SEL_TYPE_RO_ACC:
            case X86_SEL_TYPE_RW:
            case X86_SEL_TYPE_RW_ACC:
            case X86_SEL_TYPE_EO:
            case X86_SEL_TYPE_EO_ACC:
            case X86_SEL_TYPE_ER:
            case X86_SEL_TYPE_ER_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit && Addr > pSReg->u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            /* Conforming code selectors. */
            case X86_SEL_TYPE_EO_CONF:
            case X86_SEL_TYPE_EO_CONF_ACC:
            case X86_SEL_TYPE_ER_CONF:
            case X86_SEL_TYPE_ER_CONF_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit && Addr > pSReg->u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            /* Expand-down data selectors: valid offsets lie strictly ABOVE
               the limit, bounded by 64KB (byte granular) or 4GB. */
            case X86_SEL_TYPE_RO_DOWN:
            case X86_SEL_TYPE_RO_DOWN_ACC:
            case X86_SEL_TYPE_RW_DOWN:
            case X86_SEL_TYPE_RW_DOWN_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit)
                {
                    if (!pSReg->Attr.n.u1Granularity && Addr > UINT32_C(0xffff))
                        return VERR_OUT_OF_SELECTOR_BOUNDS;
                    if (Addr <= pSReg->u32Limit)
                        return VERR_OUT_OF_SELECTOR_BOUNDS;
                }
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            default:
                return VERR_INVALID_SELECTOR;

        }
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
297
298
299#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Converts a GC selector based address to a flat address.
 *
 * Some basic checking is done, but not all kinds yet.
 *
 * @returns VBox status
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   eflags  Current eflags
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @param   fFlags  SELMTOFLAT_FLAGS_* flags controlling privilege checking
 *                  and whether hypervisor GDT entries are valid.
 * @param   ppvGC   Where to store the GC flat address. Optional.
 * @param   pcb     Where to store the bytes from *ppvGC which can be accessed according to
 *                  the selector. NULL is allowed.
 * @remarks Don't use when in long mode.
 */
VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr,
                               uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
{
    Assert(!CPUMIsGuestInLongMode(pVCpu));    /* DON'T USE! (Accessing shadow GDT/LDT.) */

    /*
     * Deal with real & v86 mode first.
     */
    if (    eflags.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
    {
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (ppvGC)
            *ppvGC = ((RTGCUINTPTR)Sel << 4) + uFlat;
        if (pcb)
            *pcb = 0x10000 - uFlat;     /* bytes left up to the 64KB segment end */
        return VINF_SUCCESS;
    }

    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    X86DESC Desc;
    PVM     pVM = pVCpu->CTX_SUFF(pVM);
    if (!(Sel & X86_SEL_LDT))
    {
        /* GDT selector: unless the caller allows hypervisor selectors, the
           descriptor's last byte (Sel | 7) must fit inside the guest GDT. */
        if (    !(fFlags & SELMTOFLAT_FLAGS_HYPER)
            &&  (Sel | X86_SEL_RPL_LDT) > pVM->selm.s.GuestGdtr.cbGdt)
            return VERR_INVALID_SELECTOR;
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    }
    else
    {
        /* LDT selector: same last-byte check against the LDT limit. */
        if ((Sel | X86_SEL_RPL_LDT) > pVM->selm.s.cbLdtLimit)
            return VERR_INVALID_SELECTOR;

        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    /* calc limit. */
    uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);

    /* calc address assuming straight stuff. */
    RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc);

    /* Cut the address to 32 bits. */
    Assert(!CPUMIsGuestInLongMode(pVCpu));
    pvFlat &= 0xffffffff;

    uint8_t u1Present     = Desc.Gen.u1Present;
    uint8_t u1Granularity = Desc.Gen.u1Granularity;
    uint8_t u1DescType    = Desc.Gen.u1DescType;
    uint8_t u4Type        = Desc.Gen.u4Type;

    /*
     * Check if present.
     */
    if (u1Present)
    {
        /*
         * Type check. BOTH combines the descriptor-type bit (code/data vs
         * system) with the 4-bit type so one switch covers both name spaces.
         */
#define BOTH(a, b) ((a << 16) | b)
        switch (BOTH(u1DescType, u4Type))
        {

            /** Read only selector type. */
            case BOTH(1,X86_SEL_TYPE_RO):
            case BOTH(1,X86_SEL_TYPE_RO_ACC):
            case BOTH(1,X86_SEL_TYPE_RW):
            case BOTH(1,X86_SEL_TYPE_RW_ACC):
            case BOTH(1,X86_SEL_TYPE_EO):
            case BOTH(1,X86_SEL_TYPE_EO_ACC):
            case BOTH(1,X86_SEL_TYPE_ER):
            case BOTH(1,X86_SEL_TYPE_ER_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = u32Limit - (uint32_t)Addr + 1;
                return VINF_SUCCESS;

            /* Conforming code selectors. */
            case BOTH(1,X86_SEL_TYPE_EO_CONF):
            case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
            case BOTH(1,X86_SEL_TYPE_ER_CONF):
            case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = u32Limit - (uint32_t)Addr + 1;
                return VINF_SUCCESS;

            /* Expand-down data selectors: valid offsets lie strictly ABOVE
               the limit, bounded by 64KB (byte granular) or 4GB. */
            case BOTH(1,X86_SEL_TYPE_RO_DOWN):
            case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
            case BOTH(1,X86_SEL_TYPE_RW_DOWN):
            case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                if ((RTGCUINTPTR)Addr <= u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;

                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
                return VINF_SUCCESS;

            /* System selectors: only the plain limit check is applied. */
            case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
            case BOTH(0,X86_SEL_TYPE_SYS_LDT):
            case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
            case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
            case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
            case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if ((RTGCUINTPTR)Addr > u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                if (pcb)
                    *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
                return VINF_SUCCESS;

            default:
                return VERR_INVALID_SELECTOR;

        }
#undef BOTH
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
479#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
480
481
482#ifdef VBOX_WITH_RAW_MODE_NOT_R0
483
/**
 * Worker that loads the hidden parts of a selector register from the guest's
 * own descriptor table (GDT or LDT).
 *
 * On any failure (unreadable descriptor or one that doesn't pass
 * selmIsGstDescGoodForSReg) the hidden parts are left untouched and only a
 * statistics counter is bumped; the caller retries on the next use.
 *
 * @param   pVCpu       Pointer to the current virtual CPU.
 * @param   pCtx        The guest CPU context. (Currently unused here.)
 * @param   pSReg       The selector register to load.
 * @param   GCPtrDesc   Guest address of the descriptor table entry.
 * @param   Sel         The selector value (for validation/logging).
 * @param   iSReg       The selector register index (X86_SREG_XXX).
 */
static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg,
                                                   RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
{
    /*
     * Try read the entry.
     */
    X86DESC GstDesc;
    int rc = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc));
    if (RT_FAILURE(rc))
    {
        Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n", g_aszSRegNms[iSReg], Sel, rc));
        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors);
        return;
    }

    /*
     * Validate it and load it.
     */
    if (!selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
    {
        Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc));
        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood);
        return;
    }

    selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc);
    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n",
         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst);
}
514
515
/**
 * CPUM helper that loads the hidden selector register from the descriptor table
 * when executing with raw-mode.
 *
 * Prefers the shadow descriptor table entry; falls back to reading the guest's
 * own table when the shadow table is pending a sync or the shadow entry fails
 * validation.
 *
 * @remarks This is only used when in legacy protected mode!
 *
 * @param   pVCpu   Pointer to the current virtual CPU.
 * @param   pCtx    The guest CPU context.
 * @param   pSReg   The selector register.
 *
 * @todo    Deal 100% correctly with stale selectors.  What's more evil is
 *          invalid page table entries, which isn't impossible to imagine for
 *          LDT entries for instance, though unlikely.  Currently, we turn a
 *          blind eye to these issues and return the old hidden registers,
 *          though we don't set the valid flag, so that we'll try loading them
 *          over and over again till we succeed loading something.
 */
VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg)
{
    Assert(pCtx->cr0 & X86_CR0_PE);                 /* protected mode only */
    Assert(!(pCtx->msrEFER & MSR_K6_EFER_LMA));     /* never in long mode */

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(pVM->cCpus == 1);


    /*
     * Get the shadow descriptor table entry and validate it.
     * Should something go amiss, try the guest table.
     */
    RTSEL const    Sel   = pSReg->Sel;
    uint32_t const iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT);
    PCX86DESC      pShwDesc;
    if (!(Sel & X86_SEL_LDT))
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LGDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt);

        pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
        /* A pending GDT sync or a bad shadow entry forces the guest table path. */
        if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
            ||  !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }
    else
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LLDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit);

        pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK));
        /* Same fallback logic for the LDT case. */
        if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
            ||  !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }

    /*
     * All fine, load it.
     */
    selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc);
    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw);
    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n",
         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
}
586
587#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
588
589/**
590 * Validates and converts a GC selector based code address to a flat
591 * address when in real or v8086 mode.
592 *
593 * @returns VINF_SUCCESS.
594 * @param pVCpu Pointer to the VMCPU.
595 * @param SelCS Selector part.
596 * @param pHidCS The hidden CS register part. Optional.
597 * @param Addr Address part.
598 * @param ppvFlat Where to store the flat address.
599 */
600DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pSReg, RTGCPTR Addr,
601 PRTGCPTR ppvFlat)
602{
603 RTGCUINTPTR uFlat = Addr & 0xffff;
604 if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
605 uFlat += (RTGCUINTPTR)SelCS << 4;
606 else
607 uFlat += pSReg->u64Base;
608 *ppvFlat = uFlat;
609 return VINF_SUCCESS;
610}
611
612
613#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Validates and converts a GC selector based code address to a flat address
 * when in protected/long mode using the raw-mode algorithm.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   SelCPL  Current privilege level.  Get this from SS - CS might be
 *                  conforming!  A full selector can be passed, we'll only
 *                  use the RPL part.
 * @param   SelCS   Selector part.
 * @param   Addr    Address part.
 * @param   ppvFlat Where to store the flat address.
 * @param   pcBits  Where to store the segment bitness (16/32/64). Optional.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrRawMode(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
                                                    PRTGCPTR ppvFlat, uint32_t *pcBits)
{
    NOREF(pVCpu);
    /** @todo validate limit! */
    X86DESC Desc;
    if (!(SelCS & X86_SEL_LDT))
        /* GDT selector: index into SELM's (shadow) GDT copy. */
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
    else
    {
        /* LDT selector: the LDT copy lives offLdtHyper bytes into pvLdt. */
        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[SelCS >> X86_SEL_SHIFT];
    }

    /*
     * Check if present.
     */
    if (Desc.Gen.u1Present)
    {
        /*
         * Type check: must be a code segment.
         */
        if (    Desc.Gen.u1DescType == 1
            &&  (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
        {
            /*
             * Check level: non-conforming requires max(CPL,RPL) <= DPL,
             * conforming requires max(CPL,RPL) >= DPL.
             */
            unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
            if (    !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
                ?   uLevel <= Desc.Gen.u2Dpl
                :   uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
                    )
            {
                /*
                 * Limit check.
                 */
                uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
                if ((RTGCUINTPTR)Addr <= u32Limit)
                {
                    *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc));
                    /* Cut the address to 32 bits. */
                    *ppvFlat &= 0xffffffff;

                    if (pcBits)
                        *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
                    return VINF_SUCCESS;
                }
                return VERR_OUT_OF_SELECTOR_BOUNDS;
            }
            return VERR_INVALID_RPL;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
686#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
687
688
/**
 * Validates and converts a GC selector based code address to a flat address
 * when in protected/long mode using the standard hidden selector registers
 *
 * @returns VBox status code.
 * @param   pVCpu    Pointer to the VMCPU.
 * @param   SelCPL   Current privilege level.  Get this from SS - CS might be
 *                   conforming!  A full selector can be passed, we'll only
 *                   use the RPL part.
 * @param   SelCS    Selector part.
 * @param   pSRegCS  The full CS selector register.
 * @param   Addr     The address (think IP/EIP/RIP).
 * @param   ppvFlat  Where to store the flat address upon successful return.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pSRegCS,
                                                   RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    /*
     * Check if present.
     */
    if (pSRegCS->Attr.n.u1Present)
    {
        /*
         * Type check: must be a code segment.
         */
        if (    pSRegCS->Attr.n.u1DescType == 1
            &&  (pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
        {
            /*
             * Check level: non-conforming requires max(CPL,RPL) <= DPL,
             * conforming requires max(CPL,RPL) >= DPL.
             */
            unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
            if (    !(pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CONF)
                ?   uLevel <= pSRegCS->Attr.n.u2Dpl
                :   uLevel >= pSRegCS->Attr.n.u2Dpl /* hope I got this right now... */
                    )
            {
                /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
                   (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
                if (    pSRegCS->Attr.n.u1Long
                    &&  CPUMIsGuestInLongMode(pVCpu))
                {
                    *ppvFlat = Addr;
                    return VINF_SUCCESS;
                }

                /*
                 * Limit check. Note that the limit in the hidden register is the
                 * final value. The granularity bit was included in its calculation.
                 */
                uint32_t u32Limit = pSRegCS->u32Limit;
                if ((RTGCUINTPTR)Addr <= u32Limit)
                {
                    *ppvFlat = Addr + pSRegCS->u64Base;
                    return VINF_SUCCESS;
                }

                return VERR_OUT_OF_SELECTOR_BOUNDS;
            }
            Log(("selmValidateAndConvertCSAddrHidden: Invalid RPL Attr.n.u4Type=%x cpl=%x dpl=%x\n",
                 pSRegCS->Attr.n.u4Type, uLevel, pSRegCS->Attr.n.u2Dpl));
            return VERR_INVALID_RPL;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
756
757
/**
 * Validates and converts a GC selector based code address to a flat address.
 *
 * Dispatches to the real/v86-mode, raw-mode or hidden-register worker above.
 *
 * @returns VBox status code.
 * @param   pVCpu    Pointer to the VMCPU.
 * @param   Efl      Current EFLAGS.
 * @param   SelCPL   Current privilege level.  Get this from SS - CS might be
 *                   conforming!  A full selector can be passed, we'll only
 *                   use the RPL part.
 * @param   SelCS    Selector part.
 * @param   pSRegCS  The full CS selector register. Optional; raw-mode falls
 *                   back to the descriptor tables when not given.
 * @param   Addr     The address (think IP/EIP/RIP).
 * @param   ppvFlat  Where to store the flat address upon successful return.
 */
VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS Efl, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,
                                          RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    if (    Efl.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
        return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pSRegCS, Addr, ppvFlat);

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Use the hidden registers when possible, updating them if outdated. */
    if (!pSRegCS)
        return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);

    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);

    /* Undo ring compression: raw-mode (no HWACCM) runs guest ring-0 code in
       ring 1, so strip the RPL again to get the guest's view. */
    if ((SelCPL & X86_SEL_RPL) == 1 && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCPL &= ~X86_SEL_RPL;
    Assert(pSRegCS->Sel == SelCS);
    if ((SelCS & X86_SEL_RPL) == 1 && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCS &= ~X86_SEL_RPL;
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS));
    Assert(pSRegCS->Sel == SelCS);
#endif

    return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pSRegCS, Addr, ppvFlat);
}
800
801
802/**
803 * Returns Hypervisor's Trap 08 (\#DF) selector.
804 *
805 * @returns Hypervisor's Trap 08 (\#DF) selector.
806 * @param pVM Pointer to the VM.
807 */
808VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
809{
810 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
811}
812
813
/**
 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
 *
 * @param   pVM     Pointer to the VM.
 * @param   u32EIP  EIP of Trap 08 handler.
 */
VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
{
    /* Only the entry point of the double-fault TSS is updated here. */
    pVM->selm.s.TssTrap08.eip = u32EIP;
}
824
825
826/**
827 * Sets ss:esp for ring1 in main Hypervisor's TSS.
828 *
829 * @param pVM Pointer to the VM.
830 * @param ss Ring1 SS register value. Pass 0 if invalid.
831 * @param esp Ring1 ESP register value.
832 */
833void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
834{
835 Assert((ss & 1) || esp == 0);
836 pVM->selm.s.Tss.ss1 = ss;
837 pVM->selm.s.Tss.esp1 = (uint32_t)esp;
838}
839
840
841#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Gets ss:esp for ring1 in main Hypervisor's TSS.
 *
 * Returns SS=0 if the ring-1 stack isn't valid.
 *
 * When the TSS ring-0 stack is flagged as out of sync, this first re-reads
 * ss0:esp0 from the guest TSS and refreshes our shadow TSS before returning.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSS     Ring1 SS register value.
 * @param   pEsp    Ring1 ESP register value.
 */
VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
{
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = &pVM->aCpus[0];

    if (pVM->selm.s.fSyncTSSRing0Stack)
    {
        RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
        int     rc;
        VBOXTSS tss;

        Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);

# ifdef IN_RC
        bool    fTriedAlready = false;  /* allow exactly one page-sync retry */

l_tryagain:
        /* In RC we can read the guest TSS directly through the mapping. */
        PVBOXTSS pTss = (PVBOXTSS)(uintptr_t)GCPtrTss;
        rc  = MMGCRamRead(pVM, &tss.ss0,  &pTss->ss0,  sizeof(tss.ss0));
        rc |= MMGCRamRead(pVM, &tss.esp0, &pTss->esp0, sizeof(tss.esp0));
#  ifdef DEBUG
        rc |= MMGCRamRead(pVM, &tss.offIoBitmap, &pTss->offIoBitmap, sizeof(tss.offIoBitmap));
#  endif

        if (RT_FAILURE(rc))
        {
            if (!fTriedAlready)
            {
                /* Shadow page might be out of sync. Sync and try again */
                /** @todo might cross page boundary */
                fTriedAlready = true;
                rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
                if (rc != VINF_SUCCESS)
                    return rc;
                goto l_tryagain;
            }
            AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }

# else /* !IN_RC */
        /* Reading too much. Could be cheaper than two separate calls though. */
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
        if (RT_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }
# endif /* !IN_RC */

# ifdef LOG_ENABLED
        uint32_t ssr0  = pVM->selm.s.Tss.ss1;
        uint32_t espr0 = pVM->selm.s.Tss.esp1;
        ssr0 &= ~1;     /* strip the ring-compression bit for comparison */

        if (ssr0 != tss.ss0 || espr0 != tss.esp0)
            Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));

        Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
# endif
        /* Update our TSS structure for the guest's ring 1 stack */
        selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
        pVM->selm.s.fSyncTSSRing0Stack = false;
    }

    *pSS  = pVM->selm.s.Tss.ss1;
    *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;

    return VINF_SUCCESS;
}
922#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
923
924
925/**
926 * Returns Guest TSS pointer
927 *
928 * @returns Pointer to the guest TSS, RTRCPTR_MAX if not being monitored.
929 * @param pVM Pointer to the VM.
930 */
931VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM)
932{
933 return (RTGCPTR)pVM->selm.s.GCPtrGuestTss;
934}
935
936#ifdef VBOX_WITH_RAW_MODE_NOT_R0
937
938/**
939 * Gets the hypervisor code selector (CS).
940 * @returns CS selector.
941 * @param pVM Pointer to the VM.
942 */
943VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
944{
945 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
946}
947
948
949/**
950 * Gets the 64-mode hypervisor code selector (CS64).
951 * @returns CS selector.
952 * @param pVM Pointer to the VM.
953 */
954VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
955{
956 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
957}
958
959
960/**
961 * Gets the hypervisor data selector (DS).
962 * @returns DS selector.
963 * @param pVM Pointer to the VM.
964 */
965VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
966{
967 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
968}
969
970
971/**
972 * Gets the hypervisor TSS selector.
973 * @returns TSS selector.
974 * @param pVM Pointer to the VM.
975 */
976VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
977{
978 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
979}
980
981
982/**
983 * Gets the hypervisor TSS Trap 8 selector.
984 * @returns TSS Trap 8 selector.
985 * @param pVM Pointer to the VM.
986 */
987VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
988{
989 return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
990}
991
992/**
993 * Gets the address for the hypervisor GDT.
994 *
995 * @returns The GDT address.
996 * @param pVM Pointer to the VM.
997 * @remark This is intended only for very special use, like in the world
998 * switchers. Don't exploit this API!
999 */
1000VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
1001{
1002 /*
1003 * Always convert this from the HC pointer since we can be
1004 * called before the first relocation and have to work correctly
1005 * without having dependencies on the relocation order.
1006 */
1007 return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
1008}
1009
1010#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1011
/**
 * Gets info about the current TSS.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if we've got a TSS loaded.
 * @retval  VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
 *
 * @param   pVM                 Pointer to the VM.
 * @param   pVCpu               Pointer to the VMCPU.
 * @param   pGCPtrTss           Where to store the TSS address.
 * @param   pcbTss              Where to store the TSS size limit.
 * @param   pfCanHaveIOBitmap   Where to store the can-have-I/O-bitmap indicator. (optional)
 */
VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
{
    NOREF(pVM);

    /*
     * The TR hidden register is always valid.
     */
    CPUMSELREGHID trHid;
    RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
    if (!(tr & X86_SEL_MASK_OFF_RPL))
        return VERR_SELM_NO_TSS;    /* null TR - no TSS loaded */

    *pGCPtrTss = trHid.u64Base;
    /* Size = limit + 1, except don't wrap to 0 when the limit is UINT32_MAX. */
    *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
    if (pfCanHaveIOBitmap)
        /* Only 386+ style TSSes carry an I/O permission bitmap. */
        *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
                          || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
    return VINF_SUCCESS;
}
1044
1045
1046
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 * This is called by PGM.
 *
 * Keeps the CR3 fields of both hypervisor TSSes (main and Trap 08) in sync
 * with the current shadow/intermediate page tables.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
{
    /** @todo SMP support!! */
    pVM->selm.s.Tss.cr3       = PGMGetHyperCR3(pVCpu);
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
}
1061
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette