VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp@ 47681

Last change on this file since 47681 was 45786, checked in by vboxsync, 12 years ago

Move HMRCA.asm into the switcher code so we don't need VMMRC.rc.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 35.8 KB
Line 
1/* $Id: SELMAll.cpp 45786 2013-04-26 22:35:59Z vboxsync $ */
2/** @file
3 * SELM All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_SELM
23#include <VBox/vmm/selm.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/em.h>
26#include <VBox/vmm/mm.h>
27#include <VBox/vmm/hm.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/hm.h>
30#include "SELMInternal.h"
31#include <VBox/vmm/vm.h>
32#include <VBox/err.h>
33#include <VBox/param.h>
34#include <iprt/assert.h>
35#include <VBox/vmm/vmm.h>
36#include <iprt/x86.h>
37
38#include "SELMInline.h"
39
40
41/*******************************************************************************
42* Global Variables *
43*******************************************************************************/
44#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
45/** Segment register names. */
46static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
47#endif
48
49
50
51#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address (truncated to 32 bits).
 * @param   pVM     Pointer to the VM.
 * @param   Sel     Selector part.
 * @param   Addr    Address part.
 * @remarks Don't use when in long mode.
 */
VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
{
    Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM)));    /* DON'T USE! */
    Assert(!HMIsEnabled(pVM));

    /** @todo check the limit. */
    X86DESC Desc;
    if (!(Sel & X86_SEL_LDT))
        /* GDT selector: fetch the descriptor from the shadow GDT. */
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
    else
    {
        /* LDT selector: the shadow LDT lives at offLdtHyper within the
           hypervisor LDT mapping. */
        /** @todo handle LDT pages not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[Sel >> X86_SEL_SHIFT];
    }

    /* Flat = descriptor base + offset, truncated to 32 bits. */
    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
}
82#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
83
84
/**
 * Converts a GC selector based address to a flat address.
 *
 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
 * for that.
 *
 * @returns Flat address.
 * @param   pVM         Pointer to the VM.
 * @param   SelReg      Selector register.
 * @param   pCtxCore    CPU context.
 * @param   Addr        Address part.
 */
VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
{
    PCPUMSELREG pSReg;
    PVMCPU      pVCpu = VMMGetCpu(pVM);

    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg); AssertRC(rc);

    /*
     * Deal with real & v86 mode first.
     */
    if (    pCtxCore->eflags.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
    {
        /* Real/V86: flat = base + 16-bit offset; prefer the cached hidden
           base when valid, otherwise derive it from the selector value. */
        RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
            uFlat += pSReg->u64Base;
        else
            uFlat += (RTGCUINTPTR)pSReg->Sel << 4;
        return (RTGCPTR)uFlat;
    }

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* In raw-mode the hidden selector parts may be stale; reload on demand. */
    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
    if (    pCtxCore->cs.Attr.n.u1Long
        &&  CPUMIsGuestInLongMode(pVCpu))
    {
        switch (SelReg)
        {
            /* Only FS and GS keep a non-zero base in 64-bit mode. */
            case DISSELREG_FS:
            case DISSELREG_GS:
                return (RTGCPTR)(pSReg->u64Base + Addr);

            default:
                return Addr;    /* base 0 */
        }
    }

    /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
    Assert(pSReg->u64Base <= 0xffffffff);
    return ((pSReg->u64Base + (RTGCUINTPTR)Addr) & 0xffffffff);
}
149
150
151/**
152 * Converts a GC selector based address to a flat address.
153 *
154 * Some basic checking is done, but not all kinds yet.
155 *
156 * @returns VBox status
157 * @param pVCpu Pointer to the VMCPU.
158 * @param SelReg Selector register.
159 * @param pCtxCore CPU context.
160 * @param Addr Address part.
161 * @param fFlags SELMTOFLAT_FLAGS_*
162 * GDT entires are valid.
163 * @param ppvGC Where to store the GC flat address.
164 */
165VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags, PRTGCPTR ppvGC)
166{
167 /*
168 * Fetch the selector first.
169 */
170 PCPUMSELREG pSReg;
171 int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg);
172 AssertRCReturn(rc, rc); AssertPtr(pSReg);
173
174 /*
175 * Deal with real & v86 mode first.
176 */
177 if ( pCtxCore->eflags.Bits.u1VM
178 || CPUMIsGuestInRealMode(pVCpu))
179 {
180 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
181 if (ppvGC)
182 {
183 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
184 *ppvGC = pSReg->u64Base + uFlat;
185 else
186 *ppvGC = ((RTGCUINTPTR)pSReg->Sel << 4) + uFlat;
187 }
188 return VINF_SUCCESS;
189 }
190
191#ifdef VBOX_WITH_RAW_MODE_NOT_R0
192 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
193 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
194 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
195 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
196#else
197 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
198 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
199#endif
200
201 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
202 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
203 RTGCPTR pvFlat;
204 bool fCheckLimit = true;
205 if ( pCtxCore->cs.Attr.n.u1Long
206 && CPUMIsGuestInLongMode(pVCpu))
207 {
208 fCheckLimit = false;
209 switch (SelReg)
210 {
211 case DISSELREG_FS:
212 case DISSELREG_GS:
213 pvFlat = pSReg->u64Base + Addr;
214 break;
215
216 default:
217 pvFlat = Addr;
218 break;
219 }
220 }
221 else
222 {
223 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
224 Assert(pSReg->u64Base <= UINT32_C(0xffffffff));
225 pvFlat = pSReg->u64Base + Addr;
226 pvFlat &= UINT32_C(0xffffffff);
227 }
228
229 /*
230 * Check type if present.
231 */
232 if (pSReg->Attr.n.u1Present)
233 {
234 switch (pSReg->Attr.n.u4Type)
235 {
236 /* Read only selector type. */
237 case X86_SEL_TYPE_RO:
238 case X86_SEL_TYPE_RO_ACC:
239 case X86_SEL_TYPE_RW:
240 case X86_SEL_TYPE_RW_ACC:
241 case X86_SEL_TYPE_EO:
242 case X86_SEL_TYPE_EO_ACC:
243 case X86_SEL_TYPE_ER:
244 case X86_SEL_TYPE_ER_ACC:
245 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
246 {
247 /** @todo fix this mess */
248 }
249 /* check limit. */
250 if (fCheckLimit && Addr > pSReg->u32Limit)
251 return VERR_OUT_OF_SELECTOR_BOUNDS;
252 /* ok */
253 if (ppvGC)
254 *ppvGC = pvFlat;
255 return VINF_SUCCESS;
256
257 case X86_SEL_TYPE_EO_CONF:
258 case X86_SEL_TYPE_EO_CONF_ACC:
259 case X86_SEL_TYPE_ER_CONF:
260 case X86_SEL_TYPE_ER_CONF_ACC:
261 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
262 {
263 /** @todo fix this mess */
264 }
265 /* check limit. */
266 if (fCheckLimit && Addr > pSReg->u32Limit)
267 return VERR_OUT_OF_SELECTOR_BOUNDS;
268 /* ok */
269 if (ppvGC)
270 *ppvGC = pvFlat;
271 return VINF_SUCCESS;
272
273 case X86_SEL_TYPE_RO_DOWN:
274 case X86_SEL_TYPE_RO_DOWN_ACC:
275 case X86_SEL_TYPE_RW_DOWN:
276 case X86_SEL_TYPE_RW_DOWN_ACC:
277 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
278 {
279 /** @todo fix this mess */
280 }
281 /* check limit. */
282 if (fCheckLimit)
283 {
284 if (!pSReg->Attr.n.u1Granularity && Addr > UINT32_C(0xffff))
285 return VERR_OUT_OF_SELECTOR_BOUNDS;
286 if (Addr <= pSReg->u32Limit)
287 return VERR_OUT_OF_SELECTOR_BOUNDS;
288 }
289 /* ok */
290 if (ppvGC)
291 *ppvGC = pvFlat;
292 return VINF_SUCCESS;
293
294 default:
295 return VERR_INVALID_SELECTOR;
296
297 }
298 }
299 return VERR_SELECTOR_NOT_PRESENT;
300}
301
302
303#ifdef VBOX_WITH_RAW_MODE_NOT_R0
304/**
305 * Converts a GC selector based address to a flat address.
306 *
307 * Some basic checking is done, but not all kinds yet.
308 *
309 * @returns VBox status
310 * @param pVCpu Pointer to the VMCPU.
311 * @param eflags Current eflags
312 * @param Sel Selector part.
313 * @param Addr Address part.
314 * @param fFlags SELMTOFLAT_FLAGS_*
315 * GDT entires are valid.
316 * @param ppvGC Where to store the GC flat address.
317 * @param pcb Where to store the bytes from *ppvGC which can be accessed according to
318 * the selector. NULL is allowed.
319 * @remarks Don't use when in long mode.
320 */
321VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr,
322 uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
323{
324 Assert(!CPUMIsGuestInLongMode(pVCpu)); /* DON'T USE! (Accessing shadow GDT/LDT.) */
325 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
326
327 /*
328 * Deal with real & v86 mode first.
329 */
330 if ( eflags.Bits.u1VM
331 || CPUMIsGuestInRealMode(pVCpu))
332 {
333 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
334 if (ppvGC)
335 *ppvGC = ((RTGCUINTPTR)Sel << 4) + uFlat;
336 if (pcb)
337 *pcb = 0x10000 - uFlat;
338 return VINF_SUCCESS;
339 }
340
341 /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
342 X86DESC Desc;
343 PVM pVM = pVCpu->CTX_SUFF(pVM);
344 if (!(Sel & X86_SEL_LDT))
345 {
346 if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
347 && (Sel | X86_SEL_RPL_LDT) > pVM->selm.s.GuestGdtr.cbGdt)
348 return VERR_INVALID_SELECTOR;
349 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
350 }
351 else
352 {
353 if ((Sel | X86_SEL_RPL_LDT) > pVM->selm.s.cbLdtLimit)
354 return VERR_INVALID_SELECTOR;
355
356 /** @todo handle LDT page(s) not present! */
357 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
358 Desc = paLDT[Sel >> X86_SEL_SHIFT];
359 }
360
361 /* calc limit. */
362 uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
363
364 /* calc address assuming straight stuff. */
365 RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc);
366
367 /* Cut the address to 32 bits. */
368 Assert(!CPUMIsGuestInLongMode(pVCpu));
369 pvFlat &= 0xffffffff;
370
371 uint8_t u1Present = Desc.Gen.u1Present;
372 uint8_t u1Granularity = Desc.Gen.u1Granularity;
373 uint8_t u1DescType = Desc.Gen.u1DescType;
374 uint8_t u4Type = Desc.Gen.u4Type;
375
376 /*
377 * Check if present.
378 */
379 if (u1Present)
380 {
381 /*
382 * Type check.
383 */
384#define BOTH(a, b) ((a << 16) | b)
385 switch (BOTH(u1DescType, u4Type))
386 {
387
388 /** Read only selector type. */
389 case BOTH(1,X86_SEL_TYPE_RO):
390 case BOTH(1,X86_SEL_TYPE_RO_ACC):
391 case BOTH(1,X86_SEL_TYPE_RW):
392 case BOTH(1,X86_SEL_TYPE_RW_ACC):
393 case BOTH(1,X86_SEL_TYPE_EO):
394 case BOTH(1,X86_SEL_TYPE_EO_ACC):
395 case BOTH(1,X86_SEL_TYPE_ER):
396 case BOTH(1,X86_SEL_TYPE_ER_ACC):
397 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
398 {
399 /** @todo fix this mess */
400 }
401 /* check limit. */
402 if ((RTGCUINTPTR)Addr > u32Limit)
403 return VERR_OUT_OF_SELECTOR_BOUNDS;
404 /* ok */
405 if (ppvGC)
406 *ppvGC = pvFlat;
407 if (pcb)
408 *pcb = u32Limit - (uint32_t)Addr + 1;
409 return VINF_SUCCESS;
410
411 case BOTH(1,X86_SEL_TYPE_EO_CONF):
412 case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
413 case BOTH(1,X86_SEL_TYPE_ER_CONF):
414 case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
415 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
416 {
417 /** @todo fix this mess */
418 }
419 /* check limit. */
420 if ((RTGCUINTPTR)Addr > u32Limit)
421 return VERR_OUT_OF_SELECTOR_BOUNDS;
422 /* ok */
423 if (ppvGC)
424 *ppvGC = pvFlat;
425 if (pcb)
426 *pcb = u32Limit - (uint32_t)Addr + 1;
427 return VINF_SUCCESS;
428
429 case BOTH(1,X86_SEL_TYPE_RO_DOWN):
430 case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
431 case BOTH(1,X86_SEL_TYPE_RW_DOWN):
432 case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
433 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
434 {
435 /** @todo fix this mess */
436 }
437 /* check limit. */
438 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
439 return VERR_OUT_OF_SELECTOR_BOUNDS;
440 if ((RTGCUINTPTR)Addr <= u32Limit)
441 return VERR_OUT_OF_SELECTOR_BOUNDS;
442
443 /* ok */
444 if (ppvGC)
445 *ppvGC = pvFlat;
446 if (pcb)
447 *pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
448 return VINF_SUCCESS;
449
450 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
451 case BOTH(0,X86_SEL_TYPE_SYS_LDT):
452 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
453 case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
454 case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
455 case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
456 case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
457 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
458 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
459 case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
460 case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
461 case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
462 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
463 {
464 /** @todo fix this mess */
465 }
466 /* check limit. */
467 if ((RTGCUINTPTR)Addr > u32Limit)
468 return VERR_OUT_OF_SELECTOR_BOUNDS;
469 /* ok */
470 if (ppvGC)
471 *ppvGC = pvFlat;
472 if (pcb)
473 *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
474 return VINF_SUCCESS;
475
476 default:
477 return VERR_INVALID_SELECTOR;
478
479 }
480#undef BOTH
481 }
482 return VERR_SELECTOR_NOT_PRESENT;
483}
484#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
485
486
487#ifdef VBOX_WITH_RAW_MODE_NOT_R0
488
489static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg,
490 RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
491{
492 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
493
494 /*
495 * Try read the entry.
496 */
497 X86DESC GstDesc;
498 int rc = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc));
499 if (RT_FAILURE(rc))
500 {
501 Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n", g_aszSRegNms[iSReg], Sel, rc));
502 STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors);
503 return;
504 }
505
506 /*
507 * Validate it and load it.
508 */
509 if (!selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
510 {
511 Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc));
512 STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood);
513 return;
514 }
515
516 selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc);
517 Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n",
518 g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
519 STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst);
520}
521
522
/**
 * CPUM helper that loads the hidden selector register from the descriptor
 * table when executing with raw-mode.
 *
 * Tries the shadow descriptor table first; if the shadow table is pending a
 * sync or its entry doesn't fit the register, falls back to reading the guest
 * table directly.
 *
 * @remarks This is only used when in legacy protected mode!
 *
 * @param   pVCpu   Pointer to the current virtual CPU.
 * @param   pCtx    The guest CPU context.
 * @param   pSReg   The selector register.
 *
 * @todo    Deal 100% correctly with stale selectors.  What's more evil is
 *          invalid page table entries, which isn't impossible to imagine for
 *          LDT entries for instance, though unlikely.  Currently, we turn a
 *          blind eye to these issues and return the old hidden registers,
 *          though we don't set the valid flag, so that we'll try loading them
 *          over and over again till we succeed loading something.
 */
VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg)
{
    /* Legacy protected mode only: PE set, long mode (LMA) clear. */
    Assert(pCtx->cr0 & X86_CR0_PE);
    Assert(!(pCtx->msrEFER & MSR_K6_EFER_LMA));

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(pVM->cCpus == 1);
    Assert(!HMIsEnabled(pVM));


    /*
     * Get the shadow descriptor table entry and validate it.
     * Should something go amiss, try the guest table.
     */
    RTSEL const    Sel   = pSReg->Sel;
    /* Derive the register index from the register's position in the context. */
    uint32_t const iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT);
    PCX86DESC      pShwDesc;
    if (!(Sel & X86_SEL_LDT))
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LGDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt);

        pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
        /* Shadow GDT out of sync or entry unusable -> read the guest GDT. */
        if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
            ||  !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }
    else
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LLDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit);

        pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK));
        /* Shadow LDT out of sync or entry unusable -> read the guest LDT. */
        if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
            ||  !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }

    /*
     * All fine, load it.
     */
    selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc);
    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw);
    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n",
         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
}
594
595#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
596
597/**
598 * Validates and converts a GC selector based code address to a flat
599 * address when in real or v8086 mode.
600 *
601 * @returns VINF_SUCCESS.
602 * @param pVCpu Pointer to the VMCPU.
603 * @param SelCS Selector part.
604 * @param pHidCS The hidden CS register part. Optional.
605 * @param Addr Address part.
606 * @param ppvFlat Where to store the flat address.
607 */
608DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pSReg, RTGCPTR Addr,
609 PRTGCPTR ppvFlat)
610{
611 RTGCUINTPTR uFlat = Addr & 0xffff;
612 if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
613 uFlat += (RTGCUINTPTR)SelCS << 4;
614 else
615 uFlat += pSReg->u64Base;
616 *ppvFlat = uFlat;
617 return VINF_SUCCESS;
618}
619
620
621#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Validates and converts a GC selector based code address to a flat address
 * when in protected/long mode using the raw-mode algorithm.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   SelCPL  Current privilege level. Get this from SS - CS might be
 *                  conforming! A full selector can be passed, we'll only
 *                  use the RPL part.
 * @param   SelCS   Selector part.
 * @param   Addr    Address part.
 * @param   ppvFlat Where to store the flat address.
 * @param   pcBits  Where to store the segment bitness (16/32/64). Optional.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrRawMode(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
                                                    PRTGCPTR ppvFlat, uint32_t *pcBits)
{
    NOREF(pVCpu);
    Assert(!HMIsEnabled(pVM));

    /* Fetch the descriptor from the shadow GDT or LDT. */
    /** @todo validate limit! */
    X86DESC Desc;
    if (!(SelCS & X86_SEL_LDT))
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[SelCS >> X86_SEL_SHIFT];
    }

    /*
     * Check if present.
     */
    if (Desc.Gen.u1Present)
    {
        /*
         * Type check: must be a code segment descriptor.
         */
        if (    Desc.Gen.u1DescType == 1
            &&  (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
        {
            /*
             * Check level: non-conforming requires CPL <= DPL,
             * conforming requires CPL >= DPL.
             */
            unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
            if (    !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
                ?   uLevel <= Desc.Gen.u2Dpl
                :   uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
                )
            {
                /*
                 * Limit check.
                 */
                uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
                if ((RTGCUINTPTR)Addr <= u32Limit)
                {
                    *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc));
                    /* Cut the address to 32 bits. */
                    *ppvFlat &= 0xffffffff;

                    if (pcBits)
                        *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
                    return VINF_SUCCESS;
                }
                return VERR_OUT_OF_SELECTOR_BOUNDS;
            }
            return VERR_INVALID_RPL;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
696#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
697
698
699/**
700 * Validates and converts a GC selector based code address to a flat address
701 * when in protected/long mode using the standard hidden selector registers
702 *
703 * @returns VBox status code.
704 * @param pVCpu Pointer to the VMCPU.
705 * @param SelCPL Current privilege level. Get this from SS - CS might be
706 * conforming! A full selector can be passed, we'll only
707 * use the RPL part.
708 * @param SelCS Selector part.
709 * @param pSRegCS The full CS selector register.
710 * @param Addr The address (think IP/EIP/RIP).
711 * @param ppvFlat Where to store the flat address upon successful return.
712 */
713DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pSRegCS,
714 RTGCPTR Addr, PRTGCPTR ppvFlat)
715{
716 /*
717 * Check if present.
718 */
719 if (pSRegCS->Attr.n.u1Present)
720 {
721 /*
722 * Type check.
723 */
724 if ( pSRegCS->Attr.n.u1DescType == 1
725 && (pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
726 {
727 /*
728 * Check level.
729 */
730 unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
731 if ( !(pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CONF)
732 ? uLevel <= pSRegCS->Attr.n.u2Dpl
733 : uLevel >= pSRegCS->Attr.n.u2Dpl /* hope I got this right now... */
734 )
735 {
736 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
737 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
738 if ( pSRegCS->Attr.n.u1Long
739 && CPUMIsGuestInLongMode(pVCpu))
740 {
741 *ppvFlat = Addr;
742 return VINF_SUCCESS;
743 }
744
745 /*
746 * Limit check. Note that the limit in the hidden register is the
747 * final value. The granularity bit was included in its calculation.
748 */
749 uint32_t u32Limit = pSRegCS->u32Limit;
750 if ((RTGCUINTPTR)Addr <= u32Limit)
751 {
752 *ppvFlat = Addr + pSRegCS->u64Base;
753 return VINF_SUCCESS;
754 }
755
756 return VERR_OUT_OF_SELECTOR_BOUNDS;
757 }
758 Log(("selmValidateAndConvertCSAddrHidden: Invalid RPL Attr.n.u4Type=%x cpl=%x dpl=%x\n",
759 pSRegCS->Attr.n.u4Type, uLevel, pSRegCS->Attr.n.u2Dpl));
760 return VERR_INVALID_RPL;
761 }
762 return VERR_NOT_CODE_SELECTOR;
763 }
764 return VERR_SELECTOR_NOT_PRESENT;
765}
766
767
/**
 * Validates and converts a GC selector based code address to a flat address.
 *
 * Dispatches to the real/v8086, raw-mode or hidden-register variant as
 * appropriate.
 *
 * @returns VBox status code.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   Efl     Current EFLAGS.
 * @param   SelCPL  Current privilege level. Get this from SS - CS might be
 *                  conforming! A full selector can be passed, we'll only
 *                  use the RPL part.
 * @param   SelCS   Selector part.
 * @param   pSRegCS The full CS selector register.
 * @param   Addr    The address (think IP/EIP/RIP).
 * @param   ppvFlat Where to store the flat address upon successful return.
 */
VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS Efl, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,
                                          RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    /* Real and v8086 mode need no descriptor table lookups. */
    if (    Efl.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
        return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pSRegCS, Addr, ppvFlat);

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Use the hidden registers when possible, updating them if outdated. */
    if (!pSRegCS)
        return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);

    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);

    /* Undo ring compression (raw-mode runs ring-0 guest code in ring 1):
       strip the RPL=1 bias before the privilege checks. */
    if ((SelCPL & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCPL &= ~X86_SEL_RPL;
    Assert(pSRegCS->Sel == SelCS);
    if ((SelCS & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCS &= ~X86_SEL_RPL;
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS));
    Assert(pSRegCS->Sel == SelCS);
#endif

    return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pSRegCS, Addr, ppvFlat);
}
810
811
/**
 * Returns Hypervisor's Trap 08 (\#DF) selector.
 *
 * @returns Hypervisor's Trap 08 (\#DF) TSS selector.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}
822
823
/**
 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
 *
 * @param   pVM     Pointer to the VM.
 * @param   u32EIP  EIP of Trap 08 handler.
 */
VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
{
    pVM->selm.s.TssTrap08.eip = u32EIP;
}
834
835
/**
 * Sets ss:esp for ring1 in main Hypervisor's TSS.
 *
 * @param   pVM     Pointer to the VM.
 * @param   ss      Ring1 SS register value. Pass 0 if invalid.
 * @param   esp     Ring1 ESP register value.
 */
void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
    Assert(!HMIsEnabled(pVM));
    /* Either a ring-1 selector (low RPL bit set) or an invalid 0:0 stack. */
    Assert((ss & 1) || esp == 0);
    pVM->selm.s.Tss.ss1  = ss;
    pVM->selm.s.Tss.esp1 = (uint32_t)esp;
}
850
851
852#ifdef VBOX_WITH_RAW_RING1
/**
 * Sets ss:esp for ring2 in main Hypervisor's TSS.
 *
 * @param   pVM     Pointer to the VM.
 * @param   ss      Ring2 SS register value. Pass 0 if invalid.
 * @param   esp     Ring2 ESP register value.
 */
void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
{
    Assert(!HMIsEnabled(pVM));
    /* Either a ring-2 selector (RPL == 2) or an invalid 0:0 stack. */
    Assert((ss & 3) == 2 || esp == 0);
    pVM->selm.s.Tss.ss2  = ss;
    pVM->selm.s.Tss.esp2 = (uint32_t)esp;
}
867#endif
868
869
870#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Gets ss:esp for ring1 in main Hypervisor's TSS.
 *
 * Returns SS=0 if the ring-1 stack isn't valid.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSS     Where to store the ring-1 SS register value.
 * @param   pEsp    Where to store the ring-1 ESP register value.
 */
VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
{
    Assert(!HMIsEnabled(pVM));
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = &pVM->aCpus[0];

#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.fSyncTSSRing0Stack)
    {
#endif
        /* Our cached copy is stale: re-read ss0:esp0 from the guest TSS. */
        RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
        int     rc;
        VBOXTSS tss;

        Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);

# ifdef IN_RC
        bool fTriedAlready = false;

l_tryagain:
        /* In raw-mode context the guest TSS address is directly addressable;
           read only the fields we need. */
        PVBOXTSS pTss = (PVBOXTSS)(uintptr_t)GCPtrTss;
        rc  = MMGCRamRead(pVM, &tss.ss0,  &pTss->ss0,  sizeof(tss.ss0));
        rc |= MMGCRamRead(pVM, &tss.esp0, &pTss->esp0, sizeof(tss.esp0));
# ifdef DEBUG
        rc |= MMGCRamRead(pVM, &tss.offIoBitmap, &pTss->offIoBitmap, sizeof(tss.offIoBitmap));
# endif

        if (RT_FAILURE(rc))
        {
            if (!fTriedAlready)
            {
                /* Shadow page might be out of sync. Sync and try again */
                /** @todo might cross page boundary */
                fTriedAlready = true;
                rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
                if (rc != VINF_SUCCESS)
                    return rc;
                /* Only retry once; a second failure is fatal. */
                goto l_tryagain;
            }
            AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }

# else /* !IN_RC */
        /* Reading too much. Could be cheaper than two separate calls though. */
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
        if (RT_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }
# endif /* !IN_RC */

# ifdef LOG_ENABLED
        uint32_t ssr0  = pVM->selm.s.Tss.ss1;
        uint32_t espr0 = pVM->selm.s.Tss.esp1;
        ssr0 &= ~1;

        if (ssr0 != tss.ss0 || espr0 != tss.esp0)
            Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));

        Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
# endif
        /* Update our TSS structure for the guest's ring 1 stack */
        selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
        pVM->selm.s.fSyncTSSRing0Stack = false;
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    }
#endif

    *pSS  = pVM->selm.s.Tss.ss1;
    *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;

    return VINF_SUCCESS;
}
956#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
957
958
959#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
960
/**
 * Gets the hypervisor code selector (CS).
 *
 * @returns CS selector.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
}
970
971
/**
 * Gets the 64-mode hypervisor code selector (CS64).
 *
 * @returns CS selector.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
}
981
982
/**
 * Gets the hypervisor data selector (DS).
 *
 * @returns DS selector.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
}
992
993
/**
 * Gets the hypervisor TSS selector.
 *
 * @returns TSS selector.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
}
1003
1004
/**
 * Gets the hypervisor TSS Trap 8 selector.
 *
 * @returns TSS Trap 8 selector.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}
1014
/**
 * Gets the address for the hypervisor GDT.
 *
 * @returns The GDT address (raw-mode context pointer).
 * @param   pVM     Pointer to the VM.
 * @remark  This is intended only for very special use, like in the world
 *          switchers. Don't exploit this API!
 */
VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
{
    /*
     * Always convert this from the HC pointer since we can be
     * called before the first relocation and have to work correctly
     * without having dependencies on the relocation order.
     */
    return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
}
1032
1033#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */
1034
1035/**
1036 * Gets info about the current TSS.
1037 *
1038 * @returns VBox status code.
1039 * @retval VINF_SUCCESS if we've got a TSS loaded.
1040 * @retval VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
1041 *
1042 * @param pVM Pointer to the VM.
1043 * @param pVCpu Pointer to the VMCPU.
1044 * @param pGCPtrTss Where to store the TSS address.
1045 * @param pcbTss Where to store the TSS size limit.
1046 * @param pfCanHaveIOBitmap Where to store the can-have-I/O-bitmap indicator. (optional)
1047 */
1048VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
1049{
1050 NOREF(pVM);
1051
1052 /*
1053 * The TR hidden register is always valid.
1054 */
1055 CPUMSELREGHID trHid;
1056 RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
1057 if (!(tr & X86_SEL_MASK_OFF_RPL))
1058 return VERR_SELM_NO_TSS;
1059
1060 *pGCPtrTss = trHid.u64Base;
1061 *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
1062 if (pfCanHaveIOBitmap)
1063 *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1064 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1065 return VINF_SUCCESS;
1066}
1067
1068
1069
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 * This is called by PGM.
 *
 * Updates the CR3 fields of the main and Trap-08 hypervisor TSSes from PGM.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
{
    /** @todo SMP support!! (64-bit guest scenario, primarily) */
    pVM->selm.s.Tss.cr3       = PGMGetHyperCR3(pVCpu);
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
}
1084
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette