VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/SELMAll.cpp@ 55979

Last change on this file since 55979 was 55899, checked in by vboxsync, 10 years ago

PGM: Added an access origin to memory read & write calls that respects handlers. This will later be passed to the access handler, so that things like the page pool (and potentially others) can query IEM about instruction details when needed.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 35.2 KB
Line 
1/* $Id: SELMAll.cpp 55899 2015-05-18 09:47:57Z vboxsync $ */
2/** @file
3 * SELM All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_SELM
23#include <VBox/vmm/selm.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/em.h>
26#include <VBox/vmm/mm.h>
27#include <VBox/vmm/hm.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/hm.h>
30#include "SELMInternal.h"
31#include <VBox/vmm/vm.h>
32#include <VBox/err.h>
33#include <VBox/param.h>
34#include <iprt/assert.h>
35#include <VBox/vmm/vmm.h>
36#include <iprt/x86.h>
37
38#include "SELMInline.h"
39
40
41/*******************************************************************************
42* Global Variables *
43*******************************************************************************/
#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
/** Segment register names, indexed by segment register number (X86_SREG_XXX).
 *  Used for logging in the raw-mode hidden-selector loading code below. */
static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
#endif
48
49
50
51#ifdef VBOX_WITH_RAW_MODE_NOT_R0
52/**
53 * Converts a GC selector based address to a flat address.
54 *
55 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
56 * for that.
57 *
58 * @returns Flat address.
59 * @param pVM Pointer to the VM.
60 * @param Sel Selector part.
61 * @param Addr Address part.
62 * @remarks Don't use when in long mode.
63 */
64VMMDECL(RTGCPTR) SELMToFlatBySel(PVM pVM, RTSEL Sel, RTGCPTR Addr)
65{
66 Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM))); /* DON'T USE! */
67 Assert(!HMIsEnabled(pVM));
68
69 /** @todo check the limit. */
70 X86DESC Desc;
71 if (!(Sel & X86_SEL_LDT))
72 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
73 else
74 {
75 /** @todo handle LDT pages not present! */
76 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
77 Desc = paLDT[Sel >> X86_SEL_SHIFT];
78 }
79
80 return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
81}
82#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
83
84
85/**
86 * Converts a GC selector based address to a flat address.
87 *
88 * No limit checks are done. Use the SELMToFlat*() or SELMValidate*() functions
89 * for that.
90 *
91 * @returns Flat address.
92 * @param pVM Pointer to the VM.
93 * @param SelReg Selector register
94 * @param pCtxCore CPU context
95 * @param Addr Address part.
96 */
97VMMDECL(RTGCPTR) SELMToFlat(PVM pVM, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr)
98{
99 PCPUMSELREG pSReg;
100 PVMCPU pVCpu = VMMGetCpu(pVM);
101
102 int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg); AssertRC(rc);
103
104 /*
105 * Deal with real & v86 mode first.
106 */
107 if ( pCtxCore->eflags.Bits.u1VM
108 || CPUMIsGuestInRealMode(pVCpu))
109 {
110 uint32_t uFlat = (uint32_t)Addr & 0xffff;
111 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
112 uFlat += (uint32_t)pSReg->u64Base;
113 else
114 uFlat += (uint32_t)pSReg->Sel << 4;
115 return (RTGCPTR)uFlat;
116 }
117
118#ifdef VBOX_WITH_RAW_MODE_NOT_R0
119 /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
120 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
121 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
122 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
123 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
124#else
125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
127#endif
128
129 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
130 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
131 if ( pCtxCore->cs.Attr.n.u1Long
132 && CPUMIsGuestInLongMode(pVCpu))
133 {
134 switch (SelReg)
135 {
136 case DISSELREG_FS:
137 case DISSELREG_GS:
138 return (RTGCPTR)(pSReg->u64Base + Addr);
139
140 default:
141 return Addr; /* base 0 */
142 }
143 }
144
145 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
146 Assert(pSReg->u64Base <= 0xffffffff);
147 return (uint32_t)pSReg->u64Base + (uint32_t)Addr;
148}
149
150
/**
 * Converts a GC selector based address to a flat address.
 *
 * Some basic checking is done, but not all kinds yet.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS with *ppvGC set (when given) on success.
 * @retval  VERR_OUT_OF_SELECTOR_BOUNDS if Addr fails the limit check.
 * @retval  VERR_INVALID_SELECTOR for unsupported descriptor types.
 * @retval  VERR_SELECTOR_NOT_PRESENT if the descriptor present bit is clear.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   SelReg      Selector register (DISSELREG_XXX).
 * @param   pCtxCore    CPU context.
 * @param   Addr        Address part.
 * @param   fFlags      SELMTOFLAT_FLAGS_* flags.
 * @param   ppvGC       Where to store the GC flat address. Optional.
 */
VMMDECL(int) SELMToFlatEx(PVMCPU pVCpu, DISSELREG SelReg, PCPUMCTXCORE pCtxCore, RTGCPTR Addr, uint32_t fFlags, PRTGCPTR ppvGC)
{
    /*
     * Fetch the selector first.
     */
    PCPUMSELREG pSReg;
    int rc = DISFetchRegSegEx(pCtxCore, SelReg, &pSReg);
    AssertRCReturn(rc, rc); AssertPtr(pSReg);

    /*
     * Deal with real & v86 mode first.  No descriptor checks apply; the base
     * is either the cached hidden base or Sel << 4.
     */
    if (    pCtxCore->eflags.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
    {
        if (ppvGC)
        {
            uint32_t uFlat = (uint32_t)Addr & 0xffff;
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
                *ppvGC = (uint32_t)pSReg->u64Base + uFlat;
            else
                *ppvGC = ((uint32_t)pSReg->Sel << 4) + uFlat;
        }
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Raw-mode may have stale hidden parts; reload them on demand. */
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
#endif

    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
       (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
    RTGCPTR pvFlat;
    bool    fCheckLimit = true;
    if (    pCtxCore->cs.Attr.n.u1Long
        &&  CPUMIsGuestInLongMode(pVCpu))
    {
        fCheckLimit = false; /* no limit checking in 64-bit mode */
        switch (SelReg)
        {
            case DISSELREG_FS:
            case DISSELREG_GS:
                pvFlat = pSReg->u64Base + Addr;
                break;

            default:
                pvFlat = Addr; /* base 0 */
                break;
        }
    }
    else
    {
        /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */
        Assert(pSReg->u64Base <= UINT32_C(0xffffffff));
        pvFlat = (uint32_t)pSReg->u64Base + (uint32_t)Addr;
        Assert(pvFlat <= UINT32_MAX);
    }

    /*
     * Check type if present.
     */
    if (pSReg->Attr.n.u1Present)
    {
        switch (pSReg->Attr.n.u4Type)
        {
            /* Read only selector type. */
            case X86_SEL_TYPE_RO:
            case X86_SEL_TYPE_RO_ACC:
            case X86_SEL_TYPE_RW:
            case X86_SEL_TYPE_RW_ACC:
            case X86_SEL_TYPE_EO:
            case X86_SEL_TYPE_EO_ACC:
            case X86_SEL_TYPE_ER:
            case X86_SEL_TYPE_ER_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit && Addr > pSReg->u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            /* Conforming code selectors. */
            case X86_SEL_TYPE_EO_CONF:
            case X86_SEL_TYPE_EO_CONF_ACC:
            case X86_SEL_TYPE_ER_CONF:
            case X86_SEL_TYPE_ER_CONF_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit. */
                if (fCheckLimit && Addr > pSReg->u32Limit)
                    return VERR_OUT_OF_SELECTOR_BOUNDS;
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            /* Expand-down data selectors: valid offsets lie ABOVE the limit,
               up to the segment's upper bound. */
            case X86_SEL_TYPE_RO_DOWN:
            case X86_SEL_TYPE_RO_DOWN_ACC:
            case X86_SEL_TYPE_RW_DOWN:
            case X86_SEL_TYPE_RW_DOWN_ACC:
                if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
                {
                    /** @todo fix this mess */
                }
                /* check limit.
                   NOTE(review): the 0xffff upper bound keys off the G bit here;
                   the Intel SDM ties the expand-down upper bound to the D/B
                   bit instead -- confirm before relying on this for 16-bit
                   expand-down segments. */
                if (fCheckLimit)
                {
                    if (!pSReg->Attr.n.u1Granularity && Addr > UINT32_C(0xffff))
                        return VERR_OUT_OF_SELECTOR_BOUNDS;
                    if (Addr <= pSReg->u32Limit)
                        return VERR_OUT_OF_SELECTOR_BOUNDS;
                }
                /* ok */
                if (ppvGC)
                    *ppvGC = pvFlat;
                return VINF_SUCCESS;

            default:
                return VERR_INVALID_SELECTOR;

        }
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
301
302
303#ifdef VBOX_WITH_RAW_MODE_NOT_R0
304/**
305 * Converts a GC selector based address to a flat address.
306 *
307 * Some basic checking is done, but not all kinds yet.
308 *
309 * @returns VBox status
310 * @param pVCpu Pointer to the VMCPU.
311 * @param eflags Current eflags
312 * @param Sel Selector part.
313 * @param Addr Address part.
314 * @param fFlags SELMTOFLAT_FLAGS_*
315 * GDT entires are valid.
316 * @param ppvGC Where to store the GC flat address.
317 * @param pcb Where to store the bytes from *ppvGC which can be accessed according to
318 * the selector. NULL is allowed.
319 * @remarks Don't use when in long mode.
320 */
321VMMDECL(int) SELMToFlatBySelEx(PVMCPU pVCpu, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr,
322 uint32_t fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
323{
324 Assert(!CPUMIsGuestInLongMode(pVCpu)); /* DON'T USE! (Accessing shadow GDT/LDT.) */
325 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
326
327 /*
328 * Deal with real & v86 mode first.
329 */
330 if ( eflags.Bits.u1VM
331 || CPUMIsGuestInRealMode(pVCpu))
332 {
333 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
334 if (ppvGC)
335 *ppvGC = ((RTGCUINTPTR)Sel << 4) + uFlat;
336 if (pcb)
337 *pcb = 0x10000 - uFlat;
338 return VINF_SUCCESS;
339 }
340
341 /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
342 X86DESC Desc;
343 PVM pVM = pVCpu->CTX_SUFF(pVM);
344 if (!(Sel & X86_SEL_LDT))
345 {
346 if ( !(fFlags & SELMTOFLAT_FLAGS_HYPER)
347 && (Sel | X86_SEL_RPL_LDT) > pVM->selm.s.GuestGdtr.cbGdt)
348 return VERR_INVALID_SELECTOR;
349 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
350 }
351 else
352 {
353 if ((Sel | X86_SEL_RPL_LDT) > pVM->selm.s.cbLdtLimit)
354 return VERR_INVALID_SELECTOR;
355
356 /** @todo handle LDT page(s) not present! */
357 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
358 Desc = paLDT[Sel >> X86_SEL_SHIFT];
359 }
360
361 /* calc limit. */
362 uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
363
364 /* calc address assuming straight stuff. */
365 RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc);
366
367 /* Cut the address to 32 bits. */
368 Assert(!CPUMIsGuestInLongMode(pVCpu));
369 pvFlat &= 0xffffffff;
370
371 uint8_t u1Present = Desc.Gen.u1Present;
372 uint8_t u1Granularity = Desc.Gen.u1Granularity;
373 uint8_t u1DescType = Desc.Gen.u1DescType;
374 uint8_t u4Type = Desc.Gen.u4Type;
375
376 /*
377 * Check if present.
378 */
379 if (u1Present)
380 {
381 /*
382 * Type check.
383 */
384#define BOTH(a, b) ((a << 16) | b)
385 switch (BOTH(u1DescType, u4Type))
386 {
387
388 /** Read only selector type. */
389 case BOTH(1,X86_SEL_TYPE_RO):
390 case BOTH(1,X86_SEL_TYPE_RO_ACC):
391 case BOTH(1,X86_SEL_TYPE_RW):
392 case BOTH(1,X86_SEL_TYPE_RW_ACC):
393 case BOTH(1,X86_SEL_TYPE_EO):
394 case BOTH(1,X86_SEL_TYPE_EO_ACC):
395 case BOTH(1,X86_SEL_TYPE_ER):
396 case BOTH(1,X86_SEL_TYPE_ER_ACC):
397 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
398 {
399 /** @todo fix this mess */
400 }
401 /* check limit. */
402 if ((RTGCUINTPTR)Addr > u32Limit)
403 return VERR_OUT_OF_SELECTOR_BOUNDS;
404 /* ok */
405 if (ppvGC)
406 *ppvGC = pvFlat;
407 if (pcb)
408 *pcb = u32Limit - (uint32_t)Addr + 1;
409 return VINF_SUCCESS;
410
411 case BOTH(1,X86_SEL_TYPE_EO_CONF):
412 case BOTH(1,X86_SEL_TYPE_EO_CONF_ACC):
413 case BOTH(1,X86_SEL_TYPE_ER_CONF):
414 case BOTH(1,X86_SEL_TYPE_ER_CONF_ACC):
415 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
416 {
417 /** @todo fix this mess */
418 }
419 /* check limit. */
420 if ((RTGCUINTPTR)Addr > u32Limit)
421 return VERR_OUT_OF_SELECTOR_BOUNDS;
422 /* ok */
423 if (ppvGC)
424 *ppvGC = pvFlat;
425 if (pcb)
426 *pcb = u32Limit - (uint32_t)Addr + 1;
427 return VINF_SUCCESS;
428
429 case BOTH(1,X86_SEL_TYPE_RO_DOWN):
430 case BOTH(1,X86_SEL_TYPE_RO_DOWN_ACC):
431 case BOTH(1,X86_SEL_TYPE_RW_DOWN):
432 case BOTH(1,X86_SEL_TYPE_RW_DOWN_ACC):
433 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
434 {
435 /** @todo fix this mess */
436 }
437 /* check limit. */
438 if (!u1Granularity && (RTGCUINTPTR)Addr > (RTGCUINTPTR)0xffff)
439 return VERR_OUT_OF_SELECTOR_BOUNDS;
440 if ((RTGCUINTPTR)Addr <= u32Limit)
441 return VERR_OUT_OF_SELECTOR_BOUNDS;
442
443 /* ok */
444 if (ppvGC)
445 *ppvGC = pvFlat;
446 if (pcb)
447 *pcb = (RTGCUINTPTR)(u1Granularity ? 0xffffffff : 0xffff) - (RTGCUINTPTR)Addr + 1;
448 return VINF_SUCCESS;
449
450 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_AVAIL):
451 case BOTH(0,X86_SEL_TYPE_SYS_LDT):
452 case BOTH(0,X86_SEL_TYPE_SYS_286_TSS_BUSY):
453 case BOTH(0,X86_SEL_TYPE_SYS_286_CALL_GATE):
454 case BOTH(0,X86_SEL_TYPE_SYS_TASK_GATE):
455 case BOTH(0,X86_SEL_TYPE_SYS_286_INT_GATE):
456 case BOTH(0,X86_SEL_TYPE_SYS_286_TRAP_GATE):
457 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_AVAIL):
458 case BOTH(0,X86_SEL_TYPE_SYS_386_TSS_BUSY):
459 case BOTH(0,X86_SEL_TYPE_SYS_386_CALL_GATE):
460 case BOTH(0,X86_SEL_TYPE_SYS_386_INT_GATE):
461 case BOTH(0,X86_SEL_TYPE_SYS_386_TRAP_GATE):
462 if (!(fFlags & SELMTOFLAT_FLAGS_NO_PL))
463 {
464 /** @todo fix this mess */
465 }
466 /* check limit. */
467 if ((RTGCUINTPTR)Addr > u32Limit)
468 return VERR_OUT_OF_SELECTOR_BOUNDS;
469 /* ok */
470 if (ppvGC)
471 *ppvGC = pvFlat;
472 if (pcb)
473 *pcb = 0xffffffff - (RTGCUINTPTR)pvFlat + 1; /* Depends on the type.. fixme if we care. */
474 return VINF_SUCCESS;
475
476 default:
477 return VERR_INVALID_SELECTOR;
478
479 }
480#undef BOTH
481 }
482 return VERR_SELECTOR_NOT_PRESENT;
483}
484#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
485
486
487#ifdef VBOX_WITH_RAW_MODE_NOT_R0
488
489static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg,
490 RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
491{
492 Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
493
494 /*
495 * Try read the entry.
496 */
497 X86DESC GstDesc;
498 int rc = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc), PGMACCESSORIGIN_IOM);
499 if (RT_FAILURE(rc))
500 {
501 Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n", g_aszSRegNms[iSReg], Sel, rc));
502 STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors);
503 return;
504 }
505
506 /*
507 * Validate it and load it.
508 */
509 if (!selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
510 {
511 Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc));
512 STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood);
513 return;
514 }
515
516 selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc);
517 Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n",
518 g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
519 STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst);
520}
521
522
/**
 * CPUM helper that loads the hidden selector register from the descriptor table
 * when executing with raw-mode.
 *
 * Tries the shadow GDT/LDT entry first; if the shadow table is flagged for
 * resync or the entry fails validation, falls back to reading the guest table.
 *
 * @remarks This is only used when in legacy protected mode!
 *
 * @param   pVCpu   Pointer to the current virtual CPU.
 * @param   pCtx    The guest CPU context.
 * @param   pSReg   The selector register.
 *
 * @todo    Deal 100% correctly with stale selectors.  What's more evil is
 *          invalid page table entries, which isn't impossible to imagine for
 *          LDT entries for instance, though unlikely.  Currently, we turn a
 *          blind eye to these issues and return the old hidden registers,
 *          though we don't set the valid flag, so that we'll try loading them
 *          over and over again till we succeed loading something.
 */
VMM_INT_DECL(void) SELMLoadHiddenSelectorReg(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg)
{
    Assert(pCtx->cr0 & X86_CR0_PE);
    Assert(!(pCtx->msrEFER & MSR_K6_EFER_LMA));

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(pVM->cCpus == 1);
    Assert(!HMIsEnabled(pVM));


    /*
     * Get the shadow descriptor table entry and validate it.
     * Should something go amiss, try the guest table.
     */
    RTSEL const    Sel   = pSReg->Sel;
    uint32_t const iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT);
    PCX86DESC      pShwDesc;
    if (!(Sel & X86_SEL_LDT))
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LGDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt);

        pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
        if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
            ||  !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            /* Shadow GDT dirty or entry unusable: read the guest GDT entry instead. */
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }
    else
    {
        /** @todo this shall not happen, we shall check for these things when executing
         *        LLDT */
        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit);

        pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK));
        if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
            ||  !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
        {
            /* Shadow LDT dirty or entry unusable: read the guest LDT entry instead. */
            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg);
            return;
        }
    }

    /*
     * All fine, load it.
     */
    selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc);
    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw);
    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n",
         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
}
594
595#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
596
597/**
598 * Validates and converts a GC selector based code address to a flat
599 * address when in real or v8086 mode.
600 *
601 * @returns VINF_SUCCESS.
602 * @param pVCpu Pointer to the VMCPU.
603 * @param SelCS Selector part.
604 * @param pHidCS The hidden CS register part. Optional.
605 * @param Addr Address part.
606 * @param ppvFlat Where to store the flat address.
607 */
608DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pSReg, RTGCPTR Addr,
609 PRTGCPTR ppvFlat)
610{
611 NOREF(pVCpu);
612 uint32_t uFlat = Addr & 0xffff;
613 if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
614 uFlat += (uint32_t)SelCS << 4;
615 else
616 uFlat += (uint32_t)pSReg->u64Base;
617 *ppvFlat = uFlat;
618 return VINF_SUCCESS;
619}
620
621
622#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Validates and converts a GC selector based code address to a flat address
 * when in protected/long mode using the raw-mode algorithm.
 *
 * Reads the descriptor from the shadow GDT/LDT instead of using hidden
 * selector registers.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_OUT_OF_SELECTOR_BOUNDS,
 *          VERR_INVALID_RPL, VERR_NOT_CODE_SELECTOR or
 *          VERR_SELECTOR_NOT_PRESENT).
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   SelCPL  Current privilege level. Get this from SS - CS might be
 *                  conforming! A full selector can be passed, we'll only
 *                  use the RPL part.
 * @param   SelCS   Selector part.
 * @param   Addr    Address part.
 * @param   ppvFlat Where to store the flat address.
 * @param   pcBits  Where to store the segment bitness (16/32/64). Optional.
 */
DECLINLINE(int) selmValidateAndConvertCSAddrRawMode(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
                                                    PRTGCPTR ppvFlat, uint32_t *pcBits)
{
    NOREF(pVCpu);
    Assert(!HMIsEnabled(pVM));

    /** @todo validate limit! */
    X86DESC Desc;
    if (!(SelCS & X86_SEL_LDT))
        Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];
    else
    {
        /** @todo handle LDT page(s) not present! */
        PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper);
        Desc = paLDT[SelCS >> X86_SEL_SHIFT];
    }

    /*
     * Check if present.
     */
    if (Desc.Gen.u1Present)
    {
        /*
         * Type check: must be a code descriptor (S=1, execute bit set).
         */
        if (    Desc.Gen.u1DescType == 1
            &&  (Desc.Gen.u4Type & X86_SEL_TYPE_CODE))
        {
            /*
             * Check level: non-conforming requires CPL <= DPL, conforming
             * the opposite comparison.
             */
            unsigned uLevel = RT_MAX(SelCPL & X86_SEL_RPL, SelCS & X86_SEL_RPL);
            if (    !(Desc.Gen.u4Type & X86_SEL_TYPE_CONF)
                ?   uLevel <= Desc.Gen.u2Dpl
                :   uLevel >= Desc.Gen.u2Dpl /* hope I got this right now... */
               )
            {
                /*
                 * Limit check.
                 */
                uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
                if ((RTGCUINTPTR)Addr <= u32Limit)
                {
                    *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc));
                    /* Cut the address to 32 bits. */
                    *ppvFlat &= 0xffffffff;

                    if (pcBits)
                        *pcBits = Desc.Gen.u1DefBig ? 32 : 16; /** @todo GUEST64 */
                    return VINF_SUCCESS;
                }
                return VERR_OUT_OF_SELECTOR_BOUNDS;
            }
            return VERR_INVALID_RPL;
        }
        return VERR_NOT_CODE_SELECTOR;
    }
    return VERR_SELECTOR_NOT_PRESENT;
}
697#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
698
699
700/**
701 * Validates and converts a GC selector based code address to a flat address
702 * when in protected/long mode using the standard hidden selector registers
703 *
704 * @returns VBox status code.
705 * @param pVCpu Pointer to the VMCPU.
706 * @param SelCPL Current privilege level. Get this from SS - CS might be
707 * conforming! A full selector can be passed, we'll only
708 * use the RPL part.
709 * @param SelCS Selector part.
710 * @param pSRegCS The full CS selector register.
711 * @param Addr The address (think IP/EIP/RIP).
712 * @param ppvFlat Where to store the flat address upon successful return.
713 */
714DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pSRegCS,
715 RTGCPTR Addr, PRTGCPTR ppvFlat)
716{
717 /*
718 * Check if present.
719 */
720 if (pSRegCS->Attr.n.u1Present)
721 {
722 /*
723 * Type check.
724 */
725 if ( pSRegCS->Attr.n.u1DescType == 1
726 && (pSRegCS->Attr.n.u4Type & X86_SEL_TYPE_CODE))
727 {
728 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
729 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */
730 if ( pSRegCS->Attr.n.u1Long
731 && CPUMIsGuestInLongMode(pVCpu))
732 {
733 *ppvFlat = Addr;
734 return VINF_SUCCESS;
735 }
736
737 /*
738 * Limit check. Note that the limit in the hidden register is the
739 * final value. The granularity bit was included in its calculation.
740 */
741 uint32_t u32Limit = pSRegCS->u32Limit;
742 if ((uint32_t)Addr <= u32Limit)
743 {
744 *ppvFlat = (uint32_t)Addr + (uint32_t)pSRegCS->u64Base;
745 return VINF_SUCCESS;
746 }
747
748 return VERR_OUT_OF_SELECTOR_BOUNDS;
749 }
750 return VERR_NOT_CODE_SELECTOR;
751 }
752 return VERR_SELECTOR_NOT_PRESENT;
753}
754
755
/**
 * Validates and converts a GC selector based code address to a flat address.
 *
 * Dispatches to the real/v86, raw-mode or hidden-register worker as
 * appropriate.
 *
 * @returns VBox status code (see the selmValidateAndConvertCSAddr* workers).
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   Efl     Current EFLAGS.
 * @param   SelCPL  Current privilege level. Get this from SS - CS might be
 *                  conforming! A full selector can be passed, we'll only
 *                  use the RPL part.
 * @param   SelCS   Selector part.
 * @param   pSRegCS The full CS selector register. Optional; when NULL the
 *                  raw-mode shadow-table path is used.
 * @param   Addr    The address (think IP/EIP/RIP).
 * @param   ppvFlat Where to store the flat address upon successful return.
 */
VMMDECL(int) SELMValidateAndConvertCSAddr(PVMCPU pVCpu, X86EFLAGS Efl, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREG pSRegCS,
                                          RTGCPTR Addr, PRTGCPTR ppvFlat)
{
    /* Real and v8086 mode need no descriptor validation. */
    if (    Efl.Bits.u1VM
        ||  CPUMIsGuestInRealMode(pVCpu))
        return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pSRegCS, Addr, ppvFlat);

#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    /* Use the hidden registers when possible, updating them if outdate. */
    if (!pSRegCS)
        return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);

    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);

    /* Undo ring compression: raw-mode executes guest ring-0 code at ring 1,
       so strip that RPL bit again before the privilege comparisons. */
    if ((SelCPL & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCPL &= ~X86_SEL_RPL;
    Assert(pSRegCS->Sel == SelCS);
    if ((SelCS & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        SelCS &= ~X86_SEL_RPL;
#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS));
    Assert(pSRegCS->Sel == SelCS);
#endif

    return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pSRegCS, Addr, ppvFlat);
}
798
799
/**
 * Returns Hypervisor's Trap 08 (\#DF) selector.
 *
 * @returns Hypervisor's Trap 08 (\#DF) TSS selector from the hyper selector
 *          table.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetTrap8Selector(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}
810
811
/**
 * Sets EIP of Hypervisor's Trap 08 (\#DF) TSS.
 *
 * The double-fault handler entry point is stored directly in the TSS since a
 * \#DF is delivered via a task switch.
 *
 * @param   pVM     Pointer to the VM.
 * @param   u32EIP  EIP of Trap 08 handler.
 */
VMMDECL(void) SELMSetTrap8EIP(PVM pVM, uint32_t u32EIP)
{
    pVM->selm.s.TssTrap08.eip = u32EIP;
}
822
823
824/**
825 * Sets ss:esp for ring1 in main Hypervisor's TSS.
826 *
827 * @param pVM Pointer to the VM.
828 * @param ss Ring1 SS register value. Pass 0 if invalid.
829 * @param esp Ring1 ESP register value.
830 */
831void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
832{
833 Assert(!HMIsEnabled(pVM));
834 Assert((ss & 1) || esp == 0);
835 pVM->selm.s.Tss.ss1 = ss;
836 pVM->selm.s.Tss.esp1 = (uint32_t)esp;
837}
838
839
840#ifdef VBOX_WITH_RAW_RING1
841/**
842 * Sets ss:esp for ring1 in main Hypervisor's TSS.
843 *
844 * @param pVM Pointer to the VM.
845 * @param ss Ring2 SS register value. Pass 0 if invalid.
846 * @param esp Ring2 ESP register value.
847 */
848void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
849{
850 Assert(!HMIsEnabled(pVM));
851 Assert((ss & 3) == 2 || esp == 0);
852 pVM->selm.s.Tss.ss2 = ss;
853 pVM->selm.s.Tss.esp2 = (uint32_t)esp;
854}
855#endif
856
857
858#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/**
 * Gets ss:esp for ring1 in main Hypervisor's TSS.
 *
 * Returns SS=0 if the ring-1 stack isn't valid.  When the cached copy is
 * flagged stale (fSyncTSSRing0Stack), re-reads the guest TSS first and
 * refreshes the hypervisor TSS ring-1 fields from the guest's ring-0 values.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSS     Ring1 SS register value.
 * @param   pEsp    Ring1 ESP register value.
 */
VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
{
    Assert(!HMIsEnabled(pVM));
    Assert(pVM->cCpus == 1);
    PVMCPU pVCpu = &pVM->aCpus[0];

#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.fSyncTSSRing0Stack)
    {
#endif
        RTGCPTR GCPtrTss = pVM->selm.s.GCPtrGuestTss;
        int     rc;
        VBOXTSS tss;

        Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss);

# ifdef IN_RC
        bool    fTriedAlready = false;

l_tryagain:
        /* In RC we can read the guest TSS directly through its linear address. */
        PVBOXTSS pTss = (PVBOXTSS)(uintptr_t)GCPtrTss;
        rc  = MMGCRamRead(pVM, &tss.ss0,  &pTss->ss0,  sizeof(tss.ss0));
        rc |= MMGCRamRead(pVM, &tss.esp0, &pTss->esp0, sizeof(tss.esp0));
#  ifdef DEBUG
        rc |= MMGCRamRead(pVM, &tss.offIoBitmap, &pTss->offIoBitmap, sizeof(tss.offIoBitmap));
#  endif

        if (RT_FAILURE(rc))
        {
            if (!fTriedAlready)
            {
                /* Shadow page might be out of sync. Sync and try again */
                /** @todo might cross page boundary */
                fTriedAlready = true;
                rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
                if (rc != VINF_SUCCESS)
                    return rc;
                goto l_tryagain;
            }
            AssertMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }

# else /* !IN_RC */
        /* Reading too much. Could be cheaper than two separate calls though. */
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &tss, GCPtrTss, sizeof(VBOXTSS));
        if (RT_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));
            return rc;
        }
# endif /* !IN_RC */

# ifdef LOG_ENABLED
        uint32_t ssr0  = pVM->selm.s.Tss.ss1;
        uint32_t espr0 = pVM->selm.s.Tss.esp1;
        ssr0 &= ~1; /* strip the ring-compression bit for comparison */

        if (ssr0 != tss.ss0 || espr0 != tss.esp0)
            Log(("SELMGetRing1Stack: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));

        Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
# endif
        /* Update our TSS structure for the guest's ring 1 stack.
           The '| 1' moves the guest ring-0 SS to ring 1 (ring compression). */
        selmSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0);
        pVM->selm.s.fSyncTSSRing0Stack = false;
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    }
#endif

    *pSS  = pVM->selm.s.Tss.ss1;
    *pEsp = (RTGCPTR32)pVM->selm.s.Tss.esp1;

    return VINF_SUCCESS;
}
944#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
945
946
947#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
948
/**
 * Gets the hypervisor code selector (CS).
 *
 * @returns CS selector from the hyper selector table.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperCS(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
}
958
959
/**
 * Gets the 64-mode hypervisor code selector (CS64).
 *
 * @returns CS64 selector from the hyper selector table.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperCS64(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64];
}
969
970
/**
 * Gets the hypervisor data selector (DS).
 *
 * @returns DS selector from the hyper selector table.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperDS(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
}
980
981
/**
 * Gets the hypervisor TSS selector.
 *
 * @returns TSS selector from the hyper selector table.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperTSS(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS];
}
991
992
/**
 * Gets the hypervisor TSS Trap 8 selector.
 *
 * @returns TSS Trap 8 (\#DF) selector from the hyper selector table.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(RTSEL) SELMGetHyperTSSTrap08(PVM pVM)
{
    return pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
}
1002
/**
 * Gets the address for the hypervisor GDT.
 *
 * @returns The GDT address (raw-mode context pointer).
 * @param   pVM     Pointer to the VM.
 * @remark  This is intended only for very special use, like in the world
 *          switchers. Don't exploit this API!
 */
VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM)
{
    /*
     * Always convert this from the HC pointer since we can be
     * called before the first relocation and have to work correctly
     * without having dependencies on the relocation order.
     */
    return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3);
}
1020
1021#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */
1022
1023/**
1024 * Gets info about the current TSS.
1025 *
1026 * @returns VBox status code.
1027 * @retval VINF_SUCCESS if we've got a TSS loaded.
1028 * @retval VERR_SELM_NO_TSS if we haven't got a TSS (rather unlikely).
1029 *
1030 * @param pVM Pointer to the VM.
1031 * @param pVCpu Pointer to the VMCPU.
1032 * @param pGCPtrTss Where to store the TSS address.
1033 * @param pcbTss Where to store the TSS size limit.
1034 * @param pfCanHaveIOBitmap Where to store the can-have-I/O-bitmap indicator. (optional)
1035 */
1036VMMDECL(int) SELMGetTSSInfo(PVM pVM, PVMCPU pVCpu, PRTGCUINTPTR pGCPtrTss, PRTGCUINTPTR pcbTss, bool *pfCanHaveIOBitmap)
1037{
1038 NOREF(pVM);
1039
1040 /*
1041 * The TR hidden register is always valid.
1042 */
1043 CPUMSELREGHID trHid;
1044 RTSEL tr = CPUMGetGuestTR(pVCpu, &trHid);
1045 if (!(tr & X86_SEL_MASK_OFF_RPL))
1046 return VERR_SELM_NO_TSS;
1047
1048 *pGCPtrTss = trHid.u64Base;
1049 *pcbTss = trHid.u32Limit + (trHid.u32Limit != UINT32_MAX); /* be careful. */
1050 if (pfCanHaveIOBitmap)
1051 *pfCanHaveIOBitmap = trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1052 || trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1053 return VINF_SUCCESS;
1054}
1055
1056
1057
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 * This is called by PGM.
 *
 * Mirrors the new hypervisor/intermediate CR3 values into the hypervisor
 * TSS structures.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
{
    /** @todo SMP support!! (64-bit guest scenario, primarily) */
    pVM->selm.s.Tss.cr3       = PGMGetHyperCR3(pVCpu);
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
}
1072
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette