VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/SELMRC.cpp@56690

Last change on this file since 56690 was 56287, checked in by vboxsync, 10 years ago

VMM: Updated (C) year.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 25.8 KB
/* $Id: SELMRC.cpp 56287 2015-06-09 11:15:22Z vboxsync $ */
/** @file
 * SELM - The Selector Manager, Guest Context.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_SELM
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include "SELMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/pgm.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>

#include "SELMInline.h"


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef LOG_ENABLED
/** Segment register names. */
static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
#endif


#ifdef SELM_TRACK_GUEST_GDT_CHANGES

/**
 * Synchronizes one GDT entry (guest -> shadow).
 *
 * @returns VBox strict status code (appropriate for trap handling and GC
 *          return).
 * @retval  VINF_SUCCESS
 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
 * @retval  VINF_SELM_SYNC_GDT
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtx        CPU context for the current CPU.
 * @param   iGDTEntry   The GDT entry to sync.
 *
 * @remarks Caller checks that this isn't the LDT entry!
 */
static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
{
    Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));

    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
    if (   iGDTEntry >= SELM_GDT_ELEMENTS
        || offEntry  >  GdtrGuest.cbGdt)
        return VINF_SUCCESS; /* ignore */

    /*
     * Read the guest descriptor.
     */
    X86DESC Desc;
    int rc = MMGCRamRead(pVM, &Desc, (uint8_t *)(uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
    if (RT_FAILURE(rc))
    {
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, (uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
        if (RT_FAILURE(rc))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
            /* return VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
            return VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
        }
    }

    /*
     * Check for conflicts.
     */
    RTSEL Sel = iGDTEntry << X86_SEL_SHIFT;
    Assert(   !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK_OFF_RPL));
    if (   pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == Sel)
    {
        if (Desc.Gen.u1Present)
        {
            Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            return VINF_SELM_SYNC_GDT; /** @todo this status code is ignored, unfortunately. */
        }
        Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc));

        /* Note: we can't continue below or else we'll change the shadow descriptor!! */
        /* When the guest makes the selector present, then we'll do a GDT sync. */
        return VINF_SUCCESS;
    }

    /*
     * Convert the guest selector to a shadow selector and update the shadow GDT.
     */
    selmGuestToShadowDesc(pVM, &Desc);
    PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
    //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF ));
    //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc)), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF ));
    *pShwDescr = Desc;

    /*
     * Detect and mark stale registers.
     */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    PCPUMSELREG  paSReg   = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
        {
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsSRegStale32(&paSReg[iSReg], &Desc, iSReg))
                {
                    Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE;
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
                    /* rcStrict = VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
                    rcStrict = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
                }
                else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE)
                {
                    Log(("GDT write to selector in %s register %04X (no longer stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE;
                }
                else
                    Log(("GDT write to selector in %s register %04X (no important change)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
            }
            else
                Log(("GDT write to selector in %s register %04X (out of sync)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
        }
    }

    /** @todo Detect stale LDTR as well? */

    return rcStrict;
}
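An aside on the arithmetic the conflict and stale checks rely on: an x86 selector carries its descriptor table index in bits 3..15, the table indicator (TI) in bit 2, and the requested privilege level (RPL) in bits 0..1. Shifting a GDT index left by X86_SEL_SHIFT (3) therefore yields the selector value, and masking with X86_SEL_MASK_OFF_RPL strips TI and RPL before comparing. A minimal standalone sketch, with SEL_SHIFT and SEL_MASK_OFF_RPL as local stand-ins for the VBox constants:

#include <stdint.h>
#include <stdio.h>

#define SEL_SHIFT        3        /* index lives in bits 3..15 */
#define SEL_MASK_OFF_RPL 0xfff8u  /* clears TI (bit 2) and RPL (bits 0..1) */

int main(void)
{
    unsigned iGDTEntry = 5;                       /* GDT index, as in selmRCSyncGDTEntry */
    uint16_t Sel       = iGDTEntry << SEL_SHIFT;  /* 0x0028: selector with TI=0, RPL=0 */
    uint16_t SelRing3  = Sel | 3;                 /* same descriptor, but RPL=3 */

    /* Both forms name the same GDT entry once TI and RPL are masked off. */
    printf("%#x and %#x -> same entry: %d\n",
           Sel, SelRing3, (Sel & SEL_MASK_OFF_RPL) == (SelRing3 & SEL_MASK_OFF_RPL));
    return 0;
}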


/**
 * Synchronizes any segment registers referring to the given GDT entry.
 *
 * This is called before any changes are performed and shadowed, so it's
 * possible to look in both the shadow and guest descriptor table entries for
 * hidden register content.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtx        The CPU context.
 * @param   iGDTEntry   The GDT entry to sync.
 */
void selmRCSyncGdtSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
{
    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
    if (   iGDTEntry >= SELM_GDT_ELEMENTS
        || offEntry  >  GdtrGuest.cbGdt)
        return;

    /*
     * Sync outdated segment registers using this entry.
     */
    PCX86DESC   pDesc  = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
    uint32_t    uCpl   = CPUMGetGuestCPL(pVCpu);
    RTSEL       Sel    = iGDTEntry << X86_SEL_SHIFT;
    PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
        {
            if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl))
                {
                    selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc);
                    Log(("selmRCSyncGDTSegRegs: Updated %s\n", g_aszSRegNms[iSReg]));
                }
                else
                    Log(("selmRCSyncGDTSegRegs: Bad shadow descriptor %#x (for %s): %.8Rhxs \n",
                         iGDTEntry, g_aszSRegNms[iSReg], pDesc));
            }
        }
    }
}


/**
 * Syncs hidden selector register parts before emulating a GDT change.
 *
 * This is shared between the selmRCGuestGDTWritePfHandler and
 * selmGuestGDTWriteHandler.
 *
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pVCpu       Pointer to the cross context virtual CPU structure.
 * @param   offGuestGdt The offset into the GDT of the write that was made.
 * @param   cbWrite     The number of bytes written.
 * @param   pCtx        The current CPU context.
 */
void selmRCGuestGdtPreWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
{
    uint32_t       iGdt     = offGuestGdt >> X86_SEL_SHIFT;
    uint32_t const iGdtLast = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
    do
    {
        selmRCSyncGdtSegRegs(pVM, pVCpu, pCtx, iGdt);
        iGdt++;
    } while (iGdt <= iGdtLast);
}
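The pre- and post-write checks both convert a byte write range into a range of 8-byte descriptor slots: shifting the first written byte right by 3 gives the first affected entry, and shifting the last written byte (offset + length - 1) gives the last. A small standalone illustration, with SEL_SHIFT again standing in for X86_SEL_SHIFT:

#include <stdint.h>
#include <stdio.h>

#define SEL_SHIFT 3  /* each GDT entry (X86DESC) is 8 bytes wide */

int main(void)
{
    /* An emulated 4-byte write at GDT offset 14 straddles two descriptors. */
    uint32_t offGuestGdt = 14, cbWrite = 4;
    uint32_t iGdtFirst = offGuestGdt >> SEL_SHIFT;                 /* 14 / 8 = 1 */
    uint32_t iGdtLast  = (offGuestGdt + cbWrite - 1) >> SEL_SHIFT; /* 17 / 8 = 2 */
    printf("entries %u..%u need checking\n", iGdtFirst, iGdtLast); /* 1..2 */
    return 0;
}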


/**
 * Checks the guest GDT for changes after a write has been emulated.
 *
 * This is shared between the selmRCGuestGDTWritePfHandler and
 * selmGuestGDTWriteHandler.
 *
 * @retval  VINF_SUCCESS
 * @retval  VINF_SELM_SYNC_GDT
 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
 *
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pVCpu       Pointer to the cross context virtual CPU structure.
 * @param   offGuestGdt The offset into the GDT of the write that was made.
 * @param   cbWrite     The number of bytes written.
 * @param   pCtx        The current CPU context.
 */
VBOXSTRICTRC selmRCGuestGdtPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    /* Check if the LDT was in any way affected.  Do not sync the
       shadow GDT if that's the case or we might have trouble in
       the world switcher (or so they say). */
    uint32_t const iGdtFirst = offGuestGdt >> X86_SEL_SHIFT;
    uint32_t const iGdtLast  = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
    uint32_t const iLdt      = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
    if (iGdtFirst <= iLdt && iGdtLast >= iLdt)
    {
        Log(("LDTR selector change -> fall back to HC!!\n"));
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
        rcStrict = VINF_SELM_SYNC_GDT;
        /** @todo Implement correct stale LDT handling. */
    }
    else
    {
        /* Sync the shadow GDT and continue provided the update didn't
           cause any segment registers to go stale in any way. */
        uint32_t iGdt = iGdtFirst;
        do
        {
            VBOXSTRICTRC rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pCtx, iGdt);
            Assert(rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT || rcStrict2 == VINF_SELM_SYNC_GDT);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = rcStrict2;
            iGdt++;
        } while (   iGdt <= iGdtLast
                 && (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT));
        if (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT)
            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
    }
    return rcStrict;
}
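The sync loop above merges per-entry statuses by adopting the first non-VINF_SUCCESS code and never downgrading it afterwards. A standalone sketch of that policy, using hypothetical integer stand-ins for the VBox strict status codes:

#include <stdio.h>

/* Hypothetical stand-ins for the VBox status codes used above. */
enum { VINF_SUCCESS = 0, VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT = 1, VINF_SELM_SYNC_GDT = 2 };

/* Keep the first non-success status; later results cannot overwrite it. */
static int mergeStatus(int rcStrict, int rcStrict2)
{
    return rcStrict == VINF_SUCCESS ? rcStrict2 : rcStrict;
}

int main(void)
{
    int rc = VINF_SUCCESS;
    rc = mergeStatus(rc, VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT); /* adopted */
    rc = mergeStatus(rc, VINF_SUCCESS);                        /* ignored */
    printf("rc=%d\n", rc);                                     /* prints rc=1 */
    return 0;
}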


/**
 * @callback_method_impl{FNPGMVIRTHANDLER, Guest GDT write access \#PF handler}
 */
DECLEXPORT(VBOXSTRICTRC) selmRCGuestGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogFlow(("selmRCGuestGDTWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    NOREF(pvRange); NOREF(pvUser);

    /*
     * Check if any selectors might be affected.
     */
    selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offRange, 8 /*cbWrite*/, CPUMCTX_FROM_CORE(pRegFrame));

    /*
     * Attempt to emulate the instruction and sync the affected entries.
     */
    uint32_t cb;
    VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (RT_SUCCESS(rcStrict) && cb)
        rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offRange, cb, CPUMCTX_FROM_CORE(pRegFrame));
    else
    {
        Assert(RT_FAILURE(rcStrict));
        if (rcStrict == VERR_EM_INTERPRETER)
            rcStrict = VINF_EM_RAW_EMULATE_INSTR; /* No, not VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT, see PGM_PHYS_RW_IS_SUCCESS. */
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
    else
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    return rcStrict;
}

#endif /* SELM_TRACK_GUEST_GDT_CHANGES */

#ifdef SELM_TRACK_GUEST_LDT_CHANGES
/**
 * @callback_method_impl{FNPGMVIRTHANDLER, Guest LDT write access \#PF handler}
 */
DECLEXPORT(VBOXSTRICTRC) selmRCGuestLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    /** @todo To be implemented... or not. */
    ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);

    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
}
#endif


#ifdef SELM_TRACK_GUEST_TSS_CHANGES

/**
 * Read wrapper used by selmRCGuestTssPostWriteCheck.
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   The current virtual CPU.
 * @param   pvDst   Where to put the bits we read.
 * @param   pvSrc   Guest address to read from.
 * @param   cb      The number of bytes to read.
 */
DECLINLINE(int) selmRCReadTssBits(PVM pVM, PVMCPU pVCpu, void *pvDst, void const *pvSrc, size_t cb)
{
    int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /** @todo use different fallback? */
    rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
    AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
    if (rc == VINF_SUCCESS)
    {
        rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
        AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
    }
    return rc;
}
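selmRCReadTssBits is an instance of a try/fault-in/retry read: attempt the fast mapped read, force the page in if that fails, then retry exactly once. A self-contained sketch of the pattern, with stub functions and a page-present flag standing in for MMGCRamRead and PGMPrefetchPage:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical guest page state for the sake of the demonstration. */
static int  g_fPagePresent = 0;
static char g_abPage[16]   = "TSS bytes";

static int tryRead(void *pvDst, size_t cb)  /* plays the role of MMGCRamRead */
{
    if (!g_fPagePresent)
        return -1;                          /* page not mapped yet -> fail fast */
    memcpy(pvDst, g_abPage, cb);
    return 0;
}

static int faultInPage(void)                /* plays the role of PGMPrefetchPage */
{
    g_fPagePresent = 1;
    return 0;
}

/* Fast read first; on failure fault the page in and retry exactly once. */
static int readWithRetry(void *pvDst, size_t cb)
{
    int rc = tryRead(pvDst, cb);
    if (rc == 0)
        return 0;
    rc = faultInPage();
    if (rc == 0)
        rc = tryRead(pvDst, cb);
    return rc;
}

int main(void)
{
    char buf[10] = {0};
    printf("rc=%d buf=%s\n", readWithRetry(buf, sizeof(buf)), buf);
    return 0;
}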


/**
 * Checks the guest TSS for changes after a write has been emulated.
 *
 * This is shared between the selmRCGuestTSSWritePfHandler and
 * selmGuestTSSWriteHandler.
 *
 * @returns Strict VBox status code appropriate for raw-mode returns.
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pVCpu       Pointer to the cross context virtual CPU structure.
 * @param   offGuestTss The offset into the TSS of the write that was made.
 * @param   cbWrite     The number of bytes written.
 */
VBOXSTRICTRC selmRCGuestTssPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestTss, uint32_t cbWrite)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    /*
     * If it's on the same page as the esp0 and ss0 fields or actually one of them,
     * then check if any of these has changed.
     */
/** @todo just read the darn fields and put them on the stack. */
    PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
    if (   PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
        && PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
        && (   pGuestTss->esp0 != pVM->selm.s.Tss.esp1
            || pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
       )
    {
        Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
             (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
        pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
        pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    }
# ifdef VBOX_WITH_RAW_RING1
    else if (   EMIsRawRing1Enabled(pVM)
             && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
             && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
             && (   pGuestTss->esp1 != pVM->selm.s.Tss.esp2
                 || pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
            )
    {
        Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
             (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
        pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
        pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    }
# endif
    /* Handle misaligned TSS in a safe manner (just in case). */
    else if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, esp0)
             && offGuestTss <  RT_UOFFSETOF(VBOXTSS, padding_ss0))
    {
        struct
        {
            uint32_t esp0;
            uint16_t ss0;
            uint16_t padding_ss0;
        } s;
        AssertCompileSize(s, 8);
        rcStrict = selmRCReadTssBits(pVM, pVCpu, &s, &pGuestTss->esp0, sizeof(s));
        if (   rcStrict == VINF_SUCCESS
            && (   s.esp0 != pVM->selm.s.Tss.esp1
                || s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
           )
        {
            Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
            pVM->selm.s.Tss.esp1 = s.esp0;
            pVM->selm.s.Tss.ss1  = s.ss0 | 1;
            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
        }
    }

    /*
     * If VME is enabled we need to check if the interrupt redirection bitmap
     * needs updating.
     */
    if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
        && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
    {
        if (offGuestTss - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
        {
            uint16_t offIoBitmap = pGuestTss->offIoBitmap;
            if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
            {
                Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
                VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            }
            else
                Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
        }
        else
        {
            /** @todo not sure how the partial case is handled; probably not allowed */
            uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
            if (   offIntRedirBitmap <= offGuestTss
                && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offGuestTss + cbWrite
                && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
            {
                Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offGuestTss=%x cbWrite=%x\n",
                     pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offGuestTss, cbWrite));

                /** @todo only update the changed part. */
                for (uint32_t i = 0; rcStrict == VINF_SUCCESS && i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
                    rcStrict = selmRCReadTssBits(pVM, pVCpu, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
                                                 (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
            }
        }
    }

    /*
     * Return to ring-3 for a full resync if any of the above fails... (?)
     */
    if (rcStrict != VINF_SUCCESS)
    {
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        if (RT_SUCCESS(rcStrict))
            rcStrict = VINF_SUCCESS;
    }

    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
    return rcStrict;
}
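The offIntRedirBitmap computation above relies on the VME layout: when CR4.VME is set, the 256-bit (32-byte) interrupt redirection bitmap sits immediately before the I/O permission bitmap in the TSS, so its offset is offIoBitmap minus 32. A standalone sketch with an illustrative offIoBitmap value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t offIoBitmap       = 0x88;    /* example value read from a guest TSS */
    uint32_t cbIntRedirBitmap  = 256 / 8; /* one bit per software interrupt vector */
    uint32_t offIntRedirBitmap = offIoBitmap - cbIntRedirBitmap;
    printf("int redirection bitmap at offsets %#x..%#x\n",
           offIntRedirBitmap, offIntRedirBitmap + cbIntRedirBitmap - 1);
    return 0;
}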


/**
 * @callback_method_impl{FNPGMVIRTHANDLER, Guest TSS write access \#PF handler}
 */
DECLEXPORT(VBOXSTRICTRC) selmRCGuestTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogFlow(("selmRCGuestTSSWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    NOREF(pvRange); NOREF(pvUser);

    /*
     * Try emulate the access.
     */
    uint32_t cb;
    VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (   RT_SUCCESS(rcStrict)
        && cb)
        rcStrict = selmRCGuestTssPostWriteCheck(pVM, pVCpu, offRange, cb);
    else
    {
        AssertMsg(RT_FAILURE(rcStrict), ("cb=%u rcStrict=%#x\n", cb, VBOXSTRICTRC_VAL(rcStrict)));
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
        if (rcStrict == VERR_EM_INTERPRETER)
            rcStrict = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
    }
    return rcStrict;
}

#endif /* SELM_TRACK_GUEST_TSS_CHANGES */

#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      Unused.
 */
DECLEXPORT(VBOXSTRICTRC) selmRCShadowGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogRel(("FATAL ERROR: selmRCShadowGDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    return VERR_SELM_SHADOW_GDT_WRITE;
}
#endif


#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      Unused.
 */
DECLEXPORT(VBOXSTRICTRC) selmRCShadowLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogRel(("FATAL ERROR: selmRCShadowLDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    return VERR_SELM_SHADOW_LDT_WRITE;
}
#endif


#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      Unused.
 */
DECLEXPORT(VBOXSTRICTRC) selmRCShadowTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogRel(("FATAL ERROR: selmRCShadowTSSWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    return VERR_SELM_SHADOW_TSS_WRITE;
}
#endif