VirtualBox

source: vbox/trunk/src/VBox/VMM/SELM.cpp@ 1485

Last change on this file since 1485 was 1485, checked in by vboxsync, 18 years ago

More build fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 85.5 KB
Line 
1/* $Id: SELM.cpp 1485 2007-03-14 18:50:17Z vboxsync $ */
2/** @file
3 * SELM - The Selector manager.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_SELM
26#include <VBox/selm.h>
27#include <VBox/cpum.h>
28#include <VBox/stam.h>
29#include <VBox/mm.h>
30#include <VBox/pdm.h>
31#include <VBox/pgm.h>
32#include <VBox/trpm.h>
33#include <VBox/dbgf.h>
34#include "SELMInternal.h"
35#include <VBox/vm.h>
36#include <VBox/err.h>
37#include <VBox/param.h>
38
39#include <iprt/assert.h>
40#include <VBox/log.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44#include <iprt/string.h>
45#include "x86context.h"
46
47
48/**
49 * Enable or disable tracking of Guest's GDT/LDT/TSS.
50 * @{
51 */
52#define SELM_TRACK_GUEST_GDT_CHANGES
53#define SELM_TRACK_GUEST_LDT_CHANGES
54#define SELM_TRACK_GUEST_TSS_CHANGES
55/** @} */
56
57/**
58 * Enable or disable tracking of Shadow GDT/LDT/TSS.
59 * @{
60 */
61#define SELM_TRACK_SHADOW_GDT_CHANGES
62#define SELM_TRACK_SHADOW_LDT_CHANGES
63#define SELM_TRACK_SHADOW_TSS_CHANGES
64/** @} */
65
66
67/** SELM saved state version. */
68#define SELM_SAVED_STATE_VERSION 5
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM);
74static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
75static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
76static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
77static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
78static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
79static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
80//static DECLCALLBACK(void) selmR3InfoTss(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
81//static DECLCALLBACK(void) selmR3InfoTssGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
82static DECLCALLBACK(int) selmGuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
83static DECLCALLBACK(int) selmGuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
84static DECLCALLBACK(int) selmGuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
85
86
87
/**
 * Initializes the SELM.
 *
 * Sets up the hypervisor selector slots (top five entries of the shadow GDT),
 * allocates the shadow GDT and LDT areas, registers the saved state unit,
 * statistics counters and debugger info handlers, and forces a full
 * GDT/LDT/TSS sync on the first raw-mode entry.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
SELMR3DECL(int) SELMR3Init(PVM pVM)
{
    LogFlow(("SELMR3Init\n"));

    /*
     * Assert alignment and sizes.
     * (The TSS block requires contiguous back.)
     */
    AssertCompile(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding)); AssertRelease(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding));
    AssertCompileMemberAlignment(VM, selm.s, 32); AssertRelease(!(RT_OFFSETOF(VM, selm.s) & 31));
#if 0 /* doesn't work */
    AssertCompile((RT_OFFSETOF(VM, selm.s.Tss)       & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.Tss));
    AssertCompile((RT_OFFSETOF(VM, selm.s.TssTrap08) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.TssTrap08));
#endif
    /* Neither TSS structure may straddle a page boundary. */
    AssertRelease((RT_OFFSETOF(VM, selm.s.Tss)       & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.Tss));
    AssertRelease((RT_OFFSETOF(VM, selm.s.TssTrap08) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.TssTrap08));

    /*
     * Init the structure.
     */
    pVM->selm.s.offVM = RT_OFFSETOF(VM, selm);
    /* Default hypervisor selectors: the five topmost shadow GDT entries
       (selector = index << 3, RPL/TI zero). */
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         = (SELM_GDT_ELEMENTS - 0x1) << 3;
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         = (SELM_GDT_ELEMENTS - 0x2) << 3;
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       = (SELM_GDT_ELEMENTS - 0x3) << 3;
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        = (SELM_GDT_ELEMENTS - 0x4) << 3;
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = (SELM_GDT_ELEMENTS - 0x5) << 3;

    /*
     * Allocate GDT table.
     */
    int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtHC[0]) * SELM_GDT_ELEMENTS,
                                     PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtHC);
    AssertRCReturn(rc, rc);

    /*
     * Allocate LDT area.
     */
    rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.HCPtrLdt);
    AssertRCReturn(rc, rc);

    /*
     * Init Guest's and Shadow GDT, LDT, TSS changes control variables.
     * ~0 is used throughout this module as the "not registered / invalid" marker.
     */
    pVM->selm.s.cbEffGuestGdtLimit = 0;
    pVM->selm.s.GuestGdtr.pGdt     = ~0;
    pVM->selm.s.GCPtrGuestLdt      = ~0;
    pVM->selm.s.GCPtrGuestTss      = ~0;

    pVM->selm.s.paGdtGC  = 0;
    pVM->selm.s.GCPtrLdt = ~0;
    pVM->selm.s.GCPtrTss = ~0;
    pVM->selm.s.GCSelTss = ~0;

    pVM->selm.s.fDisableMonitoring = false;
    pVM->selm.s.fSyncTSSRing0Stack = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "selm", 1, SELM_SAVED_STATE_VERSION, sizeof(SELM),
                               NULL, selmR3Save, NULL,
                               NULL, selmR3Load, selmR3LoadDone);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Statistics.
     */
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestGDTHandled,     STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt",   STAMUNIT_OCCURENCES,     "The number of handled writes to the Guest GDT.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestGDTUnhandled,   STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu",   STAMUNIT_OCCURENCES,     "The number of unhandled writes to the Guest GDT.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestLDT,            STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT",      STAMUNIT_OCCURENCES,     "The number of writes to the Guest LDT was detected.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSHandled,     STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt",   STAMUNIT_OCCURENCES,     "The number of handled writes to the Guest TSS.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES,  "The number of handled writes to the Guest TSS where the R0 stack changed.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSUnhandled,   STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu",   STAMUNIT_OCCURENCES,     "The number of unhandled writes to the Guest TSS.");
    STAM_REG(pVM, &pVM->selm.s.StatTSSSync,                    STAMTYPE_PROFILE, "/PROF/SELM/TSSSync",            STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
    STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM,             STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM",     STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");

    STAM_REG(pVM, &pVM->selm.s.StatHyperSelsChanged,           STAMTYPE_COUNTER, "/SELM/HyperSels/Changed",       STAMUNIT_OCCURENCES,     "The number of times we had to relocate our hypervisor selectors.");
    STAM_REG(pVM, &pVM->selm.s.StatScanForHyperSels,           STAMTYPE_COUNTER, "/SELM/HyperSels/Scan",          STAMUNIT_OCCURENCES,     "The number of times we had find free hypervisor selectors.");

    /*
     * Default action when entering raw mode for the first time
     */
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);

    /*
     * Register info handlers.
     */
    DBGFR3InfoRegisterInternal(pVM, "gdt",      "Displays the shadow GDT. No arguments.",   &selmR3InfoGdt);
    DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.",    &selmR3InfoGdtGuest);
    DBGFR3InfoRegisterInternal(pVM, "ldt",      "Displays the shadow LDT. No arguments.",   &selmR3InfoLdt);
    DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.",    &selmR3InfoLdtGuest);
    //DBGFR3InfoRegisterInternal(pVM, "tss",      "Displays the shadow TSS. No arguments.",   &selmR3InfoTss);
    //DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.",    &selmR3InfoTssGuest);

    /* rc is VINF_SUCCESS here (the DBGF registrations above do not update it). */
    return rc;
}
193
194
/**
 * Finalizes HMA page attributes.
 *
 * Optionally opens up write access to the shadow GDT entries and TSS
 * structures needed to make a guest-visible double fault (task switch)
 * work with CR0.WP set; see the discussion below.
 *
 * @returns VBox status code (always VINF_SUCCESS; PGMMapSetPage failures
 *          are only asserted on).
 * @param   pVM     The VM handle.
 */
SELMR3DECL(int) SELMR3InitFinalize(PVM pVM)
{
    /*
     * Make Double Fault work with WP enabled?
     *
     * The double fault is a task switch and thus requires write access to the GDT of the TSS
     * (to set it busy), to the old TSS (to store state), and to the Trap 8 TSS for the back link.
     *
     * Since we in enabling write access to these pages make ourself vulnerable to attacks,
     * it is not possible to do this by default.
     */
    bool f;
    int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
    /* NOTE(review): when DEBUG_bird or __AMD64__ is defined, the 'if' below is
       compiled out and the block executes unconditionally. 'f' stays
       uninitialized on CFGM failure, but it is then only read after a
       successful query (short-circuit), so that is safe. */
#if !defined(DEBUG_bird) && !defined(__AMD64__) /** @todo Remember to remove __AMD64__ here! */
    if (VBOX_SUCCESS(rc) && f)
#endif
    {
        PVBOXDESC paGdt = pVM->selm.s.paGdtHC;
        /* Make the trap-08 TSS descriptor's GDT entry writable (busy bit). */
        rc = PGMMapSetPage(pVM, MMHyperHC2GC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3]), sizeof(paGdt[0]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
        /* Likewise for the normal hypervisor TSS descriptor. */
        rc = PGMMapSetPage(pVM, MMHyperHC2GC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3]), sizeof(paGdt[0]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
        /* And the two TSS structures themselves (state save + back link). */
        rc = PGMMapSetPage(pVM, VM_GUEST_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
        rc = PGMMapSetPage(pVM, VM_GUEST_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
    }
    return VINF_SUCCESS;
}
234
235
/**
 * Setup the hypervisor GDT selectors in our shadow table
 *
 * Writes the five hypervisor descriptors (flat CS, flat DS, 64-bit CS,
 * TSS, trap-08 TSS) into the shadow GDT slots recorded in
 * pVM->selm.s.aHyperSel[].
 *
 * @param   pVM     The VM handle.
 */
static void selmR3SetupHyperGDTSelectors(PVM pVM)
{
    PVBOXDESC paGdt = pVM->selm.s.paGdtHC;

    /*
     * Set up global code and data descriptors for use in the guest context.
     * Both are wide open (base 0, limit 4GB)
     */
    PVBOXDESC pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] >> 3];
    pDesc->Gen.u16LimitLow      = 0xffff;
    pDesc->Gen.u4LimitHigh      = 0xf;      /* limit 0xfffff * 4K granularity = 4GB */
    pDesc->Gen.u16BaseLow       = 0;
    pDesc->Gen.u8BaseHigh1      = 0;
    pDesc->Gen.u8BaseHigh2      = 0;
    pDesc->Gen.u4Type           = X86_SELTYPE_MEM_EXECUTEREAD_ACC;
    pDesc->Gen.u1DescType       = 1; /* not system, but code/data */
    pDesc->Gen.u2Dpl            = 0; /* supervisor */
    pDesc->Gen.u1Present        = 1;
    pDesc->Gen.u1Available      = 0;
    pDesc->Gen.u1Reserved       = 0;
    pDesc->Gen.u1DefBig         = 1; /* def 32 bit */
    pDesc->Gen.u1Granularity    = 1; /* 4KB limit */

    /* data */
    pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] >> 3];
    pDesc->Gen.u16LimitLow      = 0xffff;
    pDesc->Gen.u4LimitHigh      = 0xf;
    pDesc->Gen.u16BaseLow       = 0;
    pDesc->Gen.u8BaseHigh1      = 0;
    pDesc->Gen.u8BaseHigh2      = 0;
    pDesc->Gen.u4Type           = X86_SELTYPE_MEM_READWRITE_ACC;
    pDesc->Gen.u1DescType       = 1; /* not system, but code/data */
    pDesc->Gen.u2Dpl            = 0; /* supervisor */
    pDesc->Gen.u1Present        = 1;
    pDesc->Gen.u1Available      = 0;
    pDesc->Gen.u1Reserved       = 0;
    pDesc->Gen.u1DefBig         = 1; /* big */
    pDesc->Gen.u1Granularity    = 1; /* 4KB limit */

    /* 64-bit mode code (& data?) */
    pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] >> 3];
    pDesc->Gen.u16LimitLow      = 0xffff;
    pDesc->Gen.u4LimitHigh      = 0xf;
    pDesc->Gen.u16BaseLow       = 0;
    pDesc->Gen.u8BaseHigh1      = 0;
    pDesc->Gen.u8BaseHigh2      = 0;
    pDesc->Gen.u4Type           = X86_SELTYPE_MEM_EXECUTEREAD_ACC;
    pDesc->Gen.u1DescType       = 1; /* not system, but code/data */
    pDesc->Gen.u2Dpl            = 0; /* supervisor */
    pDesc->Gen.u1Present        = 1;
    pDesc->Gen.u1Available      = 0;
    pDesc->Gen.u1Reserved       = 1; /* The Long (L) attribute bit. */
    pDesc->Gen.u1DefBig         = 0; /* With L=1 this must be 0. */
    pDesc->Gen.u1Granularity    = 1; /* 4KB limit */

    /*
     * TSS descriptor
     * Base = guest-context address of pVM->selm.s.Tss; byte-granular limit.
     */
    pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3];
    RTGCPTR pGCTSS = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);
    pDesc->Gen.u16BaseLow       = RT_LOWORD(pGCTSS);
    pDesc->Gen.u8BaseHigh1      = RT_BYTE3(pGCTSS);
    pDesc->Gen.u8BaseHigh2      = RT_BYTE4(pGCTSS);
    pDesc->Gen.u16LimitLow      = sizeof(VBOXTSS) - 1;
    pDesc->Gen.u4LimitHigh      = 0;
    pDesc->Gen.u4Type           = X86_SELTYPE_SYS_386_TSS_AVAIL;
    pDesc->Gen.u1DescType       = 0; /* system */
    pDesc->Gen.u2Dpl            = 0; /* supervisor */
    pDesc->Gen.u1Present        = 1;
    pDesc->Gen.u1Available      = 0;
    pDesc->Gen.u1Reserved       = 0;
    pDesc->Gen.u1DefBig         = 0;
    pDesc->Gen.u1Granularity    = 0; /* byte limit */

    /*
     * TSS descriptor for trap 08
     * Same encoding as above, but based at pVM->selm.s.TssTrap08.
     */
    pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3];
    pDesc->Gen.u16LimitLow      = sizeof(VBOXTSS) - 1;
    pDesc->Gen.u4LimitHigh      = 0;
    pGCTSS = VM_GUEST_ADDR(pVM, &pVM->selm.s.TssTrap08);
    pDesc->Gen.u16BaseLow       = RT_LOWORD(pGCTSS);
    pDesc->Gen.u8BaseHigh1      = RT_BYTE3(pGCTSS);
    pDesc->Gen.u8BaseHigh2      = RT_BYTE4(pGCTSS);
    pDesc->Gen.u4Type           = X86_SELTYPE_SYS_386_TSS_AVAIL;
    pDesc->Gen.u1DescType       = 0; /* system */
    pDesc->Gen.u2Dpl            = 0; /* supervisor */
    pDesc->Gen.u1Present        = 1;
    pDesc->Gen.u1Available      = 0;
    pDesc->Gen.u1Reserved       = 0;
    pDesc->Gen.u1DefBig         = 0;
    pDesc->Gen.u1Granularity    = 0; /* byte limit */
}
334
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM need to relocate it self inside the GC.
 *
 * Re-points the hypervisor GDTR/segment registers at the (possibly moved)
 * shadow GDT, rebuilds the hypervisor descriptors, refreshes both TSSes,
 * and re-registers the shadow GDT/TSS/LDT write-access handlers at the
 * new guest-context addresses.
 *
 * @param   pVM     The VM.
 */
SELMR3DECL(void) SELMR3Relocate(PVM pVM)
{
    PVBOXDESC paGdt = pVM->selm.s.paGdtHC;
    LogFlow(("SELMR3Relocate\n"));

    /*
     * Update GDTR and selector.
     */
    CPUMSetHyperGDTR(pVM, MMHyperHC2GC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);

    /** @todo selector relocations should be a seperate operation? */
    CPUMSetHyperCS(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]);
    CPUMSetHyperDS(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
    CPUMSetHyperES(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
    CPUMSetHyperSS(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
    CPUMSetHyperTR(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]);

    selmR3SetupHyperGDTSelectors(pVM);

/** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
/** @todo PGM knows the proper CR3 values these days, not CPUM. */
    /*
     * Update the TSSes.
     */
    /* Current TSS */
    pVM->selm.s.Tss.cr3     = PGMGetHyperCR3(pVM);
    pVM->selm.s.Tss.ss0     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.Tss.esp0    = VMMGetStackGC(pVM);
    pVM->selm.s.Tss.cs      = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
    pVM->selm.s.Tss.ds      = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.Tss.es      = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    /* offIoBitmap past the structure end => no I/O bitmap inside the TSS limit. */
    pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);

    /* trap 08 */
    pVM->selm.s.TssTrap08.cr3    = PGMGetInterGCCR3(pVM); /* this should give use better survival chances. */
    pVM->selm.s.TssTrap08.ss0    = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.TssTrap08.ss     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.TssTrap08.esp0   = VMMGetStackGC(pVM) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
    pVM->selm.s.TssTrap08.esp    = pVM->selm.s.TssTrap08.esp0;
    pVM->selm.s.TssTrap08.ebp    = pVM->selm.s.TssTrap08.esp0;
    pVM->selm.s.TssTrap08.cs     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
    pVM->selm.s.TssTrap08.ds     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.TssTrap08.es     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.TssTrap08.fs     = 0;
    pVM->selm.s.TssTrap08.gs     = 0;
    pVM->selm.s.TssTrap08.selLdt = 0;
    pVM->selm.s.TssTrap08.eflags = 0x2;    /* all cleared */
    pVM->selm.s.TssTrap08.ecx    = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);    /* setup ecx to normal Hypervisor TSS address. */
    pVM->selm.s.TssTrap08.edi    = pVM->selm.s.TssTrap08.ecx;
    pVM->selm.s.TssTrap08.eax    = pVM->selm.s.TssTrap08.ecx;
    pVM->selm.s.TssTrap08.edx    = VM_GUEST_ADDR(pVM, pVM);                 /* setup edx VM address. */
    /* NOTE(review): edi is assigned twice; this second assignment overrides the
       ecx-based one above. Looks like the first was meant for a different
       register (esi?) or is dead code — confirm against the trap-08 handler. */
    pVM->selm.s.TssTrap08.edi    = pVM->selm.s.TssTrap08.edx;
    pVM->selm.s.TssTrap08.ebx    = pVM->selm.s.TssTrap08.edx;
    pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
    /* TRPM will be updating the eip */

    if (!pVM->selm.s.fDisableMonitoring)
    {
        /*
         * Update shadow GDT/LDT/TSS write access handlers.
         * Each handler is first deregistered at its old address (if any),
         * then re-registered at the relocated guest-context address.
         */
        int rc;
#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
        if (pVM->selm.s.paGdtGC != 0)
        {
            rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtGC);
            AssertRC(rc);
        }
        pVM->selm.s.paGdtGC = MMHyperHC2GC(pVM, paGdt);
        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.paGdtGC,
                                         pVM->selm.s.paGdtGC + SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1,
                                         0, 0, "selmgcShadowGDTWriteHandler", 0, "Shadow GDT write access handler");
        AssertRC(rc);
#endif
#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
        if (pVM->selm.s.GCPtrTss != ~0U)
        {
            rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrTss);
            AssertRC(rc);
        }
        pVM->selm.s.GCPtrTss = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);
        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.GCPtrTss,
                                         pVM->selm.s.GCPtrTss + sizeof(pVM->selm.s.Tss) - 1,
                                         0, 0, "selmgcShadowTSSWriteHandler", 0, "Shadow TSS write access handler");
        AssertRC(rc);
#endif

        /*
         * Update the GC LDT region handler and address.
         */
#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
        if (pVM->selm.s.GCPtrLdt != ~0U)
        {
            rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrLdt);
            AssertRC(rc);
        }
#endif
        pVM->selm.s.GCPtrLdt = MMHyperHC2GC(pVM, pVM->selm.s.HCPtrLdt);
#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.GCPtrLdt,
                                         pVM->selm.s.GCPtrLdt + _64K + PAGE_SIZE - 1,
                                         0, 0, "selmgcShadowLDTWriteHandler", 0, "Shadow LDT write access handler");
        AssertRC(rc);
#endif
    }
}
448
449
450/**
451 * Notification callback which is called whenever there is a chance that a CR3
452 * value might have changed.
453 * This is called by PGM.
454 *
455 * @param pVM The VM handle
456 */
457SELMR3DECL(void) SELMR3PagingModeChanged(PVM pVM)
458{
459 pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVM);
460 pVM->selm.s.TssTrap08.cr3 = PGMGetInterGCCR3(pVM);
461}
462
463
464/**
465 * Terminates the SELM.
466 *
467 * Termination means cleaning up and freeing all resources,
468 * the VM it self is at this point powered off or suspended.
469 *
470 * @returns VBox status code.
471 * @param pVM The VM to operate on.
472 */
473SELMR3DECL(int) SELMR3Term(PVM pVM)
474{
475 return 0;
476}
477
478
/**
 * The VM is being reset.
 *
 * For the SELM component this means that any GDT/LDT/TSS monitors
 * needs to be removed.
 *
 * Deregisters the guest GDT/LDT/TSS write handlers, resets the guest
 * tracking state to the "invalid" markers, and flags a full resync for
 * the next raw-mode entry. Shadow handlers are left in place (contrast
 * with SELMR3DisableMonitoring).
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3Reset(PVM pVM)
{
    LogFlow(("SELMR3Reset:\n"));
    VM_ASSERT_EMT(pVM);

    /*
     * Uninstall guest GDT/LDT/TSS write access handlers.
     */
    int rc;
#ifdef SELM_TRACK_GUEST_GDT_CHANGES
    if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
        AssertRC(rc);
        pVM->selm.s.GuestGdtr.pGdt = ~0U;
        pVM->selm.s.GuestGdtr.cbGdt = 0;
    }
    pVM->selm.s.fGDTRangeRegistered = false;
#endif
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
    if (pVM->selm.s.GCPtrGuestLdt != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestLdt = ~0U;
    }
#endif
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.GCPtrGuestTss != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestTss = ~0U;
        pVM->selm.s.GCSelTss = ~0;
    }
#endif

    /*
     * Re-initialize other members.
     */
    pVM->selm.s.cbLdtLimit = 0;
    pVM->selm.s.offLdtHyper = 0;
    pVM->selm.s.cbMonitoredGuestTss = 0;

    pVM->selm.s.fSyncTSSRing0Stack = false;

    /*
     * Default action when entering raw mode for the first time
     */
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
}
540
/**
 * Disable GDT/LDT/TSS monitoring and syncing
 *
 * Deregisters both the guest write handlers (same teardown as
 * SELMR3Reset) and the shadow write handlers, clears the SELM sync
 * force-flags and marks monitoring permanently disabled — once set,
 * fDisableMonitoring makes SELMR3UpdateFromCPUM and the relocation
 * handler registration no-ops.
 *
 * @param   pVM     The VM to operate on.
 */
SELMR3DECL(void) SELMR3DisableMonitoring(PVM pVM)
{
    /*
     * Uninstall guest GDT/LDT/TSS write access handlers.
     * (This section mirrors SELMR3Reset; keep the two in sync.)
     */
    int rc;
#ifdef SELM_TRACK_GUEST_GDT_CHANGES
    if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
        AssertRC(rc);
        pVM->selm.s.GuestGdtr.pGdt = ~0U;
        pVM->selm.s.GuestGdtr.cbGdt = 0;
    }
    pVM->selm.s.fGDTRangeRegistered = false;
#endif
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
    if (pVM->selm.s.GCPtrGuestLdt != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestLdt = ~0U;
    }
#endif
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.GCPtrGuestTss != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestTss = ~0U;
        pVM->selm.s.GCSelTss = ~0;
    }
#endif

    /*
     * Unregister shadow GDT/LDT/TSS write access handlers.
     */
#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
    if (pVM->selm.s.paGdtGC != 0)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtGC);
        AssertRC(rc);
        pVM->selm.s.paGdtGC = 0;
    }
#endif
#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
    if (pVM->selm.s.GCPtrTss != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrTss);
        AssertRC(rc);
        pVM->selm.s.GCPtrTss = ~0U;
    }
#endif
#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
    if (pVM->selm.s.GCPtrLdt != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrLdt);
        AssertRC(rc);
        pVM->selm.s.GCPtrLdt = ~0U;
    }
#endif

    /* No monitoring => no pending syncs. */
    VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
    VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);

    pVM->selm.s.fDisableMonitoring = true;
}
614
615/**
616 * Execute state save operation.
617 *
618 * @returns VBox status code.
619 * @param pVM VM Handle.
620 * @param pSSM SSM operation handle.
621 */
622static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM)
623{
624 LogFlow(("selmR3Save:\n"));
625
626 /*
627 * Save the basic bits - fortunately all the other things can be resynced on load.
628 */
629 PSELM pSelm = &pVM->selm.s;
630
631 SSMR3PutBool(pSSM, pSelm->fDisableMonitoring);
632 SSMR3PutBool(pSSM, pSelm->fSyncTSSRing0Stack);
633 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS]);
634 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_DS]);
635 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]);
636 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]); //reserved for DS64.
637 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS]);
638 return SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]);
639}
640
641
/**
 * Execute state load operation.
 *
 * Reads back the fields written by selmR3Save in the same order: two
 * monitoring flags followed by six selectors. Must stay in lock-step with
 * the save routine.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    LogFlow(("selmR3Load:\n"));

    /*
     * Validate version.
     */
    if (u32Version != SELM_SAVED_STATE_VERSION)
    {
        Log(("selmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset.
     */
    SELMR3Reset(pVM);

    /* Get the monitoring flag. */
    SSMR3GetBool(pSSM, &pVM->selm.s.fDisableMonitoring);

    /* Get the TSS state flag. */
    SSMR3GetBool(pSSM, &pVM->selm.s.fSyncTSSRing0Stack);

    /*
     * Get the selectors.
     */
    RTSEL SelCS;
    SSMR3GetSel(pSSM, &SelCS);
    RTSEL SelDS;
    SSMR3GetSel(pSSM, &SelDS);
    RTSEL SelCS64;
    SSMR3GetSel(pSSM, &SelCS64);
    /* SelDS64 is read only to keep the stream position; the slot is a
       reserved placeholder (the save side writes CS64 twice) and the value
       is intentionally unused below. */
    RTSEL SelDS64;
    SSMR3GetSel(pSSM, &SelDS64);
    RTSEL SelTSS;
    SSMR3GetSel(pSSM, &SelTSS);
    RTSEL SelTSSTrap08;
    SSMR3GetSel(pSSM, &SelTSSTrap08);

    /* Copy the selectors; they will be checked during relocation. */
    PSELM pSelm = &pVM->selm.s;
    pSelm->aHyperSel[SELM_HYPER_SEL_CS]         = SelCS;
    pSelm->aHyperSel[SELM_HYPER_SEL_DS]         = SelDS;
    pSelm->aHyperSel[SELM_HYPER_SEL_CS64]       = SelCS64;
    pSelm->aHyperSel[SELM_HYPER_SEL_TSS]        = SelTSS;
    pSelm->aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SelTSSTrap08;

    return VINF_SUCCESS;
}
700
701
/**
 * Sync the GDT, LDT and TSS after loading the state.
 *
 * Just to play save, we set the FFs to force syncing before
 * executing GC code.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("selmR3LoadDone:\n"));

    /*
     * Don't do anything if it's a load failure.
     */
    int rc = SSMR3HandleGetStatus(pSSM);
    if (VBOX_FAILURE(rc))
        return VINF_SUCCESS;

    /*
     * Do the syncing if we're in protected mode.
     */
    if (PGMGetGuestMode(pVM) != PGMMODE_REAL)
    {
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
        SELMR3UpdateFromCPUM(pVM);
    }

    /*
     * Flag everything for resync on next raw mode entry.
     * (Deliberately set again even after the eager sync above, so the next
     * raw-mode entry re-validates regardless of what UpdateFromCPUM cleared.)
     */
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);

    return VINF_SUCCESS;
}
743
744
745/**
746 * Updates the Guest GDT & LDT virtualization based on current CPU state.
747 *
748 * @returns VBox status code.
749 * @param pVM The VM to operate on.
750 */
751SELMR3DECL(int) SELMR3UpdateFromCPUM(PVM pVM)
752{
753 int rc = VINF_SUCCESS;
754
755 if (pVM->selm.s.fDisableMonitoring)
756 {
757 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
758 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
759 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
760
761 return VINF_SUCCESS;
762 }
763
764 STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
765
766 /*
767 * GDT sync
768 */
769 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT))
770 {
771 /*
772 * Always assume the best
773 */
774 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
775
776 /* If the GDT was changed, then make sure the LDT is checked too */
777 /** @todo only do this if the actual ldtr selector was changed; this is a bit excessive */
778 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
779 /* Same goes for the TSS selector */
780 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
781
782 /*
783 * Get the GDTR and check if there is anything to do (there usually is).
784 */
785 VBOXGDTR GDTR;
786 CPUMGetGuestGDTR(pVM, &GDTR);
787 if (GDTR.cbGdt < sizeof(VBOXDESC))
788 {
789 Log(("No GDT entries...\n"));
790 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
791 return VINF_SUCCESS;
792 }
793
794 /*
795 * Read the Guest GDT.
796 * ASSUMES that the entire GDT is in memory.
797 */
798 RTUINT cbEffLimit = GDTR.cbGdt;
799 PVBOXDESC pGDTE = &pVM->selm.s.paGdtHC[1];
800 rc = PGMPhysReadGCPtr(pVM, pGDTE, GDTR.pGdt + sizeof(VBOXDESC), cbEffLimit + 1 - sizeof(VBOXDESC));
801 if (VBOX_FAILURE(rc))
802 {
803 /*
804 * Read it page by page.
805 *
806 * Keep track of the last valid page and delay memsets and
807 * adjust cbEffLimit to reflect the effective size. The latter
808 * is something we do in the belief that the guest will probably
809 * never actually commit the last page, thus allowing us to keep
810 * our selectors in the high end of the GDT.
811 */
812 RTUINT cbLeft = cbEffLimit + 1 - sizeof(VBOXDESC);
813 RTGCPTR GCPtrSrc = (RTGCPTR)GDTR.pGdt + sizeof(VBOXDESC);
814 uint8_t *pu8Dst = (uint8_t *)&pVM->selm.s.paGdtHC[1];
815 uint8_t *pu8DstInvalid = pu8Dst;
816
817 while (cbLeft)
818 {
819 RTUINT cb = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
820 cb = RT_MIN(cb, cbLeft);
821 rc = PGMPhysReadGCPtr(pVM, pu8Dst, GCPtrSrc, cb);
822 if (VBOX_SUCCESS(rc))
823 {
824 if (pu8DstInvalid != pu8Dst)
825 memset(pu8DstInvalid, 0, pu8Dst - pu8DstInvalid);
826 GCPtrSrc += cb;
827 pu8Dst += cb;
828 pu8DstInvalid = pu8Dst;
829 }
830 else if ( rc == VERR_PAGE_NOT_PRESENT
831 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
832 {
833 GCPtrSrc += cb;
834 pu8Dst += cb;
835 }
836 else
837 {
838 AssertReleaseMsgFailed(("Couldn't read GDT at %RX32, rc=%Vrc!\n", GDTR.pGdt, rc));
839 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
840 return VERR_NOT_IMPLEMENTED;
841 }
842 cbLeft -= cb;
843 }
844
845 /* any invalid pages at the end? */
846 if (pu8DstInvalid != pu8Dst)
847 {
848 cbEffLimit = pu8DstInvalid - (uint8_t *)pVM->selm.s.paGdtHC - 1;
849 /* If any GDTEs was invalidated, zero them. */
850 if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit)
851 memset(pu8DstInvalid + cbEffLimit + 1, 0, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);
852 }
853
854 /* keep track of the effective limit. */
855 if (cbEffLimit != pVM->selm.s.cbEffGuestGdtLimit)
856 {
857 Log(("SELMR3UpdateFromCPUM: cbEffGuestGdtLimit=%#x -> %#x (actual %#x)\n",
858 pVM->selm.s.cbEffGuestGdtLimit, cbEffLimit, GDTR.cbGdt));
859 pVM->selm.s.cbEffGuestGdtLimit = cbEffLimit;
860 }
861 }
862
863 /*
864 * Check if the Guest GDT intrudes on our GDT entries.
865 */
866 /** @todo we should try to minimize relocations by making sure our current selectors can be reused. */
867 RTSEL aHyperSel[SELM_HYPER_SEL_MAX];
868 if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
869 {
870 PVBOXDESC pGDTEStart = pVM->selm.s.paGdtHC;
871 PVBOXDESC pGDTE = (PVBOXDESC)((char *)pGDTEStart + GDTR.cbGdt + 1 - sizeof(VBOXDESC));
872 int iGDT = 0;
873
874 Log(("Internal SELM GDT conflict: use non-present entries\n"));
875 STAM_COUNTER_INC(&pVM->selm.s.StatScanForHyperSels);
876 while (pGDTE > pGDTEStart)
877 {
878 /* We can reuse non-present entries */
879 if (!pGDTE->Gen.u1Present)
880 {
881 aHyperSel[iGDT] = ((uintptr_t)pGDTE - (uintptr_t)pVM->selm.s.paGdtHC) / sizeof(VBOXDESC);
882 aHyperSel[iGDT] = aHyperSel[iGDT] << X86_SEL_SHIFT;
883 Log(("SELM: Found unused GDT %04X\n", aHyperSel[iGDT]));
884 iGDT++;
885 if (iGDT >= SELM_HYPER_SEL_MAX)
886 break;
887 }
888
889 pGDTE--;
890 }
891 if (iGDT != SELM_HYPER_SEL_MAX)
892 {
893 AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
894 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
895 return VERR_NOT_IMPLEMENTED;
896 }
897 }
898 else
899 {
900 aHyperSel[SELM_HYPER_SEL_CS] = SELM_HYPER_DEFAULT_SEL_CS;
901 aHyperSel[SELM_HYPER_SEL_DS] = SELM_HYPER_DEFAULT_SEL_DS;
902 aHyperSel[SELM_HYPER_SEL_CS64] = SELM_HYPER_DEFAULT_SEL_CS64;
903 aHyperSel[SELM_HYPER_SEL_TSS] = SELM_HYPER_DEFAULT_SEL_TSS;
904 aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SELM_HYPER_DEFAULT_SEL_TSS_TRAP08;
905 }
906
907 /*
908 * Work thru the copied GDT entries adjusting them for correct virtualization.
909 */
910 PVBOXDESC pGDTEEnd = (PVBOXDESC)((char *)pGDTE + cbEffLimit + 1 - sizeof(VBOXDESC));
911 while (pGDTE < pGDTEEnd)
912 {
913 if (pGDTE->Gen.u1Present)
914 {
915 /*
916 * Code and data selectors are generally 1:1, with the
917 * 'little' adjustment we do for DPL 0 selectors.
918 */
919 if (pGDTE->Gen.u1DescType)
920 {
921 /*
922 * Hack for A-bit against Trap E on read-only GDT.
923 */
924 /** @todo Fix this by loading ds and cs before turning off WP. */
925 pGDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
926
927 /*
928 * All DPL 0 code and data segments are squeezed into DPL 1.
929 *
930 * We're skipping conforming segments here because those
931 * cannot give us any trouble.
932 */
933 if ( pGDTE->Gen.u2Dpl == 0
934 && (pGDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
935 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
936 pGDTE->Gen.u2Dpl = 1;
937 }
938 else
939 {
940 /*
941 * System type selectors are marked not present.
942 * Recompiler or special handling is required for these.
943 */
944 /** @todo what about interrupt gates and rawr0? */
945 pGDTE->Gen.u1Present = 0;
946 }
947 }
948
949 /* Next GDT entry. */
950 pGDTE++;
951 }
952
953 /*
954 * Check if our hypervisor selectors were changed.
955 */
956 if ( aHyperSel[SELM_HYPER_SEL_CS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]
957 || aHyperSel[SELM_HYPER_SEL_DS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]
958 || aHyperSel[SELM_HYPER_SEL_CS64] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]
959 || aHyperSel[SELM_HYPER_SEL_TSS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]
960 || aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08])
961 {
962 /* Reinitialize our hypervisor GDTs */
963 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] = aHyperSel[SELM_HYPER_SEL_CS];
964 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] = aHyperSel[SELM_HYPER_SEL_DS];
965 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] = aHyperSel[SELM_HYPER_SEL_CS64];
966 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] = aHyperSel[SELM_HYPER_SEL_TSS];
967 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
968
969 STAM_COUNTER_INC(&pVM->selm.s.StatHyperSelsChanged);
970
971 /*
972 * Do the relocation callbacks to let everyone update their hyper selector dependencies.
973 * (SELMR3Relocate will call selmR3SetupHyperGDTSelectors() for us.)
974 */
975 VMR3Relocate(pVM, 0);
976 }
977 else if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
978 /* We overwrote all entries above, so we have to save them again. */
979 selmR3SetupHyperGDTSelectors(pVM);
980
981 /*
982 * Adjust the cached GDT limit.
983 * Any GDT entries which have been removed must be cleared.
984 */
985 if (pVM->selm.s.GuestGdtr.cbGdt != GDTR.cbGdt)
986 {
987 if (pVM->selm.s.GuestGdtr.cbGdt > GDTR.cbGdt)
988 memset(pGDTE, 0, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);
989#ifndef SELM_TRACK_GUEST_GDT_CHANGES
990 pVM->selm.s.GuestGdtr.cbGdt = GDTR.cbGdt;
991#endif
992 }
993
994#ifdef SELM_TRACK_GUEST_GDT_CHANGES
995 /*
996 * Check if Guest's GDTR is changed.
997 */
998 if ( GDTR.pGdt != pVM->selm.s.GuestGdtr.pGdt
999 || GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1000 {
1001 Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%08X cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
1002
1003 /*
1004 * [Re]Register write virtual handler for guest's GDT.
1005 */
1006 if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
1007 {
1008 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
1009 AssertRC(rc);
1010 }
1011
1012 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
1013 0, selmGuestGDTWriteHandler, "selmgcGuestGDTWriteHandler", 0, "Guest GDT write access handler");
1014 if (VBOX_FAILURE(rc))
1015 return rc;
1016
1017 /* Update saved Guest GDTR. */
1018 pVM->selm.s.GuestGdtr = GDTR;
1019 pVM->selm.s.fGDTRangeRegistered = true;
1020 }
1021#endif
1022 }
1023
1024 /*
1025 * TSS sync
1026 */
1027 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS))
1028 {
1029 SELMR3SyncTSS(pVM);
1030 }
1031
1032 /*
1033 * LDT sync
1034 */
1035 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_LDT))
1036 {
1037 /*
1038 * Always assume the best
1039 */
1040 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
1041
1042 /*
1043 * LDT handling is done similarly to the GDT handling with a shadow
1044 * array. However, since the LDT is expected to be swappable (at least
1045 * some ancient OSes makes it swappable) it must be floating and
1046 * synced on a per-page basis.
1047 *
1048 * Eventually we will change this to be fully on demand. Meaning that
1049 * we will only sync pages containing LDT selectors actually used and
1050 * let the #PF handler lazily sync pages as they are used.
1051 * (This applies to GDT too, when we start making OS/2 fast.)
1052 */
1053
1054 /*
1055 * First, determin the current LDT selector.
1056 */
1057 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1058 if ((SelLdt & X86_SEL_MASK) == 0)
1059 {
1060 /* ldtr = 0 - update hyper LDTR and deregister any active handler. */
1061 CPUMSetHyperLDTR(pVM, 0);
1062#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1063 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1064 {
1065 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1066 AssertRC(rc);
1067 pVM->selm.s.GCPtrGuestLdt = ~0U;
1068 }
1069#endif
1070 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1071 return VINF_SUCCESS;
1072 }
1073
1074 /*
1075 * Get the LDT selector.
1076 */
1077 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelLdt >> X86_SEL_SHIFT];
1078 RTGCPTR GCPtrLdt = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1079 unsigned cbLdt = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1080 if (pDesc->Gen.u1Granularity)
1081 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1082
1083 /*
1084 * Validate it.
1085 */
1086 if ( !cbLdt
1087 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt
1088 || pDesc->Gen.u1DescType
1089 || pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1090 {
1091 AssertMsg(!cbLdt, ("Invalid LDT %04x!\n", SelLdt));
1092
1093 /* cbLdt > 0:
1094 * This is quite impossible, so we do as most people do when faced with
1095 * the impossible, we simply ignore it.
1096 */
1097 CPUMSetHyperLDTR(pVM, 0);
1098#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1099 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1100 {
1101 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1102 AssertRC(rc);
1103 pVM->selm.s.GCPtrGuestLdt = ~0U;
1104 }
1105#endif
1106 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1107 return VINF_SUCCESS;
1108 }
1109 /** @todo check what intel does about odd limits. */
1110 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(VBOXDESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1111
1112 /*
1113 * Use the cached guest ldt address if the descriptor has already been modified (see below)
1114 * (this is necessary due to redundant LDT updates; see todo above at GDT sync)
1115 */
1116 if (MMHyperIsInsideArea(pVM, GCPtrLdt) == true)
1117 GCPtrLdt = pVM->selm.s.GCPtrGuestLdt; /* use the old one */
1118
1119
1120#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1121 /** @todo Handle only present LDT segments. */
1122 // if (pDesc->Gen.u1Present)
1123 {
1124 /*
1125 * Check if Guest's LDT address/limit is changed.
1126 */
1127 if ( GCPtrLdt != pVM->selm.s.GCPtrGuestLdt
1128 || cbLdt != pVM->selm.s.cbLdtLimit)
1129 {
1130 Log(("SELMR3UpdateFromCPUM: Guest LDT changed to from %VGv:%04x to %VGv:%04x. (GDTR=%VGv:%04x)\n",
1131 pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1132
1133 /*
1134 * [Re]Register write virtual handler for guest's GDT.
1135 * In the event of LDT overlapping something, don't install it just assume it's being updated.
1136 */
1137 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1138 {
1139 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1140 AssertRC(rc);
1141 }
1142#ifdef DEBUG
1143 if (pDesc->Gen.u1Present)
1144 Log(("LDT selector marked not present!!\n"));
1145#endif
1146 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
1147 0, selmGuestLDTWriteHandler, "selmgcGuestLDTWriteHandler", 0, "Guest LDT write access handler");
1148 if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
1149 {
1150 /** @todo investigate the various cases where conflicts happen and try avoid them by enh. the instruction emulation. */
1151 pVM->selm.s.GCPtrGuestLdt = ~0;
1152 Log(("WARNING: Guest LDT (%VGv:%04x) conflicted with existing access range!! Assumes LDT is begin updated. (GDTR=%VGv:%04x)\n",
1153 GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1154 }
1155 else if (VBOX_SUCCESS(rc))
1156 pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
1157 else
1158 {
1159 CPUMSetHyperLDTR(pVM, 0);
1160 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1161 return rc;
1162 }
1163
1164 pVM->selm.s.cbLdtLimit = cbLdt;
1165 }
1166 }
1167#else
1168 pVM->selm.s.cbLdtLimit = cbLdt;
1169#endif
1170
1171 /*
1172 * Calc Shadow LDT base.
1173 */
1174 unsigned off;
1175 pVM->selm.s.offLdtHyper = off = (GCPtrLdt & PAGE_OFFSET_MASK);
1176 RTGCPTR GCPtrShadowLDT = (RTGCPTR)((RTGCUINTPTR)pVM->selm.s.GCPtrLdt + off);
1177 PVBOXDESC pShadowLDT = (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1178
1179 /*
1180 * Enable the LDT selector in the shadow GDT.
1181 */
1182 pDesc->Gen.u1Present = 1;
1183 pDesc->Gen.u16BaseLow = RT_LOWORD(GCPtrShadowLDT);
1184 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(GCPtrShadowLDT);
1185 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(GCPtrShadowLDT);
1186 pDesc->Gen.u1Available = 0;
1187 pDesc->Gen.u1Reserved = 0;
1188 if (cbLdt > 0xffff)
1189 {
1190 cbLdt = 0xffff;
1191 pDesc->Gen.u4LimitHigh = 0;
1192 pDesc->Gen.u16LimitLow = pDesc->Gen.u1Granularity ? 0xf : 0xffff;
1193 }
1194
1195 /*
1196 * Set Hyper LDTR and notify TRPM.
1197 */
1198 CPUMSetHyperLDTR(pVM, SelLdt);
1199
1200 /*
1201 * Loop synchronising the LDT page by page.
1202 */
1203 /** @todo investigate how intel handle various operations on half present cross page entries. */
1204 off = GCPtrLdt & (sizeof(VBOXDESC) - 1);
1205 AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
1206
1207 /* Note: Do not skip the first selector; unlike the GDT, a zero LDT selector is perfectly valid. */
1208 unsigned cbLeft = cbLdt + 1;
1209 PVBOXDESC pLDTE = pShadowLDT;
1210 while (cbLeft)
1211 {
1212 /*
1213 * Read a chunk.
1214 */
1215 unsigned cbChunk = PAGE_SIZE - ((RTGCUINTPTR)GCPtrLdt & PAGE_OFFSET_MASK);
1216 if (cbChunk > cbLeft)
1217 cbChunk = cbLeft;
1218 rc = PGMPhysReadGCPtr(pVM, pShadowLDT, GCPtrLdt, cbChunk);
1219 if (VBOX_SUCCESS(rc))
1220 {
1221 /*
1222 * Mark page
1223 */
1224 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D);
1225 AssertRC(rc);
1226
1227 /*
1228 * Loop thru the available LDT entries.
1229 * Figure out where to start and end and the potential cross pageness of
1230 * things adds a little complexity. pLDTE is updated there and not in the
1231 * 'next' part of the loop. The pLDTEEnd is inclusive.
1232 */
1233 PVBOXDESC pLDTEEnd = (PVBOXDESC)((uintptr_t)pShadowLDT + cbChunk) - 1;
1234 if (pLDTE + 1 < pShadowLDT)
1235 pLDTE = (PVBOXDESC)((uintptr_t)pShadowLDT + off);
1236 while (pLDTE <= pLDTEEnd)
1237 {
1238 if (pLDTE->Gen.u1Present)
1239 {
1240 /*
1241 * Code and data selectors are generally 1:1, with the
1242 * 'little' adjustment we do for DPL 0 selectors.
1243 */
1244 if (pLDTE->Gen.u1DescType)
1245 {
1246 /*
1247 * Hack for A-bit against Trap E on read-only GDT.
1248 */
1249 /** @todo Fix this by loading ds and cs before turning off WP. */
1250 if (!(pLDTE->Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1251 pLDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1252
1253 /*
1254 * All DPL 0 code and data segments are squeezed into DPL 1.
1255 *
1256 * We're skipping conforming segments here because those
1257 * cannot give us any trouble.
1258 */
1259 if ( pLDTE->Gen.u2Dpl == 0
1260 && (pLDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1261 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
1262 pLDTE->Gen.u2Dpl = 1;
1263 }
1264 else
1265 {
1266 /*
1267 * System type selectors are marked not present.
1268 * Recompiler or special handling is required for these.
1269 */
1270 /** @todo what about interrupt gates and rawr0? */
1271 pLDTE->Gen.u1Present = 0;
1272 }
1273 }
1274
1275 /* Next LDT entry. */
1276 pLDTE++;
1277 }
1278 }
1279 else
1280 {
1281 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%d\n", rc));
1282 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0);
1283 AssertRC(rc);
1284 }
1285
1286 /*
1287 * Advance to the next page.
1288 */
1289 cbLeft -= cbChunk;
1290 GCPtrShadowLDT += cbChunk;
1291 pShadowLDT = (PVBOXDESC)((char *)pShadowLDT + cbChunk);
1292 GCPtrLdt += cbChunk;
1293 }
1294 }
1295
1296 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1297 return VINF_SUCCESS;
1298}
1299
1300
1301/**
1302 * \#PF Handler callback for virtual access handler ranges.
1303 *
1304 * Important to realize that a physical page in a range can have aliases, and
1305 * for ALL and WRITE handlers these will also trigger.
1306 *
1307 * @returns VINF_SUCCESS if the handler have carried out the operation.
1308 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1309 * @param pVM VM Handle.
1310 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1311 * @param pvPtr The HC mapping of that address.
1312 * @param pvBuf What the guest is reading/writing.
1313 * @param cbBuf How much it's reading/writing.
1314 * @param enmAccessType The access type.
1315 * @param pvUser User argument.
1316 */
1317static DECLCALLBACK(int) selmGuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1318{
1319 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1320 Log(("selmGuestGDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1321 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
1322
1323 return VINF_PGM_HANDLER_DO_DEFAULT;
1324}
1325
1326/**
1327 * \#PF Handler callback for virtual access handler ranges.
1328 *
1329 * Important to realize that a physical page in a range can have aliases, and
1330 * for ALL and WRITE handlers these will also trigger.
1331 *
1332 * @returns VINF_SUCCESS if the handler have carried out the operation.
1333 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1334 * @param pVM VM Handle.
1335 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1336 * @param pvPtr The HC mapping of that address.
1337 * @param pvBuf What the guest is reading/writing.
1338 * @param cbBuf How much it's reading/writing.
1339 * @param enmAccessType The access type.
1340 * @param pvUser User argument.
1341 */
1342static DECLCALLBACK(int) selmGuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1343{
1344 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1345 Log(("selmGuestLDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1346 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
1347 return VINF_PGM_HANDLER_DO_DEFAULT;
1348}
1349
1350/**
1351 * \#PF Handler callback for virtual access handler ranges.
1352 *
1353 * Important to realize that a physical page in a range can have aliases, and
1354 * for ALL and WRITE handlers these will also trigger.
1355 *
1356 * @returns VINF_SUCCESS if the handler have carried out the operation.
1357 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1358 * @param pVM VM Handle.
1359 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1360 * @param pvPtr The HC mapping of that address.
1361 * @param pvBuf What the guest is reading/writing.
1362 * @param cbBuf How much it's reading/writing.
1363 * @param enmAccessType The access type.
1364 * @param pvUser User argument.
1365 */
1366static DECLCALLBACK(int) selmGuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1367{
1368 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1369 Log(("selmGuestTSSWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1370 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1371 return VINF_PGM_HANDLER_DO_DEFAULT;
1372}
1373
1374/**
1375 * Check if the TSS ring 0 stack selector and pointer were updated (for now)
1376 *
1377 * @returns VBox status code.
1378 * @param pVM The VM to operate on.
1379 */
1380SELMR3DECL(int) SELMR3SyncTSS(PVM pVM)
1381{
1382 int rc;
1383
1384 if (pVM->selm.s.fDisableMonitoring)
1385 {
1386 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
1387 return VINF_SUCCESS;
1388 }
1389
1390/** @todo r=bird: SELMR3SyncTSS should be VMMAll code.
1391 * All the base, size, flags and stuff must be kept up to date in the CPUM tr register.
1392 */
1393 STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
1394
1395 Assert(!VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT));
1396 Assert(VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS));
1397
1398 /*
1399 * TSS sync
1400 */
1401 RTSEL SelTss = CPUMGetGuestTR(pVM);
1402 if (SelTss & X86_SEL_MASK)
1403 {
1404 /** @todo r=bird: strictly speaking, this is wrong as we shouldn't bother with changes to
1405 * the TSS selector once its loaded. There are a bunch of this kind of problems (see Sander's
1406 * comment in the unzip defect)
1407 * The first part here should only be done when we're loading TR. The latter part which is
1408 * updating of the ss0:esp0 pair can be done by the access handler now since we can trap all
1409 * accesses, also REM ones. */
1410
1411 /*
1412 * Guest TR is not NULL.
1413 */
1414 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];
1415 RTGCPTR GCPtrTss = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1416 unsigned cbTss = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1417 if (pDesc->Gen.u1Granularity)
1418 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1419 cbTss++;
1420 pVM->selm.s.cbGuestTss = cbTss;
1421 pVM->selm.s.fGuestTss32Bit = pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1422 || pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1423
1424 /* Don't bother with anything but the core structure. (Actually all we care for is the r0 ss.) */
1425 if (cbTss > sizeof(VBOXTSS))
1426 cbTss = sizeof(VBOXTSS);
1427 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT),
1428 ("GCPtrTss=%VGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss));
1429
1430 // All system GDTs are marked not present above. That explains why this check fails.
1431 //if (pDesc->Gen.u1Present)
1432 /** @todo Handle only present TSS segments. */
1433 {
1434 /*
1435 * Check if Guest's TSS is changed.
1436 */
1437 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss
1438 || cbTss != pVM->selm.s.cbMonitoredGuestTss)
1439 {
1440 Log(("SELMR3UpdateFromCPUM: Guest's TSS is changed to pTss=%08X cbTss=%08X cbGuestTss\n", GCPtrTss, cbTss, pVM->selm.s.cbGuestTss));
1441
1442 /*
1443 * Validate it.
1444 */
1445 if ( SelTss & X86_SEL_LDT
1446 || !cbTss
1447 || SelTss >= pVM->selm.s.GuestGdtr.cbGdt
1448 || pDesc->Gen.u1DescType
1449 || ( pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
1450 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
1451 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL
1452 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) )
1453 {
1454 AssertMsgFailed(("Invalid Guest TSS %04x!\n", SelTss));
1455 }
1456 else
1457 {
1458 /*
1459 * [Re]Register write virtual handler for guest's TSS.
1460 */
1461 if (pVM->selm.s.GCPtrGuestTss != ~0U)
1462 {
1463 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
1464 AssertRC(rc);
1465 }
1466
1467 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbTss - 1,
1468 0, selmGuestTSSWriteHandler, "selmgcGuestTSSWriteHandler", 0, "Guest TSS write access handler");
1469 if (VBOX_FAILURE(rc))
1470 {
1471 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1472 return rc;
1473 }
1474
1475 /* Update saved Guest TSS info. */
1476 pVM->selm.s.GCPtrGuestTss = GCPtrTss;
1477 pVM->selm.s.cbMonitoredGuestTss = cbTss;
1478 pVM->selm.s.GCSelTss = SelTss;
1479 }
1480 }
1481
1482 /* Update the ring 0 stack selector and base address */
1483 /* feeling very lazy; reading too much */
1484 VBOXTSS tss;
1485 rc = PGMPhysReadGCPtr(pVM, &tss, GCPtrTss, sizeof(VBOXTSS));
1486 if (VBOX_SUCCESS(rc))
1487 {
1488 #ifdef DEBUG
1489 uint32_t ssr0, espr0;
1490
1491 SELMGetRing1Stack(pVM, &ssr0, &espr0);
1492 ssr0 &= ~1;
1493
1494 if (ssr0 != tss.ss0 || espr0 != tss.esp0)
1495 Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));
1496 Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
1497 #endif
1498 /* Update our TSS structure for the guest's ring 1 stack */
1499 SELMSetRing1Stack(pVM, tss.ss0 | 1, tss.esp0);
1500 }
1501 else
1502 {
1503 /* Note: the ring 0 stack selector and base address are updated on demand in this case. */
1504
1505 /* Note: handle these dependencies better! */
1506 TRPMR3SetGuestTrapHandler(pVM, 0x2E, TRPM_INVALID_HANDLER);
1507 TRPMR3SetGuestTrapHandler(pVM, 0x80, TRPM_INVALID_HANDLER);
1508 pVM->selm.s.fSyncTSSRing0Stack = true;
1509 }
1510 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
1511 }
1512 }
1513
1514 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1515 return VINF_SUCCESS;
1516}
1517
1518
1519/**
1520 * Compares the Guest GDT and LDT with the shadow tables.
1521 * This is a VBOX_STRICT only function.
1522 *
1523 * @returns VBox status code.
1524 * @param pVM The VM Handle.
1525 */
1526SELMR3DECL(int) SELMR3DebugCheck(PVM pVM)
1527{
1528#ifdef VBOX_STRICT
1529 /*
1530 * Get GDTR and check for conflict.
1531 */
1532 VBOXGDTR GDTR;
1533 CPUMGetGuestGDTR(pVM, &GDTR);
1534 if (GDTR.cbGdt == 0)
1535 return VINF_SUCCESS;
1536
1537 if (GDTR.cbGdt >= (unsigned)(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> X86_SEL_SHIFT))
1538 Log(("SELMR3DebugCheck: guest GDT size forced us to look for unused selectors.\n"));
1539
1540 if (GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1541 Log(("SELMR3DebugCheck: limits have changed! new=%d old=%d\n", GDTR.cbGdt, pVM->selm.s.GuestGdtr.cbGdt));
1542
1543 /*
1544 * Loop thru the GDT checking each entry.
1545 */
1546 RTGCPTR GCPtrGDTEGuest = GDTR.pGdt;
1547 PVBOXDESC pGDTE = pVM->selm.s.paGdtHC;
1548 PVBOXDESC pGDTEEnd = (PVBOXDESC)((uintptr_t)pGDTE + GDTR.cbGdt);
1549 while (pGDTE < pGDTEEnd)
1550 {
1551 VBOXDESC GDTEGuest;
1552 int rc = PGMPhysReadGCPtr(pVM, &GDTEGuest, GCPtrGDTEGuest, sizeof(GDTEGuest));
1553 if (VBOX_SUCCESS(rc))
1554 {
1555 if (pGDTE->Gen.u1DescType || pGDTE->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1556 {
1557 if ( pGDTE->Gen.u16LimitLow != GDTEGuest.Gen.u16LimitLow
1558 || pGDTE->Gen.u4LimitHigh != GDTEGuest.Gen.u4LimitHigh
1559 || pGDTE->Gen.u16BaseLow != GDTEGuest.Gen.u16BaseLow
1560 || pGDTE->Gen.u8BaseHigh1 != GDTEGuest.Gen.u8BaseHigh1
1561 || pGDTE->Gen.u8BaseHigh2 != GDTEGuest.Gen.u8BaseHigh2
1562 || pGDTE->Gen.u1DefBig != GDTEGuest.Gen.u1DefBig
1563 || pGDTE->Gen.u1DescType != GDTEGuest.Gen.u1DescType)
1564 {
1565 unsigned iGDT = pGDTE - pVM->selm.s.paGdtHC;
1566 SELMR3DumpDescriptor(*pGDTE, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, shadow");
1567 SELMR3DumpDescriptor(GDTEGuest, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, guest");
1568 }
1569 }
1570 }
1571
1572 /* Advance to the next descriptor. */
1573 GCPtrGDTEGuest += sizeof(VBOXDESC);
1574 pGDTE++;
1575 }
1576
1577
1578 /*
1579 * LDT?
1580 */
1581 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1582 if ((SelLdt & X86_SEL_MASK) == 0)
1583 return VINF_SUCCESS;
1584 if (SelLdt > GDTR.cbGdt)
1585 {
1586 Log(("SELMR3DebugCheck: ldt is out of bound SelLdt=%#x\n", SelLdt));
1587 return VERR_INTERNAL_ERROR;
1588 }
1589 VBOXDESC LDTDesc;
1590 int rc = PGMPhysReadGCPtr(pVM, &LDTDesc, GDTR.pGdt + (SelLdt & X86_SEL_MASK), sizeof(LDTDesc));
1591 if (VBOX_FAILURE(rc))
1592 {
1593 Log(("SELMR3DebugCheck: Failed to read LDT descriptor. rc=%d\n", rc));
1594 return rc;
1595 }
1596 RTGCPTR GCPtrLDTEGuest = LDTDesc.Gen.u16BaseLow | (LDTDesc.Gen.u8BaseHigh1 << 16) | (LDTDesc.Gen.u8BaseHigh2 << 24);
1597 unsigned cbLdt = LDTDesc.Gen.u16LimitLow | (LDTDesc.Gen.u4LimitHigh << 16);
1598 if (LDTDesc.Gen.u1Granularity)
1599 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1600
1601 /*
1602 * Validate it.
1603 */
1604 if (!cbLdt)
1605 return VINF_SUCCESS;
1606 /** @todo check what intel does about odd limits. */
1607 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(VBOXDESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1608 if ( LDTDesc.Gen.u1DescType
1609 || LDTDesc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT
1610 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt)
1611 {
1612 Log(("SELmR3DebugCheck: Invalid LDT %04x!\n", SelLdt));
1613 return VERR_INTERNAL_ERROR;
1614 }
1615
1616 /*
1617 * Loop thru the LDT checking each entry.
1618 */
1619 unsigned off = (GCPtrLDTEGuest & PAGE_OFFSET_MASK);
1620 PVBOXDESC pLDTE = (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1621 PVBOXDESC pLDTEEnd = (PVBOXDESC)((uintptr_t)pGDTE + cbLdt);
1622 while (pLDTE < pLDTEEnd)
1623 {
1624 VBOXDESC LDTEGuest;
1625 int rc = PGMPhysReadGCPtr(pVM, &LDTEGuest, GCPtrLDTEGuest, sizeof(LDTEGuest));
1626 if (VBOX_SUCCESS(rc))
1627 {
1628 if ( pLDTE->Gen.u16LimitLow != LDTEGuest.Gen.u16LimitLow
1629 || pLDTE->Gen.u4LimitHigh != LDTEGuest.Gen.u4LimitHigh
1630 || pLDTE->Gen.u16BaseLow != LDTEGuest.Gen.u16BaseLow
1631 || pLDTE->Gen.u8BaseHigh1 != LDTEGuest.Gen.u8BaseHigh1
1632 || pLDTE->Gen.u8BaseHigh2 != LDTEGuest.Gen.u8BaseHigh2
1633 || pLDTE->Gen.u1DefBig != LDTEGuest.Gen.u1DefBig
1634 || pLDTE->Gen.u1DescType != LDTEGuest.Gen.u1DescType)
1635 {
1636 unsigned iLDT = pLDTE - (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1637 SELMR3DumpDescriptor(*pLDTE, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, shadow");
1638 SELMR3DumpDescriptor(LDTEGuest, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, guest");
1639 }
1640 }
1641
1642 /* Advance to the next descriptor. */
1643 GCPtrLDTEGuest += sizeof(VBOXDESC);
1644 pLDTE++;
1645 }
1646
1647#else
1648 NOREF(pVM);
1649#endif
1650
1651 return VINF_SUCCESS;
1652}
1653
1654
/**
 * Validates the RawR0 TSS values against the one in the Guest TSS.
 *
 * Only does anything in VBOX_STRICT builds; otherwise it just returns true.
 *
 * @returns true if it matches.
 * @returns false and assertions on mismatch..
 * @param pVM     VM Handle.
 */
SELMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
{
#ifdef VBOX_STRICT

    RTSEL SelTss = CPUMGetGuestTR(pVM);
    if (SelTss & X86_SEL_MASK)
    {
        AssertMsg((SelTss & X86_SEL_MASK) == (pVM->selm.s.GCSelTss & X86_SEL_MASK), ("New TSS selector = %04X, old TSS selector = %04X\n", SelTss, pVM->selm.s.GCSelTss));

        /*
         * Guest TR is not NULL.
         */
        /* Decode base/limit from the shadow GDT entry the TR selector indexes. */
        PVBOXDESC    pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];
        RTGCPTR      GCPtrTss = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
        unsigned     cbTss = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
        if (pDesc->Gen.u1Granularity)
            cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK;   /* page granular limit -> byte limit */
        cbTss++;    /* limit -> size */
        /* Don't bother with anything but the core structure. (Actually all we care for is the r0 ss.) */
        if (cbTss > sizeof(VBOXTSS))
            cbTss = sizeof(VBOXTSS);
        AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT),
                  ("GCPtrTss=%VGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss));

        // All system GDTs are marked not present above. That explains why this check fails.
        //if (pDesc->Gen.u1Present)
        /** @todo Handle only present TSS segments. */
        {
            /*
             * Check if Guest's TSS was changed.
             */
            /* Mismatch means SELMR3SyncTSS missed an update - assert loudly. */
            if (    GCPtrTss != pVM->selm.s.GCPtrGuestTss
                ||  cbTss != pVM->selm.s.cbMonitoredGuestTss)
            {
                AssertMsgFailed(("Guest's TSS (Sel 0x%X) is changed from %RGv:%04x to %RGv:%04x\n",
                                SelTss, pVM->selm.s.GCPtrGuestTss, pVM->selm.s.cbMonitoredGuestTss,
                                GCPtrTss, cbTss));
            }
        }
    }

    /* Only verify the ring-0 stack if it isn't pending an on-demand resync. */
    if (!pVM->selm.s.fSyncTSSRing0Stack)
    {
        RTGCPTR     pGuestTSS = pVM->selm.s.GCPtrGuestTss;
        uint32_t    ESPR0;
        int rc = PGMPhysReadGCPtr(pVM, &ESPR0, pGuestTSS + RT_OFFSETOF(VBOXTSS, esp0), sizeof(ESPR0));
        if (VBOX_SUCCESS(rc))
        {
            RTSEL SelSS0;
            rc = PGMPhysReadGCPtr(pVM, &SelSS0, pGuestTSS + RT_OFFSETOF(VBOXTSS, ss0), sizeof(SelSS0));
            if (VBOX_SUCCESS(rc))
            {
                /* The shadow TSS keeps the guest's ring-0 stack as ring-1 (ss | 1). */
                if (    ESPR0 == pVM->selm.s.Tss.esp1
                    &&  SelSS0 == (pVM->selm.s.Tss.ss1 & ~1))
                    return true;

                RTGCPHYS GCPhys;
                uint64_t fFlags;

                rc = PGMGstGetPage(pVM, pGuestTSS, &fFlags, &GCPhys);
                AssertRC(rc);
                AssertMsgFailed(("TSS out of sync!! (%04X:%08X vs %04X:%08X (guest)) Tss=%VGv Phys=%VGp\n",
                                (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, SelSS0, ESPR0, pGuestTSS, GCPhys));
            }
            else
                AssertRC(rc);
        }
        else
            /* Happens during early Windows XP boot when it is switching page tables. */
            Assert(rc == VINF_SUCCESS || ((rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT) && !(CPUMGetGuestEFlags(pVM) & X86_EFL_IF)));
    }
    return false;
#else
    NOREF(pVM);
    return true;
#endif
}
1739
1740
1741/**
1742 * Returns flat address and limit of LDT by LDT selector from guest GDTR.
1743 *
1744 * Fully validate selector.
1745 *
1746 * @returns VBox status.
1747 * @param pVM VM Handle.
1748 * @param SelLdt LDT selector.
1749 * @param ppvLdt Where to store the flat address of LDT.
1750 * @param pcbLimit Where to store LDT limit.
1751 */
1752SELMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit)
1753{
1754 /* Get guest GDTR. */
1755 VBOXGDTR GDTR;
1756 CPUMGetGuestGDTR(pVM, &GDTR);
1757
1758 /* Check selector TI and GDT limit. */
1759 if ( SelLdt & X86_SEL_LDT
1760 || (SelLdt > GDTR.cbGdt))
1761 return VERR_INVALID_SELECTOR;
1762
1763 /* Read descriptor from GC. */
1764 VBOXDESC Desc;
1765 int rc = PGMPhysReadGCPtr(pVM, (void *)&Desc, (RTGCPTR)(GDTR.pGdt + (SelLdt & X86_SEL_MASK)), sizeof(Desc));
1766 if (VBOX_FAILURE(rc))
1767 {
1768 /* fatal */
1769 AssertMsgFailed(("Can't read LDT descriptor for selector=%04X\n", SelLdt));
1770 return VERR_SELECTOR_NOT_PRESENT;
1771 }
1772
1773 /* Check if LDT descriptor is not present. */
1774 if (Desc.Gen.u1Present == 0)
1775 return VERR_SELECTOR_NOT_PRESENT;
1776
1777 /* Check LDT descriptor type. */
1778 if ( Desc.Gen.u1DescType == 1
1779 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1780 return VERR_INVALID_SELECTOR;
1781
1782 /* LDT descriptor is ok. */
1783 if (ppvLdt)
1784 {
1785 *ppvLdt = (RTGCPTR)( (Desc.Gen.u8BaseHigh2 << 24)
1786 | (Desc.Gen.u8BaseHigh1 << 16)
1787 | Desc.Gen.u16BaseLow);
1788 *pcbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1789 }
1790 return VINF_SUCCESS;
1791}
1792
1793
1794/**
1795 * Gets information about a selector.
1796 * Intended for the debugger mostly and will prefer the guest
1797 * descriptor tables over the shadow ones.
1798 *
1799 * @returns VINF_SUCCESS on success.
1800 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
1801 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
1802 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
1803 * backing the selector table wasn't present.
1804 * @returns Other VBox status code on other errors.
1805 *
1806 * @param pVM VM handle.
1807 * @param Sel The selector to get info about.
1808 * @param pSelInfo Where to store the information.
1809 */
SELMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo)
{
    Assert(pSelInfo);

    /*
     * Read the descriptor entry
     */
    VBOXDESC Desc;
    if (    !(Sel & X86_SEL_LDT)
        && (    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK)
            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK)
            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == (Sel & X86_SEL_MASK)
            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == (Sel & X86_SEL_MASK)
            ||  pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK))
       )
    {
        /*
         * Hypervisor descriptor.
         * The selector matches one of the GDT slots SELM reserves for the
         * hypervisor, so fetch it from the shadow GDT (host memory) instead
         * of the guest's tables.
         */
        pSelInfo->fHyper = true;
        Desc = pVM->selm.s.paGdtHC[Sel >> X86_SEL_SHIFT];
    }
    else if (CPUMIsGuestInProtectedMode(pVM))
    {
        /*
         * Read it from the guest descriptor table.
         */
        pSelInfo->fHyper = false;

        VBOXGDTR Gdtr;
        RTGCPTR GCPtrDesc;
        CPUMGetGuestGDTR(pVM, &Gdtr);
        if (!(Sel & X86_SEL_LDT))
        {
            /* GDT - the whole 8-byte descriptor must lie within the GDT limit. */
            if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > (unsigned)Gdtr.cbGdt)
                return VERR_INVALID_SELECTOR;
            GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
        }
        else
        {
            /*
             * LDT - must locate the LDT first...
             * The LDTR selector indexes the GDT; read and validate that LDT
             * descriptor before resolving the requested selector inside it.
             */
            RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
            if (    (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(VBOXDESC) /* the first selector is invalid, right? */
                ||  (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > (unsigned)Gdtr.cbGdt)
                return VERR_INVALID_SELECTOR;
            GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
            int rc = PGMPhysReadGCPtr(pVM, &Desc, GCPtrDesc, sizeof(Desc));
            if (VBOX_FAILURE(rc))
                return rc;

            /* validate the LDT descriptor: present, system type, type == LDT. */
            if (Desc.Gen.u1Present == 0)
                return VERR_SELECTOR_NOT_PRESENT;
            if (    Desc.Gen.u1DescType == 1
                ||  Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
                return VERR_INVALID_SELECTOR;

            /* Requested selector must be entirely within the LDT limit
               (scaled by 4K pages when the granularity bit is set). */
            unsigned cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
            if (Desc.Gen.u1Granularity)
                cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
            if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > cbLimit)
                return VERR_INVALID_SELECTOR;

            /* calc the descriptor location: LDT base + selector offset. */
            GCPtrDesc = (Desc.Gen.u8BaseHigh2 << 24)
                      | (Desc.Gen.u8BaseHigh1 << 16)
                      | Desc.Gen.u16BaseLow;
            GCPtrDesc += (Sel & X86_SEL_MASK);
        }

        /* read the descriptor (GDT entry, or target entry inside the LDT). */
        int rc = PGMPhysReadGCPtr(pVM, &Desc, GCPtrDesc, sizeof(Desc));
        if (VBOX_FAILURE(rc))
            return rc;
    }
    else
    {
        /*
         * We're in real mode.
         * There are no descriptor tables; the base is simply Sel << 4 and
         * the limit is the fixed real-mode 64KB segment limit.
         */
        pSelInfo->Sel       = Sel;
        pSelInfo->GCPtrBase = Sel << 4;
        pSelInfo->cbLimit   = 0xffff;
        pSelInfo->fHyper    = false;
        pSelInfo->fRealMode = true;
        memset(&pSelInfo->Raw, 0, sizeof(pSelInfo->Raw));
        return VINF_SUCCESS;
    }

    /*
     * Extract the base and limit
     * (limit is scaled to bytes when the granularity bit is set).
     */
    pSelInfo->Sel     = Sel;
    pSelInfo->Raw     = Desc;
    pSelInfo->cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
    if (Desc.Gen.u1Granularity)
        pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
    pSelInfo->GCPtrBase = (Desc.Gen.u8BaseHigh2 << 24)
                        | (Desc.Gen.u8BaseHigh1 << 16)
                        | Desc.Gen.u16BaseLow;
    pSelInfo->fRealMode = false;

    return VINF_SUCCESS;
}
1917
1918
1919/**
1920 * Gets information about a selector from the shadow tables.
1921 *
1922 * This is intended to be faster than the SELMR3GetSelectorInfo() method, but requires
1923 * that the caller ensures that the shadow tables are up to date.
1924 *
1925 * @returns VINF_SUCCESS on success.
1926 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
1927 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
1928 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
1929 * backing the selector table wasn't present.
1930 * @returns Other VBox status code on other errors.
1931 *
1932 * @param pVM VM handle.
1933 * @param Sel The selector to get info about.
1934 * @param pSelInfo Where to store the information.
1935 */
1936SELMR3DECL(int) SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo)
1937{
1938 Assert(pSelInfo);
1939
1940 /*
1941 * Read the descriptor entry
1942 */
1943 VBOXDESC Desc;
1944 if (!(Sel & X86_SEL_LDT))
1945 {
1946 /*
1947 * Global descriptor.
1948 */
1949 Desc = pVM->selm.s.paGdtHC[Sel >> X86_SEL_SHIFT];
1950 pSelInfo->fHyper = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK)
1951 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK)
1952 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == (Sel & X86_SEL_MASK)
1953 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == (Sel & X86_SEL_MASK)
1954 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK);
1955 /** @todo check that the GDT offset is valid. */
1956 }
1957 else
1958 {
1959 /*
1960 * Local Descriptor.
1961 */
1962 PVBOXDESC paLDT = (PVBOXDESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper);
1963 Desc = paLDT[Sel >> X86_SEL_SHIFT];
1964 /** @todo check if the LDT page is actually available. */
1965 /** @todo check that the LDT offset is valid. */
1966 pSelInfo->fHyper = false;
1967 }
1968
1969 /*
1970 * Extract the base and limit
1971 */
1972 pSelInfo->Sel = Sel;
1973 pSelInfo->Raw = Desc;
1974 pSelInfo->cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1975 if (Desc.Gen.u1Granularity)
1976 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1977 pSelInfo->GCPtrBase = (Desc.Gen.u8BaseHigh2 << 24)
1978 | (Desc.Gen.u8BaseHigh1 << 16)
1979 | Desc.Gen.u16BaseLow;
1980 pSelInfo->fRealMode = false;
1981
1982 return VINF_SUCCESS;
1983}
1984
1985
1986/**
1987 * Formats a descriptor.
1988 *
1989 * @param Desc Descriptor to format.
1990 * @param Sel Selector number.
1991 * @param pszOutput Output buffer.
1992 * @param cchOutput Size of output buffer.
1993 */
1994static void selmR3FormatDescriptor(VBOXDESC Desc, RTSEL Sel, char *pszOutput, size_t cchOutput)
1995{
1996 /*
1997 * Make variable description string.
1998 */
1999 static struct
2000 {
2001 unsigned cch;
2002 const char *psz;
2003 } const aTypes[32] =
2004 {
2005 #define STRENTRY(str) { sizeof(str) - 1, str }
2006 /* system */
2007 STRENTRY("Reserved0 "), /* 0x00 */
2008 STRENTRY("TSS16Avail "), /* 0x01 */
2009 STRENTRY("LDT "), /* 0x02 */
2010 STRENTRY("TSS16Busy "), /* 0x03 */
2011 STRENTRY("Call16 "), /* 0x04 */
2012 STRENTRY("Task "), /* 0x05 */
2013 STRENTRY("Int16 "), /* 0x06 */
2014 STRENTRY("Trap16 "), /* 0x07 */
2015 STRENTRY("Reserved8 "), /* 0x08 */
2016 STRENTRY("TSS32Avail "), /* 0x09 */
2017 STRENTRY("ReservedA "), /* 0x0a */
2018 STRENTRY("TSS32Busy "), /* 0x0b */
2019 STRENTRY("Call32 "), /* 0x0c */
2020 STRENTRY("ReservedD "), /* 0x0d */
2021 STRENTRY("Int32 "), /* 0x0e */
2022 STRENTRY("Trap32 "), /* 0x0f */
2023 /* non system */
2024 STRENTRY("DataRO "), /* 0x10 */
2025 STRENTRY("DataRO Accessed "), /* 0x11 */
2026 STRENTRY("DataRW "), /* 0x12 */
2027 STRENTRY("DataRW Accessed "), /* 0x13 */
2028 STRENTRY("DataDownRO "), /* 0x14 */
2029 STRENTRY("DataDownRO Accessed "), /* 0x15 */
2030 STRENTRY("DataDownRW "), /* 0x16 */
2031 STRENTRY("DataDownRW Accessed "), /* 0x17 */
2032 STRENTRY("CodeEO "), /* 0x18 */
2033 STRENTRY("CodeEO Accessed "), /* 0x19 */
2034 STRENTRY("CodeER "), /* 0x1a */
2035 STRENTRY("CodeER Accessed "), /* 0x1b */
2036 STRENTRY("CodeConfEO "), /* 0x1c */
2037 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
2038 STRENTRY("CodeConfER "), /* 0x1e */
2039 STRENTRY("CodeConfER Accessed ") /* 0x1f */
2040 #undef SYSENTRY
2041 };
2042 #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
2043 char szMsg[128];
2044 char *psz = &szMsg[0];
2045 unsigned i = Desc.Gen.u1DescType << 4 | Desc.Gen.u4Type;
2046 memcpy(psz, aTypes[i].psz, aTypes[i].cch);
2047 psz += aTypes[i].cch;
2048
2049 if (Desc.Gen.u1Present)
2050 ADD_STR(psz, "Present ");
2051 else
2052 ADD_STR(psz, "Not-Present ");
2053 if (Desc.Gen.u1Granularity)
2054 ADD_STR(psz, "Page ");
2055 if (Desc.Gen.u1DefBig)
2056 ADD_STR(psz, "32-bit ");
2057 else
2058 ADD_STR(psz, "16-bit ");
2059 #undef ADD_STR
2060 *psz = '\0';
2061
2062 /*
2063 * Limit and Base and format the output.
2064 */
2065 uint32_t u32Limit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
2066 if (Desc.Gen.u1Granularity)
2067 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
2068 uint32_t u32Base = Desc.Gen.u8BaseHigh2 << 24 | Desc.Gen.u8BaseHigh1 << 16 | Desc.Gen.u16BaseLow;
2069
2070 RTStrPrintf(pszOutput, cchOutput, "%04x - %08x %08x - base=%08x limit=%08x dpl=%d %s",
2071 Sel, Desc.au32[0], Desc.au32[1], u32Base, u32Limit, Desc.Gen.u2Dpl, szMsg);
2072}
2073
2074
2075/**
2076 * Dumps a descriptor.
2077 *
2078 * @param Desc Descriptor to dump.
2079 * @param Sel Selector number.
2080 * @param pszMsg Message to prepend the log entry with.
2081 */
2082SELMR3DECL(void) SELMR3DumpDescriptor(VBOXDESC Desc, RTSEL Sel, const char *pszMsg)
2083{
2084 char szOutput[128];
2085 selmR3FormatDescriptor(Desc, Sel, &szOutput[0], sizeof(szOutput));
2086 Log(("%s: %s\n", pszMsg, szOutput));
2087 NOREF(szOutput[0]);
2088}
2089
2090
2091/**
2092 * Display the shadow gdt.
2093 *
2094 * @param pVM VM Handle.
2095 * @param pHlp The info helpers.
2096 * @param pszArgs Arguments, ignored.
2097 */
2098static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2099{
2100 pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%VGv):\n", MMHyperHC2GC(pVM, pVM->selm.s.paGdtHC));
2101 for (unsigned iGDT = 0; iGDT < SELM_GDT_ELEMENTS; iGDT++)
2102 {
2103 if (pVM->selm.s.paGdtHC[iGDT].Gen.u1Present)
2104 {
2105 char szOutput[128];
2106 selmR3FormatDescriptor(pVM->selm.s.paGdtHC[iGDT], iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2107 const char *psz = "";
2108 if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] >> X86_SEL_SHIFT))
2109 psz = " HyperCS";
2110 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] >> X86_SEL_SHIFT))
2111 psz = " HyperDS";
2112 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] >> X86_SEL_SHIFT))
2113 psz = " HyperCS64";
2114 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> X86_SEL_SHIFT))
2115 psz = " HyperTSS";
2116 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> X86_SEL_SHIFT))
2117 psz = " HyperTSSTrap08";
2118 pHlp->pfnPrintf(pHlp, "%s%s\n", szOutput, psz);
2119 }
2120 }
2121}
2122
2123
2124/**
2125 * Display the guest gdt.
2126 *
2127 * @param pVM VM Handle.
2128 * @param pHlp The info helpers.
2129 * @param pszArgs Arguments, ignored.
2130 */
2131static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2132{
2133 VBOXGDTR GDTR;
2134 CPUMGetGuestGDTR(pVM, &GDTR);
2135 RTGCPTR pGDTGC = (RTGCPTR)GDTR.pGdt;
2136 unsigned cGDTs = ((unsigned)GDTR.cbGdt + 1) / sizeof(VBOXDESC);
2137
2138 pHlp->pfnPrintf(pHlp, "Guest GDT (GCAddr=%VGv limit=%x):\n", pGDTGC, GDTR.cbGdt);
2139 for (unsigned iGDT = 0; iGDT < cGDTs; iGDT++, pGDTGC += sizeof(VBOXDESC))
2140 {
2141 VBOXDESC GDTE;
2142 int rc = PGMPhysReadGCPtr(pVM, &GDTE, pGDTGC, sizeof(GDTE));
2143 if (VBOX_SUCCESS(rc))
2144 {
2145 if (GDTE.Gen.u1Present)
2146 {
2147 char szOutput[128];
2148 selmR3FormatDescriptor(GDTE, iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2149 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2150 }
2151 }
2152 else if (rc == VERR_PAGE_NOT_PRESENT)
2153 {
2154 if ((pGDTGC & PAGE_OFFSET_MASK) + sizeof(VBOXDESC) - 1 < sizeof(VBOXDESC))
2155 pHlp->pfnPrintf(pHlp, "%04 - page not present (GCAddr=%VGv)\n", iGDT << X86_SEL_SHIFT, pGDTGC);
2156 }
2157 else
2158 pHlp->pfnPrintf(pHlp, "%04 - read error rc=%Vrc GCAddr=%VGv\n", iGDT << X86_SEL_SHIFT, rc, pGDTGC);
2159 }
2160}
2161
2162
2163/**
2164 * Display the shadow ldt.
2165 *
2166 * @param pVM VM Handle.
2167 * @param pHlp The info helpers.
2168 * @param pszArgs Arguments, ignored.
2169 */
2170static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2171{
2172 unsigned cLDTs = ((unsigned)pVM->selm.s.cbLdtLimit + 1) >> X86_SEL_SHIFT;
2173 PVBOXDESC paLDT = (PVBOXDESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper);
2174 pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%VGv limit=%d):\n", pVM->selm.s.GCPtrLdt + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);
2175 for (unsigned iLDT = 0; iLDT < cLDTs; iLDT++)
2176 {
2177 if (paLDT[iLDT].Gen.u1Present)
2178 {
2179 char szOutput[128];
2180 selmR3FormatDescriptor(paLDT[iLDT], (iLDT << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2181 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2182 }
2183 }
2184}
2185
2186
2187/**
2188 * Display the guest ldt.
2189 *
2190 * @param pVM VM Handle.
2191 * @param pHlp The info helpers.
2192 * @param pszArgs Arguments, ignored.
2193 */
2194static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2195{
2196 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
2197 if (!(SelLdt & X86_SEL_MASK))
2198 {
2199 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): Null-Selector\n", SelLdt);
2200 return;
2201 }
2202
2203 RTGCPTR pLdtGC;
2204 unsigned cbLdt;
2205 int rc = SELMGetLDTFromSel(pVM, SelLdt, &pLdtGC, &cbLdt);
2206 if (VBOX_FAILURE(rc))
2207 {
2208 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=%Vrc\n", SelLdt, rc);
2209 return;
2210 }
2211
2212 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%VGv limit=%x):\n", SelLdt, pLdtGC, cbLdt);
2213 unsigned cLdts = (cbLdt + 1) >> X86_SEL_SHIFT;
2214 for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, pLdtGC += sizeof(VBOXDESC))
2215 {
2216 VBOXDESC LdtE;
2217 int rc = PGMPhysReadGCPtr(pVM, &LdtE, pLdtGC, sizeof(LdtE));
2218 if (VBOX_SUCCESS(rc))
2219 {
2220 if (LdtE.Gen.u1Present)
2221 {
2222 char szOutput[128];
2223 selmR3FormatDescriptor(LdtE, (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2224 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2225 }
2226 }
2227 else if (rc == VERR_PAGE_NOT_PRESENT)
2228 {
2229 if ((pLdtGC & PAGE_OFFSET_MASK) + sizeof(VBOXDESC) - 1 < sizeof(VBOXDESC))
2230 pHlp->pfnPrintf(pHlp, "%04 - page not present (GCAddr=%VGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, pLdtGC);
2231 }
2232 else
2233 pHlp->pfnPrintf(pHlp, "%04 - read error rc=%Vrc GCAddr=%VGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, pLdtGC);
2234 }
2235}
2236
2237
/**
 * Dumps the hypervisor (shadow) GDT to the debug/release log.
 *
 * Thin wrapper around the "gdt" DBGF info handler (selmR3InfoGdt).
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM)
{
    DBGFR3Info(pVM, "gdt", NULL, NULL);
}
2247
/**
 * Dumps the hypervisor (shadow) LDT to the debug/release log.
 *
 * Thin wrapper around the "ldt" DBGF info handler (selmR3InfoLdt).
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM)
{
    DBGFR3Info(pVM, "ldt", NULL, NULL);
}
2257
/**
 * Dumps the guest GDT to the debug/release log.
 *
 * Thin wrapper around the "gdtguest" DBGF info handler (selmR3InfoGdtGuest).
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM)
{
    DBGFR3Info(pVM, "gdtguest", NULL, NULL);
}
2267
/**
 * Dumps the guest LDT to the debug/release log.
 *
 * Thin wrapper around the "ldtguest" DBGF info handler (selmR3InfoLdtGuest).
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM)
{
    DBGFR3Info(pVM, "ldtguest", NULL, NULL);
}
2277
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette