VirtualBox

source: vbox/trunk/src/VBox/VMM/SELM.cpp@ 1468

Last change on this file since 1468 was 1450, checked in by vboxsync, 18 years ago

r=bird

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 85.1 KB
Line 
1/* $Id: SELM.cpp 1450 2007-03-13 16:13:50Z vboxsync $ */
2/** @file
3 * SELM - The Selector manager.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_SELM
26#include <VBox/selm.h>
27#include <VBox/cpum.h>
28#include <VBox/stam.h>
29#include <VBox/mm.h>
30#include <VBox/pdm.h>
31#include <VBox/pgm.h>
32#include <VBox/trpm.h>
33#include <VBox/dbgf.h>
34#include "SELMInternal.h"
35#include <VBox/vm.h>
36#include <VBox/err.h>
37#include <VBox/param.h>
38
39#include <iprt/assert.h>
40#include <VBox/log.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44#include <iprt/string.h>
45#include "x86context.h"
46
47
48/**
49 * Enable or disable tracking of Guest's GDT/LDT/TSS.
50 * @{
51 */
52#define SELM_TRACK_GUEST_GDT_CHANGES
53#define SELM_TRACK_GUEST_LDT_CHANGES
54#define SELM_TRACK_GUEST_TSS_CHANGES
55/** @} */
56
57/**
58 * Enable or disable tracking of Shadow GDT/LDT/TSS.
59 * @{
60 */
61#define SELM_TRACK_SHADOW_GDT_CHANGES
62#define SELM_TRACK_SHADOW_LDT_CHANGES
63#define SELM_TRACK_SHADOW_TSS_CHANGES
64/** @} */
65
66
67/** SELM saved state version. */
68#define SELM_SAVED_STATE_VERSION 5
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM);
74static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
75static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
76static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
77static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
78static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
79static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
80//static DECLCALLBACK(void) selmR3InfoTss(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
81//static DECLCALLBACK(void) selmR3InfoTssGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
82static DECLCALLBACK(int) selmGuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
83static DECLCALLBACK(int) selmGuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
84static DECLCALLBACK(int) selmGuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
85
86
87
/**
 * Initializes the SELM.
 *
 * Sets up the shadow GDT/LDT storage on the hypervisor heap, picks the
 * default hypervisor selector values, registers the saved state unit,
 * statistics and debugger info handlers, and flags a full GDT/LDT/TSS
 * sync for the first raw-mode entry.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
SELMR3DECL(int) SELMR3Init(PVM pVM)
{
    LogFlow(("SELMR3Init\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, selm.s, 32); AssertRelease(!(RT_OFFSETOF(VM, selm.s) & 31));
    AssertCompileMemberAlignment(VM, selm.s.Tss, 16); AssertRelease(!(RT_OFFSETOF(VM, selm.s.Tss) & 15));
    AssertCompile(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding)); AssertRelease(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding));

    /*
     * Init the structure.
     *
     * The hypervisor selectors default to the top five entries of the shadow
     * GDT; each selector value is the GDT index shifted left by 3 (TI=0, RPL=0).
     * SELMR3UpdateFromCPUM() relocates them if the guest GDT grows into this range.
     */
    pVM->selm.s.offVM = RT_OFFSETOF(VM, selm);
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] = (SELM_GDT_ELEMENTS - 0x1) << 3;
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] = (SELM_GDT_ELEMENTS - 0x2) << 3;
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] = (SELM_GDT_ELEMENTS - 0x3) << 3;
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] = (SELM_GDT_ELEMENTS - 0x4) << 3;
    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = (SELM_GDT_ELEMENTS - 0x5) << 3;

    /*
     * Allocate GDT table.
     */
    int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtHC[0]) * SELM_GDT_ELEMENTS,
                                     PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtHC);
    AssertRCReturn(rc, rc);

    /*
     * Allocate LDT area.
     * _64K covers the maximum LDT size; the extra PAGE_SIZE presumably
     * provides alignment slack — TODO confirm against the LDT sync code.
     */
    rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.HCPtrLdt);
    AssertRCReturn(rc, rc);

    /*
     * Init Guest's and Shadow GDT, LDT, TSS changes control variables.
     * ~0 marks an address/selector as "not set / not monitored".
     */
    pVM->selm.s.cbEffGuestGdtLimit = 0;
    pVM->selm.s.GuestGdtr.pGdt = ~0;
    pVM->selm.s.GCPtrGuestLdt = ~0;
    pVM->selm.s.GCPtrGuestTss = ~0;

    pVM->selm.s.paGdtGC = 0;
    pVM->selm.s.GCPtrLdt = ~0;
    pVM->selm.s.GCPtrTss = ~0;
    pVM->selm.s.GCSelTss = ~0;

    pVM->selm.s.fDisableMonitoring = false;
    pVM->selm.s.fSyncTSSRing0Stack = false;

    /*
     * Register the saved state data unit (instance 1, version SELM_SAVED_STATE_VERSION).
     */
    rc = SSMR3RegisterInternal(pVM, "selm", 1, SELM_SAVED_STATE_VERSION, sizeof(SELM),
                               NULL, selmR3Save, NULL,
                               NULL, selmR3Load, selmR3LoadDone);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Statistics.
     */
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of writes to the Guest LDT was detected.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
    STAM_REG(pVM, &pVM->selm.s.StatGCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");
    STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
    STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");

    STAM_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors.");
    STAM_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan", STAMUNIT_OCCURENCES, "The number of times we had find free hypervisor selectors.");

    /*
     * Default action when entering raw mode for the first time
     */
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);

    /*
     * Register info handlers.
     */
    DBGFR3InfoRegisterInternal(pVM, "gdt", "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
    DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.", &selmR3InfoGdtGuest);
    DBGFR3InfoRegisterInternal(pVM, "ldt", "Displays the shadow LDT. No arguments.", &selmR3InfoLdt);
    DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.", &selmR3InfoLdtGuest);
    //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
    //DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.", &selmR3InfoTssGuest);

    return rc;
}
187
188
/**
 * Finalizes HMA page attributes.
 *
 * Optionally (config key "DoubleFault") makes the pages holding the trap-08
 * related GDT entries and hypervisor selector values writable so the CPU can
 * perform the double-fault task switch with CR0.WP set.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
SELMR3DECL(int) SELMR3InitFinalize(PVM pVM)
{
    /*
     * Make Double Fault work with WP enabled?
     *
     * The double fault is a task switch and thus requires write access to the GDT of the TSS
     * (to set it busy), to the old TSS (to store state), and to the Trap 8 TSS for the back link.
     *
     * Since we in enabling write access to these pages make ourself vulnerable to attacks,
     * it is not possible to do this by default.
     */
    bool f;
    /* If the key is absent the query fails and the feature stays disabled. */
    int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
#if !defined(DEBUG_bird) && !defined(__AMD64__) /** @todo Remember to remove __AMD64__ here! */
    if (VBOX_SUCCESS(rc) && f)
#endif
    /* NOTE(review): on DEBUG_bird / __AMD64__ builds the condition above is
       compiled out, so this block runs unconditionally and 'f' goes unused. */
    {
        PVBOXDESC paGdt = pVM->selm.s.paGdtHC;
        /* Make the GDT entries for the two TSS descriptors writable ... */
        rc = PGMMapSetPage(pVM, MMHyperHC2GC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3]), sizeof(paGdt[0]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
        rc = PGMMapSetPage(pVM, MMHyperHC2GC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3]), sizeof(paGdt[0]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
        /* ... and the pages holding the selector values themselves. */
        rc = PGMMapSetPage(pVM, VM_GUEST_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
        rc = PGMMapSetPage(pVM, VM_GUEST_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]),
                           X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D);
        AssertRC(rc);
    }
    return VINF_SUCCESS;
}
228
229
230/**
231 * Setup the hypervisor GDT selectors in our shadow table
232 *
233 * @param pVM The VM handle.
234 */
235static void selmR3SetupHyperGDTSelectors(PVM pVM)
236{
237 PVBOXDESC paGdt = pVM->selm.s.paGdtHC;
238
239 /*
240 * Set up global code and data descriptors for use in the guest context.
241 * Both are wide open (base 0, limit 4GB)
242 */
243 PVBOXDESC pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] >> 3];
244 pDesc->Gen.u16LimitLow = 0xffff;
245 pDesc->Gen.u4LimitHigh = 0xf;
246 pDesc->Gen.u16BaseLow = 0;
247 pDesc->Gen.u8BaseHigh1 = 0;
248 pDesc->Gen.u8BaseHigh2 = 0;
249 pDesc->Gen.u4Type = X86_SELTYPE_MEM_EXECUTEREAD_ACC;
250 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
251 pDesc->Gen.u2Dpl = 0; /* supervisor */
252 pDesc->Gen.u1Present = 1;
253 pDesc->Gen.u1Available = 0;
254 pDesc->Gen.u1Reserved = 0;
255 pDesc->Gen.u1DefBig = 1; /* def 32 bit */
256 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
257
258 /* data */
259 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] >> 3];
260 pDesc->Gen.u16LimitLow = 0xffff;
261 pDesc->Gen.u4LimitHigh = 0xf;
262 pDesc->Gen.u16BaseLow = 0;
263 pDesc->Gen.u8BaseHigh1 = 0;
264 pDesc->Gen.u8BaseHigh2 = 0;
265 pDesc->Gen.u4Type = X86_SELTYPE_MEM_READWRITE_ACC;
266 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
267 pDesc->Gen.u2Dpl = 0; /* supervisor */
268 pDesc->Gen.u1Present = 1;
269 pDesc->Gen.u1Available = 0;
270 pDesc->Gen.u1Reserved = 0;
271 pDesc->Gen.u1DefBig = 1; /* big */
272 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
273
274 /* 64-bit mode code (& data?) */
275 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] >> 3];
276 pDesc->Gen.u16LimitLow = 0xffff;
277 pDesc->Gen.u4LimitHigh = 0xf;
278 pDesc->Gen.u16BaseLow = 0;
279 pDesc->Gen.u8BaseHigh1 = 0;
280 pDesc->Gen.u8BaseHigh2 = 0;
281 pDesc->Gen.u4Type = X86_SELTYPE_MEM_EXECUTEREAD_ACC;
282 pDesc->Gen.u1DescType = 1; /* not system, but code/data */
283 pDesc->Gen.u2Dpl = 0; /* supervisor */
284 pDesc->Gen.u1Present = 1;
285 pDesc->Gen.u1Available = 0;
286 pDesc->Gen.u1Reserved = 1; /* The Long (L) attribute bit. */
287 pDesc->Gen.u1DefBig = 0; /* With L=1 this must be 0. */
288 pDesc->Gen.u1Granularity = 1; /* 4KB limit */
289
290 /*
291 * TSS descriptor
292 */
293 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3];
294 RTGCPTR pGCTSS = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);
295 pDesc->Gen.u16BaseLow = RT_LOWORD(pGCTSS);
296 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(pGCTSS);
297 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(pGCTSS);
298 pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
299 pDesc->Gen.u4LimitHigh = 0;
300 pDesc->Gen.u4Type = X86_SELTYPE_SYS_386_TSS_AVAIL;
301 pDesc->Gen.u1DescType = 0; /* system */
302 pDesc->Gen.u2Dpl = 0; /* supervisor */
303 pDesc->Gen.u1Present = 1;
304 pDesc->Gen.u1Available = 0;
305 pDesc->Gen.u1Reserved = 0;
306 pDesc->Gen.u1DefBig = 0;
307 pDesc->Gen.u1Granularity = 0; /* byte limit */
308
309 /*
310 * TSS descriptor for trap 08
311 */
312 pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3];
313 pDesc->Gen.u16LimitLow = sizeof(VBOXTSS) - 1;
314 pDesc->Gen.u4LimitHigh = 0;
315 pGCTSS = VM_GUEST_ADDR(pVM, &pVM->selm.s.TssTrap08);
316 pDesc->Gen.u16BaseLow = RT_LOWORD(pGCTSS);
317 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(pGCTSS);
318 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(pGCTSS);
319 pDesc->Gen.u4Type = X86_SELTYPE_SYS_386_TSS_AVAIL;
320 pDesc->Gen.u1DescType = 0; /* system */
321 pDesc->Gen.u2Dpl = 0; /* supervisor */
322 pDesc->Gen.u1Present = 1;
323 pDesc->Gen.u1Available = 0;
324 pDesc->Gen.u1Reserved = 0;
325 pDesc->Gen.u1DefBig = 0;
326 pDesc->Gen.u1Granularity = 0; /* byte limit */
327}
328
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM need to relocate it self inside the GC.
 *
 * Rewrites the hypervisor GDTR and segment registers, rebuilds the shadow
 * GDT entries, refreshes both hypervisor TSSes, and (unless monitoring is
 * disabled) re-registers the shadow GDT/TSS/LDT write access handlers at
 * their new GC addresses.
 *
 * @param   pVM     The VM.
 */
SELMR3DECL(void) SELMR3Relocate(PVM pVM)
{
    PVBOXDESC paGdt = pVM->selm.s.paGdtHC;
    LogFlow(("SELMR3Relocate\n"));

    /*
     * Update GDTR and selector.
     */
    CPUMSetHyperGDTR(pVM, MMHyperHC2GC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);

    /** @todo selector relocations should be a seperate operation? */
    CPUMSetHyperCS(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]);
    CPUMSetHyperDS(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
    CPUMSetHyperES(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
    CPUMSetHyperSS(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
    CPUMSetHyperTR(pVM, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]);

    selmR3SetupHyperGDTSelectors(pVM);

/** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
/** @todo PGM knows the proper CR3 values these days, not CPUM. */
    /*
     * Update the TSSes.
     */
    /* Current TSS */
    pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVM);
    pVM->selm.s.Tss.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.Tss.esp0 = VMMGetStackGC(pVM);
    pVM->selm.s.Tss.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
    pVM->selm.s.Tss.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.Tss.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    /* Offset past the TSS limit — presumably means "no I/O bitmap"; confirm. */
    pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);

    /* trap 08 */
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterGCCR3(pVM); /* this should give use better survival chances. */
    pVM->selm.s.TssTrap08.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.TssTrap08.ss = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.TssTrap08.esp0 = VMMGetStackGC(pVM) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
    pVM->selm.s.TssTrap08.esp = pVM->selm.s.TssTrap08.esp0;
    pVM->selm.s.TssTrap08.ebp = pVM->selm.s.TssTrap08.esp0;
    pVM->selm.s.TssTrap08.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
    pVM->selm.s.TssTrap08.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.TssTrap08.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    pVM->selm.s.TssTrap08.fs = 0;
    pVM->selm.s.TssTrap08.gs = 0;
    pVM->selm.s.TssTrap08.selLdt = 0;
    pVM->selm.s.TssTrap08.eflags = 0x2; /* all cleared */
    pVM->selm.s.TssTrap08.ecx = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss); /* setup ecx to normal Hypervisor TSS address. */
    /* NOTE(review): this store to edi is dead — it is overwritten with edx a
       few lines below. Possibly another register (esi?) was intended; confirm
       against the trap-08 handler's register expectations. */
    pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.ecx;
    pVM->selm.s.TssTrap08.eax = pVM->selm.s.TssTrap08.ecx;
    pVM->selm.s.TssTrap08.edx = VM_GUEST_ADDR(pVM, pVM); /* setup edx VM address. */
    pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.edx;
    pVM->selm.s.TssTrap08.ebx = pVM->selm.s.TssTrap08.edx;
    pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
    /* TRPM will be updating the eip */

    if (!pVM->selm.s.fDisableMonitoring)
    {
        /*
         * Update shadow GDT/LDT/TSS write access handlers.
         * Each handler is deregistered at its old GC address (if any) before
         * being registered at the relocated one.
         */
        int rc;
#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
        if (pVM->selm.s.paGdtGC != 0)
        {
            rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtGC);
            AssertRC(rc);
        }
        pVM->selm.s.paGdtGC = MMHyperHC2GC(pVM, paGdt);
        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.paGdtGC,
                                         pVM->selm.s.paGdtGC + SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1,
                                         0, 0, "selmgcShadowGDTWriteHandler", 0, "Shadow GDT write access handler");
        AssertRC(rc);
#endif
#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
        if (pVM->selm.s.GCPtrTss != ~0U)
        {
            rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrTss);
            AssertRC(rc);
        }
        pVM->selm.s.GCPtrTss = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);
        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.GCPtrTss,
                                         pVM->selm.s.GCPtrTss + sizeof(pVM->selm.s.Tss) - 1,
                                         0, 0, "selmgcShadowTSSWriteHandler", 0, "Shadow TSS write access handler");
        AssertRC(rc);
#endif

        /*
         * Update the GC LDT region handler and address.
         */
#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
        if (pVM->selm.s.GCPtrLdt != ~0U)
        {
            rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrLdt);
            AssertRC(rc);
        }
#endif
        pVM->selm.s.GCPtrLdt = MMHyperHC2GC(pVM, pVM->selm.s.HCPtrLdt);
#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.GCPtrLdt,
                                         pVM->selm.s.GCPtrLdt + _64K + PAGE_SIZE - 1,
                                         0, 0, "selmgcShadowLDTWriteHandler", 0, "Shadow LDT write access handler");
        AssertRC(rc);
#endif
    }
}
442
443
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 * This is called by PGM.
 *
 * Refreshes the CR3 values cached in the two hypervisor TSSes so a task
 * switch (normal or double fault) uses the right page tables.
 *
 * @param   pVM     The VM handle
 */
SELMR3DECL(void) SELMR3PagingModeChanged(PVM pVM)
{
    pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVM);
    pVM->selm.s.TssTrap08.cr3 = PGMGetInterGCCR3(pVM);
}
456
457
458/**
459 * Terminates the SELM.
460 *
461 * Termination means cleaning up and freeing all resources,
462 * the VM it self is at this point powered off or suspended.
463 *
464 * @returns VBox status code.
465 * @param pVM The VM to operate on.
466 */
467SELMR3DECL(int) SELMR3Term(PVM pVM)
468{
469 return 0;
470}
471
472
/**
 * The VM is being reset.
 *
 * For the SELM component this means that any GDT/LDT/TSS monitors
 * needs to be removed.
 *
 * Also re-initializes the cached guest state and flags a full
 * GDT/LDT/TSS resync for the next raw-mode entry.
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3Reset(PVM pVM)
{
    LogFlow(("SELMR3Reset:\n"));
    VM_ASSERT_EMT(pVM);

    /*
     * Uninstall guest GDT/LDT/TSS write access handlers.
     * ~0 means "not registered"; each address is reset after deregistration.
     */
    int rc;
#ifdef SELM_TRACK_GUEST_GDT_CHANGES
    if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
        AssertRC(rc);
        pVM->selm.s.GuestGdtr.pGdt = ~0U;
        pVM->selm.s.GuestGdtr.cbGdt = 0;
    }
    pVM->selm.s.fGDTRangeRegistered = false;
#endif
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
    if (pVM->selm.s.GCPtrGuestLdt != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestLdt = ~0U;
    }
#endif
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.GCPtrGuestTss != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestTss = ~0U;
        pVM->selm.s.GCSelTss = ~0;
    }
#endif

    /*
     * Re-initialize other members.
     */
    pVM->selm.s.cbLdtLimit = 0;
    pVM->selm.s.offLdtHyper = 0;
    pVM->selm.s.cbMonitoredGuestTss = 0;

    pVM->selm.s.fSyncTSSRing0Stack = false;

    /*
     * Default action when entering raw mode for the first time
     */
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
}
534
/**
 * Disable GDT/LDT/TSS monitoring and syncing
 *
 * Removes both the guest handlers (same teardown as SELMR3Reset) and the
 * shadow handlers, clears the pending sync force-action flags, and marks
 * monitoring as permanently disabled for this VM.
 *
 * @param   pVM     The VM to operate on.
 */
SELMR3DECL(void) SELMR3DisableMonitoring(PVM pVM)
{
    /*
     * Uninstall guest GDT/LDT/TSS write access handlers.
     */
    int rc;
#ifdef SELM_TRACK_GUEST_GDT_CHANGES
    if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
        AssertRC(rc);
        pVM->selm.s.GuestGdtr.pGdt = ~0U;
        pVM->selm.s.GuestGdtr.cbGdt = 0;
    }
    pVM->selm.s.fGDTRangeRegistered = false;
#endif
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
    if (pVM->selm.s.GCPtrGuestLdt != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestLdt = ~0U;
    }
#endif
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
    if (pVM->selm.s.GCPtrGuestTss != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
        AssertRC(rc);
        pVM->selm.s.GCPtrGuestTss = ~0U;
        pVM->selm.s.GCSelTss = ~0;
    }
#endif

    /*
     * Unregister shadow GDT/LDT/TSS write access handlers.
     */
#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
    if (pVM->selm.s.paGdtGC != 0)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtGC);
        AssertRC(rc);
        pVM->selm.s.paGdtGC = 0;
    }
#endif
#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
    if (pVM->selm.s.GCPtrTss != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrTss);
        AssertRC(rc);
        pVM->selm.s.GCPtrTss = ~0U;
    }
#endif
#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
    if (pVM->selm.s.GCPtrLdt != ~0U)
    {
        rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrLdt);
        AssertRC(rc);
        pVM->selm.s.GCPtrLdt = ~0U;
    }
#endif

    /* No monitoring means no pending syncs either. */
    VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
    VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);

    pVM->selm.s.fDisableMonitoring = true;
}
608
/**
 * Execute state save operation.
 *
 * Saves the monitoring flags and the five hypervisor selector values;
 * everything else (shadow GDT/LDT/TSS contents) is resynced on load.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("selmR3Save:\n"));

    /*
     * Save the basic bits - fortunately all the other things can be resynced on load.
     */
    PSELM pSelm = &pVM->selm.s;

    SSMR3PutBool(pSSM, pSelm->fDisableMonitoring);
    SSMR3PutBool(pSSM, pSelm->fSyncTSSRing0Stack);
    SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS]);
    SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_DS]);
    SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]);
    /* CS64 is saved a second time on purpose: this slot is reserved for a
       future DS64 selector, and selmR3Load reads and discards it. */
    SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]); //reserved for DS64.
    SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS]);
    /* Only the final put's status is returned — presumably earlier failures
       are reflected by subsequent SSM calls; TODO confirm SSM semantics. */
    return SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]);
}
634
635
/**
 * Execute state load operation.
 *
 * Validates the unit version, resets SELM, and restores the monitoring
 * flags and hypervisor selector values saved by selmR3Save(). The
 * selectors are not validated here; that happens during relocation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    LogFlow(("selmR3Load:\n"));

    /*
     * Validate version.
     */
    if (u32Version != SELM_SAVED_STATE_VERSION)
    {
        Log(("selmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset.
     */
    SELMR3Reset(pVM);

    /* Get the monitoring flag. */
    SSMR3GetBool(pSSM, &pVM->selm.s.fDisableMonitoring);

    /* Get the TSS state flag. */
    SSMR3GetBool(pSSM, &pVM->selm.s.fSyncTSSRing0Stack);

    /*
     * Get the selectors.
     */
    RTSEL SelCS;
    SSMR3GetSel(pSSM, &SelCS);
    RTSEL SelDS;
    SSMR3GetSel(pSSM, &SelDS);
    RTSEL SelCS64;
    SSMR3GetSel(pSSM, &SelCS64);
    RTSEL SelDS64;                  /* Reserved slot (selmR3Save writes CS64 here); read and discarded. */
    SSMR3GetSel(pSSM, &SelDS64);
    RTSEL SelTSS;
    SSMR3GetSel(pSSM, &SelTSS);
    RTSEL SelTSSTrap08;
    SSMR3GetSel(pSSM, &SelTSSTrap08);

    /* Copy the selectors; they will be checked during relocation. */
    PSELM pSelm = &pVM->selm.s;
    pSelm->aHyperSel[SELM_HYPER_SEL_CS] = SelCS;
    pSelm->aHyperSel[SELM_HYPER_SEL_DS] = SelDS;
    pSelm->aHyperSel[SELM_HYPER_SEL_CS64] = SelCS64;
    pSelm->aHyperSel[SELM_HYPER_SEL_TSS] = SelTSS;
    pSelm->aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SelTSSTrap08;

    return VINF_SUCCESS;
}
694
695
/**
 * Sync the GDT, LDT and TSS after loading the state.
 *
 * Just to play save, we set the FFs to force syncing before
 * executing GC code.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("selmR3LoadDone:\n"));

    /*
     * Don't do anything if it's a load failure.
     */
    int rc = SSMR3HandleGetStatus(pSSM);
    if (VBOX_FAILURE(rc))
        return VINF_SUCCESS;

    /*
     * Do the syncing if we're in protected mode.
     * SELMR3UpdateFromCPUM() consumes (clears) the FFs set just above it.
     */
    if (PGMGetGuestMode(pVM) != PGMMODE_REAL)
    {
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
        SELMR3UpdateFromCPUM(pVM);
    }

    /*
     * Flag everything for resync on next raw mode entry.
     * (Set again deliberately, since the sync above clears the flags.)
     */
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);

    return VINF_SUCCESS;
}
737
738
739/**
740 * Updates the Guest GDT & LDT virtualization based on current CPU state.
741 *
742 * @returns VBox status code.
743 * @param pVM The VM to operate on.
744 */
745SELMR3DECL(int) SELMR3UpdateFromCPUM(PVM pVM)
746{
747 int rc = VINF_SUCCESS;
748
749 if (pVM->selm.s.fDisableMonitoring)
750 {
751 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
752 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
753 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
754
755 return VINF_SUCCESS;
756 }
757
758 STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
759
760 /*
761 * GDT sync
762 */
763 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT))
764 {
765 /*
766 * Always assume the best
767 */
768 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_GDT);
769
770 /* If the GDT was changed, then make sure the LDT is checked too */
771 /** @todo only do this if the actual ldtr selector was changed; this is a bit excessive */
772 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
773 /* Same goes for the TSS selector */
774 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
775
776 /*
777 * Get the GDTR and check if there is anything to do (there usually is).
778 */
779 VBOXGDTR GDTR;
780 CPUMGetGuestGDTR(pVM, &GDTR);
781 if (GDTR.cbGdt < sizeof(VBOXDESC))
782 {
783 Log(("No GDT entries...\n"));
784 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
785 return VINF_SUCCESS;
786 }
787
788 /*
789 * Read the Guest GDT.
790 * ASSUMES that the entire GDT is in memory.
791 */
792 RTUINT cbEffLimit = GDTR.cbGdt;
793 PVBOXDESC pGDTE = &pVM->selm.s.paGdtHC[1];
794 rc = PGMPhysReadGCPtr(pVM, pGDTE, GDTR.pGdt + sizeof(VBOXDESC), cbEffLimit + 1 - sizeof(VBOXDESC));
795 if (VBOX_FAILURE(rc))
796 {
797 /*
798 * Read it page by page.
799 *
800 * Keep track of the last valid page and delay memsets and
801 * adjust cbEffLimit to reflect the effective size. The latter
802 * is something we do in the belief that the guest will probably
803 * never actually commit the last page, thus allowing us to keep
804 * our selectors in the high end of the GDT.
805 */
806 RTUINT cbLeft = cbEffLimit + 1 - sizeof(VBOXDESC);
807 RTGCPTR GCPtrSrc = (RTGCPTR)GDTR.pGdt + sizeof(VBOXDESC);
808 uint8_t *pu8Dst = (uint8_t *)&pVM->selm.s.paGdtHC[1];
809 uint8_t *pu8DstInvalid = pu8Dst;
810
811 while (cbLeft)
812 {
813 RTUINT cb = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
814 cb = RT_MIN(cb, cbLeft);
815 rc = PGMPhysReadGCPtr(pVM, pu8Dst, GCPtrSrc, cb);
816 if (VBOX_SUCCESS(rc))
817 {
818 if (pu8DstInvalid != pu8Dst)
819 memset(pu8DstInvalid, 0, pu8Dst - pu8DstInvalid);
820 GCPtrSrc += cb;
821 pu8Dst += cb;
822 pu8DstInvalid = pu8Dst;
823 }
824 else if ( rc == VERR_PAGE_NOT_PRESENT
825 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
826 {
827 GCPtrSrc += cb;
828 pu8Dst += cb;
829 }
830 else
831 {
832 AssertReleaseMsgFailed(("Couldn't read GDT at %RX32, rc=%Vrc!\n", GDTR.pGdt, rc));
833 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
834 return VERR_NOT_IMPLEMENTED;
835 }
836 cbLeft -= cb;
837 }
838
839 /* any invalid pages at the end? */
840 if (pu8DstInvalid != pu8Dst)
841 {
842 cbEffLimit = pu8DstInvalid - (uint8_t *)pVM->selm.s.paGdtHC - 1;
843 /* If any GDTEs was invalidated, zero them. */
844 if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit)
845 memset(pu8DstInvalid + cbEffLimit + 1, 0, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);
846 }
847
848 /* keep track of the effective limit. */
849 if (cbEffLimit != pVM->selm.s.cbEffGuestGdtLimit)
850 {
851 Log(("SELMR3UpdateFromCPUM: cbEffGuestGdtLimit=%#x -> %#x (actual %#x)\n",
852 pVM->selm.s.cbEffGuestGdtLimit, cbEffLimit, GDTR.cbGdt));
853 pVM->selm.s.cbEffGuestGdtLimit = cbEffLimit;
854 }
855 }
856
857 /*
858 * Check if the Guest GDT intrudes on our GDT entries.
859 */
860 /** @todo we should try to minimize relocations by making sure our current selectors can be reused. */
861 RTSEL aHyperSel[SELM_HYPER_SEL_MAX];
862 if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
863 {
864 PVBOXDESC pGDTEStart = pVM->selm.s.paGdtHC;
865 PVBOXDESC pGDTE = (PVBOXDESC)((char *)pGDTEStart + GDTR.cbGdt + 1 - sizeof(VBOXDESC));
866 int iGDT = 0;
867
868 Log(("Internal SELM GDT conflict: use non-present entries\n"));
869 STAM_COUNTER_INC(&pVM->selm.s.StatScanForHyperSels);
870 while (pGDTE > pGDTEStart)
871 {
872 /* We can reuse non-present entries */
873 if (!pGDTE->Gen.u1Present)
874 {
875 aHyperSel[iGDT] = ((uintptr_t)pGDTE - (uintptr_t)pVM->selm.s.paGdtHC) / sizeof(VBOXDESC);
876 aHyperSel[iGDT] = aHyperSel[iGDT] << X86_SEL_SHIFT;
877 Log(("SELM: Found unused GDT %04X\n", aHyperSel[iGDT]));
878 iGDT++;
879 if (iGDT >= SELM_HYPER_SEL_MAX)
880 break;
881 }
882
883 pGDTE--;
884 }
885 if (iGDT != SELM_HYPER_SEL_MAX)
886 {
887 AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
888 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
889 return VERR_NOT_IMPLEMENTED;
890 }
891 }
892 else
893 {
894 aHyperSel[SELM_HYPER_SEL_CS] = SELM_HYPER_DEFAULT_SEL_CS;
895 aHyperSel[SELM_HYPER_SEL_DS] = SELM_HYPER_DEFAULT_SEL_DS;
896 aHyperSel[SELM_HYPER_SEL_CS64] = SELM_HYPER_DEFAULT_SEL_CS64;
897 aHyperSel[SELM_HYPER_SEL_TSS] = SELM_HYPER_DEFAULT_SEL_TSS;
898 aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SELM_HYPER_DEFAULT_SEL_TSS_TRAP08;
899 }
900
901 /*
902 * Work thru the copied GDT entries adjusting them for correct virtualization.
903 */
904 PVBOXDESC pGDTEEnd = (PVBOXDESC)((char *)pGDTE + cbEffLimit + 1 - sizeof(VBOXDESC));
905 while (pGDTE < pGDTEEnd)
906 {
907 if (pGDTE->Gen.u1Present)
908 {
909 /*
910 * Code and data selectors are generally 1:1, with the
911 * 'little' adjustment we do for DPL 0 selectors.
912 */
913 if (pGDTE->Gen.u1DescType)
914 {
915 /*
916 * Hack for A-bit against Trap E on read-only GDT.
917 */
918 /** @todo Fix this by loading ds and cs before turning off WP. */
919 pGDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
920
921 /*
922 * All DPL 0 code and data segments are squeezed into DPL 1.
923 *
924 * We're skipping conforming segments here because those
925 * cannot give us any trouble.
926 */
927 if ( pGDTE->Gen.u2Dpl == 0
928 && (pGDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
929 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
930 pGDTE->Gen.u2Dpl = 1;
931 }
932 else
933 {
934 /*
935 * System type selectors are marked not present.
936 * Recompiler or special handling is required for these.
937 */
938 /** @todo what about interrupt gates and rawr0? */
939 pGDTE->Gen.u1Present = 0;
940 }
941 }
942
943 /* Next GDT entry. */
944 pGDTE++;
945 }
946
947 /*
948 * Check if our hypervisor selectors were changed.
949 */
950 if ( aHyperSel[SELM_HYPER_SEL_CS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]
951 || aHyperSel[SELM_HYPER_SEL_DS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]
952 || aHyperSel[SELM_HYPER_SEL_CS64] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]
953 || aHyperSel[SELM_HYPER_SEL_TSS] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]
954 || aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08])
955 {
956 /* Reinitialize our hypervisor GDTs */
957 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] = aHyperSel[SELM_HYPER_SEL_CS];
958 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] = aHyperSel[SELM_HYPER_SEL_DS];
959 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] = aHyperSel[SELM_HYPER_SEL_CS64];
960 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] = aHyperSel[SELM_HYPER_SEL_TSS];
961 pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
962
963 STAM_COUNTER_INC(&pVM->selm.s.StatHyperSelsChanged);
964
965 /*
966 * Do the relocation callbacks to let everyone update their hyper selector dependencies.
967 * (SELMR3Relocate will call selmR3SetupHyperGDTSelectors() for us.)
968 */
969 VMR3Relocate(pVM, 0);
970 }
971 else if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
972 /* We overwrote all entries above, so we have to save them again. */
973 selmR3SetupHyperGDTSelectors(pVM);
974
975 /*
976 * Adjust the cached GDT limit.
977 * Any GDT entries which have been removed must be cleared.
978 */
979 if (pVM->selm.s.GuestGdtr.cbGdt != GDTR.cbGdt)
980 {
981 if (pVM->selm.s.GuestGdtr.cbGdt > GDTR.cbGdt)
982 memset(pGDTE, 0, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);
983#ifndef SELM_TRACK_GUEST_GDT_CHANGES
984 pVM->selm.s.GuestGdtr.cbGdt = GDTR.cbGdt;
985#endif
986 }
987
988#ifdef SELM_TRACK_GUEST_GDT_CHANGES
989 /*
990 * Check if Guest's GDTR is changed.
991 */
992 if ( GDTR.pGdt != pVM->selm.s.GuestGdtr.pGdt
993 || GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
994 {
995 Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%08X cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
996
997 /*
998 * [Re]Register write virtual handler for guest's GDT.
999 */
1000 if (pVM->selm.s.GuestGdtr.pGdt != ~0U && pVM->selm.s.fGDTRangeRegistered)
1001 {
1002 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
1003 AssertRC(rc);
1004 }
1005
1006 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
1007 0, selmGuestGDTWriteHandler, "selmgcGuestGDTWriteHandler", 0, "Guest GDT write access handler");
1008 if (VBOX_FAILURE(rc))
1009 return rc;
1010
1011 /* Update saved Guest GDTR. */
1012 pVM->selm.s.GuestGdtr = GDTR;
1013 pVM->selm.s.fGDTRangeRegistered = true;
1014 }
1015#endif
1016 }
1017
1018 /*
1019 * TSS sync
1020 */
1021 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS))
1022 {
1023 SELMR3SyncTSS(pVM);
1024 }
1025
1026 /*
1027 * LDT sync
1028 */
1029 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_LDT))
1030 {
1031 /*
1032 * Always assume the best
1033 */
1034 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_LDT);
1035
1036 /*
1037 * LDT handling is done similarly to the GDT handling with a shadow
1038 * array. However, since the LDT is expected to be swappable (at least
1039 * some ancient OSes makes it swappable) it must be floating and
1040 * synced on a per-page basis.
1041 *
1042 * Eventually we will change this to be fully on demand. Meaning that
1043 * we will only sync pages containing LDT selectors actually used and
1044 * let the #PF handler lazily sync pages as they are used.
1045 * (This applies to GDT too, when we start making OS/2 fast.)
1046 */
1047
1048 /*
1049 * First, determin the current LDT selector.
1050 */
1051 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1052 if ((SelLdt & X86_SEL_MASK) == 0)
1053 {
1054 /* ldtr = 0 - update hyper LDTR and deregister any active handler. */
1055 CPUMSetHyperLDTR(pVM, 0);
1056#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1057 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1058 {
1059 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1060 AssertRC(rc);
1061 pVM->selm.s.GCPtrGuestLdt = ~0U;
1062 }
1063#endif
1064 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1065 return VINF_SUCCESS;
1066 }
1067
1068 /*
1069 * Get the LDT selector.
1070 */
1071 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelLdt >> X86_SEL_SHIFT];
1072 RTGCPTR GCPtrLdt = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1073 unsigned cbLdt = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1074 if (pDesc->Gen.u1Granularity)
1075 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1076
1077 /*
1078 * Validate it.
1079 */
1080 if ( !cbLdt
1081 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt
1082 || pDesc->Gen.u1DescType
1083 || pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1084 {
1085 AssertMsg(!cbLdt, ("Invalid LDT %04x!\n", SelLdt));
1086
1087 /* cbLdt > 0:
1088 * This is quite impossible, so we do as most people do when faced with
1089 * the impossible, we simply ignore it.
1090 */
1091 CPUMSetHyperLDTR(pVM, 0);
1092#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1093 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1094 {
1095 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1096 AssertRC(rc);
1097 pVM->selm.s.GCPtrGuestLdt = ~0U;
1098 }
1099#endif
1100 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1101 return VINF_SUCCESS;
1102 }
1103 /** @todo check what intel does about odd limits. */
1104 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(VBOXDESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1105
1106 /*
1107 * Use the cached guest ldt address if the descriptor has already been modified (see below)
1108 * (this is necessary due to redundant LDT updates; see todo above at GDT sync)
1109 */
1110 if (MMHyperIsInsideArea(pVM, GCPtrLdt) == true)
1111 GCPtrLdt = pVM->selm.s.GCPtrGuestLdt; /* use the old one */
1112
1113
1114#ifdef SELM_TRACK_GUEST_LDT_CHANGES
1115 /** @todo Handle only present LDT segments. */
1116 // if (pDesc->Gen.u1Present)
1117 {
1118 /*
1119 * Check if Guest's LDT address/limit is changed.
1120 */
1121 if ( GCPtrLdt != pVM->selm.s.GCPtrGuestLdt
1122 || cbLdt != pVM->selm.s.cbLdtLimit)
1123 {
1124 Log(("SELMR3UpdateFromCPUM: Guest LDT changed to from %VGv:%04x to %VGv:%04x. (GDTR=%VGv:%04x)\n",
1125 pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1126
1127 /*
1128 * [Re]Register write virtual handler for guest's GDT.
1129 * In the event of LDT overlapping something, don't install it just assume it's being updated.
1130 */
1131 if (pVM->selm.s.GCPtrGuestLdt != ~0U)
1132 {
1133 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
1134 AssertRC(rc);
1135 }
1136#ifdef DEBUG
1137 if (pDesc->Gen.u1Present)
1138 Log(("LDT selector marked not present!!\n"));
1139#endif
1140 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
1141 0, selmGuestLDTWriteHandler, "selmgcGuestLDTWriteHandler", 0, "Guest LDT write access handler");
1142 if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
1143 {
1144 /** @todo investigate the various cases where conflicts happen and try avoid them by enh. the instruction emulation. */
1145 pVM->selm.s.GCPtrGuestLdt = ~0;
1146 Log(("WARNING: Guest LDT (%VGv:%04x) conflicted with existing access range!! Assumes LDT is begin updated. (GDTR=%VGv:%04x)\n",
1147 GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
1148 }
1149 else if (VBOX_SUCCESS(rc))
1150 pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
1151 else
1152 {
1153 CPUMSetHyperLDTR(pVM, 0);
1154 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1155 return rc;
1156 }
1157
1158 pVM->selm.s.cbLdtLimit = cbLdt;
1159 }
1160 }
1161#else
1162 pVM->selm.s.cbLdtLimit = cbLdt;
1163#endif
1164
1165 /*
1166 * Calc Shadow LDT base.
1167 */
1168 unsigned off;
1169 pVM->selm.s.offLdtHyper = off = (GCPtrLdt & PAGE_OFFSET_MASK);
1170 RTGCPTR GCPtrShadowLDT = (RTGCPTR)((RTGCUINTPTR)pVM->selm.s.GCPtrLdt + off);
1171 PVBOXDESC pShadowLDT = (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1172
1173 /*
1174 * Enable the LDT selector in the shadow GDT.
1175 */
1176 pDesc->Gen.u1Present = 1;
1177 pDesc->Gen.u16BaseLow = RT_LOWORD(GCPtrShadowLDT);
1178 pDesc->Gen.u8BaseHigh1 = RT_BYTE3(GCPtrShadowLDT);
1179 pDesc->Gen.u8BaseHigh2 = RT_BYTE4(GCPtrShadowLDT);
1180 pDesc->Gen.u1Available = 0;
1181 pDesc->Gen.u1Reserved = 0;
1182 if (cbLdt > 0xffff)
1183 {
1184 cbLdt = 0xffff;
1185 pDesc->Gen.u4LimitHigh = 0;
1186 pDesc->Gen.u16LimitLow = pDesc->Gen.u1Granularity ? 0xf : 0xffff;
1187 }
1188
1189 /*
1190 * Set Hyper LDTR and notify TRPM.
1191 */
1192 CPUMSetHyperLDTR(pVM, SelLdt);
1193
1194 /*
1195 * Loop synchronising the LDT page by page.
1196 */
1197 /** @todo investigate how intel handle various operations on half present cross page entries. */
1198 off = GCPtrLdt & (sizeof(VBOXDESC) - 1);
1199 AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
1200
1201 /* Note: Do not skip the first selector; unlike the GDT, a zero LDT selector is perfectly valid. */
1202 unsigned cbLeft = cbLdt + 1;
1203 PVBOXDESC pLDTE = pShadowLDT;
1204 while (cbLeft)
1205 {
1206 /*
1207 * Read a chunk.
1208 */
1209 unsigned cbChunk = PAGE_SIZE - ((RTGCUINTPTR)GCPtrLdt & PAGE_OFFSET_MASK);
1210 if (cbChunk > cbLeft)
1211 cbChunk = cbLeft;
1212 rc = PGMPhysReadGCPtr(pVM, pShadowLDT, GCPtrLdt, cbChunk);
1213 if (VBOX_SUCCESS(rc))
1214 {
1215 /*
1216 * Mark page
1217 */
1218 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D);
1219 AssertRC(rc);
1220
1221 /*
1222 * Loop thru the available LDT entries.
1223 * Figure out where to start and end and the potential cross pageness of
1224 * things adds a little complexity. pLDTE is updated there and not in the
1225 * 'next' part of the loop. The pLDTEEnd is inclusive.
1226 */
1227 PVBOXDESC pLDTEEnd = (PVBOXDESC)((uintptr_t)pShadowLDT + cbChunk) - 1;
1228 if (pLDTE + 1 < pShadowLDT)
1229 pLDTE = (PVBOXDESC)((uintptr_t)pShadowLDT + off);
1230 while (pLDTE <= pLDTEEnd)
1231 {
1232 if (pLDTE->Gen.u1Present)
1233 {
1234 /*
1235 * Code and data selectors are generally 1:1, with the
1236 * 'little' adjustment we do for DPL 0 selectors.
1237 */
1238 if (pLDTE->Gen.u1DescType)
1239 {
1240 /*
1241 * Hack for A-bit against Trap E on read-only GDT.
1242 */
1243 /** @todo Fix this by loading ds and cs before turning off WP. */
1244 if (!(pLDTE->Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1245 pLDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1246
1247 /*
1248 * All DPL 0 code and data segments are squeezed into DPL 1.
1249 *
1250 * We're skipping conforming segments here because those
1251 * cannot give us any trouble.
1252 */
1253 if ( pLDTE->Gen.u2Dpl == 0
1254 && (pLDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
1255 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
1256 pLDTE->Gen.u2Dpl = 1;
1257 }
1258 else
1259 {
1260 /*
1261 * System type selectors are marked not present.
1262 * Recompiler or special handling is required for these.
1263 */
1264 /** @todo what about interrupt gates and rawr0? */
1265 pLDTE->Gen.u1Present = 0;
1266 }
1267 }
1268
1269 /* Next LDT entry. */
1270 pLDTE++;
1271 }
1272 }
1273 else
1274 {
1275 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%d\n", rc));
1276 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0);
1277 AssertRC(rc);
1278 }
1279
1280 /*
1281 * Advance to the next page.
1282 */
1283 cbLeft -= cbChunk;
1284 GCPtrShadowLDT += cbChunk;
1285 pShadowLDT = (PVBOXDESC)((char *)pShadowLDT + cbChunk);
1286 GCPtrLdt += cbChunk;
1287 }
1288 }
1289
1290 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1291 return VINF_SUCCESS;
1292}
1293
1294
1295/**
1296 * \#PF Handler callback for virtual access handler ranges.
1297 *
1298 * Important to realize that a physical page in a range can have aliases, and
1299 * for ALL and WRITE handlers these will also trigger.
1300 *
1301 * @returns VINF_SUCCESS if the handler have carried out the operation.
1302 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1303 * @param pVM VM Handle.
1304 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1305 * @param pvPtr The HC mapping of that address.
1306 * @param pvBuf What the guest is reading/writing.
1307 * @param cbBuf How much it's reading/writing.
1308 * @param enmAccessType The access type.
1309 * @param pvUser User argument.
1310 */
1311static DECLCALLBACK(int) selmGuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1312{
1313 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1314 Log(("selmGuestGDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1315 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
1316
1317 return VINF_PGM_HANDLER_DO_DEFAULT;
1318}
1319
1320/**
1321 * \#PF Handler callback for virtual access handler ranges.
1322 *
1323 * Important to realize that a physical page in a range can have aliases, and
1324 * for ALL and WRITE handlers these will also trigger.
1325 *
1326 * @returns VINF_SUCCESS if the handler have carried out the operation.
1327 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1328 * @param pVM VM Handle.
1329 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1330 * @param pvPtr The HC mapping of that address.
1331 * @param pvBuf What the guest is reading/writing.
1332 * @param cbBuf How much it's reading/writing.
1333 * @param enmAccessType The access type.
1334 * @param pvUser User argument.
1335 */
1336static DECLCALLBACK(int) selmGuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1337{
1338 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1339 Log(("selmGuestLDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1340 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
1341 return VINF_PGM_HANDLER_DO_DEFAULT;
1342}
1343
1344/**
1345 * \#PF Handler callback for virtual access handler ranges.
1346 *
1347 * Important to realize that a physical page in a range can have aliases, and
1348 * for ALL and WRITE handlers these will also trigger.
1349 *
1350 * @returns VINF_SUCCESS if the handler have carried out the operation.
1351 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1352 * @param pVM VM Handle.
1353 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
1354 * @param pvPtr The HC mapping of that address.
1355 * @param pvBuf What the guest is reading/writing.
1356 * @param cbBuf How much it's reading/writing.
1357 * @param enmAccessType The access type.
1358 * @param pvUser User argument.
1359 */
1360static DECLCALLBACK(int) selmGuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1361{
1362 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
1363 Log(("selmGuestTSSWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));
1364 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1365 return VINF_PGM_HANDLER_DO_DEFAULT;
1366}
1367
1368/**
1369 * Check if the TSS ring 0 stack selector and pointer were updated (for now)
1370 *
1371 * @returns VBox status code.
1372 * @param pVM The VM to operate on.
1373 */
1374SELMR3DECL(int) SELMR3SyncTSS(PVM pVM)
1375{
1376 int rc;
1377
1378 if (pVM->selm.s.fDisableMonitoring)
1379 {
1380 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
1381 return VINF_SUCCESS;
1382 }
1383
1384/** @todo r=bird: SELMR3SyncTSS should be VMMAll code.
1385 * All the base, size, flags and stuff must be kept up to date in the CPUM tr register.
1386 */
1387 STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
1388
1389 Assert(!VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_GDT));
1390 Assert(VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS));
1391
1392 /*
1393 * TSS sync
1394 */
1395 RTSEL SelTss = CPUMGetGuestTR(pVM);
1396 if (SelTss & X86_SEL_MASK)
1397 {
1398 /** @todo r=bird: strictly speaking, this is wrong as we shouldn't bother with changes to
1399 * the TSS selector once its loaded. There are a bunch of this kind of problems (see Sander's
1400 * comment in the unzip defect)
1401 * The first part here should only be done when we're loading TR. The latter part which is
1402 * updating of the ss0:esp0 pair can be done by the access handler now since we can trap all
1403 * accesses, also REM ones. */
1404
1405 /*
1406 * Guest TR is not NULL.
1407 */
1408 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];
1409 RTGCPTR GCPtrTss = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1410 unsigned cbTss = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1411 if (pDesc->Gen.u1Granularity)
1412 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1413 cbTss++;
1414 pVM->selm.s.cbGuestTss = cbTss;
1415 pVM->selm.s.fGuestTss32Bit = pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
1416 || pDesc->Gen.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY;
1417
1418 /* Don't bother with anything but the core structure. (Actually all we care for is the r0 ss.) */
1419 if (cbTss > sizeof(VBOXTSS))
1420 cbTss = sizeof(VBOXTSS);
1421 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT),
1422 ("GCPtrTss=%VGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss));
1423
1424 // All system GDTs are marked not present above. That explains why this check fails.
1425 //if (pDesc->Gen.u1Present)
1426 /** @todo Handle only present TSS segments. */
1427 {
1428 /*
1429 * Check if Guest's TSS is changed.
1430 */
1431 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss
1432 || cbTss != pVM->selm.s.cbMonitoredGuestTss)
1433 {
1434 Log(("SELMR3UpdateFromCPUM: Guest's TSS is changed to pTss=%08X cbTss=%08X cbGuestTss\n", GCPtrTss, cbTss, pVM->selm.s.cbGuestTss));
1435
1436 /*
1437 * Validate it.
1438 */
1439 if ( SelTss & X86_SEL_LDT
1440 || !cbTss
1441 || SelTss >= pVM->selm.s.GuestGdtr.cbGdt
1442 || pDesc->Gen.u1DescType
1443 || ( pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
1444 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
1445 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL
1446 && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) )
1447 {
1448 AssertMsgFailed(("Invalid Guest TSS %04x!\n", SelTss));
1449 }
1450 else
1451 {
1452 /*
1453 * [Re]Register write virtual handler for guest's TSS.
1454 */
1455 if (pVM->selm.s.GCPtrGuestTss != ~0U)
1456 {
1457 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
1458 AssertRC(rc);
1459 }
1460
1461 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbTss - 1,
1462 0, selmGuestTSSWriteHandler, "selmgcGuestTSSWriteHandler", 0, "Guest TSS write access handler");
1463 if (VBOX_FAILURE(rc))
1464 {
1465 STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
1466 return rc;
1467 }
1468
1469 /* Update saved Guest TSS info. */
1470 pVM->selm.s.GCPtrGuestTss = GCPtrTss;
1471 pVM->selm.s.cbMonitoredGuestTss = cbTss;
1472 pVM->selm.s.GCSelTss = SelTss;
1473 }
1474 }
1475
1476 /* Update the ring 0 stack selector and base address */
1477 /* feeling very lazy; reading too much */
1478 VBOXTSS tss;
1479 rc = PGMPhysReadGCPtr(pVM, &tss, GCPtrTss, sizeof(VBOXTSS));
1480 if (VBOX_SUCCESS(rc))
1481 {
1482 #ifdef DEBUG
1483 uint32_t ssr0, espr0;
1484
1485 SELMGetRing1Stack(pVM, &ssr0, &espr0);
1486 ssr0 &= ~1;
1487
1488 if (ssr0 != tss.ss0 || espr0 != tss.esp0)
1489 Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0));
1490 Log(("offIoBitmap=%#x\n", tss.offIoBitmap));
1491 #endif
1492 /* Update our TSS structure for the guest's ring 1 stack */
1493 SELMSetRing1Stack(pVM, tss.ss0 | 1, tss.esp0);
1494 }
1495 else
1496 {
1497 /* Note: the ring 0 stack selector and base address are updated on demand in this case. */
1498
1499 /* Note: handle these dependencies better! */
1500 TRPMR3SetGuestTrapHandler(pVM, 0x2E, TRPM_INVALID_HANDLER);
1501 TRPMR3SetGuestTrapHandler(pVM, 0x80, TRPM_INVALID_HANDLER);
1502 pVM->selm.s.fSyncTSSRing0Stack = true;
1503 }
1504 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
1505 }
1506 }
1507
1508 STAM_PROFILE_STOP(&pVM->selm.s.StatTSSSync, a);
1509 return VINF_SUCCESS;
1510}
1511
1512
1513/**
1514 * Compares the Guest GDT and LDT with the shadow tables.
1515 * This is a VBOX_STRICT only function.
1516 *
1517 * @returns VBox status code.
1518 * @param pVM The VM Handle.
1519 */
1520SELMR3DECL(int) SELMR3DebugCheck(PVM pVM)
1521{
1522#ifdef VBOX_STRICT
1523 /*
1524 * Get GDTR and check for conflict.
1525 */
1526 VBOXGDTR GDTR;
1527 CPUMGetGuestGDTR(pVM, &GDTR);
1528 if (GDTR.cbGdt == 0)
1529 return VINF_SUCCESS;
1530
1531 if (GDTR.cbGdt >= (unsigned)(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> X86_SEL_SHIFT))
1532 Log(("SELMR3DebugCheck: guest GDT size forced us to look for unused selectors.\n"));
1533
1534 if (GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
1535 Log(("SELMR3DebugCheck: limits have changed! new=%d old=%d\n", GDTR.cbGdt, pVM->selm.s.GuestGdtr.cbGdt));
1536
1537 /*
1538 * Loop thru the GDT checking each entry.
1539 */
1540 RTGCPTR GCPtrGDTEGuest = GDTR.pGdt;
1541 PVBOXDESC pGDTE = pVM->selm.s.paGdtHC;
1542 PVBOXDESC pGDTEEnd = (PVBOXDESC)((uintptr_t)pGDTE + GDTR.cbGdt);
1543 while (pGDTE < pGDTEEnd)
1544 {
1545 VBOXDESC GDTEGuest;
1546 int rc = PGMPhysReadGCPtr(pVM, &GDTEGuest, GCPtrGDTEGuest, sizeof(GDTEGuest));
1547 if (VBOX_SUCCESS(rc))
1548 {
1549 if (pGDTE->Gen.u1DescType || pGDTE->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1550 {
1551 if ( pGDTE->Gen.u16LimitLow != GDTEGuest.Gen.u16LimitLow
1552 || pGDTE->Gen.u4LimitHigh != GDTEGuest.Gen.u4LimitHigh
1553 || pGDTE->Gen.u16BaseLow != GDTEGuest.Gen.u16BaseLow
1554 || pGDTE->Gen.u8BaseHigh1 != GDTEGuest.Gen.u8BaseHigh1
1555 || pGDTE->Gen.u8BaseHigh2 != GDTEGuest.Gen.u8BaseHigh2
1556 || pGDTE->Gen.u1DefBig != GDTEGuest.Gen.u1DefBig
1557 || pGDTE->Gen.u1DescType != GDTEGuest.Gen.u1DescType)
1558 {
1559 unsigned iGDT = pGDTE - pVM->selm.s.paGdtHC;
1560 SELMR3DumpDescriptor(*pGDTE, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, shadow");
1561 SELMR3DumpDescriptor(GDTEGuest, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, guest");
1562 }
1563 }
1564 }
1565
1566 /* Advance to the next descriptor. */
1567 GCPtrGDTEGuest += sizeof(VBOXDESC);
1568 pGDTE++;
1569 }
1570
1571
1572 /*
1573 * LDT?
1574 */
1575 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
1576 if ((SelLdt & X86_SEL_MASK) == 0)
1577 return VINF_SUCCESS;
1578 if (SelLdt > GDTR.cbGdt)
1579 {
1580 Log(("SELMR3DebugCheck: ldt is out of bound SelLdt=%#x\n", SelLdt));
1581 return VERR_INTERNAL_ERROR;
1582 }
1583 VBOXDESC LDTDesc;
1584 int rc = PGMPhysReadGCPtr(pVM, &LDTDesc, GDTR.pGdt + (SelLdt & X86_SEL_MASK), sizeof(LDTDesc));
1585 if (VBOX_FAILURE(rc))
1586 {
1587 Log(("SELMR3DebugCheck: Failed to read LDT descriptor. rc=%d\n", rc));
1588 return rc;
1589 }
1590 RTGCPTR GCPtrLDTEGuest = LDTDesc.Gen.u16BaseLow | (LDTDesc.Gen.u8BaseHigh1 << 16) | (LDTDesc.Gen.u8BaseHigh2 << 24);
1591 unsigned cbLdt = LDTDesc.Gen.u16LimitLow | (LDTDesc.Gen.u4LimitHigh << 16);
1592 if (LDTDesc.Gen.u1Granularity)
1593 cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1594
1595 /*
1596 * Validate it.
1597 */
1598 if (!cbLdt)
1599 return VINF_SUCCESS;
1600 /** @todo check what intel does about odd limits. */
1601 AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(VBOXDESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
1602 if ( LDTDesc.Gen.u1DescType
1603 || LDTDesc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT
1604 || SelLdt >= pVM->selm.s.GuestGdtr.cbGdt)
1605 {
1606 Log(("SELmR3DebugCheck: Invalid LDT %04x!\n", SelLdt));
1607 return VERR_INTERNAL_ERROR;
1608 }
1609
1610 /*
1611 * Loop thru the LDT checking each entry.
1612 */
1613 unsigned off = (GCPtrLDTEGuest & PAGE_OFFSET_MASK);
1614 PVBOXDESC pLDTE = (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1615 PVBOXDESC pLDTEEnd = (PVBOXDESC)((uintptr_t)pGDTE + cbLdt);
1616 while (pLDTE < pLDTEEnd)
1617 {
1618 VBOXDESC LDTEGuest;
1619 int rc = PGMPhysReadGCPtr(pVM, &LDTEGuest, GCPtrLDTEGuest, sizeof(LDTEGuest));
1620 if (VBOX_SUCCESS(rc))
1621 {
1622 if ( pLDTE->Gen.u16LimitLow != LDTEGuest.Gen.u16LimitLow
1623 || pLDTE->Gen.u4LimitHigh != LDTEGuest.Gen.u4LimitHigh
1624 || pLDTE->Gen.u16BaseLow != LDTEGuest.Gen.u16BaseLow
1625 || pLDTE->Gen.u8BaseHigh1 != LDTEGuest.Gen.u8BaseHigh1
1626 || pLDTE->Gen.u8BaseHigh2 != LDTEGuest.Gen.u8BaseHigh2
1627 || pLDTE->Gen.u1DefBig != LDTEGuest.Gen.u1DefBig
1628 || pLDTE->Gen.u1DescType != LDTEGuest.Gen.u1DescType)
1629 {
1630 unsigned iLDT = pLDTE - (PVBOXDESC)((uintptr_t)pVM->selm.s.HCPtrLdt + off);
1631 SELMR3DumpDescriptor(*pLDTE, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, shadow");
1632 SELMR3DumpDescriptor(LDTEGuest, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, guest");
1633 }
1634 }
1635
1636 /* Advance to the next descriptor. */
1637 GCPtrLDTEGuest += sizeof(VBOXDESC);
1638 pLDTE++;
1639 }
1640
1641#else
1642 NOREF(pVM);
1643#endif
1644
1645 return VINF_SUCCESS;
1646}
1647
1648
1649/**
1650 * Validates the RawR0 TSS values against the one in the Guest TSS.
1651 *
1652 * @returns true if it matches.
1653 * @returns false and assertions on mismatch..
1654 * @param pVM VM Handle.
1655 */
1656SELMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
1657{
1658#ifdef VBOX_STRICT
1659
1660 RTSEL SelTss = CPUMGetGuestTR(pVM);
1661 if (SelTss & X86_SEL_MASK)
1662 {
1663 AssertMsg((SelTss & X86_SEL_MASK) == (pVM->selm.s.GCSelTss & X86_SEL_MASK), ("New TSS selector = %04X, old TSS selector = %04X\n", SelTss, pVM->selm.s.GCSelTss));
1664
1665 /*
1666 * Guest TR is not NULL.
1667 */
1668 PVBOXDESC pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];
1669 RTGCPTR GCPtrTss = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
1670 unsigned cbTss = pDesc->Gen.u16LimitLow | (pDesc->Gen.u4LimitHigh << 16);
1671 if (pDesc->Gen.u1Granularity)
1672 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1673 cbTss++;
1674 /* Don't bother with anything but the core structure. (Actually all we care for is the r0 ss.) */
1675 if (cbTss > sizeof(VBOXTSS))
1676 cbTss = sizeof(VBOXTSS);
1677 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + cbTss - 1) >> PAGE_SHIFT),
1678 ("GCPtrTss=%VGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss));
1679
1680 // All system GDTs are marked not present above. That explains why this check fails.
1681 //if (pDesc->Gen.u1Present)
1682 /** @todo Handle only present TSS segments. */
1683 {
1684 /*
1685 * Check if Guest's TSS was changed.
1686 */
1687 if ( GCPtrTss != pVM->selm.s.GCPtrGuestTss
1688 || cbTss != pVM->selm.s.cbMonitoredGuestTss)
1689 {
1690 AssertMsgFailed(("Guest's TSS (Sel 0x%X) is changed from %RGv:%04x to %RGv:%04x\n",
1691 SelTss, pVM->selm.s.GCPtrGuestTss, pVM->selm.s.cbMonitoredGuestTss,
1692 GCPtrTss, cbTss));
1693 }
1694 }
1695 }
1696
1697 if (!pVM->selm.s.fSyncTSSRing0Stack)
1698 {
1699 RTGCPTR pGuestTSS = pVM->selm.s.GCPtrGuestTss;
1700 uint32_t ESPR0;
1701 int rc = PGMPhysReadGCPtr(pVM, &ESPR0, pGuestTSS + RT_OFFSETOF(VBOXTSS, esp0), sizeof(ESPR0));
1702 if (VBOX_SUCCESS(rc))
1703 {
1704 RTSEL SelSS0;
1705 rc = PGMPhysReadGCPtr(pVM, &SelSS0, pGuestTSS + RT_OFFSETOF(VBOXTSS, ss0), sizeof(SelSS0));
1706 if (VBOX_SUCCESS(rc))
1707 {
1708 if ( ESPR0 == pVM->selm.s.Tss.esp1
1709 && SelSS0 == (pVM->selm.s.Tss.ss1 & ~1))
1710 return true;
1711
1712 RTGCPHYS GCPhys;
1713 uint64_t fFlags;
1714
1715 rc = PGMGstGetPage(pVM, pGuestTSS, &fFlags, &GCPhys);
1716 AssertRC(rc);
1717 AssertMsgFailed(("TSS out of sync!! (%04X:%08X vs %04X:%08X (guest)) Tss=%VGv Phys=%VGp\n",
1718 (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, SelSS0, ESPR0, pGuestTSS, GCPhys));
1719 }
1720 else
1721 AssertRC(rc);
1722 }
1723 else
1724 /* Happens during early Windows XP boot when it is switching page tables. */
1725 Assert(rc == VINF_SUCCESS || ((rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT) && !(CPUMGetGuestEFlags(pVM) & X86_EFL_IF)));
1726 }
1727 return false;
1728#else
1729 NOREF(pVM);
1730 return true;
1731#endif
1732}
1733
1734
1735/**
1736 * Returns flat address and limit of LDT by LDT selector from guest GDTR.
1737 *
1738 * Fully validate selector.
1739 *
1740 * @returns VBox status.
1741 * @param pVM VM Handle.
1742 * @param SelLdt LDT selector.
1743 * @param ppvLdt Where to store the flat address of LDT.
1744 * @param pcbLimit Where to store LDT limit.
1745 */
1746SELMDECL(int) SELMGetLDTFromSel(PVM pVM, RTSEL SelLdt, PRTGCPTR ppvLdt, unsigned *pcbLimit)
1747{
1748 /* Get guest GDTR. */
1749 VBOXGDTR GDTR;
1750 CPUMGetGuestGDTR(pVM, &GDTR);
1751
1752 /* Check selector TI and GDT limit. */
1753 if ( SelLdt & X86_SEL_LDT
1754 || (SelLdt > GDTR.cbGdt))
1755 return VERR_INVALID_SELECTOR;
1756
1757 /* Read descriptor from GC. */
1758 VBOXDESC Desc;
1759 int rc = PGMPhysReadGCPtr(pVM, (void *)&Desc, (RTGCPTR)(GDTR.pGdt + (SelLdt & X86_SEL_MASK)), sizeof(Desc));
1760 if (VBOX_FAILURE(rc))
1761 {
1762 /* fatal */
1763 AssertMsgFailed(("Can't read LDT descriptor for selector=%04X\n", SelLdt));
1764 return VERR_SELECTOR_NOT_PRESENT;
1765 }
1766
1767 /* Check if LDT descriptor is not present. */
1768 if (Desc.Gen.u1Present == 0)
1769 return VERR_SELECTOR_NOT_PRESENT;
1770
1771 /* Check LDT descriptor type. */
1772 if ( Desc.Gen.u1DescType == 1
1773 || Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1774 return VERR_INVALID_SELECTOR;
1775
1776 /* LDT descriptor is ok. */
1777 if (ppvLdt)
1778 {
1779 *ppvLdt = (RTGCPTR)( (Desc.Gen.u8BaseHigh2 << 24)
1780 | (Desc.Gen.u8BaseHigh1 << 16)
1781 | Desc.Gen.u16BaseLow);
1782 *pcbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1783 }
1784 return VINF_SUCCESS;
1785}
1786
1787
/**
 * Gets information about a selector.
 * Intended for the debugger mostly and will prefer the guest
 * descriptor tables over the shadow ones.
 *
 * Resolution order: hypervisor (shadow GDT) selectors first, then the guest
 * GDT/LDT when the guest is in protected mode, else a flat real-mode
 * translation (base = Sel * 16, 64KB limit).
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
 *          backing the selector table wasn't present.
 * @returns Other VBox status code on other errors.
 *
 * @param   pVM         VM handle.
 * @param   Sel         The selector to get info about.
 * @param   pSelInfo    Where to store the information.
 */
SELMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo)
{
    Assert(pSelInfo);

    /*
     * Read the descriptor entry.
     * A GDT selector (TI bit clear) matching one of the hypervisor selectors
     * is served from the shadow GDT; everything else comes from the guest.
     */
    VBOXDESC Desc;
    if (    !(Sel & X86_SEL_LDT)
        && (   pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == (Sel & X86_SEL_MASK)
            || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == (Sel & X86_SEL_MASK)
            || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == (Sel & X86_SEL_MASK)
            || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == (Sel & X86_SEL_MASK)
            || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK))
       )
    {
        /*
         * Hypervisor descriptor - read straight out of the shadow GDT.
         */
        pSelInfo->fHyper = true;
        Desc = pVM->selm.s.paGdtHC[Sel >> X86_SEL_SHIFT];
    }
    else if (CPUMIsGuestInProtectedMode(pVM))
    {
        /*
         * Read it from the guest descriptor table.
         */
        pSelInfo->fHyper = false;

        VBOXGDTR    Gdtr;
        RTGCPTR     GCPtrDesc;
        CPUMGetGuestGDTR(pVM, &Gdtr);
        if (!(Sel & X86_SEL_LDT))
        {
            /* GDT - the whole 8-byte descriptor must lie inside the GDT limit. */
            if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > (unsigned)Gdtr.cbGdt)
                return VERR_INVALID_SELECTOR;
            GCPtrDesc = Gdtr.pGdt + (Sel & X86_SEL_MASK);
        }
        else
        {
            /*
             * LDT - must locate the LDT first...
             * The LDTR selector indexes the GDT, so validate it against the
             * GDT limit and read the LDT descriptor from the guest GDT.
             */
            RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
            if (    (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(VBOXDESC) /* the first selector is invalid, right? */
                ||  (unsigned)(SelLdt & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > (unsigned)Gdtr.cbGdt)
                return VERR_INVALID_SELECTOR;
            GCPtrDesc = Gdtr.pGdt + (SelLdt & X86_SEL_MASK);
            int rc = PGMPhysReadGCPtr(pVM, &Desc, GCPtrDesc, sizeof(Desc));
            if (VBOX_FAILURE(rc))
                return rc;

            /* Validate the LDT descriptor: present, system type, and exactly an LDT. */
            if (Desc.Gen.u1Present == 0)
                return VERR_SELECTOR_NOT_PRESENT;
            if (    Desc.Gen.u1DescType == 1
                ||  Desc.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
                return VERR_INVALID_SELECTOR;

            /* Expand a page-granular limit and check the selector fits inside the LDT. */
            unsigned cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
            if (Desc.Gen.u1Granularity)
                cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
            if ((unsigned)(Sel & X86_SEL_MASK) + sizeof(VBOXDESC) - 1 > cbLimit)
                return VERR_INVALID_SELECTOR;

            /* calc the descriptor location: LDT base (from the descriptor) + selector offset. */
            GCPtrDesc = (Desc.Gen.u8BaseHigh2 << 24)
                      | (Desc.Gen.u8BaseHigh1 << 16)
                      | Desc.Gen.u16BaseLow;
            GCPtrDesc += (Sel & X86_SEL_MASK);
        }

        /* read the descriptor (in the LDT case this overwrites the LDT descriptor read above). */
        int rc = PGMPhysReadGCPtr(pVM, &Desc, GCPtrDesc, sizeof(Desc));
        if (VBOX_FAILURE(rc))
            return rc;
    }
    else
    {
        /*
         * We're in real mode: synthesize the segment (base = Sel * 16, 64KB limit).
         */
        pSelInfo->Sel       = Sel;
        pSelInfo->GCPtrBase = Sel << 4;
        pSelInfo->cbLimit   = 0xffff;
        pSelInfo->fHyper    = false;
        pSelInfo->fRealMode = true;
        memset(&pSelInfo->Raw, 0, sizeof(pSelInfo->Raw));
        return VINF_SUCCESS;
    }

    /*
     * Extract the base and limit, expanding a page-granular limit to bytes.
     */
    pSelInfo->Sel     = Sel;
    pSelInfo->Raw     = Desc;
    pSelInfo->cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
    if (Desc.Gen.u1Granularity)
        pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
    pSelInfo->GCPtrBase = (Desc.Gen.u8BaseHigh2 << 24)
                        | (Desc.Gen.u8BaseHigh1 << 16)
                        | Desc.Gen.u16BaseLow;
    pSelInfo->fRealMode = false;

    return VINF_SUCCESS;
}
1911
1912
1913/**
1914 * Gets information about a selector from the shadow tables.
1915 *
1916 * This is intended to be faster than the SELMR3GetSelectorInfo() method, but requires
1917 * that the caller ensures that the shadow tables are up to date.
1918 *
1919 * @returns VINF_SUCCESS on success.
1920 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
1921 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
1922 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
1923 * backing the selector table wasn't present.
1924 * @returns Other VBox status code on other errors.
1925 *
1926 * @param pVM VM handle.
1927 * @param Sel The selector to get info about.
1928 * @param pSelInfo Where to store the information.
1929 */
1930SELMR3DECL(int) SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo)
1931{
1932 Assert(pSelInfo);
1933
1934 /*
1935 * Read the descriptor entry
1936 */
1937 VBOXDESC Desc;
1938 if (!(Sel & X86_SEL_LDT))
1939 {
1940 /*
1941 * Global descriptor.
1942 */
1943 Desc = pVM->selm.s.paGdtHC[Sel >> X86_SEL_SHIFT];
1944 pSelInfo->fHyper = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK)
1945 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK)
1946 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == (Sel & X86_SEL_MASK)
1947 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == (Sel & X86_SEL_MASK)
1948 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == (Sel & X86_SEL_MASK);
1949 /** @todo check that the GDT offset is valid. */
1950 }
1951 else
1952 {
1953 /*
1954 * Local Descriptor.
1955 */
1956 PVBOXDESC paLDT = (PVBOXDESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper);
1957 Desc = paLDT[Sel >> X86_SEL_SHIFT];
1958 /** @todo check if the LDT page is actually available. */
1959 /** @todo check that the LDT offset is valid. */
1960 pSelInfo->fHyper = false;
1961 }
1962
1963 /*
1964 * Extract the base and limit
1965 */
1966 pSelInfo->Sel = Sel;
1967 pSelInfo->Raw = Desc;
1968 pSelInfo->cbLimit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
1969 if (Desc.Gen.u1Granularity)
1970 pSelInfo->cbLimit = (pSelInfo->cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
1971 pSelInfo->GCPtrBase = (Desc.Gen.u8BaseHigh2 << 24)
1972 | (Desc.Gen.u8BaseHigh1 << 16)
1973 | Desc.Gen.u16BaseLow;
1974 pSelInfo->fRealMode = false;
1975
1976 return VINF_SUCCESS;
1977}
1978
1979
1980/**
1981 * Formats a descriptor.
1982 *
1983 * @param Desc Descriptor to format.
1984 * @param Sel Selector number.
1985 * @param pszOutput Output buffer.
1986 * @param cchOutput Size of output buffer.
1987 */
1988static void selmR3FormatDescriptor(VBOXDESC Desc, RTSEL Sel, char *pszOutput, size_t cchOutput)
1989{
1990 /*
1991 * Make variable description string.
1992 */
1993 static struct
1994 {
1995 unsigned cch;
1996 const char *psz;
1997 } const aTypes[32] =
1998 {
1999 #define STRENTRY(str) { sizeof(str) - 1, str }
2000 /* system */
2001 STRENTRY("Reserved0 "), /* 0x00 */
2002 STRENTRY("TSS16Avail "), /* 0x01 */
2003 STRENTRY("LDT "), /* 0x02 */
2004 STRENTRY("TSS16Busy "), /* 0x03 */
2005 STRENTRY("Call16 "), /* 0x04 */
2006 STRENTRY("Task "), /* 0x05 */
2007 STRENTRY("Int16 "), /* 0x06 */
2008 STRENTRY("Trap16 "), /* 0x07 */
2009 STRENTRY("Reserved8 "), /* 0x08 */
2010 STRENTRY("TSS32Avail "), /* 0x09 */
2011 STRENTRY("ReservedA "), /* 0x0a */
2012 STRENTRY("TSS32Busy "), /* 0x0b */
2013 STRENTRY("Call32 "), /* 0x0c */
2014 STRENTRY("ReservedD "), /* 0x0d */
2015 STRENTRY("Int32 "), /* 0x0e */
2016 STRENTRY("Trap32 "), /* 0x0f */
2017 /* non system */
2018 STRENTRY("DataRO "), /* 0x10 */
2019 STRENTRY("DataRO Accessed "), /* 0x11 */
2020 STRENTRY("DataRW "), /* 0x12 */
2021 STRENTRY("DataRW Accessed "), /* 0x13 */
2022 STRENTRY("DataDownRO "), /* 0x14 */
2023 STRENTRY("DataDownRO Accessed "), /* 0x15 */
2024 STRENTRY("DataDownRW "), /* 0x16 */
2025 STRENTRY("DataDownRW Accessed "), /* 0x17 */
2026 STRENTRY("CodeEO "), /* 0x18 */
2027 STRENTRY("CodeEO Accessed "), /* 0x19 */
2028 STRENTRY("CodeER "), /* 0x1a */
2029 STRENTRY("CodeER Accessed "), /* 0x1b */
2030 STRENTRY("CodeConfEO "), /* 0x1c */
2031 STRENTRY("CodeConfEO Accessed "), /* 0x1d */
2032 STRENTRY("CodeConfER "), /* 0x1e */
2033 STRENTRY("CodeConfER Accessed ") /* 0x1f */
2034 #undef SYSENTRY
2035 };
2036 #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0)
2037 char szMsg[128];
2038 char *psz = &szMsg[0];
2039 unsigned i = Desc.Gen.u1DescType << 4 | Desc.Gen.u4Type;
2040 memcpy(psz, aTypes[i].psz, aTypes[i].cch);
2041 psz += aTypes[i].cch;
2042
2043 if (Desc.Gen.u1Present)
2044 ADD_STR(psz, "Present ");
2045 else
2046 ADD_STR(psz, "Not-Present ");
2047 if (Desc.Gen.u1Granularity)
2048 ADD_STR(psz, "Page ");
2049 if (Desc.Gen.u1DefBig)
2050 ADD_STR(psz, "32-bit ");
2051 else
2052 ADD_STR(psz, "16-bit ");
2053 #undef ADD_STR
2054 *psz = '\0';
2055
2056 /*
2057 * Limit and Base and format the output.
2058 */
2059 uint32_t u32Limit = Desc.Gen.u4LimitHigh << 16 | Desc.Gen.u16LimitLow;
2060 if (Desc.Gen.u1Granularity)
2061 u32Limit = u32Limit << PAGE_SHIFT | PAGE_OFFSET_MASK;
2062 uint32_t u32Base = Desc.Gen.u8BaseHigh2 << 24 | Desc.Gen.u8BaseHigh1 << 16 | Desc.Gen.u16BaseLow;
2063
2064 RTStrPrintf(pszOutput, cchOutput, "%04x - %08x %08x - base=%08x limit=%08x dpl=%d %s",
2065 Sel, Desc.au32[0], Desc.au32[1], u32Base, u32Limit, Desc.Gen.u2Dpl, szMsg);
2066}
2067
2068
2069/**
2070 * Dumps a descriptor.
2071 *
2072 * @param Desc Descriptor to dump.
2073 * @param Sel Selector number.
2074 * @param pszMsg Message to prepend the log entry with.
2075 */
2076SELMR3DECL(void) SELMR3DumpDescriptor(VBOXDESC Desc, RTSEL Sel, const char *pszMsg)
2077{
2078 char szOutput[128];
2079 selmR3FormatDescriptor(Desc, Sel, &szOutput[0], sizeof(szOutput));
2080 Log(("%s: %s\n", pszMsg, szOutput));
2081 NOREF(szOutput[0]);
2082}
2083
2084
2085/**
2086 * Display the shadow gdt.
2087 *
2088 * @param pVM VM Handle.
2089 * @param pHlp The info helpers.
2090 * @param pszArgs Arguments, ignored.
2091 */
2092static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2093{
2094 pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%VGv):\n", MMHyperHC2GC(pVM, pVM->selm.s.paGdtHC));
2095 for (unsigned iGDT = 0; iGDT < SELM_GDT_ELEMENTS; iGDT++)
2096 {
2097 if (pVM->selm.s.paGdtHC[iGDT].Gen.u1Present)
2098 {
2099 char szOutput[128];
2100 selmR3FormatDescriptor(pVM->selm.s.paGdtHC[iGDT], iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2101 const char *psz = "";
2102 if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] >> X86_SEL_SHIFT))
2103 psz = " HyperCS";
2104 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] >> X86_SEL_SHIFT))
2105 psz = " HyperDS";
2106 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] >> X86_SEL_SHIFT))
2107 psz = " HyperCS64";
2108 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> X86_SEL_SHIFT))
2109 psz = " HyperTSS";
2110 else if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> X86_SEL_SHIFT))
2111 psz = " HyperTSSTrap08";
2112 pHlp->pfnPrintf(pHlp, "%s%s\n", szOutput, psz);
2113 }
2114 }
2115}
2116
2117
2118/**
2119 * Display the guest gdt.
2120 *
2121 * @param pVM VM Handle.
2122 * @param pHlp The info helpers.
2123 * @param pszArgs Arguments, ignored.
2124 */
2125static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2126{
2127 VBOXGDTR GDTR;
2128 CPUMGetGuestGDTR(pVM, &GDTR);
2129 RTGCPTR pGDTGC = (RTGCPTR)GDTR.pGdt;
2130 unsigned cGDTs = ((unsigned)GDTR.cbGdt + 1) / sizeof(VBOXDESC);
2131
2132 pHlp->pfnPrintf(pHlp, "Guest GDT (GCAddr=%VGv limit=%x):\n", pGDTGC, GDTR.cbGdt);
2133 for (unsigned iGDT = 0; iGDT < cGDTs; iGDT++, pGDTGC += sizeof(VBOXDESC))
2134 {
2135 VBOXDESC GDTE;
2136 int rc = PGMPhysReadGCPtr(pVM, &GDTE, pGDTGC, sizeof(GDTE));
2137 if (VBOX_SUCCESS(rc))
2138 {
2139 if (GDTE.Gen.u1Present)
2140 {
2141 char szOutput[128];
2142 selmR3FormatDescriptor(GDTE, iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
2143 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2144 }
2145 }
2146 else if (rc == VERR_PAGE_NOT_PRESENT)
2147 {
2148 if ((pGDTGC & PAGE_OFFSET_MASK) + sizeof(VBOXDESC) - 1 < sizeof(VBOXDESC))
2149 pHlp->pfnPrintf(pHlp, "%04 - page not present (GCAddr=%VGv)\n", iGDT << X86_SEL_SHIFT, pGDTGC);
2150 }
2151 else
2152 pHlp->pfnPrintf(pHlp, "%04 - read error rc=%Vrc GCAddr=%VGv\n", iGDT << X86_SEL_SHIFT, rc, pGDTGC);
2153 }
2154}
2155
2156
2157/**
2158 * Display the shadow ldt.
2159 *
2160 * @param pVM VM Handle.
2161 * @param pHlp The info helpers.
2162 * @param pszArgs Arguments, ignored.
2163 */
2164static DECLCALLBACK(void) selmR3InfoLdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2165{
2166 unsigned cLDTs = ((unsigned)pVM->selm.s.cbLdtLimit + 1) >> X86_SEL_SHIFT;
2167 PVBOXDESC paLDT = (PVBOXDESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper);
2168 pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%VGv limit=%d):\n", pVM->selm.s.GCPtrLdt + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);
2169 for (unsigned iLDT = 0; iLDT < cLDTs; iLDT++)
2170 {
2171 if (paLDT[iLDT].Gen.u1Present)
2172 {
2173 char szOutput[128];
2174 selmR3FormatDescriptor(paLDT[iLDT], (iLDT << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2175 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2176 }
2177 }
2178}
2179
2180
2181/**
2182 * Display the guest ldt.
2183 *
2184 * @param pVM VM Handle.
2185 * @param pHlp The info helpers.
2186 * @param pszArgs Arguments, ignored.
2187 */
2188static DECLCALLBACK(void) selmR3InfoLdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2189{
2190 RTSEL SelLdt = CPUMGetGuestLDTR(pVM);
2191 if (!(SelLdt & X86_SEL_MASK))
2192 {
2193 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): Null-Selector\n", SelLdt);
2194 return;
2195 }
2196
2197 RTGCPTR pLdtGC;
2198 unsigned cbLdt;
2199 int rc = SELMGetLDTFromSel(pVM, SelLdt, &pLdtGC, &cbLdt);
2200 if (VBOX_FAILURE(rc))
2201 {
2202 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=%Vrc\n", SelLdt, rc);
2203 return;
2204 }
2205
2206 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%VGv limit=%x):\n", SelLdt, pLdtGC, cbLdt);
2207 unsigned cLdts = (cbLdt + 1) >> X86_SEL_SHIFT;
2208 for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, pLdtGC += sizeof(VBOXDESC))
2209 {
2210 VBOXDESC LdtE;
2211 int rc = PGMPhysReadGCPtr(pVM, &LdtE, pLdtGC, sizeof(LdtE));
2212 if (VBOX_SUCCESS(rc))
2213 {
2214 if (LdtE.Gen.u1Present)
2215 {
2216 char szOutput[128];
2217 selmR3FormatDescriptor(LdtE, (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
2218 pHlp->pfnPrintf(pHlp, "%s\n", szOutput);
2219 }
2220 }
2221 else if (rc == VERR_PAGE_NOT_PRESENT)
2222 {
2223 if ((pLdtGC & PAGE_OFFSET_MASK) + sizeof(VBOXDESC) - 1 < sizeof(VBOXDESC))
2224 pHlp->pfnPrintf(pHlp, "%04 - page not present (GCAddr=%VGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, pLdtGC);
2225 }
2226 else
2227 pHlp->pfnPrintf(pHlp, "%04 - read error rc=%Vrc GCAddr=%VGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, pLdtGC);
2228 }
2229}
2230
2231
/**
 * Dumps the hypervisor GDT.
 *
 * Thin wrapper that invokes the registered "gdt" DBGF info handler
 * (selmR3InfoGdt) with no arguments and the default output.
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3DumpHyperGDT(PVM pVM)
{
    DBGFR3Info(pVM, "gdt", NULL, NULL);
}
2241
/**
 * Dumps the hypervisor LDT.
 *
 * Thin wrapper that invokes the registered "ldt" DBGF info handler
 * (selmR3InfoLdt) with no arguments and the default output.
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3DumpHyperLDT(PVM pVM)
{
    DBGFR3Info(pVM, "ldt", NULL, NULL);
}
2251
/**
 * Dumps the guest GDT.
 *
 * Thin wrapper that invokes the registered "gdtguest" DBGF info handler
 * (selmR3InfoGdtGuest) with no arguments and the default output.
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3DumpGuestGDT(PVM pVM)
{
    DBGFR3Info(pVM, "gdtguest", NULL, NULL);
}
2261
/**
 * Dumps the guest LDT.
 *
 * Thin wrapper that invokes the registered "ldtguest" DBGF info handler
 * (selmR3InfoLdtGuest) with no arguments and the default output.
 *
 * @param   pVM     VM handle.
 */
SELMR3DECL(void) SELMR3DumpGuestLDT(PVM pVM)
{
    DBGFR3Info(pVM, "ldtguest", NULL, NULL);
}
2271
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette