VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher.cpp@ 17522

Last change on this file since 17522 was 16859, checked in by vboxsync, 16 years ago

Load hypervisor CR3 from CPUM (instead of hardcoded fixups in the switchers). Dangerous change. Watch for regressions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 36.5 KB
Line 
1/* $Id: VMMSwitcher.cpp 16859 2009-02-17 16:19:51Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor, World Switcher(s).
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VMM
26#include <VBox/vmm.h>
27#include <VBox/pgm.h>
28#include <VBox/selm.h>
29#include <VBox/mm.h>
30#include <VBox/sup.h>
31#include "VMMInternal.h"
32#include "VMMSwitcher/VMMSwitcher.h"
33#include <VBox/vm.h>
34#include <VBox/dis.h>
35
36#include <VBox/err.h>
37#include <VBox/param.h>
38#include <iprt/assert.h>
39#include <iprt/alloc.h>
40#include <iprt/asm.h>
41#include <iprt/string.h>
42#include <iprt/ctype.h>
43
44
45/*******************************************************************************
46* Global Variables *
47*******************************************************************************/
/** Array of switcher definitions.
 * The VMMSWITCHER type value and the array index shall match!
 * Entries that are not built for the current host architecture are NULL and
 * make VMMR3SelectSwitcher() return VERR_NOT_IMPLEMENTED.
 */
static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#ifndef RT_ARCH_AMD64
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    &vmmR3Switcher32BitToAMD64_Def,
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    &vmmR3SwitcherPAEToAMD64_Def,
    NULL, //&vmmR3SwitcherAMD64To32Bit_Def,
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    &vmmR3SwitcherAMD64ToPAE_Def,
# else
    NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
# endif
    NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
#else  /* RT_ARCH_AMD64 */
    NULL, //&vmmR3Switcher32BitTo32Bit_Def,
    NULL, //&vmmR3Switcher32BitToPAE_Def,
    NULL, //&vmmR3Switcher32BitToAMD64_Def,
    NULL, //&vmmR3SwitcherPAETo32Bit_Def,
    NULL, //&vmmR3SwitcherPAEToPAE_Def,
    NULL, //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64To32Bit_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
#endif /* RT_ARCH_AMD64 */
};
80
81
/**
 * VMMR3Init worker that initiates the switcher code (aka core code).
 *
 * This is core per VM code which might need fixups and/or for ease of use are
 * put on linear contiguous backing.
 *
 * On success pvCoreCodeR3/R0, HCPhysCoreCode, cbCoreCode and pvCoreCodeRC in
 * pVM->vmm.s are all valid; on failure they are reset to NULL/nil values.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 */
int vmmR3SwitcherInit(PVM pVM)
{
    /*
     * Calc the size.
     */
    unsigned cbCoreCode = 0;
    for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
    {
        /* Record where this switcher will live inside the core code blob. */
        pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
        if (pSwitcher)
        {
            AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
            /* One byte of padding, then 32-byte align each switcher. */
            cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
        }
    }

    /*
     * Allocate contiguous pages for the switchers and deal with
     * conflicts in the intermediate mapping of the code.
     */
    pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
    pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    int rc = VERR_NO_MEMORY;
    if (pVM->vmm.s.pvCoreCodeR3)
    {
        rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
        if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
        {
            /* try more allocations - Solaris, Linux. */
            /* Keep every conflicting allocation alive until we succeed, so the
               allocator cannot hand the same conflicting range back to us. */
            const unsigned cTries = 8234;
            struct VMMInitBadTry
            {
                RTR0PTR  pvR0;   /* ring-0 address of a parked (conflicting) allocation */
                void    *pvR3;   /* ring-3 address of the same allocation */
                RTHCPHYS HCPhys; /* its physical address */
                RTUINT   cb;     /* NOTE(review): only set in the final-failure path; freed with the shared cbCoreCode-sized value below - confirm */
            } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
            AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
            unsigned i = 0;
            do
            {
                /* Park the conflicting allocation and try a fresh one. */
                paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                i++;
                pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
                pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
                pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
                if (!pVM->vmm.s.pvCoreCodeR3)
                    break;
                rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
            } while (   rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
                     && i < cTries - 1);

            /* cleanup: record the last attempt on failure, then free and log
               every parked allocation (the successful one, if any, is kept). */
            if (RT_FAILURE(rc))
            {
                paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                paBadTries[i].cb = pVM->vmm.s.cbCoreCode;
                i++;
                LogRel(("Failed to allocated and map core code: rc=%Rrc\n", rc));
            }
            while (i-- > 0)
            {
                LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%RHp\n",
                        i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
                SUPContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
            }
            RTMemTmpFree(paBadTries);
        }
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Copy the code.
         */
        for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
        {
            PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
            if (pSwitcher)
                memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
                       pSwitcher->pvCode, pSwitcher->cbCode);
        }

        /*
         * Map the code into the GC address space.
         */
        RTGCPTR GCPtr;
        rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode,
                                cbCoreCode, "Core Code", &GCPtr);
        if (RT_SUCCESS(rc))
        {
            pVM->vmm.s.pvCoreCodeRC = GCPtr;
            /* Guard page after the core code in the hypervisor area. */
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
            LogRel(("CoreCode: R3=%RHv R0=%RHv RC=%RRv Phys=%RHp cb=%#x\n",
                    pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));

            /*
             * Finally, PGM probably has selected a switcher already but we need
             * to get the routine addresses, so we'll reselect it.
             * This may legally fail so, we're ignoring the rc.
             */
            VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
            return rc;
        }

        /* Hyper mapping failed; drop the core code allocation. */
        AssertMsgFailed(("PGMR3Map(,%RRv, %RHp, %#x, 0) failed with rc=%Rrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
        SUPContFree(pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
    }
    else
        VMSetError(pVM, rc, RT_SRC_POS,
                   N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
                   cbCoreCode);

    /* Failure: reset all core code state. */
    pVM->vmm.s.pvCoreCodeR3 = NULL;
    pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
    pVM->vmm.s.pvCoreCodeRC = 0;
    return rc;
}
214
/**
 * Relocate the switchers, called by VMMR3Relocate.
 *
 * Re-runs every switcher's fixup routine and recalculates the cached RC
 * entrypoints for the currently selected switcher.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   offDelta    The relocation delta.  NOTE(review): not used directly
 *                      here; the new addresses are read back from pVM->vmm.s -
 *                      confirm against VMMR3Relocate.
 */
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
{
    /*
     * Relocate all the switchers.
     */
    for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
    {
        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
        if (pSwitcher && pSwitcher->pfnRelocate)
        {
            /* Each switcher is patched in place at its offset in the core code blob. */
            unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
            pSwitcher->pfnRelocate(pVM,
                                   pSwitcher,
                                   pVM->vmm.s.pvCoreCodeR0 + off,
                                   (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
                                   pVM->vmm.s.pvCoreCodeRC + off,
                                   pVM->vmm.s.HCPhysCoreCode + off);
        }
    }

    /*
     * Recalc the RC address for the current switcher.
     * NOTE(review): assumes s_apSwitchers[enmSwitcher] is non-NULL, i.e. that
     * a valid switcher was selected earlier - confirm callers guarantee this.
     */
    PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
    RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
    pVM->vmm.s.pfnGuestToHostRC = RCPtr + pSwitcher->offGCGuestToHost;
    pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offGCCallTrampoline;
    pVM->pfnVMMGCGuestToHostAsm = RCPtr + pSwitcher->offGCGuestToHostAsm;
    pVM->pfnVMMGCGuestToHostAsmHyperCtx = RCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
    pVM->pfnVMMGCGuestToHostAsmGuestCtx = RCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;

//    AssertFailed();
}
254
255
256/**
257 * Generic switcher code relocator.
258 *
259 * @param pVM The VM handle.
260 * @param pSwitcher The switcher definition.
261 * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping.
262 * @param R0PtrCode Pointer to the core code block for the switcher, ring-0 mapping.
263 * @param GCPtrCode The guest context address corresponding to pu8Code.
264 * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code.
265 * @param SelCS The hypervisor CS selector.
266 * @param SelDS The hypervisor DS selector.
267 * @param SelTSS The hypervisor TSS selector.
268 * @param GCPtrGDT The GC address of the hypervisor GDT.
269 * @param SelCS64 The 64-bit mode hypervisor CS selector.
270 */
271static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
272 RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
273{
274 union
275 {
276 const uint8_t *pu8;
277 const uint16_t *pu16;
278 const uint32_t *pu32;
279 const uint64_t *pu64;
280 const void *pv;
281 uintptr_t u;
282 } u;
283 u.pv = pSwitcher->pvFixups;
284
285 /*
286 * Process fixups.
287 */
288 uint8_t u8;
289 while ((u8 = *u.pu8++) != FIX_THE_END)
290 {
291 /*
292 * Get the source (where to write the fixup).
293 */
294 uint32_t offSrc = *u.pu32++;
295 Assert(offSrc < pSwitcher->cbCode);
296 union
297 {
298 uint8_t *pu8;
299 uint16_t *pu16;
300 uint32_t *pu32;
301 uint64_t *pu64;
302 uintptr_t u;
303 } uSrc;
304 uSrc.pu8 = pu8CodeR3 + offSrc;
305
306 /* The fixup target and method depends on the type. */
307 switch (u8)
308 {
309 /*
310 * 32-bit relative, source in HC and target in GC.
311 */
312 case FIX_HC_2_GC_NEAR_REL:
313 {
314 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
315 uint32_t offTrg = *u.pu32++;
316 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
317 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
318 break;
319 }
320
321 /*
322 * 32-bit relative, source in HC and target in ID.
323 */
324 case FIX_HC_2_ID_NEAR_REL:
325 {
326 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
327 uint32_t offTrg = *u.pu32++;
328 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
329 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (R0PtrCode + offSrc + 4));
330 break;
331 }
332
333 /*
334 * 32-bit relative, source in GC and target in HC.
335 */
336 case FIX_GC_2_HC_NEAR_REL:
337 {
338 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
339 uint32_t offTrg = *u.pu32++;
340 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
341 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (GCPtrCode + offSrc + 4));
342 break;
343 }
344
345 /*
346 * 32-bit relative, source in GC and target in ID.
347 */
348 case FIX_GC_2_ID_NEAR_REL:
349 {
350 AssertMsg(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode, ("%x - %x < %x\n", offSrc, pSwitcher->offGCCode, pSwitcher->cbGCCode));
351 uint32_t offTrg = *u.pu32++;
352 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
353 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
354 break;
355 }
356
357 /*
358 * 32-bit relative, source in ID and target in HC.
359 */
360 case FIX_ID_2_HC_NEAR_REL:
361 {
362 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
363 uint32_t offTrg = *u.pu32++;
364 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
365 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (u32IDCode + offSrc + 4));
366 break;
367 }
368
369 /*
370 * 32-bit relative, source in ID and target in HC.
371 */
372 case FIX_ID_2_GC_NEAR_REL:
373 {
374 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
375 uint32_t offTrg = *u.pu32++;
376 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
377 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
378 break;
379 }
380
381 /*
382 * 16:32 far jump, target in GC.
383 */
384 case FIX_GC_FAR32:
385 {
386 uint32_t offTrg = *u.pu32++;
387 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
388 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
389 *uSrc.pu16++ = SelCS;
390 break;
391 }
392
393 /*
394 * Make 32-bit GC pointer given CPUM offset.
395 */
396 case FIX_GC_CPUM_OFF:
397 {
398 uint32_t offCPUM = *u.pu32++;
399 Assert(offCPUM < sizeof(pVM->cpum));
400 *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
401 break;
402 }
403
404 /*
405 * Make 32-bit GC pointer given VM offset.
406 */
407 case FIX_GC_VM_OFF:
408 {
409 uint32_t offVM = *u.pu32++;
410 Assert(offVM < sizeof(VM));
411 *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, pVM) + offVM);
412 break;
413 }
414
415 /*
416 * Make 32-bit HC pointer given CPUM offset.
417 */
418 case FIX_HC_CPUM_OFF:
419 {
420 uint32_t offCPUM = *u.pu32++;
421 Assert(offCPUM < sizeof(pVM->cpum));
422 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
423 break;
424 }
425
426 /*
427 * Make 32-bit R0 pointer given VM offset.
428 */
429 case FIX_HC_VM_OFF:
430 {
431 uint32_t offVM = *u.pu32++;
432 Assert(offVM < sizeof(VM));
433 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
434 break;
435 }
436
437 /*
438 * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
439 */
440 case FIX_INTER_32BIT_CR3:
441 {
442
443 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
444 break;
445 }
446
447 /*
448 * Store the PAE CR3 (32-bit) for the intermediate memory context.
449 */
450 case FIX_INTER_PAE_CR3:
451 {
452
453 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
454 break;
455 }
456
457 /*
458 * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
459 */
460 case FIX_INTER_AMD64_CR3:
461 {
462
463 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
464 break;
465 }
466
467 /*
468 * Store Hypervisor CS (16-bit).
469 */
470 case FIX_HYPER_CS:
471 {
472 *uSrc.pu16 = SelCS;
473 break;
474 }
475
476 /*
477 * Store Hypervisor DS (16-bit).
478 */
479 case FIX_HYPER_DS:
480 {
481 *uSrc.pu16 = SelDS;
482 break;
483 }
484
485 /*
486 * Store Hypervisor TSS (16-bit).
487 */
488 case FIX_HYPER_TSS:
489 {
490 *uSrc.pu16 = SelTSS;
491 break;
492 }
493
494 /*
495 * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
496 */
497 case FIX_GC_TSS_GDTE_DW2:
498 {
499 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
500 *uSrc.pu32 = (uint32_t)GCPtr;
501 break;
502 }
503
504
505 ///@todo case FIX_CR4_MASK:
506 ///@todo case FIX_CR4_OSFSXR:
507
508 /*
509 * Insert relative jump to specified target it FXSAVE/FXRSTOR isn't supported by the cpu.
510 */
511 case FIX_NO_FXSAVE_JMP:
512 {
513 uint32_t offTrg = *u.pu32++;
514 Assert(offTrg < pSwitcher->cbCode);
515 if (!CPUMSupportsFXSR(pVM))
516 {
517 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
518 *uSrc.pu32++ = offTrg - (offSrc + 5);
519 }
520 else
521 {
522 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
523 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
524 }
525 break;
526 }
527
528 /*
529 * Insert relative jump to specified target it SYSENTER isn't used by the host.
530 */
531 case FIX_NO_SYSENTER_JMP:
532 {
533 uint32_t offTrg = *u.pu32++;
534 Assert(offTrg < pSwitcher->cbCode);
535 if (!CPUMIsHostUsingSysEnter(pVM))
536 {
537 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
538 *uSrc.pu32++ = offTrg - (offSrc + 5);
539 }
540 else
541 {
542 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
543 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
544 }
545 break;
546 }
547
548 /*
549 * Insert relative jump to specified target it SYSENTER isn't used by the host.
550 */
551 case FIX_NO_SYSCALL_JMP:
552 {
553 uint32_t offTrg = *u.pu32++;
554 Assert(offTrg < pSwitcher->cbCode);
555 if (!CPUMIsHostUsingSysEnter(pVM))
556 {
557 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
558 *uSrc.pu32++ = offTrg - (offSrc + 5);
559 }
560 else
561 {
562 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
563 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
564 }
565 break;
566 }
567
568 /*
569 * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
570 */
571 case FIX_HC_32BIT:
572 {
573 uint32_t offTrg = *u.pu32++;
574 Assert(offSrc < pSwitcher->cbCode);
575 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
576 *uSrc.pu32 = R0PtrCode + offTrg;
577 break;
578 }
579
580#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
581 /*
582 * 64-bit HC Code Selector (no argument).
583 */
584 case FIX_HC_64BIT_CS:
585 {
586 Assert(offSrc < pSwitcher->cbCode);
587# if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
588 *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
589# else
590 AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
591# endif
592 break;
593 }
594
595 /*
596 * 64-bit HC pointer to the CPUM instance data (no argument).
597 */
598 case FIX_HC_64BIT_CPUM:
599 {
600 Assert(offSrc < pSwitcher->cbCode);
601 *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
602 break;
603 }
604#endif
605 /*
606 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
607 */
608 case FIX_HC_64BIT:
609 {
610 uint32_t offTrg = *u.pu32++;
611 Assert(offSrc < pSwitcher->cbCode);
612 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
613 *uSrc.pu64 = R0PtrCode + offTrg;
614 break;
615 }
616
617#ifdef RT_ARCH_X86
618 case FIX_GC_64_BIT_CPUM_OFF:
619 {
620 uint32_t offCPUM = *u.pu32++;
621 Assert(offCPUM < sizeof(pVM->cpum));
622 *uSrc.pu64 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
623 break;
624 }
625#endif
626
627 /*
628 * 32-bit ID pointer to (ID) target within the code (32-bit offset).
629 */
630 case FIX_ID_32BIT:
631 {
632 uint32_t offTrg = *u.pu32++;
633 Assert(offSrc < pSwitcher->cbCode);
634 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
635 *uSrc.pu32 = u32IDCode + offTrg;
636 break;
637 }
638
639 /*
640 * 64-bit ID pointer to (ID) target within the code (32-bit offset).
641 */
642 case FIX_ID_64BIT:
643 case FIX_HC_64BIT_NOCHECK:
644 {
645 uint32_t offTrg = *u.pu32++;
646 Assert(offSrc < pSwitcher->cbCode);
647 Assert(u8 == FIX_HC_64BIT_NOCHECK || offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
648 *uSrc.pu64 = u32IDCode + offTrg;
649 break;
650 }
651
652 /*
653 * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
654 */
655 case FIX_ID_FAR32_TO_64BIT_MODE:
656 {
657 uint32_t offTrg = *u.pu32++;
658 Assert(offSrc < pSwitcher->cbCode);
659 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
660 *uSrc.pu32++ = u32IDCode + offTrg;
661 *uSrc.pu16 = SelCS64;
662 AssertRelease(SelCS64);
663 break;
664 }
665
666#ifdef VBOX_WITH_NMI
667 /*
668 * 32-bit address to the APIC base.
669 */
670 case FIX_GC_APIC_BASE_32BIT:
671 {
672 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
673 break;
674 }
675#endif
676
677 default:
678 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
679 break;
680 }
681 }
682
683#ifdef LOG_ENABLED
684 /*
685 * If Log2 is enabled disassemble the switcher code.
686 *
687 * The switcher code have 1-2 HC parts, 1 GC part and 0-2 ID parts.
688 */
689 if (LogIs2Enabled())
690 {
691 RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
692 " R0PtrCode = %p\n"
693 " pu8CodeR3 = %p\n"
694 " GCPtrCode = %RGv\n"
695 " u32IDCode = %08x\n"
696 " pVMRC = %RRv\n"
697 " pCPUMRC = %RRv\n"
698 " pVMR3 = %p\n"
699 " pCPUMR3 = %p\n"
700 " GCPtrGDT = %RGv\n"
701 " InterCR3s = %08RHp, %08RHp, %08RHp (32-Bit, PAE, AMD64)\n"
702 " HyperCR3s = %08RHp, %08RHp, %08RHp (32-Bit, PAE, AMD64)\n"
703 " SelCS = %04x\n"
704 " SelDS = %04x\n"
705 " SelCS64 = %04x\n"
706 " SelTSS = %04x\n",
707 pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
708 R0PtrCode,
709 pu8CodeR3,
710 GCPtrCode,
711 u32IDCode,
712 VM_RC_ADDR(pVM, pVM),
713 VM_RC_ADDR(pVM, &pVM->cpum),
714 pVM,
715 &pVM->cpum,
716 GCPtrGDT,
717 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
718#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
719 /* @todo No need for three GetHyper calls; one and the same base is used */
720#endif
721 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
722 SelCS, SelDS, SelCS64, SelTSS);
723
724 uint32_t offCode = 0;
725 while (offCode < pSwitcher->cbCode)
726 {
727 /*
728 * Figure out where this is.
729 */
730 const char *pszDesc = NULL;
731 RTUINTPTR uBase;
732 uint32_t cbCode;
733 if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
734 {
735 pszDesc = "HCCode0";
736 uBase = R0PtrCode;
737 offCode = pSwitcher->offHCCode0;
738 cbCode = pSwitcher->cbHCCode0;
739 }
740 else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
741 {
742 pszDesc = "HCCode1";
743 uBase = R0PtrCode;
744 offCode = pSwitcher->offHCCode1;
745 cbCode = pSwitcher->cbHCCode1;
746 }
747 else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
748 {
749 pszDesc = "GCCode";
750 uBase = GCPtrCode;
751 offCode = pSwitcher->offGCCode;
752 cbCode = pSwitcher->cbGCCode;
753 }
754 else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
755 {
756 pszDesc = "IDCode0";
757 uBase = u32IDCode;
758 offCode = pSwitcher->offIDCode0;
759 cbCode = pSwitcher->cbIDCode0;
760 }
761 else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
762 {
763 pszDesc = "IDCode1";
764 uBase = u32IDCode;
765 offCode = pSwitcher->offIDCode1;
766 cbCode = pSwitcher->cbIDCode1;
767 }
768 else
769 {
770 RTLogPrintf(" %04x: %02x '%c' (nowhere)\n",
771 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
772 offCode++;
773 continue;
774 }
775
776 /*
777 * Disassemble it.
778 */
779 RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
780 DISCPUSTATE Cpu;
781
782 memset(&Cpu, 0, sizeof(Cpu));
783 Cpu.mode = CPUMODE_32BIT;
784 while (cbCode > 0)
785 {
786 /* try label it */
787 if (pSwitcher->offR0HostToGuest == offCode)
788 RTLogPrintf(" *R0HostToGuest:\n");
789 if (pSwitcher->offGCGuestToHost == offCode)
790 RTLogPrintf(" *GCGuestToHost:\n");
791 if (pSwitcher->offGCCallTrampoline == offCode)
792 RTLogPrintf(" *GCCallTrampoline:\n");
793 if (pSwitcher->offGCGuestToHostAsm == offCode)
794 RTLogPrintf(" *GCGuestToHostAsm:\n");
795 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
796 RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
797 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
798 RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
799
800 /* disas */
801 uint32_t cbInstr = 0;
802 char szDisas[256];
803 if (RT_SUCCESS(DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas)))
804 RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
805 else
806 {
807 RTLogPrintf(" %04x: %02x '%c'\n",
808 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
809 cbInstr = 1;
810 }
811 offCode += cbInstr;
812 cbCode -= RT_MIN(cbInstr, cbCode);
813 }
814 }
815 }
816#endif
817}
818
819
820/**
821 * Relocator for the 32-Bit to 32-Bit world switcher.
822 */
823DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
824{
825 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
826 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
827}
828
829
830/**
831 * Relocator for the 32-Bit to PAE world switcher.
832 */
833DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
834{
835 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
836 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
837}
838
839
840/**
841 * Relocator for the 32-Bit to AMD64 world switcher.
842 */
843DECLCALLBACK(void) vmmR3Switcher32BitToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
844{
845 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
846 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
847}
848
849
850/**
851 * Relocator for the PAE to 32-Bit world switcher.
852 */
853DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
854{
855 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
856 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
857}
858
859
860/**
861 * Relocator for the PAE to PAE world switcher.
862 */
863DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
864{
865 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
866 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
867}
868
869/**
870 * Relocator for the PAE to AMD64 world switcher.
871 */
872DECLCALLBACK(void) vmmR3SwitcherPAEToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
873{
874 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
875 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
876}
877
878
879/**
880 * Relocator for the AMD64 to 32-bit world switcher.
881 */
882DECLCALLBACK(void) vmmR3SwitcherAMD64To32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
883{
884 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
885 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
886}
887
888
889/**
890 * Relocator for the AMD64 to PAE world switcher.
891 */
892DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
893{
894 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
895 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
896}
897
898
899/**
900 * Selects the switcher to be used for switching to GC.
901 *
902 * @returns VBox status code.
903 * @param pVM VM handle.
904 * @param enmSwitcher The new switcher.
905 * @remark This function may be called before the VMM is initialized.
906 */
907VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
908{
909 /*
910 * Validate input.
911 */
912 if ( enmSwitcher < VMMSWITCHER_INVALID
913 || enmSwitcher >= VMMSWITCHER_MAX)
914 {
915 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
916 return VERR_INVALID_PARAMETER;
917 }
918
919 /* Do nothing if the switcher is disabled. */
920 if (pVM->vmm.s.fSwitcherDisabled)
921 return VINF_SUCCESS;
922
923 /*
924 * Select the new switcher.
925 */
926 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
927 if (pSwitcher)
928 {
929 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
930 pVM->vmm.s.enmSwitcher = enmSwitcher;
931
932 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
933 pVM->vmm.s.pfnHostToGuestR0 = pbCodeR0 + pSwitcher->offR0HostToGuest;
934
935 RTGCPTR GCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
936 pVM->vmm.s.pfnGuestToHostRC = GCPtr + pSwitcher->offGCGuestToHost;
937 pVM->vmm.s.pfnCallTrampolineRC = GCPtr + pSwitcher->offGCCallTrampoline;
938 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
939 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
940 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
941 return VINF_SUCCESS;
942 }
943
944 return VERR_NOT_IMPLEMENTED;
945}
946
947
/**
 * Disable the switcher logic permanently.
 *
 * Once set, VMMR3SelectSwitcher() becomes a no-op returning VINF_SUCCESS;
 * there is no API to re-enable the switcher.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM     VM handle.
 */
VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
{
/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
 * @code
 *   mov eax, VERR_INTERNAL_ERROR
 *   ret
 * @endcode
 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
 */
    pVM->vmm.s.fSwitcherDisabled = true;
    return VINF_SUCCESS;
}
966
967/**
968 * Gets the switcher to be used for switching to GC.
969 *
970 * @returns host to guest ring 0 switcher entrypoint
971 * @param pVM VM handle.
972 * @param enmSwitcher The new switcher.
973 */
974VMMR3DECL(RTR0PTR) VMMR3GetHostToGuestSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
975{
976 /*
977 * Validate input.
978 */
979 if ( enmSwitcher < VMMSWITCHER_INVALID
980 || enmSwitcher >= VMMSWITCHER_MAX)
981 {
982 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
983 return VERR_INVALID_PARAMETER;
984 }
985
986 /*
987 * Select the new switcher.
988 */
989 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
990 if (pSwitcher)
991 {
992 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
993 return pbCodeR0 + pSwitcher->offR0HostToGuest;
994 }
995 return (RTR0PTR)0;
996}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette