VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher.cpp@ 13798

Last change on this file since 13798 was 13798, checked in by vboxsync, 16 years ago

VMM: Split out the switcher code from VMM.cpp and into VMMSwitcher.cpp.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 34.7 KB
Line 
1/* $Id: VMMSwitcher.cpp 13798 2008-11-04 18:57:19Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor, World Switcher(s).
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VMM
26#include <VBox/vmm.h>
27#include <VBox/vmapi.h>
28#include <VBox/pgm.h>
29#include <VBox/cfgm.h>
30#include <VBox/pdmqueue.h>
31#include <VBox/pdmapi.h>
32#include <VBox/cpum.h>
33#include <VBox/mm.h>
34#include <VBox/iom.h>
35#include <VBox/trpm.h>
36#include <VBox/selm.h>
37#include <VBox/em.h>
38#include <VBox/sup.h>
39#include <VBox/dbgf.h>
40#include <VBox/csam.h>
41#include <VBox/patm.h>
42#include <VBox/rem.h>
43#include <VBox/ssm.h>
44#include <VBox/tm.h>
45#include "VMMInternal.h"
46#include "VMMSwitcher/VMMSwitcher.h"
47#include <VBox/vm.h>
48
49#include <VBox/err.h>
50#include <VBox/param.h>
51#include <VBox/version.h>
52#include <VBox/x86.h>
53#include <VBox/hwaccm.h>
54#include <iprt/assert.h>
55#include <iprt/alloc.h>
56#include <iprt/asm.h>
57#include <iprt/time.h>
58#include <iprt/stream.h>
59#include <iprt/string.h>
60#include <iprt/stdarg.h>
61#include <iprt/ctype.h>
62
63
64
65/** The saved state version. */
66#define VMM_SAVED_STATE_VERSION 3
67
68
69/*******************************************************************************
70* Global Variables *
71*******************************************************************************/
/** Array of switcher definitions.
 * The switcher type (VMMSWITCHER) and the array index shall match!
 * NULL entries are switchers that aren't built for the current host
 * architecture (or aren't implemented yet).
 */
static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#ifndef RT_ARCH_AMD64
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    NULL, //&vmmR3Switcher32BitToAMD64_Def,
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    NULL, //&vmmR3SwitcherPAEToAMD64_Def,
# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL /* (sic - the misspelling is the actual config macro name) */
    &vmmR3SwitcherAMD64ToPAE_Def,
# else
    NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
# endif
    NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
#else  /* RT_ARCH_AMD64 */
    NULL, //&vmmR3Switcher32BitTo32Bit_Def,
    NULL, //&vmmR3Switcher32BitToPAE_Def,
    NULL, //&vmmR3Switcher32BitToAMD64_Def,
    NULL, //&vmmR3SwitcherPAETo32Bit_Def,
    NULL, //&vmmR3SwitcherPAEToPAE_Def,
    NULL, //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
#endif /* RT_ARCH_AMD64 */
};
102
103
104/**
105 * VMMR3Init worker that initiates the switcher code (aka core code).
106 *
107 * This is core per VM code which might need fixups and/or for ease of use are
108 * put on linear contiguous backing.
109 *
110 * @returns VBox status code.
111 * @param pVM Pointer to the shared VM structure.
112 */
113int vmmR3SwitcherInit(PVM pVM)
114{
115 /*
116 * Calc the size.
117 */
118 unsigned cbCoreCode = 0;
119 for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
120 {
121 pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
122 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
123 if (pSwitcher)
124 {
125 AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
126 cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
127 }
128 }
129
130 /*
131 * Allocate continguous pages for switchers and deal with
132 * conflicts in the intermediate mapping of the code.
133 */
134 pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
135 pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
136 int rc = VERR_NO_MEMORY;
137 if (pVM->vmm.s.pvCoreCodeR3)
138 {
139 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
140 if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
141 {
142 /* try more allocations - Solaris, Linux. */
143 const unsigned cTries = 8234;
144 struct VMMInitBadTry
145 {
146 RTR0PTR pvR0;
147 void *pvR3;
148 RTHCPHYS HCPhys;
149 RTUINT cb;
150 } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
151 AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
152 unsigned i = 0;
153 do
154 {
155 paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
156 paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
157 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
158 i++;
159 pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
160 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
161 pVM->vmm.s.pvCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
162 if (!pVM->vmm.s.pvCoreCodeR3)
163 break;
164 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
165 } while ( rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
166 && i < cTries - 1);
167
168 /* cleanup */
169 if (VBOX_FAILURE(rc))
170 {
171 paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
172 paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
173 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
174 paBadTries[i].cb = pVM->vmm.s.cbCoreCode;
175 i++;
176 LogRel(("Failed to allocated and map core code: rc=%Vrc\n", rc));
177 }
178 while (i-- > 0)
179 {
180 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
181 i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
182 SUPContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
183 }
184 RTMemTmpFree(paBadTries);
185 }
186 }
187 if (VBOX_SUCCESS(rc))
188 {
189 /*
190 * copy the code.
191 */
192 for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
193 {
194 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
195 if (pSwitcher)
196 memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
197 pSwitcher->pvCode, pSwitcher->cbCode);
198 }
199
200 /*
201 * Map the code into the GC address space.
202 */
203 RTGCPTR GCPtr;
204 rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &GCPtr);
205 if (VBOX_SUCCESS(rc))
206 {
207 pVM->vmm.s.pvCoreCodeRC = GCPtr;
208 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
209 LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VRv Phys=%VHp cb=%#x\n",
210 pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));
211
212 /*
213 * Finally, PGM probably have selected a switcher already but we need
214 * to get the routine addresses, so we'll reselect it.
215 * This may legally fail so, we're ignoring the rc.
216 */
217 VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
218 return rc;
219 }
220
221 /* shit */
222 AssertMsgFailed(("PGMR3Map(,%VRv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
223 SUPContFree(pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
224 }
225 else
226 VMSetError(pVM, rc, RT_SRC_POS,
227 N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
228 cbCoreCode);
229
230 pVM->vmm.s.pvCoreCodeR3 = NULL;
231 pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
232 pVM->vmm.s.pvCoreCodeRC = 0;
233 return rc;
234}
235
236
/**
 * Relocate the switchers, called by VMMR3Relocate.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   offDelta    The relocation delta. (Not referenced directly here;
 *                      the fixups are recomputed from the current mappings.)
 */
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
{
    /*
     * Relocate all the switchers: invoke each switcher's own fixup callback
     * with the R0, R3, RC and physical addresses of its slice of the blob.
     */
    for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
    {
        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
        if (pSwitcher && pSwitcher->pfnRelocate)
        {
            unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
            pSwitcher->pfnRelocate(pVM,
                                   pSwitcher,
                                   pVM->vmm.s.pvCoreCodeR0 + off,
                                   (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
                                   pVM->vmm.s.pvCoreCodeRC + off,
                                   pVM->vmm.s.HCPhysCoreCode + off);
        }
    }

    /*
     * Recalc the RC address for the current switcher.
     * NOTE(review): dereferences s_apSwitchers[enmSwitcher] without a NULL
     * check - presumably enmSwitcher always names an implemented switcher
     * by the time relocation runs; confirm against the callers.
     */
    PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
    RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
    pVM->vmm.s.pfnGuestToHostRC         = RCPtr + pSwitcher->offGCGuestToHost;
    pVM->vmm.s.pfnCallTrampolineRC      = RCPtr + pSwitcher->offGCCallTrampoline;
    pVM->pfnVMMGCGuestToHostAsm         = RCPtr + pSwitcher->offGCGuestToHostAsm;
    pVM->pfnVMMGCGuestToHostAsmHyperCtx = RCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
    pVM->pfnVMMGCGuestToHostAsmGuestCtx = RCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;

}
275
276
277/**
278 * Generic switcher code relocator.
279 *
280 * @param pVM The VM handle.
281 * @param pSwitcher The switcher definition.
282 * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping.
283 * @param R0PtrCode Pointer to the core code block for the switcher, ring-0 mapping.
284 * @param GCPtrCode The guest context address corresponding to pu8Code.
285 * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code.
286 * @param SelCS The hypervisor CS selector.
287 * @param SelDS The hypervisor DS selector.
288 * @param SelTSS The hypervisor TSS selector.
289 * @param GCPtrGDT The GC address of the hypervisor GDT.
290 * @param SelCS64 The 64-bit mode hypervisor CS selector.
291 */
292static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
293 RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
294{
295 union
296 {
297 const uint8_t *pu8;
298 const uint16_t *pu16;
299 const uint32_t *pu32;
300 const uint64_t *pu64;
301 const void *pv;
302 uintptr_t u;
303 } u;
304 u.pv = pSwitcher->pvFixups;
305
306 /*
307 * Process fixups.
308 */
309 uint8_t u8;
310 while ((u8 = *u.pu8++) != FIX_THE_END)
311 {
312 /*
313 * Get the source (where to write the fixup).
314 */
315 uint32_t offSrc = *u.pu32++;
316 Assert(offSrc < pSwitcher->cbCode);
317 union
318 {
319 uint8_t *pu8;
320 uint16_t *pu16;
321 uint32_t *pu32;
322 uint64_t *pu64;
323 uintptr_t u;
324 } uSrc;
325 uSrc.pu8 = pu8CodeR3 + offSrc;
326
327 /* The fixup target and method depends on the type. */
328 switch (u8)
329 {
330 /*
331 * 32-bit relative, source in HC and target in GC.
332 */
333 case FIX_HC_2_GC_NEAR_REL:
334 {
335 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
336 uint32_t offTrg = *u.pu32++;
337 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
338 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
339 break;
340 }
341
342 /*
343 * 32-bit relative, source in HC and target in ID.
344 */
345 case FIX_HC_2_ID_NEAR_REL:
346 {
347 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
348 uint32_t offTrg = *u.pu32++;
349 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
350 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (R0PtrCode + offSrc + 4));
351 break;
352 }
353
354 /*
355 * 32-bit relative, source in GC and target in HC.
356 */
357 case FIX_GC_2_HC_NEAR_REL:
358 {
359 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
360 uint32_t offTrg = *u.pu32++;
361 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
362 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (GCPtrCode + offSrc + 4));
363 break;
364 }
365
366 /*
367 * 32-bit relative, source in GC and target in ID.
368 */
369 case FIX_GC_2_ID_NEAR_REL:
370 {
371 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
372 uint32_t offTrg = *u.pu32++;
373 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
374 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
375 break;
376 }
377
378 /*
379 * 32-bit relative, source in ID and target in HC.
380 */
381 case FIX_ID_2_HC_NEAR_REL:
382 {
383 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
384 uint32_t offTrg = *u.pu32++;
385 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
386 *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (u32IDCode + offSrc + 4));
387 break;
388 }
389
390 /*
391 * 32-bit relative, source in ID and target in HC.
392 */
393 case FIX_ID_2_GC_NEAR_REL:
394 {
395 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
396 uint32_t offTrg = *u.pu32++;
397 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
398 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
399 break;
400 }
401
402 /*
403 * 16:32 far jump, target in GC.
404 */
405 case FIX_GC_FAR32:
406 {
407 uint32_t offTrg = *u.pu32++;
408 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
409 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
410 *uSrc.pu16++ = SelCS;
411 break;
412 }
413
414 /*
415 * Make 32-bit GC pointer given CPUM offset.
416 */
417 case FIX_GC_CPUM_OFF:
418 {
419 uint32_t offCPUM = *u.pu32++;
420 Assert(offCPUM < sizeof(pVM->cpum));
421 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
422 break;
423 }
424
425 /*
426 * Make 32-bit GC pointer given VM offset.
427 */
428 case FIX_GC_VM_OFF:
429 {
430 uint32_t offVM = *u.pu32++;
431 Assert(offVM < sizeof(VM));
432 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
433 break;
434 }
435
436 /*
437 * Make 32-bit HC pointer given CPUM offset.
438 */
439 case FIX_HC_CPUM_OFF:
440 {
441 uint32_t offCPUM = *u.pu32++;
442 Assert(offCPUM < sizeof(pVM->cpum));
443 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
444 break;
445 }
446
447 /*
448 * Make 32-bit R0 pointer given VM offset.
449 */
450 case FIX_HC_VM_OFF:
451 {
452 uint32_t offVM = *u.pu32++;
453 Assert(offVM < sizeof(VM));
454 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
455 break;
456 }
457
458 /*
459 * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
460 */
461 case FIX_INTER_32BIT_CR3:
462 {
463
464 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
465 break;
466 }
467
468 /*
469 * Store the PAE CR3 (32-bit) for the intermediate memory context.
470 */
471 case FIX_INTER_PAE_CR3:
472 {
473
474 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
475 break;
476 }
477
478 /*
479 * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
480 */
481 case FIX_INTER_AMD64_CR3:
482 {
483
484 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
485 break;
486 }
487
488 /*
489 * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
490 */
491 case FIX_HYPER_32BIT_CR3:
492 {
493
494 *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
495 break;
496 }
497
498 /*
499 * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
500 */
501 case FIX_HYPER_PAE_CR3:
502 {
503
504 *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
505 break;
506 }
507
508 /*
509 * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
510 */
511 case FIX_HYPER_AMD64_CR3:
512 {
513
514 *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
515 break;
516 }
517
518 /*
519 * Store Hypervisor CS (16-bit).
520 */
521 case FIX_HYPER_CS:
522 {
523 *uSrc.pu16 = SelCS;
524 break;
525 }
526
527 /*
528 * Store Hypervisor DS (16-bit).
529 */
530 case FIX_HYPER_DS:
531 {
532 *uSrc.pu16 = SelDS;
533 break;
534 }
535
536 /*
537 * Store Hypervisor TSS (16-bit).
538 */
539 case FIX_HYPER_TSS:
540 {
541 *uSrc.pu16 = SelTSS;
542 break;
543 }
544
545 /*
546 * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
547 */
548 case FIX_GC_TSS_GDTE_DW2:
549 {
550 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
551 *uSrc.pu32 = (uint32_t)GCPtr;
552 break;
553 }
554
555
556 ///@todo case FIX_CR4_MASK:
557 ///@todo case FIX_CR4_OSFSXR:
558
559 /*
560 * Insert relative jump to specified target it FXSAVE/FXRSTOR isn't supported by the cpu.
561 */
562 case FIX_NO_FXSAVE_JMP:
563 {
564 uint32_t offTrg = *u.pu32++;
565 Assert(offTrg < pSwitcher->cbCode);
566 if (!CPUMSupportsFXSR(pVM))
567 {
568 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
569 *uSrc.pu32++ = offTrg - (offSrc + 5);
570 }
571 else
572 {
573 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
574 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
575 }
576 break;
577 }
578
579 /*
580 * Insert relative jump to specified target it SYSENTER isn't used by the host.
581 */
582 case FIX_NO_SYSENTER_JMP:
583 {
584 uint32_t offTrg = *u.pu32++;
585 Assert(offTrg < pSwitcher->cbCode);
586 if (!CPUMIsHostUsingSysEnter(pVM))
587 {
588 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
589 *uSrc.pu32++ = offTrg - (offSrc + 5);
590 }
591 else
592 {
593 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
594 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
595 }
596 break;
597 }
598
599 /*
600 * Insert relative jump to specified target it SYSENTER isn't used by the host.
601 */
602 case FIX_NO_SYSCALL_JMP:
603 {
604 uint32_t offTrg = *u.pu32++;
605 Assert(offTrg < pSwitcher->cbCode);
606 if (!CPUMIsHostUsingSysEnter(pVM))
607 {
608 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
609 *uSrc.pu32++ = offTrg - (offSrc + 5);
610 }
611 else
612 {
613 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
614 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
615 }
616 break;
617 }
618
619 /*
620 * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
621 */
622 case FIX_HC_32BIT:
623 {
624 uint32_t offTrg = *u.pu32++;
625 Assert(offSrc < pSwitcher->cbCode);
626 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
627 *uSrc.pu32 = R0PtrCode + offTrg;
628 break;
629 }
630
631#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
632 /*
633 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
634 */
635 case FIX_HC_64BIT:
636 {
637 uint32_t offTrg = *u.pu32++;
638 Assert(offSrc < pSwitcher->cbCode);
639 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
640 *uSrc.pu64 = R0PtrCode + offTrg;
641 break;
642 }
643
644 /*
645 * 64-bit HC Code Selector (no argument).
646 */
647 case FIX_HC_64BIT_CS:
648 {
649 Assert(offSrc < pSwitcher->cbCode);
650#if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
651 *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
652#else
653 AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
654#endif
655 break;
656 }
657
658 /*
659 * 64-bit HC pointer to the CPUM instance data (no argument).
660 */
661 case FIX_HC_64BIT_CPUM:
662 {
663 Assert(offSrc < pSwitcher->cbCode);
664 *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
665 break;
666 }
667#endif
668
669 /*
670 * 32-bit ID pointer to (ID) target within the code (32-bit offset).
671 */
672 case FIX_ID_32BIT:
673 {
674 uint32_t offTrg = *u.pu32++;
675 Assert(offSrc < pSwitcher->cbCode);
676 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
677 *uSrc.pu32 = u32IDCode + offTrg;
678 break;
679 }
680
681 /*
682 * 64-bit ID pointer to (ID) target within the code (32-bit offset).
683 */
684 case FIX_ID_64BIT:
685 {
686 uint32_t offTrg = *u.pu32++;
687 Assert(offSrc < pSwitcher->cbCode);
688 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
689 *uSrc.pu64 = u32IDCode + offTrg;
690 break;
691 }
692
693 /*
694 * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
695 */
696 case FIX_ID_FAR32_TO_64BIT_MODE:
697 {
698 uint32_t offTrg = *u.pu32++;
699 Assert(offSrc < pSwitcher->cbCode);
700 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
701 *uSrc.pu32++ = u32IDCode + offTrg;
702 *uSrc.pu16 = SelCS64;
703 AssertRelease(SelCS64);
704 break;
705 }
706
707#ifdef VBOX_WITH_NMI
708 /*
709 * 32-bit address to the APIC base.
710 */
711 case FIX_GC_APIC_BASE_32BIT:
712 {
713 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
714 break;
715 }
716#endif
717
718 default:
719 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
720 break;
721 }
722 }
723
724#ifdef LOG_ENABLED
725 /*
726 * If Log2 is enabled disassemble the switcher code.
727 *
728 * The switcher code have 1-2 HC parts, 1 GC part and 0-2 ID parts.
729 */
730 if (LogIs2Enabled())
731 {
732 RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
733 " R0PtrCode = %p\n"
734 " pu8CodeR3 = %p\n"
735 " GCPtrCode = %VGv\n"
736 " u32IDCode = %08x\n"
737 " pVMGC = %VGv\n"
738 " pCPUMGC = %VGv\n"
739 " pVMHC = %p\n"
740 " pCPUMHC = %p\n"
741 " GCPtrGDT = %VGv\n"
742 " InterCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
743 " HyperCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
744 " SelCS = %04x\n"
745 " SelDS = %04x\n"
746 " SelCS64 = %04x\n"
747 " SelTSS = %04x\n",
748 pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
749 R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
750 VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
751 GCPtrGDT,
752 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
753 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
754 SelCS, SelDS, SelCS64, SelTSS);
755
756 uint32_t offCode = 0;
757 while (offCode < pSwitcher->cbCode)
758 {
759 /*
760 * Figure out where this is.
761 */
762 const char *pszDesc = NULL;
763 RTUINTPTR uBase;
764 uint32_t cbCode;
765 if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
766 {
767 pszDesc = "HCCode0";
768 uBase = R0PtrCode;
769 offCode = pSwitcher->offHCCode0;
770 cbCode = pSwitcher->cbHCCode0;
771 }
772 else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
773 {
774 pszDesc = "HCCode1";
775 uBase = R0PtrCode;
776 offCode = pSwitcher->offHCCode1;
777 cbCode = pSwitcher->cbHCCode1;
778 }
779 else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
780 {
781 pszDesc = "GCCode";
782 uBase = GCPtrCode;
783 offCode = pSwitcher->offGCCode;
784 cbCode = pSwitcher->cbGCCode;
785 }
786 else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
787 {
788 pszDesc = "IDCode0";
789 uBase = u32IDCode;
790 offCode = pSwitcher->offIDCode0;
791 cbCode = pSwitcher->cbIDCode0;
792 }
793 else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
794 {
795 pszDesc = "IDCode1";
796 uBase = u32IDCode;
797 offCode = pSwitcher->offIDCode1;
798 cbCode = pSwitcher->cbIDCode1;
799 }
800 else
801 {
802 RTLogPrintf(" %04x: %02x '%c' (nowhere)\n",
803 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
804 offCode++;
805 continue;
806 }
807
808 /*
809 * Disassemble it.
810 */
811 RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
812 DISCPUSTATE Cpu;
813
814 memset(&Cpu, 0, sizeof(Cpu));
815 Cpu.mode = CPUMODE_32BIT;
816 while (cbCode > 0)
817 {
818 /* try label it */
819 if (pSwitcher->offR0HostToGuest == offCode)
820 RTLogPrintf(" *R0HostToGuest:\n");
821 if (pSwitcher->offGCGuestToHost == offCode)
822 RTLogPrintf(" *GCGuestToHost:\n");
823 if (pSwitcher->offGCCallTrampoline == offCode)
824 RTLogPrintf(" *GCCallTrampoline:\n");
825 if (pSwitcher->offGCGuestToHostAsm == offCode)
826 RTLogPrintf(" *GCGuestToHostAsm:\n");
827 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
828 RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
829 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
830 RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
831
832 /* disas */
833 uint32_t cbInstr = 0;
834 char szDisas[256];
835 if (RT_SUCCESS(DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas)))
836 RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
837 else
838 {
839 RTLogPrintf(" %04x: %02x '%c'\n",
840 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
841 cbInstr = 1;
842 }
843 offCode += cbInstr;
844 cbCode -= RT_MIN(cbInstr, cbCode);
845 }
846 }
847 }
848#endif
849}
850
851
/**
 * Relocator for the 32-Bit to 32-Bit world switcher.
 * Thin wrapper: forwards to the generic relocator with the hypervisor
 * selectors/GDT; SelCS64 is 0 as no 64-bit mode code is involved.
 */
DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}
860
861
/**
 * Relocator for the 32-Bit to PAE world switcher.
 * Thin wrapper: forwards to the generic relocator with the hypervisor
 * selectors/GDT; SelCS64 is 0 as no 64-bit mode code is involved.
 */
DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}
870
871
/**
 * Relocator for the PAE to 32-Bit world switcher.
 * Thin wrapper: forwards to the generic relocator with the hypervisor
 * selectors/GDT; SelCS64 is 0 as no 64-bit mode code is involved.
 */
DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}
880
881
/**
 * Relocator for the PAE to PAE world switcher.
 * Thin wrapper: forwards to the generic relocator with the hypervisor
 * selectors/GDT; SelCS64 is 0 as no 64-bit mode code is involved.
 */
DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}
890
891
/**
 * Relocator for the AMD64 to PAE world switcher.
 * Thin wrapper: forwards to the generic relocator; unlike the 32-bit
 * switchers this one also passes the 64-bit hypervisor CS selector.
 */
DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}
900
901
902/**
903 * Selects the switcher to be used for switching to GC.
904 *
905 * @returns VBox status code.
906 * @param pVM VM handle.
907 * @param enmSwitcher The new switcher.
908 * @remark This function may be called before the VMM is initialized.
909 */
910VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
911{
912 /*
913 * Validate input.
914 */
915 if ( enmSwitcher < VMMSWITCHER_INVALID
916 || enmSwitcher >= VMMSWITCHER_MAX)
917 {
918 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
919 return VERR_INVALID_PARAMETER;
920 }
921
922 /* Do nothing if the switcher is disabled. */
923 if (pVM->vmm.s.fSwitcherDisabled)
924 return VINF_SUCCESS;
925
926 /*
927 * Select the new switcher.
928 */
929 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
930 if (pSwitcher)
931 {
932 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
933 pVM->vmm.s.enmSwitcher = enmSwitcher;
934
935 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
936 pVM->vmm.s.pfnHostToGuestR0 = pbCodeR0 + pSwitcher->offR0HostToGuest;
937
938 RTGCPTR GCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
939 pVM->vmm.s.pfnGuestToHostRC = GCPtr + pSwitcher->offGCGuestToHost;
940 pVM->vmm.s.pfnCallTrampolineRC = GCPtr + pSwitcher->offGCCallTrampoline;
941 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
942 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
943 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
944 return VINF_SUCCESS;
945 }
946
947 return VERR_NOT_IMPLEMENTED;
948}
949
950
/**
 * Disable the switcher logic permanently.
 *
 * Once set, VMMR3SelectSwitcher becomes a no-op returning VINF_SUCCESS;
 * there is no way to re-enable the switcher for this VM.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM     VM handle.
 */
VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
{
/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
 * @code
 *       mov eax, VERR_INTERNAL_ERROR
 *       ret
 * @endcode
 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
 */
    pVM->vmm.s.fSwitcherDisabled = true;
    return VINF_SUCCESS;
}
969
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette