VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp@ 45745

Last change on this file since 45745 was 45726, checked in by vboxsync, 12 years ago

VMMSwitcher.cpp: We never include internal headers of other components.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 38.5 KB
Line 
1/* $Id: VMMSwitcher.cpp 45726 2013-04-25 10:21:25Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor, World Switcher(s).
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/hm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/sup.h>
29#include "VMMInternal.h"
30#include "VMMSwitcher.h"
31#include <VBox/vmm/vm.h>
32#include <VBox/dis.h>
33
34#include <VBox/err.h>
35#include <VBox/param.h>
36#include <iprt/assert.h>
37#include <iprt/alloc.h>
38#include <iprt/asm.h>
39#include <iprt/asm-amd64-x86.h>
40#include <iprt/string.h>
41#include <iprt/ctype.h>
42
43
44/*******************************************************************************
45* Global Variables *
46*******************************************************************************/
/** Array of switcher definitions used for raw-mode execution.
 * Indexed by VMMSWITCHER; each non-NULL entry's enmType shall match its index
 * (asserted in vmmR3SwitcherInit).
 */
static PVMMSWITCHERDEF g_apRawModeSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#ifdef VBOX_WITH_RAW_MODE
# ifndef RT_ARCH_AMD64
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    &vmmR3Switcher32BitToAMD64_Def,
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    &vmmR3SwitcherPAEToAMD64_Def,
    NULL, //&vmmR3SwitcherAMD64To32Bit_Def,
#  ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    &vmmR3SwitcherAMD64ToPAE_Def,
#  else
    NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
#  endif
    NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
# else  /* RT_ARCH_AMD64 */
    NULL, //&vmmR3Switcher32BitTo32Bit_Def,
    NULL, //&vmmR3Switcher32BitToPAE_Def,
    NULL, //&vmmR3Switcher32BitToAMD64_Def,
    NULL, //&vmmR3SwitcherPAETo32Bit_Def,
    NULL, //&vmmR3SwitcherPAEToPAE_Def,
    NULL, //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64To32Bit_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
# endif /* RT_ARCH_AMD64 */
#else  /* !VBOX_WITH_RAW_MODE */
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
#endif /* !VBOX_WITH_RAW_MODE */
#ifndef RT_ARCH_AMD64
    &vmmR3SwitcherX86Stub_Def,
    NULL,
#else
    NULL,
    &vmmR3SwitcherAMD64Stub_Def,
#endif
};
98
/** Array of switcher definitions used when HM (hardware virtualization) is
 * enabled.  Indexed by VMMSWITCHER; each non-NULL entry's enmType shall match
 * its index (asserted in vmmR3SwitcherInit).
 */
static PVMMSWITCHERDEF g_apHmSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    NULL, //&vmmR3Switcher32BitTo32Bit_Def,
    NULL, //&vmmR3Switcher32BitToPAE_Def,
    &vmmR3Switcher32BitToAMD64_Def,   /* 32-bit host running 64-bit guests. */
    NULL, //&vmmR3SwitcherPAETo32Bit_Def,
    NULL, //&vmmR3SwitcherPAEToPAE_Def,
    &vmmR3SwitcherPAEToAMD64_Def,     /* PAE host running 64-bit guests. */
    NULL, //&vmmR3SwitcherAMD64To32Bit_Def,
    NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
    NULL, //&vmmR3SwitcherAMD64ToAMD64_Def,
#else  /* !(HC_ARCH_BITS == 32 && !VBOX_WITH_HYBRID_32BIT_KERNEL) */
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
#endif /* !(HC_ARCH_BITS == 32 && !VBOX_WITH_HYBRID_32BIT_KERNEL) */
#ifndef RT_ARCH_AMD64
    &vmmR3SwitcherX86Stub_Def,
    NULL,
#else
    NULL,
    &vmmR3SwitcherAMD64Stub_Def,
#endif
};
134
135
136/**
137 * VMMR3Init worker that initiates the switcher code (aka core code).
138 *
139 * This is core per VM code which might need fixups and/or for ease of use are
140 * put on linear contiguous backing.
141 *
142 * @returns VBox status code.
143 * @param pVM Pointer to the VM.
144 */
145int vmmR3SwitcherInit(PVM pVM)
146{
147#ifndef VBOX_WITH_RAW_MODE /** @todo 64-bit on 32-bit. */
148 return VINF_SUCCESS;
149#else
150 /*
151 * Calc the size.
152 */
153 const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
154 unsigned cbCoreCode = 0;
155 for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
156 {
157 pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
158 PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
159 if (pSwitcher)
160 {
161 AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
162 cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
163 }
164 }
165
166 /*
167 * Allocate contiguous pages for switchers and deal with
168 * conflicts in the intermediate mapping of the code.
169 */
170 pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
171 pVM->vmm.s.pvCoreCodeR3 = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
172 int rc = VERR_NO_MEMORY;
173 if (pVM->vmm.s.pvCoreCodeR3)
174 {
175 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
176 if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
177 {
178 /* try more allocations - Solaris, Linux. */
179 const unsigned cTries = 8234;
180 struct VMMInitBadTry
181 {
182 RTR0PTR pvR0;
183 void *pvR3;
184 RTHCPHYS HCPhys;
185 RTUINT cb;
186 } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
187 AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
188 unsigned i = 0;
189 do
190 {
191 paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
192 paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
193 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
194 i++;
195 pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
196 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
197 pVM->vmm.s.pvCoreCodeR3 = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
198 if (!pVM->vmm.s.pvCoreCodeR3)
199 break;
200 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
201 } while ( rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
202 && i < cTries - 1);
203
204 /* cleanup */
205 if (RT_FAILURE(rc))
206 {
207 paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
208 paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
209 paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
210 paBadTries[i].cb = pVM->vmm.s.cbCoreCode;
211 i++;
212 LogRel(("Failed to allocated and map core code: rc=%Rrc\n", rc));
213 }
214 while (i-- > 0)
215 {
216 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%RHp\n",
217 i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
218 SUPR3ContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
219 }
220 RTMemTmpFree(paBadTries);
221 }
222 }
223 if (RT_SUCCESS(rc))
224 {
225 /*
226 * copy the code.
227 */
228 for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
229 {
230 PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
231 if (pSwitcher)
232 memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
233 pSwitcher->pvCode, pSwitcher->cbCode);
234 }
235
236 /*
237 * Map the code into the GC address space.
238 */
239 RTGCPTR GCPtr;
240 rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode,
241 cbCoreCode, "Core Code", &GCPtr);
242 if (RT_SUCCESS(rc))
243 {
244 pVM->vmm.s.pvCoreCodeRC = GCPtr;
245 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
246 LogRel(("CoreCode: R3=%RHv R0=%RHv RC=%RRv Phys=%RHp cb=%#x\n",
247 pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));
248
249 /*
250 * Finally, PGM probably has selected a switcher already but we need
251 * to get the routine addresses, so we'll reselect it.
252 * This may legally fail so, we're ignoring the rc.
253 * Note! See HMIsEnabled hack in selector function.
254 */
255 VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
256 return rc;
257 }
258
259 /* shit */
260 AssertMsgFailed(("PGMR3Map(,%RRv, %RHp, %#x, 0) failed with rc=%Rrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
261 SUPR3ContFree(pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
262 }
263 else
264 VMSetError(pVM, rc, RT_SRC_POS,
265 N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
266 cbCoreCode);
267
268 pVM->vmm.s.pvCoreCodeR3 = NULL;
269 pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
270 pVM->vmm.s.pvCoreCodeRC = 0;
271 return rc;
272#endif
273}
274
/**
 * Relocate the switchers, called by VMMR3Relocate.
 *
 * Runs each installed switcher's fixup routine against the current mappings
 * and then recalculates the cached RC entry points for the active switcher.
 *
 * @param   pVM         Pointer to the VM.
 * @param   offDelta    The relocation delta.
 */
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
{
#ifdef VBOX_WITH_RAW_MODE
    /*
     * Relocate all the switchers.
     */
    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
    {
        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
        if (pSwitcher && pSwitcher->pfnRelocate)
        {
            /* Each switcher's code lives at its recorded offset within the core code block. */
            unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
            pSwitcher->pfnRelocate(pVM,
                                   pSwitcher,
                                   pVM->vmm.s.pvCoreCodeR0 + off,
                                   (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
                                   pVM->vmm.s.pvCoreCodeRC + off,
                                   pVM->vmm.s.HCPhysCoreCode + off);
        }
    }

    /*
     * Recalc the RC address for the current switcher.
     */
    PVMMSWITCHERDEF pSwitcher = papSwitchers[pVM->vmm.s.enmSwitcher];
    if (pSwitcher)
    {
        RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
        pVM->vmm.s.pfnRCToHost         = RCPtr + pSwitcher->offRCToHost;
        pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
        pVM->pfnVMMRCToHostAsm         = RCPtr + pSwitcher->offRCToHostAsm;
        pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
    }
    else
        /* With HM the selected switcher may legally be a NULL (stub) entry. */
        AssertRelease(HMIsEnabled(pVM));

#else
    NOREF(pVM);
#endif
    NOREF(offDelta);
}
324
325
326#ifdef VBOX_WITH_RAW_MODE
327
/**
 * Generic switcher code relocator.
 *
 * Walks the switcher's fixup-record stream and patches the copied switcher
 * code (through the ring-3 mapping) so that cross-context branches, absolute
 * pointers and selector values match this VM's current ring-0 (HC), raw-mode
 * (GC) and identity-mapped (ID) addresses.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pSwitcher   The switcher definition.
 * @param   R0PtrCode   Pointer to the core code block for the switcher, ring-0 mapping.
 * @param   pu8CodeR3   Pointer to the core code block for the switcher, ring-3 mapping.
 * @param   GCPtrCode   The guest context address corresponding to pu8CodeR3.
 * @param   u32IDCode   The identity mapped (ID) address corresponding to pu8CodeR3.
 * @param   SelCS       The hypervisor CS selector.
 * @param   SelDS       The hypervisor DS selector.
 * @param   SelTSS      The hypervisor TSS selector.
 * @param   GCPtrGDT    The GC address of the hypervisor GDT.
 * @param   SelCS64     The 64-bit mode hypervisor CS selector.
 */
static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher,
                                         RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
                                         RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
{
    /* Read cursor for the variable-length fixup record stream. */
    union
    {
        const uint8_t  *pu8;
        const uint16_t *pu16;
        const uint32_t *pu32;
        const uint64_t *pu64;
        const void     *pv;
        uintptr_t       u;
    } u;
    u.pv = pSwitcher->pvFixups;

    /*
     * Process fixups.
     */
    uint8_t u8;
    while ((u8 = *u.pu8++) != FIX_THE_END)
    {
        /*
         * Get the source (where to write the fixup).
         */
        uint32_t offSrc = *u.pu32++;
        Assert(offSrc < pSwitcher->cbCode);
        union
        {
            uint8_t  *pu8;
            uint16_t *pu16;
            uint32_t *pu32;
            uint64_t *pu64;
            uintptr_t u;
        } uSrc;
        uSrc.pu8 = pu8CodeR3 + offSrc; /* patching goes through the ring-3 mapping */

        /* The fixup target and method depends on the type. */
        switch (u8)
        {
            /*
             * 32-bit relative, source in HC and target in GC.
             */
            case FIX_HC_2_GC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                /* NOTE(review): displacement is computed from the R3 patch address
                   (uSrc.u), assuming the HC part executes at the R3 mapping — confirm. */
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
                break;
            }

            /*
             * 32-bit relative, source in HC and target in ID.
             */
            case FIX_HC_2_ID_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                /* Displacement relative to the ring-0 address of the instruction end. */
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (R0PtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in HC.
             */
            case FIX_GC_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in ID.
             */
            case FIX_GC_2_ID_NEAR_REL:
            {
                AssertMsg(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode, ("%x - %x < %x\n", offSrc, pSwitcher->offGCCode, pSwitcher->cbGCCode));
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in HC.
             */
            case FIX_ID_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)((R0PtrCode + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in GC.
             */
            case FIX_ID_2_GC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 16:32 far jump, target in GC.
             */
            case FIX_GC_FAR32:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                /* Write offset then selector, as laid out by a far JMP/CALL operand. */
                *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
                *uSrc.pu16++ = SelCS;
                break;
            }

            /*
             * Make 32-bit GC pointer given CPUM offset.
             */
            case FIX_GC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
                break;
            }

            /*
             * Make 32-bit GC pointer given CPUMCPU offset.
             */
            case FIX_GC_CPUMCPU_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->aCpus[0].cpum));
                /* NOTE(review): always uses VCPU 0 — presumably switchers only run on EMT 0; confirm. */
                *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->aCpus[0].cpum) + offCPUM);
                break;
            }

            /*
             * Make 32-bit GC pointer given VM offset.
             */
            case FIX_GC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)(VM_RC_ADDR(pVM, pVM) + offVM);
                break;
            }

            /*
             * Make 32-bit HC pointer given CPUM offset.
             */
            case FIX_HC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
                break;
            }

            /*
             * Make 32-bit R0 pointer given VM offset.
             */
            case FIX_HC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
                break;
            }

            /*
             * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_32BIT_CR3:
            {
                *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
                break;
            }

            /*
             * Store the PAE CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_PAE_CR3:
            {
                *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
                break;
            }

            /*
             * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_AMD64_CR3:
            {
                *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
                break;
            }

            /*
             * Store Hypervisor CS (16-bit).
             */
            case FIX_HYPER_CS:
            {
                *uSrc.pu16 = SelCS;
                break;
            }

            /*
             * Store Hypervisor DS (16-bit).
             */
            case FIX_HYPER_DS:
            {
                *uSrc.pu16 = SelDS;
                break;
            }

            /*
             * Store Hypervisor TSS (16-bit).
             */
            case FIX_HYPER_TSS:
            {
                *uSrc.pu16 = SelTSS;
                break;
            }

            /*
             * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
             */
            case FIX_GC_TSS_GDTE_DW2:
            {
                /* (SelTSS & ~7) is the descriptor's byte offset in the GDT; +4 selects dword 2. */
                RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
                *uSrc.pu32 = (uint32_t)GCPtr;
                break;
            }

            /*
             * Store the EFER or mask for the 32->64 bit switcher.
             */
            case FIX_EFER_OR_MASK:
            {
                uint32_t u32OrMask = MSR_K6_EFER_LME | MSR_K6_EFER_SCE;
                /*
                 * We don't care if cpuid 0x80000001 isn't supported as that implies
                 * long mode isn't supported either, so this switcher would never be used.
                 */
                if (!!(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
                    u32OrMask |= MSR_K6_EFER_NXE;

                *uSrc.pu32 = u32OrMask;
                break;
            }

            /*
             * Insert relative jump to specified target if FXSAVE/FXRSTOR isn't supported by the cpu.
             */
            case FIX_NO_FXSAVE_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMSupportsFXSR(pVM))
                {
                    *uSrc.pu8++ = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    /* Restore the original 5 bytes from the pristine switcher image. */
                    *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * Insert relative jump to specified target if SYSENTER isn't used by the host.
             */
            case FIX_NO_SYSENTER_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysEnter(pVM))
                {
                    *uSrc.pu8++ = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    /* Restore the original 5 bytes from the pristine switcher image. */
                    *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * Insert relative jump to specified target if SYSCALL isn't used by the host.
             */
            case FIX_NO_SYSCALL_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysCall(pVM))
                {
                    *uSrc.pu8++ = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    /* Restore the original 5 bytes from the pristine switcher image. */
                    *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = R0PtrCode + offTrg;
                break;
            }

#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
            /*
             * 64-bit HC Code Selector (no argument).
             */
            case FIX_HC_64BIT_CS:
            {
                Assert(offSrc < pSwitcher->cbCode);
# if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
                *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
# else
                AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
# endif
                break;
            }

            /*
             * 64-bit HC pointer to the CPUM instance data (no argument).
             */
            case FIX_HC_64BIT_CPUM:
            {
                Assert(offSrc < pSwitcher->cbCode);
                *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
                break;
            }
#endif
            /*
             * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_64BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu64 = R0PtrCode + offTrg;
                break;
            }

#ifdef RT_ARCH_X86
            /* 64-bit slot holding a (zero-extended) 32-bit GC pointer into CPUM. */
            case FIX_GC_64_BIT_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu64 = (uint32_t)(VM_RC_ADDR(pVM, &pVM->cpum) + offCPUM);
                break;
            }
#endif

            /*
             * 32-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = u32IDCode + offTrg;
                break;
            }

            /*
             * 64-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_64BIT:
            case FIX_HC_64BIT_NOCHECK:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(u8 == FIX_HC_64BIT_NOCHECK || offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu64 = u32IDCode + offTrg;
                break;
            }

            /*
             * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
             */
            case FIX_ID_FAR32_TO_64BIT_MODE:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32++ = u32IDCode + offTrg;
                *uSrc.pu16 = SelCS64;
                AssertRelease(SelCS64);
                break;
            }

#ifdef VBOX_WITH_NMI
            /*
             * 32-bit address to the APIC base.
             */
            case FIX_GC_APIC_BASE_32BIT:
            {
                *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
                break;
            }
#endif

            default:
                AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
                break;
        }
    }

#ifdef LOG_ENABLED
    /*
     * If Log2 is enabled disassemble the switcher code.
     *
     * The switcher code have 1-2 HC parts, 1 GC part and 0-2 ID parts.
     */
    if (LogIs2Enabled())
    {
        RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
                    "   R0PtrCode   = %p\n"
                    "   pu8CodeR3   = %p\n"
                    "   GCPtrCode   = %RGv\n"
                    "   u32IDCode   = %08x\n"
                    "   pVMRC       = %RRv\n"
                    "   pCPUMRC     = %RRv\n"
                    "   pVMR3       = %p\n"
                    "   pCPUMR3     = %p\n"
                    "   GCPtrGDT    = %RGv\n"
                    "   InterCR3s   = %08RHp, %08RHp, %08RHp (32-Bit, PAE, AMD64)\n"
                    "   HyperCR3s   = %08RHp (32-Bit, PAE & AMD64)\n"
                    "   SelCS       = %04x\n"
                    "   SelDS       = %04x\n"
                    "   SelCS64     = %04x\n"
                    "   SelTSS      = %04x\n",
                    pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
                    R0PtrCode,
                    pu8CodeR3,
                    GCPtrCode,
                    u32IDCode,
                    VM_RC_ADDR(pVM, pVM),
                    VM_RC_ADDR(pVM, &pVM->cpum),
                    pVM,
                    &pVM->cpum,
                    GCPtrGDT,
                    PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
                    PGMGetHyperCR3(VMMGetCpu(pVM)),
                    SelCS, SelDS, SelCS64, SelTSS);

        uint32_t offCode = 0;
        while (offCode < pSwitcher->cbCode)
        {
            /*
             * Figure out where this is.
             */
            const char *pszDesc = NULL;
            RTUINTPTR   uBase;
            uint32_t    cbCode;
            if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
            {
                pszDesc = "HCCode0";
                uBase   = R0PtrCode;
                offCode = pSwitcher->offHCCode0;
                cbCode  = pSwitcher->cbHCCode0;
            }
            else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
            {
                pszDesc = "HCCode1";
                uBase   = R0PtrCode;
                offCode = pSwitcher->offHCCode1;
                cbCode  = pSwitcher->cbHCCode1;
            }
            else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
            {
                pszDesc = "GCCode";
                uBase   = GCPtrCode;
                offCode = pSwitcher->offGCCode;
                cbCode  = pSwitcher->cbGCCode;
            }
            else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
            {
                pszDesc = "IDCode0";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode0;
                cbCode  = pSwitcher->cbIDCode0;
            }
            else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
            {
                pszDesc = "IDCode1";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode1;
                cbCode  = pSwitcher->cbIDCode1;
            }
            else
            {
                /* Byte not covered by any named part; dump it raw and advance. */
                RTLogPrintf("  %04x: %02x '%c' (nowhere)\n",
                            offCode, pu8CodeR3[offCode], RT_C_IS_PRINT(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
                offCode++;
                continue;
            }

            /*
             * Disassemble it.
             */
            RTLogPrintf("  %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);

            while (cbCode > 0)
            {
                /* try label it */
                if (pSwitcher->offR0ToRawMode == offCode)
                    RTLogPrintf(" *R0ToRawMode:\n");
                if (pSwitcher->offRCToHost == offCode)
                    RTLogPrintf(" *RCToHost:\n");
                if (pSwitcher->offRCCallTrampoline == offCode)
                    RTLogPrintf(" *RCCallTrampoline:\n");
                if (pSwitcher->offRCToHostAsm == offCode)
                    RTLogPrintf(" *RCToHostAsm:\n");
                if (pSwitcher->offRCToHostAsmNoReturn == offCode)
                    RTLogPrintf(" *RCToHostAsmNoReturn:\n");

                /* disas */
                uint32_t    cbInstr = 0;
                DISCPUSTATE Cpu;
                char        szDisas[256];
                int rc = DISInstr(pu8CodeR3 + offCode, DISCPUMODE_32BIT, &Cpu, &cbInstr);
                if (RT_SUCCESS(rc))
                {
                    /* Rebase the instruction address to the part's execution context. */
                    Cpu.uInstrAddr += uBase - (uintptr_t)pu8CodeR3;
                    DISFormatYasmEx(&Cpu, szDisas, sizeof(szDisas),
                                    DIS_FMT_FLAGS_ADDR_LEFT | DIS_FMT_FLAGS_BYTES_LEFT | DIS_FMT_FLAGS_BYTES_SPACED
                                    | DIS_FMT_FLAGS_RELATIVE_BRANCH,
                                    NULL, NULL);
                }
                if (RT_SUCCESS(rc))
                    RTLogPrintf("  %04x: %s\n", offCode, szDisas);
                else
                {
                    RTLogPrintf("  %04x: %02x '%c' (rc=%Rrc\n",
                                offCode, pu8CodeR3[offCode], RT_C_IS_PRINT(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ', rc);
                    cbInstr = 1;
                }
                offCode += cbInstr;
                cbCode  -= RT_MIN(cbInstr, cbCode);
            }
        }
    }
#endif
}
917
918/**
919 * Relocator for the 32-Bit to 32-Bit world switcher.
920 */
921DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
922{
923 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
924 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
925}
926
927
928/**
929 * Relocator for the 32-Bit to PAE world switcher.
930 */
931DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
932{
933 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
934 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
935}
936
937
938/**
939 * Relocator for the 32-Bit to AMD64 world switcher.
940 */
941DECLCALLBACK(void) vmmR3Switcher32BitToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
942{
943 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
944 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
945}
946
947
948/**
949 * Relocator for the PAE to 32-Bit world switcher.
950 */
951DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
952{
953 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
954 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
955}
956
957
958/**
959 * Relocator for the PAE to PAE world switcher.
960 */
961DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
962{
963 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
964 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
965}
966
967/**
968 * Relocator for the PAE to AMD64 world switcher.
969 */
970DECLCALLBACK(void) vmmR3SwitcherPAEToAMD64_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
971{
972 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
973 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
974}
975
976
977/**
978 * Relocator for the AMD64 to 32-bit world switcher.
979 */
980DECLCALLBACK(void) vmmR3SwitcherAMD64To32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
981{
982 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
983 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
984}
985
986
987/**
988 * Relocator for the AMD64 to PAE world switcher.
989 */
990DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, RTR0PTR R0PtrCode, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
991{
992 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode,
993 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
994}
995
996
997/**
998 * Selects the switcher to be used for switching to raw-mode context.
999 *
1000 * @returns VBox status code.
1001 * @param pVM Pointer to the VM.
1002 * @param enmSwitcher The new switcher.
1003 * @remark This function may be called before the VMM is initialized.
1004 */
1005VMMR3_INT_DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
1006{
1007 /*
1008 * Validate input.
1009 */
1010 if ( enmSwitcher < VMMSWITCHER_INVALID
1011 || enmSwitcher >= VMMSWITCHER_MAX)
1012 {
1013 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
1014 return VERR_INVALID_PARAMETER;
1015 }
1016
1017 /*
1018 * Override it if HM is active.
1019 */
1020 if (HMIsEnabled(pVM))
1021 pVM->vmm.s.enmSwitcher = HC_ARCH_BITS == 64 ? VMMSWITCHER_AMD64_STUB : VMMSWITCHER_X86_STUB;
1022
1023 /*
1024 * Select the new switcher.
1025 */
1026 const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
1027 PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
1028 if (pSwitcher)
1029 {
1030 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
1031 pVM->vmm.s.enmSwitcher = enmSwitcher;
1032
1033 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
1034 pVM->vmm.s.pfnR0ToRawMode = pbCodeR0 + pSwitcher->offR0ToRawMode;
1035
1036 RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[enmSwitcher];
1037 pVM->vmm.s.pfnRCToHost = RCPtr + pSwitcher->offRCToHost;
1038 pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
1039 pVM->pfnVMMRCToHostAsm = RCPtr + pSwitcher->offRCToHostAsm;
1040 pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
1041 return VINF_SUCCESS;
1042 }
1043
1044 return VERR_NOT_IMPLEMENTED;
1045}
1046
1047#endif /* VBOX_WITH_RAW_MODE */
1048
1049
1050/**
1051 * Gets the switcher to be used for switching to GC.
1052 *
1053 * @returns host to guest ring 0 switcher entrypoint
1054 * @param pVM Pointer to the VM.
1055 * @param enmSwitcher The new switcher.
1056 */
1057VMMR3_INT_DECL(RTR0PTR) VMMR3GetHostToGuestSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
1058{
1059 /*
1060 * Validate input.
1061 */
1062 if ( enmSwitcher < VMMSWITCHER_INVALID
1063 || enmSwitcher >= VMMSWITCHER_MAX)
1064 {
1065 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
1066 return NIL_RTR0PTR;
1067 }
1068
1069 /*
1070 * Select the new switcher.
1071 */
1072 const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
1073 PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
1074 if (pSwitcher)
1075 {
1076 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvCoreCodeR0 type */
1077 return pbCodeR0 + pSwitcher->offR0ToRawMode;
1078 }
1079 return NIL_RTR0PTR;
1080}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette