VirtualBox

source: vbox/trunk/src/VBox/VMM/VMM.cpp@10719

Last change on this file since 10719 was 10450, checked in by vboxsync, 16 years ago

Added VMMGetSvnRev() (exported) and changed VMMR0Init and VMMGCInit to check that the revision is the same. (We've got private interfaces between ring-3, ring-0 and GC, not to mention shared structures, so this check is really long overdue.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 98.8 KB
Line 
1/* $Id: VMM.cpp 10450 2008-07-09 21:55:45Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22//#define NO_SUPCALLR0VMM
23
24/** @page pg_vmm VMM - The Virtual Machine Monitor
25 *
26 * !Revise this! It's already incorrect!
27 *
28 * The Virtual Machine Monitor (VMM) is the core of the virtual machine. It
29 * manages the alternate reality; controlling the virtualization, managing
30 * manages the alternate reality; controlling the virtualization, managing
31 *
32 * We will split the VMM into smaller entities:
33 *
34 * - Virtual Machine Core Monitor (VMCM), whose purpose is to
35 * provide ring and world switching, including routing
36 * interrupts to the host OS and traps to the appropriate trap
37 * handlers. It will implement an external interface for
38 * managing trap handlers.
39 *
40 * - CPU Monitor (CM), tracking the state of the CPU (in the alternate
41 * reality) and implementing external interfaces to read and change
42 * the state.
43 *
44 * - Memory Monitor (MM), whose purpose is to virtualize physical
45 * pages, segment descriptor tables, interrupt descriptor tables, task
46 * segments, and keep track of all memory providing external interfaces
47 * to access content and map pages. (Internally split into smaller entities!)
48 *
49 * - IO Monitor (IOM), which virtualizes in and out I/O operations. It
50 * interacts with the MM to implement memory mapped I/O. External
51 * interfaces for adding and removing I/O ranges are implemented.
52 *
53 * - External Interrupt Monitor (EIM), whose purpose is to manage
54 * interrupts generated by virtual devices. This monitor provides
55 * an interface for raising interrupts which is accessible at any
56 * time and from all threads.
57 * <p>
58 * A subentity of the EIM is the virtual Programmable Interrupt
59 * Controller Device (VPICD), and perhaps a virtual I/O Advanced
60 * Programmable Interrupt Controller Device (VAPICD).
61 *
62 * - Direct Memory Access Monitor (DMAM), whose purpose is to support
63 * virtual devices using the DMA controller. Like the EIM interfaces,
64 * these must be independent and threadable.
65 * <p>
66 * A subentity of the DMAM is a virtual DMA Controller Device (VDMACD).
67 *
68 *
69 * Entities working on a higher level:
70 *
71 * - Device Manager (DM), which is a support facility for virtualized
72 * hardware. This provides generic facilities for efficient device
73 * virtualization. It will manage device attaching and detaching
74 * conversing with EIM and IOM.
75 *
76 * - Debugger Facility (DBGF) provides the basic features for
77 * debugging the alternate reality execution.
78 *
79 *
80 *
81 * @section pg_vmm_s_use_cases Use Cases
82 *
83 * @subsection pg_vmm_s_use_case_boot Bootstrap
84 *
85 * - Basic Init:
86 * - Init SUPDRV.
87 *
88 * - Init Virtual Machine Instance:
89 * - Load settings.
90 * - Check resource requirements (memory, com, stuff).
91 *
92 * - Init Host Ring 3 part:
93 * - Init Core code.
94 * - Load Pluggable Components.
95 * - Init Pluggable Components.
96 *
97 * - Init Host Ring 0 part:
98 * - Load Core (core = core components like VMM, RMI, CA, and so on) code.
99 * - Init Core code.
100 * - Load Pluggable Component code.
101 * - Init Pluggable Component code.
102 *
103 * - Allocate first chunk of memory and pin it down. This block of memory
104 * will fit the following pieces:
105 * - Virtual Machine Instance data. (Config, CPU state, VMM state, ++)
106 * (This is available from everywhere (at different addresses though)).
107 * - VMM Guest Context code.
108 * - Pluggable devices Guest Context code.
109 * - Page tables (directory and everything) for the VMM Guest
110 *
111 * - Setup Guest (Ring 0) part:
112 * - Setup initial page tables (i.e. directory all the stuff).
113 * - Load Core Guest Context code.
114 * - Load Pluggable Devices Guest Context code.
115 *
116 *
117 */
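/*
 * A rough sketch of how the init phases implemented in this file are driven
 * during VM creation (see VM.cpp for the authoritative ordering):
 *
 *      int rc = VMMR3Init(pVM);            // ring-3 core code, stack, loggers, stats
 *      if (VBOX_SUCCESS(rc))
 *          rc = VMMR3InitFinalize(pVM);    // stack protection + EMT yield timer
 *      if (VBOX_SUCCESS(rc))
 *          rc = VMMR3InitR0(pVM);          // ring-0 side (VMMR0_DO_VMMR0_INIT)
 *      if (VBOX_SUCCESS(rc))
 *          rc = VMMR3InitGC(pVM);          // guest context side (VMMGC_DO_VMMGC_INIT)
 */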
118
119
120/*******************************************************************************
121* Header Files *
122*******************************************************************************/
123#define LOG_GROUP LOG_GROUP_VMM
124#include <VBox/vmm.h>
125#include <VBox/vmapi.h>
126#include <VBox/pgm.h>
127#include <VBox/cfgm.h>
128#include <VBox/pdmqueue.h>
129#include <VBox/pdmapi.h>
130#include <VBox/cpum.h>
131#include <VBox/mm.h>
132#include <VBox/iom.h>
133#include <VBox/trpm.h>
134#include <VBox/selm.h>
135#include <VBox/em.h>
136#include <VBox/sup.h>
137#include <VBox/dbgf.h>
138#include <VBox/csam.h>
139#include <VBox/patm.h>
140#include <VBox/rem.h>
141#include <VBox/ssm.h>
142#include <VBox/tm.h>
143#include "VMMInternal.h"
144#include "VMMSwitcher/VMMSwitcher.h"
145#include <VBox/vm.h>
146#include <VBox/err.h>
147#include <VBox/param.h>
148#include <VBox/version.h>
149#include <VBox/x86.h>
150#include <VBox/hwaccm.h>
151#include <iprt/assert.h>
152#include <iprt/alloc.h>
153#include <iprt/asm.h>
154#include <iprt/time.h>
155#include <iprt/stream.h>
156#include <iprt/string.h>
157#include <iprt/stdarg.h>
158#include <iprt/ctype.h>
159
160
161
162/** The saved state version. */
163#define VMM_SAVED_STATE_VERSION 3
164
165
166/*******************************************************************************
167* Internal Functions *
168*******************************************************************************/
169static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
170static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
171static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
172static int vmmR3ServiceCallHostRequest(PVM pVM);
173static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
174
175
176/*******************************************************************************
177* Global Variables *
178*******************************************************************************/
179/** Array of switcher definitions.
180 * The type and index shall match!
181 */
182static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
183{
184 NULL, /* invalid entry */
185#ifndef RT_ARCH_AMD64
186 &vmmR3Switcher32BitTo32Bit_Def,
187 &vmmR3Switcher32BitToPAE_Def,
188 NULL, //&vmmR3Switcher32BitToAMD64_Def,
189 &vmmR3SwitcherPAETo32Bit_Def,
190 &vmmR3SwitcherPAEToPAE_Def,
191 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
192# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
193 &vmmR3SwitcherAMD64ToPAE_Def,
194# else
195 NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
196# endif
197 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
198#else
199 NULL, //&vmmR3Switcher32BitTo32Bit_Def,
200 NULL, //&vmmR3Switcher32BitToPAE_Def,
201 NULL, //&vmmR3Switcher32BitToAMD64_Def,
202 NULL, //&vmmR3SwitcherPAETo32Bit_Def,
203 NULL, //&vmmR3SwitcherPAEToPAE_Def,
204 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
205 &vmmR3SwitcherAMD64ToPAE_Def,
206 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
207#endif
208};
209
210
211
212/**
213 * Initializes the core code.
214 *
215 * This is per-VM core code which might need fixups and/or which, for
216 * ease of use, is put on linearly contiguous backing.
217 *
218 * @returns VBox status code.
219 * @param pVM Pointer to VM structure.
220 */
221static int vmmR3InitCoreCode(PVM pVM)
222{
223 /*
224 * Calc the size.
225 */
226 unsigned cbCoreCode = 0;
227 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
228 {
229 pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
230 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
231 if (pSwitcher)
232 {
233 AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
234 cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
235 }
236 }
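    /* At this point each switcher has been assigned a 32-byte aligned slot at
       aoffSwitchers[iSwitcher] inside the code blob allocated below. */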
237
238 /*
239 * Allocate contiguous pages for switchers and deal with
240 * conflicts in the intermediate mapping of the code.
241 */
242 pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
243 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
244 int rc = VERR_NO_MEMORY;
245 if (pVM->vmm.s.pvHCCoreCodeR3)
246 {
247 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
248 if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
249 {
250 /* Try more allocations, keeping the conflicting ones so the next attempt differs; all are freed below. */
251 struct
252 {
253 RTR0PTR pvR0;
254 void *pvR3;
255 RTHCPHYS HCPhys;
256 RTUINT cb;
257 } aBadTries[128];
258 unsigned i = 0;
259 do
260 {
261 aBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
262 aBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
263 aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
264 i++;
265 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
266 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
267 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
268 if (!pVM->vmm.s.pvHCCoreCodeR3)
269 break;
270 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
271 } while ( rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
272 && i < RT_ELEMENTS(aBadTries) - 1);
273
274 /* cleanup */
275 if (VBOX_FAILURE(rc))
276 {
277 aBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
278 aBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
279 aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
280 aBadTries[i].cb = pVM->vmm.s.cbCoreCode;
281 i++;
282 LogRel(("Failed to allocate and map core code: rc=%Vrc\n", rc));
283 }
284 while (i-- > 0)
285 {
286 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
287 i, aBadTries[i].pvR3, aBadTries[i].pvR0, aBadTries[i].HCPhys));
288 SUPContFree(aBadTries[i].pvR3, aBadTries[i].cb >> PAGE_SHIFT);
289 }
290 }
291 }
292 if (VBOX_SUCCESS(rc))
293 {
294 /*
295 * Copy the code.
296 */
297 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
298 {
299 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
300 if (pSwitcher)
301 memcpy((uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
302 pSwitcher->pvCode, pSwitcher->cbCode);
303 }
304
305 /*
306 * Map the code into the GC address space.
307 */
308 RTGCPTR GCPtr;
309 rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &GCPtr);
310 if (VBOX_SUCCESS(rc))
311 {
312 pVM->vmm.s.pvGCCoreCode = GCPtr;
313 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
314 LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VRv Phys=%VHp cb=%#x\n",
315 pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));
316
317 /*
318 * Finally, PGM has probably selected a switcher already, but we need
319 * to get the addresses, so we'll reselect it.
320 * This may legally fail, so we're ignoring the rc.
321 */
322 VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
323 return rc;
324 }
325
326 /* shit */
327 AssertMsgFailed(("PGMR3Map(,%VRv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
328 SUPContFree(pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
329 }
330 else
331 VMSetError(pVM, rc, RT_SRC_POS,
332 N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
333 cbCoreCode);
334
335 pVM->vmm.s.pvHCCoreCodeR3 = NULL;
336 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
337 pVM->vmm.s.pvGCCoreCode = 0;
338 return rc;
339}
340
341
342/**
343 * Initializes the VMM.
344 *
345 * @returns VBox status code.
346 * @param pVM The VM to operate on.
347 */
348VMMR3DECL(int) VMMR3Init(PVM pVM)
349{
350 LogFlow(("VMMR3Init\n"));
351
352 /*
353 * Assert alignment, sizes and order.
354 */
355 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
356 AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
357 ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
358 sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));
359
360 /*
361 * Init basic VM VMM members.
362 */
363 pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
364 int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
365 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
366 pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
367 //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
368 else
369 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rc), rc);
370
371 /* GC switchers are enabled by default. Turned off by HWACCM. */
372 pVM->vmm.s.fSwitcherDisabled = false;
373
374 /*
375 * Register the saved state data unit.
376 */
377 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
378 NULL, vmmR3Save, NULL,
379 NULL, vmmR3Load, NULL);
380 if (VBOX_FAILURE(rc))
381 return rc;
382
383 /*
384 * Register the Ring-0 VM handle with the session for fast ioctl calls.
385 */
386 rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
387 if (VBOX_FAILURE(rc))
388 return rc;
389
390 /*
391 * Init core code.
392 */
393 rc = vmmR3InitCoreCode(pVM);
394 if (VBOX_SUCCESS(rc))
395 {
396 /*
397 * Allocate & init VMM GC stack.
398 * The stack pages are also used by the VMM R0 when VMMR0CallHost is invoked.
399 * (The page protection is modified during R3 init completion.)
400 */
401#ifdef VBOX_STRICT_VMM_STACK
402 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
403#else
404 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
405#endif
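        /* In strict builds the two extra pages allocated above are intended
           as the inaccessible guard pages set up in VMMR3InitFinalize. */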
406 if (VBOX_SUCCESS(rc))
407 {
408 /* Set HC and GC stack pointers to top of stack. */
409 pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = (RTR0PTR)pVM->vmm.s.pbHCStack;
410 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
411 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
412 AssertRelease(pVM->vmm.s.pbGCStack);
413
414 /* Set hypervisor ESP. */
415 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStack);
416
417 /*
418 * Allocate GC & R0 Logger instances (they are finalized in the relocator).
419 */
420#ifdef LOG_ENABLED
421 PRTLOGGER pLogger = RTLogDefaultInstance();
422 if (pLogger)
423 {
424 pVM->vmm.s.cbLoggerGC = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
425 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pLoggerHC);
426 if (VBOX_SUCCESS(rc))
427 {
428 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
429
430/*
431 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup), so
432 * you have to sign up here by adding your defined(DEBUG_<userid>) to the #if.
433 *
434 * If you want to log in non-debug modes, you'll have to remember to change SUPDRVShared.c
435 * to not stub all the log functions.
436 *
437 * You might also wish to enable the AssertMsg1/2 overrides in VMMR0.cpp when enabling this.
438 */
439# if defined(DEBUG_sandervl) || defined(DEBUG_frank)
440 rc = MMHyperAlloc(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]),
441 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0Logger);
442 if (VBOX_SUCCESS(rc))
443 {
444 pVM->vmm.s.pR0Logger->pVM = pVM->pVMR0;
445 //pVM->vmm.s.pR0Logger->fCreated = false;
446 pVM->vmm.s.pR0Logger->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]);
447 }
448# endif
449 }
450 }
451#endif /* LOG_ENABLED */
452
453#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
454 /*
455 * Allocate GC Release Logger instances (finalized in the relocator).
456 */
457 if (VBOX_SUCCESS(rc))
458 {
459 PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
460 if (pRelLogger)
461 {
462 pVM->vmm.s.cbRelLoggerGC = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
463 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRelLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRelLoggerHC);
464 if (VBOX_SUCCESS(rc))
465 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
466 }
467 }
468#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
469
470#ifdef VBOX_WITH_NMI
471 /*
472 * Allocate mapping for the host APIC.
473 */
474 if (VBOX_SUCCESS(rc))
475 {
476 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
477 AssertRC(rc);
478 }
479#endif
480 if (VBOX_SUCCESS(rc))
481 {
482 rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock);
483 if (VBOX_SUCCESS(rc))
484 {
485 /*
486 * Debug info.
487 */
488 DBGFR3InfoRegisterInternal(pVM, "ff", "Displays the current forced action flags.", vmmR3InfoFF);
489
490 /*
491 * Statistics.
492 */
493 STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches.");
494 STAM_REG(pVM, &pVM->vmm.s.StatGCRetNormal, STAMTYPE_COUNTER, "/VMM/GCRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
495 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterrupt, STAMTYPE_COUNTER, "/VMM/GCRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
496 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
497 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGuestTrap, STAMTYPE_COUNTER, "/VMM/GCRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
498 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitch, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
499 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
500 STAM_REG(pVM, &pVM->vmm.s.StatGCRetExceptionPrivilege, STAMTYPE_COUNTER, "/VMM/GCRet/ExceptionPrivilege", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
501 STAM_REG(pVM, &pVM->vmm.s.StatGCRetStaleSelector, STAMTYPE_COUNTER, "/VMM/GCRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
502 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIRETTrap, STAMTYPE_COUNTER, "/VMM/GCRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
503 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
504 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
505 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIORead, STAMTYPE_COUNTER, "/VMM/GCRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
506 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
507 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIORead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
508 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
509 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
510 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
511 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
512 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
513 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
514 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
515 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTSSFault, STAMTYPE_COUNTER, "/VMM/GCRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
516 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDFault, STAMTYPE_COUNTER, "/VMM/GCRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
517 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCSAMTask, STAMTYPE_COUNTER, "/VMM/GCRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
518 STAM_REG(pVM, &pVM->vmm.s.StatGCRetSyncCR3, STAMTYPE_COUNTER, "/VMM/GCRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
519 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMisc, STAMTYPE_COUNTER, "/VMM/GCRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
520 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchInt3, STAMTYPE_COUNTER, "/VMM/GCRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
521 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchPF, STAMTYPE_COUNTER, "/VMM/GCRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
522 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchGP, STAMTYPE_COUNTER, "/VMM/GCRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
523 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/GCRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
524 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPageOverflow, STAMTYPE_COUNTER, "/VMM/GCRet/InvlpgOverflow", STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
525 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/GCRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
526 STAM_REG(pVM, &pVM->vmm.s.StatGCRetToR3, STAMTYPE_COUNTER, "/VMM/GCRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
527 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTimerPending, STAMTYPE_COUNTER, "/VMM/GCRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
528 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptPending, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
529 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCallHost, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/Misc", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
530 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMGrowRAM, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/GrowRAM", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
531 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PDMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
532 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLogFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/LogFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
533 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMQueueFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/QueueFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
534 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMPoolGrow",STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
535 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRemReplay, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/REMReplay", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
536 STAM_REG(pVM, &pVM->vmm.s.StatGCRetVMSetError, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/VMSetError", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
537 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
538 STAM_REG(pVM, &pVM->vmm.s.StatGCRetHyperAssertion, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/HyperAssert", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
539 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/GCRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
540 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/GCRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
541 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulHlt, STAMTYPE_COUNTER, "/VMM/GCRet/EmulHlt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
542 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPendingRequest, STAMTYPE_COUNTER, "/VMM/GCRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
543
544 return VINF_SUCCESS;
545 }
546 AssertRC(rc);
547 }
548 }
549 /** @todo: Need failure cleanup. */
550
551 //more todo in here?
552 //if (VBOX_SUCCESS(rc))
553 //{
554 //}
555 //int rc2 = vmmR3TermCoreCode(pVM);
556 //AssertRC(rc2));
557 }
558
559 return rc;
560}
561
562
563/**
564 * Ring-3 init finalizing.
565 *
566 * @returns VBox status code.
567 * @param pVM The VM handle.
568 */
569VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
570{
571#ifdef VBOX_STRICT_VMM_STACK
572 /*
573 * Two inaccessible pages, one on each side of the stack, to catch over/under-flows.
574 */
575 memset(pVM->vmm.s.pbHCStack - PAGE_SIZE, 0xcc, PAGE_SIZE);
576 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack - PAGE_SIZE), PAGE_SIZE, 0);
577 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
578
579 memset(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
580 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack + VMM_STACK_SIZE), PAGE_SIZE, 0);
581 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
582#endif
583
584 /*
585 * Set page attributes to r/w for stack pages.
586 */
587 int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbGCStack, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
588 AssertRC(rc);
589 if (VBOX_SUCCESS(rc))
590 {
591 /*
592 * Create the EMT yield timer.
593 */
594 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
595 if (VBOX_SUCCESS(rc))
596 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
597 }
598#ifdef VBOX_WITH_NMI
599 /*
600 * Map the host APIC into GC - this may be host OS specific!
601 */
602 if (VBOX_SUCCESS(rc))
603 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
604 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
605#endif
606 return rc;
607}
608
609
610/**
611 * Initializes the R0 VMM.
612 *
613 * @returns VBox status code.
614 * @param pVM The VM to operate on.
615 */
616VMMR3DECL(int) VMMR3InitR0(PVM pVM)
617{
618 int rc;
619
620 /*
621 * Initialize the ring-0 logger if we haven't done so yet.
622 */
623 if ( pVM->vmm.s.pR0Logger
624 && !pVM->vmm.s.pR0Logger->fCreated)
625 {
626 rc = VMMR3UpdateLoggers(pVM);
627 if (VBOX_FAILURE(rc))
628 return rc;
629 }
630
631 /*
632 * Call Ring-0 entry with init code.
633 */
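    /*
     * Note on the loop below: a VINF_VMM_CALL_HOST status means ring-0 has
     * longjmp'ed back to ring-3 to have some service performed; we handle the
     * request in vmmR3ServiceCallHostRequest() and then resume the ring-0 call.
     */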
634 for (;;)
635 {
636#ifdef NO_SUPCALLR0VMM
637 //rc = VERR_GENERAL_FAILURE;
638 rc = VINF_SUCCESS;
639#else
640 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, VMMGetSvnRev(), NULL);
641#endif
642 if ( pVM->vmm.s.pR0Logger
643 && pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
644 RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
645 if (rc != VINF_VMM_CALL_HOST)
646 break;
647 rc = vmmR3ServiceCallHostRequest(pVM);
648 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
649 break;
650 /* Resume R0 */
651 }
652
653 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
654 {
655 LogRel(("R0 init failed, rc=%Vra\n", rc));
656 if (VBOX_SUCCESS(rc))
657 rc = VERR_INTERNAL_ERROR;
658 }
659 return rc;
660}
661
662
663/**
664 * Initializes the GC VMM.
665 *
666 * @returns VBox status code.
667 * @param pVM The VM to operate on.
668 */
669VMMR3DECL(int) VMMR3InitGC(PVM pVM)
670{
671 /* In hardware-assisted (VT-x/AMD-V) mode, there's no need to init GC. */
672 if (pVM->vmm.s.fSwitcherDisabled)
673 return VINF_SUCCESS;
674
675 /*
676 * Call VMMGCInit():
677 * -# resolve the address.
678 * -# setup stackframe and EIP to use the trampoline.
679 * -# do a generic hypervisor call.
680 */
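    /*
     * Sketch of the hypervisor stack frame built by the pushes below and
     * consumed by the GC call trampoline (top of stack first):
     *      GCPtrEP                - the VMMGCEntry address to call
     *      3 * sizeof(RTGCPTR)    - number of argument bytes to copy
     *      pVM                    - param 0
     *      VMMGC_DO_VMMGC_INIT    - param 1
     *      VMMGetSvnRev()         - param 2
     *      program start TS       - param 3 (two pushes on 32-bit GC)
     */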
681 RTGCPTR32 GCPtrEP;
682 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
683 if (VBOX_SUCCESS(rc))
684 {
685 CPUMHyperSetCtxCore(pVM, NULL);
686 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */
687 uint64_t u64TS = RTTimeProgramStartNanoTS();
688#if GC_ARCH_BITS == 32
689 CPUMPushHyper(pVM, (uint32_t)(u64TS >> 32)); /* Param 3: The program startup TS - Hi. */
690 CPUMPushHyper(pVM, (uint32_t)u64TS); /* Param 3: The program startup TS - Lo. */
691#else /* 64-bit GC */
692 CPUMPushHyper(pVM, u64TS); /* Param 3: The program startup TS. */
693#endif
694 CPUMPushHyper(pVM, VMMGetSvnRev()); /* Param 2: Version argument. */
695 CPUMPushHyper(pVM, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */
696 CPUMPushHyper(pVM, pVM->pVMGC); /* Param 0: pVM */
697 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* trampoline param: stacksize. */
698 CPUMPushHyper(pVM, GCPtrEP); /* Call EIP. */
699 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
700
701 for (;;)
702 {
703#ifdef NO_SUPCALLR0VMM
704 //rc = VERR_GENERAL_FAILURE;
705 rc = VINF_SUCCESS;
706#else
707 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_CALL_HYPERVISOR, NULL);
708#endif
709#ifdef LOG_ENABLED
710 PRTLOGGERRC pLogger = pVM->vmm.s.pLoggerHC;
711 if ( pLogger
712 && pLogger->offScratch > 0)
713 RTLogFlushGC(NULL, pLogger);
714#endif
715#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
716 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRelLoggerHC;
717 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
718 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
719#endif
720 if (rc != VINF_VMM_CALL_HOST)
721 break;
722 rc = vmmR3ServiceCallHostRequest(pVM);
723 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
724 break;
725 }
726
727 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
728 {
729 VMMR3FatalDump(pVM, rc);
730 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
731 rc = VERR_INTERNAL_ERROR;
732 }
733 AssertRC(rc);
734 }
735 return rc;
736}
737
738
739/**
740 * Terminate the VMM bits.
741 *
742 * @returns VINF_SUCCESS.
743 * @param pVM The VM handle.
744 */
745VMMR3DECL(int) VMMR3Term(PVM pVM)
746{
747 /*
748 * Call Ring-0 entry with termination code.
749 */
750 int rc;
751 for (;;)
752 {
753#ifdef NO_SUPCALLR0VMM
754 //rc = VERR_GENERAL_FAILURE;
755 rc = VINF_SUCCESS;
756#else
757 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_TERM, 0, NULL);
758#endif
759 if ( pVM->vmm.s.pR0Logger
760 && pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
761 RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
762 if (rc != VINF_VMM_CALL_HOST)
763 break;
764 rc = vmmR3ServiceCallHostRequest(pVM);
765 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
766 break;
767 /* Resume R0 */
768 }
769 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
770 {
771 LogRel(("VMMR3Term: R0 term failed, rc=%Vra. (warning)\n", rc));
772 if (VBOX_SUCCESS(rc))
773 rc = VERR_INTERNAL_ERROR;
774 }
775
776#ifdef VBOX_STRICT_VMM_STACK
777 /*
778 * Make the two stack guard pages present again.
779 */
780 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
781 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
782#endif
783 return rc;
784}
785
786
787/**
788 * Applies relocations to data and code managed by this
789 * component. This function will be called at init and
790 * whenever the VMM needs to relocate itself inside the GC.
791 *
792 * The VMM will need to apply relocations to the core code.
793 *
794 * @param pVM The VM handle.
795 * @param offDelta The relocation delta.
796 */
797VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
798{
799 LogFlow(("VMMR3Relocate: offDelta=%VGv\n", offDelta));
800
801 /*
802 * Recalc the GC address.
803 */
804 pVM->vmm.s.pvGCCoreCode = MMHyperHC2GC(pVM, pVM->vmm.s.pvHCCoreCodeR3);
805
806 /*
807 * The stack.
808 */
809 CPUMSetHyperESP(pVM, CPUMGetHyperESP(pVM) + offDelta);
810 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
811 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
812
813 /*
814 * All the switchers.
815 */
816 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
817 {
818 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
819 if (pSwitcher && pSwitcher->pfnRelocate)
820 {
821 unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
822 pSwitcher->pfnRelocate(pVM,
823 pSwitcher,
824 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR0 + off,
825 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + off,
826 pVM->vmm.s.pvGCCoreCode + off,
827 pVM->vmm.s.HCPhysCoreCode + off);
828 }
829 }
830
831 /*
832 * Recalc the GC address for the current switcher.
833 */
834 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
835 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
836 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
837 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
838 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
839 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
840 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
841
842 /*
843 * Get other GC entry points.
844 */
845 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMGCResumeGuest);
846 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Vra\n", rc));
847
848 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMGCResumeGuestV86);
849 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Vra\n", rc));
850
851 /*
852 * Update the logger.
853 */
854 VMMR3UpdateLoggers(pVM);
855}
856
857
858/**
859 * Updates the settings for the GC and R0 loggers.
860 *
861 * @returns VBox status code.
862 * @param pVM The VM handle.
863 */
864VMMR3DECL(int) VMMR3UpdateLoggers(PVM pVM)
865{
866 /*
867 * Simply clone the logger instance (for GC).
868 */
869 int rc = VINF_SUCCESS;
870 RTGCPTR32 GCPtrLoggerFlush = 0;
871
872 if (pVM->vmm.s.pLoggerHC
873#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
874 || pVM->vmm.s.pRelLoggerHC
875#endif
876 )
877 {
878 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &GCPtrLoggerFlush);
879 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Vra\n", rc));
880 }
881
882 if (pVM->vmm.s.pLoggerHC)
883 {
884 RTGCPTR32 GCPtrLoggerWrapper = 0;
885 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &GCPtrLoggerWrapper);
886 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Vra\n", rc));
887 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
888 rc = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pLoggerHC, pVM->vmm.s.cbLoggerGC,
889 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
890 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Vra\n", rc));
891 }
892
893#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
894 if (pVM->vmm.s.pRelLoggerHC)
895 {
896 RTGCPTR32 GCPtrLoggerWrapper = 0;
897 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &GCPtrLoggerWrapper);
898 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Vra\n", rc));
899 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
900 rc = RTLogCloneRC(RTLogRelDefaultInstance(), pVM->vmm.s.pRelLoggerHC, pVM->vmm.s.cbRelLoggerGC,
901 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
902 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Vra\n", rc));
903 }
904#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
905
906 /*
907 * For the ring-0 EMT logger, we use a per-thread logger
908 * instance in ring-0. Only initialize it once.
909 */
910 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
911 if (pR0Logger)
912 {
913 if (!pR0Logger->fCreated)
914 {
915 RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
916 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
917 AssertReleaseMsgRCReturn(rc, ("VMMLoggerWrapper not found! rc=%Vra\n", rc), rc);
918
919 RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
920 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
921 AssertReleaseMsgRCReturn(rc, ("VMMLoggerFlush not found! rc=%Vra\n", rc), rc);
922
923 rc = RTLogCreateForR0(&pR0Logger->Logger, pR0Logger->cbLogger,
924 *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush,
925 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
926 AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Vra\n", rc), rc);
927 pR0Logger->fCreated = true;
928 }
929
930 rc = RTLogCopyGroupsAndFlags(&pR0Logger->Logger, NULL /* default */, RTLOGFLAGS_BUFFERED, 0);
931 AssertRC(rc);
932 }
933
934 return rc;
935}
936
937
938/**
939 * Generic switch code relocator.
940 *
941 * @param pVM The VM handle.
942 * @param pSwitcher The switcher definition.
943 * @param pu8CodeR0 Pointer to the core code block for the switcher, ring-0 mapping.
944 * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping.
945 * @param GCPtrCode The guest context address corresponding to pu8Code.
946 * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code.
947 * @param SelCS The hypervisor CS selector.
948 * @param SelDS The hypervisor DS selector.
949 * @param SelTSS The hypervisor TSS selector.
950 * @param GCPtrGDT The GC address of the hypervisor GDT.
951 * @param SelCS64 The 64-bit mode hypervisor CS selector.
952 */
953static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
954 RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
955{
956 union
957 {
958 const uint8_t *pu8;
959 const uint16_t *pu16;
960 const uint32_t *pu32;
961 const uint64_t *pu64;
962 const void *pv;
963 uintptr_t u;
964 } u;
965 u.pv = pSwitcher->pvFixups;
966
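    /*
     * The fixup stream decoded below is a byte-packed sequence of records,
     * each starting with a uint8_t fixup type and a uint32_t source offset
     * into the switcher code, followed for most types by a uint32_t target
     * operand.  FIX_THE_END terminates the stream.
     */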
967 /*
968 * Process fixups.
969 */
970 uint8_t u8;
971 while ((u8 = *u.pu8++) != FIX_THE_END)
972 {
973 /*
974 * Get the source (where to write the fixup).
975 */
976 uint32_t offSrc = *u.pu32++;
977 Assert(offSrc < pSwitcher->cbCode);
978 union
979 {
980 uint8_t *pu8;
981 uint16_t *pu16;
982 uint32_t *pu32;
983 uint64_t *pu64;
984 uintptr_t u;
985 } uSrc;
986 uSrc.pu8 = pu8CodeR3 + offSrc;
987
988 /* The fixup target and method depends on the type. */
989 switch (u8)
990 {
991 /*
992 * 32-bit relative, source in HC and target in GC.
993 */
994 case FIX_HC_2_GC_NEAR_REL:
995 {
996 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
997 uint32_t offTrg = *u.pu32++;
998 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
999 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
1000 break;
1001 }
1002
1003 /*
1004 * 32-bit relative, source in HC and target in ID.
1005 */
1006 case FIX_HC_2_ID_NEAR_REL:
1007 {
1008 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1009 uint32_t offTrg = *u.pu32++;
1010 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1011 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - ((uintptr_t)pu8CodeR0 + offSrc + 4));
1012 break;
1013 }
1014
1015 /*
1016 * 32-bit relative, source in GC and target in HC.
1017 */
1018 case FIX_GC_2_HC_NEAR_REL:
1019 {
1020 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1021 uint32_t offTrg = *u.pu32++;
1022 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1023 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (GCPtrCode + offSrc + 4));
1024 break;
1025 }
1026
1027 /*
1028 * 32-bit relative, source in GC and target in ID.
1029 */
1030 case FIX_GC_2_ID_NEAR_REL:
1031 {
1032 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1033 uint32_t offTrg = *u.pu32++;
1034 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1035 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
1036 break;
1037 }
1038
1039 /*
1040 * 32-bit relative, source in ID and target in HC.
1041 */
1042 case FIX_ID_2_HC_NEAR_REL:
1043 {
1044 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1045 uint32_t offTrg = *u.pu32++;
1046 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1047 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (u32IDCode + offSrc + 4));
1048 break;
1049 }
1050
1051 /*
1052 * 32-bit relative, source in ID and target in GC.
1053 */
1054 case FIX_ID_2_GC_NEAR_REL:
1055 {
1056 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1057 uint32_t offTrg = *u.pu32++;
1058 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1059 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
1060 break;
1061 }
1062
1063 /*
1064 * 16:32 far jump, target in GC.
1065 */
1066 case FIX_GC_FAR32:
1067 {
1068 uint32_t offTrg = *u.pu32++;
1069 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1070 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
1071 *uSrc.pu16++ = SelCS;
1072 break;
1073 }
1074
1075 /*
1076 * Make 32-bit GC pointer given CPUM offset.
1077 */
1078 case FIX_GC_CPUM_OFF:
1079 {
1080 uint32_t offCPUM = *u.pu32++;
1081 Assert(offCPUM < sizeof(pVM->cpum));
1082 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
1083 break;
1084 }
1085
1086 /*
1087 * Make 32-bit GC pointer given VM offset.
1088 */
1089 case FIX_GC_VM_OFF:
1090 {
1091 uint32_t offVM = *u.pu32++;
1092 Assert(offVM < sizeof(VM));
1093 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
1094 break;
1095 }
1096
1097 /*
1098 * Make 32-bit HC pointer given CPUM offset.
1099 */
1100 case FIX_HC_CPUM_OFF:
1101 {
1102 uint32_t offCPUM = *u.pu32++;
1103 Assert(offCPUM < sizeof(pVM->cpum));
1104 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
1105 break;
1106 }
1107
1108 /*
1109 * Make 32-bit R0 pointer given VM offset.
1110 */
1111 case FIX_HC_VM_OFF:
1112 {
1113 uint32_t offVM = *u.pu32++;
1114 Assert(offVM < sizeof(VM));
1115 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
1116 break;
1117 }
1118
1119 /*
1120 * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
1121 */
1122 case FIX_INTER_32BIT_CR3:
1123 {
1124
1125 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
1126 break;
1127 }
1128
1129 /*
1130 * Store the PAE CR3 (32-bit) for the intermediate memory context.
1131 */
1132 case FIX_INTER_PAE_CR3:
1133 {
1134
1135 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
1136 break;
1137 }
1138
1139 /*
1140 * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
1141 */
1142 case FIX_INTER_AMD64_CR3:
1143 {
1144
1145 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
1146 break;
1147 }
1148
1149 /*
1150 * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
1151 */
1152 case FIX_HYPER_32BIT_CR3:
1153 {
1154
1155 *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
1156 break;
1157 }
1158
1159 /*
1160 * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
1161 */
1162 case FIX_HYPER_PAE_CR3:
1163 {
1164
1165 *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
1166 break;
1167 }
1168
1169 /*
1170 * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
1171 */
1172 case FIX_HYPER_AMD64_CR3:
1173 {
1174
1175 *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
1176 break;
1177 }
1178
1179 /*
1180 * Store Hypervisor CS (16-bit).
1181 */
1182 case FIX_HYPER_CS:
1183 {
1184 *uSrc.pu16 = SelCS;
1185 break;
1186 }
1187
1188 /*
1189 * Store Hypervisor DS (16-bit).
1190 */
1191 case FIX_HYPER_DS:
1192 {
1193 *uSrc.pu16 = SelDS;
1194 break;
1195 }
1196
1197 /*
1198 * Store Hypervisor TSS (16-bit).
1199 */
1200 case FIX_HYPER_TSS:
1201 {
1202 *uSrc.pu16 = SelTSS;
1203 break;
1204 }
1205
1206 /*
1207 * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
1208 */
1209 case FIX_GC_TSS_GDTE_DW2:
1210 {
1211 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
1212 *uSrc.pu32 = (uint32_t)GCPtr;
1213 break;
1214 }
1215
1216
1217 ///@todo case FIX_CR4_MASK:
1218 ///@todo case FIX_CR4_OSFSXR:
1219
1220 /*
1221 * Insert relative jump to specified target if FXSAVE/FXRSTOR isn't supported by the CPU.
1222 */
1223 case FIX_NO_FXSAVE_JMP:
1224 {
1225 uint32_t offTrg = *u.pu32++;
1226 Assert(offTrg < pSwitcher->cbCode);
1227 if (!CPUMSupportsFXSR(pVM))
1228 {
1229 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1230 *uSrc.pu32++ = offTrg - (offSrc + 5);
1231 }
1232 else
1233 {
1234 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1235 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1236 }
1237 break;
1238 }
1239
1240 /*
1241 * Insert relative jump to specified target if SYSENTER isn't used by the host.
1242 */
1243 case FIX_NO_SYSENTER_JMP:
1244 {
1245 uint32_t offTrg = *u.pu32++;
1246 Assert(offTrg < pSwitcher->cbCode);
1247 if (!CPUMIsHostUsingSysEnter(pVM))
1248 {
1249 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1250 *uSrc.pu32++ = offTrg - (offSrc + 5);
1251 }
1252 else
1253 {
1254 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1255 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1256 }
1257 break;
1258 }
1259
1260 /*
1261 * Insert relative jump to specified target if SYSCALL isn't used by the host.
1262 */
1263 case FIX_NO_SYSCALL_JMP:
1264 {
1265 uint32_t offTrg = *u.pu32++;
1266 Assert(offTrg < pSwitcher->cbCode);
1267 if (!CPUMIsHostUsingSysEnter(pVM))
1268 {
1269 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1270 *uSrc.pu32++ = offTrg - (offSrc + 5);
1271 }
1272 else
1273 {
1274 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1275 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1276 }
1277 break;
1278 }
1279
1280 /*
1281 * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
1282 */
1283 case FIX_HC_32BIT:
1284 {
1285 uint32_t offTrg = *u.pu32++;
1286 Assert(offSrc < pSwitcher->cbCode);
1287 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1288 *uSrc.pu32 = (uintptr_t)pu8CodeR0 + offTrg;
1289 break;
1290 }
1291
1292#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
1293 /*
1294 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
1295 */
1296 case FIX_HC_64BIT:
1297 {
1298 uint32_t offTrg = *u.pu32++;
1299 Assert(offSrc < pSwitcher->cbCode);
1300 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1301 *uSrc.pu64 = (uintptr_t)pu8CodeR0 + offTrg;
1302 break;
1303 }
1304
1305 /*
1306 * 64-bit HC Code Selector (no argument).
1307 */
1308 case FIX_HC_64BIT_CS:
1309 {
1310 Assert(offSrc < pSwitcher->cbCode);
1311#if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
1312 *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
1313#else
1314 AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
1315#endif
1316 break;
1317 }
1318
1319 /*
1320 * 64-bit HC pointer to the CPUM instance data (no argument).
1321 */
1322 case FIX_HC_64BIT_CPUM:
1323 {
1324 Assert(offSrc < pSwitcher->cbCode);
1325 *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
1326 break;
1327 }
1328#endif
1329
1330 /*
1331 * 32-bit ID pointer to (ID) target within the code (32-bit offset).
1332 */
1333 case FIX_ID_32BIT:
1334 {
1335 uint32_t offTrg = *u.pu32++;
1336 Assert(offSrc < pSwitcher->cbCode);
1337 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1338 *uSrc.pu32 = u32IDCode + offTrg;
1339 break;
1340 }
1341
1342 /*
1343 * 64-bit ID pointer to (ID) target within the code (32-bit offset).
1344 */
1345 case FIX_ID_64BIT:
1346 {
1347 uint32_t offTrg = *u.pu32++;
1348 Assert(offSrc < pSwitcher->cbCode);
1349 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1350 *uSrc.pu64 = u32IDCode + offTrg;
1351 break;
1352 }
1353
1354 /*
1355 * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
1356 */
1357 case FIX_ID_FAR32_TO_64BIT_MODE:
1358 {
1359 uint32_t offTrg = *u.pu32++;
1360 Assert(offSrc < pSwitcher->cbCode);
1361 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1362 *uSrc.pu32++ = u32IDCode + offTrg;
1363 *uSrc.pu16 = SelCS64;
1364 AssertRelease(SelCS64);
1365 break;
1366 }
1367
1368#ifdef VBOX_WITH_NMI
1369 /*
1370 * 32-bit address to the APIC base.
1371 */
1372 case FIX_GC_APIC_BASE_32BIT:
1373 {
1374 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
1375 break;
1376 }
1377#endif
1378
1379 default:
1380 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
1381 break;
1382 }
1383 }
1384
1385#ifdef LOG_ENABLED
1386 /*
1387 * If Log2 is enabled, disassemble the switcher code.
1388 *
1389 * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
1390 */
1391 if (LogIs2Enabled())
1392 {
1393 RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
1394 " pu8CodeR0 = %p\n"
1395 " pu8CodeR3 = %p\n"
1396 " GCPtrCode = %VGv\n"
1397 " u32IDCode = %08x\n"
1398 " pVMGC = %VGv\n"
1399 " pCPUMGC = %VGv\n"
1400 " pVMHC = %p\n"
1401 " pCPUMHC = %p\n"
1402 " GCPtrGDT = %VGv\n"
1403 " InterCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1404 " HyperCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1405 " SelCS = %04x\n"
1406 " SelDS = %04x\n"
1407 " SelCS64 = %04x\n"
1408 " SelTSS = %04x\n",
1409 pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
1410 pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
1411 VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
1412 GCPtrGDT,
1413 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
1414 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
1415 SelCS, SelDS, SelCS64, SelTSS);
1416
1417 uint32_t offCode = 0;
1418 while (offCode < pSwitcher->cbCode)
1419 {
1420 /*
1421 * Figure out where this is.
1422 */
1423 const char *pszDesc = NULL;
1424 RTUINTPTR uBase;
1425 uint32_t cbCode;
1426 if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
1427 {
1428 pszDesc = "HCCode0";
1429 uBase = (RTUINTPTR)pu8CodeR0;
1430 offCode = pSwitcher->offHCCode0;
1431 cbCode = pSwitcher->cbHCCode0;
1432 }
1433 else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
1434 {
1435 pszDesc = "HCCode1";
1436 uBase = (RTUINTPTR)pu8CodeR0;
1437 offCode = pSwitcher->offHCCode1;
1438 cbCode = pSwitcher->cbHCCode1;
1439 }
1440 else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
1441 {
1442 pszDesc = "GCCode";
1443 uBase = GCPtrCode;
1444 offCode = pSwitcher->offGCCode;
1445 cbCode = pSwitcher->cbGCCode;
1446 }
1447 else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
1448 {
1449 pszDesc = "IDCode0";
1450 uBase = u32IDCode;
1451 offCode = pSwitcher->offIDCode0;
1452 cbCode = pSwitcher->cbIDCode0;
1453 }
1454 else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
1455 {
1456 pszDesc = "IDCode1";
1457 uBase = u32IDCode;
1458 offCode = pSwitcher->offIDCode1;
1459 cbCode = pSwitcher->cbIDCode1;
1460 }
1461 else
1462 {
1463 RTLogPrintf(" %04x: %02x '%c' (nowhere)\n",
1464 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1465 offCode++;
1466 continue;
1467 }
1468
1469 /*
1470 * Disassemble it.
1471 */
1472 RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
1473 DISCPUSTATE Cpu;
1474
1475 memset(&Cpu, 0, sizeof(Cpu));
1476 Cpu.mode = CPUMODE_32BIT;
1477 while (cbCode > 0)
1478 {
1479 /* try label it */
1480 if (pSwitcher->offR0HostToGuest == offCode)
1481 RTLogPrintf(" *R0HostToGuest:\n");
1482 if (pSwitcher->offGCGuestToHost == offCode)
1483 RTLogPrintf(" *GCGuestToHost:\n");
1484 if (pSwitcher->offGCCallTrampoline == offCode)
1485 RTLogPrintf(" *GCCallTrampoline:\n");
1486 if (pSwitcher->offGCGuestToHostAsm == offCode)
1487 RTLogPrintf(" *GCGuestToHostAsm:\n");
1488 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
1489 RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
1490 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
1491 RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
1492
1493 /* disas */
1494 uint32_t cbInstr = 0;
1495 char szDisas[256];
1496 if (RT_SUCCESS(DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas)))
1497 RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
1498 else
1499 {
1500 RTLogPrintf(" %04x: %02x '%c'\n",
1501 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1502 cbInstr = 1;
1503 }
1504 offCode += cbInstr;
1505 cbCode -= RT_MIN(cbInstr, cbCode);
1506 }
1507 }
1508 }
1509#endif
1510}
1511
1512
1513/**
1514 * Relocator for the 32-Bit to 32-Bit world switcher.
1515 */
1516DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1517{
1518 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1519 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1520}
1521
1522
1523/**
1524 * Relocator for the 32-Bit to PAE world switcher.
1525 */
1526DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1527{
1528 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1529 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1530}
1531
1532
1533/**
1534 * Relocator for the PAE to 32-Bit world switcher.
1535 */
1536DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1537{
1538 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1539 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1540}
1541
1542
1543/**
1544 * Relocator for the PAE to PAE world switcher.
1545 */
1546DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1547{
1548 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1549 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1550}
1551
1552
1553/**
1554 * Relocator for the AMD64 to PAE world switcher.
1555 */
1556DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1557{
1558 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1559 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
1560}
1561
1562
1563/**
1564 * Gets the pointer to g_szRTAssertMsg1 in GC.
1565 * @returns Pointer to VMMGC::g_szRTAssertMsg1.
1566 * Returns NULL if not present.
1567 * @param pVM The VM handle.
1568 */
1569VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM)
1570{
1571 RTGCPTR32 GCPtr;
1572 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg1", &GCPtr);
1573 if (VBOX_SUCCESS(rc))
1574 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1575 return NULL;
1576}
1577
1578
1579/**
1580 * Gets the pointer to g_szRTAssertMsg2 in GC.
1581 * @returns Pointer to VMMGC::g_szRTAssertMsg2.
1582 * Returns NULL if not present.
1583 * @param pVM The VM handle.
1584 */
1585VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM)
1586{
1587 RTGCPTR32 GCPtr;
1588 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg2", &GCPtr);
1589 if (VBOX_SUCCESS(rc))
1590 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1591 return NULL;
1592}
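
/*
 * Example use of the two accessors above, mirroring what VMMR3FatalDump()
 * does further down in this file (sketch only):
 * @code
 *      const char *pszMsg1 = VMMR3GetGCAssertMsg1(pVM);
 *      const char *pszMsg2 = VMMR3GetGCAssertMsg2(pVM);
 *      if (pszMsg1 && pszMsg2)
 *          RTLogPrintf("%s%s", pszMsg1, pszMsg2);
 * @endcode
 */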
1593
1594
1595/**
1596 * Execute state save operation.
1597 *
1598 * @returns VBox status code.
1599 * @param pVM VM Handle.
1600 * @param pSSM SSM operation handle.
1601 */
1602static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1603{
1604 LogFlow(("vmmR3Save:\n"));
1605
1606 /*
1607 * The hypervisor stack.
1608 */
1609 SSMR3PutGCPtr(pSSM, pVM->vmm.s.pbGCStackBottom);
1610 RTGCPTR GCPtrESP = CPUMGetHyperESP(pVM);
1611 Assert(pVM->vmm.s.pbGCStackBottom - GCPtrESP <= VMM_STACK_SIZE);
1612 SSMR3PutGCPtr(pSSM, GCPtrESP);
1613 SSMR3PutMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1614 return SSMR3PutU32(pSSM, ~0); /* terminator */
1615}
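
/*
 * For reference, the state unit written above has the following layout.
 * This is just a summary of the code, not an independent format spec:
 * @code
 *      RTGCPTR     GCPtrStackBottom;        // pVM->vmm.s.pbGCStackBottom
 *      RTGCPTR     GCPtrESP;                // hypervisor ESP at save time
 *      uint8_t     abStack[VMM_STACK_SIZE]; // raw copy of the hypervisor stack
 *      uint32_t    u32Terminator;           // ~0U
 * @endcode
 */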
1616
1617
1618/**
1619 * Execute state load operation.
1620 *
1621 * @returns VBox status code.
1622 * @param pVM VM Handle.
1623 * @param pSSM SSM operation handle.
1624 * @param u32Version Data layout version.
1625 */
1626static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1627{
1628 LogFlow(("vmmR3Load:\n"));
1629
1630 /*
1631 * Validate version.
1632 */
1633 if (u32Version != VMM_SAVED_STATE_VERSION)
1634 {
1635 Log(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
1636 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1637 }
1638
1639 /*
1640     * Check that the stack is in the same place, or that it's fairly empty.
1641 */
1642 RTGCPTR GCPtrStackBottom;
1643 SSMR3GetGCPtr(pSSM, &GCPtrStackBottom);
1644 RTGCPTR GCPtrESP;
1645 int rc = SSMR3GetGCPtr(pSSM, &GCPtrESP);
1646 if (VBOX_FAILURE(rc))
1647 return rc;
1648 if ( GCPtrStackBottom == pVM->vmm.s.pbGCStackBottom
1649        ||  (GCPtrStackBottom - GCPtrESP < 32)) /** @todo This will break if we start preempting the hypervisor. */
1650 {
1651 /*
1652 * We *must* set the ESP because the CPUM load + PGM load relocations will render
1653 * the ESP in CPUM fatally invalid.
1654 */
1655 CPUMSetHyperESP(pVM, GCPtrESP);
1656
1657 /* restore the stack. */
1658 SSMR3GetMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1659
1660 /* terminator */
1661 uint32_t u32;
1662 rc = SSMR3GetU32(pSSM, &u32);
1663 if (VBOX_FAILURE(rc))
1664 return rc;
1665 if (u32 != ~0U)
1666 {
1667 AssertMsgFailed(("u32=%#x\n", u32));
1668 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1669 }
1670 return VINF_SUCCESS;
1671 }
1672
1673 LogRel(("The stack is not in the same place and it's not empty! GCPtrStackBottom=%VGv pbGCStackBottom=%VGv ESP=%VGv\n",
1674 GCPtrStackBottom, pVM->vmm.s.pbGCStackBottom, GCPtrESP));
1675 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
1676 return VINF_SUCCESS; /* ignore this */
1677 AssertFailed();
1678 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1679}
1680
1681
1682/**
1683 * Selects the switcher to be used for switching to GC.
1684 *
1685 * @returns VBox status code.
1686 * @param pVM VM handle.
1687 * @param enmSwitcher The new switcher.
1688 * @remark This function may be called before the VMM is initialized.
1689 */
1690VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
1691{
1692 /*
1693 * Validate input.
1694 */
1695 if ( enmSwitcher < VMMSWITCHER_INVALID
1696 || enmSwitcher >= VMMSWITCHER_MAX)
1697 {
1698 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
1699 return VERR_INVALID_PARAMETER;
1700 }
1701
1702 /* Do nothing if the switcher is disabled. */
1703 if (pVM->vmm.s.fSwitcherDisabled)
1704 return VINF_SUCCESS;
1705
1706 /*
1707 * Select the new switcher.
1708 */
1709 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
1710 if (pSwitcher)
1711 {
1712 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
1713 pVM->vmm.s.enmSwitcher = enmSwitcher;
1714
1715 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvHCCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvHCCoreCodeR0 type */
1716 pVM->vmm.s.pfnR0HostToGuest = pbCodeR0 + pSwitcher->offR0HostToGuest;
1717
1718 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[enmSwitcher];
1719 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
1720 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
1721 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
1722 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
1723 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
1724 return VINF_SUCCESS;
1725 }
1726 return VERR_NOT_IMPLEMENTED;
1727}
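
/*
 * Minimal usage sketch for the function above; VMMSWITCHER_PAE_TO_PAE is
 * assumed to be a valid VMMSWITCHER enumerator matching the PAE-to-PAE
 * relocator earlier in this file:
 * @code
 *      int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_PAE_TO_PAE);
 *      AssertRC(rc);
 * @endcode
 */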
1728
1729/**
1730 * Disable the switcher logic permanently.
1731 *
1732 * @returns VBox status code.
1733 * @param pVM VM handle.
1734 */
1735VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
1736{
1737/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
1738 * @code
1739 * mov eax, VERR_INTERNAL_ERROR
1740 * ret
1741 * @endcode
1742 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
1743 */
1744 pVM->vmm.s.fSwitcherDisabled = true;
1745 return VINF_SUCCESS;
1746}
1747
1748
1749/**
1750 * Resolve a builtin GC symbol.
1751 * Called by PDM when loading or relocating GC modules.
1752 *
1753 * @returns VBox status code.
1754 * @param pVM VM Handle.
1755 * @param   pszSymbol       Symbol to resolve.
1756 * @param pGCPtrValue Where to store the symbol value.
1757 * @remark This has to work before VMMR3Relocate() is called.
1758 */
1759VMMR3DECL(int) VMMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
1760{
1761 if (!strcmp(pszSymbol, "g_Logger"))
1762 {
1763 if (pVM->vmm.s.pLoggerHC)
1764 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
1765 *pGCPtrValue = pVM->vmm.s.pLoggerGC;
1766 }
1767 else if (!strcmp(pszSymbol, "g_RelLogger"))
1768 {
1769#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1770 if (pVM->vmm.s.pRelLoggerHC)
1771 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
1772 *pGCPtrValue = pVM->vmm.s.pRelLoggerGC;
1773#else
1774 *pGCPtrValue = NIL_RTGCPTR;
1775#endif
1776 }
1777 else
1778 return VERR_SYMBOL_NOT_FOUND;
1779 return VINF_SUCCESS;
1780}
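
/*
 * Example: resolving one of the built-in symbols handled above (sketch only):
 * @code
 *      RTGCPTR GCPtrLogger;
 *      int rc = VMMR3GetImportGC(pVM, "g_Logger", &GCPtrLogger);
 *      AssertRC(rc);
 * @endcode
 */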
1781
1782
1783/**
1784 * Suspends the CPU yielder.
1785 *
1786 * @param pVM The VM handle.
1787 */
1788VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
1789{
1790 if (!pVM->vmm.s.cYieldResumeMillies)
1791 {
1792 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1793 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1794 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1795 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1796 else
1797 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1798 TMTimerStop(pVM->vmm.s.pYieldTimer);
1799 }
1800 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1801}
1802
1803
1804/**
1805 * Stops the CPU yielder.
1806 *
1807 * @param pVM The VM handle.
1808 */
1809VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
1810{
1811 if (!pVM->vmm.s.cYieldResumeMillies)
1812 TMTimerStop(pVM->vmm.s.pYieldTimer);
1813 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1814 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1815}
1816
1817
1818/**
1819 * Resumes the CPU yielder when it has been suspended or stopped.
1820 *
1821 * @param pVM The VM handle.
1822 */
1823VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
1824{
1825 if (pVM->vmm.s.cYieldResumeMillies)
1826 {
1827 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1828 pVM->vmm.s.cYieldResumeMillies = 0;
1829 }
1830}
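
/*
 * The suspend/stop/resume trio above is meant to be used in pairs around
 * code during which the EMT yield timer must be quiet. A sketch of the
 * assumed calling pattern (the actual callers are not in this file):
 * @code
 *      VMMR3YieldSuspend(pVM);   // or VMMR3YieldStop(pVM)
 *      // ... work that should not be interrupted by yielding ...
 *      VMMR3YieldResume(pVM);
 * @endcode
 */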
1831
1832
1833/**
1834 * Internal timer callback function.
1835 *
1836 * @param pVM The VM.
1837 * @param pTimer The timer handle.
1838 * @param pvUser User argument specified upon timer creation.
1839 */
1840static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1841{
1842 /*
1843     * This really needs some careful tuning. While we shouldn't be too greedy since
1844     * that'll cause the rest of the system to stall, we shouldn't be too nice either
1845     * because that'll cause us to stall.
1846 *
1847 * The current logic is to use the default interval when there is no lag worth
1848 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1849 *
1850     * (This depends on TMCLOCK_VIRTUAL_SYNC being scheduled before TMCLOCK_REAL
1851     * so the lag is up to date.)
1852 */
1853 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1854 if ( u64Lag < 50000000 /* 50ms */
1855 || ( u64Lag < 1000000000 /* 1s */
1856 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1857 )
1858 {
1859 uint64_t u64Elapsed = RTTimeNanoTS();
1860 pVM->vmm.s.u64LastYield = u64Elapsed;
1861
1862 RTThreadYield();
1863
1864#ifdef LOG_ENABLED
1865 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1866 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1867#endif
1868 }
1869 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1870}
1871
1872
1873/**
1874 * Acquire global VM lock.
1875 *
1876 * @returns VBox status code
1877 * @param pVM The VM to operate on.
1878 */
1879VMMR3DECL(int) VMMR3Lock(PVM pVM)
1880{
1881 return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
1882}
1883
1884
1885/**
1886 * Release global VM lock.
1887 *
1888 * @returns VBox status code
1889 * @param pVM The VM to operate on.
1890 */
1891VMMR3DECL(int) VMMR3Unlock(PVM pVM)
1892{
1893 return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
1894}
1895
1896
1897/**
1898 * Return global VM lock owner.
1899 *
1900 * @returns Thread id of owner.
1901 * @returns NIL_RTTHREAD if no owner.
1902 * @param pVM The VM to operate on.
1903 */
1904VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
1905{
1906 return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
1907}
1908
1909
1910/**
1911 * Checks if the current thread is the owner of the global VM lock.
1912 *
1913 * @returns true if owner.
1914 * @returns false if not owner.
1915 * @param pVM The VM to operate on.
1916 */
1917VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
1918{
1919 return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
1920}
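
/*
 * Sketch of the intended usage of the lock API above; it is a plain
 * enter/leave pair around whatever needs the global VM lock:
 * @code
 *      int rc = VMMR3Lock(pVM);
 *      AssertRC(rc);
 *      Assert(VMMR3LockIsOwner(pVM));
 *      // ... do the protected work ...
 *      VMMR3Unlock(pVM);
 * @endcode
 */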
1921
1922
1923/**
1924 * Executes guest code.
1925 *
1926 * @param pVM VM handle.
1927 */
1928VMMR3DECL(int) VMMR3RawRunGC(PVM pVM)
1929{
1930 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1931
1932 /*
1933 * Set the EIP and ESP.
1934 */
1935 CPUMSetHyperEIP(pVM, CPUMGetGuestEFlags(pVM) & X86_EFL_VM
1936 ? pVM->vmm.s.pfnCPUMGCResumeGuestV86
1937 : pVM->vmm.s.pfnCPUMGCResumeGuest);
1938 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);
1939
1940 /*
1941 * We hide log flushes (outer) and hypervisor interrupts (inner).
1942 */
1943 for (;;)
1944 {
1945 int rc;
1946 do
1947 {
1948#ifdef NO_SUPCALLR0VMM
1949 rc = VERR_GENERAL_FAILURE;
1950#else
1951 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
1952#endif
1953 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1954
1955 /*
1956 * Flush the logs.
1957 */
1958#ifdef LOG_ENABLED
1959 PRTLOGGERRC pLogger = pVM->vmm.s.pLoggerHC;
1960 if ( pLogger
1961 && pLogger->offScratch > 0)
1962 RTLogFlushGC(NULL, pLogger);
1963#endif
1964#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1965 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRelLoggerHC;
1966 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1967 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
1968#endif
1969 if (rc != VINF_VMM_CALL_HOST)
1970 {
1971 Log2(("VMMR3RawRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1972 return rc;
1973 }
1974 rc = vmmR3ServiceCallHostRequest(pVM);
1975 if (VBOX_FAILURE(rc))
1976 return rc;
1977 /* Resume GC */
1978 }
1979}
1980
1981
1982/**
1983 * Executes guest code (Intel VT-x and AMD-V).
1984 *
1985 * @param pVM VM handle.
1986 */
1987VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM)
1988{
1989 Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1990
1991 for (;;)
1992 {
1993 int rc;
1994 do
1995 {
1996#ifdef NO_SUPCALLR0VMM
1997 rc = VERR_GENERAL_FAILURE;
1998#else
1999 rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN);
2000#endif
2001 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2002
2003#ifdef LOG_ENABLED
2004 /*
2005 * Flush the log
2006 */
2007 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
2008 if ( pR0Logger
2009 && pR0Logger->Logger.offScratch > 0)
2010 RTLogFlushToLogger(&pR0Logger->Logger, NULL);
2011#endif /* LOG_ENABLED */
2012 if (rc != VINF_VMM_CALL_HOST)
2013 {
2014 Log2(("VMMR3HwAccRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
2015 return rc;
2016 }
2017 rc = vmmR3ServiceCallHostRequest(pVM);
2018 if (VBOX_FAILURE(rc) || rc == VINF_EM_DBG_HYPER_ASSERTION)
2019 return rc;
2020 /* Resume R0 */
2021 }
2022}
2023
2024/**
2025 * Calls a GC function.
2026 *
2027 * @param pVM The VM handle.
2028 * @param GCPtrEntry The GC function address.
2029 * @param   cArgs       The number of arguments in the ellipsis (...).
2030 * @param ... Arguments to the function.
2031 */
2032VMMR3DECL(int) VMMR3CallGC(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, ...)
2033{
2034 va_list args;
2035 va_start(args, cArgs);
2036 int rc = VMMR3CallGCV(pVM, GCPtrEntry, cArgs, args);
2037 va_end(args);
2038 return rc;
2039}
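
/*
 * Illustration of a two-argument call. "MyGCFunction" is a hypothetical
 * symbol used purely for this example; PDMR3GetSymbolGC is the same lookup
 * used by VMMR3GetGCAssertMsg1() earlier in this file:
 * @code
 *      RTGCPTR32 GCPtrEntry;
 *      int rc = PDMR3GetSymbolGC(pVM, NULL, "MyGCFunction", &GCPtrEntry);
 *      if (VBOX_SUCCESS(rc))
 *          rc = VMMR3CallGC(pVM, GCPtrEntry, 2, (RTGCUINTPTR)1, (RTGCUINTPTR)2);
 * @endcode
 */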
2040
2041
2042/**
2043 * Calls a GC function.
2044 *
2045 * @param pVM The VM handle.
2046 * @param GCPtrEntry The GC function address.
2047 * @param   cArgs       The number of arguments in the argument list.
2048 * @param args Arguments to the function.
2049 */
2050VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, va_list args)
2051{
2052 Log2(("VMMR3CallGCV: GCPtrEntry=%VGv cArgs=%d\n", GCPtrEntry, cArgs));
2053
2054 /*
2055 * Setup the call frame using the trampoline.
2056 */
2057 CPUMHyperSetCtxCore(pVM, NULL);
2058 memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
2059 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom - cArgs * sizeof(RTGCUINTPTR));
2060 PRTGCUINTPTR pFrame = (PRTGCUINTPTR)(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE) - cArgs;
2061 int i = cArgs;
2062 while (i-- > 0)
2063 *pFrame++ = va_arg(args, RTGCUINTPTR);
2064
2065 CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR)); /* stack frame size */
2066 CPUMPushHyper(pVM, GCPtrEntry); /* what to call */
2067 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2068
2069 /*
2070 * We hide log flushes (outer) and hypervisor interrupts (inner).
2071 */
2072 for (;;)
2073 {
2074 int rc;
2075 do
2076 {
2077#ifdef NO_SUPCALLR0VMM
2078 rc = VERR_GENERAL_FAILURE;
2079#else
2080 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2081#endif
2082 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2083
2084 /*
2085 * Flush the logs.
2086 */
2087#ifdef LOG_ENABLED
2088 PRTLOGGERRC pLogger = pVM->vmm.s.pLoggerHC;
2089 if ( pLogger
2090 && pLogger->offScratch > 0)
2091 RTLogFlushGC(NULL, pLogger);
2092#endif
2093#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2094 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2095 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2096 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2097#endif
2098 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2099 VMMR3FatalDump(pVM, rc);
2100 if (rc != VINF_VMM_CALL_HOST)
2101 {
2102 Log2(("VMMR3CallGCV: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
2103 return rc;
2104 }
2105 rc = vmmR3ServiceCallHostRequest(pVM);
2106 if (VBOX_FAILURE(rc))
2107 return rc;
2108 }
2109}
2110
2111
2112/**
2113 * Resumes executing hypervisor code when interrupted
2114 * by a queue flush or a debug event.
2115 *
2116 * @returns VBox status code.
2117 * @param pVM VM handle.
2118 */
2119VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM)
2120{
2121 Log(("VMMR3ResumeHyper: eip=%VGv esp=%VGv\n", CPUMGetHyperEIP(pVM), CPUMGetHyperESP(pVM)));
2122
2123 /*
2124 * We hide log flushes (outer) and hypervisor interrupts (inner).
2125 */
2126 for (;;)
2127 {
2128 int rc;
2129 do
2130 {
2131#ifdef NO_SUPCALLR0VMM
2132 rc = VERR_GENERAL_FAILURE;
2133#else
2134 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2135#endif
2136 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2137
2138 /*
2139         * Flush the loggers.
2140 */
2141#ifdef LOG_ENABLED
2142 PRTLOGGERRC pLogger = pVM->vmm.s.pLoggerHC;
2143 if ( pLogger
2144 && pLogger->offScratch > 0)
2145 RTLogFlushGC(NULL, pLogger);
2146#endif
2147#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2148 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2149 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2150 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2151#endif
2152 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2153 VMMR3FatalDump(pVM, rc);
2154 if (rc != VINF_VMM_CALL_HOST)
2155 {
2156 Log(("VMMR3ResumeHyper: returns %Vrc\n", rc));
2157 return rc;
2158 }
2159 rc = vmmR3ServiceCallHostRequest(pVM);
2160 if (VBOX_FAILURE(rc))
2161 return rc;
2162 }
2163}
2164
2165
2166/**
2167 * Service a call to the ring-3 host code.
2168 *
2169 * @returns VBox status code.
2170 * @param pVM VM handle.
2171 * @remark Careful with critsects.
2172 */
2173static int vmmR3ServiceCallHostRequest(PVM pVM)
2174{
2175 switch (pVM->vmm.s.enmCallHostOperation)
2176 {
2177 /*
2178 * Acquire the PDM lock.
2179 */
2180 case VMMCALLHOST_PDM_LOCK:
2181 {
2182 pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
2183 break;
2184 }
2185
2186 /*
2187 * Flush a PDM queue.
2188 */
2189 case VMMCALLHOST_PDM_QUEUE_FLUSH:
2190 {
2191 PDMR3QueueFlushWorker(pVM, NULL);
2192 pVM->vmm.s.rcCallHost = VINF_SUCCESS;
2193 break;
2194 }
2195
2196 /*
2197 * Grow the PGM pool.
2198 */
2199 case VMMCALLHOST_PGM_POOL_GROW:
2200 {
2201 pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
2202 break;
2203 }
2204
2205 /*
2206         * Maps a page allocation chunk into ring-3 so ring-0 can use it.
2207 */
2208 case VMMCALLHOST_PGM_MAP_CHUNK:
2209 {
2210 pVM->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVM->vmm.s.u64CallHostArg);
2211 break;
2212 }
2213
2214 /*
2215 * Allocates more handy pages.
2216 */
2217 case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
2218 {
2219 pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
2220 break;
2221 }
2222#ifndef VBOX_WITH_NEW_PHYS_CODE
2223
2224 case VMMCALLHOST_PGM_RAM_GROW_RANGE:
2225 {
2226 const RTGCPHYS GCPhys = pVM->vmm.s.u64CallHostArg;
2227 pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, &GCPhys);
2228 break;
2229 }
2230#endif
2231
2232 /*
2233 * Acquire the PGM lock.
2234 */
2235 case VMMCALLHOST_PGM_LOCK:
2236 {
2237 pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
2238 break;
2239 }
2240
2241 /*
2242 * Flush REM handler notifications.
2243 */
2244 case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
2245 {
2246 REMR3ReplayHandlerNotifications(pVM);
2247 break;
2248 }
2249
2250 /*
2251 * This is a noop. We just take this route to avoid unnecessary
2252 * tests in the loops.
2253 */
2254 case VMMCALLHOST_VMM_LOGGER_FLUSH:
2255 break;
2256
2257 /*
2258 * Set the VM error message.
2259 */
2260 case VMMCALLHOST_VM_SET_ERROR:
2261 VMR3SetErrorWorker(pVM);
2262 break;
2263
2264 /*
2265 * Set the VM runtime error message.
2266 */
2267 case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
2268 VMR3SetRuntimeErrorWorker(pVM);
2269 break;
2270
2271 /*
2272         * Signal a ring-0 hypervisor assertion.
2273 * Cancel the longjmp operation that's in progress.
2274 */
2275 case VMMCALLHOST_VM_R0_HYPER_ASSERTION:
2276 pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
2277 pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call = false;
2278#ifdef RT_ARCH_X86
2279 pVM->vmm.s.CallHostR0JmpBuf.eip = 0;
2280#else
2281 pVM->vmm.s.CallHostR0JmpBuf.rip = 0;
2282#endif
2283 return VINF_EM_DBG_HYPER_ASSERTION;
2284
2285 default:
2286 AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
2287 return VERR_INTERNAL_ERROR;
2288 }
2289
2290 pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
2291 return VINF_SUCCESS;
2292}
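
/*
 * For context: the ring-0/GC side is assumed to set enmCallHostOperation
 * (plus u64CallHostArg where applicable) before returning VINF_VMM_CALL_HOST
 * to the run loops above, and to pick the result up from rcCallHost after
 * resuming. A conceptual sketch, not the actual ring-0 code:
 * @code
 *      pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_PGM_LOCK;
 *      pVM->vmm.s.u64CallHostArg       = 0;
 *      // ... return VINF_VMM_CALL_HOST to ring-3, which calls the function above ...
 *      int rc = pVM->vmm.s.rcCallHost;
 * @endcode
 */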
2293
2294
2295
2296/**
2297 * Structure to pass to DBGFR3Info() and for doing all other
2298 * output during a fatal dump.
2299 */
2300typedef struct VMMR3FATALDUMPINFOHLP
2301{
2302 /** The helper core. */
2303 DBGFINFOHLP Core;
2304 /** The release logger instance. */
2305 PRTLOGGER pRelLogger;
2306 /** The saved release logger flags. */
2307 RTUINT fRelLoggerFlags;
2308 /** The logger instance. */
2309 PRTLOGGER pLogger;
2310 /** The saved logger flags. */
2311 RTUINT fLoggerFlags;
2312 /** The saved logger destination flags. */
2313 RTUINT fLoggerDestFlags;
2314 /** Whether to output to stderr or not. */
2315 bool fStdErr;
2316} VMMR3FATALDUMPINFOHLP, *PVMMR3FATALDUMPINFOHLP;
2317typedef const VMMR3FATALDUMPINFOHLP *PCVMMR3FATALDUMPINFOHLP;
2318
2319
2320/**
2321 * Print formatted string.
2322 *
2323 * @param pHlp Pointer to this structure.
2324 * @param pszFormat The format string.
2325 * @param ... Arguments.
2326 */
2327static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
2328{
2329 va_list args;
2330 va_start(args, pszFormat);
2331 pHlp->pfnPrintfV(pHlp, pszFormat, args);
2332 va_end(args);
2333}
2334
2335
2336/**
2337 * Print formatted string.
2338 *
2339 * @param pHlp Pointer to this structure.
2340 * @param pszFormat The format string.
2341 * @param args Argument list.
2342 */
2343static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
2344{
2345 PCVMMR3FATALDUMPINFOHLP pMyHlp = (PCVMMR3FATALDUMPINFOHLP)pHlp;
2346
2347 if (pMyHlp->pRelLogger)
2348 {
2349 va_list args2;
2350 va_copy(args2, args);
2351 RTLogLoggerV(pMyHlp->pRelLogger, pszFormat, args2);
2352 va_end(args2);
2353 }
2354 if (pMyHlp->pLogger)
2355 {
2356 va_list args2;
2357 va_copy(args2, args);
2358        RTLogLoggerV(pMyHlp->pLogger, pszFormat, args2); /* consume the va_copy so 'args' stays usable for the next target */
2359 va_end(args2);
2360 }
2361 if (pMyHlp->fStdErr)
2362 {
2363 va_list args2;
2364 va_copy(args2, args);
2365        RTStrmPrintfV(g_pStdErr, pszFormat, args2); /* ditto: consume the copy, not 'args' itself */
2366 va_end(args2);
2367 }
2368}
2369
2370
2371/**
2372 * Initializes the fatal dump output helper.
2373 *
2374 * @param pHlp The structure to initialize.
2375 */
2376static void vmmR3FatalDumpInfoHlpInit(PVMMR3FATALDUMPINFOHLP pHlp)
2377{
2378 memset(pHlp, 0, sizeof(*pHlp));
2379
2380 pHlp->Core.pfnPrintf = vmmR3FatalDumpInfoHlp_pfnPrintf;
2381 pHlp->Core.pfnPrintfV = vmmR3FatalDumpInfoHlp_pfnPrintfV;
2382
2383 /*
2384 * The loggers.
2385 */
2386 pHlp->pRelLogger = RTLogRelDefaultInstance();
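    /* With LOG_ENABLED the debug logger is always picked up as well; otherwise
       it only serves as a fallback when no release logger is installed. */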
2387#ifndef LOG_ENABLED
2388 if (!pHlp->pRelLogger)
2389#endif
2390 pHlp->pLogger = RTLogDefaultInstance();
2391
2392 if (pHlp->pRelLogger)
2393 {
2394 pHlp->fRelLoggerFlags = pHlp->pRelLogger->fFlags;
2395 pHlp->pRelLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2396 }
2397
2398 if (pHlp->pLogger)
2399 {
2400 pHlp->fLoggerFlags = pHlp->pLogger->fFlags;
2401 pHlp->fLoggerDestFlags = pHlp->pLogger->fDestFlags;
2402 pHlp->pLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2403#ifndef DEBUG_sandervl
2404 pHlp->pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
2405#endif
2406 }
2407
2408 /*
2409     * Check if we need to write to stderr.
2410 */
2411#ifdef DEBUG_sandervl
2412 pHlp->fStdErr = false; /* takes too long to display here */
2413#else
2414 pHlp->fStdErr = (!pHlp->pRelLogger || !(pHlp->pRelLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)))
2415 && (!pHlp->pLogger || !(pHlp->pLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)));
2416#endif
2417}
2418
2419
2420/**
2421 * Deletes the fatal dump output helper.
2422 *
2423 * @param pHlp The structure to delete.
2424 */
2425static void vmmR3FatalDumpInfoHlpDelete(PVMMR3FATALDUMPINFOHLP pHlp)
2426{
2427 if (pHlp->pRelLogger)
2428 {
2429 RTLogFlush(pHlp->pRelLogger);
2430 pHlp->pRelLogger->fFlags = pHlp->fRelLoggerFlags;
2431 }
2432
2433 if (pHlp->pLogger)
2434 {
2435 RTLogFlush(pHlp->pLogger);
2436 pHlp->pLogger->fFlags = pHlp->fLoggerFlags;
2437 pHlp->pLogger->fDestFlags = pHlp->fLoggerDestFlags;
2438 }
2439}
2440
2441
2442/**
2443 * Dumps the VM state on a fatal error.
2444 *
2445 * @param pVM VM Handle.
2446 * @param rcErr VBox status code.
2447 */
2448VMMR3DECL(void) VMMR3FatalDump(PVM pVM, int rcErr)
2449{
2450 /*
2451 * Create our output helper and sync it with the log settings.
2452 * This helper will be used for all the output.
2453 */
2454 VMMR3FATALDUMPINFOHLP Hlp;
2455 PCDBGFINFOHLP pHlp = &Hlp.Core;
2456 vmmR3FatalDumpInfoHlpInit(&Hlp);
2457
2458 /*
2459 * Header.
2460 */
2461 pHlp->pfnPrintf(pHlp,
2462 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
2463 "!!\n"
2464 "!! Guru Meditation %d (%Vrc)\n"
2465 "!!\n",
2466 rcErr, rcErr);
2467
2468 /*
2469 * Continue according to context.
2470 */
2471 bool fDoneHyper = false;
2472 switch (rcErr)
2473 {
2474 /*
2475         * Hypervisor errors.
2476 */
2477 case VINF_EM_DBG_HYPER_ASSERTION:
2478 pHlp->pfnPrintf(pHlp, "%s%s!!\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
2479 /* fall thru */
2480 case VERR_TRPM_DONT_PANIC:
2481 case VERR_TRPM_PANIC:
2482 case VINF_EM_RAW_STALE_SELECTOR:
2483 case VINF_EM_RAW_IRET_TRAP:
2484 case VINF_EM_DBG_HYPER_BREAKPOINT:
2485 case VINF_EM_DBG_HYPER_STEPPED:
2486 {
2487 /* Trap? */
2488 uint32_t uEIP = CPUMGetHyperEIP(pVM);
2489 TRPMEVENT enmType;
2490 uint8_t u8TrapNo = 0xce;
2491 RTGCUINT uErrorCode = 0xdeadface;
2492 RTGCUINTPTR uCR2 = 0xdeadface;
2493 int rc2 = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
2494 if (VBOX_SUCCESS(rc2))
2495 pHlp->pfnPrintf(pHlp,
2496 "!! TRAP=%02x ERRCD=%VGv CR2=%VGv EIP=%VGv Type=%d\n",
2497 u8TrapNo, uErrorCode, uCR2, uEIP, enmType);
2498 else
2499 pHlp->pfnPrintf(pHlp,
2500 "!! EIP=%VGv NOTRAP\n",
2501 uEIP);
2502
2503 /*
2504             * Try to figure out where EIP is.
2505 */
2506 /** @todo make query call for core code or move this function to VMM. */
2507 /* core code? */
2508 //if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode < pVM->vmm.s.cbCoreCode)
2509 // pHlp->pfnPrintf(pHlp,
2510 // "!! EIP is in CoreCode, offset %#x\n",
2511 // uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode);
2512 //else
2513 { /* ask PDM */
2514 /** @todo ask DBGFR3Sym later. */
2515 char szModName[64];
2516 RTGCPTR GCPtrMod;
2517 char szNearSym1[260];
2518 RTGCPTR GCPtrNearSym1;
2519 char szNearSym2[260];
2520 RTGCPTR GCPtrNearSym2;
2521 int rc = PDMR3QueryModFromEIP(pVM, uEIP,
2522 &szModName[0], sizeof(szModName), &GCPtrMod,
2523 &szNearSym1[0], sizeof(szNearSym1), &GCPtrNearSym1,
2524 &szNearSym2[0], sizeof(szNearSym2), &GCPtrNearSym2);
2525 if (VBOX_SUCCESS(rc))
2526 {
2527 pHlp->pfnPrintf(pHlp,
2528 "!! EIP in %s (%VGv) at rva %x near symbols:\n"
2529 "!! %VGv rva %VGv off %08x %s\n"
2530 "!! %VGv rva %VGv off -%08x %s\n",
2531 szModName, GCPtrMod, (unsigned)(uEIP - GCPtrMod),
2532 GCPtrNearSym1, GCPtrNearSym1 - GCPtrMod, (unsigned)(uEIP - GCPtrNearSym1), szNearSym1,
2533 GCPtrNearSym2, GCPtrNearSym2 - GCPtrMod, (unsigned)(GCPtrNearSym2 - uEIP), szNearSym2);
2534 }
2535 else
2536 pHlp->pfnPrintf(pHlp,
2537 "!! EIP is not in any code known to VMM!\n");
2538 }
2539
2540 /* Disassemble the instruction. */
2541 char szInstr[256];
2542 rc2 = DBGFR3DisasInstrEx(pVM, 0, 0, DBGF_DISAS_FLAGS_CURRENT_HYPER, &szInstr[0], sizeof(szInstr), NULL);
2543 if (VBOX_SUCCESS(rc2))
2544 pHlp->pfnPrintf(pHlp,
2545 "!! %s\n", szInstr);
2546
2547 /* Dump the hypervisor cpu state. */
2548 pHlp->pfnPrintf(pHlp,
2549 "!!\n"
2550 "!!\n"
2551 "!!\n");
2552 rc2 = DBGFR3Info(pVM, "cpumhyper", "verbose", pHlp);
2553 fDoneHyper = true;
2554
2555 /* Callstack. */
2556 DBGFSTACKFRAME Frame = {0};
2557 rc2 = DBGFR3StackWalkBeginHyper(pVM, &Frame);
2558 if (VBOX_SUCCESS(rc2))
2559 {
2560 pHlp->pfnPrintf(pHlp,
2561 "!!\n"
2562 "!! Call Stack:\n"
2563 "!!\n"
2564 "EBP Ret EBP Ret CS:EIP Arg0 Arg1 Arg2 Arg3 CS:EIP Symbol [line]\n");
2565 do
2566 {
2567 pHlp->pfnPrintf(pHlp,
2568 "%08RX32 %08RX32 %04RX32:%08RX32 %08RX32 %08RX32 %08RX32 %08RX32",
2569 (uint32_t)Frame.AddrFrame.off,
2570 (uint32_t)Frame.AddrReturnFrame.off,
2571 (uint32_t)Frame.AddrReturnPC.Sel,
2572 (uint32_t)Frame.AddrReturnPC.off,
2573 Frame.Args.au32[0],
2574 Frame.Args.au32[1],
2575 Frame.Args.au32[2],
2576 Frame.Args.au32[3]);
2577 pHlp->pfnPrintf(pHlp, " %RTsel:%08RGv", Frame.AddrPC.Sel, Frame.AddrPC.off);
2578 if (Frame.pSymPC)
2579 {
2580 RTGCINTPTR offDisp = Frame.AddrPC.FlatPtr - Frame.pSymPC->Value;
2581 if (offDisp > 0)
2582 pHlp->pfnPrintf(pHlp, " %s+%llx", Frame.pSymPC->szName, (int64_t)offDisp);
2583 else if (offDisp < 0)
2584 pHlp->pfnPrintf(pHlp, " %s-%llx", Frame.pSymPC->szName, -(int64_t)offDisp);
2585 else
2586 pHlp->pfnPrintf(pHlp, " %s", Frame.pSymPC->szName);
2587 }
2588 if (Frame.pLinePC)
2589 pHlp->pfnPrintf(pHlp, " [%s @ 0i%d]", Frame.pLinePC->szFilename, Frame.pLinePC->uLineNo);
2590 pHlp->pfnPrintf(pHlp, "\n");
2591
2592 /* next */
2593 rc2 = DBGFR3StackWalkNext(pVM, &Frame);
2594 } while (VBOX_SUCCESS(rc2));
2595 DBGFR3StackWalkEnd(pVM, &Frame);
2596 }
2597
2598 /* raw stack */
2599 pHlp->pfnPrintf(pHlp,
2600 "!!\n"
2601 "!! Raw stack (mind the direction).\n"
2602 "!!\n"
2603 "%.*Vhxd\n",
2604 VMM_STACK_SIZE, (char *)pVM->vmm.s.pbHCStack);
2605 break;
2606 }
2607
2608 default:
2609 {
2610 break;
2611 }
2612
2613 } /* switch (rcErr) */
2614
2615
2616 /*
2617 * Generic info dumper loop.
2618 */
2619 static struct
2620 {
2621 const char *pszInfo;
2622 const char *pszArgs;
2623 } const aInfo[] =
2624 {
2625 { "mappings", NULL },
2626 { "hma", NULL },
2627 { "cpumguest", "verbose" },
2628 { "cpumguestinstr", "verbose" },
2629 { "cpumhyper", "verbose" },
2630 { "cpumhost", "verbose" },
2631 { "mode", "all" },
2632 { "cpuid", "verbose" },
2633 { "gdt", NULL },
2634 { "ldt", NULL },
2635 //{ "tss", NULL },
2636 { "ioport", NULL },
2637 { "mmio", NULL },
2638 { "phys", NULL },
2639 //{ "pgmpd", NULL }, - doesn't always work at init time...
2640 { "timers", NULL },
2641 { "activetimers", NULL },
2642 { "handlers", "phys virt hyper stats" },
2643 { "cfgm", NULL },
2644 };
2645 for (unsigned i = 0; i < ELEMENTS(aInfo); i++)
2646 {
2647 if (fDoneHyper && !strcmp(aInfo[i].pszInfo, "cpumhyper"))
2648 continue;
2649 pHlp->pfnPrintf(pHlp,
2650 "!!\n"
2651 "!! {%s, %s}\n"
2652 "!!\n",
2653 aInfo[i].pszInfo, aInfo[i].pszArgs);
2654 DBGFR3Info(pVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
2655 }
2656
2657 /* done */
2658 pHlp->pfnPrintf(pHlp,
2659 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
2660
2661
2662 /*
2663 * Delete the output instance (flushing and restoring of flags).
2664 */
2665 vmmR3FatalDumpInfoHlpDelete(&Hlp);
2666}
2667
2668
2669
2670/**
2671 * Displays the forced action flags.
2672 *
2673 * @param pVM The VM handle.
2674 * @param pHlp The output helpers.
2675 * @param pszArgs The additional arguments (ignored).
2676 */
2677static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2678{
2679 const uint32_t fForcedActions = pVM->fForcedActions;
2680
2681 pHlp->pfnPrintf(pHlp, "Forced action Flags: %#RX32", fForcedActions);
2682
2683 /* show the flag mnemonics */
2684 int c = 0;
2685 uint32_t f = fForcedActions;
2686#define PRINT_FLAG(flag) do { \
2687 if (f & (flag)) \
2688 { \
2689 static const char *s_psz = #flag; \
2690 if (!(c % 6)) \
2691 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz + 6); \
2692 else \
2693 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
2694 c++; \
2695 f &= ~(flag); \
2696 } \
2697 } while (0)
2698 PRINT_FLAG(VM_FF_INTERRUPT_APIC);
2699 PRINT_FLAG(VM_FF_INTERRUPT_PIC);
2700 PRINT_FLAG(VM_FF_TIMER);
2701 PRINT_FLAG(VM_FF_PDM_QUEUES);
2702 PRINT_FLAG(VM_FF_PDM_DMA);
2703 PRINT_FLAG(VM_FF_PDM_CRITSECT);
2704 PRINT_FLAG(VM_FF_DBGF);
2705 PRINT_FLAG(VM_FF_REQUEST);
2706 PRINT_FLAG(VM_FF_TERMINATE);
2707 PRINT_FLAG(VM_FF_RESET);
2708 PRINT_FLAG(VM_FF_PGM_SYNC_CR3);
2709 PRINT_FLAG(VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
2710 PRINT_FLAG(VM_FF_TRPM_SYNC_IDT);
2711 PRINT_FLAG(VM_FF_SELM_SYNC_TSS);
2712 PRINT_FLAG(VM_FF_SELM_SYNC_GDT);
2713 PRINT_FLAG(VM_FF_SELM_SYNC_LDT);
2714 PRINT_FLAG(VM_FF_INHIBIT_INTERRUPTS);
2715 PRINT_FLAG(VM_FF_CSAM_SCAN_PAGE);
2716 PRINT_FLAG(VM_FF_CSAM_PENDING_ACTION);
2717 PRINT_FLAG(VM_FF_TO_R3);
2718 PRINT_FLAG(VM_FF_DEBUG_SUSPEND);
2719 if (f)
2720 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2721 else
2722 pHlp->pfnPrintf(pHlp, "\n");
2723#undef PRINT_FLAG
2724
2725 /* the groups */
2726 c = 0;
2727#define PRINT_GROUP(grp) do { \
2728 if (fForcedActions & (grp)) \
2729 { \
2730 static const char *s_psz = #grp; \
2731 if (!(c % 5)) \
2732 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : "Groups:\n", s_psz + 6); \
2733 else \
2734 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
2735 c++; \
2736 } \
2737 } while (0)
2738 PRINT_GROUP(VM_FF_EXTERNAL_SUSPENDED_MASK);
2739 PRINT_GROUP(VM_FF_EXTERNAL_HALTED_MASK);
2740 PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_MASK);
2741 PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK);
2742 PRINT_GROUP(VM_FF_HIGH_PRIORITY_POST_MASK);
2743 PRINT_GROUP(VM_FF_NORMAL_PRIORITY_POST_MASK);
2744 PRINT_GROUP(VM_FF_NORMAL_PRIORITY_MASK);
2745 PRINT_GROUP(VM_FF_RESUME_GUEST_MASK);
2746 PRINT_GROUP(VM_FF_ALL_BUT_RAW_MASK);
2747 if (c)
2748 pHlp->pfnPrintf(pHlp, "\n");
2749#undef PRINT_GROUP
2750}
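
/*
 * This info handler is presumably registered during VMM init with something
 * along these lines (a sketch; the name and description strings are
 * assumptions, not taken from this file):
 * @code
 *      rc = DBGFR3InfoRegisterInternal(pVM, "ff", "Displays the set VM forced action flags.", vmmR3InfoFF);
 * @endcode
 */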
2751