VirtualBox

source: vbox/trunk/src/VBox/VMM/VMM.cpp@8542

Last change on this file since 8542 was 8542, checked in by vboxsync, 17 years ago

Try harder to find contiguous memory for the core code (world switchers), since it has somewhat special virtual/physical address requirements and there is no simple way of telling the OS about them.

/* $Id: VMM.cpp 8542 2008-05-02 17:26:30Z vboxsync $ */
/** @file
 * VMM - The Virtual Machine Monitor Core.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

//#define NO_SUPCALLR0VMM

/** @page pg_vmm        VMM - The Virtual Machine Monitor
 *
 * !Revise this! It's already incorrect!
 *
 * The Virtual Machine Monitor (VMM) is the core of the virtual machine. It
 * manages the alternate reality; controlling the virtualization, managing
 * resources, tracking CPU state, its resources and so on...
 *
 * We will split the VMM into smaller entities:
 *
 *      - Virtual Machine Core Monitor (VMCM), whose purpose is to
 *        provide ring and world switching, including routing
 *        interrupts to the host OS and traps to the appropriate trap
 *        handlers. It will implement an external interface for
 *        managing trap handlers.
 *
 *      - CPU Monitor (CM), tracking the state of the CPU (in the alternate
 *        reality) and implementing external interfaces to read and change
 *        the state.
 *
 *      - Memory Monitor (MM), whose purpose is to virtualize physical
 *        pages, segment descriptor tables, interrupt descriptor tables, task
 *        segments, and keep track of all memory, providing external interfaces
 *        to access content and map pages. (Internally split into smaller entities!)
 *
 *      - IO Monitor (IOM), which virtualizes in and out I/O operations. It
 *        interacts with the MM to implement memory mapped I/O. External
 *        interfaces for adding and removing I/O ranges are implemented.
 *
 *      - External Interrupt Monitor (EIM), whose purpose is to manage
 *        interrupts generated by virtual devices. This monitor provides
 *        an interface for raising interrupts which is accessible at any
 *        time and from all threads.
 *        <p>
 *        A subentity of the EIM is the virtual Programmable Interrupt
 *        Controller Device (VPICD), and perhaps a virtual I/O Advanced
 *        Programmable Interrupt Controller Device (VAPICD).
 *
 *      - Direct Memory Access Monitor (DMAM), whose purpose is to support
 *        virtual devices using the DMA controller. Like the EIM interfaces,
 *        its interfaces must be independent and threadable.
 *        <p>
 *        A subentity of the DMAM is a virtual DMA Controller Device (VDMACD).
 *
 *
 * Entities working on a higher level:
 *
 *      - Device Manager (DM), which is a support facility for virtualized
 *        hardware. This provides generic facilities for efficient device
 *        virtualization. It will manage device attaching and detaching,
 *        conversing with EIM and IOM.
 *
 *      - Debugger Facility (DBGF) provides the basic features for
 *        debugging the alternate reality execution.
 *
 *
 *
 * @section pg_vmm_s_use_cases      Use Cases
 *
 * @subsection pg_vmm_s_use_case_boot       Bootstrap
 *
 *  - Basic Init:
 *      - Init SUPDRV.
 *
 *  - Init Virtual Machine Instance:
 *      - Load settings.
 *      - Check resource requirements (memory, com, stuff).
 *
 *  - Init Host Ring 3 part:
 *      - Init Core code.
 *      - Load Pluggable Components.
 *      - Init Pluggable Components.
 *
 *  - Init Host Ring 0 part:
 *      - Load Core (core = core components like VMM, RMI, CA, and so on) code.
 *      - Init Core code.
 *      - Load Pluggable Component code.
 *      - Init Pluggable Component code.
 *
 *  - Allocate first chunk of memory and pin it down. This block of memory
 *    will fit the following pieces:
 *      - Virtual Machine Instance data. (Config, CPU state, VMM state, ++)
 *        (This is available from everywhere (at different addresses though)).
 *      - VMM Guest Context code.
 *      - Pluggable devices Guest Context code.
 *      - Page tables (directory and everything) for the VMM Guest.
 *
 *  - Setup Guest (Ring 0) part:
 *      - Setup initial page tables (i.e. directory all the stuff).
 *      - Load Core Guest Context code.
 *      - Load Pluggable Devices Guest Context code.
 *
 *
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm.h>
#include <VBox/vmapi.h>
#include <VBox/pgm.h>
#include <VBox/cfgm.h>
#include <VBox/pdmqueue.h>
#include <VBox/pdmapi.h>
#include <VBox/cpum.h>
#include <VBox/mm.h>
#include <VBox/iom.h>
#include <VBox/trpm.h>
#include <VBox/selm.h>
#include <VBox/em.h>
#include <VBox/sup.h>
#include <VBox/dbgf.h>
#include <VBox/csam.h>
#include <VBox/patm.h>
#include <VBox/rem.h>
#include <VBox/ssm.h>
#include <VBox/tm.h>
#include "VMMInternal.h"
#include "VMMSwitcher/VMMSwitcher.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/version.h>
#include <VBox/x86.h>
#include <VBox/hwaccm.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/time.h>
#include <iprt/stream.h>
#include <iprt/string.h>
#include <iprt/stdarg.h>
#include <iprt/ctype.h>



/** The saved state version. */
#define VMM_SAVED_STATE_VERSION     3


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
static int vmmR3ServiceCallHostRequest(PVM pVM);
static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Array of switcher definitions.
 * The type and index shall match!
 */
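/* (s_apSwitchers is indexed by VMMSWITCHER values; vmmR3InitCoreCode asserts
   pSwitcher->enmType == iSwitcher for every non-NULL entry. On AMD64 hosts
   only the AMD64-to-PAE switcher is available.) */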
static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
{
    NULL, /* invalid entry */
#ifndef RT_ARCH_AMD64
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    NULL,   //&vmmR3Switcher32BitToAMD64_Def,
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
# ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
    &vmmR3SwitcherAMD64ToPAE_Def,
# else
    NULL,   //&vmmR3SwitcherAMD64ToPAE_Def,
# endif
    NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
#else
    NULL,   //&vmmR3Switcher32BitTo32Bit_Def,
    NULL,   //&vmmR3Switcher32BitToPAE_Def,
    NULL,   //&vmmR3Switcher32BitToAMD64_Def,
    NULL,   //&vmmR3SwitcherPAETo32Bit_Def,
    NULL,   //&vmmR3SwitcherPAEToPAE_Def,
    NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
#endif
};



/**
 * Initializes the core code.
 *
 * This is per-VM core code which might need fixups and/or is, for ease of use,
 * put on linear contiguous backing.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to VM structure.
 */
static int vmmR3InitCoreCode(PVM pVM)
{
    /*
     * Calc the size.
     */
    unsigned cbCoreCode = 0;
    for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
    {
        pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
        if (pSwitcher)
        {
            AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
            cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
        }
    }
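    /* Each switcher gets a 32-byte aligned slot in the contiguous core code
       block; aoffSwitchers[] records where each one starts. */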

    /*
     * Allocate contiguous pages for switchers and deal with
     * conflicts in the intermediate mapping of the code.
     */
    pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
    pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    int rc = VERR_NO_MEMORY;
    if (pVM->vmm.s.pvHCCoreCodeR3)
    {
        rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
        if (rc == VERR_PGM_MAPPINGS_FIX_CONFLICT)
        {
            /* try more allocations. */
            struct
            {
                RTR0PTR  pvR0;
                void    *pvR3;
                RTHCPHYS HCPhys;
                RTUINT   cb;
            } aBadTries[128];
            unsigned i = 0;
            do
            {
                aBadTries[i].pvR3   = pVM->vmm.s.pvHCCoreCodeR3;
                aBadTries[i].pvR0   = pVM->vmm.s.pvHCCoreCodeR0;
                aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                i++;
                pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
                pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
                pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
                if (!pVM->vmm.s.pvHCCoreCodeR3)
                    break;
                rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
            } while (   rc == VERR_PGM_MAPPINGS_FIX_CONFLICT
                     && i < ELEMENTS(aBadTries) - 1);
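            /* The conflicting allocations are kept in aBadTries while
               retrying, forcing SUPContAlloc2 to hand back a different range
               each time; whatever didn't work out is freed in the cleanup
               pass below. */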

            /* cleanup */
            if (VBOX_FAILURE(rc))
            {
                aBadTries[i].pvR3   = pVM->vmm.s.pvHCCoreCodeR3;
                aBadTries[i].pvR0   = pVM->vmm.s.pvHCCoreCodeR0;
                aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                aBadTries[i].cb     = pVM->vmm.s.cbCoreCode;
                i++;
                LogRel(("Failed to allocate and map core code: rc=%Vrc\n", rc));
            }
            while (i-- > 0)
            {
                LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
                        i, aBadTries[i].pvR3, aBadTries[i].pvR0, aBadTries[i].HCPhys));
                SUPContFree(aBadTries[i].pvR3, aBadTries[i].cb >> PAGE_SHIFT);
            }
        }
    }
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Copy the code.
         */
        for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
        {
            PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
            if (pSwitcher)
                memcpy((uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
                       pSwitcher->pvCode, pSwitcher->cbCode);
        }

        /*
         * Map the code into the GC address space.
         */
        rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &pVM->vmm.s.pvGCCoreCode);
        if (VBOX_SUCCESS(rc))
        {
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
            LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VGv Phys=%VHp cb=%#x\n",
                    pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));

            /*
             * Finally, PGM has probably selected a switcher already, but we
             * need to get hold of the addresses, so we'll reselect it.
             * This may legally fail, so we're ignoring the rc.
             */
            VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
            return rc;
        }

        /* shit */
        AssertMsgFailed(("PGMR3Map(,%VGv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
        SUPContFree(pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
    }
    else
        VMSetError(pVM, rc, RT_SRC_POS,
                   N_("Failed to allocate %d bytes of contiguous memory for the world switcher code"),
                   cbCoreCode);

    pVM->vmm.s.pvHCCoreCodeR3 = NULL;
    pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
    pVM->vmm.s.pvGCCoreCode = 0;
    return rc;
}


/**
 * Initializes the VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3Init(PVM pVM)
{
    LogFlow(("VMMR3Init\n"));

    /*
     * Assert alignment, sizes and order.
     */
    AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
    AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
              ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
               sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));

    /*
     * Init basic VM VMM members.
     */
    pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
    int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
        //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
    else
        AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rc), rc);

    /* GC switchers are enabled by default. Turned off by HWACCM. */
    pVM->vmm.s.fSwitcherDisabled = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
                               NULL, vmmR3Save, NULL,
                               NULL, vmmR3Load, NULL);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Register the Ring-0 VM handle with the session for fast ioctl calls.
     */
    rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Init core code.
     */
    rc = vmmR3InitCoreCode(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Allocate & init VMM GC stack.
         * The stack pages are also used by the VMM R0 when VMMR0CallHost is invoked.
         * (The page protection is modified during R3 init completion.)
         */
#ifdef VBOX_STRICT_VMM_STACK
        rc = MMHyperAlloc(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
#else
        rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
#endif
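        /* With VBOX_STRICT_VMM_STACK two extra pages are allocated around
           the stack; they become the guard pages set up in
           VMMR3InitFinalize. */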
        if (VBOX_SUCCESS(rc))
        {
            /* Set HC and GC stack pointers to top of stack. */
            pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = (RTR0PTR)pVM->vmm.s.pbHCStack;
            pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
            pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
            AssertRelease(pVM->vmm.s.pbGCStack);

            /* Set hypervisor ESP. */
            CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStack);

            /*
             * Allocate GC & R0 Logger instances (they are finalized in the relocator).
             */
#ifdef LOG_ENABLED
            PRTLOGGER pLogger = RTLogDefaultInstance();
            if (pLogger)
            {
                pVM->vmm.s.cbLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pLogger->cGroups]);
                rc = MMHyperAlloc(pVM, pVM->vmm.s.cbLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pLoggerHC);
                if (VBOX_SUCCESS(rc))
                {
                    pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);

/*
 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup), so
 * you have to sign up here by adding your defined(DEBUG_<userid>) to the #if.
 *
 * If you want to log in non-debug modes, you'll have to remember to change SUPDRvShared.c
 * to not stub all the log functions.
 *
 * You might also wish to enable the AssertMsg1/2 overrides in VMMR0.cpp when enabling this.
 */
# if defined(DEBUG_sandervl) || defined(DEBUG_frank)
                    rc = MMHyperAlloc(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]),
                                      0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0Logger);
                    if (VBOX_SUCCESS(rc))
                    {
                        pVM->vmm.s.pR0Logger->pVM = pVM->pVMR0;
                        //pVM->vmm.s.pR0Logger->fCreated = false;
                        pVM->vmm.s.pR0Logger->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]);
                    }
# endif
                }
            }
#endif /* LOG_ENABLED */

#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
            /*
             * Allocate GC Release Logger instances (finalized in the relocator).
             */
            if (VBOX_SUCCESS(rc))
            {
                PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
                if (pRelLogger)
                {
                    pVM->vmm.s.cbRelLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pRelLogger->cGroups]);
                    rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRelLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRelLoggerHC);
                    if (VBOX_SUCCESS(rc))
                        pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
                }
            }
#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */

#ifdef VBOX_WITH_NMI
            /*
             * Allocate mapping for the host APIC.
             */
            if (VBOX_SUCCESS(rc))
            {
                rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
                AssertRC(rc);
            }
#endif
            if (VBOX_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock);
                if (VBOX_SUCCESS(rc))
                {
                    /*
                     * Debug info.
                     */
                    DBGFR3InfoRegisterInternal(pVM, "ff", "Displays the current Forced actions Flags.", vmmR3InfoFF);

                    /*
                     * Statistics.
                     */
                    STAM_REG(pVM, &pVM->vmm.s.StatRunGC,                    STAMTYPE_COUNTER, "/VMM/RunGC",                     STAMUNIT_OCCURENCES, "Number of context switches.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetNormal,              STAMTYPE_COUNTER, "/VMM/GCRet/Normal",              STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterrupt,           STAMTYPE_COUNTER, "/VMM/GCRet/Interrupt",           STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptHyper,      STAMTYPE_COUNTER, "/VMM/GCRet/InterruptHyper",      STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetGuestTrap,           STAMTYPE_COUNTER, "/VMM/GCRet/GuestTrap",           STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitch,          STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitch",          STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitchInt,       STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitchInt",       STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetExceptionPrivilege,  STAMTYPE_COUNTER, "/VMM/GCRet/ExceptionPrivilege",  STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetStaleSelector,       STAMTYPE_COUNTER, "/VMM/GCRet/StaleSelector",       STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetIRETTrap,            STAMTYPE_COUNTER, "/VMM/GCRet/IRETTrap",            STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulate,             STAMTYPE_COUNTER, "/VMM/GCRet/Emulate",             STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchEmulate,        STAMTYPE_COUNTER, "/VMM/GCRet/PatchEmulate",        STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetIORead,              STAMTYPE_COUNTER, "/VMM/GCRet/IORead",              STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOWrite,             STAMTYPE_COUNTER, "/VMM/GCRet/IOWrite",             STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIORead,            STAMTYPE_COUNTER, "/VMM/GCRet/MMIORead",            STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOWrite,           STAMTYPE_COUNTER, "/VMM/GCRet/MMIOWrite",           STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOReadWrite,       STAMTYPE_COUNTER, "/VMM/GCRet/MMIOReadWrite",       STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchRead,       STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchRead",       STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchWrite,      STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchWrite",      STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetLDTFault,            STAMTYPE_COUNTER, "/VMM/GCRet/LDTFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetGDTFault,            STAMTYPE_COUNTER, "/VMM/GCRet/GDTFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetIDTFault,            STAMTYPE_COUNTER, "/VMM/GCRet/IDTFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetTSSFault,            STAMTYPE_COUNTER, "/VMM/GCRet/TSSFault",            STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDFault,             STAMTYPE_COUNTER, "/VMM/GCRet/PDFault",             STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetCSAMTask,            STAMTYPE_COUNTER, "/VMM/GCRet/CSAMTask",            STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetSyncCR3,             STAMTYPE_COUNTER, "/VMM/GCRet/SyncCR",              STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetMisc,                STAMTYPE_COUNTER, "/VMM/GCRet/Misc",                STAMUNIT_OCCURENCES, "Number of misc returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchInt3,           STAMTYPE_COUNTER, "/VMM/GCRet/PatchInt3",           STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchPF,             STAMTYPE_COUNTER, "/VMM/GCRet/PatchPF",             STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchGP,             STAMTYPE_COUNTER, "/VMM/GCRet/PatchGP",             STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchIretIRQ,        STAMTYPE_COUNTER, "/VMM/GCRet/PatchIret",           STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPageOverflow,        STAMTYPE_COUNTER, "/VMM/GCRet/InvlpgOverflow",      STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetRescheduleREM,       STAMTYPE_COUNTER, "/VMM/GCRet/ScheduleREM",         STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetToR3,                STAMTYPE_COUNTER, "/VMM/GCRet/ToR3",                STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetTimerPending,        STAMTYPE_COUNTER, "/VMM/GCRet/TimerPending",        STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptPending,    STAMTYPE_COUNTER, "/VMM/GCRet/InterruptPending",    STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetCallHost,            STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/Misc",       STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMGrowRAM,          STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/GrowRAM",    STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMLock,             STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PDMLock",    STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetLogFlush,            STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/LogFlush",   STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMQueueFlush,       STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/QueueFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMPoolGrow,         STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMPoolGrow", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetRemReplay,           STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/REMReplay",  STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetVMSetError,          STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/VMSetError", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMLock,             STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMLock",    STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPATMDuplicateFn,     STAMTYPE_COUNTER, "/VMM/GCRet/PATMDuplicateFn",     STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMChangeMode,       STAMTYPE_COUNTER, "/VMM/GCRet/PGMChangeMode",       STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulHlt,             STAMTYPE_COUNTER, "/VMM/GCRet/EmulHlt",             STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
                    STAM_REG(pVM, &pVM->vmm.s.StatGCRetPendingRequest,      STAMTYPE_COUNTER, "/VMM/GCRet/PendingRequest",      STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");

                    return VINF_SUCCESS;
                }
                AssertRC(rc);
            }
        }
        /** @todo Need failure cleanup. */

        //more todo in here?
        //if (VBOX_SUCCESS(rc))
        //{
        //}
        //int rc2 = vmmR3TermCoreCode(pVM);
        //AssertRC(rc2);
    }

    return rc;
}


/**
 * Ring-3 init finalizing.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
{
#ifdef VBOX_STRICT_VMM_STACK
    /*
     * Two inaccessible pages, one on each side of the stack, to catch over/under-flows.
     */
    memset(pVM->vmm.s.pbHCStack - PAGE_SIZE, 0xcc, PAGE_SIZE);
    PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack - PAGE_SIZE), PAGE_SIZE, 0);
    RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);

    memset(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
    PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack + VMM_STACK_SIZE), PAGE_SIZE, 0);
    RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
#endif

    /*
     * Set page attributes to r/w for stack pages.
     */
    int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbGCStack, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
    AssertRC(rc);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Create the EMT yield timer.
         */
        rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
        if (VBOX_SUCCESS(rc))
            rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
    }
#ifdef VBOX_WITH_NMI
    /*
     * Map the host APIC into GC - this may be host OS specific!
     */
    if (VBOX_SUCCESS(rc))
        rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
                    X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
#endif
    return rc;
}


/**
 * Initializes the R0 VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3InitR0(PVM pVM)
{
    int rc;

    /*
     * Initialize the ring-0 logger if we haven't done so yet.
     */
    if (    pVM->vmm.s.pR0Logger
        &&  !pVM->vmm.s.pR0Logger->fCreated)
    {
        rc = VMMR3UpdateLoggers(pVM);
        if (VBOX_FAILURE(rc))
            return rc;
    }

    /*
     * Call Ring-0 entry with init code.
     */
    for (;;)
    {
#ifdef NO_SUPCALLR0VMM
        //rc = VERR_GENERAL_FAILURE;
        rc = VINF_SUCCESS;
#else
        rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, VBOX_VERSION, NULL);
#endif
        if (    pVM->vmm.s.pR0Logger
            &&  pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
            RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
        if (rc != VINF_VMM_CALL_HOST)
            break;
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
            break;
        /* Resume R0 */
    }

    if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    {
        LogRel(("R0 init failed, rc=%Vra\n", rc));
        if (VBOX_SUCCESS(rc))
            rc = VERR_INTERNAL_ERROR;
    }
    return rc;
}


/**
 * Initializes the GC VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3InitGC(PVM pVM)
{
    /* In VMX mode, there's no need to init GC. */
    if (pVM->vmm.s.fSwitcherDisabled)
        return VINF_SUCCESS;

    /*
     * Call VMMGCInit():
     *      -# resolve the address.
     *      -# setup stackframe and EIP to use the trampoline.
     *      -# do a generic hypervisor call.
     */
    RTGCPTR GCPtrEP;
    int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
    if (VBOX_SUCCESS(rc))
    {
        CPUMHyperSetCtxCore(pVM, NULL);
        CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */
        uint64_t u64TS = RTTimeProgramStartNanoTS();
#if GC_ARCH_BITS == 32
        CPUMPushHyper(pVM, (uint32_t)(u64TS >> 32));    /* Param 3: The program startup TS - Hi. */
        CPUMPushHyper(pVM, (uint32_t)u64TS);            /* Param 3: The program startup TS - Lo. */
#else /* 64-bit GC */
        CPUMPushHyper(pVM, u64TS);                      /* Param 3: The program startup TS. */
#endif
        CPUMPushHyper(pVM, VBOX_VERSION);               /* Param 2: Version argument. */
        CPUMPushHyper(pVM, VMMGC_DO_VMMGC_INIT);        /* Param 1: Operation. */
        CPUMPushHyper(pVM, pVM->pVMGC);                 /* Param 0: pVM */
        CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR));        /* trampoline param: stacksize. */
        CPUMPushHyper(pVM, GCPtrEP);                    /* Call EIP. */
        CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
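        /* The call is staged on the hypervisor stack: the parameters are
           pushed right to left, followed by the trampoline's stack-size
           argument and the entry point, and EIP is pointed at the call
           trampoline which performs the actual call to VMMGCEntry. */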

        for (;;)
        {
#ifdef NO_SUPCALLR0VMM
            //rc = VERR_GENERAL_FAILURE;
            rc = VINF_SUCCESS;
#else
            rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_CALL_HYPERVISOR, NULL);
#endif
#ifdef LOG_ENABLED
            PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
            if (    pLogger
                &&  pLogger->offScratch > 0)
                RTLogFlushGC(NULL, pLogger);
#endif
#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
            PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
            if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
                RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
#endif
            if (rc != VINF_VMM_CALL_HOST)
                break;
            rc = vmmR3ServiceCallHostRequest(pVM);
            if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
                break;
        }

        if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
        {
            VMMR3FatalDump(pVM, rc);
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                rc = VERR_INTERNAL_ERROR;
        }
        AssertRC(rc);
    }
    return rc;
}


/**
 * Terminate the VMM bits.
 *
 * @returns VINF_SUCCESS.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMR3Term(PVM pVM)
{
    /*
     * Call Ring-0 entry with termination code.
     */
    int rc;
    for (;;)
    {
#ifdef NO_SUPCALLR0VMM
        //rc = VERR_GENERAL_FAILURE;
        rc = VINF_SUCCESS;
#else
        rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_TERM, VBOX_VERSION, NULL);
#endif
        if (    pVM->vmm.s.pR0Logger
            &&  pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
            RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
        if (rc != VINF_VMM_CALL_HOST)
            break;
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
            break;
        /* Resume R0 */
    }
    if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    {
        LogRel(("VMMR3Term: R0 term failed, rc=%Vra. (warning)\n", rc));
        if (VBOX_SUCCESS(rc))
            rc = VERR_INTERNAL_ERROR;
    }

#ifdef VBOX_STRICT_VMM_STACK
    /*
     * Make the two stack guard pages present again.
     */
    RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
#endif
    return rc;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * The VMM will need to apply relocations to the core code.
 *
 * @param   pVM         The VM handle.
 * @param   offDelta    The relocation delta.
 */
VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("VMMR3Relocate: offDelta=%VGv\n", offDelta));

    /*
     * Recalc the GC address.
     */
    pVM->vmm.s.pvGCCoreCode = MMHyperHC2GC(pVM, pVM->vmm.s.pvHCCoreCodeR3);

    /*
     * The stack.
     */
    CPUMSetHyperESP(pVM, CPUMGetHyperESP(pVM) + offDelta);
    pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
    pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;

    /*
     * All the switchers.
     */
    for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
    {
        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
        if (pSwitcher && pSwitcher->pfnRelocate)
        {
            unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
            pSwitcher->pfnRelocate(pVM,
                                   pSwitcher,
                                   (uint8_t *)pVM->vmm.s.pvHCCoreCodeR0 + off,
                                   (uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + off,
                                   pVM->vmm.s.pvGCCoreCode + off,
                                   pVM->vmm.s.HCPhysCoreCode + off);
        }
    }

    /*
     * Recalc the GC address for the current switcher.
     */
    PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
    RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
    pVM->vmm.s.pfnGCGuestToHost         = GCPtr + pSwitcher->offGCGuestToHost;
    pVM->vmm.s.pfnGCCallTrampoline      = GCPtr + pSwitcher->offGCCallTrampoline;
    pVM->pfnVMMGCGuestToHostAsm         = GCPtr + pSwitcher->offGCGuestToHostAsm;
    pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
    pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;

    /*
     * Get other GC entry points.
     */
    int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMGCResumeGuest);
    AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Vra\n", rc));

    rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMGCResumeGuestV86);
    AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Vra\n", rc));

    /*
     * Update the logger.
     */
    VMMR3UpdateLoggers(pVM);
}


/**
 * Updates the settings for the GC and R0 loggers.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMR3UpdateLoggers(PVM pVM)
{
    /*
     * Simply clone the logger instance (for GC).
     */
    int rc = VINF_SUCCESS;
    RTGCPTR GCPtrLoggerFlush = 0;

    if (   pVM->vmm.s.pLoggerHC
#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
        || pVM->vmm.s.pRelLoggerHC
#endif
       )
    {
        rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &GCPtrLoggerFlush);
        AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Vra\n", rc));
    }

    if (pVM->vmm.s.pLoggerHC)
    {
        RTGCPTR GCPtrLoggerWrapper = 0;
        rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &GCPtrLoggerWrapper);
        AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Vra\n", rc));
        pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
        rc = RTLogCloneGC(NULL /* default */, pVM->vmm.s.pLoggerHC, pVM->vmm.s.cbLoggerGC,
                          GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
        AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
    }

#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
    if (pVM->vmm.s.pRelLoggerHC)
    {
        RTGCPTR GCPtrLoggerWrapper = 0;
        rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &GCPtrLoggerWrapper);
        AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Vra\n", rc));
        pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
        rc = RTLogCloneGC(RTLogRelDefaultInstance(), pVM->vmm.s.pRelLoggerHC, pVM->vmm.s.cbRelLoggerGC,
                          GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
        AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
    }
#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */

    /*
     * For the ring-0 EMT logger, we use a per-thread logger
     * instance in ring-0. Only initialize it once.
     */
    PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
    if (pR0Logger)
    {
        if (!pR0Logger->fCreated)
        {
            RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
            rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
            AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerWrapper not found! rc=%Vra\n", rc), rc);

            RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
            rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
            AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Vra\n", rc), rc);

            rc = RTLogCreateForR0(&pR0Logger->Logger, pR0Logger->cbLogger,
                                  *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush,
                                  RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
            AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Vra\n", rc), rc);
            pR0Logger->fCreated = true;
        }

        rc = RTLogCopyGroupsAndFlags(&pR0Logger->Logger, NULL /* default */, RTLOGFLAGS_BUFFERED, 0);
        AssertRC(rc);
    }

    return rc;
}


/**
 * Generic switch code relocator.
 *
 * @param   pVM         The VM handle.
 * @param   pSwitcher   The switcher definition.
 * @param   pu8CodeR0   Pointer to the core code block for the switcher, ring-0 mapping.
 * @param   pu8CodeR3   Pointer to the core code block for the switcher, ring-3 mapping.
 * @param   GCPtrCode   The guest context address corresponding to pu8Code.
 * @param   u32IDCode   The identity mapped (ID) address corresponding to pu8Code.
 * @param   SelCS       The hypervisor CS selector.
 * @param   SelDS       The hypervisor DS selector.
 * @param   SelTSS      The hypervisor TSS selector.
 * @param   GCPtrGDT    The GC address of the hypervisor GDT.
 * @param   SelCS64     The 64-bit mode hypervisor CS selector.
 */
static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
                                         RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
{
    union
    {
        const uint8_t  *pu8;
        const uint16_t *pu16;
        const uint32_t *pu32;
        const uint64_t *pu64;
        const void     *pv;
        uintptr_t       u;
    } u;
    u.pv = pSwitcher->pvFixups;

    /*
     * Process fixups.
     */
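    /* The fixup stream is a sequence of records: a type byte, a 32-bit
       source offset into the switcher code and, for most fixup types, a
       32-bit target operand; FIX_THE_END terminates the stream. The
       relative fixups store target - (source + 4), i.e. a rel32 operand. */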
    uint8_t u8;
    while ((u8 = *u.pu8++) != FIX_THE_END)
    {
        /*
         * Get the source (where to write the fixup).
         */
        uint32_t offSrc = *u.pu32++;
        Assert(offSrc < pSwitcher->cbCode);
        union
        {
            uint8_t   *pu8;
            uint16_t  *pu16;
            uint32_t  *pu32;
            uint64_t  *pu64;
            uintptr_t  u;
        } uSrc;
        uSrc.pu8 = pu8CodeR3 + offSrc;

        /* The fixup target and method depend on the type. */
        switch (u8)
        {
            /*
             * 32-bit relative, source in HC and target in GC.
             */
            case FIX_HC_2_GC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
                break;
            }

            /*
             * 32-bit relative, source in HC and target in ID.
             */
            case FIX_HC_2_ID_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - ((uintptr_t)pu8CodeR0 + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in HC.
             */
            case FIX_GC_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in GC and target in ID.
             */
            case FIX_GC_2_ID_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in HC.
             */
            case FIX_ID_2_HC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 32-bit relative, source in ID and target in GC.
             */
            case FIX_ID_2_GC_NEAR_REL:
            {
                Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
                break;
            }

            /*
             * 16:32 far jump, target in GC.
             */
            case FIX_GC_FAR32:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
                *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
                *uSrc.pu16++ = SelCS;
                break;
            }

            /*
             * Make 32-bit GC pointer given CPUM offset.
             */
            case FIX_GC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
                break;
            }

            /*
             * Make 32-bit GC pointer given VM offset.
             */
            case FIX_GC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
                break;
            }

            /*
             * Make 32-bit HC pointer given CPUM offset.
             */
            case FIX_HC_CPUM_OFF:
            {
                uint32_t offCPUM = *u.pu32++;
                Assert(offCPUM < sizeof(pVM->cpum));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
                break;
            }

            /*
             * Make 32-bit R0 pointer given VM offset.
             */
            case FIX_HC_VM_OFF:
            {
                uint32_t offVM = *u.pu32++;
                Assert(offVM < sizeof(VM));
                *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
                break;
            }

            /*
             * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_32BIT_CR3:
            {
                *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
                break;
            }

            /*
             * Store the PAE CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_PAE_CR3:
            {
                *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
                break;
            }

            /*
             * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
             */
            case FIX_INTER_AMD64_CR3:
            {
                *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
                break;
            }

            /*
             * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
             */
            case FIX_HYPER_32BIT_CR3:
            {
                *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
                break;
            }

            /*
             * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
             */
            case FIX_HYPER_PAE_CR3:
            {
                *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
                break;
            }

            /*
             * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
             */
            case FIX_HYPER_AMD64_CR3:
            {
                *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
                break;
            }

            /*
             * Store Hypervisor CS (16-bit).
             */
            case FIX_HYPER_CS:
            {
                *uSrc.pu16 = SelCS;
                break;
            }

            /*
             * Store Hypervisor DS (16-bit).
             */
            case FIX_HYPER_DS:
            {
                *uSrc.pu16 = SelDS;
                break;
            }

            /*
             * Store Hypervisor TSS (16-bit).
             */
            case FIX_HYPER_TSS:
            {
                *uSrc.pu16 = SelTSS;
                break;
            }

            /*
             * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
             */
            case FIX_GC_TSS_GDTE_DW2:
            {
                RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
                *uSrc.pu32 = (uint32_t)GCPtr;
                break;
            }


            ///@todo case FIX_CR4_MASK:
            ///@todo case FIX_CR4_OSFSXR:

            /*
             * Insert relative jump to specified target if FXSAVE/FXRSTOR isn't supported by the CPU.
             */
            case FIX_NO_FXSAVE_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMSupportsFXSR(pVM))
                {
                    *uSrc.pu8++  = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++  = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * Insert relative jump to specified target if SYSENTER isn't used by the host.
             */
            case FIX_NO_SYSENTER_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysEnter(pVM))
                {
                    *uSrc.pu8++  = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++  = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

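            /* Note that the FIX_NO_SYSCALL_JMP case below tests
               CPUMIsHostUsingSysEnter just like the SYSENTER case above;
               this looks like a copy/paste and a SYSCALL-specific check was
               presumably intended. */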
            /*
             * Insert relative jump to specified target if SYSCALL isn't used by the host.
             */
            case FIX_NO_SYSCALL_JMP:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offTrg < pSwitcher->cbCode);
                if (!CPUMIsHostUsingSysEnter(pVM))
                {
                    *uSrc.pu8++  = 0xe9; /* jmp rel32 */
                    *uSrc.pu32++ = offTrg - (offSrc + 5);
                }
                else
                {
                    *uSrc.pu8++  = *((uint8_t *)pSwitcher->pvCode + offSrc);
                    *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
                }
                break;
            }

            /*
             * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu32 = (uintptr_t)pu8CodeR0 + offTrg;
                break;
            }

#if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
            /*
             * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
             */
            case FIX_HC_64BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
                *uSrc.pu64 = (uintptr_t)pu8CodeR0 + offTrg;
                break;
            }

            /*
             * 64-bit HC Code Selector (no argument).
             */
            case FIX_HC_64BIT_CS:
            {
                Assert(offSrc < pSwitcher->cbCode);
#if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
                *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
#else
                AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
#endif
                break;
            }

            /*
             * 64-bit HC pointer to the CPUM instance data (no argument).
             */
            case FIX_HC_64BIT_CPUM:
            {
                Assert(offSrc < pSwitcher->cbCode);
                *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
                break;
            }
#endif

            /*
             * 32-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_32BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32 = u32IDCode + offTrg;
                break;
            }

            /*
             * 64-bit ID pointer to (ID) target within the code (32-bit offset).
             */
            case FIX_ID_64BIT:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu64 = u32IDCode + offTrg;
                break;
            }

            /*
             * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
             */
            case FIX_ID_FAR32_TO_64BIT_MODE:
            {
                uint32_t offTrg = *u.pu32++;
                Assert(offSrc < pSwitcher->cbCode);
                Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
                *uSrc.pu32++ = u32IDCode + offTrg;
                *uSrc.pu16   = SelCS64;
                AssertRelease(SelCS64);
                break;
            }

#ifdef VBOX_WITH_NMI
            /*
             * 32-bit address to the APIC base.
             */
            case FIX_GC_APIC_BASE_32BIT:
            {
                *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
                break;
            }
#endif

            default:
                AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
                break;
        }
    }

#ifdef LOG_ENABLED
    /*
     * If Log2 is enabled, disassemble the switcher code.
     *
     * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
     */
    if (LogIs2Enabled())
    {
        RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
                    "   pu8CodeR0   = %p\n"
                    "   pu8CodeR3   = %p\n"
                    "   GCPtrCode   = %VGv\n"
                    "   u32IDCode   = %08x\n"
                    "   pVMGC       = %VGv\n"
                    "   pCPUMGC     = %VGv\n"
                    "   pVMHC       = %p\n"
                    "   pCPUMHC     = %p\n"
                    "   GCPtrGDT    = %VGv\n"
                    "   InterCR3s   = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
                    "   HyperCR3s   = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
                    "   SelCS       = %04x\n"
                    "   SelDS       = %04x\n"
                    "   SelCS64     = %04x\n"
                    "   SelTSS      = %04x\n",
                    pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
                    pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
                    VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
                    GCPtrGDT,
                    PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
                    PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
                    SelCS, SelDS, SelCS64, SelTSS);

        uint32_t offCode = 0;
        while (offCode < pSwitcher->cbCode)
        {
            /*
             * Figure out where this is.
             */
            const char *pszDesc = NULL;
            RTUINTPTR   uBase;
            uint32_t    cbCode;
            if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
            {
                pszDesc = "HCCode0";
                uBase   = (RTUINTPTR)pu8CodeR0;
                offCode = pSwitcher->offHCCode0;
                cbCode  = pSwitcher->cbHCCode0;
            }
            else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
            {
                pszDesc = "HCCode1";
                uBase   = (RTUINTPTR)pu8CodeR0;
                offCode = pSwitcher->offHCCode1;
                cbCode  = pSwitcher->cbHCCode1;
            }
            else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
            {
                pszDesc = "GCCode";
                uBase   = GCPtrCode;
                offCode = pSwitcher->offGCCode;
                cbCode  = pSwitcher->cbGCCode;
            }
            else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
            {
                pszDesc = "IDCode0";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode0;
                cbCode  = pSwitcher->cbIDCode0;
            }
            else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
            {
                pszDesc = "IDCode1";
                uBase   = u32IDCode;
                offCode = pSwitcher->offIDCode1;
                cbCode  = pSwitcher->cbIDCode1;
            }
            else
            {
                RTLogPrintf("  %04x: %02x '%c' (nowhere)\n",
                            offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
                offCode++;
                continue;
            }

            /*
             * Disassemble it.
             */
            RTLogPrintf("  %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
            DISCPUSTATE Cpu;

            memset(&Cpu, 0, sizeof(Cpu));
            Cpu.mode = CPUMODE_32BIT;
            while (cbCode > 0)
            {
                /* try label it */
                if (pSwitcher->offR0HostToGuest == offCode)
                    RTLogPrintf(" *R0HostToGuest:\n");
                if (pSwitcher->offGCGuestToHost == offCode)
                    RTLogPrintf(" *GCGuestToHost:\n");
                if (pSwitcher->offGCCallTrampoline == offCode)
                    RTLogPrintf(" *GCCallTrampoline:\n");
                if (pSwitcher->offGCGuestToHostAsm == offCode)
                    RTLogPrintf(" *GCGuestToHostAsm:\n");
                if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
                    RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
                if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
                    RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");

                /* disas */
                uint32_t cbInstr = 0;
                char szDisas[256];
                if (RT_SUCCESS(DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas)))
                    RTLogPrintf("  %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
                else
                {
                    RTLogPrintf("  %04x: %02x '%c'\n",
                                offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
                    cbInstr = 1;
                }
                offCode += cbInstr;
                cbCode  -= RT_MIN(cbInstr, cbCode);
            }
        }
    }
#endif
}


/**
 * Relocator for the 32-Bit to 32-Bit world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the 32-Bit to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the PAE to 32-Bit world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the PAE to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
}


/**
 * Relocator for the AMD64 to PAE world switcher.
 */
DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
{
    vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
                                 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
}


/**
 * Gets the pointer to g_szRTAssertMsg1 in GC.
 * @returns Pointer to VMMGC::g_szRTAssertMsg1.
 *          Returns NULL if not present.
 * @param   pVM         The VM handle.
 */
VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM)
{
    RTGCPTR GCPtr;
    int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg1", &GCPtr);
    if (VBOX_SUCCESS(rc))
        return (const char *)MMHyperGC2HC(pVM, GCPtr);
    return NULL;
}


/**
 * Gets the pointer to g_szRTAssertMsg2 in GC.
 * @returns Pointer to VMMGC::g_szRTAssertMsg2.
 *          Returns NULL if not present.
 * @param   pVM         The VM handle.
 */
VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM)
{
    RTGCPTR GCPtr;
    int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg2", &GCPtr);
    if (VBOX_SUCCESS(rc))
        return (const char *)MMHyperGC2HC(pVM, GCPtr);
    return NULL;
}
1590
1591
1592/**
1593 * Execute state save operation.
1594 *
1595 * @returns VBox status code.
1596 * @param pVM VM Handle.
1597 * @param pSSM SSM operation handle.
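 *
 * The unit layout written below is: the GC pointer to the stack bottom, the
 * current hypervisor ESP, the VMM_STACK_SIZE bytes of stack content, and a
 * ~0 terminator dword.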
1598 */
1599static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1600{
1601 LogFlow(("vmmR3Save:\n"));
1602
1603 /*
1604 * The hypervisor stack.
1605 */
1606 SSMR3PutGCPtr(pSSM, pVM->vmm.s.pbGCStackBottom);
1607 RTGCPTR GCPtrESP = CPUMGetHyperESP(pVM);
1608 Assert(pVM->vmm.s.pbGCStackBottom - GCPtrESP <= VMM_STACK_SIZE);
1609 SSMR3PutGCPtr(pSSM, GCPtrESP);
1610 SSMR3PutMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1611 return SSMR3PutU32(pSSM, ~0); /* terminator */
1612}
1613
1614
1615/**
1616 * Execute state load operation.
1617 *
1618 * @returns VBox status code.
1619 * @param pVM VM Handle.
1620 * @param pSSM SSM operation handle.
1621 * @param u32Version Data layout version.
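 *
 * Reads back the unit written by vmmR3Save() and only restores the stack if
 * it is at the same address as before or nearly empty.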
1622 */
1623static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1624{
1625 LogFlow(("vmmR3Load:\n"));
1626
1627 /*
1628 * Validate version.
1629 */
1630 if (u32Version != VMM_SAVED_STATE_VERSION)
1631 {
1632 Log(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
1633 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1634 }
1635
1636 /*
1637 * Check that the stack is in the same place, or that it's fairly empty.
1638 */
1639 RTGCPTR GCPtrStackBottom;
1640 SSMR3GetGCPtr(pSSM, &GCPtrStackBottom);
1641 RTGCPTR GCPtrESP;
1642 int rc = SSMR3GetGCPtr(pSSM, &GCPtrESP);
1643 if (VBOX_FAILURE(rc))
1644 return rc;
1645 if ( GCPtrStackBottom == pVM->vmm.s.pbGCStackBottom
1646 || (GCPtrStackBottom - GCPtrESP < 32)) /** @todo This will break if we start preempting the hypervisor. */
1647 {
1648 /*
1649 * We *must* set the ESP because the CPUM load + PGM load relocations will render
1650 * the ESP in CPUM fatally invalid.
1651 */
1652 CPUMSetHyperESP(pVM, GCPtrESP);
1653
1654 /* restore the stack. */
1655 SSMR3GetMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1656
1657 /* terminator */
1658 uint32_t u32;
1659 rc = SSMR3GetU32(pSSM, &u32);
1660 if (VBOX_FAILURE(rc))
1661 return rc;
1662 if (u32 != ~0U)
1663 {
1664 AssertMsgFailed(("u32=%#x\n", u32));
1665 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1666 }
1667 return VINF_SUCCESS;
1668 }
1669
1670 LogRel(("The stack is not in the same place and it's not empty! GCPtrStackBottom=%VGv pbGCStackBottom=%VGv ESP=%VGv\n",
1671 GCPtrStackBottom, pVM->vmm.s.pbGCStackBottom, GCPtrESP));
1672 if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
1673 return VINF_SUCCESS; /* ignore this */
1674 AssertFailed();
1675 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1676}
1677
1678
1679/**
1680 * Selects the switcher to be used for switching to GC.
1681 *
1682 * @returns VBox status code.
1683 * @param pVM VM handle.
1684 * @param enmSwitcher The new switcher.
1685 * @remark This function may be called before the VMM is initialized.
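 *
 * @par Example
 * An illustrative sketch (not a call made from this file); assumes the
 * VMMSWITCHER_PAE_TO_PAE enum value naming the PAE-on-PAE switcher:
 * @code
 *      int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_PAE_TO_PAE);
 *      AssertRC(rc);
 * @endcode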
1686 */
1687VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
1688{
1689 /*
1690 * Validate input.
1691 */
1692 if ( enmSwitcher < VMMSWITCHER_INVALID
1693 || enmSwitcher >= VMMSWITCHER_MAX)
1694 {
1695 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
1696 return VERR_INVALID_PARAMETER;
1697 }
1698
1699 /*
1700 * Select the new switcher.
1701 */
1702 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
1703 if (pSwitcher)
1704 {
1705 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
1706 pVM->vmm.s.enmSwitcher = enmSwitcher;
1707
1708 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvHCCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvHCCoreCodeR0 type */
1709 pVM->vmm.s.pfnR0HostToGuest = pbCodeR0 + pSwitcher->offR0HostToGuest;
1710
1711 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[enmSwitcher];
1712 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
1713 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
1714 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
1715 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
1716 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
1717 return VINF_SUCCESS;
1718 }
1719 return VERR_NOT_IMPLEMENTED;
1720}
1721
1722/**
1723 * Disable the switcher logic permanently.
1724 *
1725 * @returns VBox status code.
1726 * @param pVM VM handle.
1727 */
1728VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
1729{
1730/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
1731 * @code
1732 * mov eax, VERR_INTERNAL_ERROR
1733 * ret
1734 * @endcode
1735 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
1736 */
1737 pVM->vmm.s.fSwitcherDisabled = true;
1738 return VINF_SUCCESS;
1739}
1740
1741
1742/**
1743 * Resolve a builtin GC symbol.
1744 * Called by PDM when loading or relocating GC modules.
1745 *
1746 * @returns VBox status
1747 * @param pVM VM Handle.
1748 * @param pszSymbol Symbol to resolve.
1749 * @param pGCPtrValue Where to store the symbol value.
1750 * @remark This has to work before VMMR3Relocate() is called.
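 *
 * @par Example
 * An illustrative sketch of the kind of lookup PDM performs; only the
 * "g_Logger" and "g_RelLogger" symbols are recognized here:
 * @code
 *      RTGCPTR GCPtrLogger;
 *      int rc = VMMR3GetImportGC(pVM, "g_Logger", &GCPtrLogger);
 * @endcode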
1751 */
1752VMMR3DECL(int) VMMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
1753{
1754 if (!strcmp(pszSymbol, "g_Logger"))
1755 {
1756 if (pVM->vmm.s.pLoggerHC)
1757 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
1758 *pGCPtrValue = pVM->vmm.s.pLoggerGC;
1759 }
1760 else if (!strcmp(pszSymbol, "g_RelLogger"))
1761 {
1762#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1763 if (pVM->vmm.s.pRelLoggerHC)
1764 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
1765 *pGCPtrValue = pVM->vmm.s.pRelLoggerGC;
1766#else
1767 *pGCPtrValue = NIL_RTGCPTR;
1768#endif
1769 }
1770 else
1771 return VERR_SYMBOL_NOT_FOUND;
1772 return VINF_SUCCESS;
1773}
1774
1775
1776/**
1777 * Suspends the CPU yielder.
1778 *
1779 * @param pVM The VM handle.
1780 */
1781VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
1782{
1783 if (!pVM->vmm.s.cYieldResumeMillies)
1784 {
1785 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1786 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1787 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1788 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1789 else
1790 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1791 TMTimerStop(pVM->vmm.s.pYieldTimer);
1792 }
1793 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1794}
1795
1796
1797/**
1798 * Stops the CPU yielder.
1799 *
1800 * @param pVM The VM handle.
1801 */
1802VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
1803{
1804 if (!pVM->vmm.s.cYieldResumeMillies)
1805 TMTimerStop(pVM->vmm.s.pYieldTimer);
1806 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1807 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1808}
1809
1810
1811/**
1812 * Resumes the CPU yielder when it has been suspended or stopped.
1813 *
1814 * @param pVM The VM handle.
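 *
 * @par Example
 * Typical pairing with VMMR3YieldSuspend(), illustrative sketch only:
 * @code
 *      VMMR3YieldSuspend(pVM);   // park the yielder, remembering the remaining interval
 *      // ... EMT is halted or otherwise idle ...
 *      VMMR3YieldResume(pVM);    // rearm the timer with the remaining interval
 * @endcode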
1815 */
1816VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
1817{
1818 if (pVM->vmm.s.cYieldResumeMillies)
1819 {
1820 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1821 pVM->vmm.s.cYieldResumeMillies = 0;
1822 }
1823}
1824
1825
1826/**
1827 * Internal timer callback function.
1828 *
1829 * @param pVM The VM.
1830 * @param pTimer The timer handle.
1831 * @param pvUser User argument specified upon timer creation.
1832 */
1833static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1834{
1835 /*
1836 * This really needs some careful tuning. While we shouldn't be too greedy since
1837 * that'll cause the rest of the system to stall, we shouldn't be too nice either
1838 * because that'll cause us to stall.
1839 *
1840 * The current logic is to use the default interval when there is no lag worth
1841 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1842 *
1843 * (This depends on TMCLOCK_VIRTUAL_SYNC being scheduled before TMCLOCK_REAL
1844 * so the lag is up to date.)
1845 */
1846 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1847 if ( u64Lag < 50000000 /* 50ms */
1848 || ( u64Lag < 1000000000 /* 1s */
1849 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1850 )
1851 {
1852 uint64_t u64Elapsed = RTTimeNanoTS();
1853 pVM->vmm.s.u64LastYield = u64Elapsed;
1854
1855 RTThreadYield();
1856
1857#ifdef LOG_ENABLED
1858 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1859 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1860#endif
1861 }
1862 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1863}
1864
1865
1866/**
1867 * Acquire global VM lock.
1868 *
1869 * @returns VBox status code
1870 * @param pVM The VM to operate on.
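 *
 * @par Example
 * Illustrative sketch of the usual enter/leave pairing with VMMR3Unlock():
 * @code
 *      int rc = VMMR3Lock(pVM);
 *      AssertRC(rc);
 *      // ... touch state protected by the global VM lock ...
 *      VMMR3Unlock(pVM);
 * @endcode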
1871 */
1872VMMR3DECL(int) VMMR3Lock(PVM pVM)
1873{
1874 return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
1875}
1876
1877
1878/**
1879 * Release global VM lock.
1880 *
1881 * @returns VBox status code
1882 * @param pVM The VM to operate on.
1883 */
1884VMMR3DECL(int) VMMR3Unlock(PVM pVM)
1885{
1886 return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
1887}
1888
1889
1890/**
1891 * Return global VM lock owner.
1892 *
1893 * @returns Thread id of owner.
1894 * @returns NIL_RTTHREAD if no owner.
1895 * @param pVM The VM to operate on.
1896 */
1897VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
1898{
1899 return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
1900}
1901
1902
1903/**
1904 * Checks if the current thread is the owner of the global VM lock.
1905 *
1906 * @returns true if owner.
1907 * @returns false if not owner.
1908 * @param pVM The VM to operate on.
1909 */
1910VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
1911{
1912 return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
1913}
1914
1915
1916/**
1917 * Executes guest code.
1918 *
1919 * @param pVM VM handle.
1920 */
1921VMMR3DECL(int) VMMR3RawRunGC(PVM pVM)
1922{
1923 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1924
1925 /*
1926 * Set the EIP and ESP.
1927 */
1928 CPUMSetHyperEIP(pVM, CPUMGetGuestEFlags(pVM) & X86_EFL_VM
1929 ? pVM->vmm.s.pfnCPUMGCResumeGuestV86
1930 : pVM->vmm.s.pfnCPUMGCResumeGuest);
1931 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);
1932
1933 /*
1934 * We hide log flushes (outer) and hypervisor interrupts (inner).
1935 */
1936 for (;;)
1937 {
1938 int rc;
1939 do
1940 {
1941#ifdef NO_SUPCALLR0VMM
1942 rc = VERR_GENERAL_FAILURE;
1943#else
1944 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
1945#endif
1946 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1947
1948 /*
1949 * Flush the logs.
1950 */
1951#ifdef LOG_ENABLED
1952 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
1953 if ( pLogger
1954 && pLogger->offScratch > 0)
1955 RTLogFlushGC(NULL, pLogger);
1956#endif
1957#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1958 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
1959 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1960 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
1961#endif
1962 if (rc != VINF_VMM_CALL_HOST)
1963 {
1964 Log2(("VMMR3RawRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1965 return rc;
1966 }
1967 rc = vmmR3ServiceCallHostRequest(pVM);
1968 if (VBOX_FAILURE(rc))
1969 return rc;
1970 /* Resume GC */
1971 }
1972}
1973
1974
1975/**
1976 * Executes guest code (Intel VT-x and AMD-V).
1977 *
1978 * @param pVM VM handle.
1979 */
1980VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM)
1981{
1982 Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1983
1984 for (;;)
1985 {
1986 int rc;
1987 do
1988 {
1989#ifdef NO_SUPCALLR0VMM
1990 rc = VERR_GENERAL_FAILURE;
1991#else
1992 rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN);
1993#endif
1994 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1995
1996#ifdef LOG_ENABLED
1997 /*
1998 * Flush the log
1999 */
2000 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
2001 if ( pR0Logger
2002 && pR0Logger->Logger.offScratch > 0)
2003 RTLogFlushToLogger(&pR0Logger->Logger, NULL);
2004#endif /* LOG_ENABLED */
2005 if (rc != VINF_VMM_CALL_HOST)
2006 {
2007 Log2(("VMMR3HwAccRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
2008 return rc;
2009 }
2010 rc = vmmR3ServiceCallHostRequest(pVM);
2011 if (VBOX_FAILURE(rc))
2012 return rc;
2013 /* Resume R0 */
2014 }
2015}
2016
2017/**
2018 * Calls a GC function.
2019 *
2020 * @param pVM The VM handle.
2021 * @param GCPtrEntry The GC function address.
2022 * @param cArgs The number of arguments in the ....
2023 * @param ... Arguments to the function.
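 *
 * @par Example
 * Illustrative sketch; "MyGCFunction" is a hypothetical symbol resolved via
 * PDMR3GetSymbolGC(), and the arguments must be GC-pointer sized:
 * @code
 *      RTGCPTR GCPtrFn;
 *      int rc = PDMR3GetSymbolGC(pVM, NULL, "MyGCFunction", &GCPtrFn);
 *      if (VBOX_SUCCESS(rc))
 *          rc = VMMR3CallGC(pVM, GCPtrFn, 2, (RTGCUINTPTR)1, (RTGCUINTPTR)2);
 * @endcode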
2024 */
2025VMMR3DECL(int) VMMR3CallGC(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, ...)
2026{
2027 va_list args;
2028 va_start(args, cArgs);
2029 int rc = VMMR3CallGCV(pVM, GCPtrEntry, cArgs, args);
2030 va_end(args);
2031 return rc;
2032}
2033
2034
2035/**
2036 * Calls a GC function.
2037 *
2038 * @param pVM The VM handle.
2039 * @param GCPtrEntry The GC function address.
2040 * @param cArgs The number of arguments in the va_list.
2041 * @param args Arguments to the function.
2042 */
2043VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, va_list args)
2044{
2045 Log2(("VMMR3CallGCV: GCPtrEntry=%VGv cArgs=%d\n", GCPtrEntry, cArgs));
2046
2047 /*
2048 * Setup the call frame using the trampoline.
2049 */
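    /* The resulting hypervisor stack, from the new ESP upwards, is: the GC
       entry address, the frame size, then the cArgs arguments in declaration
       order; this is the layout the GC call trampoline expects. */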
2050 CPUMHyperSetCtxCore(pVM, NULL);
2051 memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
2052 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom - cArgs * sizeof(RTGCUINTPTR));
2053 PRTGCUINTPTR pFrame = (PRTGCUINTPTR)(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE) - cArgs;
2054 int i = cArgs;
2055 while (i-- > 0)
2056 *pFrame++ = va_arg(args, RTGCUINTPTR);
2057
2058 CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR)); /* stack frame size */
2059 CPUMPushHyper(pVM, GCPtrEntry); /* what to call */
2060 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2061
2062 /*
2063 * We hide log flushes (outer) and hypervisor interrupts (inner).
2064 */
2065 for (;;)
2066 {
2067 int rc;
2068 do
2069 {
2070#ifdef NO_SUPCALLR0VMM
2071 rc = VERR_GENERAL_FAILURE;
2072#else
2073 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2074#endif
2075 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2076
2077 /*
2078 * Flush the logs.
2079 */
2080#ifdef LOG_ENABLED
2081 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
2082 if ( pLogger
2083 && pLogger->offScratch > 0)
2084 RTLogFlushGC(NULL, pLogger);
2085#endif
2086#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2087 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2088 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2089 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2090#endif
2091 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2092 VMMR3FatalDump(pVM, rc);
2093 if (rc != VINF_VMM_CALL_HOST)
2094 {
2095 Log2(("VMMR3CallGCV: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
2096 return rc;
2097 }
2098 rc = vmmR3ServiceCallHostRequest(pVM);
2099 if (VBOX_FAILURE(rc))
2100 return rc;
2101 }
2102}
2103
2104
2105/**
2106 * Resumes executing hypervisor code when interrupted
2107 * by a queue flush or a debug event.
2108 *
2109 * @returns VBox status code.
2110 * @param pVM VM handle.
2111 */
2112VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM)
2113{
2114 Log(("VMMR3ResumeHyper: eip=%VGv esp=%VGv\n", CPUMGetHyperEIP(pVM), CPUMGetHyperESP(pVM)));
2115
2116 /*
2117 * We hide log flushes (outer) and hypervisor interrupts (inner).
2118 */
2119 for (;;)
2120 {
2121 int rc;
2122 do
2123 {
2124#ifdef NO_SUPCALLR0VMM
2125 rc = VERR_GENERAL_FAILURE;
2126#else
2127 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2128#endif
2129 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2130
2131 /*
2132 * Flush the loggers.
2133 */
2134#ifdef LOG_ENABLED
2135 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
2136 if ( pLogger
2137 && pLogger->offScratch > 0)
2138 RTLogFlushGC(NULL, pLogger);
2139#endif
2140#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2141 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2142 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2143 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2144#endif
2145 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2146 VMMR3FatalDump(pVM, rc);
2147 if (rc != VINF_VMM_CALL_HOST)
2148 {
2149 Log(("VMMR3ResumeHyper: returns %Vrc\n", rc));
2150 return rc;
2151 }
2152 rc = vmmR3ServiceCallHostRequest(pVM);
2153 if (VBOX_FAILURE(rc))
2154 return rc;
2155 }
2156}
2157
2158
2159/**
2160 * Service a call to the ring-3 host code.
2161 *
2162 * @returns VBox status code.
2163 * @param pVM VM handle.
2164 * @remark Careful with critsects.
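 *
 * The ring-0/GC side stores the operation in enmCallHostOperation (with any
 * argument in u64CallHostArg) before returning VINF_VMM_CALL_HOST; the status
 * of the serviced operation is passed back in rcCallHost.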
2165 */
2166static int vmmR3ServiceCallHostRequest(PVM pVM)
2167{
2168 switch (pVM->vmm.s.enmCallHostOperation)
2169 {
2170 /*
2171 * Acquire the PDM lock.
2172 */
2173 case VMMCALLHOST_PDM_LOCK:
2174 {
2175 pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
2176 break;
2177 }
2178
2179 /*
2180 * Flush a PDM queue.
2181 */
2182 case VMMCALLHOST_PDM_QUEUE_FLUSH:
2183 {
2184 PDMR3QueueFlushWorker(pVM, NULL);
2185 pVM->vmm.s.rcCallHost = VINF_SUCCESS;
2186 break;
2187 }
2188
2189 /*
2190 * Grow the PGM pool.
2191 */
2192 case VMMCALLHOST_PGM_POOL_GROW:
2193 {
2194 pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
2195 break;
2196 }
2197
2198 /*
2199 * Maps a page allocation chunk into ring-3 so ring-0 can use it.
2200 */
2201 case VMMCALLHOST_PGM_MAP_CHUNK:
2202 {
2203 pVM->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVM->vmm.s.u64CallHostArg);
2204 break;
2205 }
2206
2207 /*
2208 * Allocates more handy pages.
2209 */
2210 case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
2211 {
2212 pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
2213 break;
2214 }
2215#ifndef VBOX_WITH_NEW_PHYS_CODE
2216
2217 case VMMCALLHOST_PGM_RAM_GROW_RANGE:
2218 {
2219 const RTGCPHYS GCPhys = pVM->vmm.s.u64CallHostArg;
2220 pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, &GCPhys);
2221 break;
2222 }
2223#endif
2224
2225 /*
2226 * Acquire the PGM lock.
2227 */
2228 case VMMCALLHOST_PGM_LOCK:
2229 {
2230 pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
2231 break;
2232 }
2233
2234 /*
2235 * Flush REM handler notifications.
2236 */
2237 case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
2238 {
2239 REMR3ReplayHandlerNotifications(pVM);
2240 break;
2241 }
2242
2243 /*
2244 * This is a noop. We just take this route to avoid unnecessary
2245 * tests in the loops.
2246 */
2247 case VMMCALLHOST_VMM_LOGGER_FLUSH:
2248 break;
2249
2250 /*
2251 * Set the VM error message.
2252 */
2253 case VMMCALLHOST_VM_SET_ERROR:
2254 VMR3SetErrorWorker(pVM);
2255 break;
2256
2257 /*
2258 * Set the VM runtime error message.
2259 */
2260 case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
2261 VMR3SetRuntimeErrorWorker(pVM);
2262 break;
2263
2264 /*
2265 * Signal a ring 0 hypervisor assertion.
2266 * Cancel the longjmp operation that's in progress.
2267 */
2268 case VMMCALLHOST_VM_R0_HYPER_ASSERTION:
2269 pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call = false;
2270#ifdef RT_ARCH_X86
2271 pVM->vmm.s.CallHostR0JmpBuf.eip = 0;
2272#else
2273 pVM->vmm.s.CallHostR0JmpBuf.rip = 0;
2274#endif
2275 return VINF_EM_DBG_HYPER_ASSERTION;
2276
2277 default:
2278 AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
2279 return VERR_INTERNAL_ERROR;
2280 }
2281
2282 pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
2283 return VINF_SUCCESS;
2284}
2285
2286
2287
2288/**
2289 * Structure to pass to DBGFR3Info() and for doing all other
2290 * output during fatal dump.
2291 */
2292typedef struct VMMR3FATALDUMPINFOHLP
2293{
2294 /** The helper core. */
2295 DBGFINFOHLP Core;
2296 /** The release logger instance. */
2297 PRTLOGGER pRelLogger;
2298 /** The saved release logger flags. */
2299 RTUINT fRelLoggerFlags;
2300 /** The logger instance. */
2301 PRTLOGGER pLogger;
2302 /** The saved logger flags. */
2303 RTUINT fLoggerFlags;
2304 /** The saved logger destination flags. */
2305 RTUINT fLoggerDestFlags;
2306 /** Whether to output to stderr or not. */
2307 bool fStdErr;
2308} VMMR3FATALDUMPINFOHLP, *PVMMR3FATALDUMPINFOHLP;
2309typedef const VMMR3FATALDUMPINFOHLP *PCVMMR3FATALDUMPINFOHLP;
2310
2311
2312/**
2313 * Print formatted string.
2314 *
2315 * @param pHlp Pointer to this structure.
2316 * @param pszFormat The format string.
2317 * @param ... Arguments.
2318 */
2319static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
2320{
2321 va_list args;
2322 va_start(args, pszFormat);
2323 pHlp->pfnPrintfV(pHlp, pszFormat, args);
2324 va_end(args);
2325}
2326
2327
2328/**
2329 * Print formatted string.
2330 *
2331 * @param pHlp Pointer to this structure.
2332 * @param pszFormat The format string.
2333 * @param args Argument list.
2334 */
2335static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
2336{
2337 PCVMMR3FATALDUMPINFOHLP pMyHlp = (PCVMMR3FATALDUMPINFOHLP)pHlp;
2338
2339 if (pMyHlp->pRelLogger)
2340 {
2341 va_list args2;
2342 va_copy(args2, args);
2343 RTLogLoggerV(pMyHlp->pRelLogger, pszFormat, args2);
2344 va_end(args2);
2345 }
2346 if (pMyHlp->pLogger)
2347 {
2348 va_list args2;
2349 va_copy(args2, args);
2350 RTLogLoggerV(pMyHlp->pLogger, pszFormat, args2); /* use the copy; args may already be consumed */
2351 va_end(args2);
2352 }
2353 if (pMyHlp->fStdErr)
2354 {
2355 va_list args2;
2356 va_copy(args2, args);
2357 RTStrmPrintfV(g_pStdErr, pszFormat, args2); /* ditto: use the va_copy */
2358 va_end(args2);
2359 }
2360}
2361
2362
2363/**
2364 * Initializes the fatal dump output helper.
2365 *
2366 * @param pHlp The structure to initialize.
2367 */
2368static void vmmR3FatalDumpInfoHlpInit(PVMMR3FATALDUMPINFOHLP pHlp)
2369{
2370 memset(pHlp, 0, sizeof(*pHlp));
2371
2372 pHlp->Core.pfnPrintf = vmmR3FatalDumpInfoHlp_pfnPrintf;
2373 pHlp->Core.pfnPrintfV = vmmR3FatalDumpInfoHlp_pfnPrintfV;
2374
2375 /*
2376 * The loggers.
2377 */
2378 pHlp->pRelLogger = RTLogRelDefaultInstance();
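    /* In builds without LOG_ENABLED the debug logger is only picked up as a
       fallback when there is no release logger; debug builds always use it. */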
2379#ifndef LOG_ENABLED
2380 if (!pHlp->pRelLogger)
2381#endif
2382 pHlp->pLogger = RTLogDefaultInstance();
2383
2384 if (pHlp->pRelLogger)
2385 {
2386 pHlp->fRelLoggerFlags = pHlp->pRelLogger->fFlags;
2387 pHlp->pRelLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2388 }
2389
2390 if (pHlp->pLogger)
2391 {
2392 pHlp->fLoggerFlags = pHlp->pLogger->fFlags;
2393 pHlp->fLoggerDestFlags = pHlp->pLogger->fDestFlags;
2394 pHlp->pLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2395#ifndef DEBUG_sandervl
2396 pHlp->pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
2397#endif
2398 }
2399
2400 /*
2401 * Check if we need to write to stderr.
2402 */
2403 pHlp->fStdErr = (!pHlp->pRelLogger || !(pHlp->pRelLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)))
2404 && (!pHlp->pLogger || !(pHlp->pLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)));
2405}
2406
2407
2408/**
2409 * Deletes the fatal dump output helper.
2410 *
2411 * @param pHlp The structure to delete.
2412 */
2413static void vmmR3FatalDumpInfoHlpDelete(PVMMR3FATALDUMPINFOHLP pHlp)
2414{
2415 if (pHlp->pRelLogger)
2416 {
2417 RTLogFlush(pHlp->pRelLogger);
2418 pHlp->pRelLogger->fFlags = pHlp->fRelLoggerFlags;
2419 }
2420
2421 if (pHlp->pLogger)
2422 {
2423 RTLogFlush(pHlp->pLogger);
2424 pHlp->pLogger->fFlags = pHlp->fLoggerFlags;
2425 pHlp->pLogger->fDestFlags = pHlp->fLoggerDestFlags;
2426 }
2427}
2428
2429
2430/**
2431 * Dumps the VM state on a fatal error.
2432 *
2433 * @param pVM VM Handle.
2434 * @param rcErr VBox status code.
2435 */
2436VMMR3DECL(void) VMMR3FatalDump(PVM pVM, int rcErr)
2437{
2438 /*
2439 * Create our output helper and sync it with the log settings.
2440 * This helper will be used for all the output.
2441 */
2442 VMMR3FATALDUMPINFOHLP Hlp;
2443 PCDBGFINFOHLP pHlp = &Hlp.Core;
2444 vmmR3FatalDumpInfoHlpInit(&Hlp);
2445
2446 /*
2447 * Header.
2448 */
2449 pHlp->pfnPrintf(pHlp,
2450 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
2451 "!!\n"
2452 "!! Guru Meditation %d (%Vrc)\n"
2453 "!!\n",
2454 rcErr, rcErr);
2455
2456 /*
2457 * Continue according to context.
2458 */
2459 bool fDoneHyper = false;
2460 switch (rcErr)
2461 {
2462 /*
2463 * Hypervisor errors.
2464 */
2465 case VINF_EM_DBG_HYPER_ASSERTION:
2466 pHlp->pfnPrintf(pHlp, "%s%s!!\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
2467 /* fall thru */
2468 case VERR_TRPM_DONT_PANIC:
2469 case VERR_TRPM_PANIC:
2470 case VINF_EM_RAW_STALE_SELECTOR:
2471 case VINF_EM_RAW_IRET_TRAP:
2472 case VINF_EM_DBG_HYPER_BREAKPOINT:
2473 case VINF_EM_DBG_HYPER_STEPPED:
2474 {
2475 /* Trap? */
2476 uint32_t uEIP = CPUMGetHyperEIP(pVM);
2477 TRPMEVENT enmType;
2478 uint8_t u8TrapNo = 0xce;
2479 RTGCUINT uErrorCode = 0xdeadface;
2480 RTGCUINTPTR uCR2 = 0xdeadface;
2481 int rc2 = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
2482 if (VBOX_SUCCESS(rc2))
2483 pHlp->pfnPrintf(pHlp,
2484 "!! TRAP=%02x ERRCD=%VGv CR2=%VGv EIP=%VGv Type=%d\n",
2485 u8TrapNo, uErrorCode, uCR2, uEIP, enmType);
2486 else
2487 pHlp->pfnPrintf(pHlp,
2488 "!! EIP=%VGv NOTRAP\n",
2489 uEIP);
2490
2491 /*
2492 * Try to figure out where EIP is.
2493 */
2494 /** @todo make query call for core code or move this function to VMM. */
2495 /* core code? */
2496 //if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode < pVM->vmm.s.cbCoreCode)
2497 // pHlp->pfnPrintf(pHlp,
2498 // "!! EIP is in CoreCode, offset %#x\n",
2499 // uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode);
2500 //else
2501 { /* ask PDM */
2502 /** @todo ask DBGFR3Sym later. */
2503 char szModName[64];
2504 RTGCPTR GCPtrMod;
2505 char szNearSym1[260];
2506 RTGCPTR GCPtrNearSym1;
2507 char szNearSym2[260];
2508 RTGCPTR GCPtrNearSym2;
2509 int rc = PDMR3QueryModFromEIP(pVM, uEIP,
2510 &szModName[0], sizeof(szModName), &GCPtrMod,
2511 &szNearSym1[0], sizeof(szNearSym1), &GCPtrNearSym1,
2512 &szNearSym2[0], sizeof(szNearSym2), &GCPtrNearSym2);
2513 if (VBOX_SUCCESS(rc))
2514 {
2515 pHlp->pfnPrintf(pHlp,
2516 "!! EIP in %s (%p) at rva %x near symbols:\n"
2517 "!! %VGv rva %VGv off %08x %s\n"
2518 "!! %VGv rva %VGv off -%08x %s\n",
2519 szModName, GCPtrMod, (unsigned)(uEIP - GCPtrMod),
2520 GCPtrNearSym1, GCPtrNearSym1 - GCPtrMod, (unsigned)(uEIP - GCPtrNearSym1), szNearSym1,
2521 GCPtrNearSym2, GCPtrNearSym2 - GCPtrMod, (unsigned)(GCPtrNearSym2 - uEIP), szNearSym2);
2522 }
2523 else
2524 pHlp->pfnPrintf(pHlp,
2525 "!! EIP is not in any code known to VMM!\n");
2526 }
2527
2528 /* Disassemble the instruction. */
2529 char szInstr[256];
2530 rc2 = DBGFR3DisasInstrEx(pVM, 0, 0, DBGF_DISAS_FLAGS_CURRENT_HYPER, &szInstr[0], sizeof(szInstr), NULL);
2531 if (VBOX_SUCCESS(rc2))
2532 pHlp->pfnPrintf(pHlp,
2533 "!! %s\n", szInstr);
2534
2535 /* Dump the hypervisor cpu state. */
2536 pHlp->pfnPrintf(pHlp,
2537 "!!\n"
2538 "!!\n"
2539 "!!\n");
2540 rc2 = DBGFR3Info(pVM, "cpumhyper", "verbose", pHlp);
2541 fDoneHyper = true;
2542
2543 /* Callstack. */
2544 DBGFSTACKFRAME Frame = {0};
2545 rc2 = DBGFR3StackWalkBeginHyper(pVM, &Frame);
2546 if (VBOX_SUCCESS(rc2))
2547 {
2548 pHlp->pfnPrintf(pHlp,
2549 "!!\n"
2550 "!! Call Stack:\n"
2551 "!!\n"
2552 "EBP Ret EBP Ret CS:EIP Arg0 Arg1 Arg2 Arg3 CS:EIP Symbol [line]\n");
2553 do
2554 {
2555 pHlp->pfnPrintf(pHlp,
2556 "%08RX32 %08RX32 %04RX32:%08RX32 %08RX32 %08RX32 %08RX32 %08RX32",
2557 (uint32_t)Frame.AddrFrame.off,
2558 (uint32_t)Frame.AddrReturnFrame.off,
2559 (uint32_t)Frame.AddrReturnPC.Sel,
2560 (uint32_t)Frame.AddrReturnPC.off,
2561 Frame.Args.au32[0],
2562 Frame.Args.au32[1],
2563 Frame.Args.au32[2],
2564 Frame.Args.au32[3]);
2565 pHlp->pfnPrintf(pHlp, " %RTsel:%08RGv", Frame.AddrPC.Sel, Frame.AddrPC.off);
2566 if (Frame.pSymPC)
2567 {
2568 RTGCINTPTR offDisp = Frame.AddrPC.FlatPtr - Frame.pSymPC->Value;
2569 if (offDisp > 0)
2570 pHlp->pfnPrintf(pHlp, " %s+%llx", Frame.pSymPC->szName, (int64_t)offDisp);
2571 else if (offDisp < 0)
2572 pHlp->pfnPrintf(pHlp, " %s-%llx", Frame.pSymPC->szName, -(int64_t)offDisp);
2573 else
2574 pHlp->pfnPrintf(pHlp, " %s", Frame.pSymPC->szName);
2575 }
2576 if (Frame.pLinePC)
2577 pHlp->pfnPrintf(pHlp, " [%s @ 0i%d]", Frame.pLinePC->szFilename, Frame.pLinePC->uLineNo);
2578 pHlp->pfnPrintf(pHlp, "\n");
2579
2580 /* next */
2581 rc2 = DBGFR3StackWalkNext(pVM, &Frame);
2582 } while (VBOX_SUCCESS(rc2));
2583 DBGFR3StackWalkEnd(pVM, &Frame);
2584 }
2585
2586 /* raw stack */
2587 pHlp->pfnPrintf(pHlp,
2588 "!!\n"
2589 "!! Raw stack (mind the direction).\n"
2590 "!!\n"
2591 "%.*Vhxd\n",
2592 VMM_STACK_SIZE, (char *)pVM->vmm.s.pbHCStack);
2593 break;
2594 }
2595
2596 default:
2597 {
2598 break;
2599 }
2600
2601 } /* switch (rcErr) */
2602
2603
2604 /*
2605 * Generic info dumper loop.
2606 */
2607 static struct
2608 {
2609 const char *pszInfo;
2610 const char *pszArgs;
2611 } const aInfo[] =
2612 {
2613 { "mappings", NULL },
2614 { "hma", NULL },
2615 { "cpumguest", "verbose" },
2616 { "cpumhyper", "verbose" },
2617 { "cpumhost", "verbose" },
2618 { "mode", "all" },
2619 { "cpuid", "verbose" },
2620 { "gdt", NULL },
2621 { "ldt", NULL },
2622 //{ "tss", NULL },
2623 { "ioport", NULL },
2624 { "mmio", NULL },
2625 { "phys", NULL },
2626 //{ "pgmpd", NULL }, - doesn't always work at init time...
2627 { "timers", NULL },
2628 { "activetimers", NULL },
2629 { "handlers", "phys virt hyper stats" },
2630 { "cfgm", NULL },
2631 };
2632 for (unsigned i = 0; i < ELEMENTS(aInfo); i++)
2633 {
2634 if (fDoneHyper && !strcmp(aInfo[i].pszInfo, "cpumhyper"))
2635 continue;
2636 pHlp->pfnPrintf(pHlp,
2637 "!!\n"
2638 "!! {%s, %s}\n"
2639 "!!\n",
2640 aInfo[i].pszInfo, aInfo[i].pszArgs);
2641 DBGFR3Info(pVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
2642 }
2643
2644 /* done */
2645 pHlp->pfnPrintf(pHlp,
2646 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
2647
2648
2649 /*
2650 * Delete the output instance (flushing and restoring of flags).
2651 */
2652 vmmR3FatalDumpInfoHlpDelete(&Hlp);
2653}
2654
2655
2656
2657/**
2658 * Displays the Force action Flags.
2659 *
2660 * @param pVM The VM handle.
2661 * @param pHlp The output helpers.
2662 * @param pszArgs The additional arguments (ignored).
2663 */
2664static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2665{
2666 const uint32_t fForcedActions = pVM->fForcedActions;
2667
2668 pHlp->pfnPrintf(pHlp, "Forced action Flags: %#RX32", fForcedActions);
2669
2670 /* show the flag mnemonics */
2671 int c = 0;
2672 uint32_t f = fForcedActions;
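    /* Note: the "+ 6" below strips the "VM_FF_" prefix from the mnemonics. */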
2673#define PRINT_FLAG(flag) do { \
2674 if (f & (flag)) \
2675 { \
2676 static const char *s_psz = #flag; \
2677 if (!(c % 6)) \
2678 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz + 6); \
2679 else \
2680 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
2681 c++; \
2682 f &= ~(flag); \
2683 } \
2684 } while (0)
2685 PRINT_FLAG(VM_FF_INTERRUPT_APIC);
2686 PRINT_FLAG(VM_FF_INTERRUPT_PIC);
2687 PRINT_FLAG(VM_FF_TIMER);
2688 PRINT_FLAG(VM_FF_PDM_QUEUES);
2689 PRINT_FLAG(VM_FF_PDM_DMA);
2690 PRINT_FLAG(VM_FF_PDM_CRITSECT);
2691 PRINT_FLAG(VM_FF_DBGF);
2692 PRINT_FLAG(VM_FF_REQUEST);
2693 PRINT_FLAG(VM_FF_TERMINATE);
2694 PRINT_FLAG(VM_FF_RESET);
2695 PRINT_FLAG(VM_FF_PGM_SYNC_CR3);
2696 PRINT_FLAG(VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
2697 PRINT_FLAG(VM_FF_TRPM_SYNC_IDT);
2698 PRINT_FLAG(VM_FF_SELM_SYNC_TSS);
2699 PRINT_FLAG(VM_FF_SELM_SYNC_GDT);
2700 PRINT_FLAG(VM_FF_SELM_SYNC_LDT);
2701 PRINT_FLAG(VM_FF_INHIBIT_INTERRUPTS);
2702 PRINT_FLAG(VM_FF_CSAM_SCAN_PAGE);
2703 PRINT_FLAG(VM_FF_CSAM_PENDING_ACTION);
2704 PRINT_FLAG(VM_FF_TO_R3);
2705 PRINT_FLAG(VM_FF_DEBUG_SUSPEND);
2706 if (f)
2707 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2708 else
2709 pHlp->pfnPrintf(pHlp, "\n");
2710#undef PRINT_FLAG
2711
2712 /* the groups */
2713 c = 0;
2714#define PRINT_GROUP(grp) do { \
2715 if (fForcedActions & (grp)) \
2716 { \
2717 static const char *s_psz = #grp; \
2718 if (!(c % 5)) \
2719 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : "Groups:\n", s_psz + 6); \
2720 else \
2721 pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
2722 c++; \
2723 } \
2724 } while (0)
2725 PRINT_GROUP(VM_FF_EXTERNAL_SUSPENDED_MASK);
2726 PRINT_GROUP(VM_FF_EXTERNAL_HALTED_MASK);
2727 PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_MASK);
2728 PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK);
2729 PRINT_GROUP(VM_FF_HIGH_PRIORITY_POST_MASK);
2730 PRINT_GROUP(VM_FF_NORMAL_PRIORITY_POST_MASK);
2731 PRINT_GROUP(VM_FF_NORMAL_PRIORITY_MASK);
2732 PRINT_GROUP(VM_FF_RESUME_GUEST_MASK);
2733 PRINT_GROUP(VM_FF_ALL_BUT_RAW_MASK);
2734 if (c)
2735 pHlp->pfnPrintf(pHlp, "\n");
2736#undef PRINT_GROUP
2737}
2738