/* $Id: VMM.cpp 1027 2007-02-22 20:29:35Z vboxsync $ */
/** @file
 * VMM - The Virtual Machine Monitor Core.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */

#if 0 //defined(__AMD64__) && !defined(__WIN__)
# define NO_SUPCALLR0VMM
#endif

/** @page pg_vmm        VMM - The Virtual Machine Monitor
 *
 * !Revise this! It's already incorrect!
 *
 * The Virtual Machine Monitor (VMM) is the core of the virtual machine. It
 * manages the alternate reality: controlling the virtualization, managing
 * resources, tracking CPU state and its resources, and so on...
 *
 * We will split the VMM into smaller entities:
 *
 *      - Virtual Machine Core Monitor (VMCM), whose purpose it is to
 *        provide ring and world switching, including routing
 *        interrupts to the host OS and traps to the appropriate trap
 *        handlers. It will implement an external interface for
 *        managing trap handlers.
 *
 *      - CPU Monitor (CM), tracking the state of the CPU (in the alternate
 *        reality) and implementing external interfaces to read and change
 *        the state.
 *
 *      - Memory Monitor (MM), whose purpose it is to virtualize physical
 *        pages, segment descriptor tables, interrupt descriptor tables, task
 *        segments, and keep track of all memory, providing external interfaces
 *        to access content and map pages. (Internally split into smaller entities!)
 *
 *      - IO Monitor (IOM), which virtualizes in and out I/O operations. It
 *        interacts with the MM to implement memory mapped I/O. External
 *        interfaces for adding and removing I/O ranges are implemented.
 *
 *      - External Interrupt Monitor (EIM), whose purpose it is to manage
 *        interrupts generated by virtual devices. This monitor provides
 *        an interface for raising interrupts which is accessible at any
 *        time and from all threads.
 *
 *      A subentity of the EIM is the virtual Programmable Interrupt
 *      Controller Device (VPICD), and perhaps a virtual I/O Advanced
 *      Programmable Interrupt Controller Device (VAPICD).
 *
 *      - Direct Memory Access Monitor (DMAM), whose purpose it is to support
 *        virtual devices using the DMA controller. Its interfaces must, like
 *        the EIM interfaces, be independent and threadable.
 *
 *      A subentity of the DMAM is a virtual DMA Controller Device (VDMACD).
 *
 *
 * Entities working on a higher level:
 *
 *      - Device Manager (DM), which is a support facility for virtualized
 *        hardware. This provides generic facilities for efficient device
 *        virtualization. It will manage device attaching and detaching,
 *        conversing with EIM and IOM.
 *
 *      - Debugger Facility (DBGF) provides the basic features for
 *        debugging the alternate reality execution.
 *
 *
 *
 * @section pg_vmm_s_use_cases          Use Cases
 *
 * @subsection pg_vmm_s_use_case_boot   Bootstrap
 *
 *  - Basic Init:
 *      - Init SUPDRV.
 *
 *  - Init Virtual Machine Instance:
 *      - Load settings.
 *      - Check resource requirements (memory, com, stuff).
 *
 *  - Init Host Ring 3 part:
 *      - Init Core code.
 *      - Load Pluggable Components.
 *      - Init Pluggable Components.
 *
 *  - Init Host Ring 0 part:
 *      - Load Core (core = core components like VMM, RMI, CA, and so on) code.
 *      - Init Core code.
 *      - Load Pluggable Component code.
 *      - Init Pluggable Component code.
 *
 *  - Allocate first chunk of memory and pin it down. This block of memory
 *    will fit the following pieces:
 *      - Virtual Machine Instance data. (Config, CPU state, VMM state, ++)
 *        (This is available from everywhere (at different addresses though)).
 *      - VMM Guest Context code.
 *      - Pluggable devices Guest Context code.
 *      - Page tables (directory and everything) for the VMM Guest.
 *
 *  - Setup Guest (Ring 0) part:
 *      - Setup initial page tables (i.e. directory all the stuff).
 *      - Load Core Guest Context code.
 *      - Load Pluggable Devices Guest Context code.
 *
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm.h>
#include <VBox/pgm.h>
#include <VBox/cfgm.h>
#include <VBox/pdm.h>
#include <VBox/cpum.h>
#include <VBox/mm.h>
#include <VBox/selm.h>
#include <VBox/em.h>
#include <VBox/sup.h>
#include <VBox/dbgf.h>
#include <VBox/stam.h>
#include <VBox/rem.h>
#include <VBox/ssm.h>
#include <VBox/tm.h>
#include <VBox/dis.h>
#include <VBox/log.h>
#include "VMMInternal.h"
#include "VMMSwitcher/VMMSwitcher.h"
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/version.h>
#include <VBox/x86.h>
#include <iprt/assert.h>
#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/time.h>
#include <iprt/stream.h>
#include <iprt/string.h>
#include <iprt/stdarg.h>
#include <iprt/ctype.h>
#include <iprt/critsect.h>
#include <iprt/thread.h>


/** The saved state version. */
#define VMM_SAVED_STATE_VERSION     3


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int)    vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int)    vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
static DECLCALLBACK(void)   vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
static int                  vmmR3ServiceCallHostRequest(PVM pVM);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Array of switcher definitions.
 * The type and index shall match!
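 *
 * That is, an entry's position must equal its VMMSWITCHER enum value, which
 * vmmR3InitCoreCode() asserts. A minimal sketch of the lookup this enables:
 * @code
 *      PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
 *      if (pSwitcher)
 *          AssertRelease(pSwitcher->enmType == enmSwitcher);
 * @endcode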
 */
static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
{
    NULL,                           /* invalid entry */
#ifndef __AMD64__
    &vmmR3Switcher32BitTo32Bit_Def,
    &vmmR3Switcher32BitToPAE_Def,
    NULL,   //&vmmR3Switcher32BitToAMD64_Def,
    &vmmR3SwitcherPAETo32Bit_Def,
    &vmmR3SwitcherPAEToPAE_Def,
    NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
    NULL,   //&vmmR3SwitcherAMD64ToPAE_Def,
    NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
#else
    NULL,   //&vmmR3Switcher32BitTo32Bit_Def,
    NULL,   //&vmmR3Switcher32BitToPAE_Def,
    NULL,   //&vmmR3Switcher32BitToAMD64_Def,
    NULL,   //&vmmR3SwitcherPAETo32Bit_Def,
    NULL,   //&vmmR3SwitcherPAEToPAE_Def,
    NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
    &vmmR3SwitcherAMD64ToPAE_Def,
    NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
#endif
};


/**
 * Initiates the core code.
 *
 * This is core per VM code which might need fixups and/or for ease of use
 * are put on linear contiguous backing.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to VM structure.
 */
static int vmmR3InitCoreCode(PVM pVM)
{
    /*
     * Calc the size.
     */
    unsigned cbCoreCode = 0;
    for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
    {
        pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
        if (pSwitcher)
        {
            AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
            cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
        }
    }

    /*
     * Allocate contiguous pages for switchers and deal with
     * conflicts in the intermediate mapping of the code.
     */
    pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
    pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    int rc = VERR_NO_MEMORY;
    if (pVM->vmm.s.pvHCCoreCodeR3)
    {
        rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
        if (rc == VERR_PGM_MAPPINGS_FIX_CONFLICT)
        {
            /* try more allocations. */
            struct
            {
                RTR0PTR  pvR0;
                void    *pvR3;
                RTHCPHYS HCPhys;
            } aBadTries[16];
            unsigned i = 0;
            do
            {
                aBadTries[i].pvR3   = pVM->vmm.s.pvHCCoreCodeR3;
                aBadTries[i].pvR0   = pVM->vmm.s.pvHCCoreCodeR0;
                aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                i++;
                pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
                pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
                pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
                if (!pVM->vmm.s.pvHCCoreCodeR3)
                    break;
                rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
            } while (   rc == VERR_PGM_MAPPINGS_FIX_CONFLICT
                     && i < ELEMENTS(aBadTries) - 1);

            /* cleanup */
            if (VBOX_FAILURE(rc))
            {
                aBadTries[i].pvR3   = pVM->vmm.s.pvHCCoreCodeR3;
                aBadTries[i].pvR0   = pVM->vmm.s.pvHCCoreCodeR0;
                aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                i++;
                LogRel(("Failed to allocate and map core code: rc=%Vrc\n", rc));
            }
            while (i-- > 0)
            {
                LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
                        i, aBadTries[i].pvR3, aBadTries[i].pvR0, aBadTries[i].HCPhys));
                SUPContFree(aBadTries[i].pvR3);
            }
        }
    }
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Copy the code.
         */
        for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
        {
            PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
            if (pSwitcher)
                memcpy((uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
                       pSwitcher->pvCode, pSwitcher->cbCode);
        }

        /*
         * Map the code into the GC address space.
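         * After this the very same pages become visible in a third context:
         * pvHCCoreCodeR3 is the ring-3 view, pvHCCoreCodeR0 the ring-0 view,
         * and pvGCCoreCode the hypervisor (GC) view established below.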
         */
        rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &pVM->vmm.s.pvGCCoreCode);
        if (VBOX_SUCCESS(rc))
        {
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
            LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VGv Phys=%VHp cb=%#x\n",
                    pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));

            /*
             * Finally, PGM has probably selected a switcher already, but we
             * need to get hold of the addresses, so we'll reselect it.
             * This may legally fail, so we're ignoring the rc.
             */
            VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
            return rc;
        }

        /* shit */
        AssertMsgFailed(("PGMR3Map(,%VGv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
        SUPContFree(pVM->vmm.s.pvHCCoreCodeR3);
    }
    else
        VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to allocate %d bytes of contiguous memory for the world switcher code."), cbCoreCode);
    pVM->vmm.s.pvHCCoreCodeR3 = NULL;
    pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
    pVM->vmm.s.pvGCCoreCode = 0;
    return rc;
}


/**
 * Initializes the VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3Init(PVM pVM)
{
    LogFlow(("VMMR3Init\n"));

    /*
     * Assert alignment, sizes and order.
     */
    AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
    AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
              ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
               sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));

    /*
     * Init basic VM VMM members.
     */
    pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
    int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
        //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
    else
        AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rc), rc);

    /* GC switchers are enabled by default. Turned off by HWACCM. */
    pVM->vmm.s.fSwitcherDisabled = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
                               NULL, vmmR3Save, NULL,
                               NULL, vmmR3Load, NULL);
    if (VBOX_FAILURE(rc))
        return rc;

#ifdef VBOX_WITHOUT_IDT_PATCHING
    /*
     * Register the Ring-0 VM handle with the session for fast ioctl calls.
     */
    rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
    if (VBOX_FAILURE(rc))
        return rc;
#endif

    /*
     * Init core code.
     */
    rc = vmmR3InitCoreCode(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Allocate & init VMM GC stack.
         * The stack pages are also used by the VMM R0 when VMMR0CallHost is invoked.
         * (The page protection is modified during R3 init completion.)
         */
#ifdef VBOX_STRICT_VMM_STACK
        rc = MMHyperAlloc(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
#else
        rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
#endif
        if (VBOX_SUCCESS(rc))
        {
            /* Set HC and GC stack pointers to top of stack. */
            pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = (RTR0PTR)pVM->vmm.s.pbHCStack;
            pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
            pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
            AssertRelease(pVM->vmm.s.pbGCStack);

            /* Set hypervisor ESP. */
            CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStack);

            /*
             * Allocate GC & R0 Logger instances (they are finalized in the relocator).
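             * The GC logger is sized after the default ring-3 logger's group
             * count (hence the RT_OFFSETOF math on afGroups[] below) and only
             * becomes operational once VMMR3UpdateLoggers() has resolved the
             * GC wrapper and flush entry points.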
*/ #ifdef LOG_ENABLED PRTLOGGER pLogger = RTLogDefaultInstance(); if (pLogger) { pVM->vmm.s.cbLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pLogger->cGroups]); rc = MMHyperAlloc(pVM, pVM->vmm.s.cbLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pLoggerHC); if (VBOX_SUCCESS(rc)) { pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC); /* * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup), so * you have to sign up here by adding your defined(DEBUG_) to the #if. * * If you want to log in non-debug modes, you'll have to remember to change SUPDRvShared.c * to not stub all the log functions. */ # ifdef DEBUG_sandervl rc = MMHyperAlloc(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]), 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0Logger); if (VBOX_SUCCESS(rc)) { pVM->vmm.s.pR0Logger->pVM = pVM; //pVM->vmm.s.pR0Logger->fCreated = false; pVM->vmm.s.pR0Logger->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]); } # endif } } #endif /* LOG_ENABLED */ #ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG /* * Allocate GC Release Logger instances (finalized in the relocator). */ if (VBOX_SUCCESS(rc)) { PRTLOGGER pRelLogger = RTLogRelDefaultInstance(); if (pRelLogger) { pVM->vmm.s.cbRelLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pRelLogger->cGroups]); rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRelLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRelLoggerHC); if (VBOX_SUCCESS(rc)) pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC); } } #endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */ #ifdef VBOX_WITH_NMI /* * Allocate mapping for the host APIC. */ if (VBOX_SUCCESS(rc)) { rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase); AssertRC(rc); } #endif if (VBOX_SUCCESS(rc)) { rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock); if (VBOX_SUCCESS(rc)) { /* * Statistics. 
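                 * All counters live under the /VMM branch of the STAM tree;
                 * the /VMM/GCRet/* ones count how often raw-mode execution
                 * returned with each particular status code.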
*/ STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetNormal, STAMTYPE_COUNTER, "/VMM/GCRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterrupt, STAMTYPE_COUNTER, "/VMM/GCRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetGuestTrap, STAMTYPE_COUNTER, "/VMM/GCRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitch, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetExceptionPrivilege, STAMTYPE_COUNTER, "/VMM/GCRet/ExceptionPrivilege", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetStaleSelector, STAMTYPE_COUNTER, "/VMM/GCRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetIRETTrap, STAMTYPE_COUNTER, "/VMM/GCRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetIORead, STAMTYPE_COUNTER, "/VMM/GCRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOReadWrite, STAMTYPE_COUNTER, "/VMM/GCRet/IOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READWRITE returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIORead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetLDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetGDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetIDTFault, 
STAMTYPE_COUNTER, "/VMM/GCRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetTSSFault, STAMTYPE_COUNTER, "/VMM/GCRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDFault, STAMTYPE_COUNTER, "/VMM/GCRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetCSAMTask, STAMTYPE_COUNTER, "/VMM/GCRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetSyncCR3, STAMTYPE_COUNTER, "/VMM/GCRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetMisc, STAMTYPE_COUNTER, "/VMM/GCRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchInt3, STAMTYPE_COUNTER, "/VMM/GCRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchPF, STAMTYPE_COUNTER, "/VMM/GCRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchGP, STAMTYPE_COUNTER, "/VMM/GCRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/GCRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPageOverflow, STAMTYPE_COUNTER, "/VMM/GCRet/InvlpgOverflow", STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/GCRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetToR3, STAMTYPE_COUNTER, "/VMM/GCRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetTimerPending, STAMTYPE_COUNTER, "/VMM/GCRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptPending, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetCallHost, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/Misc", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMGrowRAM, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/GrowRAM", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PDMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetLogFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/LogFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMQueueFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/QueueFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMPoolGrow",STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetRemReplay, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/REMReplay", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetVMSetError, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/VMSetError", 
STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/GCRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/GCRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulHlt, STAMTYPE_COUNTER, "/VMM/GCRet/EmulHlt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns."); STAM_REG(pVM, &pVM->vmm.s.StatGCRetPendingRequest, STAMTYPE_COUNTER, "/VMM/GCRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns."); return VINF_SUCCESS; } AssertRC(rc); } } /** @todo: Need failure cleanup. */ //more todo in here? //if (VBOX_SUCCESS(rc)) //{ //} //int rc2 = vmmR3TermCoreCode(pVM); //AssertRC(rc2)); } return rc; } /** * Ring-3 init finalizing. * * @returns VBox status code. * @param pVM The VM handle. */ VMMR3DECL(int) VMMR3InitFinalize(PVM pVM) { #ifdef VBOX_STRICT_VMM_STACK /* * Two inaccessible pages at each sides of the stack to catch over/under-flows. */ memset(pVM->vmm.s.pbHCStack - PAGE_SIZE, 0xcc, PAGE_SIZE); PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack - PAGE_SIZE), PAGE_SIZE, 0); RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE); memset(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, 0xcc, PAGE_SIZE); PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack + VMM_STACK_SIZE), PAGE_SIZE, 0); RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE); #endif /* * Set page attributes to r/w for stack pages. */ int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbGCStack, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW); AssertRC(rc); if (VBOX_SUCCESS(rc)) { /* * Create the EMT yield timer. */ rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer); if (VBOX_SUCCESS(rc)) rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies); } #ifdef VBOX_WITH_NMI /* * Map the host APIC into GC - This may be host os specific! */ if (VBOX_SUCCESS(rc)) rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE, X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D); #endif return rc; } /** * Initializes the R0 VMM. * * @returns VBox status code. * @param pVM The VM to operate on. */ VMMR3DECL(int) VMMR3InitR0(PVM pVM) { int rc; /* * Initialize the ring-0 logger if we haven't done so yet. */ if ( pVM->vmm.s.pR0Logger && !pVM->vmm.s.pR0Logger->fCreated) { rc = VMMR3UpdateLoggers(pVM); if (VBOX_FAILURE(rc)) return rc; } /* * Call Ring-0 entry with init code. */ for (;;) { #ifdef NO_SUPCALLR0VMM //rc = VERR_GENERAL_FAILURE; rc = VINF_SUCCESS; #else rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, (void *)VBOX_VERSION); #endif if ( pVM->vmm.s.pR0Logger && pVM->vmm.s.pR0Logger->Logger.offScratch > 0) RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL); if (rc != VINF_VMM_CALL_HOST) break; rc = vmmR3ServiceCallHostRequest(pVM); if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)) break; break; // remove this when we do setjmp for all ring-0 stuff. 
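        /*
         * Note: the VINF_VMM_CALL_HOST dance above is the general pattern for
         * all the ring-0 and GC entry points in this file: code on the other
         * side that needs ring-3 services returns VINF_VMM_CALL_HOST, ring-3
         * services the request and re-enters. A minimal sketch of the loop:
         *
         *      for (;;)
         *      {
         *          rc = SUPCallVMMR0(pVM->pVMR0, enmOperation, pvArg);
         *          if (rc != VINF_VMM_CALL_HOST)
         *              break;
         *          rc = vmmR3ServiceCallHostRequest(pVM);
         *          if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
         *              break;
         *      }
         */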
} if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)) { LogRel(("R0 init failed, rc=%Vra\n", rc)); if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) rc = VERR_INTERNAL_ERROR; } return rc; } /** * Initializes the GC VMM. * * @returns VBox status code. * @param pVM The VM to operate on. */ VMMR3DECL(int) VMMR3InitGC(PVM pVM) { /* In VMX mode, there's no need to init GC. */ if (pVM->vmm.s.fSwitcherDisabled) return VINF_SUCCESS; /* * Call VMMGCInit(): * -# resolve the address. * -# setup stackframe and EIP to use the trampoline. * -# do a generic hypervisor call. */ RTGCPTR GCPtrEP; int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP); if (VBOX_SUCCESS(rc)) { CPUMHyperSetCtxCore(pVM, NULL); CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */ CPUMPushHyper(pVM, VBOX_VERSION); /* Param 2: Version argument. */ CPUMPushHyper(pVM, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */ CPUMPushHyper(pVM, pVM->pVMGC); /* Param 0: pVM */ CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* trampoline param: stacksize. */ CPUMPushHyper(pVM, GCPtrEP); /* Call EIP. */ CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline); for (;;) { #ifdef NO_SUPCALLR0VMM //rc = VERR_GENERAL_FAILURE; rc = VINF_SUCCESS; #else rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_CALL_HYPERVISOR, NULL); #endif #ifdef LOG_ENABLED PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC; if ( pLogger && pLogger->offScratch > 0) RTLogFlushGC(NULL, pLogger); #endif #ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC; if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0)) RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger); #endif if (rc != VINF_VMM_CALL_HOST) break; rc = vmmR3ServiceCallHostRequest(pVM); if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)) break; } if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)) { VMMR3FatalDump(pVM, rc); if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) rc = VERR_INTERNAL_ERROR; } AssertRC(rc); } return rc; } /** * Terminate the VMM bits. * * @returns VINF_SUCCESS. * @param pVM The VM handle. */ VMMR3DECL(int) VMMR3Term(PVM pVM) { /** @todo must call ring-0 so the logger thread instance can be properly removed. */ #ifdef VBOX_STRICT_VMM_STACK /* * Make the two stack guard pages present again. */ RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE); RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE); #endif return VINF_SUCCESS; } /** * Applies relocations to data and code managed by this * component. This function will be called at init and * whenever the VMM need to relocate it self inside the GC. * * The VMM will need to apply relocations to the core code. * * @param pVM The VM handle. * @param offDelta The relocation delta. */ VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta) { LogFlow(("VMMR3Relocate: offDelta=%VGv\n", offDelta)); /* * Recalc the GC address. */ pVM->vmm.s.pvGCCoreCode = MMHyperHC2GC(pVM, pVM->vmm.s.pvHCCoreCodeR3); /* * The stack. */ CPUMSetHyperESP(pVM, CPUMGetHyperESP(pVM) + offDelta); pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack); pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE; /* * All the switchers. 
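     * Every switcher providing a pfnRelocate callback is fixed up again
     * against the new mapping; the callback gets the ring-0, ring-3, GC and
     * physical addresses of its code so it can rewrite absolute references.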
*/ for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++) { PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher]; if (pSwitcher && pSwitcher->pfnRelocate) { unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher]; pSwitcher->pfnRelocate(pVM, pSwitcher, (uint8_t *)pVM->vmm.s.pvHCCoreCodeR0 + off, (uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + off, pVM->vmm.s.pvGCCoreCode + off, pVM->vmm.s.HCPhysCoreCode + off); } } /* * Recalc the GC address for the current switcher. */ PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher]; RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher]; pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost; pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline; pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm; pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx; pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx; /* * Get other GC entry points. */ int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMGCResumeGuest); AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Vra\n", rc)); rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMGCResumeGuestV86); AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Vra\n", rc)); /* * Update the logger. */ VMMR3UpdateLoggers(pVM); } /** * Updates the settings for the GC and R0 loggers. * * @returns VBox status code. * @param pVM The VM handle. */ VMMR3DECL(int) VMMR3UpdateLoggers(PVM pVM) { /* * Simply clone the logger instance (for GC). */ int rc = VINF_SUCCESS; RTGCPTR GCPtrLoggerFlush = 0; if (pVM->vmm.s.pLoggerHC #ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG || pVM->vmm.s.pRelLoggerHC #endif ) { rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &GCPtrLoggerFlush); AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Vra\n", rc)); } if (pVM->vmm.s.pLoggerHC) { RTGCPTR GCPtrLoggerWrapper = 0; rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &GCPtrLoggerWrapper); AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Vra\n", rc)); pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC); rc = RTLogCloneGC(NULL /* default */, pVM->vmm.s.pLoggerHC, pVM->vmm.s.cbLoggerGC, GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED); AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc)); } #ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG if (pVM->vmm.s.pRelLoggerHC) { RTGCPTR GCPtrLoggerWrapper = 0; rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &GCPtrLoggerWrapper); AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Vra\n", rc)); pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC); rc = RTLogCloneGC(RTLogRelDefaultInstance(), pVM->vmm.s.pRelLoggerHC, pVM->vmm.s.cbRelLoggerGC, GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED); AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc)); } #endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */ /* * For the ring-0 EMT logger, we use a per-thread logger * instance in ring-0. Only initialize it once. */ PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger; if (pR0Logger) { if (!pR0Logger->fCreated) { RTHCPTR pfnLoggerWrapper = NULL; rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper); AssertReleaseMsgRCReturn(rc, ("VMMLoggerWrapper not found! 
rc=%Vra\n", rc), rc); RTHCPTR pfnLoggerFlush = NULL; rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush); AssertReleaseMsgRCReturn(rc, ("VMMLoggerFlush not found! rc=%Vra\n", rc), rc); rc = RTLogCreateForR0(&pR0Logger->Logger, pR0Logger->cbLogger, *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush, RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY); AssertReleaseMsgRCReturn(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc), rc); pR0Logger->fCreated = true; } rc = RTLogCopyGroupsAndFlags(&pR0Logger->Logger, NULL /* default */, RTLOGFLAGS_BUFFERED, 0); AssertRC(rc); } return rc; } /** * Generic switch code relocator. * * @param pVM The VM handle. * @param pSwitcher The switcher definition. * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping. * @param pu8CodeR0 Pointer to the core code block for the switcher, ring-0 mapping. * @param GCPtrCode The guest context address corresponding to pu8Code. * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code. * @param SelCS The hypervisor CS selector. * @param SelDS The hypervisor DS selector. * @param SelTSS The hypervisor TSS selector. * @param GCPtrGDT The GC address of the hypervisor GDT. * @param SelCS64 The 64-bit mode hypervisor CS selector. */ static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode, RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64) { union { const uint8_t *pu8; const uint16_t *pu16; const uint32_t *pu32; const uint64_t *pu64; const void *pv; uintptr_t u; } u; u.pv = pSwitcher->pvFixups; /* * Process fixups. */ uint8_t u8; while ((u8 = *u.pu8++) != FIX_THE_END) { /* * Get the source (where to write the fixup). */ uint32_t offSrc = *u.pu32++; Assert(offSrc < pSwitcher->cbCode); union { uint8_t *pu8; uint16_t *pu16; uint32_t *pu32; uint64_t *pu64; uintptr_t u; } uSrc; uSrc.pu8 = pu8CodeR3 + offSrc; /* The fixup target and method depends on the type. */ switch (u8) { /* * 32-bit relative, source in HC and target in GC. */ case FIX_HC_2_GC_NEAR_REL: { Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1); uint32_t offTrg = *u.pu32++; Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode); *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4)); break; } /* * 32-bit relative, source in HC and target in ID. */ case FIX_HC_2_ID_NEAR_REL: { Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1); uint32_t offTrg = *u.pu32++; Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1); *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (uSrc.u + 4)); break; } /* * 32-bit relative, source in GC and target in HC. */ case FIX_GC_2_HC_NEAR_REL: { Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode); uint32_t offTrg = *u.pu32++; Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1); *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (GCPtrCode + offSrc + 4)); break; } /* * 32-bit relative, source in GC and target in ID. 
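         * As with the other NEAR_REL fixups, the patched dword is a rel32
         * displacement: the target address minus the address of the byte
         * following the 4-byte displacement, i.e.
         *
         *      *uSrc.pu32 = (uint32_t)(uAddrTarget - (uAddrSrc + 4));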
*/ case FIX_GC_2_ID_NEAR_REL: { Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode); uint32_t offTrg = *u.pu32++; Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1); *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4)); break; } /* * 32-bit relative, source in ID and target in HC. */ case FIX_ID_2_HC_NEAR_REL: { Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1); uint32_t offTrg = *u.pu32++; Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1); *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (u32IDCode + offSrc + 4)); break; } /* * 32-bit relative, source in ID and target in HC. */ case FIX_ID_2_GC_NEAR_REL: { Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1); uint32_t offTrg = *u.pu32++; Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode); *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4)); break; } /* * 16:32 far jump, target in GC. */ case FIX_GC_FAR32: { uint32_t offTrg = *u.pu32++; Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode); *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg); *uSrc.pu16++ = SelCS; break; } /* * Make 32-bit GC pointer given CPUM offset. */ case FIX_GC_CPUM_OFF: { uint32_t offCPUM = *u.pu32++; Assert(offCPUM < sizeof(pVM->cpum)); *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM); break; } /* * Make 32-bit GC pointer given VM offset. */ case FIX_GC_VM_OFF: { uint32_t offVM = *u.pu32++; Assert(offVM < sizeof(VM)); *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM); break; } /* * Make 32-bit HC pointer given CPUM offset. */ case FIX_HC_CPUM_OFF: { uint32_t offCPUM = *u.pu32++; Assert(offCPUM < sizeof(pVM->cpum)); *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM; break; } /* * Make 32-bit R0 pointer given VM offset. */ case FIX_HC_VM_OFF: { uint32_t offVM = *u.pu32++; Assert(offVM < sizeof(VM)); *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM; break; } /* * Store the 32-Bit CR3 (32-bit) for the intermediate memory context. */ case FIX_INTER_32BIT_CR3: { *uSrc.pu32 = PGMGetInter32BitCR3(pVM); break; } /* * Store the PAE CR3 (32-bit) for the intermediate memory context. */ case FIX_INTER_PAE_CR3: { *uSrc.pu32 = PGMGetInterPaeCR3(pVM); break; } /* * Store the AMD64 CR3 (32-bit) for the intermediate memory context. */ case FIX_INTER_AMD64_CR3: { *uSrc.pu32 = PGMGetInterAmd64CR3(pVM); break; } /* * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context. */ case FIX_HYPER_32BIT_CR3: { *uSrc.pu32 = PGMGetHyper32BitCR3(pVM); break; } /* * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context. */ case FIX_HYPER_PAE_CR3: { *uSrc.pu32 = PGMGetHyperPaeCR3(pVM); break; } /* * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context. */ case FIX_HYPER_AMD64_CR3: { *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM); break; } /* * Store Hypervisor CS (16-bit). */ case FIX_HYPER_CS: { *uSrc.pu16 = SelCS; break; } /* * Store Hypervisor DS (16-bit). */ case FIX_HYPER_DS: { *uSrc.pu16 = SelDS; break; } /* * Store Hypervisor TSS (16-bit). */ case FIX_HYPER_TSS: { *uSrc.pu16 = SelTSS; break; } /* * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT). 
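         * (SelTSS & ~7) is the descriptor's byte offset into the GDT, and +4
         * selects the second dword, the one containing the type field, which
         * is presumably what the switcher pokes to manage the TSS busy bit.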
*/ case FIX_GC_TSS_GDTE_DW2: { RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4; *uSrc.pu32 = (uint32_t)GCPtr; break; } ///@todo case FIX_CR4_MASK: ///@todo case FIX_CR4_OSFSXR: /* * Insert relative jump to specified target it FXSAVE/FXRSTOR isn't supported by the cpu. */ case FIX_NO_FXSAVE_JMP: { uint32_t offTrg = *u.pu32++; Assert(offTrg < pSwitcher->cbCode); if (!CPUMSupportsFXSR(pVM)) { *uSrc.pu8++ = 0xe9; /* jmp rel32 */ *uSrc.pu32++ = offTrg - (offSrc + 5); } else { *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc); *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1); } break; } /* * Insert relative jump to specified target it SYSENTER isn't used by the host. */ case FIX_NO_SYSENTER_JMP: { uint32_t offTrg = *u.pu32++; Assert(offTrg < pSwitcher->cbCode); if (!CPUMIsHostUsingSysEnter(pVM)) { *uSrc.pu8++ = 0xe9; /* jmp rel32 */ *uSrc.pu32++ = offTrg - (offSrc + 5); } else { *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc); *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1); } break; } /* * Insert relative jump to specified target it SYSENTER isn't used by the host. */ case FIX_NO_SYSCALL_JMP: { uint32_t offTrg = *u.pu32++; Assert(offTrg < pSwitcher->cbCode); if (!CPUMIsHostUsingSysEnter(pVM)) { *uSrc.pu8++ = 0xe9; /* jmp rel32 */ *uSrc.pu32++ = offTrg - (offSrc + 5); } else { *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc); *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1); } break; } #ifdef __AMD64__ /* * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset). */ case FIX_HC_64BIT: { uint32_t offTrg = *u.pu32++; Assert(offSrc < pSwitcher->cbCode); Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1); *uSrc.pu64 = (uintptr_t)pu8CodeR0 + offTrg; break; } /* * 64-bit HC pointer to the CPUM instance data (no argument). */ case FIX_HC_64BIT_CPUM: { Assert(offSrc < pSwitcher->cbCode); *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum); break; } #endif /* * 32-bit ID pointer to (ID) target within the code (32-bit offset). */ case FIX_ID_32BIT: { uint32_t offTrg = *u.pu32++; Assert(offSrc < pSwitcher->cbCode); Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1); *uSrc.pu32 = u32IDCode + offTrg; break; } /* * 64-bit ID pointer to (ID) target within the code (32-bit offset). */ case FIX_ID_64BIT: { uint32_t offTrg = *u.pu32++; Assert(offSrc < pSwitcher->cbCode); Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1); *uSrc.pu64 = u32IDCode + offTrg; break; } /* * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset). */ case FIX_ID_FAR32_TO_64BIT_MODE: { uint32_t offTrg = *u.pu32++; Assert(offSrc < pSwitcher->cbCode); Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1); *uSrc.pu32++ = u32IDCode + offTrg; *uSrc.pu16 = SelCS64; AssertRelease(SelCS64); break; } #ifdef VBOX_WITH_NMI /* * 32-bit address to the APIC base. */ case FIX_GC_APIC_BASE_32BIT: { *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase; break; } #endif default: AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc)); break; } } #ifdef LOG_ENABLED /* * If Log2 is enabled disassemble the switcher code. * * The switcher code have 1-2 HC parts, 1 GC part and 0-2 ID parts. 
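     * Each part is disassembled against its own base address (ring-0 for the
     * HC parts, GCPtrCode for the GC part, u32IDCode for the ID parts), since
     * that is where the code will actually execute.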
*/ if (LogIs2Enabled()) { RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n" " pu8CodeR0 = %p\n" " pu8CodeR3 = %p\n" " GCPtrCode = %VGv\n" " u32IDCode = %08x\n" " pVMGC = %VGv\n" " pCPUMGC = %VGv\n" " pVMHC = %p\n" " pCPUMHC = %p\n" " GCPtrGDT = %VGv\n" " InterCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n" " HyperCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n" " SelCS = %04x\n" " SelDS = %04x\n" " SelCS64 = %04x\n" " SelTSS = %04x\n", pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM), VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum, GCPtrGDT, PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM), PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM), SelCS, SelDS, SelCS64, SelTSS); uint32_t offCode = 0; while (offCode < pSwitcher->cbCode) { /* * Figure out where this is. */ const char *pszDesc = NULL; RTUINTPTR uBase; uint32_t cbCode; if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0) { pszDesc = "HCCode0"; uBase = (RTUINTPTR)pu8CodeR0; offCode = pSwitcher->offHCCode0; cbCode = pSwitcher->cbHCCode0; } else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1) { pszDesc = "HCCode1"; uBase = (RTUINTPTR)pu8CodeR0; offCode = pSwitcher->offHCCode1; cbCode = pSwitcher->cbHCCode1; } else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode) { pszDesc = "GCCode"; uBase = GCPtrCode; offCode = pSwitcher->offGCCode; cbCode = pSwitcher->cbGCCode; } else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0) { pszDesc = "IDCode0"; uBase = u32IDCode; offCode = pSwitcher->offIDCode0; cbCode = pSwitcher->cbIDCode0; } else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1) { pszDesc = "IDCode1"; uBase = u32IDCode; offCode = pSwitcher->offIDCode1; cbCode = pSwitcher->cbIDCode1; } else { RTLogPrintf(" %04x: %02x '%c' (nowhere)\n", offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' '); offCode++; continue; } /* * Disassemble it. */ RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode); DISCPUSTATE Cpu = {0}; Cpu.mode = CPUMODE_32BIT; while (cbCode > 0) { /* try label it */ if (pSwitcher->offR0HostToGuest == offCode) RTLogPrintf(" *R0HostToGuest:\n"); if (pSwitcher->offGCGuestToHost == offCode) RTLogPrintf(" *GCGuestToHost:\n"); if (pSwitcher->offGCCallTrampoline == offCode) RTLogPrintf(" *GCCallTrampoline:\n"); if (pSwitcher->offGCGuestToHostAsm == offCode) RTLogPrintf(" *GCGuestToHostAsm:\n"); if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode) RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n"); if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode) RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n"); /* disas */ uint32_t cbInstr = 0; char szDisas[256]; if (DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas)) RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'. else { RTLogPrintf(" %04x: %02x '%c'\n", offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' '); cbInstr = 1; } offCode += cbInstr; cbCode -= RT_MIN(cbInstr, cbCode); } } } #endif } /** * Relocator for the 32-Bit to 32-Bit world switcher. 
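 *
 * This and the following relocators are thin wrappers around
 * vmmR3SwitcherGenericRelocate(), feeding it the hypervisor selectors and
 * GDT address from SELM; only the AMD64-to-PAE variant supplies a non-zero
 * 64-bit mode CS selector.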
*/ DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode) { vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0); } /** * Relocator for the 32-Bit to PAE world switcher. */ DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode) { vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0); } /** * Relocator for the PAE to 32-Bit world switcher. */ DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode) { vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0); } /** * Relocator for the PAE to PAE world switcher. */ DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode) { vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0); } /** * Relocator for the AMD64 to PAE world switcher. */ DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode) { vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM)); } /** * Gets the pointer to g_szRTAssertMsg1 in GC. * @returns Pointer to VMMGC::g_szRTAssertMsg1. * Returns NULL if not present. * @param pVM The VM handle. */ VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM) { RTGCPTR GCPtr; int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg1", &GCPtr); if (VBOX_SUCCESS(rc)) return (const char *)MMHyperGC2HC(pVM, GCPtr); return NULL; } /** * Gets the pointer to g_szRTAssertMsg2 in GC. * @returns Pointer to VMMGC::g_szRTAssertMsg2. * Returns NULL if not present. * @param pVM The VM handle. */ VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM) { RTGCPTR GCPtr; int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg2", &GCPtr); if (VBOX_SUCCESS(rc)) return (const char *)MMHyperGC2HC(pVM, GCPtr); return NULL; } /** * Execute state save operation. * * @returns VBox status code. * @param pVM VM Handle. * @param pSSM SSM operation handle. */ static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM) { LogFlow(("vmmR3Save:\n")); /* * The hypervisor stack. */ SSMR3PutGCPtr(pSSM, pVM->vmm.s.pbGCStackBottom); RTGCPTR GCPtrESP = CPUMGetHyperESP(pVM); Assert(pVM->vmm.s.pbGCStackBottom - GCPtrESP <= VMM_STACK_SIZE); SSMR3PutGCPtr(pSSM, GCPtrESP); SSMR3PutMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE); return SSMR3PutU32(pSSM, ~0); /* terminator */ } /** * Execute state load operation. * * @returns VBox status code. * @param pVM VM Handle. * @param pSSM SSM operation handle. * @param u32Version Data layout version. 
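 *
 * @remark  The unit layout written by vmmR3Save is: the GC stack bottom
 *          address (RTGCPTR), the hypervisor ESP (RTGCPTR), the raw stack
 *          bytes (VMM_STACK_SIZE of them), and a ~0 terminator (uint32_t).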
 */
static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    LogFlow(("vmmR3Load:\n"));

    /*
     * Validate version.
     */
    if (u32Version != VMM_SAVED_STATE_VERSION)
    {
        Log(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Check that the stack is in the same place, or that it's fairly empty.
     */
    RTGCPTR GCPtrStackBottom;
    SSMR3GetGCPtr(pSSM, &GCPtrStackBottom);
    RTGCPTR GCPtrESP;
    int rc = SSMR3GetGCPtr(pSSM, &GCPtrESP);
    if (VBOX_FAILURE(rc))
        return rc;
    if (    GCPtrStackBottom == pVM->vmm.s.pbGCStackBottom
        ||  (GCPtrStackBottom - GCPtrESP < 32)) /** @todo This will break if we start preempting the hypervisor. */
    {
        /*
         * We *must* set the ESP because the CPUM load + PGM load relocations will render
         * the ESP in CPUM fatally invalid.
         */
        CPUMSetHyperESP(pVM, GCPtrESP);

        /* restore the stack. */
        SSMR3GetMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);

        /* terminator */
        uint32_t u32;
        rc = SSMR3GetU32(pSSM, &u32);
        if (VBOX_FAILURE(rc))
            return rc;
        if (u32 != ~0U)
        {
            AssertMsgFailed(("u32=%#x\n", u32));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        return VINF_SUCCESS;
    }

    LogRel(("The stack is not in the same place and it's not empty! GCPtrStackBottom=%VGv pbGCStackBottom=%VGv ESP=%VGv\n",
            GCPtrStackBottom, pVM->vmm.s.pbGCStackBottom, GCPtrESP));
    AssertFailed();
    return VERR_SSM_LOAD_CONFIG_MISMATCH;
}


/**
 * Selects the switcher to be used for switching to GC.
 *
 * @returns VBox status code.
 * @param   pVM             VM handle.
 * @param   enmSwitcher     The new switcher.
 * @remark  This function may be called before the VMM is initialized.
 */
VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
{
    /*
     * Validate input.
     */
    if (    enmSwitcher < VMMSWITCHER_INVALID
        ||  enmSwitcher >= VMMSWITCHER_MAX)
    {
        AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Select the new switcher.
     */
    PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
    if (pSwitcher)
    {
        Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
        pVM->vmm.s.enmSwitcher = enmSwitcher;

        RTR0PTR     pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvHCCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvHCCoreCodeR0 type */
        pVM->vmm.s.pfnR0HostToGuest = pbCodeR0 + pSwitcher->offR0HostToGuest;

        RTGCPTR     GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[enmSwitcher];
        pVM->vmm.s.pfnGCGuestToHost         = GCPtr + pSwitcher->offGCGuestToHost;
        pVM->vmm.s.pfnGCCallTrampoline      = GCPtr + pSwitcher->offGCCallTrampoline;
        pVM->pfnVMMGCGuestToHostAsm         = GCPtr + pSwitcher->offGCGuestToHostAsm;
        pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
        pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
        return VINF_SUCCESS;
    }
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Disable the switcher logic permanently.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
{
/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
 * @code
 *       mov eax, VERR_INTERNAL_ERROR
 *       ret
 * @endcode
 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
 */
    pVM->vmm.s.fSwitcherDisabled = true;
    return VINF_SUCCESS;
}


/**
 * Resolve a builtin GC symbol.
 * Called by PDM when loading or relocating GC modules.
 *
 * @returns VBox status
 * @param   pVM             VM Handle.
 * @param   pszSymbol       Symbol to resolve.
 * @param   pGCPtrValue     Where to store the symbol value.
 * @remark  This has to work before VMMR3Relocate() is called.
 */
VMMR3DECL(int) VMMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
{
    if (!strcmp(pszSymbol, "g_Logger"))
    {
        if (pVM->vmm.s.pLoggerHC)
            pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
        *pGCPtrValue = pVM->vmm.s.pLoggerGC;
    }
    else if (!strcmp(pszSymbol, "g_RelLogger"))
    {
#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
        if (pVM->vmm.s.pRelLoggerHC)
            pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
        *pGCPtrValue = pVM->vmm.s.pRelLoggerGC;
#else
        *pGCPtrValue = NIL_RTGCPTR;
#endif
    }
    else
        return VERR_SYMBOL_NOT_FOUND;
    return VINF_SUCCESS;
}


/**
 * Suspends the CPU yielder.
 *
 * @param   pVM     The VM handle.
 */
VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
{
    if (!pVM->vmm.s.cYieldResumeMillies)
    {
        uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
        uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
        if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
            pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
        else
            pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
        TMTimerStop(pVM->vmm.s.pYieldTimer);
    }
}


/**
 * Stops the CPU yielder.
 *
 * @param   pVM     The VM handle.
 */
VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
{
    if (!pVM->vmm.s.cYieldResumeMillies)
        TMTimerStop(pVM->vmm.s.pYieldTimer);
    pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
}


/**
 * Resumes the CPU yielder when it has been suspended or stopped.
 *
 * @param   pVM     The VM handle.
 */
VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
{
    if (pVM->vmm.s.cYieldResumeMillies)
    {
        TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
        pVM->vmm.s.cYieldResumeMillies = 0;
    }
}


/**
 * Internal timer callback function.
 *
 * @param   pVM         The VM.
 * @param   pTimer      The timer handle.
 * @param   pvUser      User argument specified upon timer creation.
 */
static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
{
#ifdef LOG_ENABLED
    uint64_t u64Elapsed = RTTimeNanoTS();
#endif
    RTThreadYield();
    TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
    Log(("vmmR3YieldEMT: %RI64 ns\n", RTTimeNanoTS() - u64Elapsed));
}


/**
 * Acquire global VM lock.
 *
 * @returns VBox status code
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3Lock(PVM pVM)
{
    return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
}


/**
 * Release global VM lock.
 *
 * @returns VBox status code
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3Unlock(PVM pVM)
{
    return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
}


/**
 * Return global VM lock owner.
 *
 * @returns Thread id of owner.
 * @returns NIL_RTTHREAD if no owner.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
{
    return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
}


/**
 * Checks if the current thread is the owner of the global VM lock.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
{
    return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
}


/**
 * Executes guest code.
 *
 * @param   pVM     VM handle.
 */
VMMR3DECL(int) VMMR3RawRunGC(PVM pVM)
{
    Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /*
     * Set the EIP and ESP.
     */
    CPUMSetHyperEIP(pVM, CPUMGetGuestEFlags(pVM) & X86_EFL_VM ?
pVM->vmm.s.pfnCPUMGCResumeGuestV86 : pVM->vmm.s.pfnCPUMGCResumeGuest); CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* * We hide log flushes (outer) and hypervisor interrupts (inner). */ for (;;) { int rc; do { #ifdef NO_SUPCALLR0VMM rc = VERR_GENERAL_FAILURE; #else rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL); #endif } while (rc == VINF_EM_RAW_INTERRUPT_HYPER); /* * Flush the logs. */ #ifdef LOG_ENABLED PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC; if ( pLogger && pLogger->offScratch > 0) RTLogFlushGC(NULL, pLogger); #endif #ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC; if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0)) RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger); #endif if (rc != VINF_VMM_CALL_HOST) { Log2(("VMMR3RawRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM))); return rc; } rc = vmmR3ServiceCallHostRequest(pVM); if (VBOX_FAILURE(rc)) return rc; /* Resume GC */ } } /** * Executes guest code (Intel VMX and AMD SVM). * * @param pVM VM handle. */ VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM) { Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM))); for (;;) { int rc; do { #ifdef NO_SUPCALLR0VMM rc = VERR_GENERAL_FAILURE; #else rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL); #endif } while (rc == VINF_EM_RAW_INTERRUPT_HYPER); #ifdef LOG_ENABLED /* * Flush the log */ PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger; if ( pR0Logger && pR0Logger->Logger.offScratch > 0) RTLogFlushToLogger(&pR0Logger->Logger, NULL); #endif /* !LOG_ENABLED */ if (rc != VINF_VMM_CALL_HOST) { Log2(("VMMR3HwAccRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM))); return rc; } rc = vmmR3ServiceCallHostRequest(pVM); if (VBOX_FAILURE(rc)) return rc; /* Resume R0 */ } } /** * Calls GC a function. * * @param pVM The VM handle. * @param GCPtrEntry The GC function address. * @param cArgs The number of arguments in the .... * @param ... Arguments to the function. */ VMMR3DECL(int) VMMR3CallGC(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, ...) { va_list args; va_start(args, cArgs); int rc = VMMR3CallGCV(pVM, GCPtrEntry, cArgs, args); va_end(args); return rc; } /** * Calls GC a function. * * @param pVM The VM handle. * @param GCPtrEntry The GC function address. * @param cArgs The number of arguments in the .... * @param args Arguments to the function. */ VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, va_list args) { Log2(("VMMR3CallGCV: GCPtrEntry=%VGv cArgs=%d\n", GCPtrEntry, cArgs)); /* * Setup the call frame using the trampoline. */ CPUMHyperSetCtxCore(pVM, NULL); memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */ CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom - cArgs * sizeof(RTGCUINTPTR)); PRTGCUINTPTR pFrame = (PRTGCUINTPTR)(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE) - cArgs; int i = cArgs; while (i-- > 0) *pFrame++ = va_arg(args, RTGCUINTPTR); CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR)); /* stack frame size */ CPUMPushHyper(pVM, GCPtrEntry); /* what to call */ CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline); /* * We hide log flushes (outer) and hypervisor interrupts (inner). */ for (;;) { int rc; do { #ifdef NO_SUPCALLR0VMM rc = VERR_GENERAL_FAILURE; #else rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL); #endif } while (rc == VINF_EM_RAW_INTERRUPT_HYPER); /* * Flush the logs. 
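         * The GC loggers buffer into their scratch area (offScratch) and
         * cannot write out data themselves, so ring-3 has to drain them
         * after every trip into the hypervisor.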
/**
 * Calls a GC function.
 *
 * @param   pVM         The VM handle.
 * @param   GCPtrEntry  The GC function address.
 * @param   cArgs       The number of arguments in the ellipsis.
 * @param   args        Arguments to the function.
 */
VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, va_list args)
{
    Log2(("VMMR3CallGCV: GCPtrEntry=%VGv cArgs=%d\n", GCPtrEntry, cArgs));

    /*
     * Setup the call frame using the trampoline.
     */
    CPUMHyperSetCtxCore(pVM, NULL);
    memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
    CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom - cArgs * sizeof(RTGCUINTPTR));
    PRTGCUINTPTR pFrame = (PRTGCUINTPTR)(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE) - cArgs;
    int i = cArgs;
    while (i-- > 0)
        *pFrame++ = va_arg(args, RTGCUINTPTR);

    CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR));    /* stack frame size */
    CPUMPushHyper(pVM, GCPtrEntry);                     /* what to call */
    CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);

    /*
     * We hide log flushes (outer) and hypervisor interrupts (inner).
     */
    for (;;)
    {
        int rc;
        do
        {
#ifdef NO_SUPCALLR0VMM
            rc = VERR_GENERAL_FAILURE;
#else
            rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
#endif
        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);

        /*
         * Flush the logs.
         */
#ifdef LOG_ENABLED
        PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
        if (    pLogger
            &&  pLogger->offScratch > 0)
            RTLogFlushGC(NULL, pLogger);
#endif
#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
        PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
        if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
            RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
#endif
        if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
            VMMR3FatalDump(pVM, rc);
        if (rc != VINF_VMM_CALL_HOST)
        {
            Log2(("VMMR3CallGCV: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
            return rc;
        }
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (VBOX_FAILURE(rc))
            return rc;
    }
}


/**
 * Resumes executing hypervisor code when interrupted
 * by a queue flush or a debug event.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 */
VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM)
{
    Log(("VMMR3ResumeHyper: eip=%VGv esp=%VGv\n", CPUMGetHyperEIP(pVM), CPUMGetHyperESP(pVM)));

    /*
     * We hide log flushes (outer) and hypervisor interrupts (inner).
     */
    for (;;)
    {
        int rc;
        do
        {
#ifdef NO_SUPCALLR0VMM
            rc = VERR_GENERAL_FAILURE;
#else
            rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
#endif
        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);

        /*
         * Flush the loggers.
         */
#ifdef LOG_ENABLED
        PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
        if (    pLogger
            &&  pLogger->offScratch > 0)
            RTLogFlushGC(NULL, pLogger);
#endif
#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
        PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
        if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
            RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
#endif
        if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
            VMMR3FatalDump(pVM, rc);
        if (rc != VINF_VMM_CALL_HOST)
        {
            Log(("VMMR3ResumeHyper: returns %Vrc\n", rc));
            return rc;
        }
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (VBOX_FAILURE(rc))
            return rc;
    }
}


/**
 * Service a call to the ring-3 host code.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @remark  Careful with critsects.
 */
static int vmmR3ServiceCallHostRequest(PVM pVM)
{
    switch (pVM->vmm.s.enmCallHostOperation)
    {
        /*
         * Acquire the PDM lock.
         */
        case VMMCALLHOST_PDM_LOCK:
        {
            pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
            break;
        }

        /*
         * Flush a PDM queue.
         */
        case VMMCALLHOST_PDM_QUEUE_FLUSH:
        {
            PDMR3QueueFlushWorker(pVM, NULL);
            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
            break;
        }

        /*
         * Grow the PGM pool.
         */
        case VMMCALLHOST_PGM_POOL_GROW:
        {
            pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
            break;
        }

        /*
         * Acquire the PGM lock.
         */
        case VMMCALLHOST_PGM_LOCK:
        {
            pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
            break;
        }

        /*
         * Flush REM handler notifications.
         */
        case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
        {
            REMR3ReplayHandlerNotifications(pVM);
            break;
        }

        /*
         * Grow a RAM range.
         */
        case VMMCALLHOST_PGM_RAM_GROW_RANGE:
        {
            pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, pVM->vmm.s.u64CallHostArg);
            break;
        }

        /*
         * This is a noop. We just take this route to avoid unnecessary
         * tests in the loops.
         */
        case VMMCALLHOST_VMM_LOGGER_FLUSH:
            break;

        /*
         * Set the VM error message.
         */
        case VMMCALLHOST_VM_SET_ERROR:
            VMR3SetErrorWorker(pVM);
            break;

        /*
         * Set the VM runtime error message.
         */
        case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
            VMR3SetRuntimeErrorWorker(pVM);
            break;

        default:
            AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
            return VERR_INTERNAL_ERROR;
    }

    pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
    return VINF_SUCCESS;
}
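
/*
 * For context: the VINF_VMM_CALL_HOST round trips serviced above are driven
 * from the GC/R0 side. The sketch below shows the rough shape of that side
 * of the contract; the helper and the VMMCALLHOST type name are assumptions
 * for illustration (the real implementations live in the VMMGC/VMMR0 code,
 * which is not part of this file).
 */
#if 0 /* example sketch */
static int exampleRequestCallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t u64Arg)
{
    pVM->vmm.s.enmCallHostOperation = enmOperation;     /* e.g. VMMCALLHOST_PGM_LOCK */
    pVM->vmm.s.u64CallHostArg       = u64Arg;           /* operation argument, if any */
    /* ... world switch back to ring-3 with VINF_VMM_CALL_HOST, after which
       vmmR3ServiceCallHostRequest() runs and stores the result ... */
    return pVM->vmm.s.rcCallHost;
}
#endif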
/**
 * Structure to pass to DBGFR3Info() and for doing all other
 * output during fatal dump.
 */
typedef struct VMMR3FATALDUMPINFOHLP
{
    /** The helper core. */
    DBGFINFOHLP Core;
    /** The release logger instance. */
    PRTLOGGER   pRelLogger;
    /** The saved release logger flags. */
    RTUINT      fRelLoggerFlags;
    /** The logger instance. */
    PRTLOGGER   pLogger;
    /** The saved logger flags. */
    RTUINT      fLoggerFlags;
    /** The saved logger destination flags. */
    RTUINT      fLoggerDestFlags;
    /** Whether to output to stderr or not. */
    bool        fStdErr;
} VMMR3FATALDUMPINFOHLP, *PVMMR3FATALDUMPINFOHLP;
typedef const VMMR3FATALDUMPINFOHLP *PCVMMR3FATALDUMPINFOHLP;


/**
 * Print formatted string.
 *
 * @param   pHlp        Pointer to this structure.
 * @param   pszFormat   The format string.
 * @param   ...         Arguments.
 */
static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
{
    va_list args;
    va_start(args, pszFormat);
    pHlp->pfnPrintfV(pHlp, pszFormat, args);
    va_end(args);
}


/**
 * Print formatted string.
 *
 * @param   pHlp        Pointer to this structure.
 * @param   pszFormat   The format string.
 * @param   args        Argument list.
 */
static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
{
    PCVMMR3FATALDUMPINFOHLP pMyHlp = (PCVMMR3FATALDUMPINFOHLP)pHlp;

    if (pMyHlp->pRelLogger)
    {
        va_list args2;
        va_copy(args2, args);
        RTLogLoggerV(pMyHlp->pRelLogger, pszFormat, args2);
        va_end(args2);
    }
    if (pMyHlp->pLogger)
    {
        va_list args2;
        va_copy(args2, args);
        RTLogLoggerV(pMyHlp->pLogger, pszFormat, args2); /* use the copy; passing 'args' would traverse the caller's list twice. */
        va_end(args2);
    }
    if (pMyHlp->fStdErr)
    {
        va_list args2;
        va_copy(args2, args);
        RTStrmPrintfV(g_pStdErr, pszFormat, args2); /* likewise, print from the copy. */
        va_end(args2);
    }
}


/**
 * Initializes the fatal dump output helper.
 *
 * @param   pHlp        The structure to initialize.
 */
static void vmmR3FatalDumpInfoHlpInit(PVMMR3FATALDUMPINFOHLP pHlp)
{
    memset(pHlp, 0, sizeof(*pHlp));
    pHlp->Core.pfnPrintf  = vmmR3FatalDumpInfoHlp_pfnPrintf;
    pHlp->Core.pfnPrintfV = vmmR3FatalDumpInfoHlp_pfnPrintfV;

    /*
     * The loggers.
     */
    pHlp->pRelLogger = RTLogRelDefaultInstance();
#ifndef LOG_ENABLED
    if (!pHlp->pRelLogger)
#endif
        pHlp->pLogger = RTLogDefaultInstance();

    if (pHlp->pRelLogger)
    {
        pHlp->fRelLoggerFlags = pHlp->pRelLogger->fFlags;
        pHlp->pRelLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
    }

    if (pHlp->pLogger)
    {
        pHlp->fLoggerFlags     = pHlp->pLogger->fFlags;
        pHlp->fLoggerDestFlags = pHlp->pLogger->fDestFlags;
        pHlp->pLogger->fFlags     &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
        pHlp->pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
    }

    /*
     * Check if we need to write to stderr.
     */
    pHlp->fStdErr = (!pHlp->pRelLogger || !(pHlp->pRelLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)))
                 && (!pHlp->pLogger   || !(pHlp->pLogger->fDestFlags   & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)));
}


/**
 * Deletes the fatal dump output helper.
 *
 * @param   pHlp        The structure to delete.
 */
static void vmmR3FatalDumpInfoHlpDelete(PVMMR3FATALDUMPINFOHLP pHlp)
{
    if (pHlp->pRelLogger)
    {
        RTLogFlush(pHlp->pRelLogger);
        pHlp->pRelLogger->fFlags = pHlp->fRelLoggerFlags;
    }

    if (pHlp->pLogger)
    {
        RTLogFlush(pHlp->pLogger);
        pHlp->pLogger->fFlags     = pHlp->fLoggerFlags;
        pHlp->pLogger->fDestFlags = pHlp->fLoggerDestFlags;
    }
}
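
/*
 * The pfnPrintfV helper above fans a single va_list out to several sinks;
 * each sink must consume its own va_copy, since a va_list may only be
 * traversed once. A minimal self-contained sketch of the same pattern in
 * plain C (illustrative only, excluded from the build):
 */
#if 0 /* example sketch */
#include <stdarg.h>
#include <stdio.h>

static void examplePrintfToTwoStreams(const char *pszFormat, ...)
{
    va_list args;
    va_start(args, pszFormat);

    va_list args2;
    va_copy(args2, args);               /* clone before the first traversal */
    vfprintf(stdout, pszFormat, args);  /* consumes the original */
    vfprintf(stderr, pszFormat, args2); /* consumes the clone */
    va_end(args2);

    va_end(args);
}
#endif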
/**
 * Dumps the VM state on a fatal error.
 *
 * @param   pVM         VM Handle.
 * @param   rcErr       VBox status code.
 */
VMMR3DECL(void) VMMR3FatalDump(PVM pVM, int rcErr)
{
    /*
     * Create our output helper and sync it with the log settings.
     * This helper will be used for all the output.
     */
    VMMR3FATALDUMPINFOHLP   Hlp;
    PCDBGFINFOHLP           pHlp = &Hlp.Core;
    vmmR3FatalDumpInfoHlpInit(&Hlp);

    /*
     * Header.
     */
    pHlp->pfnPrintf(pHlp,
                    "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
                    "!!\n"
                    "!!         Guru Meditation %d (%Vrc)\n"
                    "!!\n",
                    rcErr, rcErr);

    /*
     * Continue according to context.
     */
    bool fDoneHyper = false;
    switch (rcErr)
    {
        /*
         * Hypervisor errors.
         */
        case VINF_EM_DBG_HYPER_ASSERTION:
            pHlp->pfnPrintf(pHlp, "%s%s!!\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
            /* fall thru */
        case VERR_TRPM_DONT_PANIC:
        case VERR_TRPM_PANIC:
        case VINF_EM_RAW_STALE_SELECTOR:
        case VINF_EM_RAW_IRET_TRAP:
        case VINF_EM_DBG_HYPER_BREAKPOINT:
        case VINF_EM_DBG_HYPER_STEPPED:
        {
            /* Trap? */
            uint32_t        uEIP = CPUMGetHyperEIP(pVM);
            bool            fSoftwareInterrupt = false;
            uint8_t         u8TrapNo = 0xce;
            RTGCUINT        uErrorCode = 0xdeadface;
            RTGCUINTPTR     uCR2 = 0xdeadface;
            int rc2 = TRPMQueryTrapAll(pVM, &u8TrapNo, &fSoftwareInterrupt, &uErrorCode, &uCR2);
            if (VBOX_SUCCESS(rc2))
                pHlp->pfnPrintf(pHlp,
                                "!! TRAP=%02x ERRCD=%VGv CR2=%VGv EIP=%VGv fSoft=%d\n",
                                u8TrapNo, uErrorCode, uCR2, uEIP, fSoftwareInterrupt);
            else
                pHlp->pfnPrintf(pHlp,
                                "!! EIP=%VGv NOTRAP\n",
                                uEIP);

            /*
             * Try to figure out where eip is.
             */
            /** @todo make query call for core code or move this function to VMM. */
            /* core code? */
            //if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode < pVM->vmm.s.cbCoreCode)
            //    pHlp->pfnPrintf(pHlp,
            //                    "!! EIP is in CoreCode, offset %#x\n",
            //                    uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode);
            //else
            {   /* ask PDM */
                /** @todo ask DBGFR3Sym later. */
                char        szModName[64];
                RTGCPTR     GCPtrMod;
                char        szNearSym1[260];
                RTGCPTR     GCPtrNearSym1;
                char        szNearSym2[260];
                RTGCPTR     GCPtrNearSym2;
                int rc = PDMR3QueryModFromEIP(pVM, uEIP,
                                              &szModName[0],  sizeof(szModName),  &GCPtrMod,
                                              &szNearSym1[0], sizeof(szNearSym1), &GCPtrNearSym1,
                                              &szNearSym2[0], sizeof(szNearSym2), &GCPtrNearSym2);
                if (VBOX_SUCCESS(rc))
                {
                    pHlp->pfnPrintf(pHlp,
                                    "!! EIP in %s (%p) at rva %x near symbols:\n"
                                    "!!    %VGv rva %VGv off %08x  %s\n"
                                    "!!    %VGv rva %VGv off -%08x %s\n",
                                    szModName, GCPtrMod, (unsigned)(uEIP - GCPtrMod),
                                    GCPtrNearSym1, GCPtrNearSym1 - GCPtrMod, (unsigned)(uEIP - GCPtrNearSym1), szNearSym1,
                                    GCPtrNearSym2, GCPtrNearSym2 - GCPtrMod, (unsigned)(GCPtrNearSym2 - uEIP), szNearSym2);
                }
                else
                    pHlp->pfnPrintf(pHlp,
                                    "!! EIP is not in any code known to VMM!\n");
            }

            /* Disassemble the instruction. */
            char szInstr[256];
            rc2 = DBGFR3DisasInstrEx(pVM, 0, 0, DBGF_DISAS_FLAGS_CURRENT_HYPER, &szInstr[0], sizeof(szInstr), NULL);
            if (VBOX_SUCCESS(rc2))
                pHlp->pfnPrintf(pHlp,
                                "!! %s\n", szInstr);

            /* Dump the hypervisor cpu state. */
            pHlp->pfnPrintf(pHlp,
                            "!!\n"
                            "!!\n"
                            "!!\n");
            rc2 = DBGFR3Info(pVM, "cpumhyper", "verbose", pHlp);
            fDoneHyper = true;

            /* Callstack. */
            DBGFSTACKFRAME Frame = {0};
            rc2 = DBGFR3StackWalkBeginHyper(pVM, &Frame);
            if (VBOX_SUCCESS(rc2))
            {
                pHlp->pfnPrintf(pHlp,
                                "!!\n"
                                "!! Call Stack:\n"
                                "!!\n"
                                "EBP      Ret EBP  Ret CS:EIP    Arg0     Arg1     Arg2     Arg3     CS:EIP        Symbol [line]\n");
                do
                {
                    pHlp->pfnPrintf(pHlp,
                                    "%08RX32 %08RX32 %04RX32:%08RX32 %08RX32 %08RX32 %08RX32 %08RX32",
                                    (uint32_t)Frame.AddrFrame.off,
                                    (uint32_t)Frame.AddrReturnFrame.off,
                                    (uint32_t)Frame.AddrReturnPC.Sel,
                                    (uint32_t)Frame.AddrReturnPC.off,
                                    Frame.Args.au32[0],
                                    Frame.Args.au32[1],
                                    Frame.Args.au32[2],
                                    Frame.Args.au32[3]);
                    pHlp->pfnPrintf(pHlp, " %RTsel:%08RGv", Frame.AddrPC.Sel, Frame.AddrPC.off);
                    if (Frame.pSymPC)
                    {
                        RTGCINTPTR offDisp = Frame.AddrPC.FlatPtr - Frame.pSymPC->Value;
                        if (offDisp > 0)
                            pHlp->pfnPrintf(pHlp, " %s+%llx", Frame.pSymPC->szName, (int64_t)offDisp);
                        else if (offDisp < 0)
                            pHlp->pfnPrintf(pHlp, " %s-%llx", Frame.pSymPC->szName, -(int64_t)offDisp);
                        else
                            pHlp->pfnPrintf(pHlp, " %s", Frame.pSymPC->szName);
                    }
                    if (Frame.pLinePC)
                        pHlp->pfnPrintf(pHlp, " [%s @ 0i%d]", Frame.pLinePC->szFilename, Frame.pLinePC->uLineNo);
                    pHlp->pfnPrintf(pHlp, "\n");

                    /* next */
                    rc2 = DBGFR3StackWalkNext(pVM, &Frame);
                } while (VBOX_SUCCESS(rc2));
                DBGFR3StackWalkEnd(pVM, &Frame);
            }

            /* raw stack */
            pHlp->pfnPrintf(pHlp,
                            "!!\n"
                            "!! Raw stack (mind the direction).\n"
                            "!!\n"
                            "%.*Vhxd\n",
                            VMM_STACK_SIZE, (char *)pVM->vmm.s.pbHCStack);
            break;
        }

        default:
        {
            break;
        }

    } /* switch (rcErr) */

    /*
     * Dump useful state information.
     */
    /** @todo convert these dumpers to DBGFR3Info() handlers!!! */
    pHlp->pfnPrintf(pHlp,
                    "!!\n"
                    "!! PGM Access Handlers & Stuff:\n"
                    "!!\n");
    PGMR3DumpMappings(pVM);

    /*
     * Generic info dumper loop.
     */
    static struct
    {
        const char *pszInfo;
        const char *pszArgs;
    } const     aInfo[] =
    {
        { "hma",            NULL },
        { "cpumguest",      "verbose" },
        { "cpumhyper",      "verbose" },
        { "cpumhost",       "verbose" },
        { "mode",           "all" },
        { "cpuid",          "verbose" },
        { "gdt",            NULL },
        { "ldt",            NULL },
        //{ "tss",          NULL },
        { "ioport",         NULL },
        { "mmio",           NULL },
        { "phys",           NULL },
        //{ "pgmpd",        NULL }, - doesn't always work at init time...
        { "timers",         NULL },
        { "activetimers",   NULL },
        { "handlers",       "phys virt stats" },
        { "cfgm",           NULL },
    };
    for (unsigned i = 0; i < ELEMENTS(aInfo); i++)
    {
        if (fDoneHyper && !strcmp(aInfo[i].pszInfo, "cpumhyper"))
            continue;
        pHlp->pfnPrintf(pHlp,
                        "!!\n"
                        "!! {%s, %s}\n"
                        "!!\n",
                        aInfo[i].pszInfo, aInfo[i].pszArgs);
        DBGFR3Info(pVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
    }

    /* done */
    pHlp->pfnPrintf(pHlp,
                    "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");

    /*
     * Delete the output instance (flushing and restoring of flags).
     */
    vmmR3FatalDumpInfoHlpDelete(&Hlp);
}


/**
 * Performs a testcase.
 *
 * @returns return value from the test.
 * @param   pVM         The VM handle.
 * @param   enmTestcase The testcase operation to perform.
 * @param   uVariation  The testcase variation id.
 */
static int vmmR3DoGCTest(PVM pVM, VMMGCOPERATION enmTestcase, unsigned uVariation)
{
    RTGCPTR GCPtrEP;
    int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
    if (VBOX_FAILURE(rc))
        return rc;

    CPUMHyperSetCtxCore(pVM, NULL);
    memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE);
    CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */
    CPUMPushHyper(pVM, uVariation);
    CPUMPushHyper(pVM, enmTestcase);
    CPUMPushHyper(pVM, pVM->pVMGC);
    CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR));            /* stack frame size */
    CPUMPushHyper(pVM, GCPtrEP);                        /* what to call */
    CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
    return SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
}
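
/*
 * The frame set up by vmmR3DoGCTest() above mirrors what the GC call
 * trampoline expects. After the pushes, the hypervisor stack looks roughly
 * like this (a sketch derived from the code above, higher addresses first):
 *
 *      uVariation              <- third argument to VMMGCEntry()
 *      enmTestcase             <- second argument
 *      pVM->pVMGC              <- first argument
 *      3 * sizeof(RTGCPTR)     <- frame size the trampoline unwinds on return
 *      GCPtrEP                 <- entry point the trampoline transfers to
 */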
/**
 * Performs a trap test.
 *
 * @returns Return value from the trap test.
 * @param   pVM         The VM handle.
 * @param   u8Trap      The trap number to test.
 * @param   uVariation  The testcase variation.
 * @param   rcExpect    The expected result.
 * @param   u32Eax      The expected eax value.
 * @param   pszFaultEIP The fault address. Pass NULL if this isn't available or doesn't apply.
 * @param   pszDesc     The test description.
 */
static int vmmR3DoTrapTest(PVM pVM, uint8_t u8Trap, unsigned uVariation, int rcExpect, uint32_t u32Eax, const char *pszFaultEIP, const char *pszDesc)
{
    RTPrintf("VMM: testing 0%x / %d - %s\n", u8Trap, uVariation, pszDesc);

    RTGCPTR GCPtrEP;
    int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
    if (VBOX_FAILURE(rc))
        return rc;

    CPUMHyperSetCtxCore(pVM, NULL);
    memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE);
    CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */
    CPUMPushHyper(pVM, uVariation);
    CPUMPushHyper(pVM, u8Trap + VMMGC_DO_TESTCASE_TRAP_FIRST);
    CPUMPushHyper(pVM, pVM->pVMGC);
    CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR));            /* stack frame size */
    CPUMPushHyper(pVM, GCPtrEP);                        /* what to call */
    CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
    rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
    bool fDump = false;
    if (rc != rcExpect)
    {
        RTPrintf("VMM: FAILURE - rc=%Vrc expected %Vrc\n", rc, rcExpect);
        if (rc != VERR_NOT_IMPLEMENTED)
            fDump = true;
    }
    else if (    rcExpect != VINF_SUCCESS
             &&  u8Trap != 8 /* double fault doesn't dare set TrapNo. */
             &&  u8Trap != 3 /* guest only, we're not in guest. */
             &&  u8Trap != 1 /* guest only, we're not in guest. */
             &&  u8Trap != TRPMGetTrapNo(pVM))
    {
        RTPrintf("VMM: FAILURE - Trap %#x expected %#x\n", TRPMGetTrapNo(pVM), u8Trap);
        fDump = true;
    }
    else if (pszFaultEIP)
    {
        RTGCPTR GCPtrFault;
        int rc2 = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, pszFaultEIP, &GCPtrFault);
        if (VBOX_FAILURE(rc2))
            RTPrintf("VMM: FAILURE - Failed to resolve symbol '%s', %Vrc!\n", pszFaultEIP, rc2); /* report rc2, the status that actually failed */
        else if (GCPtrFault != CPUMGetHyperEIP(pVM))
        {
            RTPrintf("VMM: FAILURE - EIP=%VGv expected %VGv (%s)\n", CPUMGetHyperEIP(pVM), GCPtrFault, pszFaultEIP);
            fDump = true;
        }
    }
    else if (rcExpect != VINF_SUCCESS)
    {
        if (CPUMGetHyperSS(pVM) == SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - ss=%x expected %x\n", CPUMGetHyperSS(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperES(pVM) == SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - es=%x expected %x\n", CPUMGetHyperES(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperDS(pVM) == SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - ds=%x expected %x\n", CPUMGetHyperDS(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperFS(pVM) == SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - fs=%x expected %x\n", CPUMGetHyperFS(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperGS(pVM) == SELMGetHyperDS(pVM))
            RTPrintf("VMM: FAILURE - gs=%x expected %x\n", CPUMGetHyperGS(pVM), SELMGetHyperDS(pVM));
        if (CPUMGetHyperEDI(pVM) == 0x01234567)
            RTPrintf("VMM: FAILURE - edi=%x expected %x\n", CPUMGetHyperEDI(pVM), 0x01234567);
        if (CPUMGetHyperESI(pVM) == 0x42000042)
            RTPrintf("VMM: FAILURE - esi=%x expected %x\n", CPUMGetHyperESI(pVM), 0x42000042);
        if (CPUMGetHyperEBP(pVM) == 0xffeeddcc)
            RTPrintf("VMM: FAILURE - ebp=%x expected %x\n", CPUMGetHyperEBP(pVM), 0xffeeddcc);
        if (CPUMGetHyperEBX(pVM) == 0x89abcdef)
            RTPrintf("VMM: FAILURE - ebx=%x expected %x\n", CPUMGetHyperEBX(pVM), 0x89abcdef);
        if (CPUMGetHyperECX(pVM) == 0xffffaaaa)
            RTPrintf("VMM: FAILURE - ecx=%x expected %x\n", CPUMGetHyperECX(pVM), 0xffffaaaa);
        if (CPUMGetHyperEDX(pVM) == 0x77778888)
            RTPrintf("VMM: FAILURE - edx=%x expected %x\n", CPUMGetHyperEDX(pVM), 0x77778888);
        if (CPUMGetHyperEAX(pVM) == u32Eax)
            RTPrintf("VMM: FAILURE - eax=%x expected %x\n", CPUMGetHyperEAX(pVM), u32Eax);
    }
    if (fDump)
        VMMR3FatalDump(pVM, rc);
    return rc;
}
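
/*
 * Illustrative only: adding another case to the trap test harness above
 * follows the same shape as the calls in VMMDoTest() below. The variation
 * number and fault label in this sketch are made up:
 */
#if 0 /* example sketch */
    vmmR3DoTrapTest(pVM, 0xe /* #PF */, 3 /* hypothetical variation */,
                    VERR_TRPM_DONT_PANIC, 0x00000000,
                    "vmmGCTestTrap0e_FaultEIP", "#PF (example)");
#endif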
/**
 * Executes the VMM testcases (execute the switch).
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMDoTest(PVM pVM)
{
#if 1
#ifdef NO_SUPCALLR0VMM
    RTPrintf("NO_SUPCALLR0VMM\n");
    return VINF_SUCCESS;
#endif

    /*
     * Setup stack for calling VMMGCEntry().
     */
    RTGCPTR GCPtrEP;
    int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
    if (VBOX_SUCCESS(rc))
    {
        RTPrintf("VMM: VMMGCEntry=%VGv\n", GCPtrEP);

        /*
         * Test various crashes which we must be able to recover from.
         */
        vmmR3DoTrapTest(pVM, 0x3, 0, VINF_EM_DBG_HYPER_ASSERTION,  0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3");
        vmmR3DoTrapTest(pVM, 0x3, 1, VINF_EM_DBG_HYPER_ASSERTION,  0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3 WP");

#if defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
        vmmR3DoTrapTest(pVM, 0x8, 0, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG]");
        SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
        bool f;
        rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
#if !defined(DEBUG_bird)
        if (VBOX_SUCCESS(rc) && f)
#endif
        {
            /* see triple fault warnings in SELM and VMMGC.cpp. */
            vmmR3DoTrapTest(pVM, 0x8, 1, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG] WP");
            SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
        }
#endif

        vmmR3DoTrapTest(pVM, 0xd, 0, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP");
        ///@todo find a better \#GP case, on intel ltr will \#PF (busy update?) and not \#GP.
        //vmmR3DoTrapTest(pVM, 0xd, 1, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP WP");

        vmmR3DoTrapTest(pVM, 0xe, 0, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL)");
        vmmR3DoTrapTest(pVM, 0xe, 1, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL) WP");
        vmmR3DoTrapTest(pVM, 0xe, 2, VINF_SUCCESS,         0x00000000, NULL,                       "#PF w/Tmp Handler");
        vmmR3DoTrapTest(pVM, 0xe, 4, VINF_SUCCESS,         0x00000000, NULL,                       "#PF w/Tmp Handler and bad fs");

        /*
         * Set a debug register and perform a context switch.
         */
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: Nop test failed, rc=%Vrc not VINF_SUCCESS\n", rc);
            return rc;
        }

        /* a harmless breakpoint */
        RTPrintf("VMM: testing hardware bp at 0x10000 (not hit)\n");
        DBGFADDRESS Addr;
        DBGFR3AddrFromFlat(pVM, &Addr, 0x10000);
        RTUINT iBp0;
        rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
        AssertReleaseRC(rc);
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: DR0=0x10000 test failed with rc=%Vrc!\n", rc);
            return rc;
        }

        /* a bad one at VMMGCEntry */
        RTPrintf("VMM: testing hardware bp at VMMGCEntry (hit)\n");
        DBGFR3AddrFromFlat(pVM, &Addr, GCPtrEP);
        RTUINT iBp1;
        rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
        AssertReleaseRC(rc);
        rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
        if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
        {
            RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Vrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
            return rc;
        }

        /* resume the breakpoint */
        RTPrintf("VMM: resuming hyper after breakpoint\n");
        CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_RF);
        rc = VMMR3ResumeHyper(pVM);
        if (rc != VINF_SUCCESS)
        {
            RTPrintf("VMM: failed to resume on hyper breakpoint, rc=%Vrc\n", rc);
            return rc;
        }
*/ RTPrintf("VMM: testing hardware bp at VMMGCEntry + stepping\n"); rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0); if (rc != VINF_EM_DBG_HYPER_BREAKPOINT) { RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Vrc! expected VINF_EM_RAW_BREAKPOINT_HYPER\n", rc); return rc; } RTGCUINTREG OldPc = CPUMGetHyperEIP(pVM); RTPrintf("%RGr=>", OldPc); unsigned i; for (i = 0; i < 8; i++) { CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_TF | X86_EFL_RF); rc = VMMR3ResumeHyper(pVM); if (rc != VINF_EM_DBG_HYPER_STEPPED) { RTPrintf("\nVMM: failed to step on hyper breakpoint, rc=%Vrc\n", rc); return rc; } RTGCUINTREG Pc = CPUMGetHyperEIP(pVM); RTPrintf("%RGr=>", Pc); if (Pc == OldPc) { RTPrintf("\nVMM: step failed, PC: %RGr -> %RGr\n", OldPc, Pc); return VERR_GENERAL_FAILURE; } OldPc = Pc; } RTPrintf("ok\n"); /* done, clear it */ if ( VBOX_FAILURE(DBGFR3BpClear(pVM, iBp0)) || VBOX_FAILURE(DBGFR3BpClear(pVM, iBp1))) { RTPrintf("VMM: Failed to clear breakpoints!\n"); return VERR_GENERAL_FAILURE; } rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0); if (rc != VINF_SUCCESS) { RTPrintf("VMM: NOP failed, rc=%Vrc\n", rc); return rc; } /* * Interrupt masking. */ RTPrintf("VMM: interrupt masking...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250); for (i = 0; i < 10000; i++) { uint64_t StartTick = ASMReadTSC(); rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_INTERRUPT_MASKING, 0); if (rc != VINF_SUCCESS) { RTPrintf("VMM: Interrupt masking failed: rc=%Vrc\n", rc); return rc; } uint64_t Ticks = ASMReadTSC() - StartTick; if (Ticks < (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000)) RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000); } /* * Interrupt forwarding. */ CPUMHyperSetCtxCore(pVM, NULL); CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */ CPUMPushHyper(pVM, 0); CPUMPushHyper(pVM, VMMGC_DO_TESTCASE_HYPER_INTERRUPT); CPUMPushHyper(pVM, pVM->pVMGC); CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */ CPUMPushHyper(pVM, GCPtrEP); /* what to call */ CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline); Log(("trampoline=%x\n", pVM->vmm.s.pfnGCCallTrampoline)); /* * Switch and do da thing. */ RTPrintf("VMM: interrupt forwarding...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250); i = 0; uint64_t tsBegin = RTTimeNanoTS(); uint64_t TickStart = ASMReadTSC(); do { rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL); if (VBOX_FAILURE(rc)) { Log(("VMM: GC returned fatal %Vra in iteration %d\n", rc, i)); VMMR3FatalDump(pVM, rc); return rc; } i++; if (!(i % 32)) Log(("VMM: iteration %d, esi=%08x edi=%08x ebx=%08x\n", i, CPUMGetHyperESI(pVM), CPUMGetHyperEDI(pVM), CPUMGetHyperEBX(pVM))); } while (rc == VINF_EM_RAW_INTERRUPT_HYPER); uint64_t TickEnd = ASMReadTSC(); uint64_t tsEnd = RTTimeNanoTS(); uint64_t Elapsed = tsEnd - tsBegin; uint64_t PerIteration = Elapsed / (uint64_t)i; uint64_t cTicksElapsed = TickEnd - TickStart; uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i; RTPrintf("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n", i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration); Log(("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n", i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration)); /* * These forced actions are not necessary for the test and trigger breakpoints too. */ VM_FF_CLEAR(pVM, VM_FF_TRPM_SYNC_IDT); VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS); /* * Profile switching. 
*/ RTPrintf("VMM: profiling switcher...\n"); Log(("VMM: profiling switcher...\n")); uint64_t TickMin = ~0; tsBegin = RTTimeNanoTS(); TickStart = ASMReadTSC(); for (i = 0; i < 1000000; i++) { CPUMHyperSetCtxCore(pVM, NULL); CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */ CPUMPushHyper(pVM, 0); CPUMPushHyper(pVM, VMMGC_DO_TESTCASE_NOP); CPUMPushHyper(pVM, pVM->pVMGC); CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */ CPUMPushHyper(pVM, GCPtrEP); /* what to call */ CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline); uint64_t TickThisStart = ASMReadTSC(); rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL); uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart; if (VBOX_FAILURE(rc)) { Log(("VMM: GC returned fatal %Vra in iteration %d\n", rc, i)); VMMR3FatalDump(pVM, rc); return rc; } if (TickThisElapsed < TickMin) TickMin = TickThisElapsed; } TickEnd = ASMReadTSC(); tsEnd = RTTimeNanoTS(); Elapsed = tsEnd - tsBegin; PerIteration = Elapsed / (uint64_t)i; cTicksElapsed = TickEnd - TickStart; cTicksPerIteration = cTicksElapsed / (uint64_t)i; RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n", i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin); Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n", i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin)); rc = VINF_SUCCESS; } else AssertMsgFailed(("Failed to resolved VMMGC.gc::VMMGCEntry(), rc=%Vrc\n", rc)); #endif return rc; }