VirtualBox

source: vbox/trunk/src/VBox/VMM/VMM.cpp@19167

Last change on this file since 19167 was 19141, checked in by vboxsync on 2009-04-23

Action flags breakup.
Fixed PGM saved state loading of 2.2.2 images.
Reduced hacks in PATM state loading (fixups).

/* $Id: VMM.cpp 19141 2009-04-23 13:52:18Z vboxsync $ */
/** @file
 * VMM - The Virtual Machine Monitor Core.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

//#define NO_SUPCALLR0VMM

/** @page pg_vmm        VMM - The Virtual Machine Monitor
 *
 * The VMM component is two things at the moment: a component doing a few
 * management and routing tasks, and the whole virtual machine monitor
 * thing. For historical reasons, it is not doing all the management one
 * would expect; that is instead done by @ref pg_vm. We'll address this
 * misdesign eventually.
 *
 * @see grp_vmm, grp_vm
 *
 *
 * @section sec_vmmstate        VMM State
 *
 * @image html VM_Statechart_Diagram.gif
 *
 * To be written.
 *
 *
 * @subsection subsec_vmm_init  VMM Initialization
 *
 * To be written.
 *
 *
 * @subsection subsec_vmm_term  VMM Termination
 *
 * To be written.
 *
 */
52
53/*******************************************************************************
54* Header Files *
55*******************************************************************************/
56#define LOG_GROUP LOG_GROUP_VMM
57#include <VBox/vmm.h>
58#include <VBox/vmapi.h>
59#include <VBox/pgm.h>
60#include <VBox/cfgm.h>
61#include <VBox/pdmqueue.h>
62#include <VBox/pdmapi.h>
63#include <VBox/cpum.h>
64#include <VBox/mm.h>
65#include <VBox/iom.h>
66#include <VBox/trpm.h>
67#include <VBox/selm.h>
68#include <VBox/em.h>
69#include <VBox/sup.h>
70#include <VBox/dbgf.h>
71#include <VBox/csam.h>
72#include <VBox/patm.h>
73#include <VBox/rem.h>
74#include <VBox/ssm.h>
75#include <VBox/tm.h>
76#include "VMMInternal.h"
77#include "VMMSwitcher/VMMSwitcher.h"
78#include <VBox/vm.h>
79
80#include <VBox/err.h>
81#include <VBox/param.h>
82#include <VBox/version.h>
83#include <VBox/x86.h>
84#include <VBox/hwaccm.h>
85#include <iprt/assert.h>
86#include <iprt/alloc.h>
87#include <iprt/asm.h>
88#include <iprt/time.h>
89#include <iprt/stream.h>
90#include <iprt/string.h>
91#include <iprt/stdarg.h>
92#include <iprt/ctype.h>
93
94
95
96/** The saved state version. */
97#define VMM_SAVED_STATE_VERSION 3
98
99
100/*******************************************************************************
101* Internal Functions *
102*******************************************************************************/
103static int vmmR3InitStacks(PVM pVM);
104static int vmmR3InitLoggers(PVM pVM);
105static void vmmR3InitRegisterStats(PVM pVM);
106static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
107static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
108static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
109static int vmmR3ServiceCallHostRequest(PVM pVM);
110static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
111
112
/**
 * Initializes the VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3Init(PVM pVM)
{
    LogFlow(("VMMR3Init\n"));

    /*
     * Assert alignment, sizes and order.
     */
    AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
    AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
              ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
               sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));

    /*
     * Init basic VM VMM members.
     */
    pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
    int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
        //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
    else
        AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);

    /* GC switchers are enabled by default. Turned off by HWACCM. */
    pVM->vmm.s.fSwitcherDisabled = false;

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
                               NULL, vmmR3Save, NULL,
                               NULL, vmmR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register the Ring-0 VM handle with the session for fast ioctl calls.
     */
    rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Init various sub-components.
     */
    rc = vmmR3SwitcherInit(pVM);
    if (RT_SUCCESS(rc))
    {
        rc = vmmR3InitStacks(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = vmmR3InitLoggers(pVM);

#ifdef VBOX_WITH_NMI
            /*
             * Allocate mapping for the host APIC.
             */
            if (RT_SUCCESS(rc))
            {
                rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
                AssertRC(rc);
            }
#endif
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Debug info and statistics.
                     */
                    DBGFR3InfoRegisterInternal(pVM, "ff", "Displays the current Forced actions Flags.", vmmR3InfoFF);
                    vmmR3InitRegisterStats(pVM);

                    return VINF_SUCCESS;
                }
            }
        }
        /** @todo: Need failure cleanup. */

        //more todo in here?
        //if (RT_SUCCESS(rc))
        //{
        //}
        //int rc2 = vmmR3TermCoreCode(pVM);
        //AssertRC(rc2));
    }

    return rc;
}
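
/*
 * Editor's illustrative sketch (not part of the original file): how a VM
 * configuration constructor could override the "YieldEMTInterval" value
 * queried in VMMR3Init above. The helper name and the 10ms value are
 * hypothetical; the CFGM root placement matches the CFGMR3QueryU32() call.
 */
#if 0
static int vmmSketchConfigureYieldInterval(PVM pVM)
{
    /* Insert a 32-bit integer under the CFGM root; VMMR3Init picks it up
       instead of falling back to the 23ms default. */
    return CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "YieldEMTInterval", 10 /* ms */);
}
#endif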


/**
 * Allocate & setup the VMM RC stack(s) (for EMTs).
 *
 * The stacks are also used for long jumps in Ring-0.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 *
 * @remarks The optional guard page gets its protection set up during R3 init
 *          completion because of init order issues.
 */
static int vmmR3InitStacks(PVM pVM)
{
    /** @todo SMP: One stack per vCPU. */
#ifdef VBOX_STRICT_VMM_STACK
    int rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbEMTStackR3);
#else
    int rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbEMTStackR3);
#endif
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
        if (!VMMIsHwVirtExtForced(pVM))
            pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = NIL_RTR0PTR;
        else
#endif
            pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = MMHyperR3ToR0(pVM, pVM->vmm.s.pbEMTStackR3);
        pVM->vmm.s.pbEMTStackRC       = MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3);
        pVM->vmm.s.pbEMTStackBottomRC = pVM->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
        AssertRelease(pVM->vmm.s.pbEMTStackRC);

        for (unsigned i = 0; i < pVM->cCPUs; i++)
        {
            PVMCPU pVCpu = &pVM->aCpus[i];
            CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC);
        }
    }

    return rc;
}
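
/*
 * Editor's sketch of the strict-mode EMT stack layout allocated above when
 * VBOX_STRICT_VMM_STACK is defined (the guard pages are made inaccessible
 * later, in VMMR3InitFinalize):
 *
 *   pbEMTStackR3 - PAGE_SIZE          guard page (catches underflow)
 *   pbEMTStackR3                      VMM_STACK_SIZE usable stack bytes
 *   pbEMTStackR3 + VMM_STACK_SIZE     guard page (catches overflow)
 *
 * Since x86 stacks grow downwards, the "bottom" pointer is the highest
 * usable address: pbEMTStackBottomRC = pbEMTStackRC + VMM_STACK_SIZE.
 */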


/**
 * Initialize the loggers.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 */
static int vmmR3InitLoggers(PVM pVM)
{
    int rc;

    /*
     * Allocate RC & R0 Logger instances (they are finalized in the relocator).
     */
#ifdef LOG_ENABLED
    PRTLOGGER pLogger = RTLogDefaultInstance();
    if (pLogger)
    {
        pVM->vmm.s.cbRCLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
        rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
        if (RT_FAILURE(rc))
            return rc;
        pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);

# ifdef VBOX_WITH_R0_LOGGING
        rc = MMR3HyperAllocOnceNoRel(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]),
                                     0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0LoggerR3);
        if (RT_FAILURE(rc))
            return rc;
        pVM->vmm.s.pR0LoggerR3->pVM = pVM->pVMR0;
        //pVM->vmm.s.pR0LoggerR3->fCreated = false;
        pVM->vmm.s.pR0LoggerR3->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]);
        pVM->vmm.s.pR0LoggerR0 = MMHyperR3ToR0(pVM, pVM->vmm.s.pR0LoggerR3);
# endif
    }
#endif /* LOG_ENABLED */

#ifdef VBOX_WITH_RC_RELEASE_LOGGING
    /*
     * Allocate RC release logger instances (finalized in the relocator).
     */
    PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
    if (pRelLogger)
    {
        pVM->vmm.s.cbRCRelLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
        rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
        if (RT_FAILURE(rc))
            return rc;
        pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
    }
#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
    return VINF_SUCCESS;
}


/**
 * VMMR3Init worker that registers the statistics with STAM.
 *
 * @param   pVM     The shared VM structure.
 */
static void vmmR3InitRegisterStats(PVM pVM)
{
    /*
     * Statistics.
     */
    STAM_REG(pVM, &pVM->vmm.s.StatRunRC, STAMTYPE_COUNTER, "/VMM/RunRC", STAMUNIT_OCCURENCES, "Number of context switches.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetExceptionPrivilege, STAMTYPE_COUNTER, "/VMM/RZRet/ExceptionPrivilege", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOBlockEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/EmulateIOBlock", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_IO_BLOCK returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPDFault, STAMTYPE_COUNTER, "/VMM/RZRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPageOverflow, STAMTYPE_COUNTER, "/VMM/RZRet/InvlpgOverflow", STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulHlt, STAMTYPE_COUNTER, "/VMM/RZRet/EmulHlt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");

    STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallHost, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PDM_LOCK calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMQueueFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMQueueFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PDM_QUEUE_FLUSH calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_LOCK calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMPoolGrow", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_POOL_GROW calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_MAP_CHUNK calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallRemReplay, STAMTYPE_COUNTER, "/VMM/RZCallR3/REMReplay", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VMM_LOGGER_FLUSH calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VM_SET_ERROR calls.");
    STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError", STAMUNIT_OCCURENCES, "Number of VMMCALLHOST_VM_SET_RUNTIME_ERROR calls.");
}
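
/*
 * Editor's illustrative sketch (not part of the original file): dumping the
 * counters registered above, assuming the STAM pattern-dump API from
 * <VBox/stam.h> (pulled in via the VBox headers above) behaves as sketched.
 */
#if 0
static void vmmSketchDumpRZStats(PVM pVM)
{
    /* Print every "/VMM/RZRet/*" counter registered by vmmR3InitRegisterStats
       to the log. */
    STAMR3Dump(pVM, "/VMM/RZRet/*");
}
#endif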


/**
 * Initializes the per-VCPU VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3InitCPU(PVM pVM)
{
    LogFlow(("VMMR3InitCPU\n"));
    return VINF_SUCCESS;
}


/**
 * Ring-3 init finalizing.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
{
#ifdef VBOX_STRICT_VMM_STACK
    /*
     * Two inaccessible pages, one on each side of the stack, to catch over/under-flows.
     */
    memset(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
    PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE), PAGE_SIZE, 0);
    RTMemProtect(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);

    memset(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
    PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE), PAGE_SIZE, 0);
    RTMemProtect(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
#endif

    /*
     * Set page attributes to r/w for stack pages.
     */
    int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbEMTStackRC, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /*
         * Create the EMT yield timer.
         */
        rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
        if (RT_SUCCESS(rc))
            rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
    }

#ifdef VBOX_WITH_NMI
    /*
     * Map the host APIC into GC - This is AMD/Intel + Host OS specific!
     */
    if (RT_SUCCESS(rc))
        rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
                    X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
#endif
    return rc;
}


/**
 * Initializes the R0 VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3InitR0(PVM pVM)
{
    int rc;

    /*
     * Initialize the ring-0 logger if we haven't done so yet.
     */
    if (    pVM->vmm.s.pR0LoggerR3
        &&  !pVM->vmm.s.pR0LoggerR3->fCreated)
    {
        rc = VMMR3UpdateLoggers(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Call Ring-0 entry with init code.
     */
    for (;;)
    {
#ifdef NO_SUPCALLR0VMM
        //rc = VERR_GENERAL_FAILURE;
        rc = VINF_SUCCESS;
#else
        rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, VMMGetSvnRev(), NULL);
#endif
        if (    pVM->vmm.s.pR0LoggerR3
            &&  pVM->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
            RTLogFlushToLogger(&pVM->vmm.s.pR0LoggerR3->Logger, NULL);
        if (rc != VINF_VMM_CALL_HOST)
            break;
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
            break;
        /* Resume R0 */
    }

    if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    {
        LogRel(("R0 init failed, rc=%Rra\n", rc));
        if (RT_SUCCESS(rc))
            rc = VERR_INTERNAL_ERROR;
    }
    return rc;
}
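
/*
 * Editor's note: the loop above is an instance of the call-host service
 * pattern repeated throughout this file. A distilled sketch of it, under
 * an #if 0 guard since it duplicates what the real callers inline:
 */
#if 0
static int vmmSketchCallRing0(PVM pVM, uint32_t uOperation)
{
    int rc;
    for (;;)
    {
        rc = SUPCallVMMR0Ex(pVM->pVMR0, uOperation, 0, NULL);   /* enter ring-0 */
        if (rc != VINF_VMM_CALL_HOST)                           /* normal exit */
            break;
        rc = vmmR3ServiceCallHostRequest(pVM);                  /* service the ring-3 request */
        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
            break;                                              /* error or EM state change */
        /* else: resume ring-0 where it left off */
    }
    return rc;
}
#endif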


/**
 * Initializes the RC VMM.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3InitRC(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu);

    /* In VMX mode, there's no need to init RC. */
    if (pVM->vmm.s.fSwitcherDisabled)
        return VINF_SUCCESS;

    AssertReturn(pVM->cCPUs == 1, VERR_RAW_MODE_INVALID_SMP);

    /*
     * Call VMMGCInit():
     *      -# resolve the address.
     *      -# setup stackframe and EIP to use the trampoline.
     *      -# do a generic hypervisor call.
     */
    RTRCPTR RCPtrEP;
    int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
    if (RT_SUCCESS(rc))
    {
        CPUMHyperSetCtxCore(pVCpu, NULL);
        CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
        uint64_t u64TS = RTTimeProgramStartNanoTS();
        CPUMPushHyper(pVCpu, (uint32_t)(u64TS >> 32));    /* Param 3: The program startup TS - Hi. */
        CPUMPushHyper(pVCpu, (uint32_t)u64TS);            /* Param 3: The program startup TS - Lo. */
        CPUMPushHyper(pVCpu, VMMGetSvnRev());             /* Param 2: Version argument. */
        CPUMPushHyper(pVCpu, VMMGC_DO_VMMGC_INIT);        /* Param 1: Operation. */
        CPUMPushHyper(pVCpu, pVM->pVMRC);                 /* Param 0: pVM */
        CPUMPushHyper(pVCpu, 5 * sizeof(RTRCPTR));        /* trampoline param: stacksize. */
        CPUMPushHyper(pVCpu, RCPtrEP);                    /* Call EIP. */
        CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));

        for (;;)
        {
#ifdef NO_SUPCALLR0VMM
            //rc = VERR_GENERAL_FAILURE;
            rc = VINF_SUCCESS;
#else
            rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_CALL_HYPERVISOR, NULL);
#endif
#ifdef LOG_ENABLED
            PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
            if (    pLogger
                &&  pLogger->offScratch > 0)
                RTLogFlushRC(NULL, pLogger);
#endif
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
            PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
            if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
                RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
#endif
            if (rc != VINF_VMM_CALL_HOST)
                break;
            rc = vmmR3ServiceCallHostRequest(pVM);
            if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
                break;
        }

        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
        {
            VMMR3FatalDump(pVM, pVCpu, rc);
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                rc = VERR_INTERNAL_ERROR;
        }
        AssertRC(rc);
    }
    return rc;
}
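
/*
 * Editor's sketch of the hypervisor stack frame built by the CPUMPushHyper()
 * sequence above (x86 stacks grow downwards; ESP ends up at the call EIP),
 * as suggested by the per-push comments:
 *
 *   pbEMTStackBottomRC -> u64TS high word       (param 3, hi)
 *                         u64TS low word        (param 3, lo)
 *                         VMMGetSvnRev()        (param 2: version)
 *                         VMMGC_DO_VMMGC_INIT   (param 1: operation)
 *                         pVM->pVMRC            (param 0: pVM)
 *                         5 * sizeof(RTRCPTR)   (trampoline param: stack frame size)
 *              ESP ---->  RCPtrEP               (trampoline param: address to call)
 *
 * The trampoline at pfnCallTrampolineRC then transfers control to VMMGCEntry
 * with that five-argument frame.
 */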


/**
 * Terminate the VMM bits.
 *
 * @returns VINF_SUCCESS.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMR3Term(PVM pVM)
{
    /*
     * Call Ring-0 entry with termination code.
     */
    int rc;
    for (;;)
    {
#ifdef NO_SUPCALLR0VMM
        //rc = VERR_GENERAL_FAILURE;
        rc = VINF_SUCCESS;
#else
        rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_VMMR0_TERM, 0, NULL);
#endif
        if (    pVM->vmm.s.pR0LoggerR3
            &&  pVM->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
            RTLogFlushToLogger(&pVM->vmm.s.pR0LoggerR3->Logger, NULL);
        if (rc != VINF_VMM_CALL_HOST)
            break;
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
            break;
        /* Resume R0 */
    }
    if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
    {
        LogRel(("VMMR3Term: R0 term failed, rc=%Rra. (warning)\n", rc));
        if (RT_SUCCESS(rc))
            rc = VERR_INTERNAL_ERROR;
    }

#ifdef VBOX_STRICT_VMM_STACK
    /*
     * Make the two stack guard pages present again.
     */
    RTMemProtect(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE,      PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    RTMemProtect(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
#endif
    return rc;
}


/**
 * Terminates the per-VCPU VMM.
 *
 * Termination means cleaning up and freeing all resources; the VM itself is
 * at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) VMMR3TermCPU(PVM pVM)
{
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this component.
 *
 * This function will be called at init and whenever the VMM needs to relocate
 * itself inside the GC.
 *
 * The VMM will need to apply relocations to the core code.
 *
 * @param   pVM         The VM handle.
 * @param   offDelta    The relocation delta.
 */
VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("VMMR3Relocate: offDelta=%RGv\n", offDelta));

    /*
     * Recalc the RC address.
     */
    pVM->vmm.s.pvCoreCodeRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pvCoreCodeR3);

    /*
     * The stack.
     */
    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        CPUMSetHyperESP(pVCpu, CPUMGetHyperESP(pVCpu) + offDelta);
    }

    pVM->vmm.s.pbEMTStackRC       = MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3);
    pVM->vmm.s.pbEMTStackBottomRC = pVM->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;

    /*
     * All the switchers.
     */
    vmmR3SwitcherRelocate(pVM, offDelta);

    /*
     * Get other RC entry points.
     */
    int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
    AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Rra\n", rc));

    rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
    AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Rra\n", rc));

    /*
     * Update the logger.
     */
    VMMR3UpdateLoggers(pVM);
}


/**
 * Updates the settings for the RC and R0 loggers.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) VMMR3UpdateLoggers(PVM pVM)
{
    /*
     * Simply clone the logger instance (for RC).
     */
    int rc = VINF_SUCCESS;
    RTRCPTR RCPtrLoggerFlush = 0;

    if (pVM->vmm.s.pRCLoggerR3
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
        || pVM->vmm.s.pRCRelLoggerR3
#endif
       )
    {
        rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &RCPtrLoggerFlush);
        AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Rra\n", rc));
    }

    if (pVM->vmm.s.pRCLoggerR3)
    {
        RTRCPTR RCPtrLoggerWrapper = 0;
        rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &RCPtrLoggerWrapper);
        AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Rra\n", rc));

        pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
        rc = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pRCLoggerR3, pVM->vmm.s.cbRCLogger,
                          RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
        AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
    }

#ifdef VBOX_WITH_RC_RELEASE_LOGGING
    if (pVM->vmm.s.pRCRelLoggerR3)
    {
        RTRCPTR RCPtrLoggerWrapper = 0;
        rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &RCPtrLoggerWrapper);
        AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Rra\n", rc));

        pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
        rc = RTLogCloneRC(RTLogRelDefaultInstance(), pVM->vmm.s.pRCRelLoggerR3, pVM->vmm.s.cbRCRelLogger,
                          RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
        AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
    }
#endif /* VBOX_WITH_RC_RELEASE_LOGGING */

    /*
     * For the ring-0 EMT logger, we use a per-thread logger instance
     * in ring-0. Only initialize it once.
     */
    PVMMR0LOGGER pR0LoggerR3 = pVM->vmm.s.pR0LoggerR3;
    if (pR0LoggerR3)
    {
        if (!pR0LoggerR3->fCreated)
        {
            RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
            rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
            AssertReleaseMsgRCReturn(rc, ("VMMLoggerWrapper not found! rc=%Rra\n", rc), rc);

            RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
            rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
            AssertReleaseMsgRCReturn(rc, ("VMMLoggerFlush not found! rc=%Rra\n", rc), rc);

            rc = RTLogCreateForR0(&pR0LoggerR3->Logger, pR0LoggerR3->cbLogger,
                                  *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush,
                                  RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
            AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Rra\n", rc), rc);
            pR0LoggerR3->fCreated = true;
            pR0LoggerR3->fFlushingDisabled = false;
        }

        rc = RTLogCopyGroupsAndFlags(&pR0LoggerR3->Logger, NULL /* default */, pVM->vmm.s.pRCLoggerR3->fFlags, RTLOGFLAGS_BUFFERED);
        AssertRC(rc);
    }

    return rc;
}


/**
 * Gets the pointer to a buffer containing the R0/RC AssertMsg1 output.
 *
 * @returns Pointer to the buffer.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
{
    if (HWACCMIsEnabled(pVM))
        return pVM->vmm.s.szRing0AssertMsg1;

    RTRCPTR RCPtr;
    int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg1", &RCPtr);
    if (RT_SUCCESS(rc))
        return (const char *)MMHyperRCToR3(pVM, RCPtr);

    return NULL;
}


/**
 * Gets the pointer to a buffer containing the R0/RC AssertMsg2 output.
 *
 * @returns Pointer to the buffer.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
{
    if (HWACCMIsEnabled(pVM))
        return pVM->vmm.s.szRing0AssertMsg2;

    RTRCPTR RCPtr;
    int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg2", &RCPtr);
    if (RT_SUCCESS(rc))
        return (const char *)MMHyperRCToR3(pVM, RCPtr);

    return NULL;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("vmmR3Save:\n"));

    /*
     * The hypervisor stack.
     * Note! See note in vmmR3Load.
     */
    SSMR3PutRCPtr(pSSM, pVM->vmm.s.pbEMTStackBottomRC);

    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        RTRCPTR RCPtrESP = CPUMGetHyperESP(pVCpu);
        AssertMsg(pVM->vmm.s.pbEMTStackBottomRC - RCPtrESP <= VMM_STACK_SIZE, ("Bottom %RRv ESP=%RRv\n", pVM->vmm.s.pbEMTStackBottomRC, RCPtrESP));
        SSMR3PutRCPtr(pSSM, RCPtrESP);
    }
    SSMR3PutMem(pSSM, pVM->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
    return SSMR3PutU32(pSSM, ~0); /* terminator */
}
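
/*
 * Editor's sketch of the "vmm" saved state unit layout written above:
 *
 *   RTRCPTR                pbEMTStackBottomRC    hypervisor stack bottom
 *   RTRCPTR[cCPUs]         per-VCPU hyper ESP    (raw mode implies cCPUs == 1,
 *                                                 which is what vmmR3Load expects)
 *   uint8_t[VMM_STACK_SIZE] stack bytes          raw copy of the EMT stack
 *   uint32_t               ~0                    terminator
 *
 * vmmR3Load below reads the fields back in the same order and validates the
 * terminator.
 */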


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    LogFlow(("vmmR3Load:\n"));

    /*
     * Validate version.
     */
    if (u32Version != VMM_SAVED_STATE_VERSION)
    {
        AssertMsgFailed(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Check that the stack is in the same place, or that it's fairly empty.
     *
     * Note! This can be skipped next time we update saved state as we will
     *       never be in a R0/RC -> ring-3 call when saving the state. The
     *       stack and the two associated pointers are not required.
     */
    RTRCPTR RCPtrStackBottom;
    SSMR3GetRCPtr(pSSM, &RCPtrStackBottom);
    RTRCPTR RCPtrESP;
    int rc = SSMR3GetRCPtr(pSSM, &RCPtrESP);
    if (RT_FAILURE(rc))
        return rc;

    /* restore the stack. */
    SSMR3GetMem(pSSM, pVM->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);

    /* terminator */
    uint32_t u32;
    rc = SSMR3GetU32(pSSM, &u32);
    if (RT_FAILURE(rc))
        return rc;
    if (u32 != ~0U)
    {
        AssertMsgFailed(("u32=%#x\n", u32));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }
    return VINF_SUCCESS;
}


/**
 * Resolve a builtin RC symbol.
 *
 * Called by PDM when loading or relocating RC modules.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pszSymbol       Symbol to resolve.
 * @param   pRCPtrValue     Where to store the symbol value.
 *
 * @remark  This has to work before VMMR3Relocate() is called.
 */
VMMR3DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue)
{
    if (!strcmp(pszSymbol, "g_Logger"))
    {
        if (pVM->vmm.s.pRCLoggerR3)
            pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
        *pRCPtrValue = pVM->vmm.s.pRCLoggerRC;
    }
    else if (!strcmp(pszSymbol, "g_RelLogger"))
    {
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
        if (pVM->vmm.s.pRCRelLoggerR3)
            pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
        *pRCPtrValue = pVM->vmm.s.pRCRelLoggerRC;
#else
        *pRCPtrValue = NIL_RTRCPTR;
#endif
    }
    else
        return VERR_SYMBOL_NOT_FOUND;
    return VINF_SUCCESS;
}


/**
 * Suspends the CPU yielder.
 *
 * @param   pVM     The VM handle.
 */
VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
{
    if (!pVM->vmm.s.cYieldResumeMillies)
    {
        uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
        uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
        if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
            pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
        else
            pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
        TMTimerStop(pVM->vmm.s.pYieldTimer);
    }
    pVM->vmm.s.u64LastYield = RTTimeNanoTS();
}


/**
 * Stops the CPU yielder.
 *
 * @param   pVM     The VM handle.
 */
VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
{
    if (!pVM->vmm.s.cYieldResumeMillies)
        TMTimerStop(pVM->vmm.s.pYieldTimer);
    pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
    pVM->vmm.s.u64LastYield = RTTimeNanoTS();
}


/**
 * Resumes the CPU yielder when it has been suspended or stopped.
 *
 * @param   pVM     The VM handle.
 */
VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
{
    if (pVM->vmm.s.cYieldResumeMillies)
    {
        TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
        pVM->vmm.s.cYieldResumeMillies = 0;
    }
}
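
/*
 * Editor's illustrative sketch (not part of the original file): the intended
 * suspend/resume bracketing around a region that must not be disturbed by
 * the EMT yield timer. The helper name is hypothetical.
 */
#if 0
static void vmmSketchYieldBracket(PVM pVM)
{
    VMMR3YieldSuspend(pVM);     /* remember the remaining time, stop the timer */
    /* ... timing-sensitive work ... */
    VMMR3YieldResume(pVM);      /* re-arm the timer with the remembered time */
}
#endif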


/**
 * Internal timer callback function.
 *
 * @param   pVM     The VM.
 * @param   pTimer  The timer handle.
 * @param   pvUser  User argument specified upon timer creation.
 */
static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
{
    /*
     * This really needs some careful tuning. While we shouldn't be too greedy since
     * that'll cause the rest of the system to stall, we shouldn't be too nice either
     * because that'll cause us to stall.
     *
     * The current logic is to use the default interval when there is no lag worth
     * mentioning, but when we start accumulating lag we don't bother yielding at all.
     *
     * (This depends on TMCLOCK_VIRTUAL_SYNC being scheduled before TMCLOCK_REAL
     * so the lag is up to date.)
     */
    const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
    if (    u64Lag < 50000000 /* 50ms */
        ||  (   u64Lag < 1000000000 /* 1s */
             && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
       )
    {
        uint64_t u64Elapsed = RTTimeNanoTS();
        pVM->vmm.s.u64LastYield = u64Elapsed;

        RTThreadYield();

#ifdef LOG_ENABLED
        u64Elapsed = RTTimeNanoTS() - u64Elapsed;
        Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
#endif
    }
    TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
}
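
/*
 * Editor's worked example of the heuristic above: with a virtual sync lag of
 * 200ms (at least 50ms but under 1s), the EMT still yields provided the
 * previous yield happened less than 500ms ago. Once the lag reaches 1s, or
 * more than 500ms have passed since the last yield while lagging, the
 * callback only re-arms the timer and lets the EMT keep catching up.
 */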


/**
 * Acquire global VM lock.
 *
 * @returns VBox status code
 * @param   pVM     The VM to operate on.
 *
 * @remarks The global VMM lock isn't really used for anything any longer.
 */
VMMR3DECL(int) VMMR3Lock(PVM pVM)
{
    return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
}


/**
 * Release global VM lock.
 *
 * @returns VBox status code
 * @param   pVM     The VM to operate on.
 *
 * @remarks The global VMM lock isn't really used for anything any longer.
 */
VMMR3DECL(int) VMMR3Unlock(PVM pVM)
{
    return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
}


/**
 * Return global VM lock owner.
 *
 * @returns Thread id of owner.
 * @returns NIL_RTTHREAD if no owner.
 * @param   pVM     The VM to operate on.
 *
 * @remarks The global VMM lock isn't really used for anything any longer.
 */
VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
{
    return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
}


/**
 * Checks if the current thread is the owner of the global VM lock.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pVM     The VM to operate on.
 *
 * @remarks The global VMM lock isn't really used for anything any longer.
 */
VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
{
    return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
}


/**
 * Executes guest code in the raw-mode context.
 *
 * @param   pVM     VM handle.
 * @param   pVCpu   The VMCPU to operate on.
 */
VMMR3DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu)
{
    Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /*
     * Set the EIP and ESP.
     */
    CPUMSetHyperEIP(pVCpu, CPUMGetGuestEFlags(pVCpu) & X86_EFL_VM
                    ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
                    : pVM->vmm.s.pfnCPUMRCResumeGuest);
    CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC);

    /*
     * We hide log flushes (outer) and hypervisor interrupts (inner).
     */
    for (;;)
    {
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
#ifdef VBOX_STRICT
        PGMMapCheck(pVM);
#endif
        int rc;
        do
        {
#ifdef NO_SUPCALLR0VMM
            rc = VERR_GENERAL_FAILURE;
#else
            rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVM->vmm.s.iLastGZRc;
#endif
        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);

        /*
         * Flush the logs.
         */
#ifdef LOG_ENABLED
        PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
        if (    pLogger
            &&  pLogger->offScratch > 0)
            RTLogFlushRC(NULL, pLogger);
#endif
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
        PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
        if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
            RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
#endif
        if (rc != VINF_VMM_CALL_HOST)
        {
            Log2(("VMMR3RawRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
            return rc;
        }
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (RT_FAILURE(rc))
            return rc;
        /* Resume GC */
    }
}


/**
 * Executes guest code (Intel VT-x and AMD-V).
 *
 * @param   pVM     VM handle.
 * @param   pVCpu   The VMCPU to operate on.
 */
VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM, PVMCPU pVCpu)
{
    Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    for (;;)
    {
        int rc;
        do
        {
#ifdef NO_SUPCALLR0VMM
            rc = VERR_GENERAL_FAILURE;
#else
            rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, pVCpu->idCpu);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVM->vmm.s.iLastGZRc;
#endif
        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);

#ifdef LOG_ENABLED
        /*
         * Flush the log
         */
        PVMMR0LOGGER pR0LoggerR3 = pVM->vmm.s.pR0LoggerR3;
        if (    pR0LoggerR3
            &&  pR0LoggerR3->Logger.offScratch > 0)
            RTLogFlushToLogger(&pR0LoggerR3->Logger, NULL);
#endif /* LOG_ENABLED */
        if (rc != VINF_VMM_CALL_HOST)
        {
            Log2(("VMMR3HwAccRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
            return rc;
        }
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (RT_FAILURE(rc))
            return rc;
        /* Resume R0 */
    }
}


/**
 * Calls a RC function.
 *
 * @param   pVM         The VM handle.
 * @param   RCPtrEntry  The address of the RC function.
 * @param   cArgs       The number of arguments in the ellipsis.
 * @param   ...         Arguments to the function.
 */
VMMR3DECL(int) VMMR3CallRC(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, ...)
{
    va_list args;
    va_start(args, cArgs);
    int rc = VMMR3CallRCV(pVM, RCPtrEntry, cArgs, args);
    va_end(args);
    return rc;
}
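
/*
 * Editor's illustrative sketch (not part of the original file): calling an
 * RC function through VMMR3CallRC. "MyRCHelper" is a hypothetical symbol;
 * real callers resolve whatever RC entry point they actually need.
 */
#if 0
static int vmmSketchCallRCHelper(PVM pVM, RTGCUINTPTR32 uArg0, RTGCUINTPTR32 uArg1)
{
    /* Resolve the RC address of the (hypothetical) helper, then call it with
       two 32-bit arguments through the trampoline machinery below. */
    RTRCPTR RCPtrHelper;
    int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "MyRCHelper", &RCPtrHelper);
    if (RT_SUCCESS(rc))
        rc = VMMR3CallRC(pVM, RCPtrHelper, 2 /* cArgs */, uArg0, uArg1);
    return rc;
}
#endif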


/**
 * Calls a RC function.
 *
 * @param   pVM         The VM handle.
 * @param   RCPtrEntry  The address of the RC function.
 * @param   cArgs       The number of arguments in @a args.
 * @param   args        Arguments to the function.
 */
VMMR3DECL(int) VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list args)
{
    /* Raw mode implies 1 VCPU. */
    Assert(pVM->cCPUs == 1);
    PVMCPU pVCpu = &pVM->aCpus[0];

    Log2(("VMMR3CallGCV: RCPtrEntry=%RRv cArgs=%d\n", RCPtrEntry, cArgs));

    /*
     * Setup the call frame using the trampoline.
     */
    CPUMHyperSetCtxCore(pVCpu, NULL);
    memset(pVM->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
    CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32));
    PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
    int i = cArgs;
    while (i-- > 0)
        *pFrame++ = va_arg(args, RTGCUINTPTR32);

    CPUMPushHyper(pVCpu, cArgs * sizeof(RTGCUINTPTR32)); /* stack frame size */
    CPUMPushHyper(pVCpu, RCPtrEntry);                    /* what to call */
    CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);

    /*
     * We hide log flushes (outer) and hypervisor interrupts (inner).
     */
    for (;;)
    {
        int rc;
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
        do
        {
#ifdef NO_SUPCALLR0VMM
            rc = VERR_GENERAL_FAILURE;
#else
            rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVM->vmm.s.iLastGZRc;
#endif
        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);

        /*
         * Flush the logs.
         */
#ifdef LOG_ENABLED
        PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
        if (    pLogger
            &&  pLogger->offScratch > 0)
            RTLogFlushRC(NULL, pLogger);
#endif
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
        PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
        if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
            RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
#endif
        if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
            VMMR3FatalDump(pVM, pVCpu, rc);
        if (rc != VINF_VMM_CALL_HOST)
        {
            Log2(("VMMR3CallGCV: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
            return rc;
        }
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }
}


/**
 * Wrapper for SUPCallVMMR0Ex which will deal with
 * VINF_VMM_CALL_HOST returns.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   uOperation  Operation to execute.
 * @param   u64Arg      Constant argument.
 * @param   pReqHdr     Pointer to a request header. See SUPCallVMMR0Ex for
 *                      details.
 */
VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
{
    /*
     * Call the Ring-0 entry point.
     */
    int rc;
    for (;;)
    {
#ifdef NO_SUPCALLR0VMM
        rc = VERR_GENERAL_FAILURE;
#else
        rc = SUPCallVMMR0Ex(pVM->pVMR0, uOperation, u64Arg, pReqHdr);
#endif
        if (    pVM->vmm.s.pR0LoggerR3
            &&  pVM->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
            RTLogFlushToLogger(&pVM->vmm.s.pR0LoggerR3->Logger, NULL);
        if (rc != VINF_VMM_CALL_HOST)
            break;
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
            break;
        /* Resume R0 */
    }

    AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
                          ("uOperation=%u rc=%Rrc\n", uOperation, rc),
                          VERR_INTERNAL_ERROR);
    return rc;
}
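
/*
 * Editor's illustrative sketch (not part of the original file): the general
 * shape of a VMMR3CallR0 invocation that passes a request packet. The
 * operation number (42) and the request structure are hypothetical; the
 * convention sketched here is that requests embed SUPVMMR0REQHDR as their
 * first member with the magic and size filled in.
 */
#if 0
typedef struct VMMSKETCHREQ
{
    SUPVMMR0REQHDR  Hdr;        /* must be first: magic + size */
    uint32_t        uPayload;   /* hypothetical request data */
} VMMSKETCHREQ;

static int vmmSketchCallR0WithReq(PVM pVM)
{
    VMMSKETCHREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.uPayload     = 42;
    return VMMR3CallR0(pVM, 42 /* hypothetical VMMR0_DO_* value */, 0 /* u64Arg */, &Req.Hdr);
}
#endif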


/**
 * Resumes executing hypervisor code when interrupted by a queue flush or a
 * debug event.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU handle.
 */
VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu)
{
    Log(("VMMR3ResumeHyper: eip=%RRv esp=%RRv\n", CPUMGetHyperEIP(pVCpu), CPUMGetHyperESP(pVCpu)));

    /*
     * We hide log flushes (outer) and hypervisor interrupts (inner).
     */
    for (;;)
    {
        int rc;
        Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
        do
        {
#ifdef NO_SUPCALLR0VMM
            rc = VERR_GENERAL_FAILURE;
#else
            rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
            if (RT_LIKELY(rc == VINF_SUCCESS))
                rc = pVM->vmm.s.iLastGZRc;
#endif
        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);

        /*
         * Flush the loggers.
         */
#ifdef LOG_ENABLED
        PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
        if (    pLogger
            &&  pLogger->offScratch > 0)
            RTLogFlushRC(NULL, pLogger);
#endif
#ifdef VBOX_WITH_RC_RELEASE_LOGGING
        PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
        if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
            RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
#endif
        if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
            VMMR3FatalDump(pVM, pVCpu, rc);
        if (rc != VINF_VMM_CALL_HOST)
        {
            Log(("VMMR3ResumeHyper: returns %Rrc\n", rc));
            return rc;
        }
        rc = vmmR3ServiceCallHostRequest(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }
}


/**
 * Service a call to the ring-3 host code.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @remark  Careful with critsects.
 */
static int vmmR3ServiceCallHostRequest(PVM pVM)
{
    switch (pVM->vmm.s.enmCallHostOperation)
    {
        /*
         * Acquire the PDM lock.
         */
        case VMMCALLHOST_PDM_LOCK:
        {
            pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
            break;
        }

        /*
         * Flush a PDM queue.
         */
        case VMMCALLHOST_PDM_QUEUE_FLUSH:
        {
            PDMR3QueueFlushWorker(pVM, NULL);
            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
            break;
        }

        /*
         * Grow the PGM pool.
         */
        case VMMCALLHOST_PGM_POOL_GROW:
        {
            pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
            break;
        }

        /*
         * Maps a page allocation chunk into ring-3 so ring-0 can use it.
         */
        case VMMCALLHOST_PGM_MAP_CHUNK:
        {
            pVM->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVM->vmm.s.u64CallHostArg);
            break;
        }

        /*
         * Allocates more handy pages.
         */
        case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
        {
            pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
            break;
        }

        /*
         * Acquire the PGM lock.
         */
        case VMMCALLHOST_PGM_LOCK:
        {
            pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
            break;
        }

        /*
         * Flush REM handler notifications.
         */
        case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
        {
            REMR3ReplayHandlerNotifications(pVM);
            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
            break;
        }

        /*
         * This is a noop. We just take this route to avoid unnecessary
         * tests in the loops.
         */
        case VMMCALLHOST_VMM_LOGGER_FLUSH:
            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
            LogAlways(("*FLUSH*\n"));
            break;

        /*
         * Set the VM error message.
         */
        case VMMCALLHOST_VM_SET_ERROR:
            VMR3SetErrorWorker(pVM);
            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
            break;

        /*
         * Set the VM runtime error message.
         */
        case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
            pVM->vmm.s.rcCallHost = VMR3SetRuntimeErrorWorker(pVM);
            break;

        /*
         * Signal a ring 0 hypervisor assertion.
         * Cancel the longjmp operation that's in progress.
         */
        case VMMCALLHOST_VM_R0_ASSERTION:
            pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
            pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call = false;
#ifdef RT_ARCH_X86
            pVM->vmm.s.CallHostR0JmpBuf.eip = 0;
#else
            pVM->vmm.s.CallHostR0JmpBuf.rip = 0;
#endif
            LogRel((pVM->vmm.s.szRing0AssertMsg1));
            LogRel((pVM->vmm.s.szRing0AssertMsg2));
            return VERR_VMM_RING0_ASSERTION;

        /*
         * A forced switch to ring 0 for preemption purposes.
         */
        case VMMCALLHOST_VM_R0_PREEMPT:
            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
            break;

        default:
            AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
            return VERR_INTERNAL_ERROR;
    }

    pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
    return VINF_SUCCESS;
}
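
/*
 * Editor's note on the protocol serviced above: ring-0/RC code stores the
 * operation in pVM->vmm.s.enmCallHostOperation (plus u64CallHostArg), then
 * returns VINF_VMM_CALL_HOST, which lands in one of the service loops in
 * this file. This function dispatches the request, leaves the result in
 * pVM->vmm.s.rcCallHost for the other side to pick up, and resets the
 * operation to VMMCALLHOST_INVALID before the caller resumes ring-0/RC.
 */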


/**
 * Displays the Force action Flags.
 *
 * @param   pVM         The VM handle.
 * @param   pHlp        The output helpers.
 * @param   pszArgs     The additional arguments (ignored).
 */
static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;

    pHlp->pfnPrintf(pHlp, "Forced action Flags: %#RX32", fGlobalForcedActions);
    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pHlp->pfnPrintf(pHlp, "\nCPU %d: Forced action Flags: %#RX32", i, pVCpu->fLocalForcedActions);
    }

    /* show the flag mnemonics */
    int c = 0;
    uint32_t f = fGlobalForcedActions;
#define PRINT_FLAG(flag) do { \
        if (f & (flag)) \
        { \
            static const char *s_psz = #flag; \
            if (!(c % 6)) \
                pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz + 6); \
            else \
                pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
            c++; \
            f &= ~(flag); \
        } \
    } while (0)
    PRINT_FLAG(VM_FF_TIMER);
    PRINT_FLAG(VM_FF_PDM_QUEUES);
    PRINT_FLAG(VM_FF_PDM_DMA);
    PRINT_FLAG(VM_FF_PDM_CRITSECT);
    PRINT_FLAG(VM_FF_DBGF);
    PRINT_FLAG(VM_FF_REQUEST);
    PRINT_FLAG(VM_FF_TERMINATE);
    PRINT_FLAG(VM_FF_RESET);
    PRINT_FLAG(VM_FF_PGM_NEED_HANDY_PAGES);
    PRINT_FLAG(VM_FF_PGM_NO_MEMORY);
    PRINT_FLAG(VM_FF_REM_HANDLER_NOTIFY);
    PRINT_FLAG(VM_FF_DEBUG_SUSPEND);
    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        f = pVCpu->fLocalForcedActions;
#define PRINT_CPU_FLAG(flag) do { \
        if (f & (flag)) \
        { \
            static const char *s_psz = #flag; \
            if (!(c % 6)) \
                pHlp->pfnPrintf(pHlp, "CPU %d: %s\n %s", i, c ? "," : "", s_psz + 6); \
            else \
                pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
            c++; \
            f &= ~(flag); \
        } \
    } while (0)

        PRINT_CPU_FLAG(VMCPU_FF_INTERRUPT_APIC);
        PRINT_CPU_FLAG(VMCPU_FF_INTERRUPT_PIC);
        PRINT_CPU_FLAG(VMCPU_FF_PGM_SYNC_CR3);
        PRINT_CPU_FLAG(VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
        PRINT_CPU_FLAG(VMCPU_FF_TRPM_SYNC_IDT);
        PRINT_CPU_FLAG(VMCPU_FF_SELM_SYNC_TSS);
        PRINT_CPU_FLAG(VMCPU_FF_SELM_SYNC_GDT);
        PRINT_CPU_FLAG(VMCPU_FF_SELM_SYNC_LDT);
        PRINT_CPU_FLAG(VMCPU_FF_INHIBIT_INTERRUPTS);
        PRINT_CPU_FLAG(VMCPU_FF_CSAM_SCAN_PAGE);
        PRINT_CPU_FLAG(VMCPU_FF_CSAM_PENDING_ACTION);
        PRINT_CPU_FLAG(VMCPU_FF_TO_R3);
    }

    if (f)
        pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
    else
        pHlp->pfnPrintf(pHlp, "\n");
#undef PRINT_FLAG
#undef PRINT_CPU_FLAG

    /* the groups */
    c = 0;
#define PRINT_GROUP(grp) do { \
        if (fGlobalForcedActions & (grp)) \
        { \
            static const char *s_psz = #grp; \
            if (!(c % 5)) \
                pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : "Groups:\n", s_psz + 6); \
            else \
                pHlp->pfnPrintf(pHlp, ", %s", s_psz + 6); \
            c++; \
        } \
    } while (0)
    PRINT_GROUP(VM_FF_EXTERNAL_SUSPENDED_MASK);
    PRINT_GROUP(VM_FF_EXTERNAL_HALTED_MASK);
    PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_MASK);
    PRINT_GROUP(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK);
    PRINT_GROUP(VM_FF_HIGH_PRIORITY_POST_MASK);
    PRINT_GROUP(VM_FF_NORMAL_PRIORITY_POST_MASK);
    PRINT_GROUP(VM_FF_NORMAL_PRIORITY_MASK);
    PRINT_GROUP(VM_FF_ALL_BUT_RAW_MASK);
    if (c)
        pHlp->pfnPrintf(pHlp, "\n");
#undef PRINT_GROUP
}
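
/*
 * Editor's illustrative sketch (not part of the original file): triggering
 * the "ff" info item registered in VMMR3Init through the standard DBGF info
 * interface. The helper name is hypothetical.
 */
#if 0
static void vmmSketchShowForcedActions(PVM pVM)
{
    /* NULL arguments and a NULL helper table select the defaults, so the
       forced-action dump produced by vmmR3InfoFF goes to the log. */
    DBGFR3Info(pVM, "ff", NULL /* pszArgs */, NULL /* pHlp */);
}
#endif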