VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMM.cpp@41932

Last change on this file since 41932 was 41931, checked in by vboxsync on 2012-06-27:

TRPM: Save the state directly to the CPUMCPU context member instead of putting it on the stack. This avoids copying the state around before returning to host context to service an IRQ, or before using IEM.

1/* $Id: VMM.cpp 41931 2012-06-27 16:12:16Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18//#define NO_SUPCALLR0VMM
19
20/** @page pg_vmm VMM - The Virtual Machine Monitor
21 *
22 * The VMM component is two things at the moment: a component doing a few
23 * management and routing tasks, and the whole virtual machine monitor
24 * thing. For hysterical reasons, it does not do all the management that one
25 * would expect; that is instead done by @ref pg_vm. We'll address this
26 * misdesign eventually.
27 *
28 * @see grp_vmm, grp_vm
29 *
30 *
31 * @section sec_vmmstate VMM State
32 *
33 * @image html VM_Statechart_Diagram.gif
34 *
35 * To be written.
36 *
37 *
38 * @subsection subsec_vmm_init VMM Initialization
39 *
40 * To be written.
41 *
42 *
43 * @subsection subsec_vmm_term VMM Termination
44 *
45 * To be written.
46 *
47 *
48 * @section sec_vmm_limits VMM Limits
49 *
50 * There are various resource limits imposed by the VMM and its sub-components.
51 * We'll list some of them here; a worked example of the memory figures follows this comment block.
52 *
53 * On 64-bit hosts:
54 * - Max 8191 VMs. Imposed by GVMM's handle allocation (GVMM_MAX_HANDLES),
55 * can be increased up to 64K - 1.
56 * - Max 16TB - 64KB of the host memory can be used for backing VM RAM and
57 * ROM pages. The limit is imposed by the 32-bit page ID used by GMM.
58 * - A VM can be assigned all the memory we can use (16TB), however, the
59 * Main API will restrict this to 2TB (MM_RAM_MAX_IN_MB).
60 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
61 *
62 * On 32-bit hosts:
63 * - Max 127 VMs. Imposed by GMM's per page structure.
64 * - Max 64GB - 64KB of the host memory can be used for backing VM RAM and
65 * ROM pages. The limit is imposed by the 28-bit page ID used
66 * internally in GMM. It is also limited by PAE.
67 * - A VM can be assigned all the memory GMM can allocate, however, the
68 * Main API will restrict this to 3584MB (MM_RAM_MAX_IN_MB).
69 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
70 *
71 */
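/*
 * Back-of-the-envelope arithmetic for the memory limits listed above
 * (illustrative only; the authoritative constants live in GMM/MM, not here):
 *   - 64-bit hosts: a 32-bit page ID addresses 2^32 pages * 4 KiB/page = 16 TiB;
 *     the quoted limit additionally subtracts 64 KiB.
 *   - 32-bit hosts: a 28-bit page ID would allow 2^28 pages * 4 KiB/page = 1 TiB,
 *     but PAE's 36-bit physical addressing caps usable host memory at 64 GiB,
 *     hence the 64 GiB - 64 KiB figure.
 */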
72
73/*******************************************************************************
74* Header Files *
75*******************************************************************************/
76#define LOG_GROUP LOG_GROUP_VMM
77#include <VBox/vmm/vmm.h>
78#include <VBox/vmm/vmapi.h>
79#include <VBox/vmm/pgm.h>
80#include <VBox/vmm/cfgm.h>
81#include <VBox/vmm/pdmqueue.h>
82#include <VBox/vmm/pdmcritsect.h>
83#include <VBox/vmm/pdmapi.h>
84#include <VBox/vmm/cpum.h>
85#include <VBox/vmm/mm.h>
86#include <VBox/vmm/iom.h>
87#include <VBox/vmm/trpm.h>
88#include <VBox/vmm/selm.h>
89#include <VBox/vmm/em.h>
90#include <VBox/sup.h>
91#include <VBox/vmm/dbgf.h>
92#include <VBox/vmm/csam.h>
93#include <VBox/vmm/patm.h>
94#ifdef VBOX_WITH_REM
95# include <VBox/vmm/rem.h>
96#endif
97#include <VBox/vmm/ssm.h>
98#include <VBox/vmm/tm.h>
99#include "VMMInternal.h"
100#include "VMMSwitcher.h"
101#include <VBox/vmm/vm.h>
102#include <VBox/vmm/ftm.h>
103
104#include <VBox/err.h>
105#include <VBox/param.h>
106#include <VBox/version.h>
107#include <VBox/vmm/hwaccm.h>
108#include <iprt/assert.h>
109#include <iprt/alloc.h>
110#include <iprt/asm.h>
111#include <iprt/time.h>
112#include <iprt/semaphore.h>
113#include <iprt/stream.h>
114#include <iprt/string.h>
115#include <iprt/stdarg.h>
116#include <iprt/ctype.h>
117#include <iprt/x86.h>
118
119
120
121/*******************************************************************************
122* Defined Constants And Macros *
123*******************************************************************************/
124/** The saved state version. */
125#define VMM_SAVED_STATE_VERSION 4
126/** The saved state version used by v3.0 and earlier. (Teleportation) */
127#define VMM_SAVED_STATE_VERSION_3_0 3
128
129
130/*******************************************************************************
131* Internal Functions *
132*******************************************************************************/
133static int vmmR3InitStacks(PVM pVM);
134static int vmmR3InitLoggers(PVM pVM);
135static void vmmR3InitRegisterStats(PVM pVM);
136static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
137static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
138static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
139static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu);
140static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
141
142
143/**
144 * Initializes the VMM.
145 *
146 * @returns VBox status code.
147 * @param pVM Pointer to the VM.
148 */
149VMMR3_INT_DECL(int) VMMR3Init(PVM pVM)
150{
151 LogFlow(("VMMR3Init\n"));
152
153 /*
154 * Assert alignment, sizes and order.
155 */
156 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
157 AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
158 AssertCompile(sizeof(pVM->aCpus[0].vmm.s) <= sizeof(pVM->aCpus[0].vmm.padding));
159
160 /*
161 * Init basic VM VMM members.
162 */
163 pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
164 pVM->vmm.s.pahEvtRendezvousEnterOrdered = NULL;
165 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
166 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
167 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
168 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
169
170 /** @cfgm{YieldEMTInterval, uint32_t, 1, UINT32_MAX, 23, ms}
171 * The EMT yield interval. The EMT yielding is a hack we employ to play a
172 * bit nicer with the rest of the system (like for instance the GUI).
173 */
174 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies,
175 23 /* Value arrived at after experimenting with the grub boot prompt. */);
176 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);
177
178
179 /** @cfgm{VMM/UsePeriodicPreemptionTimers, boolean, true}
180 * Controls whether we employ per-cpu preemption timers to limit the time
181 * spent executing guest code. This option is not available on all
182 * platforms; on those we silently ignore the setting. If we are
183 * running in VT-x mode, we will use the VMX-preemption timer instead of
184 * this one when possible.
185 */
186 PCFGMNODE pCfgVMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "VMM");
187 rc = CFGMR3QueryBoolDef(pCfgVMM, "UsePeriodicPreemptionTimers", &pVM->vmm.s.fUsePeriodicPreemptionTimers, true);
188 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"VMM/UsePeriodicPreemptionTimers\", rc=%Rrc\n", rc), rc);
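 /* Illustration only: assuming the usual "VBoxInternal/" extradata to CFGM
  * mapping handled by Main (outside this file), the two settings above could be
  * overridden from the host along these lines:
  *   VBoxManage setextradata "MyVM" "VBoxInternal/YieldEMTInterval" 100
  *   VBoxManage setextradata "MyVM" "VBoxInternal/VMM/UsePeriodicPreemptionTimers" 0
  */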
189
190 /*
191 * Initialize the VMM rendezvous semaphores.
192 */
193 pVM->vmm.s.pahEvtRendezvousEnterOrdered = (PRTSEMEVENT)MMR3HeapAlloc(pVM, MM_TAG_VMM, sizeof(RTSEMEVENT) * pVM->cCpus);
194 if (!pVM->vmm.s.pahEvtRendezvousEnterOrdered)
195 return VERR_NO_MEMORY;
196 for (VMCPUID i = 0; i < pVM->cCpus; i++)
197 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
198 for (VMCPUID i = 0; i < pVM->cCpus; i++)
199 {
200 rc = RTSemEventCreate(&pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
201 AssertRCReturn(rc, rc);
202 }
203 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousEnterOneByOne);
204 AssertRCReturn(rc, rc);
205 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
206 AssertRCReturn(rc, rc);
207 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousDone);
208 AssertRCReturn(rc, rc);
209 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
210 AssertRCReturn(rc, rc);
211
212 /* GC switchers are enabled by default. Turned off by HWACCM. */
213 pVM->vmm.s.fSwitcherDisabled = false;
214
215 /*
216 * Register the saved state data unit.
217 */
218 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
219 NULL, NULL, NULL,
220 NULL, vmmR3Save, NULL,
221 NULL, vmmR3Load, NULL);
222 if (RT_FAILURE(rc))
223 return rc;
224
225 /*
226 * Register the Ring-0 VM handle with the session for fast ioctl calls.
227 */
228 rc = SUPR3SetVMForFastIOCtl(pVM->pVMR0);
229 if (RT_FAILURE(rc))
230 return rc;
231
232 /*
233 * Init various sub-components.
234 */
235 rc = vmmR3SwitcherInit(pVM);
236 if (RT_SUCCESS(rc))
237 {
238 rc = vmmR3InitStacks(pVM);
239 if (RT_SUCCESS(rc))
240 {
241 rc = vmmR3InitLoggers(pVM);
242
243#ifdef VBOX_WITH_NMI
244 /*
245 * Allocate mapping for the host APIC.
246 */
247 if (RT_SUCCESS(rc))
248 {
249 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
250 AssertRC(rc);
251 }
252#endif
253 if (RT_SUCCESS(rc))
254 {
255 /*
256 * Debug info and statistics.
257 */
258 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
259 vmmR3InitRegisterStats(pVM);
260 vmmInitFormatTypes();
261
262 return VINF_SUCCESS;
263 }
264 }
265 /** @todo: Need failure cleanup. */
266
267 //more todo in here?
268 //if (RT_SUCCESS(rc))
269 //{
270 //}
271 //int rc2 = vmmR3TermCoreCode(pVM);
272 //AssertRC(rc2));
273 }
274
275 return rc;
276}
277
278
279/**
280 * Allocate & setup the VMM RC stack(s) (for EMTs).
281 *
282 * The stacks are also used for long jumps in Ring-0.
283 *
284 * @returns VBox status code.
285 * @param pVM Pointer to the VM.
286 *
287 * @remarks The optional guard page gets its protection set up during R3 init
288 * completion because of init order issues.
289 */
290static int vmmR3InitStacks(PVM pVM)
291{
292 int rc = VINF_SUCCESS;
293#ifdef VMM_R0_SWITCH_STACK
294 uint32_t fFlags = MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
295#else
296 uint32_t fFlags = 0;
297#endif
298
299 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
300 {
301 PVMCPU pVCpu = &pVM->aCpus[idCpu];
302
303#ifdef VBOX_STRICT_VMM_STACK
304 rc = MMR3HyperAllocOnceNoRelEx(pVM, PAGE_SIZE + VMM_STACK_SIZE + PAGE_SIZE,
305#else
306 rc = MMR3HyperAllocOnceNoRelEx(pVM, VMM_STACK_SIZE,
307#endif
308 PAGE_SIZE, MM_TAG_VMM, fFlags, (void **)&pVCpu->vmm.s.pbEMTStackR3);
309 if (RT_SUCCESS(rc))
310 {
311#ifdef VBOX_STRICT_VMM_STACK
312 pVCpu->vmm.s.pbEMTStackR3 += PAGE_SIZE;
313#endif
314#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
315 /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
316 if (!VMMIsHwVirtExtForced(pVM))
317 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = NIL_RTR0PTR;
318 else
319#endif
320 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
321 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
322 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
323 AssertRelease(pVCpu->vmm.s.pbEMTStackRC);
324
325 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
326 }
327 }
328
329 return rc;
330}
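/*
 * Rough per-VCPU stack layout produced by vmmR3InitStacks() in VBOX_STRICT_VMM_STACK
 * builds (informal sketch derived from the code above; the guard pages are only
 * made inaccessible later, in VMMR3InitCompleted for VMINITCOMPLETED_RING3):
 *
 *   pbEMTStackR3 - PAGE_SIZE          guard page (catches over/under-flow)
 *   pbEMTStackR3                      VMM_STACK_SIZE bytes of usable stack
 *   pbEMTStackR3 + VMM_STACK_SIZE     guard page (catches over/under-flow)
 *
 * The RC/hyper stack pointer starts at pbEMTStackBottomRC, i.e. the high end.
 */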
331
332
333/**
334 * Initialize the loggers.
335 *
336 * @returns VBox status code.
337 * @param pVM Pointer to the VM.
338 */
339static int vmmR3InitLoggers(PVM pVM)
340{
341 int rc;
342#define RTLogCalcSizeForR0(cGroups, fFlags) (RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[cGroups]) + PAGE_SIZE)
343
344 /*
345 * Allocate RC & R0 Logger instances (they are finalized in the relocator).
346 */
347#ifdef LOG_ENABLED
348 PRTLOGGER pLogger = RTLogDefaultInstance();
349 if (pLogger)
350 {
351 pVM->vmm.s.cbRCLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
352 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
353 if (RT_FAILURE(rc))
354 return rc;
355 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
356
357# ifdef VBOX_WITH_R0_LOGGING
358 size_t const cbLogger = RTLogCalcSizeForR0(pLogger->cGroups, 0);
359 for (VMCPUID i = 0; i < pVM->cCpus; i++)
360 {
361 PVMCPU pVCpu = &pVM->aCpus[i];
362 rc = MMR3HyperAllocOnceNoRelEx(pVM, cbLogger, PAGE_SIZE, MM_TAG_VMM, MMHYPER_AONR_FLAGS_KERNEL_MAPPING,
363 (void **)&pVCpu->vmm.s.pR0LoggerR3);
364 if (RT_FAILURE(rc))
365 return rc;
366 pVCpu->vmm.s.pR0LoggerR3->pVM = pVM->pVMR0;
367 //pVCpu->vmm.s.pR0LoggerR3->fCreated = false;
368 pVCpu->vmm.s.pR0LoggerR3->cbLogger = (uint32_t)cbLogger;
369 pVCpu->vmm.s.pR0LoggerR0 = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pR0LoggerR3);
370 }
371# endif
372 }
373#endif /* LOG_ENABLED */
374
375#ifdef VBOX_WITH_RC_RELEASE_LOGGING
376 /*
377 * Allocate RC release logger instances (finalized in the relocator).
378 */
379 PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
380 if (pRelLogger)
381 {
382 pVM->vmm.s.cbRCRelLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
383 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
384 if (RT_FAILURE(rc))
385 return rc;
386 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
387 }
388#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
389 return VINF_SUCCESS;
390}
391
392
393/**
394 * VMMR3Init worker that registers the statistics with STAM.
395 *
396 * @param pVM The shared VM structure.
397 */
398static void vmmR3InitRegisterStats(PVM pVM)
399{
400 /*
401 * Statistics.
402 */
403 STAM_REG(pVM, &pVM->vmm.s.StatRunRC, STAMTYPE_COUNTER, "/VMM/RunRC", STAMUNIT_OCCURENCES, "Number of context switches.");
404 STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
405 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
406 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
407 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
408 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
409 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
410 STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
411 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
412 STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
413 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOBlockEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/EmulateIOBlock", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_IO_BLOCK returns.");
414 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
415 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_READ returns.");
416 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_WRITE returns.");
417 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ returns.");
418 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_WRITE returns.");
419 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ_WRITE returns.");
420 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
421 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
422 STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
423 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
424 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
425 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
426 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPDFault, STAMTYPE_COUNTER, "/VMM/RZRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
427 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
428 STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
429 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
430 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
431 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
432 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
433 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
434 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
435 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
436 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Unknown, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Unknown", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns (unknown reason).");
437 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3TMVirt, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/TMVirt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns (TM virtual sync).");
438 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3HandyPages, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Handy", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns (handy pages).");
439 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3PDMQueues, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/PDMQueue", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns (PDM queues).");
440 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Rendezvous, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Rendezvous", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns (EMT rendezvous).");
441 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Timer, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Timer", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns (timers).");
442 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3DMA, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/DMA", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns (DMA).");
443 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3CritSect, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/CritSect", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns (critical section).");
444 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
445 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
446 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
447 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
448 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
449 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
450 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HWACCM_PATCH_TPR_INSTR returns.");
451 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
452 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_LOCK calls.");
453 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMCritSectEnter, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMCritSectEnter", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_CRITSECT_ENTER calls.");
454 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_LOCK calls.");
455 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMPoolGrow", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_POOL_GROW calls.");
456 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_MAP_CHUNK calls.");
457 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES calls.");
458 STAM_REG(pVM, &pVM->vmm.s.StatRZCallRemReplay, STAMTYPE_COUNTER, "/VMM/RZCallR3/REMReplay", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS calls.");
459 STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VMM_LOGGER_FLUSH calls.");
460 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_ERROR calls.");
461 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_RUNTIME_ERROR calls.");
462
463#ifdef VBOX_WITH_STATISTICS
464 for (VMCPUID i = 0; i < pVM->cCpus; i++)
465 {
466 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedMax, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max amount of stack used.", "/VMM/Stack/CPU%u/Max", i);
467 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedAvg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Average stack usage.", "/VMM/Stack/CPU%u/Avg", i);
468 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cUsedTotal, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of stack usages.", "/VMM/Stack/CPU%u/Uses", i);
469 }
470#endif
471}
472
473
474/**
475 * Initializes the R0 VMM.
476 *
477 * @returns VBox status code.
478 * @param pVM Pointer to the VM.
479 */
480VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
481{
482 int rc;
483 PVMCPU pVCpu = VMMGetCpu(pVM);
484 Assert(pVCpu && pVCpu->idCpu == 0);
485
486#ifdef LOG_ENABLED
487 /*
488 * Initialize the ring-0 logger if we haven't done so yet.
489 */
490 if ( pVCpu->vmm.s.pR0LoggerR3
491 && !pVCpu->vmm.s.pR0LoggerR3->fCreated)
492 {
493 rc = VMMR3UpdateLoggers(pVM);
494 if (RT_FAILURE(rc))
495 return rc;
496 }
497#endif
498
499 /*
500 * Call Ring-0 entry with init code.
501 */
502 for (;;)
503 {
504#ifdef NO_SUPCALLR0VMM
505 //rc = VERR_GENERAL_FAILURE;
506 rc = VINF_SUCCESS;
507#else
508 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, VMMGetSvnRev(), NULL);
509#endif
510 /*
511 * Flush the logs.
512 */
513#ifdef LOG_ENABLED
514 if ( pVCpu->vmm.s.pR0LoggerR3
515 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
516 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
517#endif
518 if (rc != VINF_VMM_CALL_HOST)
519 break;
520 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
521 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
522 break;
523 /* Resume R0 */
524 }
525
526 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
527 {
528 LogRel(("R0 init failed, rc=%Rra\n", rc));
529 if (RT_SUCCESS(rc))
530 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
531 }
532 return rc;
533}
534
535
536/**
537 * Initializes the RC VMM.
538 *
539 * @returns VBox status code.
540 * @param pVM Pointer to the VM.
541 */
542VMMR3_INT_DECL(int) VMMR3InitRC(PVM pVM)
543{
544 PVMCPU pVCpu = VMMGetCpu(pVM);
545 Assert(pVCpu && pVCpu->idCpu == 0);
546
547 /* In VMX mode, there's no need to init RC. */
548 if (pVM->vmm.s.fSwitcherDisabled)
549 return VINF_SUCCESS;
550
551 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
552
553 /*
554 * Call VMMGCInit():
555 * -# resolve the address.
556 * -# setup stackframe and EIP to use the trampoline.
557 * -# do a generic hypervisor call.
558 */
559 RTRCPTR RCPtrEP;
560 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
561 if (RT_SUCCESS(rc))
562 {
563 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
564 uint64_t u64TS = RTTimeProgramStartNanoTS();
565 CPUMPushHyper(pVCpu, (uint32_t)(u64TS >> 32)); /* Param 3: The program startup TS - Hi. */
566 CPUMPushHyper(pVCpu, (uint32_t)u64TS); /* Param 3: The program startup TS - Lo. */
567 CPUMPushHyper(pVCpu, VMMGetSvnRev()); /* Param 2: Version argument. */
568 CPUMPushHyper(pVCpu, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */
569 CPUMPushHyper(pVCpu, pVM->pVMRC); /* Param 0: pVM */
570 CPUMPushHyper(pVCpu, 5 * sizeof(RTRCPTR)); /* trampoline param: stacksize. */
571 CPUMPushHyper(pVCpu, RCPtrEP); /* Call EIP. */
572 CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
573 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
574
575 for (;;)
576 {
577#ifdef NO_SUPCALLR0VMM
578 //rc = VERR_GENERAL_FAILURE;
579 rc = VINF_SUCCESS;
580#else
581 rc = SUPR3CallVMMR0(pVM->pVMR0, 0 /* VCPU 0 */, VMMR0_DO_CALL_HYPERVISOR, NULL);
582#endif
583#ifdef LOG_ENABLED
584 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
585 if ( pLogger
586 && pLogger->offScratch > 0)
587 RTLogFlushRC(NULL, pLogger);
588#endif
589#ifdef VBOX_WITH_RC_RELEASE_LOGGING
590 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
591 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
592 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
593#endif
594 if (rc != VINF_VMM_CALL_HOST)
595 break;
596 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
597 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
598 break;
599 }
600
601 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
602 {
603 VMMR3FatalDump(pVM, pVCpu, rc);
604 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
605 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
606 }
607 AssertRC(rc);
608 }
609 return rc;
610}
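/*
 * The hypervisor-call convention used by VMMR3InitRC above, informally
 * (a sketch derived from the code; the trampoline entry point itself is
 * resolved elsewhere):
 *   - the arguments are pushed right-to-left with CPUMPushHyper(),
 *   - then the total argument size in bytes,
 *   - then the RC address of the routine to call,
 *   - and finally EIP is pointed at pfnCallTrampolineRC before the
 *     VMMR0_DO_CALL_HYPERVISOR ring-0 request is issued.
 */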
611
612
613/**
614 * Called when an init phase completes.
615 *
616 * @returns VBox status code.
617 * @param pVM Pointer to the VM.
618 * @param enmWhat Which init phase.
619 */
620VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
621{
622 int rc = VINF_SUCCESS;
623
624 switch (enmWhat)
625 {
626 case VMINITCOMPLETED_RING3:
627 {
628 /*
629 * Set page attributes to r/w for stack pages.
630 */
631 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
632 {
633 rc = PGMMapSetPage(pVM, pVM->aCpus[idCpu].vmm.s.pbEMTStackRC, VMM_STACK_SIZE,
634 X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
635 AssertRCReturn(rc, rc);
636 }
637
638 /*
639 * Create the EMT yield timer.
640 */
641 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
642 AssertRCReturn(rc, rc);
643
644 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
645 AssertRCReturn(rc, rc);
646
647#ifdef VBOX_WITH_NMI
648 /*
649 * Map the host APIC into GC - This is AMD/Intel + Host OS specific!
650 */
651 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
652 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
653 AssertRCReturn(rc, rc);
654#endif
655
656#ifdef VBOX_STRICT_VMM_STACK
657 /*
658 * Setup the stack guard pages: Two inaccessible pages at each sides of the
659 * stack to catch over/under-flows.
660 */
661 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
662 {
663 uint8_t *pbEMTStackR3 = pVM->aCpus[idCpu].vmm.s.pbEMTStackR3;
664
665 memset(pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
666 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, true /*fSet*/);
667
668 memset(pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
669 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, true /*fSet*/);
670 }
671 pVM->vmm.s.fStackGuardsStationed = true;
672#endif
673 break;
674 }
675
676 case VMINITCOMPLETED_RING0:
677 {
678 /*
679 * Disable the periodic preemption timers if we can use the
680 * VMX-preemption timer instead.
681 */
682 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
683 && HWACCMR3IsVmxPreemptionTimerUsed(pVM))
684 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
685 LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
686 break;
687 }
688
689 default: /* shuts up gcc */
690 break;
691 }
692
693 return rc;
694}
695
696
697/**
698 * Terminate the VMM bits.
699 *
700 * @returns VINF_SUCCESS.
701 * @param pVM Pointer to the VM.
702 */
703VMMR3_INT_DECL(int) VMMR3Term(PVM pVM)
704{
705 PVMCPU pVCpu = VMMGetCpu(pVM);
706 Assert(pVCpu && pVCpu->idCpu == 0);
707
708 /*
709 * Call Ring-0 entry with termination code.
710 */
711 int rc;
712 for (;;)
713 {
714#ifdef NO_SUPCALLR0VMM
715 //rc = VERR_GENERAL_FAILURE;
716 rc = VINF_SUCCESS;
717#else
718 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
719#endif
720 /*
721 * Flush the logs.
722 */
723#ifdef LOG_ENABLED
724 if ( pVCpu->vmm.s.pR0LoggerR3
725 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
726 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
727#endif
728 if (rc != VINF_VMM_CALL_HOST)
729 break;
730 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
731 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
732 break;
733 /* Resume R0 */
734 }
735 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
736 {
737 LogRel(("VMMR3Term: R0 term failed, rc=%Rra. (warning)\n", rc));
738 if (RT_SUCCESS(rc))
739 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
740 }
741
742 for (VMCPUID i = 0; i < pVM->cCpus; i++)
743 {
744 RTSemEventDestroy(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
745 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
746 }
747 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
748 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
749 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
750 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
751 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousDone);
752 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
753 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousDoneCaller);
754 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
755
756#ifdef VBOX_STRICT_VMM_STACK
757 /*
758 * Make the two stack guard pages present again.
759 */
760 if (pVM->vmm.s.fStackGuardsStationed)
761 {
762 for (VMCPUID i = 0; i < pVM->cCpus; i++)
763 {
764 uint8_t *pbEMTStackR3 = pVM->aCpus[i].vmm.s.pbEMTStackR3;
765 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, false /*fSet*/);
766 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, false /*fSet*/);
767 }
768 pVM->vmm.s.fStackGuardsStationed = false;
769 }
770#endif
771
772 vmmTermFormatTypes();
773 return rc;
774}
775
776
777/**
778 * Applies relocations to data and code managed by this
779 * component. This function will be called at init and
780 * whenever the VMM needs to relocate itself inside the GC.
781 *
782 * The VMM will need to apply relocations to the core code.
783 *
784 * @param pVM Pointer to the VM.
785 * @param offDelta The relocation delta.
786 */
787VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
788{
789 LogFlow(("VMMR3Relocate: offDelta=%RGv\n", offDelta));
790
791 /*
792 * Recalc the RC address.
793 */
794#ifdef VBOX_WITH_RAW_MODE
795 pVM->vmm.s.pvCoreCodeRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pvCoreCodeR3);
796#endif
797
798 /*
799 * The stack.
800 */
801 for (VMCPUID i = 0; i < pVM->cCpus; i++)
802 {
803 PVMCPU pVCpu = &pVM->aCpus[i];
804
805 CPUMSetHyperESP(pVCpu, CPUMGetHyperESP(pVCpu) + offDelta);
806
807 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
808 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
809 }
810
811 /*
812 * All the switchers.
813 */
814 vmmR3SwitcherRelocate(pVM, offDelta);
815
816 /*
817 * Get other RC entry points.
818 */
819 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
820 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Rra\n", rc));
821
822 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
823 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Rra\n", rc));
824
825 /*
826 * Update the logger.
827 */
828 VMMR3UpdateLoggers(pVM);
829}
830
831
832/**
833 * Updates the settings for the RC and R0 loggers.
834 *
835 * @returns VBox status code.
836 * @param pVM Pointer to the VM.
837 */
838VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
839{
840 /*
841 * Simply clone the logger instance (for RC).
842 */
843 int rc = VINF_SUCCESS;
844 RTRCPTR RCPtrLoggerFlush = 0;
845
846 if (pVM->vmm.s.pRCLoggerR3
847#ifdef VBOX_WITH_RC_RELEASE_LOGGING
848 || pVM->vmm.s.pRCRelLoggerR3
849#endif
850 )
851 {
852 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &RCPtrLoggerFlush);
853 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Rra\n", rc));
854 }
855
856 if (pVM->vmm.s.pRCLoggerR3)
857 {
858 RTRCPTR RCPtrLoggerWrapper = 0;
859 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &RCPtrLoggerWrapper);
860 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Rra\n", rc));
861
862 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
863 rc = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pRCLoggerR3, pVM->vmm.s.cbRCLogger,
864 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
865 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
866 }
867
868#ifdef VBOX_WITH_RC_RELEASE_LOGGING
869 if (pVM->vmm.s.pRCRelLoggerR3)
870 {
871 RTRCPTR RCPtrLoggerWrapper = 0;
872 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &RCPtrLoggerWrapper);
873 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Rra\n", rc));
874
875 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
876 rc = RTLogCloneRC(RTLogRelDefaultInstance(), pVM->vmm.s.pRCRelLoggerR3, pVM->vmm.s.cbRCRelLogger,
877 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
878 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
879 }
880#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
881
882#ifdef LOG_ENABLED
883 /*
884 * For the ring-0 EMT logger, we use a per-thread logger instance
885 * in ring-0. Only initialize it once.
886 */
887 PRTLOGGER const pDefault = RTLogDefaultInstance();
888 for (VMCPUID i = 0; i < pVM->cCpus; i++)
889 {
890 PVMCPU pVCpu = &pVM->aCpus[i];
891 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3;
892 if (pR0LoggerR3)
893 {
894 if (!pR0LoggerR3->fCreated)
895 {
896 RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
897 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
898 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerWrapper not found! rc=%Rra\n", rc), rc);
899
900 RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
901 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
902 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Rra\n", rc), rc);
903
904 rc = RTLogCreateForR0(&pR0LoggerR3->Logger, pR0LoggerR3->cbLogger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
905 pfnLoggerWrapper, pfnLoggerFlush,
906 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
907 AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Rra\n", rc), rc);
908
909 RTR0PTR pfnLoggerPrefix = NIL_RTR0PTR;
910 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerPrefix", &pfnLoggerPrefix);
911 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerPrefix not found! rc=%Rra\n", rc), rc);
912 rc = RTLogSetCustomPrefixCallbackForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger), pfnLoggerPrefix, NIL_RTR0PTR);
913 AssertReleaseMsgRCReturn(rc, ("RTLogSetCustomPrefixCallback failed! rc=%Rra\n", rc), rc);
914
915 pR0LoggerR3->idCpu = i;
916 pR0LoggerR3->fCreated = true;
917 pR0LoggerR3->fFlushingDisabled = false;
918
919 }
920
921 rc = RTLogCopyGroupsAndFlagsForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger), pDefault,
922 RTLOGFLAGS_BUFFERED, UINT32_MAX);
923 AssertRC(rc);
924 }
925 }
926#endif
927 return rc;
928}
929
930
931/**
932 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg1Weak output.
933 *
934 * @returns Pointer to the buffer.
935 * @param pVM Pointer to the VM.
936 */
937VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
938{
939 if (HWACCMIsEnabled(pVM))
940 return pVM->vmm.s.szRing0AssertMsg1;
941
942 RTRCPTR RCPtr;
943 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg1", &RCPtr);
944 if (RT_SUCCESS(rc))
945 return (const char *)MMHyperRCToR3(pVM, RCPtr);
946
947 return NULL;
948}
949
950
951/**
952 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg2Weak output.
953 *
954 * @returns Pointer to the buffer.
955 * @param pVM Pointer to the VM.
956 */
957VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
958{
959 if (HWACCMIsEnabled(pVM))
960 return pVM->vmm.s.szRing0AssertMsg2;
961
962 RTRCPTR RCPtr;
963 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg2", &RCPtr);
964 if (RT_SUCCESS(rc))
965 return (const char *)MMHyperRCToR3(pVM, RCPtr);
966
967 return NULL;
968}
969
970
971/**
972 * Execute state save operation.
973 *
974 * @returns VBox status code.
975 * @param pVM Pointer to the VM.
976 * @param pSSM SSM operation handle.
977 */
978static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
979{
980 LogFlow(("vmmR3Save:\n"));
981
982 /*
983 * Save the started/stopped state of all CPUs except 0 as it will always
984 * be running. This avoids breaking the saved state version. :-)
985 */
986 for (VMCPUID i = 1; i < pVM->cCpus; i++)
987 SSMR3PutBool(pSSM, VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(&pVM->aCpus[i])));
988
989 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
990}
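/*
 * Informal layout of the "vmm" saved state unit written above (derived from
 * vmmR3Save/vmmR3Load in this file, saved state version VMM_SAVED_STATE_VERSION):
 *
 *   bool     afStarted[cCpus - 1]    started/stopped flag for VCPU 1..cCpus-1
 *   uint32_t UINT32_MAX              terminator
 *
 * VCPU 0 is deliberately not saved since it is always started.
 */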
991
992
993/**
994 * Execute state load operation.
995 *
996 * @returns VBox status code.
997 * @param pVM Pointer to the VM.
998 * @param pSSM SSM operation handle.
999 * @param uVersion Data layout version.
1000 * @param uPass The data pass.
1001 */
1002static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1003{
1004 LogFlow(("vmmR3Load:\n"));
1005 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1006
1007 /*
1008 * Validate version.
1009 */
1010 if ( uVersion != VMM_SAVED_STATE_VERSION
1011 && uVersion != VMM_SAVED_STATE_VERSION_3_0)
1012 {
1013 AssertMsgFailed(("vmmR3Load: Invalid version uVersion=%u!\n", uVersion));
1014 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1015 }
1016
1017 if (uVersion <= VMM_SAVED_STATE_VERSION_3_0)
1018 {
1019 /* Ignore the stack bottom, stack pointer and stack bits. */
1020 RTRCPTR RCPtrIgnored;
1021 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1022 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1023#ifdef RT_OS_DARWIN
1024 if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(3,0,0)
1025 && SSMR3HandleVersion(pSSM) < VBOX_FULL_VERSION_MAKE(3,1,0)
1026 && SSMR3HandleRevision(pSSM) >= 48858
1027 && ( !strcmp(SSMR3HandleHostOSAndArch(pSSM), "darwin.x86")
1028 || !strcmp(SSMR3HandleHostOSAndArch(pSSM), "") )
1029 )
1030 SSMR3Skip(pSSM, 16384);
1031 else
1032 SSMR3Skip(pSSM, 8192);
1033#else
1034 SSMR3Skip(pSSM, 8192);
1035#endif
1036 }
1037
1038 /*
1039 * Restore the VMCPU states. VCPU 0 is always started.
1040 */
1041 VMCPU_SET_STATE(&pVM->aCpus[0], VMCPUSTATE_STARTED);
1042 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1043 {
1044 bool fStarted;
1045 int rc = SSMR3GetBool(pSSM, &fStarted);
1046 if (RT_FAILURE(rc))
1047 return rc;
1048 VMCPU_SET_STATE(&pVM->aCpus[i], fStarted ? VMCPUSTATE_STARTED : VMCPUSTATE_STOPPED);
1049 }
1050
1051 /* terminator */
1052 uint32_t u32;
1053 int rc = SSMR3GetU32(pSSM, &u32);
1054 if (RT_FAILURE(rc))
1055 return rc;
1056 if (u32 != UINT32_MAX)
1057 {
1058 AssertMsgFailed(("u32=%#x\n", u32));
1059 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1060 }
1061 return VINF_SUCCESS;
1062}
1063
1064
1065/**
1066 * Resolve a builtin RC symbol.
1067 *
1068 * Called by PDM when loading or relocating RC modules.
1069 *
1070 * @returns VBox status code.
1071 * @param pVM Pointer to the VM.
1072 * @param pszSymbol The symbol to resolve.
1073 * @param pRCPtrValue Where to store the symbol value.
1074 *
1075 * @remark This has to work before VMMR3Relocate() is called.
1076 */
1077VMMR3_INT_DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue)
1078{
1079 if (!strcmp(pszSymbol, "g_Logger"))
1080 {
1081 if (pVM->vmm.s.pRCLoggerR3)
1082 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
1083 *pRCPtrValue = pVM->vmm.s.pRCLoggerRC;
1084 }
1085 else if (!strcmp(pszSymbol, "g_RelLogger"))
1086 {
1087#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1088 if (pVM->vmm.s.pRCRelLoggerR3)
1089 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
1090 *pRCPtrValue = pVM->vmm.s.pRCRelLoggerRC;
1091#else
1092 *pRCPtrValue = NIL_RTRCPTR;
1093#endif
1094 }
1095 else
1096 return VERR_SYMBOL_NOT_FOUND;
1097 return VINF_SUCCESS;
1098}
1099
1100
1101/**
1102 * Suspends the CPU yielder.
1103 *
1104 * @param pVM Pointer to the VM.
1105 */
1106VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM)
1107{
1108 VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
1109 if (!pVM->vmm.s.cYieldResumeMillies)
1110 {
1111 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1112 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1113 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1114 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1115 else
1116 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1117 TMTimerStop(pVM->vmm.s.pYieldTimer);
1118 }
1119 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1120}
1121
1122
1123/**
1124 * Stops the CPU yielder.
1125 *
1126 * @param pVM Pointer to the VM.
1127 */
1128VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM)
1129{
1130 if (!pVM->vmm.s.cYieldResumeMillies)
1131 TMTimerStop(pVM->vmm.s.pYieldTimer);
1132 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1133 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1134}
1135
1136
1137/**
1138 * Resumes the CPU yielder when it has been suspended or stopped.
1139 *
1140 * @param pVM Pointer to the VM.
1141 */
1142VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM)
1143{
1144 if (pVM->vmm.s.cYieldResumeMillies)
1145 {
1146 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1147 pVM->vmm.s.cYieldResumeMillies = 0;
1148 }
1149}
1150
1151
1152/**
1153 * Internal timer callback function.
1154 *
1155 * @param pVM The VM.
1156 * @param pTimer The timer handle.
1157 * @param pvUser User argument specified upon timer creation.
1158 */
1159static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1160{
1161 NOREF(pvUser);
1162
1163 /*
1164 * This really needs some careful tuning. While we shouldn't be too greedy since
1165 * that'll cause the rest of the system to stop up, we shouldn't be too nice either
1166 * because that'll cause us to stop up.
1167 *
1168 * The current logic is to use the default interval when there is no lag worth
1169 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1170 *
1171 * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
1172 * so the lag is up to date.)
1173 */
1174 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1175 if ( u64Lag < 50000000 /* 50ms */
1176 || ( u64Lag < 1000000000 /* 1s */
1177 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1178 )
1179 {
1180 uint64_t u64Elapsed = RTTimeNanoTS();
1181 pVM->vmm.s.u64LastYield = u64Elapsed;
1182
1183 RTThreadYield();
1184
1185#ifdef LOG_ENABLED
1186 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1187 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1188#endif
1189 }
1190 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1191}
1192
1193
1194/**
1195 * Executes guest code in the raw-mode context.
1196 *
1197 * @param pVM Pointer to the VM.
1198 * @param pVCpu Pointer to the VMCPU.
1199 */
1200VMMR3_INT_DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu)
1201{
1202 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1203
1204 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1205
1206 /*
1207 * Set the EIP and ESP.
1208 */
1209 CPUMSetHyperEIP(pVCpu, CPUMGetGuestEFlags(pVCpu) & X86_EFL_VM
1210 ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
1211 : pVM->vmm.s.pfnCPUMRCResumeGuest);
1212 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
1213
1214 /*
1215 * We hide log flushes (outer) and hypervisor interrupts (inner).
1216 */
1217 for (;;)
1218 {
1219#ifdef VBOX_STRICT
1220 if (RT_UNLIKELY(!CPUMGetHyperCR3(pVCpu) || CPUMGetHyperCR3(pVCpu) != PGMGetHyperCR3(pVCpu)))
1221 EMR3FatalError(pVCpu, VERR_VMM_HYPER_CR3_MISMATCH);
1222 PGMMapCheck(pVM);
1223#endif
1224 int rc;
1225 do
1226 {
1227#ifdef NO_SUPCALLR0VMM
1228 rc = VERR_GENERAL_FAILURE;
1229#else
1230 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
1231 if (RT_LIKELY(rc == VINF_SUCCESS))
1232 rc = pVCpu->vmm.s.iLastGZRc;
1233#endif
1234 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1235
1236 /*
1237 * Flush the logs.
1238 */
1239#ifdef LOG_ENABLED
1240 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
1241 if ( pLogger
1242 && pLogger->offScratch > 0)
1243 RTLogFlushRC(NULL, pLogger);
1244#endif
1245#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1246 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
1247 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1248 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
1249#endif
1250 if (rc != VINF_VMM_CALL_HOST)
1251 {
1252 Log2(("VMMR3RawRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1253 return rc;
1254 }
1255 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1256 if (RT_FAILURE(rc))
1257 return rc;
1258 /* Resume GC */
1259 }
1260}
1261
1262
1263/**
1264 * Executes guest code (Intel VT-x and AMD-V).
1265 *
1266 * @param pVM Pointer to the VM.
1267 * @param pVCpu Pointer to the VMCPU.
1268 */
1269VMMR3_INT_DECL(int) VMMR3HwAccRunGC(PVM pVM, PVMCPU pVCpu)
1270{
1271 Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1272
1273 for (;;)
1274 {
1275 int rc;
1276 do
1277 {
1278#ifdef NO_SUPCALLR0VMM
1279 rc = VERR_GENERAL_FAILURE;
1280#else
1281 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, pVCpu->idCpu);
1282 if (RT_LIKELY(rc == VINF_SUCCESS))
1283 rc = pVCpu->vmm.s.iLastGZRc;
1284#endif
1285 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1286
1287#if 0 /* todo triggers too often */
1288 Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TO_R3));
1289#endif
1290
1291#ifdef LOG_ENABLED
1292 /*
1293 * Flush the log
1294 */
1295 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3;
1296 if ( pR0LoggerR3
1297 && pR0LoggerR3->Logger.offScratch > 0)
1298 RTLogFlushR0(NULL, &pR0LoggerR3->Logger);
1299#endif /* LOG_ENABLED */
1300 if (rc != VINF_VMM_CALL_HOST)
1301 {
1302 Log2(("VMMR3HwAccRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1303 return rc;
1304 }
1305 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1306 if (RT_FAILURE(rc))
1307 return rc;
1308 /* Resume R0 */
1309 }
1310}
1311
1312/**
1313 * VCPU worker for VMMR3SendSipi.
1314 *
1315 * @param pVM Pointer to the VM.
1316 * @param idCpu Virtual CPU to perform SIPI on
1317 * @param uVector SIPI vector
1318 */
1319DECLCALLBACK(int) vmmR3SendSipi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1320{
1321 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1322 VMCPU_ASSERT_EMT(pVCpu);
1323
1324 /** @todo what are we supposed to do if the processor is already running? */
1325 if (EMGetState(pVCpu) != EMSTATE_WAIT_SIPI)
1326 return VERR_ACCESS_DENIED;
1327
1328
1329 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1330
1331 pCtx->cs.Sel = uVector << 8;
1332 pCtx->cs.ValidSel = uVector << 8;
1333 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1334 pCtx->cs.u64Base = uVector << 12;
1335 pCtx->cs.u32Limit = UINT32_C(0x0000ffff);
1336 pCtx->rip = 0;
1337
1338 Log(("vmmR3SendSipi for VCPU %d with vector %x\n", idCpu, uVector));
1339
1340# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
1341 EMSetState(pVCpu, EMSTATE_HALTED);
1342 return VINF_EM_RESCHEDULE;
1343# else /* And if we go the VMCPU::enmState way it can stay here. */
1344 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STOPPED);
1345 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1346 return VINF_SUCCESS;
1347# endif
1348}
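/*
 * Worked example of the CS:IP setup above (illustrative only): a SIPI with
 * uVector = 0x9a yields CS.Sel = 0x9a00, CS.u64Base = 0x9a000 and RIP = 0, so
 * the VCPU resumes execution at physical address 0x9a000, i.e. uVector * 4 KiB,
 * matching the real-mode startup IPI convention.
 */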
1349
1350DECLCALLBACK(int) vmmR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1351{
1352 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1353 VMCPU_ASSERT_EMT(pVCpu);
1354
1355 Log(("vmmR3SendInitIpi for VCPU %d\n", idCpu));
1356 CPUMR3ResetCpu(pVCpu);
1357 return VINF_EM_WAIT_SIPI;
1358}
1359
1360/**
1361 * Sends a SIPI to the virtual CPU by setting CS:EIP to a vector-dependent value
1362 * and unhalting the processor.
1363 *
1364 * @param pVM Pointer to the VM.
1365 * @param idCpu Virtual CPU to perform SIPI on
1366 * @param uVector SIPI vector
1367 */
1368VMMR3_INT_DECL(void) VMMR3SendSipi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1369{
1370 AssertReturnVoid(idCpu < pVM->cCpus);
1371
1372 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendSipi, 3, pVM, idCpu, uVector);
1373 AssertRC(rc);
1374}
1375
1376/**
1377 * Sends init IPI to the virtual CPU.
1378 *
1379 * @param pVM Pointer to the VM.
1380 * @param idCpu Virtual CPU to perform the init IPI on
1381 */
1382VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1383{
1384 AssertReturnVoid(idCpu < pVM->cCpus);
1385
1386 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
1387 AssertRC(rc);
1388}
1389
1390/**
1391 * Registers the guest memory range that can be used for patching
1392 *
1393 * @returns VBox status code.
1394 * @param pVM Pointer to the VM.
1395 * @param pPatchMem Patch memory range
1396 * @param cbPatchMem Size of the memory range
1397 */
1398VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1399{
1400 VM_ASSERT_EMT(pVM);
1401 if (HWACCMIsEnabled(pVM))
1402 return HWACMMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
1403
1404 return VERR_NOT_SUPPORTED;
1405}
1406
1407/**
1408 * Deregisters the guest memory range that can be used for patching
1409 *
1410 * @returns VBox status code.
1411 * @param pVM Pointer to the VM.
1412 * @param pPatchMem Patch memory range
1413 * @param cbPatchMem Size of the memory range
1414 */
1415VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1416{
1417 if (HWACCMIsEnabled(pVM))
1418 return HWACMMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
1419
1420 return VINF_SUCCESS;
1421}
1422
1423
1424/**
1425 * Count returns and have the last non-caller EMT wake up the caller.
1426 *
1427 * @returns VBox strict informational status code for EM scheduling. No failures
1428 * will be returned here, those are for the caller only.
1429 *
1430 * @param pVM Pointer to the VM.
1431 */
1432DECL_FORCE_INLINE(int) vmmR3EmtRendezvousNonCallerReturn(PVM pVM)
1433{
1434 int rcRet = ASMAtomicReadS32(&pVM->vmm.s.i32RendezvousStatus);
1435 uint32_t cReturned = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsReturned);
1436 if (cReturned == pVM->cCpus - 1U)
1437 {
1438 int rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
1439 AssertLogRelRC(rc);
1440 }
1441
1442 AssertLogRelMsgReturn( rcRet <= VINF_SUCCESS
1443 || (rcRet >= VINF_EM_FIRST && rcRet <= VINF_EM_LAST),
1444 ("%Rrc\n", rcRet),
1445 VERR_IPE_UNEXPECTED_INFO_STATUS);
1446 return RT_SUCCESS(rcRet) ? rcRet : VINF_SUCCESS;
1447}
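/*
 * Minimal usage sketch for the rendezvous machinery implemented below
 * (illustrative only; the public entry point and the callback type are the
 * ones declared in VBox/vmm/vmm.h):
 *
 *   static DECLCALLBACK(VBOXSTRICTRC) myRendezvousWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *   {
 *       // Runs while the EMTs are synchronized; pvUser is passed through as-is.
 *       return VINF_SUCCESS;
 *   }
 *   ...
 *   int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, myRendezvousWorker, NULL);
 */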
1448
1449
1450/**
1451 * Common worker for VMMR3EmtRendezvous and VMMR3EmtRendezvousFF.
1452 *
1453 * @returns VBox strict informational status code for EM scheduling. No failures
1454 * will be returned here, those are for the caller only. When
1455 * fIsCaller is set, VINF_SUCCESS is always returned.
1456 *
1457 * @param pVM Pointer to the VM.
1458 * @param pVCpu The VMCPU structure for the calling EMT.
1459 * @param fIsCaller Whether we're the VMMR3EmtRendezvous caller or
1460 * not.
1461 * @param fFlags The flags.
1462 * @param pfnRendezvous The callback.
1463 * @param pvUser The user argument for the callback.
1464 */
1465static int vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
1466 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1467{
1468 int rc;
1469
1470 /*
1471 * Enter, the last EMT triggers the next callback phase.
1472 */
1473 uint32_t cEntered = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsEntered);
1474 if (cEntered != pVM->cCpus)
1475 {
1476 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1477 {
1478 /* Wait for our turn. */
1479 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, RT_INDEFINITE_WAIT);
1480 AssertLogRelRC(rc);
1481 }
1482 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1483 {
1484 /* Wait for the last EMT to arrive and wake everyone up. */
1485 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce, RT_INDEFINITE_WAIT);
1486 AssertLogRelRC(rc);
1487 }
1488 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1489 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1490 {
1491 /* Wait for our turn. */
1492 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1493 AssertLogRelRC(rc);
1494 }
1495 else
1496 {
1497 Assert((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE);
1498
1499 /*
1500 * The execute once is handled specially to optimize the code flow.
1501 *
1502 * The last EMT to arrive will perform the callback and the other
1503 * EMTs will wait on the Done/DoneCaller semaphores (instead of
1504 * the EnterOneByOne/AllAtOnce) in the meanwhile. When the callback
1505 * returns, that EMT will initiate the normal return sequence.
1506 */
1507 if (!fIsCaller)
1508 {
1509 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1510 AssertLogRelRC(rc);
1511
1512 return vmmR3EmtRendezvousNonCallerReturn(pVM);
1513 }
1514 return VINF_SUCCESS;
1515 }
1516 }
1517 else
1518 {
1519 /*
1520 * All EMTs are waiting, clear the FF and take action according to the
1521 * execution method.
1522 */
1523 VM_FF_CLEAR(pVM, VM_FF_EMT_RENDEZVOUS);
1524
1525 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1526 {
1527 /* Wake up everyone. */
1528 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
1529 AssertLogRelRC(rc);
1530 }
1531 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1532 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1533 {
1534 /* Figure out who to wake up and wake it up. If it's us, this is
1535 easy; otherwise signal the first EMT in order and wait for our turn. */
1536 VMCPUID iFirst = (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1537 ? 0
1538 : pVM->cCpus - 1U;
1539 if (pVCpu->idCpu != iFirst)
1540 {
1541 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iFirst]);
1542 AssertLogRelRC(rc);
1543 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1544 AssertLogRelRC(rc);
1545 }
1546 }
1547 /* else: execute the handler on the current EMT and wake up one or more threads afterwards. */
1548 }
1549
1550
1551 /*
1552 * Do the callback and update the status if necessary.
1553 */
1554 if ( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1555 || RT_SUCCESS(ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus)) )
1556 {
1557 VBOXSTRICTRC rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
1558 if (rcStrict != VINF_SUCCESS)
1559 {
1560 AssertLogRelMsg( rcStrict <= VINF_SUCCESS
1561 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
1562 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1563 int32_t i32RendezvousStatus;
1564 do
1565 {
1566 i32RendezvousStatus = ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus);
1567 if ( rcStrict == i32RendezvousStatus
1568 || RT_FAILURE(i32RendezvousStatus)
1569 || ( i32RendezvousStatus != VINF_SUCCESS
1570 && rcStrict > i32RendezvousStatus))
1571 break;
1572 } while (!ASMAtomicCmpXchgS32(&pVM->vmm.s.i32RendezvousStatus, VBOXSTRICTRC_VAL(rcStrict), i32RendezvousStatus));
1573 }
1574 }
1575
1576 /*
1577 * Increment the done counter and take action depending on whether we're
1578 * the last to finish callback execution.
1579 */
1580 uint32_t cDone = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsDone);
1581 if ( cDone != pVM->cCpus
1582 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE)
1583 {
1584 /* Signal the next EMT? */
1585 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1586 {
1587 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
1588 AssertLogRelRC(rc);
1589 }
1590 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
1591 {
1592 Assert(cDone == pVCpu->idCpu + 1U);
1593 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu + 1U]);
1594 AssertLogRelRC(rc);
1595 }
1596 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1597 {
1598 Assert(pVM->cCpus - cDone == pVCpu->idCpu);
1599 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVM->cCpus - cDone - 1U]);
1600 AssertLogRelRC(rc);
1601 }
1602
1603 /* Wait for the rest to finish (the caller waits on hEvtRendezvousDoneCaller). */
1604 if (!fIsCaller)
1605 {
1606 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1607 AssertLogRelRC(rc);
1608 }
1609 }
1610 else
1611 {
1612 /* Callback execution is all done; tell the rest to return. */
1613 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
1614 AssertLogRelRC(rc);
1615 }
1616
1617 if (!fIsCaller)
1618 return vmmR3EmtRendezvousNonCallerReturn(pVM);
1619 return VINF_SUCCESS;
1620}
1621
1622
1623/**
1624 * Called in response to VM_FF_EMT_RENDEZVOUS.
1625 *
1626 * @returns VBox strict status code - EM scheduling. No errors will be returned
1627 * here, nor will any non-EM scheduling status codes be returned.
1628 *
1629 * @param pVM Pointer to the VM.
1630 * @param pVCpu The handle of the calling EMT.
1631 *
1632 * @thread EMT
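 *
 * @par Dispatch sketch
 *  A hedged illustration (not taken from EM) of how an EMT loop might
 *  dispatch this force-action flag; compare the busy-wait loop in
 *  VMMR3EmtRendezvous below:
 * @code
 *  if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
 *      rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
 * @endcode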
1633 */
1634VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu)
1635{
1636 Assert(!pVCpu->vmm.s.fInRendezvous);
1637 pVCpu->vmm.s.fInRendezvous = true;
1638 int rc = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1639 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1640 pVCpu->vmm.s.fInRendezvous = false;
1641 return rc;
1642}
1643
1644
1645/**
1646 * EMT rendezvous.
1647 *
1648 * Gathers all the EMTs and executes some code on each of them, either one
1649 * by one or all at once.
1650 *
1651 * @returns VBox strict status code. This will be the first error,
1652 * VINF_SUCCESS, or an EM scheduling status code.
1653 *
1654 * @param pVM Pointer to the VM.
1655 * @param fFlags Flags indicating execution methods. See
1656 * grp_VMMR3EmtRendezvous_fFlags.
1657 * @param pfnRendezvous The callback.
1658 * @param pvUser User argument for the callback.
1659 *
1660 * @thread Any.
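 *
 * @par Usage sketch
 *  An illustrative example, not taken from the original sources; the
 *  callback and counter are hypothetical and are assumed to follow the
 *  PFNVMMEMTRENDEZVOUS signature used by this API:
 * @code
 *  static DECLCALLBACK(VBOXSTRICTRC) myRendezvousCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *  {
 *      uint32_t *pcInvocations = (uint32_t *)pvUser;
 *      ASMAtomicIncU32(pcInvocations);         // runs once on each EMT
 *      return VINF_SUCCESS;
 *  }
 *
 *  // elsewhere, on any thread:
 *  uint32_t cInvocations = 0;
 *  int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
 *                              myRendezvousCallback, &cInvocations);
 * @endcode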
1661 */
1662VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1663{
1664 /*
1665 * Validate input.
1666 */
1667 AssertMsg( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID
1668 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) <= VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
1669 && !(fFlags & ~VMMEMTRENDEZVOUS_FLAGS_VALID_MASK), ("%#x\n", fFlags));
1670 AssertMsg( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1671 || ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE
1672 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE),
1673 ("type %u\n", fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK));
1674
1675 VBOXSTRICTRC rcStrict;
1676 PVMCPU pVCpu = VMMGetCpu(pVM);
1677 if (!pVCpu)
1678 /*
1679 * Forward the request to an EMT thread.
1680 */
1681 rcStrict = VMR3ReqCallWait(pVM, VMCPUID_ANY,
1682 (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
1683 else if (pVM->cCpus == 1)
1684 {
1685 /*
1686 * Shortcut for the single EMT case.
1687 */
1688 AssertLogRelReturn(!pVCpu->vmm.s.fInRendezvous, VERR_DEADLOCK);
1689 pVCpu->vmm.s.fInRendezvous = true;
1690 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
1691 pVCpu->vmm.s.fInRendezvous = false;
1692 }
1693 else
1694 {
1695 /*
1696 * Spin lock. If busy, wait for the other EMT to finish while keeping a
1697 * lookout for the RENDEZVOUS FF.
1698 */
1699 int rc;
1700 rcStrict = VINF_SUCCESS;
1701 if (RT_UNLIKELY(!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0)))
1702 {
1703 AssertLogRelReturn(!pVCpu->vmm.s.fInRendezvous, VERR_DEADLOCK);
1704
1705 while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
1706 {
1707 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1708 {
1709 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
1710 if ( rc != VINF_SUCCESS
1711 && ( rcStrict == VINF_SUCCESS
1712 || rcStrict > rc))
1713 rcStrict = rc;
1714 /** @todo Perhaps deal with termination here? */
1715 }
1716 ASMNopPause();
1717 }
1718 }
1719 Assert(!VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS));
1720 Assert(!pVCpu->vmm.s.fInRendezvous);
1721 pVCpu->vmm.s.fInRendezvous = true;
1722
1723 /*
1724 * Clear the slate. This is a semaphore ping-pong orgy. :-)
1725 */
1726 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1727 {
1728 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i], 0);
1729 AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
1730 }
1731 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
1732 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
1733 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
1734 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
1735 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
1736 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
1737 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
1738 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
1739 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
1740 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
1741 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
1742
1743 /*
1744 * Set the FF and poke the other EMTs.
1745 */
1746 VM_FF_SET(pVM, VM_FF_EMT_RENDEZVOUS);
1747 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_POKE);
1748
1749 /*
1750 * Do the same ourselves.
1751 */
1752 vmmR3EmtRendezvousCommon(pVM, pVCpu, true /* fIsCaller */, fFlags, pfnRendezvous, pvUser);
1753
1754 /*
1755 * The caller waits for the other EMTs to be done and return before doing
1756 * the cleanup. This does away with the wakeup / reset races we would otherwise
1757 * risk in the multiple release event semaphore code (hEvtRendezvousDoneCaller).
1758 */
1759 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
1760 AssertLogRelRC(rc);
1761
1762 /*
1763 * Get the return code and clean up a little bit.
1764 */
1765 int rcMy = pVM->vmm.s.i32RendezvousStatus;
1766 ASMAtomicWriteNullPtr((void * volatile *)&pVM->vmm.s.pfnRendezvous);
1767
1768 ASMAtomicWriteU32(&pVM->vmm.s.u32RendezvousLock, 0);
1769 pVCpu->vmm.s.fInRendezvous = false;
1770
1771 /*
1772 * Merge rcStrict and rcMy.
1773 */
1774 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
1775 if ( rcMy != VINF_SUCCESS
1776 && ( rcStrict == VINF_SUCCESS
1777 || rcStrict > rcMy))
1778 rcStrict = rcMy;
1779 }
1780
1781 AssertLogRelMsgReturn( rcStrict <= VINF_SUCCESS
1782 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
1783 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
1784 VERR_IPE_UNEXPECTED_INFO_STATUS);
1785 return VBOXSTRICTRC_VAL(rcStrict);
1786}
1787
1788
1789/**
1790 * Disables/enables EMT rendezvous.
1791 *
1792 * This is used to make sure EMT rendezvous does not take place while
1793 * processing a priority request.
1794 *
1795 * @returns Old rendezvous-disabled state.
1796 * @param pVCpu The handle of the calling EMT.
1797 * @param fDisabled True if disabled, false if enabled.
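 *
 * @par Usage sketch
 *  Illustrative only; the priority request processing in the middle is a
 *  placeholder for whatever must run without rendezvous interference:
 * @code
 *  bool fOld = VMMR3EmtRendezvousSetDisabled(pVCpu, true);    // disable
 *  // ... process the priority request ...
 *  VMMR3EmtRendezvousSetDisabled(pVCpu, fOld);                // restore
 * @endcode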
1798 */
1799VMMR3_INT_DECL(bool) VMMR3EmtRendezvousSetDisabled(PVMCPU pVCpu, bool fDisabled)
1800{
1801 VMCPU_ASSERT_EMT(pVCpu);
1802 bool fOld = pVCpu->vmm.s.fInRendezvous;
1803 pVCpu->vmm.s.fInRendezvous = fDisabled;
1804 return fOld;
1805}
1806
1807
1808/**
1809 * Read from the ring-0 jump buffer stack.
1810 *
1811 * @returns VBox status code.
1812 *
1813 * @param pVM Pointer to the VM.
1814 * @param idCpu The ID of the source CPU context (for the address).
1815 * @param R0Addr Where to start reading.
1816 * @param pvBuf Where to store the data we've read.
1817 * @param cbRead The number of bytes to read.
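 *
 * @par Usage sketch
 *  An illustrative call, not from the original sources; idCpu and
 *  R0AddrFrame are assumed to come from the ring-0 jump buffer, e.g.
 *  while producing a guru meditation stack dump:
 * @code
 *  uint64_t uRetAddr = 0;
 *  int rc = VMMR3ReadR0Stack(pVM, idCpu, R0AddrFrame, &uRetAddr, sizeof(uRetAddr));
 * @endcode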
1818 */
1819VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead)
1820{
1821 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1822 AssertReturn(pVCpu, VERR_INVALID_PARAMETER);
1823
1824#ifdef VMM_R0_SWITCH_STACK
1825 RTHCUINTPTR off = R0Addr - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
1826#else
1827 RTHCUINTPTR off = pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack - (pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck - R0Addr);
1828#endif
1829 if ( off > VMM_STACK_SIZE
1830 || off + cbRead >= VMM_STACK_SIZE)
1831 return VERR_INVALID_POINTER;
1832
1833 memcpy(pvBuf, &pVCpu->vmm.s.pbEMTStackR3[off], cbRead);
1834 return VINF_SUCCESS;
1835}
1836
1837
1838/**
1839 * Calls an RC function.
1840 *
1841 * @param pVM Pointer to the VM.
1842 * @param RCPtrEntry The address of the RC function.
1843 * @param cArgs The number of arguments in the ellipsis.
1844 * @param ... Arguments to the function.
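 *
 * @par Usage sketch
 *  An illustrative call, not from the original sources; RCPtrMyFunction
 *  is a hypothetical raw-mode context address (e.g. resolved with
 *  PDMR3LdrGetSymbolRC) and the two arguments are placeholders:
 * @code
 *  int rc = VMMR3CallRC(pVM, RCPtrMyFunction, 2, uArg0, uArg1);
 * @endcode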
1845 */
1846VMMR3DECL(int) VMMR3CallRC(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, ...)
1847{
1848 va_list args;
1849 va_start(args, cArgs);
1850 int rc = VMMR3CallRCV(pVM, RCPtrEntry, cArgs, args);
1851 va_end(args);
1852 return rc;
1853}
1854
1855
1856/**
1857 * Calls an RC function.
1858 *
1859 * @param pVM Pointer to the VM.
1860 * @param RCPtrEntry The address of the RC function.
1861 * @param cArgs The number of arguments in the ellipsis.
1862 * @param args Arguments to the function.
1863 */
1864VMMR3DECL(int) VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list args)
1865{
1866 /* Raw mode implies 1 VCPU. */
1867 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1868 PVMCPU pVCpu = &pVM->aCpus[0];
1869
1870 Log2(("VMMR3CallRCV: RCPtrEntry=%RRv cArgs=%d\n", RCPtrEntry, cArgs));
1871
1872 /*
1873 * Setup the call frame using the trampoline.
1874 */
1875 memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
1876 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32));
1877 PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
1878 int i = cArgs;
1879 while (i-- > 0)
1880 *pFrame++ = va_arg(args, RTGCUINTPTR32);
1881
1882 CPUMPushHyper(pVCpu, cArgs * sizeof(RTGCUINTPTR32)); /* stack frame size */
1883 CPUMPushHyper(pVCpu, RCPtrEntry); /* what to call */
1884 CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
1885
1886 /*
1887 * We hide log flushes (outer) and hypervisor interrupts (inner).
1888 */
1889 for (;;)
1890 {
1891 int rc;
1892 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
1893 do
1894 {
1895#ifdef NO_SUPCALLR0VMM
1896 rc = VERR_GENERAL_FAILURE;
1897#else
1898 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
1899 if (RT_LIKELY(rc == VINF_SUCCESS))
1900 rc = pVCpu->vmm.s.iLastGZRc;
1901#endif
1902 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1903
1904 /*
1905 * Flush the logs.
1906 */
1907#ifdef LOG_ENABLED
1908 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
1909 if ( pLogger
1910 && pLogger->offScratch > 0)
1911 RTLogFlushRC(NULL, pLogger);
1912#endif
1913#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1914 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
1915 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1916 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
1917#endif
1918 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
1919 VMMR3FatalDump(pVM, pVCpu, rc);
1920 if (rc != VINF_VMM_CALL_HOST)
1921 {
1922 Log2(("VMMR3CallRCV: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1923 return rc;
1924 }
1925 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1926 if (RT_FAILURE(rc))
1927 return rc;
1928 }
1929}
1930
1931
1932/**
1933 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
1934 *
1935 * @returns VBox status code.
1936 * @param pVM Pointer to the VM.
1937 * @param uOperation Operation to execute.
1938 * @param u64Arg Constant argument.
1939 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
1940 * details.
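 *
 * @par Usage sketch
 *  Illustrative only; uMyOperation is a hypothetical VMMR0OPERATION value
 *  and the header-only request must match what the ring-0 handler expects:
 * @code
 *  SUPVMMR0REQHDR Hdr;                      // a request with no payload (illustrative)
 *  Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *  Hdr.cbReq    = sizeof(Hdr);
 *  int rc = VMMR3CallR0(pVM, uMyOperation, 0, &Hdr);
 * @endcode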
1941 */
1942VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
1943{
1944 PVMCPU pVCpu = VMMGetCpu(pVM);
1945 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
1946
1947 /*
1948 * Call the ring-0 entry point, servicing VINF_VMM_CALL_HOST requests as needed.
1949 */
1950 int rc;
1951 for (;;)
1952 {
1953#ifdef NO_SUPCALLR0VMM
1954 rc = VERR_GENERAL_FAILURE;
1955#else
1956 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, uOperation, u64Arg, pReqHdr);
1957#endif
1958 /*
1959 * Flush the logs.
1960 */
1961#ifdef LOG_ENABLED
1962 if ( pVCpu->vmm.s.pR0LoggerR3
1963 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
1964 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
1965#endif
1966 if (rc != VINF_VMM_CALL_HOST)
1967 break;
1968 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1969 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
1970 break;
1971 /* Resume R0 */
1972 }
1973
1974 AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
1975 ("uOperation=%u rc=%Rrc\n", uOperation, rc),
1976 VERR_IPE_UNEXPECTED_INFO_STATUS);
1977 return rc;
1978}
1979
1980
1981/**
1982 * Resumes executing hypervisor code when interrupted by a queue flush or a
1983 * debug event.
1984 *
1985 * @returns VBox status code.
1986 * @param pVM Pointer to the VM.
1987 * @param pVCpu Pointer to the VMCPU.
1988 */
1989VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu)
1990{
1991 Log(("VMMR3ResumeHyper: eip=%RRv esp=%RRv\n", CPUMGetHyperEIP(pVCpu), CPUMGetHyperESP(pVCpu)));
1992 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1993
1994 /*
1995 * We hide log flushes (outer) and hypervisor interrupts (inner).
1996 */
1997 for (;;)
1998 {
1999 int rc;
2000 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
2001 do
2002 {
2003#ifdef NO_SUPCALLR0VMM
2004 rc = VERR_GENERAL_FAILURE;
2005#else
2006 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
2007 if (RT_LIKELY(rc == VINF_SUCCESS))
2008 rc = pVCpu->vmm.s.iLastGZRc;
2009#endif
2010 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2011
2012 /*
2013 * Flush the loggers.
2014 */
2015#ifdef LOG_ENABLED
2016 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
2017 if ( pLogger
2018 && pLogger->offScratch > 0)
2019 RTLogFlushRC(NULL, pLogger);
2020#endif
2021#ifdef VBOX_WITH_RC_RELEASE_LOGGING
2022 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
2023 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2024 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
2025#endif
2026 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2027 VMMR3FatalDump(pVM, pVCpu, rc);
2028 if (rc != VINF_VMM_CALL_HOST)
2029 {
2030 Log(("VMMR3ResumeHyper: returns %Rrc\n", rc));
2031 return rc;
2032 }
2033 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2034 if (RT_FAILURE(rc))
2035 return rc;
2036 }
2037}
2038
2039
2040/**
2041 * Service a call to the ring-3 host code.
2042 *
2043 * @returns VBox status code.
2044 * @param pVM Pointer to the VM.
2045 * @param pVCpu Pointer to the VMCPU.
2046 * @remark Careful with critsects.
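 *
 * @par Context sketch
 *  The requests serviced here originate in ring-0/raw-mode context. A
 *  hedged illustration of the originating side; the helper name and
 *  signature are assumed from VMMRZ and are not defined in this file:
 * @code
 *  // ring-0/RC side: long-jump back to ring-3 to take the PDM lock
 *  rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_LOCK, 0);
 * @endcode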
2047 */
2048static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
2049{
2050 /*
2051 * We must also check for pending critsect exits or else we can deadlock
2052 * when entering other critsects here.
2053 */
2054 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
2055 PDMCritSectFF(pVCpu);
2056
2057 switch (pVCpu->vmm.s.enmCallRing3Operation)
2058 {
2059 /*
2060 * Acquire a critical section.
2061 */
2062 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
2063 {
2064 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectEnterEx((PPDMCRITSECT)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2065 true /*fCallRing3*/);
2066 break;
2067 }
2068
2069 /*
2070 * Acquire the PDM lock.
2071 */
2072 case VMMCALLRING3_PDM_LOCK:
2073 {
2074 pVCpu->vmm.s.rcCallRing3 = PDMR3LockCall(pVM);
2075 break;
2076 }
2077
2078 /*
2079 * Grow the PGM pool.
2080 */
2081 case VMMCALLRING3_PGM_POOL_GROW:
2082 {
2083 pVCpu->vmm.s.rcCallRing3 = PGMR3PoolGrow(pVM);
2084 break;
2085 }
2086
2087 /*
2088 * Maps a page allocation chunk into ring-3 so ring-0 can use it.
2089 */
2090 case VMMCALLRING3_PGM_MAP_CHUNK:
2091 {
2092 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysChunkMap(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2093 break;
2094 }
2095
2096 /*
2097 * Allocates more handy pages.
2098 */
2099 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
2100 {
2101 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateHandyPages(pVM);
2102 break;
2103 }
2104
2105 /*
2106 * Allocates a large page.
2107 */
2108 case VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2109 {
2110 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateLargeHandyPage(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2111 break;
2112 }
2113
2114 /*
2115 * Acquire the PGM lock.
2116 */
2117 case VMMCALLRING3_PGM_LOCK:
2118 {
2119 pVCpu->vmm.s.rcCallRing3 = PGMR3LockCall(pVM);
2120 break;
2121 }
2122
2123 /*
2124 * Acquire the MM hypervisor heap lock.
2125 */
2126 case VMMCALLRING3_MMHYPER_LOCK:
2127 {
2128 pVCpu->vmm.s.rcCallRing3 = MMR3LockCall(pVM);
2129 break;
2130 }
2131
2132#ifdef VBOX_WITH_REM
2133 /*
2134 * Flush REM handler notifications.
2135 */
2136 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
2137 {
2138 REMR3ReplayHandlerNotifications(pVM);
2139 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2140 break;
2141 }
2142#endif
2143
2144 /*
2145 * This is a noop. We just take this route to avoid unnecessary
2146 * tests in the loops.
2147 */
2148 case VMMCALLRING3_VMM_LOGGER_FLUSH:
2149 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2150 LogAlways(("*FLUSH*\n"));
2151 break;
2152
2153 /*
2154 * Set the VM error message.
2155 */
2156 case VMMCALLRING3_VM_SET_ERROR:
2157 VMR3SetErrorWorker(pVM);
2158 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2159 break;
2160
2161 /*
2162 * Set the VM runtime error message.
2163 */
2164 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
2165 pVCpu->vmm.s.rcCallRing3 = VMR3SetRuntimeErrorWorker(pVM);
2166 break;
2167
2168 /*
2169 * Signal a ring 0 hypervisor assertion.
2170 * Cancel the longjmp operation that's in progress.
2171 */
2172 case VMMCALLRING3_VM_R0_ASSERTION:
2173 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
2174 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
2175#ifdef RT_ARCH_X86
2176 pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
2177#else
2178 pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
2179#endif
2180#ifdef VMM_R0_SWITCH_STACK
2181 *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker */
2182#endif
2183 LogRel((pVM->vmm.s.szRing0AssertMsg1));
2184 LogRel((pVM->vmm.s.szRing0AssertMsg2));
2185 return VERR_VMM_RING0_ASSERTION;
2186
2187 /*
2188 * A forced switch to ring 0 for preemption purposes.
2189 */
2190 case VMMCALLRING3_VM_R0_PREEMPT:
2191 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2192 break;
2193
2194 case VMMCALLRING3_FTM_SET_CHECKPOINT:
2195 pVCpu->vmm.s.rcCallRing3 = FTMR3SetCheckpoint(pVM, (FTMCHECKPOINTTYPE)pVCpu->vmm.s.u64CallRing3Arg);
2196 break;
2197
2198 default:
2199 AssertMsgFailed(("enmCallRing3Operation=%d\n", pVCpu->vmm.s.enmCallRing3Operation));
2200 return VERR_VMM_UNKNOWN_RING3_CALL;
2201 }
2202
2203 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
2204 return VINF_SUCCESS;
2205}
2206
2207
2208/**
2209 * Displays the force action flags (FFs).
2210 *
2211 * @param pVM Pointer to the VM.
2212 * @param pHlp The output helpers.
2213 * @param pszArgs The additional arguments (ignored).
2214 */
2215static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2216{
2217 int c;
2218 uint32_t f;
2219 NOREF(pszArgs);
2220
2221#define PRINT_FLAG(prf,flag) do { \
2222 if (f & (prf##flag)) \
2223 { \
2224 static const char *s_psz = #flag; \
2225 if (!(c % 6)) \
2226 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz); \
2227 else \
2228 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2229 c++; \
2230 f &= ~(prf##flag); \
2231 } \
2232 } while (0)
2233
2234#define PRINT_GROUP(prf,grp,sfx) do { \
2235 if (f & (prf##grp##sfx)) \
2236 { \
2237 static const char *s_psz = #grp; \
2238 if (!(c % 5)) \
2239 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : " Groups:\n", s_psz); \
2240 else \
2241 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2242 c++; \
2243 } \
2244 } while (0)
2245
2246 /*
2247 * The global flags.
2248 */
2249 const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;
2250 pHlp->pfnPrintf(pHlp, "Global FFs: %#RX32", fGlobalForcedActions);
2251
2252 /* show the flag mnemonics */
2253 c = 0;
2254 f = fGlobalForcedActions;
2255 PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
2256 PRINT_FLAG(VM_FF_,PDM_QUEUES);
2257 PRINT_FLAG(VM_FF_,PDM_DMA);
2258 PRINT_FLAG(VM_FF_,DBGF);
2259 PRINT_FLAG(VM_FF_,REQUEST);
2260 PRINT_FLAG(VM_FF_,CHECK_VM_STATE);
2261 PRINT_FLAG(VM_FF_,RESET);
2262 PRINT_FLAG(VM_FF_,EMT_RENDEZVOUS);
2263 PRINT_FLAG(VM_FF_,PGM_NEED_HANDY_PAGES);
2264 PRINT_FLAG(VM_FF_,PGM_NO_MEMORY);
2265 PRINT_FLAG(VM_FF_,PGM_POOL_FLUSH_PENDING);
2266 PRINT_FLAG(VM_FF_,REM_HANDLER_NOTIFY);
2267 PRINT_FLAG(VM_FF_,DEBUG_SUSPEND);
2268 if (f)
2269 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2270 else
2271 pHlp->pfnPrintf(pHlp, "\n");
2272
2273 /* the groups */
2274 c = 0;
2275 f = fGlobalForcedActions;
2276 PRINT_GROUP(VM_FF_,EXTERNAL_SUSPENDED,_MASK);
2277 PRINT_GROUP(VM_FF_,EXTERNAL_HALTED,_MASK);
2278 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE,_MASK);
2279 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2280 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_POST,_MASK);
2281 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY_POST,_MASK);
2282 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY,_MASK);
2283 PRINT_GROUP(VM_FF_,ALL_REM,_MASK);
2284 if (c)
2285 pHlp->pfnPrintf(pHlp, "\n");
2286
2287 /*
2288 * Per CPU flags.
2289 */
2290 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2291 {
2292 const uint32_t fLocalForcedActions = pVM->aCpus[i].fLocalForcedActions;
2293 pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX32", i, fLocalForcedActions);
2294
2295 /* show the flag mnemonics */
2296 c = 0;
2297 f = fLocalForcedActions;
2298 PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
2299 PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
2300 PRINT_FLAG(VMCPU_FF_,TIMER);
2301 PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
2302 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
2303 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
2304 PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
2305 PRINT_FLAG(VMCPU_FF_,TRPM_SYNC_IDT);
2306 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_TSS);
2307 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_GDT);
2308 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_LDT);
2309 PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
2310 PRINT_FLAG(VMCPU_FF_,CSAM_SCAN_PAGE);
2311 PRINT_FLAG(VMCPU_FF_,CSAM_PENDING_ACTION);
2312 PRINT_FLAG(VMCPU_FF_,TO_R3);
2313 if (f)
2314 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2315 else
2316 pHlp->pfnPrintf(pHlp, "\n");
2317
2318 if (fLocalForcedActions & VMCPU_FF_INHIBIT_INTERRUPTS)
2319 pHlp->pfnPrintf(pHlp, " intr inhibit RIP: %RGp\n", EMGetInhibitInterruptsPC(&pVM->aCpus[i]));
2320
2321 /* the groups */
2322 c = 0;
2323 f = fLocalForcedActions;
2324 PRINT_GROUP(VMCPU_FF_,EXTERNAL_SUSPENDED,_MASK);
2325 PRINT_GROUP(VMCPU_FF_,EXTERNAL_HALTED,_MASK);
2326 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE,_MASK);
2327 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2328 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_POST,_MASK);
2329 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY_POST,_MASK);
2330 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
2331 PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
2332 PRINT_GROUP(VMCPU_FF_,HWACCM_TO_R3,_MASK);
2333 PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
2334 if (c)
2335 pHlp->pfnPrintf(pHlp, "\n");
2336 }
2337
2338#undef PRINT_FLAG
2339#undef PRINT_GROUP
2340}
2341