VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMM.cpp@ 93583

Last change on this file since 93583 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

1/* $Id: VMM.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18//#define NO_SUPCALLR0VMM
19
20/** @page pg_vmm VMM - The Virtual Machine Monitor
21 *
22 * The VMM component is two things at the moment: it's a component doing a few
23 * management and routing tasks, and it's the whole virtual machine monitor
24 * thing. For hysterical reasons, it is not doing all the management that one
25 * would expect, this is instead done by @ref pg_vm. We'll address this
26 * misdesign eventually, maybe.
27 *
28 * VMM is made up of these components:
29 * - @subpage pg_cfgm
30 * - @subpage pg_cpum
31 * - @subpage pg_dbgf
32 * - @subpage pg_em
33 * - @subpage pg_gim
34 * - @subpage pg_gmm
35 * - @subpage pg_gvmm
36 * - @subpage pg_hm
37 * - @subpage pg_iem
38 * - @subpage pg_iom
39 * - @subpage pg_mm
40 * - @subpage pg_nem
41 * - @subpage pg_pdm
42 * - @subpage pg_pgm
43 * - @subpage pg_selm
44 * - @subpage pg_ssm
45 * - @subpage pg_stam
46 * - @subpage pg_tm
47 * - @subpage pg_trpm
48 * - @subpage pg_vm
49 *
50 *
51 * @see @ref grp_vmm @ref grp_vm @subpage pg_vmm_guideline @subpage pg_raw
52 *
53 *
54 * @section sec_vmmstate VMM State
55 *
56 * @image html VM_Statechart_Diagram.gif
57 *
58 * To be written.
59 *
60 *
61 * @subsection subsec_vmm_init VMM Initialization
62 *
63 * To be written.
64 *
65 *
66 * @subsection subsec_vmm_term VMM Termination
67 *
68 * To be written.
69 *
70 *
71 * @section sec_vmm_limits VMM Limits
72 *
73 * There are various resource limits imposed by the VMM and its
74 * sub-components. We'll list some of them here.
75 *
76 * On 64-bit hosts:
77 * - Max 8191 VMs. Imposed by GVMM's handle allocation (GVMM_MAX_HANDLES),
78 * can be increased up to 64K - 1.
79 * - Max 16TB - 64KB of the host memory can be used for backing VM RAM and
80 * ROM pages. The limit is imposed by the 32-bit page ID used by GMM.
81 * - A VM can be assigned all the memory we can use (16TB), however, the
82 * Main API will restrict this to 2TB (MM_RAM_MAX_IN_MB).
83 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
84 *
85 * On 32-bit hosts:
86 * - Max 127 VMs. Imposed by GMM's per page structure.
87 * - Max 64GB - 64KB of the host memory can be used for backing VM RAM and
88 * ROM pages. The limit is imposed by the 28-bit page ID used
89 * internally in GMM. It is also limited by PAE.
90 * - A VM can be assigned all the memory GMM can allocate, however, the
91 * Main API will restrict this to 3584MB (MM_RAM_MAX_IN_MB).
92 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
93 *
94 */
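/*
 * Illustrative arithmetic for the 64-bit backing limit above (a sketch added for
 * clarity, not part of the original source): with the standard 4 KiB guest page
 * size, 2^32 page IDs * 4 KiB per page = 16 TiB, so it is the 32-bit GMM page ID
 * that caps the RAM/ROM backing at roughly 16 TB.
 */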
95
96
97/*********************************************************************************************************************************
98* Header Files *
99*********************************************************************************************************************************/
100#define LOG_GROUP LOG_GROUP_VMM
101#include <VBox/vmm/vmm.h>
102#include <VBox/vmm/vmapi.h>
103#include <VBox/vmm/pgm.h>
104#include <VBox/vmm/cfgm.h>
105#include <VBox/vmm/pdmqueue.h>
106#include <VBox/vmm/pdmcritsect.h>
107#include <VBox/vmm/pdmcritsectrw.h>
108#include <VBox/vmm/pdmapi.h>
109#include <VBox/vmm/cpum.h>
110#include <VBox/vmm/gim.h>
111#include <VBox/vmm/mm.h>
112#include <VBox/vmm/nem.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
114# include <VBox/vmm/iem.h>
115#endif
116#include <VBox/vmm/iom.h>
117#include <VBox/vmm/trpm.h>
118#include <VBox/vmm/selm.h>
119#include <VBox/vmm/em.h>
120#include <VBox/sup.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/apic.h>
123#include <VBox/vmm/ssm.h>
124#include <VBox/vmm/tm.h>
125#include "VMMInternal.h"
126#include <VBox/vmm/vmcc.h>
127
128#include <VBox/err.h>
129#include <VBox/param.h>
130#include <VBox/version.h>
131#include <VBox/vmm/hm.h>
132#include <iprt/assert.h>
133#include <iprt/alloc.h>
134#include <iprt/asm.h>
135#include <iprt/time.h>
136#include <iprt/semaphore.h>
137#include <iprt/stream.h>
138#include <iprt/string.h>
139#include <iprt/stdarg.h>
140#include <iprt/ctype.h>
141#include <iprt/x86.h>
142
143
144/*********************************************************************************************************************************
145* Defined Constants And Macros *
146*********************************************************************************************************************************/
147/** The saved state version. */
148#define VMM_SAVED_STATE_VERSION 4
149/** The saved state version used by v3.0 and earlier. (Teleportation) */
150#define VMM_SAVED_STATE_VERSION_3_0 3
151
152/** Macro for flushing the ring-0 logging. */
153#define VMM_FLUSH_R0_LOG(a_pVM, a_pVCpu, a_pLogger, a_pR3Logger) \
154 do { \
155 size_t const idxBuf = (a_pLogger)->idxBuf % VMMLOGGER_BUFFER_COUNT; \
156 if ( (a_pLogger)->aBufs[idxBuf].AuxDesc.offBuf == 0 \
157 || (a_pLogger)->aBufs[idxBuf].AuxDesc.fFlushedIndicator) \
158 { /* likely? */ } \
159 else \
160 vmmR3LogReturnFlush(a_pVM, a_pVCpu, a_pLogger, idxBuf, a_pR3Logger); \
161 } while (0)
162
163
164/*********************************************************************************************************************************
165* Internal Functions *
166*********************************************************************************************************************************/
167static void vmmR3InitRegisterStats(PVM pVM);
168static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
169static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
170#if 0 /* pointless when timers don't run on EMT */
171static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser);
172#endif
173static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
174 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
175static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu);
176static FNRTTHREAD vmmR3LogFlusher;
177static void vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf,
178 PRTLOGGER pDstLogger);
179static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
180
181
182
183/**
184 * Initializes the VMM.
185 *
186 * @returns VBox status code.
187 * @param pVM The cross context VM structure.
188 */
189VMMR3_INT_DECL(int) VMMR3Init(PVM pVM)
190{
191 LogFlow(("VMMR3Init\n"));
192
193 /*
194 * Assert alignment, sizes and order.
195 */
196 AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
197 AssertCompile(RT_SIZEOFMEMB(VMCPU, vmm.s) <= RT_SIZEOFMEMB(VMCPU, vmm.padding));
198
199 /*
200 * Init basic VM VMM members.
201 */
202 pVM->vmm.s.pahEvtRendezvousEnterOrdered = NULL;
203 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
204 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
205 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
206 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
207 pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
208 pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
209 pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
210 pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
211 pVM->vmm.s.nsProgramStart = RTTimeProgramStartNanoTS();
212
213#if 0 /* pointless when timers don't run on EMT */
214 /** @cfgm{/YieldEMTInterval, uint32_t, 1, UINT32_MAX, 23, ms}
215 * The EMT yield interval. The EMT yielding is a hack we employ to play a
216 * bit nicer with the rest of the system (like for instance the GUI).
217 */
218 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies,
219 23 /* Value arrived at after experimenting with the grub boot prompt. */);
220 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);
221#endif
222
223 /** @cfgm{/VMM/UsePeriodicPreemptionTimers, boolean, true}
224 * Controls whether we employ per-cpu preemption timers to limit the time
225 * spent executing guest code. This option is not available on all
226 * platforms, in which case the setting is silently ignored. If we are
227 * running in VT-x mode, we will use the VMX-preemption timer instead of
228 * this one when possible.
229 */
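    /* Illustrative usage (an assumption for clarity, not taken from this file): a CFGM
       key like the one documented above can typically be set from the host via extra
       data, e.g.:
           VBoxManage setextradata "<vmname>" "VBoxInternal/VMM/UsePeriodicPreemptionTimers" 0
       since the "VBoxInternal/" extra-data prefix is mapped onto the CFGM tree. */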
230 PCFGMNODE pCfgVMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "VMM");
231 int rc = CFGMR3QueryBoolDef(pCfgVMM, "UsePeriodicPreemptionTimers", &pVM->vmm.s.fUsePeriodicPreemptionTimers, true);
232 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"VMM/UsePeriodicPreemptionTimers\", rc=%Rrc\n", rc), rc);
233
234 /*
235 * Initialize the VMM rendezvous semaphores.
236 */
237 pVM->vmm.s.pahEvtRendezvousEnterOrdered = (PRTSEMEVENT)MMR3HeapAlloc(pVM, MM_TAG_VMM, sizeof(RTSEMEVENT) * pVM->cCpus);
238 if (!pVM->vmm.s.pahEvtRendezvousEnterOrdered)
239 return VERR_NO_MEMORY;
240 for (VMCPUID i = 0; i < pVM->cCpus; i++)
241 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
242 for (VMCPUID i = 0; i < pVM->cCpus; i++)
243 {
244 rc = RTSemEventCreate(&pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
245 AssertRCReturn(rc, rc);
246 }
247 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousEnterOneByOne);
248 AssertRCReturn(rc, rc);
249 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
250 AssertRCReturn(rc, rc);
251 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousDone);
252 AssertRCReturn(rc, rc);
253 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
254 AssertRCReturn(rc, rc);
255 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPush);
256 AssertRCReturn(rc, rc);
257 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousRecursionPop);
258 AssertRCReturn(rc, rc);
259 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
260 AssertRCReturn(rc, rc);
261 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
262 AssertRCReturn(rc, rc);
263
264 /*
265 * Register the saved state data unit.
266 */
267 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
268 NULL, NULL, NULL,
269 NULL, vmmR3Save, NULL,
270 NULL, vmmR3Load, NULL);
271 if (RT_FAILURE(rc))
272 return rc;
273
274 /*
275 * Register the Ring-0 VM handle with the session for fast ioctl calls.
276 */
277 bool const fDriverless = SUPR3IsDriverless();
278 if (!fDriverless)
279 {
280 rc = SUPR3SetVMForFastIOCtl(VMCC_GET_VMR0_FOR_CALL(pVM));
281 if (RT_FAILURE(rc))
282 return rc;
283 }
284
285#ifdef VBOX_WITH_NMI
286 /*
287 * Allocate mapping for the host APIC.
288 */
289 rc = MMR3HyperReserve(pVM, HOST_PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
290 AssertRC(rc);
291#endif
292 if (RT_SUCCESS(rc))
293 {
294 /*
295 * Start the log flusher thread.
296 */
297 if (!fDriverless)
298 rc = RTThreadCreate(&pVM->vmm.s.hLogFlusherThread, vmmR3LogFlusher, pVM, 0 /*cbStack*/,
299 RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "R0LogWrk");
300 if (RT_SUCCESS(rc))
301 {
302
303 /*
304 * Debug info and statistics.
305 */
306 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
307 vmmR3InitRegisterStats(pVM);
308 vmmInitFormatTypes();
309
310 return VINF_SUCCESS;
311 }
312 }
313 /** @todo Need failure cleanup? */
314
315 return rc;
316}
317
318
319/**
320 * VMMR3Init worker that registers the statistics with STAM.
321 *
322 * @param pVM The cross context VM structure.
323 */
324static void vmmR3InitRegisterStats(PVM pVM)
325{
326 RT_NOREF_PV(pVM);
327
328 /* Nothing to do here in driverless mode. */
329 if (SUPR3IsDriverless())
330 return;
331
332 /*
333 * Statistics.
334 */
335 STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches.");
336 STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
337 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
338 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
339 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
340 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
341 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
342 STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
343 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
344 STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
345 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
346 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_READ returns.");
347 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_WRITE returns.");
348 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_COMMIT_WRITE returns.");
349 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ returns.");
350 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_WRITE returns.");
351 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOCommitWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOCommitWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_COMMIT_WRITE returns.");
352 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ_WRITE returns.");
353 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
354 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
355 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRRead, STAMTYPE_COUNTER, "/VMM/RZRet/MSRRead", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_READ returns.");
356 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MSRWrite", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_WRITE returns.");
357 STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
358 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
359 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
360 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
361 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
362 STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
363 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
364 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
365 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
366 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
367 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
368 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
369 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Total, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
370 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Unknown, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Unknown", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns without responsible force flag.");
371 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3FF, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TO_R3.");
372 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3TMVirt, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/TMVirt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_TM_VIRTUAL_SYNC.");
373 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3HandyPages, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Handy", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PGM_NEED_HANDY_PAGES.");
374 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3PDMQueues, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/PDMQueue", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_QUEUES.");
375 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Rendezvous, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Rendezvous", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_EMT_RENDEZVOUS.");
376 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Timer, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Timer", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_TIMER.");
377 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3DMA, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/DMA", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VM_FF_PDM_DMA.");
378 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3CritSect, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/CritSect", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_PDM_CRITSECT.");
379 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iem, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IEM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IEM.");
380 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Iom, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/IOM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns with VMCPU_FF_IOM.");
381 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
382 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
383 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
384 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
385 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
386 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
387
388 STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-Flushes", STAMUNIT_OCCURENCES, "Total number of buffer flushes");
389 STAMR3Register(pVM, &pVM->vmm.s.StatLogFlusherNoWakeUp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, "/VMM/LogFlush/00-NoWakups", STAMUNIT_OCCURENCES, "Times the flusher thread didn't need waking up.");
390
391 for (VMCPUID i = 0; i < pVM->cCpus; i++)
392 {
393 PVMCPU pVCpu = pVM->apCpusR3[i];
394 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlock", i);
395 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOnTime", i);
396 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockOverslept", i);
397 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "", "/PROF/CPU%u/VM/Halt/R0HaltBlockInsomnia", i);
398 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltExec, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec", i);
399 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltExecFromSpin, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromSpin", i);
400 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltExecFromBlock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltExec/FromBlock", i);
401 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3", i);
402 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3FromSpin, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/FromSpin", i);
403 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3Other, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/Other", i);
404 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3PendingFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/PendingFF", i);
405 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3SmallDelta, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/SmallDelta", i);
406 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3PostNoInt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/PostWaitNoInt", i);
407 STAMR3RegisterF(pVM, &pVCpu->vmm.s.StatR0HaltToR3PostPendingFF,STAMTYPE_COUNTER,STAMVISIBILITY_ALWAYS,STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltToR3/PostWaitPendingFF", i);
408 STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0Halts, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryCounter", i);
409 STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0HaltsSucceeded, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistorySucceeded", i);
410 STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0HaltsToRing3, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryToRing3", i);
411
412 STAMR3RegisterF(pVM, &pVCpu->cEmtHashCollisions, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/VMM/EmtHashCollisions/Emt%02u", i);
413
414 PVMMR3CPULOGGER pShared = &pVCpu->vmm.s.u.s.Logger;
415 STAMR3RegisterF(pVM, &pShared->StatFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Reg", i);
416 STAMR3RegisterF(pVM, &pShared->StatCannotBlock, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Reg/CannotBlock", i);
417 STAMR3RegisterF(pVM, &pShared->StatWait, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Reg/Wait", i);
418 STAMR3RegisterF(pVM, &pShared->StatRaces, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Reg/Races", i);
419 STAMR3RegisterF(pVM, &pShared->StatRacesToR0, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Reg/RacesToR0", i);
420 STAMR3RegisterF(pVM, &pShared->cbDropped, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Reg/cbDropped", i);
421 STAMR3RegisterF(pVM, &pShared->cbBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Reg/cbBuf", i);
422 STAMR3RegisterF(pVM, &pShared->idxBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Reg/idxBuf", i);
423
424 pShared = &pVCpu->vmm.s.u.s.RelLogger;
425 STAMR3RegisterF(pVM, &pShared->StatFlushes, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Rel", i);
426 STAMR3RegisterF(pVM, &pShared->StatCannotBlock, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Rel/CannotBlock", i);
427 STAMR3RegisterF(pVM, &pShared->StatWait, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Rel/Wait", i);
428 STAMR3RegisterF(pVM, &pShared->StatRaces, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "", "/VMM/LogFlush/CPU%u/Rel/Races", i);
429 STAMR3RegisterF(pVM, &pShared->StatRacesToR0, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "", "/VMM/LogFlush/CPU%u/Rel/RacesToR0", i);
430 STAMR3RegisterF(pVM, &pShared->cbDropped, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Rel/cbDropped", i);
431 STAMR3RegisterF(pVM, &pShared->cbBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Rel/cbBuf", i);
432 STAMR3RegisterF(pVM, &pShared->idxBuf, STAMTYPE_U32, STAMVISIBILITY_USED, STAMUNIT_BYTES, "", "/VMM/LogFlush/CPU%u/Rel/idxBuf", i);
433 }
434}
435
436
437/**
438 * Worker for VMMR3InitR0 that calls ring-0 to do EMT specific initialization.
439 *
440 * @returns VBox status code.
441 * @param pVM The cross context VM structure.
442 * @param pVCpu The cross context per CPU structure.
443 * @thread EMT(pVCpu)
444 */
445static DECLCALLBACK(int) vmmR3InitR0Emt(PVM pVM, PVMCPU pVCpu)
446{
447 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_VMMR0_INIT_EMT, 0, NULL);
448}
449
450
451/**
452 * Initializes the R0 VMM.
453 *
454 * @returns VBox status code.
455 * @param pVM The cross context VM structure.
456 */
457VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
458{
459 int rc;
460 PVMCPU pVCpu = VMMGetCpu(pVM);
461 Assert(pVCpu && pVCpu->idCpu == 0);
462
463 /*
464 * Nothing to do here in driverless mode.
465 */
466 if (SUPR3IsDriverless())
467 return VINF_SUCCESS;
468
469 /*
470 * Make sure the ring-0 loggers are up to date.
471 */
472 rc = VMMR3UpdateLoggers(pVM);
473 if (RT_FAILURE(rc))
474 return rc;
475
476 /*
477 * Call Ring-0 entry with init code.
478 */
479#ifdef NO_SUPCALLR0VMM
480 //rc = VERR_GENERAL_FAILURE;
481 rc = VINF_SUCCESS;
482#else
483 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT, RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
484#endif
485
486 /*
487 * Flush the logs & deal with assertions.
488 */
489#ifdef LOG_ENABLED
490 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
491#endif
492 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
493 if (rc == VERR_VMM_RING0_ASSERTION)
494 rc = vmmR3HandleRing0Assert(pVM, pVCpu);
495 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
496 {
497 LogRel(("VMM: R0 init failed, rc=%Rra\n", rc));
498 if (RT_SUCCESS(rc))
499 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
500 }
501
502 /*
503 * Log stuff we learned in ring-0.
504 */
505 /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
506 if (pVM->vmm.s.fIsUsingContextHooks)
507 LogRel(("VMM: Enabled thread-context hooks\n"));
508 else
509 LogRel(("VMM: Thread-context hooks unavailable\n"));
510
511 /* Log RTThreadPreemptIsPendingTrusty() and RTThreadPreemptIsPossible() results. */
512 if (pVM->vmm.s.fIsPreemptPendingApiTrusty)
513 LogRel(("VMM: RTThreadPreemptIsPending() can be trusted\n"));
514 else
515 LogRel(("VMM: Warning! RTThreadPreemptIsPending() cannot be trusted! Need to update kernel info?\n"));
516 if (pVM->vmm.s.fIsPreemptPossible)
517 LogRel(("VMM: Kernel preemption is possible\n"));
518 else
519 LogRel(("VMM: Kernel preemption is not possible it seems\n"));
520
521 /*
522 * Send all EMTs to ring-0 to get their logger initialized.
523 */
524 for (VMCPUID idCpu = 0; RT_SUCCESS(rc) && idCpu < pVM->cCpus; idCpu++)
525 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmmR3InitR0Emt, 2, pVM, pVM->apCpusR3[idCpu]);
526
527 return rc;
528}
529
530
531/**
532 * Called when an init phase completes.
533 *
534 * @returns VBox status code.
535 * @param pVM The cross context VM structure.
536 * @param enmWhat Which init phase.
537 */
538VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
539{
540 int rc = VINF_SUCCESS;
541
542 switch (enmWhat)
543 {
544 case VMINITCOMPLETED_RING3:
545 {
546#if 0 /* pointless when timers don't run on EMT */
547 /*
548 * Create the EMT yield timer.
549 */
550 rc = TMR3TimerCreate(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, TMTIMER_FLAGS_NO_RING0,
551 "EMT Yielder", &pVM->vmm.s.hYieldTimer);
552 AssertRCReturn(rc, rc);
553
554 rc = TMTimerSetMillies(pVM, pVM->vmm.s.hYieldTimer, pVM->vmm.s.cYieldEveryMillies);
555 AssertRCReturn(rc, rc);
556#endif
557 break;
558 }
559
560 case VMINITCOMPLETED_HM:
561 {
562 /*
563 * Disable the periodic preemption timers if we can use the
564 * VMX-preemption timer instead.
565 */
566 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
567 && HMR3IsVmxPreemptionTimerUsed(pVM))
568 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
569 LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
570
571 /*
572 * Last chance for GIM to update its CPUID leaves if it requires
573 * knowledge/information from HM initialization.
574 */
575/** @todo r=bird: This shouldn't be done from here, but rather from VM.cpp. There is no dependency on VMM here. */
576 rc = GIMR3InitCompleted(pVM);
577 AssertRCReturn(rc, rc);
578
579 /*
580 * CPUM's post-initialization (print CPUIDs).
581 */
582 CPUMR3LogCpuIdAndMsrFeatures(pVM);
583 break;
584 }
585
586 default: /* shuts up gcc */
587 break;
588 }
589
590 return rc;
591}
592
593
594/**
595 * Terminate the VMM bits.
596 *
597 * @returns VBox status code.
598 * @param pVM The cross context VM structure.
599 */
600VMMR3_INT_DECL(int) VMMR3Term(PVM pVM)
601{
602 PVMCPU pVCpu = VMMGetCpu(pVM);
603 Assert(pVCpu && pVCpu->idCpu == 0);
604
605 /*
606 * Call Ring-0 entry with termination code.
607 */
608 int rc = VINF_SUCCESS;
609 if (!SUPR3IsDriverless())
610 {
611#ifndef NO_SUPCALLR0VMM
612 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
613#endif
614 }
615
616 /*
617 * Flush the logs & deal with assertions.
618 */
619#ifdef LOG_ENABLED
620 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
621#endif
622 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
623 if (rc == VERR_VMM_RING0_ASSERTION)
624 rc = vmmR3HandleRing0Assert(pVM, pVCpu);
625 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
626 {
627 LogRel(("VMM: VMMR3Term: R0 term failed, rc=%Rra. (warning)\n", rc));
628 if (RT_SUCCESS(rc))
629 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
630 }
631
632 /*
633 * Do clean ups.
634 */
635 for (VMCPUID i = 0; i < pVM->cCpus; i++)
636 {
637 RTSemEventDestroy(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
638 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
639 }
640 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
641 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
642 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
643 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
644 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousDone);
645 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
646 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousDoneCaller);
647 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
648 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
649 pVM->vmm.s.hEvtMulRendezvousRecursionPush = NIL_RTSEMEVENTMULTI;
650 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
651 pVM->vmm.s.hEvtMulRendezvousRecursionPop = NIL_RTSEMEVENTMULTI;
652 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
653 pVM->vmm.s.hEvtRendezvousRecursionPushCaller = NIL_RTSEMEVENT;
654 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
655 pVM->vmm.s.hEvtRendezvousRecursionPopCaller = NIL_RTSEMEVENT;
656
657 vmmTermFormatTypes();
658
659 /*
660 * Wait for the log flusher thread to complete.
661 */
662 if (pVM->vmm.s.hLogFlusherThread != NIL_RTTHREAD)
663 {
664 int rc2 = RTThreadWait(pVM->vmm.s.hLogFlusherThread, RT_MS_30SEC, NULL);
665 AssertLogRelRC(rc2);
666 if (RT_SUCCESS(rc2))
667 pVM->vmm.s.hLogFlusherThread = NIL_RTTHREAD;
668 }
669
670 return rc;
671}
672
673
674/**
675 * Applies relocations to data and code managed by this
676 * component. This function will be called at init and
677 * whenever the VMM needs to relocate itself inside the GC.
678 *
679 * The VMM will need to apply relocations to the core code.
680 *
681 * @param pVM The cross context VM structure.
682 * @param offDelta The relocation delta.
683 */
684VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
685{
686 LogFlow(("VMMR3Relocate: offDelta=%RGv\n", offDelta));
687 RT_NOREF(offDelta);
688
689 /*
690 * Update the logger.
691 */
692 VMMR3UpdateLoggers(pVM);
693}
694
695
696/**
697 * Worker for VMMR3UpdateLoggers.
698 */
699static int vmmR3UpdateLoggersWorker(PVM pVM, PVMCPU pVCpu, PRTLOGGER pSrcLogger, bool fReleaseLogger)
700{
701 /*
702 * Get the group count.
703 */
704 uint32_t uGroupsCrc32 = 0;
705 uint32_t cGroups = 0;
706 uint64_t fFlags = 0;
707 int rc = RTLogQueryBulk(pSrcLogger, &fFlags, &uGroupsCrc32, &cGroups, NULL);
708 Assert(rc == VERR_BUFFER_OVERFLOW);
709
710 /*
711 * Allocate the request of the right size.
712 */
713 uint32_t const cbReq = RT_UOFFSETOF_DYN(VMMR0UPDATELOGGERSREQ, afGroups[cGroups]);
714 PVMMR0UPDATELOGGERSREQ pReq = (PVMMR0UPDATELOGGERSREQ)RTMemAllocZVar(cbReq);
715 if (pReq)
716 {
717 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
718 pReq->Hdr.cbReq = cbReq;
719 pReq->cGroups = cGroups;
720 rc = RTLogQueryBulk(pSrcLogger, &pReq->fFlags, &pReq->uGroupCrc32, &pReq->cGroups, pReq->afGroups);
721 AssertRC(rc);
722 if (RT_SUCCESS(rc))
723 rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_VMMR0_UPDATE_LOGGERS, fReleaseLogger, &pReq->Hdr);
724
725 RTMemFree(pReq);
726 }
727 else
728 rc = VERR_NO_MEMORY;
729 return rc;
730}
731
732
733/**
734 * Updates the settings for the RC and R0 loggers.
735 *
736 * @returns VBox status code.
737 * @param pVM The cross context VM structure.
738 * @thread EMT
739 */
740VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
741{
742 /* Nothing to do here if we're in driverless mode: */
743 if (SUPR3IsDriverless())
744 return VINF_SUCCESS;
745
746 PVMCPU pVCpu = VMMGetCpu(pVM);
747 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
748
749 /*
751 * Each EMT has its own logger instance.
751 */
752 /* Debug logging.*/
753 int rcDebug = VINF_SUCCESS;
754#ifdef LOG_ENABLED
755 PRTLOGGER const pDefault = RTLogDefaultInstance();
756 if (pDefault)
757 rcDebug = vmmR3UpdateLoggersWorker(pVM, pVCpu, pDefault, false /*fReleaseLogger*/);
758#else
759 RT_NOREF(pVM);
760#endif
761
762 /* Release logging. */
763 int rcRelease = VINF_SUCCESS;
764 PRTLOGGER const pRelease = RTLogRelGetDefaultInstance();
765 if (pRelease)
766 rcRelease = vmmR3UpdateLoggersWorker(pVM, pVCpu, pRelease, true /*fReleaseLogger*/);
767
768 return RT_SUCCESS(rcDebug) ? rcRelease : rcDebug;
769}
770
771
772/**
773 * @callback_method_impl{FNRTTHREAD, Ring-0 log flusher thread.}
774 */
775static DECLCALLBACK(int) vmmR3LogFlusher(RTTHREAD hThreadSelf, void *pvUser)
776{
777 PVM const pVM = (PVM)pvUser;
778 RT_NOREF(hThreadSelf);
779
780 /* Reset the flusher state before we start: */
781 pVM->vmm.s.LogFlusherItem.u32 = UINT32_MAX;
782
783 /*
784 * The work loop.
785 */
786 for (;;)
787 {
788 /*
789 * Wait for work.
790 */
791 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_VMMR0_LOG_FLUSHER, 0, NULL);
792 if (RT_SUCCESS(rc))
793 {
794 /* Paranoia: Make another copy of the request, to make sure the validated data can't be changed. */
795 VMMLOGFLUSHERENTRY Item;
796 Item.u32 = pVM->vmm.s.LogFlusherItem.u32;
797 if ( Item.s.idCpu < pVM->cCpus
798 && Item.s.idxLogger < VMMLOGGER_IDX_MAX
799 && Item.s.idxBuffer < VMMLOGGER_BUFFER_COUNT)
800 {
801 /*
802 * Verify the request.
803 */
804 PVMCPU const pVCpu = pVM->apCpusR3[Item.s.idCpu];
805 PVMMR3CPULOGGER const pShared = &pVCpu->vmm.s.u.aLoggers[Item.s.idxLogger];
806 uint32_t const cbToFlush = pShared->aBufs[Item.s.idxBuffer].AuxDesc.offBuf;
807 if (cbToFlush > 0)
808 {
809 if (cbToFlush <= pShared->cbBuf)
810 {
811 char * const pchBufR3 = pShared->aBufs[Item.s.idxBuffer].pchBufR3;
812 if (pchBufR3)
813 {
814 /*
815 * Do the flushing.
816 */
817 PRTLOGGER const pLogger = Item.s.idxLogger == VMMLOGGER_IDX_REGULAR
818 ? RTLogGetDefaultInstance() : RTLogRelGetDefaultInstance();
819 if (pLogger)
820 {
821 char szBefore[128];
822 RTStrPrintf(szBefore, sizeof(szBefore),
823 "*FLUSH* idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x fFlushed=%RTbool cbDropped=%#x\n",
824 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush,
825 pShared->aBufs[Item.s.idxBuffer].AuxDesc.fFlushedIndicator, pShared->cbDropped);
826 RTLogBulkWrite(pLogger, szBefore, pchBufR3, cbToFlush, "*FLUSH DONE*\n");
827 }
828 }
829 else
830 Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! No ring-3 buffer pointer!\n",
831 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush));
832 }
833 else
834 Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! Exceeds %#x bytes buffer size!\n",
835 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush, pShared->cbBuf));
836 }
837 else
838 Log(("vmmR3LogFlusher: idCpu=%u idxLogger=%u idxBuffer=%u cbToFlush=%#x: Warning! Zero bytes to flush!\n",
839 Item.s.idCpu, Item.s.idxLogger, Item.s.idxBuffer, cbToFlush));
840
841 /*
842 * Mark the descriptor as flushed and set the request flag for same.
843 */
844 pShared->aBufs[Item.s.idxBuffer].AuxDesc.fFlushedIndicator = true;
845 }
846 else
847 {
848 Assert(Item.s.idCpu == UINT16_MAX);
849 Assert(Item.s.idxLogger == UINT8_MAX);
850 Assert(Item.s.idxBuffer == UINT8_MAX);
851 }
852 }
853 /*
854 * Getting interrupted can happen, just ignore it.
855 */
856 else if (rc == VERR_INTERRUPTED)
857 { /* ignore*/ }
858 /*
859 * The ring-0 termination code will set the shutdown flag and wake us
860 * up, and we should return with the object destroyed. In case there is
861 * some kind of race, we might also get the semaphore destroyed.
862 */
863 else if ( rc == VERR_OBJECT_DESTROYED
864 || rc == VERR_SEM_DESTROYED
865 || rc == VERR_INVALID_HANDLE)
866 {
867 LogRel(("vmmR3LogFlusher: Terminating (%Rrc)\n", rc));
868 return VINF_SUCCESS;
869 }
870 /*
871 * There shouldn't be any other errors...
872 */
873 else
874 {
875 LogRelMax(64, ("vmmR3LogFlusher: VMMR0_DO_VMMR0_LOG_FLUSHER -> %Rrc\n", rc));
876 AssertRC(rc);
877 RTThreadSleep(1);
878 }
879 }
880}
881
882
883/**
884 * Helper for VMM_FLUSH_R0_LOG that does the flushing.
885 *
886 * @param pVM The cross context VM structure.
887 * @param pVCpu The cross context virtual CPU structure of the calling
888 * EMT.
889 * @param pShared The shared logger data.
890 * @param idxBuf The buffer to flush.
891 * @param pDstLogger The destination IPRT logger.
892 */
893static void vmmR3LogReturnFlush(PVM pVM, PVMCPU pVCpu, PVMMR3CPULOGGER pShared, size_t idxBuf, PRTLOGGER pDstLogger)
894{
895 uint32_t const cbToFlush = pShared->aBufs[idxBuf].AuxDesc.offBuf;
896 const char *pszBefore = cbToFlush < 256 ? NULL : "*FLUSH*\n";
897 const char *pszAfter = cbToFlush < 256 ? NULL : "*END*\n";
898
899#if VMMLOGGER_BUFFER_COUNT > 1
900 /*
901 * When we have more than one log buffer, the flusher thread may still be
902 * working on the previous buffer when we get here.
903 */
904 char szBefore[64];
905 if (pShared->cFlushing > 0)
906 {
907 STAM_REL_PROFILE_START(&pShared->StatRaces, a);
908 uint64_t const nsStart = RTTimeNanoTS();
909
910 /* A no-op, but it takes the lock and the hope is that we end up waiting
911 on the flusher to finish up. */
912 RTLogBulkWrite(pDstLogger, NULL, "", 0, NULL);
913 if (pShared->cFlushing != 0)
914 {
915 RTLogBulkWrite(pDstLogger, NULL, "", 0, NULL);
916
917 /* If no luck, go to ring-0 and do proper waiting. */
918 if (pShared->cFlushing != 0)
919 {
920 STAM_REL_COUNTER_INC(&pShared->StatRacesToR0);
921 SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_VMMR0_LOG_WAIT_FLUSHED, 0, NULL);
922 }
923 }
924
925 RTStrPrintf(szBefore, sizeof(szBefore), "*%sFLUSH* waited %'RU64 ns\n",
926 pShared->cFlushing == 0 ? "" : " MISORDERED", RTTimeNanoTS() - nsStart);
927 pszBefore = szBefore;
928 STAM_REL_PROFILE_STOP(&pShared->StatRaces, a);
929 }
930#else
931 RT_NOREF(pVM, pVCpu);
932#endif
933
934 RTLogBulkWrite(pDstLogger, pszBefore, pShared->aBufs[idxBuf].pchBufR3, cbToFlush, pszAfter);
935 pShared->aBufs[idxBuf].AuxDesc.fFlushedIndicator = true;
936}
937
938
939/**
940 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg1Weak output.
941 *
942 * @returns Pointer to the buffer.
943 * @param pVM The cross context VM structure.
944 */
945VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
946{
947 return pVM->vmm.s.szRing0AssertMsg1;
948}
949
950
951/**
952 * Returns the VMCPU of the specified virtual CPU.
953 *
954 * @returns The VMCPU pointer. NULL if @a idCpu or @a pUVM is invalid.
955 *
956 * @param pUVM The user mode VM handle.
957 * @param idCpu The ID of the virtual CPU.
958 */
959VMMR3DECL(PVMCPU) VMMR3GetCpuByIdU(PUVM pUVM, RTCPUID idCpu)
960{
961 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
962 AssertReturn(idCpu < pUVM->cCpus, NULL);
963 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
964 return pUVM->pVM->apCpusR3[idCpu];
965}
966
967
968/**
969 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg2Weak output.
970 *
971 * @returns Pointer to the buffer.
972 * @param pVM The cross context VM structure.
973 */
974VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
975{
976 return pVM->vmm.s.szRing0AssertMsg2;
977}
978
979
980/**
981 * Execute state save operation.
982 *
983 * @returns VBox status code.
984 * @param pVM The cross context VM structure.
985 * @param pSSM SSM operation handle.
986 */
987static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
988{
989 LogFlow(("vmmR3Save:\n"));
990
991 /*
992 * Save the started/stopped state of all CPUs except 0 as it will always
993 * be running. This avoids breaking the saved state version. :-)
994 */
995 for (VMCPUID i = 1; i < pVM->cCpus; i++)
996 SSMR3PutBool(pSSM, VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVM->apCpusR3[i])));
997
998 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
999}
1000
1001
1002/**
1003 * Execute state load operation.
1004 *
1005 * @returns VBox status code.
1006 * @param pVM The cross context VM structure.
1007 * @param pSSM SSM operation handle.
1008 * @param uVersion Data layout version.
1009 * @param uPass The data pass.
1010 */
1011static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1012{
1013 LogFlow(("vmmR3Load:\n"));
1014 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1015
1016 /*
1017 * Validate version.
1018 */
1019 if ( uVersion != VMM_SAVED_STATE_VERSION
1020 && uVersion != VMM_SAVED_STATE_VERSION_3_0)
1021 {
1022 AssertMsgFailed(("vmmR3Load: Invalid version uVersion=%u!\n", uVersion));
1023 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1024 }
1025
1026 if (uVersion <= VMM_SAVED_STATE_VERSION_3_0)
1027 {
1028 /* Ignore the stack bottom, stack pointer and stack bits. */
1029 RTRCPTR RCPtrIgnored;
1030 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1031 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1032#ifdef RT_OS_DARWIN
1033 if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(3,0,0)
1034 && SSMR3HandleVersion(pSSM) < VBOX_FULL_VERSION_MAKE(3,1,0)
1035 && SSMR3HandleRevision(pSSM) >= 48858
1036 && ( !strcmp(SSMR3HandleHostOSAndArch(pSSM), "darwin.x86")
1037 || !strcmp(SSMR3HandleHostOSAndArch(pSSM), "") )
1038 )
1039 SSMR3Skip(pSSM, 16384);
1040 else
1041 SSMR3Skip(pSSM, 8192);
1042#else
1043 SSMR3Skip(pSSM, 8192);
1044#endif
1045 }
1046
1047 /*
1048 * Restore the VMCPU states. VCPU 0 is always started.
1049 */
1050 VMCPU_SET_STATE(pVM->apCpusR3[0], VMCPUSTATE_STARTED);
1051 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1052 {
1053 bool fStarted;
1054 int rc = SSMR3GetBool(pSSM, &fStarted);
1055 if (RT_FAILURE(rc))
1056 return rc;
1057 VMCPU_SET_STATE(pVM->apCpusR3[i], fStarted ? VMCPUSTATE_STARTED : VMCPUSTATE_STOPPED);
1058 }
1059
1060 /* terminator */
1061 uint32_t u32;
1062 int rc = SSMR3GetU32(pSSM, &u32);
1063 if (RT_FAILURE(rc))
1064 return rc;
1065 if (u32 != UINT32_MAX)
1066 {
1067 AssertMsgFailed(("u32=%#x\n", u32));
1068 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1069 }
1070 return VINF_SUCCESS;
1071}
1072
1073
1074/**
1075 * Suspends the CPU yielder.
1076 *
1077 * @param pVM The cross context VM structure.
1078 */
1079VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM)
1080{
1081#if 0 /* pointless when timers don't run on EMT */
1082 VMCPU_ASSERT_EMT(pVM->apCpusR3[0]);
1083 if (!pVM->vmm.s.cYieldResumeMillies)
1084 {
1085 uint64_t u64Now = TMTimerGet(pVM, pVM->vmm.s.hYieldTimer);
1086 uint64_t u64Expire = TMTimerGetExpire(pVM, pVM->vmm.s.hYieldTimer);
1087 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1088 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1089 else
1090 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM, pVM->vmm.s.hYieldTimer, u64Expire - u64Now);
1091 TMTimerStop(pVM, pVM->vmm.s.hYieldTimer);
1092 }
1093 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1094#else
1095 RT_NOREF(pVM);
1096#endif
1097}
1098
1099
1100/**
1101 * Stops the CPU yielder.
1102 *
1103 * @param pVM The cross context VM structure.
1104 */
1105VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM)
1106{
1107#if 0 /* pointless when timers don't run on EMT */
1108 if (!pVM->vmm.s.cYieldResumeMillies)
1109 TMTimerStop(pVM, pVM->vmm.s.hYieldTimer);
1110 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1111 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1112#else
1113 RT_NOREF(pVM);
1114#endif
1115}
1116
1117
1118/**
1119 * Resumes the CPU yielder when it has been suspended or stopped.
1120 *
1121 * @param pVM The cross context VM structure.
1122 */
1123VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM)
1124{
1125#if 0 /* pointless when timers don't run on EMT */
1126 if (pVM->vmm.s.cYieldResumeMillies)
1127 {
1128 TMTimerSetMillies(pVM, pVM->vmm.s.hYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1129 pVM->vmm.s.cYieldResumeMillies = 0;
1130 }
1131#else
1132 RT_NOREF(pVM);
1133#endif
1134}
1135
1136
1137#if 0 /* pointless when timers don't run on EMT */
1138/**
1139 * @callback_method_impl{FNTMTIMERINT, EMT yielder}
1140 *
1141 * @todo This is a UNI core/thread thing, really... Should be reconsidered.
1142 */
1143static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, TMTIMERHANDLE hTimer, void *pvUser)
1144{
1145 NOREF(pvUser);
1146
1147 /*
1148 * This really needs some careful tuning. While we shouldn't be too greedy since
1149 * that'll cause the rest of the system to stop up, we shouldn't be too nice either
1150 * because that'll cause us to stop up.
1151 *
1152 * The current logic is to use the default interval when there is no lag worth
1153 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1154 *
1155 * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
1156 * so the lag is up to date.)
1157 */
1158 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1159 if ( u64Lag < 50000000 /* 50ms */
1160 || ( u64Lag < 1000000000 /* 1s */
1161 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1162 )
1163 {
1164 uint64_t u64Elapsed = RTTimeNanoTS();
1165 pVM->vmm.s.u64LastYield = u64Elapsed;
1166
1167 RTThreadYield();
1168
1169#ifdef LOG_ENABLED
1170 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1171 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1172#endif
1173 }
1174 TMTimerSetMillies(pVM, hTimer, pVM->vmm.s.cYieldEveryMillies);
1175}
1176#endif
1177
1178
1179/**
1180 * Executes guest code (Intel VT-x and AMD-V).
1181 *
1182 * @param pVM The cross context VM structure.
1183 * @param pVCpu The cross context virtual CPU structure.
1184 */
1185VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu)
1186{
1187 Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1188
1189 int rc;
1190 do
1191 {
1192#ifdef NO_SUPCALLR0VMM
1193 rc = VERR_GENERAL_FAILURE;
1194#else
1195 rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
1196 if (RT_LIKELY(rc == VINF_SUCCESS))
1197 rc = pVCpu->vmm.s.iLastGZRc;
1198#endif
1199 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1200
1201#if 0 /** @todo triggers too often */
1202 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
1203#endif
1204
1205 /*
1206 * Flush the logs
1207 */
1208#ifdef LOG_ENABLED
1209 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
1210#endif
1211 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
1212 if (rc != VERR_VMM_RING0_ASSERTION)
1213 {
1214 Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1215 return rc;
1216 }
1217 return vmmR3HandleRing0Assert(pVM, pVCpu);
1218}
1219
1220
1221/**
1222 * Performs one of the fast I/O control VMMR0 operations.
1223 *
1224 * @returns VBox strict status code.
1225 * @param pVM The cross context VM structure.
1226 * @param pVCpu The cross context virtual CPU structure.
1227 * @param enmOperation The operation to perform.
1228 */
1229VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation)
1230{
1231 VBOXSTRICTRC rcStrict;
1232 do
1233 {
1234#ifdef NO_SUPCALLR0VMM
1235 rcStrict = VERR_GENERAL_FAILURE;
1236#else
1237 rcStrict = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), enmOperation, pVCpu->idCpu);
1238 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1239 rcStrict = pVCpu->vmm.s.iLastGZRc;
1240#endif
1241 } while (rcStrict == VINF_EM_RAW_INTERRUPT_HYPER);
1242
1243 /*
1244 * Flush the logs
1245 */
1246#ifdef LOG_ENABLED
1247 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
1248#endif
1249 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
1250 if (rcStrict != VERR_VMM_RING0_ASSERTION)
1251 return rcStrict;
1252 return vmmR3HandleRing0Assert(pVM, pVCpu);
1253}
1254
1255
1256/**
1257 * VCPU worker for VMMR3SendStartupIpi.
1258 *
1259 * @param pVM The cross context VM structure.
1260 * @param idCpu Virtual CPU to perform SIPI on.
1261 * @param uVector The SIPI vector.
1262 */
1263static DECLCALLBACK(int) vmmR3SendStarupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1264{
1265 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1266 VMCPU_ASSERT_EMT(pVCpu);
1267
1268 /*
1269 * In the INIT state, the target CPU is only responsive to an SIPI.
1270 * This is also true for when the CPU is in VMX non-root mode.
1271 *
1272 * See AMD spec. 16.5 "Interprocessor Interrupts (IPI)".
1273 * See Intel spec. 26.6.2 "Activity State".
1274 */
1275 if (EMGetState(pVCpu) != EMSTATE_WAIT_SIPI)
1276 return VINF_SUCCESS;
1277
1278 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1279#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1280 if (CPUMIsGuestInVmxRootMode(pCtx))
1281 {
1282 /* If the CPU is in VMX non-root mode we must cause a VM-exit. */
1283 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1284 return VBOXSTRICTRC_TODO(IEMExecVmxVmexitStartupIpi(pVCpu, uVector));
1285
1286 /* If the CPU is in VMX root mode (and not in VMX non-root mode) SIPIs are blocked. */
1287 return VINF_SUCCESS;
1288 }
1289#endif
1290
1291 pCtx->cs.Sel = uVector << 8;
1292 pCtx->cs.ValidSel = uVector << 8;
1293 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1294 pCtx->cs.u64Base = uVector << 12;
1295 pCtx->cs.u32Limit = UINT32_C(0x0000ffff);
1296 pCtx->rip = 0;
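    /* Worked example (illustrative note, not in the original source): a SIPI vector
       of 0x9f yields CS=0x9f00 with base 0x9f000 and RIP=0, i.e. the target CPU
       starts executing in real mode at physical address 0x9f000. */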
1297
1298 Log(("vmmR3SendSipi for VCPU %d with vector %x\n", idCpu, uVector));
1299
1300# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
1301 EMSetState(pVCpu, EMSTATE_HALTED);
1302 return VINF_EM_RESCHEDULE;
1303# else /* And if we go the VMCPU::enmState way it can stay here. */
1304 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STOPPED);
1305 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1306 return VINF_SUCCESS;
1307# endif
1308}
1309
1310
1311/**
1312 * VCPU worker for VMMR3SendInitIpi.
1313 *
1314 * @returns VBox status code.
1315 * @param pVM The cross context VM structure.
1316 * @param idCpu Virtual CPU to perform the INIT IPI on.
1317 */
1318static DECLCALLBACK(int) vmmR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1319{
1320 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1321 VMCPU_ASSERT_EMT(pVCpu);
1322
1323 Log(("vmmR3SendInitIpi for VCPU %d\n", idCpu));
1324
1325 /** @todo r=ramshankar: We should probably block INIT signal when the CPU is in
1326 * wait-for-SIPI state. Verify. */
1327
1328 /* If the CPU is in VMX non-root mode, INIT signals cause VM-exits. */
1329#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1330 PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1331 if (CPUMIsGuestInVmxNonRootMode(pCtx))
1332 return VBOXSTRICTRC_TODO(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INIT_SIGNAL, 0 /* uExitQual */));
1333#endif
1334
1335 /** @todo Figure out how to handle a SVM nested-guest intercepts here for INIT
1336 * IPI (e.g. SVM_EXIT_INIT). */
1337
1338 PGMR3ResetCpu(pVM, pVCpu);
1339 PDMR3ResetCpu(pVCpu); /* Only clears pending interrupts force flags */
1340 APICR3InitIpi(pVCpu);
1341 TRPMR3ResetCpu(pVCpu);
1342 CPUMR3ResetCpu(pVM, pVCpu);
1343 EMR3ResetCpu(pVCpu);
1344 HMR3ResetCpu(pVCpu);
1345 NEMR3ResetCpu(pVCpu, true /*fInitIpi*/);
1346
1347 /* This will trickle up on the target EMT. */
1348 return VINF_EM_WAIT_SIPI;
1349}
1350
1351
1352/**
1353 * Sends a Startup IPI to the virtual CPU by setting CS:EIP into
1354 * vector-dependent state and unhalting the processor.
1355 *
1356 * @param pVM The cross context VM structure.
1357 * @param idCpu Virtual CPU to perform SIPI on.
1358 * @param uVector SIPI vector.
1359 */
1360VMMR3_INT_DECL(void) VMMR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1361{
1362 AssertReturnVoid(idCpu < pVM->cCpus);
1363
1364 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendStarupIpi, 3, pVM, idCpu, uVector);
1365 AssertRC(rc);
1366}
1367
1368
1369/**
1370 * Sends init IPI to the virtual CPU.
1371 *
1372 * @param pVM The cross context VM structure.
1373 * @param idCpu Virtual CPU to perform the INIT IPI on.
1374 */
1375VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1376{
1377 AssertReturnVoid(idCpu < pVM->cCpus);
1378
1379 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
1380 AssertRC(rc);
1381}
1382
1383
1384/**
1385 * Registers the guest memory range that can be used for patching.
1386 *
1387 * @returns VBox status code.
1388 * @param pVM The cross context VM structure.
1389 * @param pPatchMem Patch memory range.
1390 * @param cbPatchMem Size of the memory range.
1391 */
1392VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1393{
1394 VM_ASSERT_EMT(pVM);
1395 if (HMIsEnabled(pVM))
1396 return HMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
1397
1398 return VERR_NOT_SUPPORTED;
1399}
1400
1401
1402/**
1403 * Deregisters the guest memory range that can be used for patching.
1404 *
1405 * @returns VBox status code.
1406 * @param pVM The cross context VM structure.
1407 * @param pPatchMem Patch memory range.
1408 * @param cbPatchMem Size of the memory range.
1409 */
1410VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1411{
1412 if (HMIsEnabled(pVM))
1413 return HMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
1414
1415 return VINF_SUCCESS;
1416}
1417
1418
1419/**
1420 * Common recursion handler for the other EMTs.
1421 *
1422 * @returns Strict VBox status code.
1423 * @param pVM The cross context VM structure.
1424 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1425 * @param rcStrict Current status code to be combined with the one
1426 * from this recursion and returned.
1427 */
1428static VBOXSTRICTRC vmmR3EmtRendezvousCommonRecursion(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
1429{
1430 int rc2;
1431
1432 /*
1433 * We wait here while the initiator of this recursion reconfigures
1434 * everything. The last EMT to get in signals the initiator.
1435 */
1436 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) == pVM->cCpus)
1437 {
1438 rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
1439 AssertLogRelRC(rc2);
1440 }
1441
1442 rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPush, RT_INDEFINITE_WAIT);
1443 AssertLogRelRC(rc2);
1444
1445 /*
1446 * Do the normal rendezvous processing.
1447 */
1448 VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1449 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1450
1451 /*
1452 * Wait for the initiator to restore everything.
1453 */
1454 rc2 = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousRecursionPop, RT_INDEFINITE_WAIT);
1455 AssertLogRelRC(rc2);
1456
1457 /*
1458 * Last thread out of here signals the initiator.
1459 */
1460 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) == pVM->cCpus)
1461 {
1462 rc2 = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
1463 AssertLogRelRC(rc2);
1464 }
1465
1466 /*
1467 * Merge status codes and return.
1468 */
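    /* Of two non-VINF_SUCCESS informational statuses the lower value wins, i.e. the
       more important EM scheduling request is kept. */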
1469 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
1470 if ( rcStrict2 != VINF_SUCCESS
1471 && ( rcStrict == VINF_SUCCESS
1472 || rcStrict > rcStrict2))
1473 rcStrict = rcStrict2;
1474 return rcStrict;
1475}
1476
1477
1478/**
1479 * Count returns and have the last non-caller EMT wake up the caller.
1480 *
1481 * @returns VBox strict informational status code for EM scheduling. No failures
1482 * will be returned here; those are for the caller only.
1483 *
1484 * @param pVM The cross context VM structure.
1485 * @param rcStrict The current accumulated recursive status code,
1486 * to be merged with i32RendezvousStatus and
1487 * returned.
1488 */
1489DECL_FORCE_INLINE(VBOXSTRICTRC) vmmR3EmtRendezvousNonCallerReturn(PVM pVM, VBOXSTRICTRC rcStrict)
1490{
1491 VBOXSTRICTRC rcStrict2 = ASMAtomicReadS32(&pVM->vmm.s.i32RendezvousStatus);
1492
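    /* The caller is not counted here, so the last non-caller EMT to return (cCpus - 1
       in total) signals hEvtRendezvousDoneCaller and lets the caller clean up. */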
1493 uint32_t cReturned = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsReturned);
1494 if (cReturned == pVM->cCpus - 1U)
1495 {
1496 int rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
1497 AssertLogRelRC(rc);
1498 }
1499
1500 /*
1501 * Merge the status codes, ignoring error statuses in this code path.
1502 */
1503 AssertLogRelMsgReturn( rcStrict2 <= VINF_SUCCESS
1504 || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
1505 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1506 VERR_IPE_UNEXPECTED_INFO_STATUS);
1507
1508 if (RT_SUCCESS(rcStrict2))
1509 {
1510 if ( rcStrict2 != VINF_SUCCESS
1511 && ( rcStrict == VINF_SUCCESS
1512 || rcStrict > rcStrict2))
1513 rcStrict = rcStrict2;
1514 }
1515 return rcStrict;
1516}
1517
1518
1519/**
1520 * Common worker for VMMR3EmtRendezvous and VMMR3EmtRendezvousFF.
1521 *
1522 * @returns VBox strict informational status code for EM scheduling. No failures
1523 * will be returned here; those are for the caller only. When
1524 * fIsCaller is set, VINF_SUCCESS is always returned.
1525 *
1526 * @param pVM The cross context VM structure.
1527 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1528 * @param fIsCaller Whether we're the VMMR3EmtRendezvous caller or
1529 * not.
1530 * @param fFlags The flags.
1531 * @param pfnRendezvous The callback.
1532 * @param pvUser The user argument for the callback.
1533 */
1534static VBOXSTRICTRC vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
1535 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1536{
1537 int rc;
1538 VBOXSTRICTRC rcStrictRecursion = VINF_SUCCESS;
1539
1540 /*
1541 * Enter, the last EMT triggers the next callback phase.
1542 */
1543 uint32_t cEntered = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsEntered);
1544 if (cEntered != pVM->cCpus)
1545 {
1546 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1547 {
1548 /* Wait for our turn. */
1549 for (;;)
1550 {
1551 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, RT_INDEFINITE_WAIT);
1552 AssertLogRelRC(rc);
1553 if (!pVM->vmm.s.fRendezvousRecursion)
1554 break;
1555 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1556 }
1557 }
1558 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1559 {
1560 /* Wait for the last EMT to arrive and wake everyone up. */
1561 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce, RT_INDEFINITE_WAIT);
1562 AssertLogRelRC(rc);
1563 Assert(!pVM->vmm.s.fRendezvousRecursion);
1564 }
1565 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1566 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1567 {
1568 /* Wait for our turn. */
1569 for (;;)
1570 {
1571 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1572 AssertLogRelRC(rc);
1573 if (!pVM->vmm.s.fRendezvousRecursion)
1574 break;
1575 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1576 }
1577 }
1578 else
1579 {
1580 Assert((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE);
1581
1582 /*
1583 * The execute-once type is handled specially to optimize the code flow.
1584 *
1585 * The last EMT to arrive will perform the callback and the other
1586 * EMTs will wait on the Done/DoneCaller semaphores (instead of
1587 * the EnterOneByOne/AllAtOnce) in the meanwhile. When the callback
1588 * returns, that EMT will initiate the normal return sequence.
1589 */
1590 if (!fIsCaller)
1591 {
1592 for (;;)
1593 {
1594 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1595 AssertLogRelRC(rc);
1596 if (!pVM->vmm.s.fRendezvousRecursion)
1597 break;
1598 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1599 }
1600
1601 return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
1602 }
1603 return VINF_SUCCESS;
1604 }
1605 }
1606 else
1607 {
1608 /*
1609 * All EMTs are waiting, clear the FF and take action according to the
1610 * execution method.
1611 */
1612 VM_FF_CLEAR(pVM, VM_FF_EMT_RENDEZVOUS);
1613
1614 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1615 {
1616 /* Wake up everyone. */
1617 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
1618 AssertLogRelRC(rc);
1619 }
1620 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1621 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1622 {
1623 /* Figure out who to wake up and wake it up. If it's ourselves, it's easy;
1624 otherwise wait for our turn. */
1625 VMCPUID iFirst = (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1626 ? 0
1627 : pVM->cCpus - 1U;
1628 if (pVCpu->idCpu != iFirst)
1629 {
1630 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iFirst]);
1631 AssertLogRelRC(rc);
1632 for (;;)
1633 {
1634 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1635 AssertLogRelRC(rc);
1636 if (!pVM->vmm.s.fRendezvousRecursion)
1637 break;
1638 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1639 }
1640 }
1641 }
1642 /* else: execute the handler on the current EMT and wake up one or more threads afterwards. */
1643 }
1644
1645
1646 /*
1647 * Do the callback and update the status if necessary.
1648 */
1649 if ( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1650 || RT_SUCCESS(ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus)) )
1651 {
1652 VBOXSTRICTRC rcStrict2 = pfnRendezvous(pVM, pVCpu, pvUser);
1653 if (rcStrict2 != VINF_SUCCESS)
1654 {
1655 AssertLogRelMsg( rcStrict2 <= VINF_SUCCESS
1656 || (rcStrict2 >= VINF_EM_FIRST && rcStrict2 <= VINF_EM_LAST),
1657 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
1658 int32_t i32RendezvousStatus;
1659 do
1660 {
1661 i32RendezvousStatus = ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus);
1662 if ( rcStrict2 == i32RendezvousStatus
1663 || RT_FAILURE(i32RendezvousStatus)
1664 || ( i32RendezvousStatus != VINF_SUCCESS
1665 && rcStrict2 > i32RendezvousStatus))
1666 break;
1667 } while (!ASMAtomicCmpXchgS32(&pVM->vmm.s.i32RendezvousStatus, VBOXSTRICTRC_VAL(rcStrict2), i32RendezvousStatus));
1668 }
1669 }
1670
1671 /*
1672 * Increment the done counter and take action depending on whether we're
1673 * the last to finish callback execution.
1674 */
1675 uint32_t cDone = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsDone);
1676 if ( cDone != pVM->cCpus
1677 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE)
1678 {
1679 /* Signal the next EMT? */
1680 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1681 {
1682 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
1683 AssertLogRelRC(rc);
1684 }
1685 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
1686 {
1687 Assert(cDone == pVCpu->idCpu + 1U);
1688 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu + 1U]);
1689 AssertLogRelRC(rc);
1690 }
1691 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1692 {
1693 Assert(pVM->cCpus - cDone == pVCpu->idCpu);
1694 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVM->cCpus - cDone - 1U]);
1695 AssertLogRelRC(rc);
1696 }
1697
1698 /* Wait for the rest to finish (the caller waits on hEvtRendezvousDoneCaller). */
1699 if (!fIsCaller)
1700 {
1701 for (;;)
1702 {
1703 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1704 AssertLogRelRC(rc);
1705 if (!pVM->vmm.s.fRendezvousRecursion)
1706 break;
1707 rcStrictRecursion = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrictRecursion);
1708 }
1709 }
1710 }
1711 else
1712 {
1713 /* Callback execution is all done, tell the rest to return. */
1714 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
1715 AssertLogRelRC(rc);
1716 }
1717
1718 if (!fIsCaller)
1719 return vmmR3EmtRendezvousNonCallerReturn(pVM, rcStrictRecursion);
1720 return rcStrictRecursion;
1721}
1722
1723
1724/**
1725 * Called in response to VM_FF_EMT_RENDEZVOUS.
1726 *
1727 * @returns VBox strict status code - EM scheduling. No errors will be returned
1728 * here, nor will any non-EM scheduling status codes be returned.
1729 *
1730 * @param pVM The cross context VM structure.
1731 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1732 *
1733 * @thread EMT
1734 */
1735VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu)
1736{
1737 Assert(!pVCpu->vmm.s.fInRendezvous);
1738 Log(("VMMR3EmtRendezvousFF: EMT%#u\n", pVCpu->idCpu));
1739 pVCpu->vmm.s.fInRendezvous = true;
1740 VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1741 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1742 pVCpu->vmm.s.fInRendezvous = false;
1743 Log(("VMMR3EmtRendezvousFF: EMT%#u returns %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
1744 return VBOXSTRICTRC_TODO(rcStrict);
1745}
1746
1747
1748/**
1749 * Helper for resetting a single wakeup event semaphore.
1750 *
1751 * @returns VERR_TIMEOUT on success, RTSemEventWait status otherwise.
1752 * @param hEvt The event semaphore to reset.
1753 */
1754static int vmmR3HlpResetEvent(RTSEMEVENT hEvt)
1755{
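    /* Drain any pending signals; once the semaphore is empty the zero-timeout wait
       yields VERR_TIMEOUT, which is the expected "success" outcome for a reset. */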
1756 for (uint32_t cLoops = 0; ; cLoops++)
1757 {
1758 int rc = RTSemEventWait(hEvt, 0 /*cMsTimeout*/);
1759 if (rc != VINF_SUCCESS || cLoops > _4K)
1760 return rc;
1761 }
1762}
1763
1764
1765/**
1766 * Worker for VMMR3EmtRendezvous that handles recursion.
1767 *
1768 * @returns VBox strict status code. This will be the first error,
1769 * VINF_SUCCESS, or an EM scheduling status code.
1770 *
1771 * @param pVM The cross context VM structure.
1772 * @param pVCpu The cross context virtual CPU structure of the
1773 * calling EMT.
1774 * @param fFlags Flags indicating execution methods. See
1775 * grp_VMMR3EmtRendezvous_fFlags.
1776 * @param pfnRendezvous The callback.
1777 * @param pvUser User argument for the callback.
1778 *
1779 * @thread EMT(pVCpu)
1780 */
1781static VBOXSTRICTRC vmmR3EmtRendezvousRecursive(PVM pVM, PVMCPU pVCpu, uint32_t fFlags,
1782 PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1783{
1784 Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d\n", fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions));
1785 AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
1786 Assert(pVCpu->vmm.s.fInRendezvous);
1787
1788 /*
1789 * Save the current state.
1790 */
1791 uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
1792 uint32_t const cParentDone = pVM->vmm.s.cRendezvousEmtsDone;
1793 int32_t const iParentStatus = pVM->vmm.s.i32RendezvousStatus;
1794 PFNVMMEMTRENDEZVOUS const pfnParent = pVM->vmm.s.pfnRendezvous;
1795 void * const pvParentUser = pVM->vmm.s.pvRendezvousUser;
1796
1797 /*
1798 * Check preconditions.
1799 */
1800 AssertReturn( (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1801 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
1802 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
1803 || (fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
1804 VERR_INTERNAL_ERROR);
1805 AssertReturn(pVM->vmm.s.cRendezvousEmtsEntered == pVM->cCpus, VERR_INTERNAL_ERROR_2);
1806 AssertReturn(pVM->vmm.s.cRendezvousEmtsReturned == 0, VERR_INTERNAL_ERROR_3);
1807
1808 /*
1809 * Reset the recursion prep and pop semaphores.
1810 */
1811 int rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
1812 AssertLogRelRCReturn(rc, rc);
1813 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
1814 AssertLogRelRCReturn(rc, rc);
1815 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPushCaller);
1816 AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
1817 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller);
1818 AssertLogRelMsgReturn(rc == VERR_TIMEOUT, ("%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS);
1819
1820 /*
1821 * Usher the other threads into the recursion routine.
1822 */
1823 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush, 0);
1824 ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, true);
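    /* The EMTs woken below re-check fRendezvousRecursion in their wait loops and
       divert into vmmR3EmtRendezvousCommonRecursion() rather than proceeding. */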
1825
1826 uint32_t cLeft = pVM->cCpus - (cParentDone + 1U);
1827 if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1828 while (cLeft-- > 0)
1829 {
1830 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
1831 AssertLogRelRC(rc);
1832 }
1833 else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
1834 {
1835 Assert(cLeft == pVM->cCpus - (pVCpu->idCpu + 1U));
1836 for (VMCPUID iCpu = pVCpu->idCpu + 1U; iCpu < pVM->cCpus; iCpu++)
1837 {
1838 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu]);
1839 AssertLogRelRC(rc);
1840 }
1841 }
1842 else if ((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1843 {
1844 Assert(cLeft == pVCpu->idCpu);
1845 for (VMCPUID iCpu = pVCpu->idCpu; iCpu > 0; iCpu--)
1846 {
1847 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iCpu - 1U]);
1848 AssertLogRelRC(rc);
1849 }
1850 }
1851 else
1852 AssertLogRelReturn((fParentFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
1853 VERR_INTERNAL_ERROR_4);
1854
1855 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
1856 AssertLogRelRC(rc);
1857 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
1858 AssertLogRelRC(rc);
1859
1860
1861 /*
1862 * Wait for the EMTs to wake up and get out of the parent rendezvous code.
1863 */
1864 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPush) != pVM->cCpus)
1865 {
1866 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPushCaller, RT_INDEFINITE_WAIT);
1867 AssertLogRelRC(rc);
1868 }
1869
1870 ASMAtomicWriteBool(&pVM->vmm.s.fRendezvousRecursion, false);
1871
1872 /*
1873 * Clear the slate and setup the new rendezvous.
1874 */
1875 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1876 {
1877 rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
1878 AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1879 }
1880 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1881 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
1882 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
1883 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1884
1885 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
1886 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
1887 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
1888 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
1889 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
1890 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
1891 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
1892 ASMAtomicIncU32(&pVM->vmm.s.cRendezvousRecursions);
1893
1894 /*
1895 * We're ready to go now, do normal rendezvous processing.
1896 */
1897 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPush);
1898 AssertLogRelRC(rc);
1899
1900 VBOXSTRICTRC rcStrict = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /*fIsCaller*/, fFlags, pfnRendezvous, pvUser);
1901
1902 /*
1903 * The caller waits for the other EMTs to be done, return and start waiting on
1904 * the pop semaphore.
1905 */
1906 for (;;)
1907 {
1908 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
1909 AssertLogRelRC(rc);
1910 if (!pVM->vmm.s.fRendezvousRecursion)
1911 break;
1912 rcStrict = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict);
1913 }
1914
1915 /*
1916 * Get the return code and merge it with the above recursion status.
1917 */
1918 VBOXSTRICTRC rcStrict2 = pVM->vmm.s.i32RendezvousStatus;
1919 if ( rcStrict2 != VINF_SUCCESS
1920 && ( rcStrict == VINF_SUCCESS
1921 || rcStrict > rcStrict2))
1922 rcStrict = rcStrict2;
1923
1924 /*
1925 * Restore the parent rendezvous state.
1926 */
1927 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1928 {
1929 rc = vmmR3HlpResetEvent(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
1930 AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1931 }
1932 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousEnterOneByOne); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1933 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
1934 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
1935 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousDoneCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1936
1937 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, pVM->cCpus);
1938 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
1939 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, cParentDone);
1940 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, iParentStatus);
1941 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fParentFlags);
1942 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvParentUser);
1943 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnParent);
1944
1945 /*
1946 * Usher the other EMTs back to their parent recursion routine, waiting
1947 * for them to all get there before we return (makes sure they've been
1948 * scheduled and are past the pop event sem, see below).
1949 */
1950 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop, 0);
1951 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousRecursionPop);
1952 AssertLogRelRC(rc);
1953
1954 if (ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsRecursingPop) != pVM->cCpus)
1955 {
1956 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousRecursionPopCaller, RT_INDEFINITE_WAIT);
1957 AssertLogRelRC(rc);
1958 }
1959
1960 /*
1961 * We must reset the pop semaphore on the way out (doing the pop caller too,
1962 * just in case). The parent may be another recursion.
1963 */
1964 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousRecursionPop); AssertLogRelRC(rc);
1965 rc = vmmR3HlpResetEvent(pVM->vmm.s.hEvtRendezvousRecursionPopCaller); AssertLogRelMsg(rc == VERR_TIMEOUT, ("%Rrc\n", rc));
1966
1967 ASMAtomicDecU32(&pVM->vmm.s.cRendezvousRecursions);
1968
1969 Log(("vmmR3EmtRendezvousRecursive: %#x EMT#%u depth=%d returns %Rrc\n",
1970 fFlags, pVCpu->idCpu, pVM->vmm.s.cRendezvousRecursions, VBOXSTRICTRC_VAL(rcStrict)));
1971 return rcStrict;
1972}
1973
1974
1975/**
1976 * EMT rendezvous.
1977 *
1978 * Gathers all the EMTs and execute some code on each of them, either in a one
1979 * by one fashion or all at once.
1980 *
1981 * @returns VBox strict status code. This will be the first error,
1982 * VINF_SUCCESS, or an EM scheduling status code.
1983 *
1984 * @retval VERR_DEADLOCK if recursion is attempted using a rendezvous type that
1985 * doesn't support it or if the recursion is too deep.
1986 *
1987 * @param pVM The cross context VM structure.
1988 * @param fFlags Flags indicating execution methods. See
1989 * grp_VMMR3EmtRendezvous_fFlags. The one-by-one,
1990 * descending and ascending rendezvous types support
1991 * recursion from inside @a pfnRendezvous.
1992 * @param pfnRendezvous The callback.
1993 * @param pvUser User argument for the callback.
1994 *
1995 * @thread Any.
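 *
 * @par Example
 *      A minimal sketch of a one-shot rendezvous; the callback name and body are
 *      purely illustrative:
 * @code
 *      static DECLCALLBACK(VBOXSTRICTRC) myRendezvousWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *      {
 *          RT_NOREF(pVM, pvUser);
 *          LogRel(("Rendezvous callback on EMT#%u\n", pVCpu->idCpu));
 *          return VINF_SUCCESS;
 *      }
 *      ...
 *      int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, myRendezvousWorker, NULL);
 * @endcode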
1996 */
1997VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1998{
1999 /*
2000 * Validate input.
2001 */
2002 AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
2003 AssertMsg( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID
2004 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) <= VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2005 && !(fFlags & ~VMMEMTRENDEZVOUS_FLAGS_VALID_MASK), ("%#x\n", fFlags));
2006 AssertMsg( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
2007 || ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE
2008 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE),
2009 ("type %u\n", fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK));
2010
2011 VBOXSTRICTRC rcStrict;
2012 PVMCPU pVCpu = VMMGetCpu(pVM);
2013 if (!pVCpu)
2014 {
2015 /*
2016 * Forward the request to an EMT thread.
2017 */
2018 Log(("VMMR3EmtRendezvous: %#x non-EMT\n", fFlags));
2019 if (!(fFlags & VMMEMTRENDEZVOUS_FLAGS_PRIORITY))
2020 rcStrict = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
2021 else
2022 rcStrict = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
2023 Log(("VMMR3EmtRendezvous: %#x non-EMT returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
2024 }
2025 else if ( pVM->cCpus == 1
2026 || ( pVM->enmVMState == VMSTATE_DESTROYING
2027 && VMR3GetActiveEmts(pVM->pUVM) < pVM->cCpus ) )
2028 {
2029 /*
2030 * Shortcut for the single EMT case.
2031 *
2032 * We also ends up here if EMT(0) (or others) tries to issue a rendezvous
2033 * during vmR3Destroy after other emulation threads have started terminating.
2034 */
2035 if (!pVCpu->vmm.s.fInRendezvous)
2036 {
2037 Log(("VMMR3EmtRendezvous: %#x EMT (uni)\n", fFlags));
2038 pVCpu->vmm.s.fInRendezvous = true;
2039 pVM->vmm.s.fRendezvousFlags = fFlags;
2040 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
2041 pVCpu->vmm.s.fInRendezvous = false;
2042 }
2043 else
2044 {
2045 /* Recursion. Do the same checks as in the SMP case. */
2046 Log(("VMMR3EmtRendezvous: %#x EMT (uni), recursion depth=%d\n", fFlags, pVM->vmm.s.cRendezvousRecursions));
2047 uint32_t fType = pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK;
2048 AssertLogRelReturn( !pVCpu->vmm.s.fInRendezvous
2049 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
2050 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2051 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
2052 || fType == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
2053 , VERR_DEADLOCK);
2054
2055 AssertLogRelReturn(pVM->vmm.s.cRendezvousRecursions < 3, VERR_DEADLOCK);
2056 pVM->vmm.s.cRendezvousRecursions++;
2057 uint32_t const fParentFlags = pVM->vmm.s.fRendezvousFlags;
2058 pVM->vmm.s.fRendezvousFlags = fFlags;
2059
2060 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
2061
2062 pVM->vmm.s.fRendezvousFlags = fParentFlags;
2063 pVM->vmm.s.cRendezvousRecursions--;
2064 }
2065 Log(("VMMR3EmtRendezvous: %#x EMT (uni) returns %Rrc\n", fFlags, VBOXSTRICTRC_VAL(rcStrict)));
2066 }
2067 else
2068 {
2069 /*
2070 * Spin lock. If busy, check for recursion; if not recursing, wait for
2071 * the other EMT to finish while keeping a lookout for the RENDEZVOUS FF.
2072 */
2073 int rc;
2074 rcStrict = VINF_SUCCESS;
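        /* Try to take the rendezvous lock; 0x77778888 is just an arbitrary "owned"
           marker, the lock is released again by writing 0 further down. */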
2075 if (RT_UNLIKELY(!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0)))
2076 {
2077 /* Allow recursion in some cases. */
2078 if ( pVCpu->vmm.s.fInRendezvous
2079 && ( (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
2080 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
2081 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
2082 || (pVM->vmm.s.fRendezvousFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE
2083 ))
2084 return VBOXSTRICTRC_TODO(vmmR3EmtRendezvousRecursive(pVM, pVCpu, fFlags, pfnRendezvous, pvUser));
2085
2086 AssertLogRelMsgReturn(!pVCpu->vmm.s.fInRendezvous, ("fRendezvousFlags=%#x\n", pVM->vmm.s.fRendezvousFlags),
2087 VERR_DEADLOCK);
2088
2089 Log(("VMMR3EmtRendezvous: %#x EMT#%u, waiting for lock...\n", fFlags, pVCpu->idCpu));
2090 while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
2091 {
2092 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2093 {
2094 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
2095 if ( rc != VINF_SUCCESS
2096 && ( rcStrict == VINF_SUCCESS
2097 || rcStrict > rc))
2098 rcStrict = rc;
2099 /** @todo Perhaps deal with termination here? */
2100 }
2101 ASMNopPause();
2102 }
2103 }
2104
2105 Log(("VMMR3EmtRendezvous: %#x EMT#%u\n", fFlags, pVCpu->idCpu));
2106 Assert(!VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS));
2107 Assert(!pVCpu->vmm.s.fInRendezvous);
2108 pVCpu->vmm.s.fInRendezvous = true;
2109
2110 /*
2111 * Clear the slate and setup the rendezvous. This is a semaphore ping-pong orgy. :-)
2112 */
2113 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2114 {
2115 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i], 0);
2116 AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2117 }
2118 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2119 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
2120 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
2121 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
2122 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
2123 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
2124 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
2125 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
2126 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
2127 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
2128 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
2129
2130 /*
2131 * Set the FF and poke the other EMTs.
2132 */
2133 VM_FF_SET(pVM, VM_FF_EMT_RENDEZVOUS);
2134 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_POKE);
2135
2136 /*
2137 * Do the same ourselves.
2138 */
2139 VBOXSTRICTRC rcStrict2 = vmmR3EmtRendezvousCommon(pVM, pVCpu, true /* fIsCaller */, fFlags, pfnRendezvous, pvUser);
2140
2141 /*
2142 * The caller waits for the other EMTs to be done and return before doing
2143 * the cleanup. This does away with wakeup / reset races we would otherwise
2144 * risk in the multiple release event semaphore code (hEvtRendezvousDoneCaller).
2145 */
2146 for (;;)
2147 {
2148 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
2149 AssertLogRelRC(rc);
2150 if (!pVM->vmm.s.fRendezvousRecursion)
2151 break;
2152 rcStrict2 = vmmR3EmtRendezvousCommonRecursion(pVM, pVCpu, rcStrict2);
2153 }
2154
2155 /*
2156 * Get the return code and clean up a little bit.
2157 */
2158 VBOXSTRICTRC rcStrict3 = pVM->vmm.s.i32RendezvousStatus;
2159 ASMAtomicWriteNullPtr((void * volatile *)&pVM->vmm.s.pfnRendezvous);
2160
2161 ASMAtomicWriteU32(&pVM->vmm.s.u32RendezvousLock, 0);
2162 pVCpu->vmm.s.fInRendezvous = false;
2163
2164 /*
2165 * Merge rcStrict, rcStrict2 and rcStrict3.
2166 */
2167 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2168 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
2169 if ( rcStrict2 != VINF_SUCCESS
2170 && ( rcStrict == VINF_SUCCESS
2171 || rcStrict > rcStrict2))
2172 rcStrict = rcStrict2;
2173 if ( rcStrict3 != VINF_SUCCESS
2174 && ( rcStrict == VINF_SUCCESS
2175 || rcStrict > rcStrict3))
2176 rcStrict = rcStrict3;
2177 Log(("VMMR3EmtRendezvous: %#x EMT#%u returns %Rrc\n", fFlags, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict)));
2178 }
2179
2180 AssertLogRelMsgReturn( rcStrict <= VINF_SUCCESS
2181 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
2182 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
2183 VERR_IPE_UNEXPECTED_INFO_STATUS);
2184 return VBOXSTRICTRC_VAL(rcStrict);
2185}
2186
2187
2188/**
2189 * Interface for vmR3SetHaltMethodU.
2190 *
2191 * @param pVCpu The cross context virtual CPU structure of the
2192 * calling EMT.
2193 * @param fMayHaltInRing0 The new state.
2194 * @param cNsSpinBlockThreshold The spin-vs-blocking threshold.
2195 * @thread EMT(pVCpu)
2196 *
2197 * @todo Move the EMT handling to VMM (or EM). I soooooo regret that VM
2198 * component.
2199 */
2200VMMR3_INT_DECL(void) VMMR3SetMayHaltInRing0(PVMCPU pVCpu, bool fMayHaltInRing0, uint32_t cNsSpinBlockThreshold)
2201{
2202 LogFlow(("VMMR3SetMayHaltInRing0(#%u, %d, %u)\n", pVCpu->idCpu, fMayHaltInRing0, cNsSpinBlockThreshold));
2203 pVCpu->vmm.s.fMayHaltInRing0 = fMayHaltInRing0;
2204 pVCpu->vmm.s.cNsSpinBlockThreshold = cNsSpinBlockThreshold;
2205}
2206
2207
2208/**
2209 * Read from the ring 0 jump buffer stack.
2210 *
2211 * @returns VBox status code.
2212 *
2213 * @param pVM The cross context VM structure.
2214 * @param idCpu The ID of the source CPU context (for the address).
2215 * @param R0Addr Where to start reading.
2216 * @param pvBuf Where to store the data we've read.
2217 * @param cbRead The number of bytes to read.
2218 */
2219VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead)
2220{
2221 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
2222 AssertReturn(pVCpu, VERR_INVALID_PARAMETER);
2223 AssertReturn(cbRead < ~(size_t)0 / 2, VERR_INVALID_PARAMETER);
2224
2225 /*
2226 * Hopefully we've got all the requested bits. If not supply what we
2227 * can and zero the remaining stuff.
2228 */
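    /* Note: off is unsigned, so an R0Addr below UnwindSp wraps around, fails the
       bounds check and takes the zero-fill path instead. */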
2229 RTHCUINTPTR off = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
2230 if (off < pVCpu->vmm.s.AssertJmpBuf.cbStackValid)
2231 {
2232 size_t const cbValid = pVCpu->vmm.s.AssertJmpBuf.cbStackValid - off;
2233 if (cbRead <= cbValid)
2234 {
2235 memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbRead);
2236 return VINF_SUCCESS;
2237 }
2238
2239 memcpy(pvBuf, &pVCpu->vmm.s.abAssertStack[off], cbValid);
2240 RT_BZERO((uint8_t *)pvBuf + cbValid, cbRead - cbValid);
2241 }
2242 else
2243 RT_BZERO(pvBuf, cbRead);
2244
2245 /*
2246 * Supply the setjmp return RIP/EIP if requested.
2247 */
2248 if ( pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation + sizeof(RTR0UINTPTR) > R0Addr
2249 && pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation < R0Addr + cbRead)
2250 {
2251 uint8_t const *pbSrc = (uint8_t const *)&pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue;
2252 size_t cbSrc = sizeof(pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcValue);
2253 size_t offDst = 0;
2254 if (R0Addr < pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation)
2255 offDst = pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation - R0Addr;
2256 else if (R0Addr > pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation)
2257 {
2258 size_t offSrc = R0Addr - pVCpu->vmm.s.AssertJmpBuf.UnwindRetPcLocation;
2259 Assert(offSrc < cbSrc);
2260 pbSrc -= offSrc;
2261 cbSrc -= offSrc;
2262 }
2263 if (cbSrc > cbRead - offDst)
2264 cbSrc = cbRead - offDst;
2265 memcpy((uint8_t *)pvBuf + offDst, pbSrc, cbSrc);
2266
2267 //if (cbSrc == cbRead)
2268 // rc = VINF_SUCCESS;
2269 }
2270
2271 return VINF_SUCCESS;
2272}
2273
2274
2275/**
2276 * Used by the DBGF stack unwinder to initialize the register state.
2277 *
2278 * @param pUVM The user mode VM handle.
2279 * @param idCpu The ID of the CPU being unwound.
2280 * @param pState The unwind state to initialize.
2281 */
2282VMMR3_INT_DECL(void) VMMR3InitR0StackUnwindState(PUVM pUVM, VMCPUID idCpu, struct RTDBGUNWINDSTATE *pState)
2283{
2284 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
2285 AssertReturnVoid(pVCpu);
2286
2287 /*
2288 * This would be all we really need here if we had proper unwind info (win64 only)...
2289 */
2290 pState->u.x86.auRegs[X86_GREG_xBP] = pVCpu->vmm.s.AssertJmpBuf.UnwindBp;
2291 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindSp;
2292 pState->uPc = pVCpu->vmm.s.AssertJmpBuf.UnwindPc;
2293
2294 /*
2295 * Locate the resume point on the stack.
2296 */
2297 uintptr_t off = 0;
2298
2299#ifdef RT_ARCH_AMD64
2300 /*
2301 * This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-amd64.asm exactly.
2302 */
2303# ifdef RT_OS_WINDOWS
2304 off += 0xa0; /* XMM6 thru XMM15 */
2305# endif
2306 pState->u.x86.uRFlags = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2307 off += 8;
2308 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2309 off += 8;
2310# ifdef RT_OS_WINDOWS
2311 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2312 off += 8;
2313 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2314 off += 8;
2315# endif
2316 pState->u.x86.auRegs[X86_GREG_x12] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2317 off += 8;
2318 pState->u.x86.auRegs[X86_GREG_x13] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2319 off += 8;
2320 pState->u.x86.auRegs[X86_GREG_x14] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2321 off += 8;
2322 pState->u.x86.auRegs[X86_GREG_x15] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2323 off += 8;
2324 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2325 off += 8;
2326 pState->uPc = *(uint64_t const *)&pVCpu->vmm.s.abAssertStack[off];
2327 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp;
2328
2329#elif defined(RT_ARCH_X86)
2330 /*
2331 * This code must match the vmmR0CallRing3LongJmp stack frame setup in VMMR0JmpA-x86.asm exactly.
2332 */
2333 pState->u.x86.uRFlags = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2334 off += 4;
2335 pState->u.x86.auRegs[X86_GREG_xBX] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2336 off += 4;
2337 pState->u.x86.auRegs[X86_GREG_xSI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2338 off += 4;
2339 pState->u.x86.auRegs[X86_GREG_xDI] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2340 off += 4;
2341 pState->u.x86.auRegs[X86_GREG_xBP] = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2342 off += 4;
2343 pState->uPc = *(uint32_t const *)&pVCpu->vmm.s.abAssertStack[off];
2344 pState->u.x86.auRegs[X86_GREG_xSP] = pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp;
2345#else
2346# error "Port me"
2347#endif
2348}
2349
2350
2351/**
2352 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
2353 *
2354 * @returns VBox status code.
2355 * @param pVM The cross context VM structure.
2356 * @param uOperation Operation to execute.
2357 * @param u64Arg Constant argument.
2358 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
2359 * details.
2360 */
2361VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
2362{
2363 PVMCPU pVCpu = VMMGetCpu(pVM);
2364 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
2365 return VMMR3CallR0Emt(pVM, pVCpu, (VMMR0OPERATION)uOperation, u64Arg, pReqHdr);
2366}
2367
2368
2369/**
2370 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
2371 *
2372 * @returns VBox status code.
2373 * @param pVM The cross context VM structure.
2374 * @param pVCpu The cross context virtual CPU structure.
2375 * @param enmOperation Operation to execute.
2376 * @param u64Arg Constant argument.
2377 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
2378 * details.
2379 */
2380VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
2381{
2382 /*
2383 * Call ring-0.
2384 */
2385#ifdef NO_SUPCALLR0VMM
2386 int rc = VERR_GENERAL_FAILURE;
2387#else
2388 int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, enmOperation, u64Arg, pReqHdr);
2389#endif
2390
2391 /*
2392 * Flush the logs and deal with ring-0 assertions.
2393 */
2394#ifdef LOG_ENABLED
2395 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
2396#endif
2397 VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
2398 if (rc != VERR_VMM_RING0_ASSERTION)
2399 {
2400 AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
2401 ("enmOperation=%u rc=%Rrc\n", enmOperation, rc),
2402 VERR_IPE_UNEXPECTED_INFO_STATUS);
2403 return rc;
2404 }
2405 return vmmR3HandleRing0Assert(pVM, pVCpu);
2406}
2407
2408
2409/**
2410 * Logs a ring-0 assertion ASAP after returning to ring-3.
2411 *
2412 * @returns VBox status code.
2413 * @param pVM The cross context VM structure.
2414 * @param pVCpu The cross context virtual CPU structure.
2415 */
2416static int vmmR3HandleRing0Assert(PVM pVM, PVMCPU pVCpu)
2417{
2418 RT_NOREF(pVCpu);
2419 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
2420 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
2421 return VERR_VMM_RING0_ASSERTION;
2422}
2423
2424
2425/**
2426 * Displays the force action flags.
2427 *
2428 * @param pVM The cross context VM structure.
2429 * @param pHlp The output helpers.
2430 * @param pszArgs The additional arguments (ignored).
2431 */
2432static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2433{
2434 int c;
2435 uint32_t f;
2436 NOREF(pszArgs);
2437
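    /* PRINT_FLAG prints the mnemonic of a set flag (wrapping every few entries) and
       clears it from 'f' so leftover bits can be reported as unknown afterwards. */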
2438#define PRINT_FLAG(prf,flag) do { \
2439 if (f & (prf##flag)) \
2440 { \
2441 static const char *s_psz = #flag; \
2442 if (!(c % 6)) \
2443 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz); \
2444 else \
2445 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2446 c++; \
2447 f &= ~(prf##flag); \
2448 } \
2449 } while (0)
2450
2451#define PRINT_GROUP(prf,grp,sfx) do { \
2452 if (f & (prf##grp##sfx)) \
2453 { \
2454 static const char *s_psz = #grp; \
2455 if (!(c % 5)) \
2456 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : " Groups:\n", s_psz); \
2457 else \
2458 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2459 c++; \
2460 } \
2461 } while (0)
2462
2463 /*
2464 * The global flags.
2465 */
2466 const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;
2467 pHlp->pfnPrintf(pHlp, "Global FFs: %#RX32", fGlobalForcedActions);
2468
2469 /* show the flag mnemonics */
2470 c = 0;
2471 f = fGlobalForcedActions;
2472 PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
2473 PRINT_FLAG(VM_FF_,PDM_QUEUES);
2474 PRINT_FLAG(VM_FF_,PDM_DMA);
2475 PRINT_FLAG(VM_FF_,DBGF);
2476 PRINT_FLAG(VM_FF_,REQUEST);
2477 PRINT_FLAG(VM_FF_,CHECK_VM_STATE);
2478 PRINT_FLAG(VM_FF_,RESET);
2479 PRINT_FLAG(VM_FF_,EMT_RENDEZVOUS);
2480 PRINT_FLAG(VM_FF_,PGM_NEED_HANDY_PAGES);
2481 PRINT_FLAG(VM_FF_,PGM_NO_MEMORY);
2482 PRINT_FLAG(VM_FF_,PGM_POOL_FLUSH_PENDING);
2483 PRINT_FLAG(VM_FF_,DEBUG_SUSPEND);
2484 if (f)
2485 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2486 else
2487 pHlp->pfnPrintf(pHlp, "\n");
2488
2489 /* the groups */
2490 c = 0;
2491 f = fGlobalForcedActions;
2492 PRINT_GROUP(VM_FF_,EXTERNAL_SUSPENDED,_MASK);
2493 PRINT_GROUP(VM_FF_,EXTERNAL_HALTED,_MASK);
2494 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE,_MASK);
2495 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2496 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_POST,_MASK);
2497 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY_POST,_MASK);
2498 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY,_MASK);
2499 PRINT_GROUP(VM_FF_,ALL_REM,_MASK);
2500 if (c)
2501 pHlp->pfnPrintf(pHlp, "\n");
2502
2503 /*
2504 * Per CPU flags.
2505 */
2506 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2507 {
2508 PVMCPU pVCpu = pVM->apCpusR3[i];
2509 const uint64_t fLocalForcedActions = pVCpu->fLocalForcedActions;
2510 pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX64", i, fLocalForcedActions);
2511
2512 /* show the flag mnemonics */
2513 c = 0;
2514 f = fLocalForcedActions;
2515 PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
2516 PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
2517 PRINT_FLAG(VMCPU_FF_,TIMER);
2518 PRINT_FLAG(VMCPU_FF_,INTERRUPT_NMI);
2519 PRINT_FLAG(VMCPU_FF_,INTERRUPT_SMI);
2520 PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
2521 PRINT_FLAG(VMCPU_FF_,UNHALT);
2522 PRINT_FLAG(VMCPU_FF_,IEM);
2523 PRINT_FLAG(VMCPU_FF_,UPDATE_APIC);
2524 PRINT_FLAG(VMCPU_FF_,DBGF);
2525 PRINT_FLAG(VMCPU_FF_,REQUEST);
2526 PRINT_FLAG(VMCPU_FF_,HM_UPDATE_CR3);
2527 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
2528 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
2529 PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
2530 PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
2531 PRINT_FLAG(VMCPU_FF_,BLOCK_NMIS);
2532 PRINT_FLAG(VMCPU_FF_,TO_R3);
2533 PRINT_FLAG(VMCPU_FF_,IOM);
2534 if (f)
2535 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX64\n", c ? "," : "", f);
2536 else
2537 pHlp->pfnPrintf(pHlp, "\n");
2538
2539 if (fLocalForcedActions & VMCPU_FF_INHIBIT_INTERRUPTS)
2540 pHlp->pfnPrintf(pHlp, " intr inhibit RIP: %RGp\n", EMGetInhibitInterruptsPC(pVCpu));
2541
2542 /* the groups */
2543 c = 0;
2544 f = fLocalForcedActions;
2545 PRINT_GROUP(VMCPU_FF_,EXTERNAL_SUSPENDED,_MASK);
2546 PRINT_GROUP(VMCPU_FF_,EXTERNAL_HALTED,_MASK);
2547 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE,_MASK);
2548 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2549 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_POST,_MASK);
2550 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY_POST,_MASK);
2551 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
2552 PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
2553 PRINT_GROUP(VMCPU_FF_,HM_TO_R3,_MASK);
2554 PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
2555 if (c)
2556 pHlp->pfnPrintf(pHlp, "\n");
2557 }
2558
2559#undef PRINT_FLAG
2560#undef PRINT_GROUP
2561}
2562