VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMM.cpp@ 55966

Last change on this file since 55966 was 55863, checked in by vboxsync, 10 years ago

IPRT,SUPDrv,VMM: Revised the context switching hook interface. Do less work when enabling the hook (formerly 'registration'). Drop the reference counting (kept internally for solaris) as it complicates restrictions wrt destroying enabled hooks. Bumped support driver version.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 90.2 KB
1/* $Id: VMM.cpp 55863 2015-05-14 18:29:34Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18//#define NO_SUPCALLR0VMM
19
20/** @page pg_vmm VMM - The Virtual Machine Monitor
21 *
 22 * The VMM component is two things at the moment: it's a component doing a few
23 * management and routing tasks, and it's the whole virtual machine monitor
24 * thing. For hysterical reasons, it is not doing all the management that one
 25 * would expect; this is instead done by @ref pg_vm. We'll address this
26 * misdesign eventually.
27 *
28 * @see grp_vmm, grp_vm
29 *
30 *
31 * @section sec_vmmstate VMM State
32 *
33 * @image html VM_Statechart_Diagram.gif
34 *
35 * To be written.
36 *
37 *
38 * @subsection subsec_vmm_init VMM Initialization
39 *
40 * To be written.
41 *
42 *
43 * @subsection subsec_vmm_term VMM Termination
44 *
45 * To be written.
46 *
47 *
 48 * @section sec_vmm_limits VMM Limits
49 *
 50 * There are various resource limits imposed by the VMM and its
51 * sub-components. We'll list some of them here.
52 *
53 * On 64-bit hosts:
54 * - Max 8191 VMs. Imposed by GVMM's handle allocation (GVMM_MAX_HANDLES),
55 * can be increased up to 64K - 1.
56 * - Max 16TB - 64KB of the host memory can be used for backing VM RAM and
57 * ROM pages. The limit is imposed by the 32-bit page ID used by GMM.
58 * - A VM can be assigned all the memory we can use (16TB), however, the
59 * Main API will restrict this to 2TB (MM_RAM_MAX_IN_MB).
60 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
61 *
62 * On 32-bit hosts:
 63 * - Max 127 VMs. Imposed by GMM's per-page structure.
64 * - Max 64GB - 64KB of the host memory can be used for backing VM RAM and
65 * ROM pages. The limit is imposed by the 28-bit page ID used
66 * internally in GMM. It is also limited by PAE.
67 * - A VM can be assigned all the memory GMM can allocate, however, the
68 * Main API will restrict this to 3584MB (MM_RAM_MAX_IN_MB).
69 * - Max 32 virtual CPUs (VMM_MAX_CPU_COUNT).
70 *
71 */
72
73/*******************************************************************************
74* Header Files *
75*******************************************************************************/
76#define LOG_GROUP LOG_GROUP_VMM
77#include <VBox/vmm/vmm.h>
78#include <VBox/vmm/vmapi.h>
79#include <VBox/vmm/pgm.h>
80#include <VBox/vmm/cfgm.h>
81#include <VBox/vmm/pdmqueue.h>
82#include <VBox/vmm/pdmcritsect.h>
83#include <VBox/vmm/pdmcritsectrw.h>
84#include <VBox/vmm/pdmapi.h>
85#include <VBox/vmm/cpum.h>
86#include <VBox/vmm/gim.h>
87#include <VBox/vmm/mm.h>
88#include <VBox/vmm/iom.h>
89#include <VBox/vmm/trpm.h>
90#include <VBox/vmm/selm.h>
91#include <VBox/vmm/em.h>
92#include <VBox/sup.h>
93#include <VBox/vmm/dbgf.h>
94#include <VBox/vmm/csam.h>
95#include <VBox/vmm/patm.h>
96#ifdef VBOX_WITH_REM
97# include <VBox/vmm/rem.h>
98#endif
99#include <VBox/vmm/ssm.h>
100#include <VBox/vmm/ftm.h>
101#include <VBox/vmm/tm.h>
102#include "VMMInternal.h"
103#include "VMMSwitcher.h"
104#include <VBox/vmm/vm.h>
105#include <VBox/vmm/uvm.h>
106
107#include <VBox/err.h>
108#include <VBox/param.h>
109#include <VBox/version.h>
110#include <VBox/vmm/hm.h>
111#include <iprt/assert.h>
112#include <iprt/alloc.h>
113#include <iprt/asm.h>
114#include <iprt/time.h>
115#include <iprt/semaphore.h>
116#include <iprt/stream.h>
117#include <iprt/string.h>
118#include <iprt/stdarg.h>
119#include <iprt/ctype.h>
120#include <iprt/x86.h>
121
122
123
124/*******************************************************************************
125* Defined Constants And Macros *
126*******************************************************************************/
127/** The saved state version. */
128#define VMM_SAVED_STATE_VERSION 4
129/** The saved state version used by v3.0 and earlier. (Teleportation) */
130#define VMM_SAVED_STATE_VERSION_3_0 3
131
132
133/*******************************************************************************
134* Internal Functions *
135*******************************************************************************/
136static int vmmR3InitStacks(PVM pVM);
137static int vmmR3InitLoggers(PVM pVM);
138static void vmmR3InitRegisterStats(PVM pVM);
139static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
140static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
141static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
142static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu);
143static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
144
145
146/**
147 * Initializes the VMM.
148 *
149 * @returns VBox status code.
150 * @param pVM Pointer to the VM.
151 */
152VMMR3_INT_DECL(int) VMMR3Init(PVM pVM)
153{
154 LogFlow(("VMMR3Init\n"));
155
156 /*
157 * Assert alignment, sizes and order.
158 */
159 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
160 AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
161 AssertCompile(sizeof(pVM->aCpus[0].vmm.s) <= sizeof(pVM->aCpus[0].vmm.padding));
162
163 /*
164 * Init basic VM VMM members.
165 */
166 pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
167 pVM->vmm.s.pahEvtRendezvousEnterOrdered = NULL;
168 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
169 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
170 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
171 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
172
173 /** @cfgm{/YieldEMTInterval, uint32_t, 1, UINT32_MAX, 23, ms}
174 * The EMT yield interval. The EMT yielding is a hack we employ to play a
175 * bit nicer with the rest of the system (like for instance the GUI).
176 */
177 int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies,
178 23 /* Value arrived at after experimenting with the grub boot prompt. */);
179 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Rrc\n", rc), rc);
180
181
182 /** @cfgm{/VMM/UsePeriodicPreemptionTimers, boolean, true}
183 * Controls whether we employ per-cpu preemption timers to limit the time
184 * spent executing guest code. This option is not available on all
 185 * platforms, in which case the setting is silently ignored. If we are
186 * running in VT-x mode, we will use the VMX-preemption timer instead of
187 * this one when possible.
188 */
189 PCFGMNODE pCfgVMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "VMM");
190 rc = CFGMR3QueryBoolDef(pCfgVMM, "UsePeriodicPreemptionTimers", &pVM->vmm.s.fUsePeriodicPreemptionTimers, true);
191 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"VMM/UsePeriodicPreemptionTimers\", rc=%Rrc\n", rc), rc);
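 /* Illustrative example only, assuming the usual "VBoxInternal/" extradata-to-CFGM
  * mapping (the VM name and values below are placeholders): the two keys queried
  * above can be overridden from the host side like this:
  *     VBoxManage setextradata "MyVM" "VBoxInternal/YieldEMTInterval" 23
  *     VBoxManage setextradata "MyVM" "VBoxInternal/VMM/UsePeriodicPreemptionTimers" 0
  */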
192
193 /*
194 * Initialize the VMM rendezvous semaphores.
195 */
196 pVM->vmm.s.pahEvtRendezvousEnterOrdered = (PRTSEMEVENT)MMR3HeapAlloc(pVM, MM_TAG_VMM, sizeof(RTSEMEVENT) * pVM->cCpus);
197 if (!pVM->vmm.s.pahEvtRendezvousEnterOrdered)
198 return VERR_NO_MEMORY;
199 for (VMCPUID i = 0; i < pVM->cCpus; i++)
200 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
201 for (VMCPUID i = 0; i < pVM->cCpus; i++)
202 {
203 rc = RTSemEventCreate(&pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
204 AssertRCReturn(rc, rc);
205 }
206 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousEnterOneByOne);
207 AssertRCReturn(rc, rc);
208 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
209 AssertRCReturn(rc, rc);
210 rc = RTSemEventMultiCreate(&pVM->vmm.s.hEvtMulRendezvousDone);
211 AssertRCReturn(rc, rc);
212 rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
213 AssertRCReturn(rc, rc);
214
215 /*
216 * Register the saved state data unit.
217 */
218 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
219 NULL, NULL, NULL,
220 NULL, vmmR3Save, NULL,
221 NULL, vmmR3Load, NULL);
222 if (RT_FAILURE(rc))
223 return rc;
224
225 /*
226 * Register the Ring-0 VM handle with the session for fast ioctl calls.
227 */
228 rc = SUPR3SetVMForFastIOCtl(pVM->pVMR0);
229 if (RT_FAILURE(rc))
230 return rc;
231
232 /*
233 * Init various sub-components.
234 */
235 rc = vmmR3SwitcherInit(pVM);
236 if (RT_SUCCESS(rc))
237 {
238 rc = vmmR3InitStacks(pVM);
239 if (RT_SUCCESS(rc))
240 {
241 rc = vmmR3InitLoggers(pVM);
242
243#ifdef VBOX_WITH_NMI
244 /*
245 * Allocate mapping for the host APIC.
246 */
247 if (RT_SUCCESS(rc))
248 {
249 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
250 AssertRC(rc);
251 }
252#endif
253 if (RT_SUCCESS(rc))
254 {
255 /*
256 * Debug info and statistics.
257 */
258 DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the current Forced actions Flags.", vmmR3InfoFF);
259 vmmR3InitRegisterStats(pVM);
260 vmmInitFormatTypes();
261
262 return VINF_SUCCESS;
263 }
264 }
265 /** @todo: Need failure cleanup. */
266
267 //more todo in here?
268 //if (RT_SUCCESS(rc))
269 //{
270 //}
271 //int rc2 = vmmR3TermCoreCode(pVM);
272 //AssertRC(rc2));
273 }
274
275 return rc;
276}
277
278
279/**
280 * Allocate & setup the VMM RC stack(s) (for EMTs).
281 *
282 * The stacks are also used for long jumps in Ring-0.
283 *
284 * @returns VBox status code.
285 * @param pVM Pointer to the VM.
286 *
 287 * @remarks The optional guard page gets its protection set up during R3 init
288 * completion because of init order issues.
289 */
290static int vmmR3InitStacks(PVM pVM)
291{
292 int rc = VINF_SUCCESS;
293#ifdef VMM_R0_SWITCH_STACK
294 uint32_t fFlags = MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
295#else
296 uint32_t fFlags = 0;
297#endif
298
299 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
300 {
301 PVMCPU pVCpu = &pVM->aCpus[idCpu];
302
303#ifdef VBOX_STRICT_VMM_STACK
304 rc = MMR3HyperAllocOnceNoRelEx(pVM, PAGE_SIZE + VMM_STACK_SIZE + PAGE_SIZE,
305#else
306 rc = MMR3HyperAllocOnceNoRelEx(pVM, VMM_STACK_SIZE,
307#endif
308 PAGE_SIZE, MM_TAG_VMM, fFlags, (void **)&pVCpu->vmm.s.pbEMTStackR3);
309 if (RT_SUCCESS(rc))
310 {
311#ifdef VBOX_STRICT_VMM_STACK
312 pVCpu->vmm.s.pbEMTStackR3 += PAGE_SIZE;
313#endif
314#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
315 /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
316 if (!HMIsEnabled(pVM))
317 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = NIL_RTR0PTR;
318 else
319#endif
320 pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
321 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
322 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
323 AssertRelease(pVCpu->vmm.s.pbEMTStackRC);
324
325 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
326 }
327 }
328
329 return rc;
330}
331
332
333/**
334 * Initialize the loggers.
335 *
336 * @returns VBox status code.
337 * @param pVM Pointer to the VM.
338 */
339static int vmmR3InitLoggers(PVM pVM)
340{
341 int rc;
342#define RTLogCalcSizeForR0(cGroups, fFlags) (RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[cGroups]) + PAGE_SIZE)
343
344 /*
345 * Allocate RC & R0 Logger instances (they are finalized in the relocator).
346 */
347#ifdef LOG_ENABLED
348 PRTLOGGER pLogger = RTLogDefaultInstance();
349 if (pLogger)
350 {
351 if (!HMIsEnabled(pVM))
352 {
353 pVM->vmm.s.cbRCLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pLogger->cGroups]);
354 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCLoggerR3);
355 if (RT_FAILURE(rc))
356 return rc;
357 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
358 }
359
360# ifdef VBOX_WITH_R0_LOGGING
361 size_t const cbLogger = RTLogCalcSizeForR0(pLogger->cGroups, 0);
362 for (VMCPUID i = 0; i < pVM->cCpus; i++)
363 {
364 PVMCPU pVCpu = &pVM->aCpus[i];
365 rc = MMR3HyperAllocOnceNoRelEx(pVM, cbLogger, PAGE_SIZE, MM_TAG_VMM, MMHYPER_AONR_FLAGS_KERNEL_MAPPING,
366 (void **)&pVCpu->vmm.s.pR0LoggerR3);
367 if (RT_FAILURE(rc))
368 return rc;
369 pVCpu->vmm.s.pR0LoggerR3->pVM = pVM->pVMR0;
370 //pVCpu->vmm.s.pR0LoggerR3->fCreated = false;
371 pVCpu->vmm.s.pR0LoggerR3->cbLogger = (uint32_t)cbLogger;
372 pVCpu->vmm.s.pR0LoggerR0 = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pR0LoggerR3);
373 }
374# endif
375 }
376#endif /* LOG_ENABLED */
377
378#ifdef VBOX_WITH_RC_RELEASE_LOGGING
379 /*
380 * Allocate RC release logger instances (finalized in the relocator).
381 */
382 if (!HMIsEnabled(pVM))
383 {
384 PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
385 if (pRelLogger)
386 {
387 pVM->vmm.s.cbRCRelLogger = RT_OFFSETOF(RTLOGGERRC, afGroups[pRelLogger->cGroups]);
388 rc = MMR3HyperAllocOnceNoRel(pVM, pVM->vmm.s.cbRCRelLogger, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRCRelLoggerR3);
389 if (RT_FAILURE(rc))
390 return rc;
391 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
392 }
393 }
394#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
395 return VINF_SUCCESS;
396}
397
398
399/**
 400 * VMMR3Init worker that registers the statistics with STAM.
401 *
402 * @param pVM The shared VM structure.
403 */
404static void vmmR3InitRegisterStats(PVM pVM)
405{
406 /*
407 * Statistics.
408 */
409 STAM_REG(pVM, &pVM->vmm.s.StatRunRC, STAMTYPE_COUNTER, "/VMM/RunRC", STAMUNIT_OCCURENCES, "Number of context switches.");
410 STAM_REG(pVM, &pVM->vmm.s.StatRZRetNormal, STAMTYPE_COUNTER, "/VMM/RZRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
411 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterrupt, STAMTYPE_COUNTER, "/VMM/RZRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
412 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
413 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGuestTrap, STAMTYPE_COUNTER, "/VMM/RZRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
414 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
415 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
416 STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector, STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
417 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap, STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
418 STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
419 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOBlockEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/EmulateIOBlock", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_IO_BLOCK returns.");
420 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/RZRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
421 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIORead, STAMTYPE_COUNTER, "/VMM/RZRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_READ returns.");
422 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_IOPORT_WRITE returns.");
423 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIORead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ returns.");
424 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_WRITE returns.");
425 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_R3_MMIO_READ_WRITE returns.");
426 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
427 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
428 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRRead, STAMTYPE_COUNTER, "/VMM/RZRet/MSRRead", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_READ returns.");
429 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMSRWrite, STAMTYPE_COUNTER, "/VMM/RZRet/MSRWrite", STAMUNIT_OCCURENCES, "Number of VINF_CPUM_R3_MSR_WRITE returns.");
430 STAM_REG(pVM, &pVM->vmm.s.StatRZRetLDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
431 STAM_REG(pVM, &pVM->vmm.s.StatRZRetGDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
432 STAM_REG(pVM, &pVM->vmm.s.StatRZRetIDTFault, STAMTYPE_COUNTER, "/VMM/RZRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
433 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTSSFault, STAMTYPE_COUNTER, "/VMM/RZRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
434 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPDFault, STAMTYPE_COUNTER, "/VMM/RZRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
435 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCSAMTask, STAMTYPE_COUNTER, "/VMM/RZRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
436 STAM_REG(pVM, &pVM->vmm.s.StatRZRetSyncCR3, STAMTYPE_COUNTER, "/VMM/RZRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
437 STAM_REG(pVM, &pVM->vmm.s.StatRZRetMisc, STAMTYPE_COUNTER, "/VMM/RZRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
438 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchInt3, STAMTYPE_COUNTER, "/VMM/RZRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
439 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchPF, STAMTYPE_COUNTER, "/VMM/RZRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
440 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchGP, STAMTYPE_COUNTER, "/VMM/RZRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
441 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/RZRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
442 STAM_REG(pVM, &pVM->vmm.s.StatRZRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/RZRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
443 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
444 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Unknown, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Unknown", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
445 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3TMVirt, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/TMVirt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
446 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3HandyPages, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Handy", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
447 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3PDMQueues, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/PDMQueue", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
448 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Rendezvous, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Rendezvous", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
449 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3Timer, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/Timer", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
450 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3DMA, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/DMA", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
451 STAM_REG(pVM, &pVM->vmm.s.StatRZRetToR3CritSect, STAMTYPE_COUNTER, "/VMM/RZRet/ToR3/CritSect", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
452 STAM_REG(pVM, &pVM->vmm.s.StatRZRetTimerPending, STAMTYPE_COUNTER, "/VMM/RZRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
453 STAM_REG(pVM, &pVM->vmm.s.StatRZRetInterruptPending, STAMTYPE_COUNTER, "/VMM/RZRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
454 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
455 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
456 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
457 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
458 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
459 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
460 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_LOCK calls.");
461 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMCritSectEnter, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMCritSectEnter", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_CRITSECT_ENTER calls.");
462 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_LOCK calls.");
463 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMPoolGrow", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_POOL_GROW calls.");
464 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMMapChunk, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMMapChunk", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_MAP_CHUNK calls.");
465 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPGMAllocHandy, STAMTYPE_COUNTER, "/VMM/RZCallR3/PGMAllocHandy", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES calls.");
466 STAM_REG(pVM, &pVM->vmm.s.StatRZCallRemReplay, STAMTYPE_COUNTER, "/VMM/RZCallR3/REMReplay", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS calls.");
467 STAM_REG(pVM, &pVM->vmm.s.StatRZCallLogFlush, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMMLogFlush", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VMM_LOGGER_FLUSH calls.");
468 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMSetError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_ERROR calls.");
469 STAM_REG(pVM, &pVM->vmm.s.StatRZCallVMSetRuntimeError, STAMTYPE_COUNTER, "/VMM/RZCallR3/VMRuntimeError", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_VM_SET_RUNTIME_ERROR calls.");
470
471#ifdef VBOX_WITH_STATISTICS
472 for (VMCPUID i = 0; i < pVM->cCpus; i++)
473 {
474 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedMax, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max amount of stack used.", "/VMM/Stack/CPU%u/Max", i);
475 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cbUsedAvg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Average stack usage.", "/VMM/Stack/CPU%u/Avg", i);
476 STAMR3RegisterF(pVM, &pVM->aCpus[i].vmm.s.CallRing3JmpBufR0.cUsedTotal, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of stack usages.", "/VMM/Stack/CPU%u/Uses", i);
477 }
478#endif
479}
480
481
482/**
483 * Initializes the R0 VMM.
484 *
485 * @returns VBox status code.
486 * @param pVM Pointer to the VM.
487 */
488VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM)
489{
490 int rc;
491 PVMCPU pVCpu = VMMGetCpu(pVM);
492 Assert(pVCpu && pVCpu->idCpu == 0);
493
494#ifdef LOG_ENABLED
495 /*
496 * Initialize the ring-0 logger if we haven't done so yet.
497 */
498 if ( pVCpu->vmm.s.pR0LoggerR3
499 && !pVCpu->vmm.s.pR0LoggerR3->fCreated)
500 {
501 rc = VMMR3UpdateLoggers(pVM);
502 if (RT_FAILURE(rc))
503 return rc;
504 }
505#endif
506
507 /*
508 * Call Ring-0 entry with init code.
509 */
510 for (;;)
511 {
512#ifdef NO_SUPCALLR0VMM
513 //rc = VERR_GENERAL_FAILURE;
514 rc = VINF_SUCCESS;
515#else
516 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT,
517 RT_MAKE_U64(VMMGetSvnRev(), vmmGetBuildType()), NULL);
518#endif
519 /*
520 * Flush the logs.
521 */
522#ifdef LOG_ENABLED
523 if ( pVCpu->vmm.s.pR0LoggerR3
524 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
525 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
526#endif
527 if (rc != VINF_VMM_CALL_HOST)
528 break;
529 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
530 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
531 break;
532 /* Resume R0 */
533 }
534
535 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
536 {
537 LogRel(("R0 init failed, rc=%Rra\n", rc));
538 if (RT_SUCCESS(rc))
539 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
540 }
541
542 /* Log whether thread-context hooks are used (on Linux this can depend on how the kernel is configured). */
543 if (pVM->aCpus[0].vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
544 LogRel(("VMM: Thread-context hooks enabled!\n"));
545 else
546 LogRel(("VMM: Thread-context hooks unavailable\n"));
547
548 return rc;
549}
550
551
552#ifdef VBOX_WITH_RAW_MODE
553/**
554 * Initializes the RC VMM.
555 *
556 * @returns VBox status code.
557 * @param pVM Pointer to the VM.
558 */
559VMMR3_INT_DECL(int) VMMR3InitRC(PVM pVM)
560{
561 PVMCPU pVCpu = VMMGetCpu(pVM);
562 Assert(pVCpu && pVCpu->idCpu == 0);
563
 564 /* When using hardware-assisted execution (HM), there's no need to init RC. */
565 if (HMIsEnabled(pVM))
566 return VINF_SUCCESS;
567
568 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
569
570 /*
571 * Call VMMGCInit():
572 * -# resolve the address.
573 * -# setup stackframe and EIP to use the trampoline.
574 * -# do a generic hypervisor call.
575 */
576 RTRCPTR RCPtrEP;
577 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
578 if (RT_SUCCESS(rc))
579 {
580 CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
581 uint64_t u64TS = RTTimeProgramStartNanoTS();
582 CPUMPushHyper(pVCpu, (uint32_t)(u64TS >> 32)); /* Param 4: The program startup TS - Hi. */
583 CPUMPushHyper(pVCpu, (uint32_t)u64TS); /* Param 4: The program startup TS - Lo. */
584 CPUMPushHyper(pVCpu, vmmGetBuildType()); /* Param 3: Version argument. */
585 CPUMPushHyper(pVCpu, VMMGetSvnRev()); /* Param 2: Version argument. */
586 CPUMPushHyper(pVCpu, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */
587 CPUMPushHyper(pVCpu, pVM->pVMRC); /* Param 0: pVM */
588 CPUMPushHyper(pVCpu, 6 * sizeof(RTRCPTR)); /* trampoline param: stacksize. */
589 CPUMPushHyper(pVCpu, RCPtrEP); /* Call EIP. */
590 CPUMSetHyperEIP(pVCpu, pVM->vmm.s.pfnCallTrampolineRC);
591 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
592
593 for (;;)
594 {
595#ifdef NO_SUPCALLR0VMM
596 //rc = VERR_GENERAL_FAILURE;
597 rc = VINF_SUCCESS;
598#else
599 rc = SUPR3CallVMMR0(pVM->pVMR0, 0 /* VCPU 0 */, VMMR0_DO_CALL_HYPERVISOR, NULL);
600#endif
601#ifdef LOG_ENABLED
602 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
603 if ( pLogger
604 && pLogger->offScratch > 0)
605 RTLogFlushRC(NULL, pLogger);
606#endif
607#ifdef VBOX_WITH_RC_RELEASE_LOGGING
608 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
609 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
610 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
611#endif
612 if (rc != VINF_VMM_CALL_HOST)
613 break;
614 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
615 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
616 break;
617 }
618
619 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
620 {
621 VMMR3FatalDump(pVM, pVCpu, rc);
622 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
623 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
624 }
625 AssertRC(rc);
626 }
627 return rc;
628}
629#endif /* VBOX_WITH_RAW_MODE */
630
631
632/**
633 * Called when an init phase completes.
634 *
635 * @returns VBox status code.
636 * @param pVM Pointer to the VM.
637 * @param enmWhat Which init phase.
638 */
639VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
640{
641 int rc = VINF_SUCCESS;
642
643 switch (enmWhat)
644 {
645 case VMINITCOMPLETED_RING3:
646 {
647 /*
648 * CPUM's post-initialization (APIC base MSR caching).
649 */
650 rc = CPUMR3InitCompleted(pVM);
651 AssertRCReturn(rc, rc);
652
653 /*
654 * Set page attributes to r/w for stack pages.
655 */
656 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
657 {
658 rc = PGMMapSetPage(pVM, pVM->aCpus[idCpu].vmm.s.pbEMTStackRC, VMM_STACK_SIZE,
659 X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
660 AssertRCReturn(rc, rc);
661 }
662
663 /*
664 * Create the EMT yield timer.
665 */
666 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
667 AssertRCReturn(rc, rc);
668
669 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
670 AssertRCReturn(rc, rc);
671
672#ifdef VBOX_WITH_NMI
673 /*
674 * Map the host APIC into GC - This is AMD/Intel + Host OS specific!
675 */
676 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
677 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
678 AssertRCReturn(rc, rc);
679#endif
680
681#ifdef VBOX_STRICT_VMM_STACK
682 /*
 683 * Set up the stack guard pages: two inaccessible pages, one on each side of
 684 * the stack, to catch over/under-flows.
685 */
686 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
687 {
688 uint8_t *pbEMTStackR3 = pVM->aCpus[idCpu].vmm.s.pbEMTStackR3;
689
690 memset(pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
691 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, true /*fSet*/);
692
693 memset(pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
694 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, true /*fSet*/);
695 }
696 pVM->vmm.s.fStackGuardsStationed = true;
697#endif
698 break;
699 }
700
701 case VMINITCOMPLETED_HM:
702 {
703 /*
704 * Disable the periodic preemption timers if we can use the
705 * VMX-preemption timer instead.
706 */
707 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
708 && HMR3IsVmxPreemptionTimerUsed(pVM))
709 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
710 LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
711
712 /*
713 * Last chance for GIM to update its CPUID leaves if it requires
714 * knowledge/information from HM initialization.
715 */
716 rc = GIMR3InitCompleted(pVM);
717 AssertRCReturn(rc, rc);
718
719 /*
720 * CPUM's post-initialization (print CPUIDs).
721 */
722 CPUMR3LogCpuIds(pVM);
723 break;
724 }
725
726 default: /* shuts up gcc */
727 break;
728 }
729
730 return rc;
731}
732
733
734/**
735 * Terminate the VMM bits.
736 *
737 * @returns VINF_SUCCESS.
738 * @param pVM Pointer to the VM.
739 */
740VMMR3_INT_DECL(int) VMMR3Term(PVM pVM)
741{
742 PVMCPU pVCpu = VMMGetCpu(pVM);
743 Assert(pVCpu && pVCpu->idCpu == 0);
744
745 /*
746 * Call Ring-0 entry with termination code.
747 */
748 int rc;
749 for (;;)
750 {
751#ifdef NO_SUPCALLR0VMM
752 //rc = VERR_GENERAL_FAILURE;
753 rc = VINF_SUCCESS;
754#else
755 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_TERM, 0, NULL);
756#endif
757 /*
758 * Flush the logs.
759 */
760#ifdef LOG_ENABLED
761 if ( pVCpu->vmm.s.pR0LoggerR3
762 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
763 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
764#endif
765 if (rc != VINF_VMM_CALL_HOST)
766 break;
767 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
768 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
769 break;
770 /* Resume R0 */
771 }
772 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
773 {
774 LogRel(("VMMR3Term: R0 term failed, rc=%Rra. (warning)\n", rc));
775 if (RT_SUCCESS(rc))
776 rc = VERR_IPE_UNEXPECTED_INFO_STATUS;
777 }
778
779 for (VMCPUID i = 0; i < pVM->cCpus; i++)
780 {
781 RTSemEventDestroy(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i]);
782 pVM->vmm.s.pahEvtRendezvousEnterOrdered[i] = NIL_RTSEMEVENT;
783 }
784 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
785 pVM->vmm.s.hEvtRendezvousEnterOneByOne = NIL_RTSEMEVENT;
786 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
787 pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce = NIL_RTSEMEVENTMULTI;
788 RTSemEventMultiDestroy(pVM->vmm.s.hEvtMulRendezvousDone);
789 pVM->vmm.s.hEvtMulRendezvousDone = NIL_RTSEMEVENTMULTI;
790 RTSemEventDestroy(pVM->vmm.s.hEvtRendezvousDoneCaller);
791 pVM->vmm.s.hEvtRendezvousDoneCaller = NIL_RTSEMEVENT;
792
793#ifdef VBOX_STRICT_VMM_STACK
794 /*
795 * Make the two stack guard pages present again.
796 */
797 if (pVM->vmm.s.fStackGuardsStationed)
798 {
799 for (VMCPUID i = 0; i < pVM->cCpus; i++)
800 {
801 uint8_t *pbEMTStackR3 = pVM->aCpus[i].vmm.s.pbEMTStackR3;
802 MMR3HyperSetGuard(pVM, pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, false /*fSet*/);
803 MMR3HyperSetGuard(pVM, pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, false /*fSet*/);
804 }
805 pVM->vmm.s.fStackGuardsStationed = false;
806 }
807#endif
808
809 vmmTermFormatTypes();
810 return rc;
811}
812
813
814/**
815 * Applies relocations to data and code managed by this
816 * component. This function will be called at init and
 817 * whenever the VMM needs to relocate itself inside the GC.
818 *
819 * The VMM will need to apply relocations to the core code.
820 *
821 * @param pVM Pointer to the VM.
822 * @param offDelta The relocation delta.
823 */
824VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
825{
826 LogFlow(("VMMR3Relocate: offDelta=%RGv\n", offDelta));
827
828 /*
829 * Recalc the RC address.
830 */
831#ifdef VBOX_WITH_RAW_MODE
832 pVM->vmm.s.pvCoreCodeRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pvCoreCodeR3);
833#endif
834
835 /*
836 * The stack.
837 */
838 for (VMCPUID i = 0; i < pVM->cCpus; i++)
839 {
840 PVMCPU pVCpu = &pVM->aCpus[i];
841
842 CPUMSetHyperESP(pVCpu, CPUMGetHyperESP(pVCpu) + offDelta);
843
844 pVCpu->vmm.s.pbEMTStackRC = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
845 pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
846 }
847
848 /*
849 * All the switchers.
850 */
851 vmmR3SwitcherRelocate(pVM, offDelta);
852
853 /*
854 * Get other RC entry points.
855 */
856 if (!HMIsEnabled(pVM))
857 {
858 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMRCResumeGuest);
859 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Rra\n", rc));
860
861 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMRCResumeGuestV86);
862 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Rra\n", rc));
863 }
864
865 /*
866 * Update the logger.
867 */
868 VMMR3UpdateLoggers(pVM);
869}
870
871
872/**
873 * Updates the settings for the RC and R0 loggers.
874 *
875 * @returns VBox status code.
876 * @param pVM Pointer to the VM.
877 */
878VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM)
879{
880 /*
881 * Simply clone the logger instance (for RC).
882 */
883 int rc = VINF_SUCCESS;
884 RTRCPTR RCPtrLoggerFlush = 0;
885
886 if ( pVM->vmm.s.pRCLoggerR3
887#ifdef VBOX_WITH_RC_RELEASE_LOGGING
888 || pVM->vmm.s.pRCRelLoggerR3
889#endif
890 )
891 {
892 Assert(!HMIsEnabled(pVM));
893 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &RCPtrLoggerFlush);
894 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Rra\n", rc));
895 }
896
897 if (pVM->vmm.s.pRCLoggerR3)
898 {
899 Assert(!HMIsEnabled(pVM));
900 RTRCPTR RCPtrLoggerWrapper = 0;
901 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &RCPtrLoggerWrapper);
902 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Rra\n", rc));
903
904 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
905 rc = RTLogCloneRC(NULL /* default */, pVM->vmm.s.pRCLoggerR3, pVM->vmm.s.cbRCLogger,
906 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
907 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
908 }
909
910#ifdef VBOX_WITH_RC_RELEASE_LOGGING
911 if (pVM->vmm.s.pRCRelLoggerR3)
912 {
913 Assert(!HMIsEnabled(pVM));
914 RTRCPTR RCPtrLoggerWrapper = 0;
915 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &RCPtrLoggerWrapper);
916 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Rra\n", rc));
917
918 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
919 rc = RTLogCloneRC(RTLogRelDefaultInstance(), pVM->vmm.s.pRCRelLoggerR3, pVM->vmm.s.cbRCRelLogger,
920 RCPtrLoggerWrapper, RCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
921 AssertReleaseMsgRC(rc, ("RTLogCloneRC failed! rc=%Rra\n", rc));
922 }
923#endif /* VBOX_WITH_RC_RELEASE_LOGGING */
924
925#ifdef LOG_ENABLED
926 /*
927 * For the ring-0 EMT logger, we use a per-thread logger instance
928 * in ring-0. Only initialize it once.
929 */
930 PRTLOGGER const pDefault = RTLogDefaultInstance();
931 for (VMCPUID i = 0; i < pVM->cCpus; i++)
932 {
933 PVMCPU pVCpu = &pVM->aCpus[i];
934 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3;
935 if (pR0LoggerR3)
936 {
937 if (!pR0LoggerR3->fCreated)
938 {
939 RTR0PTR pfnLoggerWrapper = NIL_RTR0PTR;
940 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
941 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerWrapper not found! rc=%Rra\n", rc), rc);
942
943 RTR0PTR pfnLoggerFlush = NIL_RTR0PTR;
944 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
945 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Rra\n", rc), rc);
946
947 rc = RTLogCreateForR0(&pR0LoggerR3->Logger, pR0LoggerR3->cbLogger,
948 pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
949 pfnLoggerWrapper, pfnLoggerFlush,
950 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
951 AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Rra\n", rc), rc);
952
953 RTR0PTR pfnLoggerPrefix = NIL_RTR0PTR;
954 rc = PDMR3LdrGetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerPrefix", &pfnLoggerPrefix);
955 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerPrefix not found! rc=%Rra\n", rc), rc);
956 rc = RTLogSetCustomPrefixCallbackForR0(&pR0LoggerR3->Logger,
957 pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
958 pfnLoggerPrefix, NIL_RTR0PTR);
959 AssertReleaseMsgRCReturn(rc, ("RTLogSetCustomPrefixCallback failed! rc=%Rra\n", rc), rc);
960
961 pR0LoggerR3->idCpu = i;
962 pR0LoggerR3->fCreated = true;
963 pR0LoggerR3->fFlushingDisabled = false;
964
965 }
966
967 rc = RTLogCopyGroupsAndFlagsForR0(&pR0LoggerR3->Logger, pVCpu->vmm.s.pR0LoggerR0 + RT_OFFSETOF(VMMR0LOGGER, Logger),
968 pDefault, RTLOGFLAGS_BUFFERED, UINT32_MAX);
969 AssertRC(rc);
970 }
971 }
972#endif
973 return rc;
974}
975
976
977/**
978 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg1Weak output.
979 *
980 * @returns Pointer to the buffer.
981 * @param pVM Pointer to the VM.
982 */
983VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
984{
985 if (HMIsEnabled(pVM))
986 return pVM->vmm.s.szRing0AssertMsg1;
987
988 RTRCPTR RCPtr;
989 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg1", &RCPtr);
990 if (RT_SUCCESS(rc))
991 return (const char *)MMHyperRCToR3(pVM, RCPtr);
992
993 return NULL;
994}
995
996
997/**
998 * Returns the VMCPU of the specified virtual CPU.
999 *
1000 * @returns The VMCPU pointer. NULL if @a idCpu or @a pUVM is invalid.
1001 *
1002 * @param pUVM The user mode VM handle.
1003 * @param idCpu The ID of the virtual CPU.
1004 */
1005VMMR3DECL(PVMCPU) VMMR3GetCpuByIdU(PUVM pUVM, RTCPUID idCpu)
1006{
1007 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
1008 AssertReturn(idCpu < pUVM->cCpus, NULL);
1009 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, NULL);
1010 return &pUVM->pVM->aCpus[idCpu];
1011}
1012
1013
1014/**
1015 * Gets the pointer to a buffer containing the R0/RC RTAssertMsg2Weak output.
1016 *
1017 * @returns Pointer to the buffer.
1018 * @param pVM Pointer to the VM.
1019 */
1020VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
1021{
1022 if (HMIsEnabled(pVM))
1023 return pVM->vmm.s.szRing0AssertMsg2;
1024
1025 RTRCPTR RCPtr;
1026 int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "g_szRTAssertMsg2", &RCPtr);
1027 if (RT_SUCCESS(rc))
1028 return (const char *)MMHyperRCToR3(pVM, RCPtr);
1029
1030 return NULL;
1031}
1032
1033
1034/**
1035 * Execute state save operation.
1036 *
1037 * @returns VBox status code.
1038 * @param pVM Pointer to the VM.
1039 * @param pSSM SSM operation handle.
1040 */
1041static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1042{
1043 LogFlow(("vmmR3Save:\n"));
1044
1045 /*
1046 * Save the started/stopped state of all CPUs except 0 as it will always
1047 * be running. This avoids breaking the saved state version. :-)
1048 */
1049 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1050 SSMR3PutBool(pSSM, VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(&pVM->aCpus[i])));
1051
1052 return SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
1053}
1054
1055
1056/**
1057 * Execute state load operation.
1058 *
1059 * @returns VBox status code.
1060 * @param pVM Pointer to the VM.
1061 * @param pSSM SSM operation handle.
1062 * @param uVersion Data layout version.
1063 * @param uPass The data pass.
1064 */
1065static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
1066{
1067 LogFlow(("vmmR3Load:\n"));
1068 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
1069
1070 /*
1071 * Validate version.
1072 */
1073 if ( uVersion != VMM_SAVED_STATE_VERSION
1074 && uVersion != VMM_SAVED_STATE_VERSION_3_0)
1075 {
1076 AssertMsgFailed(("vmmR3Load: Invalid version uVersion=%u!\n", uVersion));
1077 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1078 }
1079
1080 if (uVersion <= VMM_SAVED_STATE_VERSION_3_0)
1081 {
1082 /* Ignore the stack bottom, stack pointer and stack bits. */
1083 RTRCPTR RCPtrIgnored;
1084 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1085 SSMR3GetRCPtr(pSSM, &RCPtrIgnored);
1086#ifdef RT_OS_DARWIN
1087 if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(3,0,0)
1088 && SSMR3HandleVersion(pSSM) < VBOX_FULL_VERSION_MAKE(3,1,0)
1089 && SSMR3HandleRevision(pSSM) >= 48858
1090 && ( !strcmp(SSMR3HandleHostOSAndArch(pSSM), "darwin.x86")
1091 || !strcmp(SSMR3HandleHostOSAndArch(pSSM), "") )
1092 )
1093 SSMR3Skip(pSSM, 16384);
1094 else
1095 SSMR3Skip(pSSM, 8192);
1096#else
1097 SSMR3Skip(pSSM, 8192);
1098#endif
1099 }
1100
1101 /*
1102 * Restore the VMCPU states. VCPU 0 is always started.
1103 */
1104 VMCPU_SET_STATE(&pVM->aCpus[0], VMCPUSTATE_STARTED);
1105 for (VMCPUID i = 1; i < pVM->cCpus; i++)
1106 {
1107 bool fStarted;
1108 int rc = SSMR3GetBool(pSSM, &fStarted);
1109 if (RT_FAILURE(rc))
1110 return rc;
1111 VMCPU_SET_STATE(&pVM->aCpus[i], fStarted ? VMCPUSTATE_STARTED : VMCPUSTATE_STOPPED);
1112 }
1113
1114 /* terminator */
1115 uint32_t u32;
1116 int rc = SSMR3GetU32(pSSM, &u32);
1117 if (RT_FAILURE(rc))
1118 return rc;
1119 if (u32 != UINT32_MAX)
1120 {
1121 AssertMsgFailed(("u32=%#x\n", u32));
1122 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1123 }
1124 return VINF_SUCCESS;
1125}
1126
1127
1128#ifdef VBOX_WITH_RAW_MODE
1129/**
1130 * Resolve a builtin RC symbol.
1131 *
1132 * Called by PDM when loading or relocating RC modules.
1133 *
1134 * @returns VBox status
1135 * @param pVM Pointer to the VM.
 1136 * @param pszSymbol Symbol to resolve.
1137 * @param pRCPtrValue Where to store the symbol value.
1138 *
1139 * @remark This has to work before VMMR3Relocate() is called.
1140 */
1141VMMR3_INT_DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue)
1142{
1143 if (!strcmp(pszSymbol, "g_Logger"))
1144 {
1145 if (pVM->vmm.s.pRCLoggerR3)
1146 pVM->vmm.s.pRCLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCLoggerR3);
1147 *pRCPtrValue = pVM->vmm.s.pRCLoggerRC;
1148 }
1149 else if (!strcmp(pszSymbol, "g_RelLogger"))
1150 {
1151# ifdef VBOX_WITH_RC_RELEASE_LOGGING
1152 if (pVM->vmm.s.pRCRelLoggerR3)
1153 pVM->vmm.s.pRCRelLoggerRC = MMHyperR3ToRC(pVM, pVM->vmm.s.pRCRelLoggerR3);
1154 *pRCPtrValue = pVM->vmm.s.pRCRelLoggerRC;
1155# else
1156 *pRCPtrValue = NIL_RTRCPTR;
1157# endif
1158 }
1159 else
1160 return VERR_SYMBOL_NOT_FOUND;
1161 return VINF_SUCCESS;
1162}
1163#endif /* VBOX_WITH_RAW_MODE */
1164
1165
1166/**
1167 * Suspends the CPU yielder.
1168 *
1169 * @param pVM Pointer to the VM.
1170 */
1171VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM)
1172{
1173 VMCPU_ASSERT_EMT(&pVM->aCpus[0]);
1174 if (!pVM->vmm.s.cYieldResumeMillies)
1175 {
1176 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1177 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1178 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1179 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1180 else
1181 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1182 TMTimerStop(pVM->vmm.s.pYieldTimer);
1183 }
1184 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1185}
1186
1187
1188/**
1189 * Stops the CPU yielder.
1190 *
1191 * @param pVM Pointer to the VM.
1192 */
1193VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM)
1194{
1195 if (!pVM->vmm.s.cYieldResumeMillies)
1196 TMTimerStop(pVM->vmm.s.pYieldTimer);
1197 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1198 pVM->vmm.s.u64LastYield = RTTimeNanoTS();
1199}
1200
1201
1202/**
 1203 * Resumes the CPU yielder when it has been suspended or stopped.
1204 *
1205 * @param pVM Pointer to the VM.
1206 */
1207VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM)
1208{
1209 if (pVM->vmm.s.cYieldResumeMillies)
1210 {
1211 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1212 pVM->vmm.s.cYieldResumeMillies = 0;
1213 }
1214}
1215
1216
1217/**
1218 * Internal timer callback function.
1219 *
1220 * @param pVM The VM.
1221 * @param pTimer The timer handle.
1222 * @param pvUser User argument specified upon timer creation.
1223 */
1224static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1225{
1226 NOREF(pvUser);
1227
1228 /*
1229 * This really needs some careful tuning. While we shouldn't be too greedy since
1230 * that'll cause the rest of the system to stop up, we shouldn't be too nice either
1231 * because that'll cause us to stop up.
1232 *
1233 * The current logic is to use the default interval when there is no lag worth
1234 * mentioning, but when we start accumulating lag we don't bother yielding at all.
1235 *
1236 * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL
1237 * so the lag is up to date.)
1238 */
1239 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
1240 if ( u64Lag < 50000000 /* 50ms */
1241 || ( u64Lag < 1000000000 /* 1s */
1242 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */)
1243 )
1244 {
1245 uint64_t u64Elapsed = RTTimeNanoTS();
1246 pVM->vmm.s.u64LastYield = u64Elapsed;
1247
1248 RTThreadYield();
1249
1250#ifdef LOG_ENABLED
1251 u64Elapsed = RTTimeNanoTS() - u64Elapsed;
1252 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed));
1253#endif
1254 }
1255 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1256}
1257
1258
1259#ifdef VBOX_WITH_RAW_MODE
1260/**
1261 * Executes guest code in the raw-mode context.
1262 *
1263 * @param pVM Pointer to the VM.
1264 * @param pVCpu Pointer to the VMCPU.
1265 */
1266VMMR3_INT_DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu)
1267{
1268 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1269
1270 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1271
1272 /*
1273 * Set the hypervisor to resume executing a CPUM resume function
1274 * in CPUMRCA.asm.
1275 */
1276 CPUMSetHyperState(pVCpu,
1277 CPUMGetGuestEFlags(pVCpu) & X86_EFL_VM
1278 ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
1279 : pVM->vmm.s.pfnCPUMRCResumeGuest, /* eip */
1280 pVCpu->vmm.s.pbEMTStackBottomRC, /* esp */
1281 0, /* eax */
1282 VM_RC_ADDR(pVM, &pVCpu->cpum) /* edx */);
1283
1284 /*
1285 * We hide log flushes (outer) and hypervisor interrupts (inner).
1286 */
1287 for (;;)
1288 {
1289#ifdef VBOX_STRICT
1290 if (RT_UNLIKELY(!CPUMGetHyperCR3(pVCpu) || CPUMGetHyperCR3(pVCpu) != PGMGetHyperCR3(pVCpu)))
1291 EMR3FatalError(pVCpu, VERR_VMM_HYPER_CR3_MISMATCH);
1292 PGMMapCheck(pVM);
1293# ifdef VBOX_WITH_SAFE_STR
1294 SELMR3CheckShadowTR(pVM);
1295# endif
1296#endif
1297 int rc;
1298 do
1299 {
1300#ifdef NO_SUPCALLR0VMM
1301 rc = VERR_GENERAL_FAILURE;
1302#else
1303 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
1304 if (RT_LIKELY(rc == VINF_SUCCESS))
1305 rc = pVCpu->vmm.s.iLastGZRc;
1306#endif
1307 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1308
1309 /*
1310 * Flush the logs.
1311 */
1312#ifdef LOG_ENABLED
1313 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
1314 if ( pLogger
1315 && pLogger->offScratch > 0)
1316 RTLogFlushRC(NULL, pLogger);
1317#endif
1318#ifdef VBOX_WITH_RC_RELEASE_LOGGING
1319 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
1320 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1321 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
1322#endif
1323 if (rc != VINF_VMM_CALL_HOST)
1324 {
1325 Log2(("VMMR3RawRunGC: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1326 return rc;
1327 }
1328 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1329 if (RT_FAILURE(rc))
1330 return rc;
1331 /* Resume GC */
1332 }
1333}
1334#endif /* VBOX_WITH_RAW_MODE */
1335
1336
1337/**
1338 * Executes guest code (Intel VT-x and AMD-V).
1339 *
1340 * @param pVM Pointer to the VM.
1341 * @param pVCpu Pointer to the VMCPU.
1342 */
1343VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu)
1344{
1345 Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1346
1347 for (;;)
1348 {
1349 int rc;
1350 do
1351 {
1352#ifdef NO_SUPCALLR0VMM
1353 rc = VERR_GENERAL_FAILURE;
1354#else
1355 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, pVCpu->idCpu);
1356 if (RT_LIKELY(rc == VINF_SUCCESS))
1357 rc = pVCpu->vmm.s.iLastGZRc;
1358#endif
1359 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1360
1361#if 0 /* todo triggers too often */
1362 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
1363#endif
1364
1365#ifdef LOG_ENABLED
1366 /*
1367 * Flush the log
1368 */
1369 PVMMR0LOGGER pR0LoggerR3 = pVCpu->vmm.s.pR0LoggerR3;
1370 if ( pR0LoggerR3
1371 && pR0LoggerR3->Logger.offScratch > 0)
1372 RTLogFlushR0(NULL, &pR0LoggerR3->Logger);
1373#endif /* LOG_ENABLED */
1374 if (rc != VINF_VMM_CALL_HOST)
1375 {
1376 Log2(("VMMR3HmRunGC: returns %Rrc (cs:rip=%04x:%RX64)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1377 return rc;
1378 }
1379 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
1380 if (RT_FAILURE(rc))
1381 return rc;
1382 /* Resume R0 */
1383 }
1384}
1385
1386/**
1387 * VCPU worker for VMMSendSipi.
1388 *
1389 * @param pVM Pointer to the VM.
1390 * @param idCpu Virtual CPU to perform SIPI on
1391 * @param uVector SIPI vector
1392 */
1393DECLCALLBACK(int) vmmR3SendSipi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1394{
1395 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1396 VMCPU_ASSERT_EMT(pVCpu);
1397
1398 /** @todo what are we supposed to do if the processor is already running? */
1399 if (EMGetState(pVCpu) != EMSTATE_WAIT_SIPI)
1400 return VERR_ACCESS_DENIED;
1401
1402
1403 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1404
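    /* A SIPI with vector NN starts the AP in real mode at physical address
       000NN000h: CS selector = NN00h, CS base = NN000h, IP = 0, as set up below. */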
1405 pCtx->cs.Sel = uVector << 8;
1406 pCtx->cs.ValidSel = uVector << 8;
1407 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1408 pCtx->cs.u64Base = uVector << 12;
1409 pCtx->cs.u32Limit = UINT32_C(0x0000ffff);
1410 pCtx->rip = 0;
1411
1412 Log(("vmmR3SendSipi for VCPU %d with vector %x\n", idCpu, uVector));
1413
1414# if 1 /* If we keep the EMSTATE_WAIT_SIPI method, then move this to EM.cpp. */
1415 EMSetState(pVCpu, EMSTATE_HALTED);
1416 return VINF_EM_RESCHEDULE;
1417# else /* And if we go the VMCPU::enmState way it can stay here. */
1418 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STOPPED);
1419 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1420 return VINF_SUCCESS;
1421# endif
1422}
1423
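/**
 * VCPU worker for VMMR3SendInitIpi.
 *
 * @param   pVM         Pointer to the VM.
 * @param   idCpu       Virtual CPU to send the INIT IPI to.
 */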
1424DECLCALLBACK(int) vmmR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1425{
1426 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1427 VMCPU_ASSERT_EMT(pVCpu);
1428
1429 Log(("vmmR3SendInitIpi for VCPU %d\n", idCpu));
1430
1431 PGMR3ResetCpu(pVM, pVCpu);
1432 PDMR3ResetCpu(pVCpu); /* Clear any pending interrupts */
1433 TRPMR3ResetCpu(pVCpu);
1434 CPUMR3ResetCpu(pVM, pVCpu);
1435 EMR3ResetCpu(pVCpu);
1436 HMR3ResetCpu(pVCpu);
1437
1438 /* This will trickle up on the target EMT. */
1439 return VINF_EM_WAIT_SIPI;
1440}
1441
1442/**
1443 * Sends SIPI to the virtual CPU by setting CS:EIP into vector-dependent state
 1444 * and unhalting the processor.
1445 *
1446 * @param pVM Pointer to the VM.
1447 * @param idCpu Virtual CPU to perform SIPI on
1448 * @param uVector SIPI vector
1449 */
1450VMMR3_INT_DECL(void) VMMR3SendSipi(PVM pVM, VMCPUID idCpu, uint32_t uVector)
1451{
1452 AssertReturnVoid(idCpu < pVM->cCpus);
1453
1454 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendSipi, 3, pVM, idCpu, uVector);
1455 AssertRC(rc);
1456}
1457
1458/**
1459 * Sends init IPI to the virtual CPU.
1460 *
1461 * @param pVM Pointer to the VM.
 1462 * @param idCpu Virtual CPU to perform the init IPI on.
1463 */
1464VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu)
1465{
1466 AssertReturnVoid(idCpu < pVM->cCpus);
1467
1468 int rc = VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmmR3SendInitIpi, 2, pVM, idCpu);
1469 AssertRC(rc);
1470}
1471
1472/**
1473 * Registers the guest memory range that can be used for patching
1474 *
1475 * @returns VBox status code.
1476 * @param pVM Pointer to the VM.
1477 * @param pPatchMem Patch memory range
1478 * @param cbPatchMem Size of the memory range
1479 */
1480VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1481{
1482 VM_ASSERT_EMT(pVM);
1483 if (HMIsEnabled(pVM))
1484 return HMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
1485
1486 return VERR_NOT_SUPPORTED;
1487}
1488
1489/**
1490 * Deregisters the guest memory range that can be used for patching
1491 *
1492 * @returns VBox status code.
1493 * @param pVM Pointer to the VM.
1494 * @param pPatchMem Patch memory range
1495 * @param cbPatchMem Size of the memory range
1496 */
1497VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1498{
1499 if (HMIsEnabled(pVM))
1500 return HMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
1501
1502 return VINF_SUCCESS;
1503}
1504
1505
1506/**
1507 * Count returns and have the last non-caller EMT wake up the caller.
1508 *
1509 * @returns VBox strict informational status code for EM scheduling. No failures
 1510 * will be returned here; those are for the caller only.
1511 *
1512 * @param pVM Pointer to the VM.
1513 */
1514DECL_FORCE_INLINE(int) vmmR3EmtRendezvousNonCallerReturn(PVM pVM)
1515{
1516 int rcRet = ASMAtomicReadS32(&pVM->vmm.s.i32RendezvousStatus);
1517 uint32_t cReturned = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsReturned);
1518 if (cReturned == pVM->cCpus - 1U)
1519 {
1520 int rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousDoneCaller);
1521 AssertLogRelRC(rc);
1522 }
1523
1524 AssertLogRelMsgReturn( rcRet <= VINF_SUCCESS
1525 || (rcRet >= VINF_EM_FIRST && rcRet <= VINF_EM_LAST),
1526 ("%Rrc\n", rcRet),
1527 VERR_IPE_UNEXPECTED_INFO_STATUS);
1528 return RT_SUCCESS(rcRet) ? rcRet : VINF_SUCCESS;
1529}
1530
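/* Illustrative sketch only, assuming the public VMMR3EmtRendezvous() API and the
 * FNVMMEMTRENDEZVOUS callback type declared in VBox/vmm/vmm.h; this is roughly how
 * callers reach the common worker below:
 *
 *     static DECLCALLBACK(VBOXSTRICTRC) myWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *     {
 *         // Per-EMT work; the other EMTs are held by the rendezvous machinery.
 *         return VINF_SUCCESS;
 *     }
 *     ...
 *     int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, myWorker, NULL);
 */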
1531
1532/**
1533 * Common worker for VMMR3EmtRendezvous and VMMR3EmtRendezvousFF.
1534 *
1535 * @returns VBox strict informational status code for EM scheduling. No failures
 1536 * will be returned here; those are for the caller only. When
1537 * fIsCaller is set, VINF_SUCCESS is always returned.
1538 *
1539 * @param pVM Pointer to the VM.
1540 * @param pVCpu The VMCPU structure for the calling EMT.
1541 * @param fIsCaller Whether we're the VMMR3EmtRendezvous caller or
1542 * not.
1543 * @param fFlags The flags.
1544 * @param pfnRendezvous The callback.
1545 * @param pvUser The user argument for the callback.
1546 */
1547static int vmmR3EmtRendezvousCommon(PVM pVM, PVMCPU pVCpu, bool fIsCaller,
1548 uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1549{
1550 int rc;
1551
1552 /*
1553 * Enter, the last EMT triggers the next callback phase.
1554 */
1555 uint32_t cEntered = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsEntered);
1556 if (cEntered != pVM->cCpus)
1557 {
1558 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1559 {
1560 /* Wait for our turn. */
1561 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, RT_INDEFINITE_WAIT);
1562 AssertLogRelRC(rc);
1563 }
1564 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1565 {
1566 /* Wait for the last EMT to arrive and wake everyone up. */
1567 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce, RT_INDEFINITE_WAIT);
1568 AssertLogRelRC(rc);
1569 }
1570 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1571 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1572 {
1573 /* Wait for our turn. */
1574 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1575 AssertLogRelRC(rc);
1576 }
1577 else
1578 {
1579 Assert((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE);
1580
1581 /*
1582 * The execute once is handled specially to optimize the code flow.
1583 *
1584 * The last EMT to arrive will perform the callback and the other
1585 * EMTs will wait on the Done/DoneCaller semaphores (instead of
1586 * the EnterOneByOne/AllAtOnce) in the meanwhile. When the callback
1587 * returns, that EMT will initiate the normal return sequence.
1588 */
1589 if (!fIsCaller)
1590 {
1591 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1592 AssertLogRelRC(rc);
1593
1594 return vmmR3EmtRendezvousNonCallerReturn(pVM);
1595 }
1596 return VINF_SUCCESS;
1597 }
1598 }
1599 else
1600 {
1601 /*
1602 * All EMTs are waiting, clear the FF and take action according to the
1603 * execution method.
1604 */
1605 VM_FF_CLEAR(pVM, VM_FF_EMT_RENDEZVOUS);
1606
1607 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE)
1608 {
1609 /* Wake up everyone. */
1610 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce);
1611 AssertLogRelRC(rc);
1612 }
1613 else if ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1614 || (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1615 {
1616 /* Figure out who to wake up and wake it up. If the first one is ourselves,
1617 it's easy; otherwise wake it and wait for our turn. */
1618 VMCPUID iFirst = (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING
1619 ? 0
1620 : pVM->cCpus - 1U;
1621 if (pVCpu->idCpu != iFirst)
1622 {
1623 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[iFirst]);
1624 AssertLogRelRC(rc);
1625 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu], RT_INDEFINITE_WAIT);
1626 AssertLogRelRC(rc);
1627 }
1628 }
1629 /* else: execute the handler on the current EMT and wake up one or more threads afterwards. */
1630 }
1631
1632
1633 /*
1634 * Do the callback and update the status if necessary.
1635 */
1636 if ( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1637 || RT_SUCCESS(ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus)) )
1638 {
1639 VBOXSTRICTRC rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
1640 if (rcStrict != VINF_SUCCESS)
1641 {
1642 AssertLogRelMsg( rcStrict <= VINF_SUCCESS
1643 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
1644 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1645 int32_t i32RendezvousStatus;
1646 do
1647 {
1648 i32RendezvousStatus = ASMAtomicUoReadS32(&pVM->vmm.s.i32RendezvousStatus);
1649 if ( rcStrict == i32RendezvousStatus
1650 || RT_FAILURE(i32RendezvousStatus)
1651 || ( i32RendezvousStatus != VINF_SUCCESS
1652 && rcStrict > i32RendezvousStatus))
1653 break;
1654 } while (!ASMAtomicCmpXchgS32(&pVM->vmm.s.i32RendezvousStatus, VBOXSTRICTRC_VAL(rcStrict), i32RendezvousStatus));
1655 }
1656 }
1657
1658 /*
1659 * Increment the done counter and take action depending on whether we're
1660 * the last to finish callback execution.
1661 */
1662 uint32_t cDone = ASMAtomicIncU32(&pVM->vmm.s.cRendezvousEmtsDone);
1663 if ( cDone != pVM->cCpus
1664 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE)
1665 {
1666 /* Signal the next EMT? */
1667 if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE)
1668 {
1669 rc = RTSemEventSignal(pVM->vmm.s.hEvtRendezvousEnterOneByOne);
1670 AssertLogRelRC(rc);
1671 }
1672 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING)
1673 {
1674 Assert(cDone == pVCpu->idCpu + 1U);
1675 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVCpu->idCpu + 1U]);
1676 AssertLogRelRC(rc);
1677 }
1678 else if ((fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) == VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING)
1679 {
1680 Assert(pVM->cCpus - cDone == pVCpu->idCpu);
1681 rc = RTSemEventSignal(pVM->vmm.s.pahEvtRendezvousEnterOrdered[pVM->cCpus - cDone - 1U]);
1682 AssertLogRelRC(rc);
1683 }
1684
1685 /* Wait for the rest to finish (the caller waits on hEvtRendezvousDoneCaller). */
1686 if (!fIsCaller)
1687 {
1688 rc = RTSemEventMultiWait(pVM->vmm.s.hEvtMulRendezvousDone, RT_INDEFINITE_WAIT);
1689 AssertLogRelRC(rc);
1690 }
1691 }
1692 else
1693 {
1694 /* Callback execution is all done, tell the rest to return. */
1695 rc = RTSemEventMultiSignal(pVM->vmm.s.hEvtMulRendezvousDone);
1696 AssertLogRelRC(rc);
1697 }
1698
1699 if (!fIsCaller)
1700 return vmmR3EmtRendezvousNonCallerReturn(pVM);
1701 return VINF_SUCCESS;
1702}
1703
1704
1705/**
1706 * Called in response to VM_FF_EMT_RENDEZVOUS.
1707 *
1708 * @returns VBox strict status code - EM scheduling. No errors will be returned
1709 * here, nor will any non-EM scheduling status codes be returned.
1710 *
1711 * @param pVM Pointer to the VM.
1712 * @param pVCpu The handle of the calling EMT.
1713 *
1714 * @thread EMT
1715 */
1716VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu)
1717{
1718 Assert(!pVCpu->vmm.s.fInRendezvous);
1719 pVCpu->vmm.s.fInRendezvous = true;
1720 int rc = vmmR3EmtRendezvousCommon(pVM, pVCpu, false /* fIsCaller */, pVM->vmm.s.fRendezvousFlags,
1721 pVM->vmm.s.pfnRendezvous, pVM->vmm.s.pvRendezvousUser);
1722 pVCpu->vmm.s.fInRendezvous = false;
1723 return rc;
1724}
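/*
 * Usage sketch (illustrative only): the EM loops poll the force action flag
 * and dispatch to this function, roughly:
 *
 *     if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
 *         rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
 */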
1725
1726
1727/**
1728 * EMT rendezvous.
1729 *
1730 * Gathers all the EMTs and executes some code on each of them, either in a
1731 * one-by-one fashion or all at once.
1732 *
1733 * @returns VBox strict status code. This will be the first error,
1734 * VINF_SUCCESS, or an EM scheduling status code.
1735 *
1736 * @param pVM Pointer to the VM.
1737 * @param fFlags Flags indicating execution methods. See
1738 * grp_VMMR3EmtRendezvous_fFlags.
1739 * @param pfnRendezvous The callback.
1740 * @param pvUser User argument for the callback.
1741 *
1742 * @thread Any.
1743 */
1744VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser)
1745{
1746 /*
1747 * Validate input.
1748 */
1749 AssertReturn(pVM, VERR_INVALID_VM_HANDLE);
1750 AssertMsg( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID
1751 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) <= VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
1752 && !(fFlags & ~VMMEMTRENDEZVOUS_FLAGS_VALID_MASK), ("%#x\n", fFlags));
1753 AssertMsg( !(fFlags & VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR)
1754 || ( (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE
1755 && (fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK) != VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE),
1756 ("type %u\n", fFlags & VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK));
1757
1758 VBOXSTRICTRC rcStrict;
1759 PVMCPU pVCpu = VMMGetCpu(pVM);
1760 if (!pVCpu)
1761 /*
1762 * Forward the request to an EMT thread.
1763 */
1764 rcStrict = VMR3ReqCallWait(pVM, VMCPUID_ANY,
1765 (PFNRT)VMMR3EmtRendezvous, 4, pVM, fFlags, pfnRendezvous, pvUser);
1766 else if (pVM->cCpus == 1)
1767 {
1768 /*
1769 * Shortcut for the single EMT case.
1770 */
1771 AssertLogRelReturn(!pVCpu->vmm.s.fInRendezvous, VERR_DEADLOCK);
1772 pVCpu->vmm.s.fInRendezvous = true;
1773 rcStrict = pfnRendezvous(pVM, pVCpu, pvUser);
1774 pVCpu->vmm.s.fInRendezvous = false;
1775 }
1776 else
1777 {
1778 /*
1779 * Spin lock. If busy, wait for the other EMT to finish while keeping a
1780 * lookout for the RENDEZVOUS FF.
1781 */
1782 int rc;
1783 rcStrict = VINF_SUCCESS;
1784 if (RT_UNLIKELY(!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0)))
1785 {
1786 AssertLogRelReturn(!pVCpu->vmm.s.fInRendezvous, VERR_DEADLOCK);
1787
1788 while (!ASMAtomicCmpXchgU32(&pVM->vmm.s.u32RendezvousLock, 0x77778888, 0))
1789 {
1790 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1791 {
1792 rc = VMMR3EmtRendezvousFF(pVM, pVCpu);
1793 if ( rc != VINF_SUCCESS
1794 && ( rcStrict == VINF_SUCCESS
1795 || rcStrict > rc))
1796 rcStrict = rc;
1797 /** @todo Perhaps deal with termination here? */
1798 }
1799 ASMNopPause();
1800 }
1801 }
1802 Assert(!VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS));
1803 Assert(!pVCpu->vmm.s.fInRendezvous);
1804 pVCpu->vmm.s.fInRendezvous = true;
1805
1806 /*
1807 * Clear the slate. This is a semaphore ping-pong orgy. :-)
1808 */
1809 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1810 {
1811 rc = RTSemEventWait(pVM->vmm.s.pahEvtRendezvousEnterOrdered[i], 0);
1812 AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
1813 }
1814 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousEnterOneByOne, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
1815 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousEnterAllAtOnce); AssertLogRelRC(rc);
1816 rc = RTSemEventMultiReset(pVM->vmm.s.hEvtMulRendezvousDone); AssertLogRelRC(rc);
1817 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, 0); AssertLogRelMsg(rc == VERR_TIMEOUT || rc == VINF_SUCCESS, ("%Rrc\n", rc));
1818 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsEntered, 0);
1819 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsDone, 0);
1820 ASMAtomicWriteU32(&pVM->vmm.s.cRendezvousEmtsReturned, 0);
1821 ASMAtomicWriteS32(&pVM->vmm.s.i32RendezvousStatus, VINF_SUCCESS);
1822 ASMAtomicWritePtr((void * volatile *)&pVM->vmm.s.pfnRendezvous, (void *)(uintptr_t)pfnRendezvous);
1823 ASMAtomicWritePtr(&pVM->vmm.s.pvRendezvousUser, pvUser);
1824 ASMAtomicWriteU32(&pVM->vmm.s.fRendezvousFlags, fFlags);
1825
1826 /*
1827 * Set the FF and poke the other EMTs.
1828 */
1829 VM_FF_SET(pVM, VM_FF_EMT_RENDEZVOUS);
1830 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_POKE);
1831
1832 /*
1833 * Do the same ourselves.
1834 */
1835 vmmR3EmtRendezvousCommon(pVM, pVCpu, true /* fIsCaller */, fFlags, pfnRendezvous, pvUser);
1836
1837 /*
1838 * The caller waits for the other EMTs to be done and return before doing
1839 * the cleanup. This does away with wakeup / reset races we would otherwise
1840 * risk in the multiple release event semaphore code (hEvtRendezvousDoneCaller).
1841 */
1842 rc = RTSemEventWait(pVM->vmm.s.hEvtRendezvousDoneCaller, RT_INDEFINITE_WAIT);
1843 AssertLogRelRC(rc);
1844
1845 /*
1846 * Get the return code and clean up a little bit.
1847 */
1848 int rcMy = pVM->vmm.s.i32RendezvousStatus;
1849 ASMAtomicWriteNullPtr((void * volatile *)&pVM->vmm.s.pfnRendezvous);
1850
1851 ASMAtomicWriteU32(&pVM->vmm.s.u32RendezvousLock, 0);
1852 pVCpu->vmm.s.fInRendezvous = false;
1853
1854 /*
1855 * Merge rcStrict and rcMy.
1856 */
1857 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
1858 if ( rcMy != VINF_SUCCESS
1859 && ( rcStrict == VINF_SUCCESS
1860 || rcStrict > rcMy))
1861 rcStrict = rcMy;
1862 }
1863
1864 AssertLogRelMsgReturn( rcStrict <= VINF_SUCCESS
1865 || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST),
1866 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
1867 VERR_IPE_UNEXPECTED_INFO_STATUS);
1868 return VBOXSTRICTRC_VAL(rcStrict);
1869}
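/*
 * Usage sketch (illustrative only): running a callback once on every EMT,
 * one EMT at a time; vmmR3ExampleWorker is a hypothetical callback:
 *
 *     static DECLCALLBACK(VBOXSTRICTRC) vmmR3ExampleWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *     {
 *         NOREF(pvUser);
 *         Log(("vmmR3ExampleWorker: running on EMT/%u\n", pVCpu->idCpu));
 *         return VINF_SUCCESS;
 *     }
 *
 *     int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
 *                                 vmmR3ExampleWorker, NULL);
 */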
1870
1871
1872/**
1873 * Disables/enables EMT rendezvous.
1874 *
1875 * This is used to make sure EMT rendezvous does not take place while
1876 * processing a priority request.
1877 *
1878 * @returns Old rendezvous-disabled state.
1879 * @param pVCpu The handle of the calling EMT.
1880 * @param fDisabled True if disabled, false if enabled.
1881 */
1882VMMR3_INT_DECL(bool) VMMR3EmtRendezvousSetDisabled(PVMCPU pVCpu, bool fDisabled)
1883{
1884 VMCPU_ASSERT_EMT(pVCpu);
1885 bool fOld = pVCpu->vmm.s.fInRendezvous;
1886 pVCpu->vmm.s.fInRendezvous = fDisabled;
1887 return fOld;
1888}
1889
1890
1891/**
1892 * Read from the ring 0 jump buffer stack
1893 *
1894 * @returns VBox status code.
1895 *
1896 * @param pVM Pointer to the VM.
1897 * @param idCpu The ID of the source CPU context (for the address).
1898 * @param R0Addr Where to start reading.
1899 * @param pvBuf Where to store the data we've read.
1900 * @param cbRead The number of bytes to read.
1901 */
1902VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead)
1903{
1904 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
1905 AssertReturn(pVCpu, VERR_INVALID_PARAMETER);
1906
1907#ifdef VMM_R0_SWITCH_STACK
1908 RTHCUINTPTR off = R0Addr - MMHyperCCToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
1909#else
1910 RTHCUINTPTR off = pVCpu->vmm.s.CallRing3JmpBufR0.cbSavedStack - (pVCpu->vmm.s.CallRing3JmpBufR0.SpCheck - R0Addr);
1911#endif
1912 if ( off > VMM_STACK_SIZE
1913 || off + cbRead >= VMM_STACK_SIZE)
1914 return VERR_INVALID_POINTER;
1915
1916 memcpy(pvBuf, &pVCpu->vmm.s.pbEMTStackR3[off], cbRead);
1917 return VINF_SUCCESS;
1918}
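/*
 * Usage sketch (illustrative only): a guru meditation dumper could use this
 * to walk the ring-0 stack saved in the jump buffer, R0AddrFrame being a
 * hypothetical frame address taken from CallRing3JmpBufR0:
 *
 *     uint64_t au64Frame[2];
 *     int rc = VMMR3ReadR0Stack(pVM, pVCpu->idCpu, R0AddrFrame, au64Frame, sizeof(au64Frame));
 */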
1919
1920#ifdef VBOX_WITH_RAW_MODE
1921
1922/**
1923 * Calls a RC function.
1924 *
1925 * @param pVM Pointer to the VM.
1926 * @param RCPtrEntry The address of the RC function.
1927 * @param cArgs The number of arguments in the ellipsis (...).
1928 * @param ... Arguments to the function.
1929 */
1930VMMR3DECL(int) VMMR3CallRC(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, ...)
1931{
1932 va_list args;
1933 va_start(args, cArgs);
1934 int rc = VMMR3CallRCV(pVM, RCPtrEntry, cArgs, args);
1935 va_end(args);
1936 return rc;
1937}
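/*
 * Usage sketch (illustrative only): calling an RC helper whose address was
 * resolved earlier (e.g. with PDMR3LdrGetSymbolRC or a similar loader
 * helper); RCPtrHelper and uArg are hypothetical:
 *
 *     rc = VMMR3CallRC(pVM, RCPtrHelper, 2, pVM->pVMRC, uArg);
 */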
1938
1939
1940/**
1941 * Calls a RC function.
1942 *
1943 * @param pVM Pointer to the VM.
1944 * @param RCPtrEntry The address of the RC function.
1945 * @param cArgs The number of arguments in the ellipsis (...).
1946 * @param args Arguments to the function.
1947 */
1948VMMR3DECL(int) VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list args)
1949{
1950 /* Raw mode implies 1 VCPU. */
1951 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
1952 PVMCPU pVCpu = &pVM->aCpus[0];
1953
1954 Log2(("VMMR3CallGCV: RCPtrEntry=%RRv cArgs=%d\n", RCPtrEntry, cArgs));
1955
1956 /*
1957 * Setup the call frame using the trampoline.
1958 */
1959 CPUMSetHyperState(pVCpu,
1960 pVM->vmm.s.pfnCallTrampolineRC, /* eip */
1961 pVCpu->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32), /* esp */
1962 RCPtrEntry, /* eax */
1963 cArgs /* edx */
1964 );
1965
1966#if 0
1967 memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
1968#endif
1969 PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
1970 int i = cArgs;
1971 while (i-- > 0)
1972 *pFrame++ = va_arg(args, RTGCUINTPTR32);
1973
1974 CPUMPushHyper(pVCpu, cArgs * sizeof(RTGCUINTPTR32)); /* stack frame size */
1975 CPUMPushHyper(pVCpu, RCPtrEntry); /* what to call */
1976
1977 /*
1978 * We hide log flushes (outer) and hypervisor interrupts (inner).
1979 */
1980 for (;;)
1981 {
1982 int rc;
1983 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
1984 do
1985 {
1986#ifdef NO_SUPCALLR0VMM
1987 rc = VERR_GENERAL_FAILURE;
1988#else
1989 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
1990 if (RT_LIKELY(rc == VINF_SUCCESS))
1991 rc = pVCpu->vmm.s.iLastGZRc;
1992#endif
1993 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1994
1995 /*
1996 * Flush the loggers.
1997 */
1998#ifdef LOG_ENABLED
1999 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
2000 if ( pLogger
2001 && pLogger->offScratch > 0)
2002 RTLogFlushRC(NULL, pLogger);
2003#endif
2004#ifdef VBOX_WITH_RC_RELEASE_LOGGING
2005 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
2006 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2007 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
2008#endif
2009 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2010 VMMR3FatalDump(pVM, pVCpu, rc);
2011 if (rc != VINF_VMM_CALL_HOST)
2012 {
2013 Log2(("VMMR3CallGCV: returns %Rrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
2014 return rc;
2015 }
2016 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2017 if (RT_FAILURE(rc))
2018 return rc;
2019 }
2020}
2021
2022#endif /* VBOX_WITH_RAW_MODE */
2023
2024/**
2025 * Wrapper for SUPR3CallVMMR0Ex which will deal with VINF_VMM_CALL_HOST returns.
2026 *
2027 * @returns VBox status code.
2028 * @param pVM Pointer to the VM.
2029 * @param uOperation Operation to execute.
2030 * @param u64Arg Constant argument.
2031 * @param pReqHdr Pointer to a request header. See SUPR3CallVMMR0Ex for
2032 * details.
2033 */
2034VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
2035{
2036 PVMCPU pVCpu = VMMGetCpu(pVM);
2037 AssertReturn(pVCpu, VERR_VM_THREAD_NOT_EMT);
2038
2039 /*
2040 * Call Ring-0 entry with init code.
2041 */
2042 int rc;
2043 for (;;)
2044 {
2045#ifdef NO_SUPCALLR0VMM
2046 rc = VERR_GENERAL_FAILURE;
2047#else
2048 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, uOperation, u64Arg, pReqHdr);
2049#endif
2050 /*
2051 * Flush the logs.
2052 */
2053#ifdef LOG_ENABLED
2054 if ( pVCpu->vmm.s.pR0LoggerR3
2055 && pVCpu->vmm.s.pR0LoggerR3->Logger.offScratch > 0)
2056 RTLogFlushR0(NULL, &pVCpu->vmm.s.pR0LoggerR3->Logger);
2057#endif
2058 if (rc != VINF_VMM_CALL_HOST)
2059 break;
2060 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2061 if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
2062 break;
2063 /* Resume R0 */
2064 }
2065
2066 AssertLogRelMsgReturn(rc == VINF_SUCCESS || RT_FAILURE(rc),
2067 ("uOperation=%u rc=%Rrc\n", uOperation, rc),
2068 VERR_IPE_UNEXPECTED_INFO_STATUS);
2069 return rc;
2070}
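/*
 * Usage sketch (illustrative only): callers pass one of the VMMR0OPERATION
 * codes from VBox/vmm/vmm.h, for instance (hypothetical call site):
 *
 *     rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
 */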
2071
2072
2073#ifdef VBOX_WITH_RAW_MODE
2074/**
2075 * Resumes executing hypervisor code when interrupted by a queue flush or a
2076 * debug event.
2077 *
2078 * @returns VBox status code.
2079 * @param pVM Pointer to the VM.
2080 * @param pVCpu Pointer to the VMCPU.
2081 */
2082VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu)
2083{
2084 Log(("VMMR3ResumeHyper: eip=%RRv esp=%RRv\n", CPUMGetHyperEIP(pVCpu), CPUMGetHyperESP(pVCpu)));
2085 AssertReturn(pVM->cCpus == 1, VERR_RAW_MODE_INVALID_SMP);
2086
2087 /*
2088 * We hide log flushes (outer) and hypervisor interrupts (inner).
2089 */
2090 for (;;)
2091 {
2092 int rc;
2093 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
2094 do
2095 {
2096# ifdef NO_SUPCALLR0VMM
2097 rc = VERR_GENERAL_FAILURE;
2098# else
2099 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
2100 if (RT_LIKELY(rc == VINF_SUCCESS))
2101 rc = pVCpu->vmm.s.iLastGZRc;
2102# endif
2103 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2104
2105 /*
2106 * Flush the loggers.
2107 */
2108# ifdef LOG_ENABLED
2109 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
2110 if ( pLogger
2111 && pLogger->offScratch > 0)
2112 RTLogFlushRC(NULL, pLogger);
2113# endif
2114# ifdef VBOX_WITH_RC_RELEASE_LOGGING
2115 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
2116 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2117 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
2118# endif
2119 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2120 VMMR3FatalDump(pVM, pVCpu, rc);
2121 if (rc != VINF_VMM_CALL_HOST)
2122 {
2123 Log(("VMMR3ResumeHyper: returns %Rrc\n", rc));
2124 return rc;
2125 }
2126 rc = vmmR3ServiceCallRing3Request(pVM, pVCpu);
2127 if (RT_FAILURE(rc))
2128 return rc;
2129 }
2130}
2131#endif /* VBOX_WITH_RAW_MODE */
2132
2133
2134/**
2135 * Service a call to the ring-3 host code.
2136 *
2137 * @returns VBox status code.
2138 * @param pVM Pointer to the VM.
2139 * @param pVCpu Pointer to the VMCPU.
2140 * @remark Careful with critsects.
2141 */
2142static int vmmR3ServiceCallRing3Request(PVM pVM, PVMCPU pVCpu)
2143{
2144 /*
2145 * We must also check for pending critsect exits or else we can deadlock
2146 * when entering other critsects here.
2147 */
2148 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
2149 PDMCritSectBothFF(pVCpu);
2150
2151 switch (pVCpu->vmm.s.enmCallRing3Operation)
2152 {
2153 /*
2154 * Acquire a critical section.
2155 */
2156 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
2157 {
2158 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectEnterEx((PPDMCRITSECT)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2159 true /*fCallRing3*/);
2160 break;
2161 }
2162
2163 /*
2164 * Enter a r/w critical section exclusively.
2165 */
2166 case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL:
2167 {
2168 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterExclEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2169 true /*fCallRing3*/);
2170 break;
2171 }
2172
2173 /*
2174 * Enter a r/w critical section shared.
2175 */
2176 case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED:
2177 {
2178 pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterSharedEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
2179 true /*fCallRing3*/);
2180 break;
2181 }
2182
2183 /*
2184 * Acquire the PDM lock.
2185 */
2186 case VMMCALLRING3_PDM_LOCK:
2187 {
2188 pVCpu->vmm.s.rcCallRing3 = PDMR3LockCall(pVM);
2189 break;
2190 }
2191
2192 /*
2193 * Grow the PGM pool.
2194 */
2195 case VMMCALLRING3_PGM_POOL_GROW:
2196 {
2197 pVCpu->vmm.s.rcCallRing3 = PGMR3PoolGrow(pVM);
2198 break;
2199 }
2200
2201 /*
2202 * Maps a page allocation chunk into ring-3 so ring-0 can use it.
2203 */
2204 case VMMCALLRING3_PGM_MAP_CHUNK:
2205 {
2206 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysChunkMap(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2207 break;
2208 }
2209
2210 /*
2211 * Allocates more handy pages.
2212 */
2213 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
2214 {
2215 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateHandyPages(pVM);
2216 break;
2217 }
2218
2219 /*
2220 * Allocates a large page.
2221 */
2222 case VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE:
2223 {
2224 pVCpu->vmm.s.rcCallRing3 = PGMR3PhysAllocateLargeHandyPage(pVM, pVCpu->vmm.s.u64CallRing3Arg);
2225 break;
2226 }
2227
2228 /*
2229 * Acquire the PGM lock.
2230 */
2231 case VMMCALLRING3_PGM_LOCK:
2232 {
2233 pVCpu->vmm.s.rcCallRing3 = PGMR3LockCall(pVM);
2234 break;
2235 }
2236
2237 /*
2238 * Acquire the MM hypervisor heap lock.
2239 */
2240 case VMMCALLRING3_MMHYPER_LOCK:
2241 {
2242 pVCpu->vmm.s.rcCallRing3 = MMR3LockCall(pVM);
2243 break;
2244 }
2245
2246#ifdef VBOX_WITH_REM
2247 /*
2248 * Flush REM handler notifications.
2249 */
2250 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
2251 {
2252 REMR3ReplayHandlerNotifications(pVM);
2253 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2254 break;
2255 }
2256#endif
2257
2258 /*
2259 * This is a noop. We just take this route to avoid unnecessary
2260 * tests in the loops.
2261 */
2262 case VMMCALLRING3_VMM_LOGGER_FLUSH:
2263 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2264 LogAlways(("*FLUSH*\n"));
2265 break;
2266
2267 /*
2268 * Set the VM error message.
2269 */
2270 case VMMCALLRING3_VM_SET_ERROR:
2271 VMR3SetErrorWorker(pVM);
2272 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2273 break;
2274
2275 /*
2276 * Set the VM runtime error message.
2277 */
2278 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
2279 pVCpu->vmm.s.rcCallRing3 = VMR3SetRuntimeErrorWorker(pVM);
2280 break;
2281
2282 /*
2283 * Signal a ring 0 hypervisor assertion.
2284 * Cancel the longjmp operation that's in progress.
2285 */
2286 case VMMCALLRING3_VM_R0_ASSERTION:
2287 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
2288 pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call = false;
2289#ifdef RT_ARCH_X86
2290 pVCpu->vmm.s.CallRing3JmpBufR0.eip = 0;
2291#else
2292 pVCpu->vmm.s.CallRing3JmpBufR0.rip = 0;
2293#endif
2294#ifdef VMM_R0_SWITCH_STACK
2295 *(uint64_t *)pVCpu->vmm.s.pbEMTStackR3 = 0; /* clear marker */
2296#endif
2297 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg1));
2298 LogRel(("%s", pVM->vmm.s.szRing0AssertMsg2));
2299 return VERR_VMM_RING0_ASSERTION;
2300
2301 /*
2302 * A forced switch to ring 0 for preemption purposes.
2303 */
2304 case VMMCALLRING3_VM_R0_PREEMPT:
2305 pVCpu->vmm.s.rcCallRing3 = VINF_SUCCESS;
2306 break;
2307
2308 case VMMCALLRING3_FTM_SET_CHECKPOINT:
2309 pVCpu->vmm.s.rcCallRing3 = FTMR3SetCheckpoint(pVM, (FTMCHECKPOINTTYPE)pVCpu->vmm.s.u64CallRing3Arg);
2310 break;
2311
2312 default:
2313 AssertMsgFailed(("enmCallRing3Operation=%d\n", pVCpu->vmm.s.enmCallRing3Operation));
2314 return VERR_VMM_UNKNOWN_RING3_CALL;
2315 }
2316
2317 pVCpu->vmm.s.enmCallRing3Operation = VMMCALLRING3_INVALID;
2318 return VINF_SUCCESS;
2319}
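/*
 * Usage sketch (illustrative only): the ring-0/raw-mode side queues such a
 * request and longjmps back to ring-3 with something along the lines of:
 *
 *     rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PGM_LOCK, 0);
 */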
2320
2321
2322/**
2323 * Displays the force action flags (FFs).
2324 *
2325 * @param pVM Pointer to the VM.
2326 * @param pHlp The output helpers.
2327 * @param pszArgs The additional arguments (ignored).
2328 */
2329static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2330{
2331 int c;
2332 uint32_t f;
2333 NOREF(pszArgs);
2334
2335#define PRINT_FLAG(prf,flag) do { \
2336 if (f & (prf##flag)) \
2337 { \
2338 static const char *s_psz = #flag; \
2339 if (!(c % 6)) \
2340 pHlp->pfnPrintf(pHlp, "%s\n %s", c ? "," : "", s_psz); \
2341 else \
2342 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2343 c++; \
2344 f &= ~(prf##flag); \
2345 } \
2346 } while (0)
2347
2348#define PRINT_GROUP(prf,grp,sfx) do { \
2349 if (f & (prf##grp##sfx)) \
2350 { \
2351 static const char *s_psz = #grp; \
2352 if (!(c % 5)) \
2353 pHlp->pfnPrintf(pHlp, "%s %s", c ? ",\n" : " Groups:\n", s_psz); \
2354 else \
2355 pHlp->pfnPrintf(pHlp, ", %s", s_psz); \
2356 c++; \
2357 } \
2358 } while (0)
2359
2360 /*
2361 * The global flags.
2362 */
2363 const uint32_t fGlobalForcedActions = pVM->fGlobalForcedActions;
2364 pHlp->pfnPrintf(pHlp, "Global FFs: %#RX32", fGlobalForcedActions);
2365
2366 /* show the flag mnemonics */
2367 c = 0;
2368 f = fGlobalForcedActions;
2369 PRINT_FLAG(VM_FF_,TM_VIRTUAL_SYNC);
2370 PRINT_FLAG(VM_FF_,PDM_QUEUES);
2371 PRINT_FLAG(VM_FF_,PDM_DMA);
2372 PRINT_FLAG(VM_FF_,DBGF);
2373 PRINT_FLAG(VM_FF_,REQUEST);
2374 PRINT_FLAG(VM_FF_,CHECK_VM_STATE);
2375 PRINT_FLAG(VM_FF_,RESET);
2376 PRINT_FLAG(VM_FF_,EMT_RENDEZVOUS);
2377 PRINT_FLAG(VM_FF_,PGM_NEED_HANDY_PAGES);
2378 PRINT_FLAG(VM_FF_,PGM_NO_MEMORY);
2379 PRINT_FLAG(VM_FF_,PGM_POOL_FLUSH_PENDING);
2380 PRINT_FLAG(VM_FF_,REM_HANDLER_NOTIFY);
2381 PRINT_FLAG(VM_FF_,DEBUG_SUSPEND);
2382 if (f)
2383 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2384 else
2385 pHlp->pfnPrintf(pHlp, "\n");
2386
2387 /* the groups */
2388 c = 0;
2389 f = fGlobalForcedActions;
2390 PRINT_GROUP(VM_FF_,EXTERNAL_SUSPENDED,_MASK);
2391 PRINT_GROUP(VM_FF_,EXTERNAL_HALTED,_MASK);
2392 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE,_MASK);
2393 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2394 PRINT_GROUP(VM_FF_,HIGH_PRIORITY_POST,_MASK);
2395 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY_POST,_MASK);
2396 PRINT_GROUP(VM_FF_,NORMAL_PRIORITY,_MASK);
2397 PRINT_GROUP(VM_FF_,ALL_REM,_MASK);
2398 if (c)
2399 pHlp->pfnPrintf(pHlp, "\n");
2400
2401 /*
2402 * Per CPU flags.
2403 */
2404 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2405 {
2406 const uint32_t fLocalForcedActions = pVM->aCpus[i].fLocalForcedActions;
2407 pHlp->pfnPrintf(pHlp, "CPU %u FFs: %#RX32", i, fLocalForcedActions);
2408
2409 /* show the flag mnemonics */
2410 c = 0;
2411 f = fLocalForcedActions;
2412 PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC);
2413 PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC);
2414 PRINT_FLAG(VMCPU_FF_,TIMER);
2415 PRINT_FLAG(VMCPU_FF_,INTERRUPT_NMI);
2416 PRINT_FLAG(VMCPU_FF_,INTERRUPT_SMI);
2417 PRINT_FLAG(VMCPU_FF_,PDM_CRITSECT);
2418 PRINT_FLAG(VMCPU_FF_,UNHALT);
2419 PRINT_FLAG(VMCPU_FF_,REQUEST);
2420 PRINT_FLAG(VMCPU_FF_,HM_UPDATE_CR3);
2421 PRINT_FLAG(VMCPU_FF_,HM_UPDATE_PAE_PDPES);
2422 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3);
2423 PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
2424 PRINT_FLAG(VMCPU_FF_,TLB_SHOOTDOWN);
2425 PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
2426 PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
2427 PRINT_FLAG(VMCPU_FF_,BLOCK_NMIS);
2428 PRINT_FLAG(VMCPU_FF_,TO_R3);
2429#ifdef VBOX_WITH_RAW_MODE
2430 PRINT_FLAG(VMCPU_FF_,TRPM_SYNC_IDT);
2431 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_TSS);
2432 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_GDT);
2433 PRINT_FLAG(VMCPU_FF_,SELM_SYNC_LDT);
2434 PRINT_FLAG(VMCPU_FF_,CSAM_SCAN_PAGE);
2435 PRINT_FLAG(VMCPU_FF_,CSAM_PENDING_ACTION);
2436#endif
2437 if (f)
2438 pHlp->pfnPrintf(pHlp, "%s\n Unknown bits: %#RX32\n", c ? "," : "", f);
2439 else
2440 pHlp->pfnPrintf(pHlp, "\n");
2441
2442 if (fLocalForcedActions & VMCPU_FF_INHIBIT_INTERRUPTS)
2443 pHlp->pfnPrintf(pHlp, " intr inhibit RIP: %RGp\n", EMGetInhibitInterruptsPC(&pVM->aCpus[i]));
2444
2445 /* the groups */
2446 c = 0;
2447 f = fLocalForcedActions;
2448 PRINT_GROUP(VMCPU_FF_,EXTERNAL_SUSPENDED,_MASK);
2449 PRINT_GROUP(VMCPU_FF_,EXTERNAL_HALTED,_MASK);
2450 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE,_MASK);
2451 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_PRE_RAW,_MASK);
2452 PRINT_GROUP(VMCPU_FF_,HIGH_PRIORITY_POST,_MASK);
2453 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY_POST,_MASK);
2454 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
2455 PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
2456 PRINT_GROUP(VMCPU_FF_,HM_TO_R3,_MASK);
2457 PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
2458 if (c)
2459 pHlp->pfnPrintf(pHlp, "\n");
2460 }
2461
2462#undef PRINT_FLAG
2463#undef PRINT_GROUP
2464}
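/*
 * Usage sketch (illustrative only): a handler like this is hooked up during
 * init with DBGFR3InfoRegisterInternal so it can be dumped from the debugger;
 * the name "fflags" below is a hypothetical example:
 *
 *     rc = DBGFR3InfoRegisterInternal(pVM, "fflags", "Displays the force action flags.", vmmR3InfoFF);
 */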
2465