VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 38990

Last change on this file since 38990 was 38838, checked in by vboxsync, 14 years ago

VMM,++: Try fix the async reset, suspend and power-off problems in PDM wrt conflicting VMM requests. Split them into priority requests and normal requests. The priority requests can safely be processed when PDM is doing async state change waits, the normal ones cannot. (The problem I bumped into was a unmap-chunk request from PGM being processed during PDMR3Reset, causing a recursive VMMR3EmtRendezvous deadlock.)

1/* $Id: VM.cpp 38838 2011-09-23 11:21:55Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing requests for execution in EMT (serialization purposes
23 * mostly) and for reporting errors back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake; all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of OK idea at a
30 * time when the VMM bit was still kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
32 * However, as it turned out, VMM(.cpp) is almost empty; all it provides in ring-3
33 * is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straightforward
36 * refactoring; the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41/*******************************************************************************
42* Header Files *
43*******************************************************************************/
44#define LOG_GROUP LOG_GROUP_VM
45#include <VBox/vmm/cfgm.h>
46#include <VBox/vmm/vmm.h>
47#include <VBox/vmm/gvmm.h>
48#include <VBox/vmm/mm.h>
49#include <VBox/vmm/cpum.h>
50#include <VBox/vmm/selm.h>
51#include <VBox/vmm/trpm.h>
52#include <VBox/vmm/dbgf.h>
53#include <VBox/vmm/pgm.h>
54#include <VBox/vmm/pdmapi.h>
55#include <VBox/vmm/pdmcritsect.h>
56#include <VBox/vmm/em.h>
57#include <VBox/vmm/iem.h>
58#include <VBox/vmm/rem.h>
59#include <VBox/vmm/tm.h>
60#include <VBox/vmm/stam.h>
61#include <VBox/vmm/patm.h>
62#include <VBox/vmm/csam.h>
63#include <VBox/vmm/iom.h>
64#include <VBox/vmm/ssm.h>
65#include <VBox/vmm/ftm.h>
66#include <VBox/vmm/hwaccm.h>
67#include "VMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70
71#include <VBox/sup.h>
72#include <VBox/dbg.h>
73#include <VBox/err.h>
74#include <VBox/param.h>
75#include <VBox/log.h>
76#include <iprt/assert.h>
77#include <iprt/alloc.h>
78#include <iprt/asm.h>
79#include <iprt/env.h>
80#include <iprt/string.h>
81#include <iprt/time.h>
82#include <iprt/semaphore.h>
83#include <iprt/thread.h>
84#include <iprt/uuid.h>
85
86
87/*******************************************************************************
88* Structures and Typedefs *
89*******************************************************************************/
90/**
91 * VM destruction callback registration record.
92 */
93typedef struct VMATDTOR
94{
95 /** Pointer to the next record in the list. */
96 struct VMATDTOR *pNext;
97 /** Pointer to the callback function. */
98 PFNVMATDTOR pfnAtDtor;
99 /** The user argument. */
100 void *pvUser;
101} VMATDTOR;
102/** Pointer to a VM destruction callback registration record. */
103typedef VMATDTOR *PVMATDTOR;
104
105
106/*******************************************************************************
107* Global Variables *
108*******************************************************************************/
109/** Pointer to the list of VMs. */
110static PUVM g_pUVMsHead = NULL;
111
112/** Pointer to the list of at VM destruction callbacks. */
113static PVMATDTOR g_pVMAtDtorHead = NULL;
114/** Lock the g_pVMAtDtorHead list. */
115#define VM_ATDTOR_LOCK() do { } while (0)
116/** Unlock the g_pVMAtDtorHead list. */
117#define VM_ATDTOR_UNLOCK() do { } while (0)
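/*
 * Illustrative sketch (not part of the original file): how a destruction
 * callback could be pushed onto g_pVMAtDtorHead under the (currently no-op)
 * list lock.  The function name is hypothetical and does not replace the real
 * registration API.
 *
 *      static int myAtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
 *      {
 *          PVMATDTOR pNew = (PVMATDTOR)RTMemAlloc(sizeof(*pNew));
 *          if (!pNew)
 *              return VERR_NO_MEMORY;
 *          pNew->pfnAtDtor = pfnAtDtor;
 *          pNew->pvUser    = pvUser;
 *
 *          VM_ATDTOR_LOCK();
 *          pNew->pNext     = g_pVMAtDtorHead;   // push onto the singly linked list
 *          g_pVMAtDtorHead = pNew;
 *          VM_ATDTOR_UNLOCK();
 *          return VINF_SUCCESS;
 *      }
 */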
118
119
120/*******************************************************************************
121* Internal Functions *
122*******************************************************************************/
123static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
124static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
125static int vmR3InitRing3(PVM pVM, PUVM pUVM);
126static int vmR3InitRing0(PVM pVM);
127static int vmR3InitGC(PVM pVM);
128static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
129static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
130static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
131static void vmR3AtDtor(PVM pVM);
132static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
133static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
134static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
135static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
136static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
137static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
138
139
140/**
141 * Do global VMM init.
142 *
143 * @returns VBox status code.
144 */
145VMMR3DECL(int) VMR3GlobalInit(void)
146{
147 /*
148 * Only once.
149 */
150 static bool volatile s_fDone = false;
151 if (s_fDone)
152 return VINF_SUCCESS;
153
154 /*
155 * We're done.
156 */
157 s_fDone = true;
158 return VINF_SUCCESS;
159}
160
161
162
163/**
164 * Creates a virtual machine by calling the supplied configuration constructor.
165 *
166 * On successful return the VM is created but powered off, i.e. VMR3PowerOn() should
167 * be called to start execution.
168 *
169 * @returns 0 on success.
170 * @returns VBox error code on failure.
171 * @param cCpus Number of virtual CPUs for the new VM.
172 * @param pVmm2UserMethods An optional method table that the VMM can use
173 * to make the user perform various actions, like
174 * for instance state saving.
175 * @param pfnVMAtError Pointer to callback function for setting VM
176 * errors. This was added as an implicit call to
177 * VMR3AtErrorRegister() since there is no way the
178 * caller can get to the VM handle early enough to
179 * do this on its own.
180 * This is called in the context of an EMT.
181 * @param pvUserVM The user argument passed to pfnVMAtError.
182 * @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
183 * This is called in the context of an EMT0.
184 * @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
185 * @param ppVM Where to store the 'handle' of the created VM.
186 */
187VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
188 PFNVMATERROR pfnVMAtError, void *pvUserVM,
189 PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
190 PVM *ppVM)
191{
192 LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
193 cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
194
195 if (pVmm2UserMethods)
196 {
197 AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
198 AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
199 AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
200 AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
201 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
202 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
203 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
204 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
205 AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
206 }
207 AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
208 AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
209 AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
210
211 /*
212 * Because of the current hackiness of the applications
213 * we'll have to initialize global stuff from here.
214 * Later the applications will take care of this in a proper way.
215 */
216 static bool fGlobalInitDone = false;
217 if (!fGlobalInitDone)
218 {
219 int rc = VMR3GlobalInit();
220 if (RT_FAILURE(rc))
221 return rc;
222 fGlobalInitDone = true;
223 }
224
225 /*
226 * Validate input.
227 */
228 AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
229
230 /*
231 * Create the UVM so we can register the at-error callback
232 * and consolidate a bit of cleanup code.
233 */
234 PUVM pUVM = NULL; /* shuts up gcc */
235 int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
236 if (RT_FAILURE(rc))
237 return rc;
238 if (pfnVMAtError)
239 rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
240 if (RT_SUCCESS(rc))
241 {
242 /*
243 * Initialize the support library creating the session for this VM.
244 */
245 rc = SUPR3Init(&pUVM->vm.s.pSession);
246 if (RT_SUCCESS(rc))
247 {
248 /*
249 * Call vmR3CreateU in the EMT thread and wait for it to finish.
250 *
251 * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
252 * submitting a request to a specific VCPU without a pVM. So, to make
253 * sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
254 * that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
255 */
256 PVMREQ pReq;
257 rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
258 (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
259 if (RT_SUCCESS(rc))
260 {
261 rc = pReq->iStatus;
262 VMR3ReqFree(pReq);
263 if (RT_SUCCESS(rc))
264 {
265 /*
266 * Success!
267 */
268 *ppVM = pUVM->pVM;
269 LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
270 return VINF_SUCCESS;
271 }
272 }
273 else
274 AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));
275
276 /*
277 * An error occurred during VM creation. Set the error message directly
278 * using the initial callback, as the callback list might not exist yet.
279 */
280 const char *pszError;
281 switch (rc)
282 {
283 case VERR_VMX_IN_VMX_ROOT_MODE:
284#ifdef RT_OS_LINUX
285 pszError = N_("VirtualBox can't operate in VMX root mode. "
286 "Please disable the KVM kernel extension, recompile your kernel and reboot");
287#else
288 pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
289#endif
290 break;
291
292#ifndef RT_OS_DARWIN
293 case VERR_HWACCM_CONFIG_MISMATCH:
294 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
295 "This hardware extension is required by the VM configuration");
296 break;
297#endif
298
299 case VERR_SVM_IN_USE:
300#ifdef RT_OS_LINUX
301 pszError = N_("VirtualBox can't enable the AMD-V extension. "
302 "Please disable the KVM kernel extension, recompile your kernel and reboot");
303#else
304 pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
305#endif
306 break;
307
308#ifdef RT_OS_LINUX
309 case VERR_SUPDRV_COMPONENT_NOT_FOUND:
310 pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
311 "that no kernel modules from an older version of VirtualBox exist. "
312 "Then try to recompile and reload the kernel modules by executing "
313 "'/etc/init.d/vboxdrv setup' as root");
314 break;
315#endif
316
317 case VERR_RAW_MODE_INVALID_SMP:
318 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
319 "VirtualBox requires this hardware extension to emulate more than one "
320 "guest CPU");
321 break;
322
323 case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
324#ifdef RT_OS_LINUX
325 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
326 "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
327 "the VT-x extension in the VM settings. Note that without VT-x you have "
328 "to reduce the number of guest CPUs to one");
329#else
330 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
331 "extension. Either upgrade your kernel or disable the VT-x extension in the "
332 "VM settings. Note that without VT-x you have to reduce the number of guest "
333 "CPUs to one");
334#endif
335 break;
336
337 case VERR_PDM_DEVICE_NOT_FOUND:
338 pszError = N_("A virtual device is configured in the VM settings but the device "
339 "implementation is missing.\n"
340 "A possible reason for this error is a missing extension pack. Note "
341 "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
342 "support and remote desktop) are only available from an 'extension "
343 "pack' which must be downloaded and installed separately");
344 break;
345
346 case VERR_PCI_PASSTHROUGH_NO_HWACCM:
347 pszError = N_("PCI passthrough requires VT-x/AMD-V");
348 break;
349
350 case VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING:
351 pszError = N_("PCI passthrough requires nested paging");
352 break;
353
354 default:
355 if (VMR3GetErrorCountU(pUVM) == 0)
356 pszError = RTErrGetFull(rc);
357 else
358 pszError = NULL; /* already set. */
359 break;
360 }
361 if (pszError)
362 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
363 }
364 else
365 {
366 /*
367 * An error occurred at support library initialization time (before the
368 * VM could be created). Set the error message directly using the
369 * initial callback, as the callback list doesn't exist yet.
370 */
371 const char *pszError;
372 switch (rc)
373 {
374 case VERR_VM_DRIVER_LOAD_ERROR:
375#ifdef RT_OS_LINUX
376 pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
377 "was either not loaded or /dev/vboxdrv is not set up properly. "
378 "Re-setup the kernel module by executing "
379 "'/etc/init.d/vboxdrv setup' as root");
380#else
381 pszError = N_("VirtualBox kernel driver not loaded");
382#endif
383 break;
384 case VERR_VM_DRIVER_OPEN_ERROR:
385 pszError = N_("VirtualBox kernel driver cannot be opened");
386 break;
387 case VERR_VM_DRIVER_NOT_ACCESSIBLE:
388#ifdef VBOX_WITH_HARDENING
389 /* This should only happen if the executable wasn't hardened - bad code/build. */
390 pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
391 "Re-install VirtualBox. If you are building it yourself, you "
392 "should make sure it installed correctly and that the setuid "
393 "bit is set on the executables calling VMR3Create.");
394#else
395 /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
396# if defined(RT_OS_DARWIN)
397 pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
398 "If you have built VirtualBox yourself, make sure that you do not "
399 "have the vboxdrv KEXT from a different build or installation loaded.");
400# elif defined(RT_OS_LINUX)
401 pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
402 "If you have built VirtualBox yourself, make sure that you do "
403 "not have the vboxdrv kernel module from a different build or "
404 "installation loaded. Also, make sure the vboxdrv udev rule gives "
405 "you the permission you need to access the device.");
406# elif defined(RT_OS_WINDOWS)
407 pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
408# else /* solaris, freebsd, ++. */
409 pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
410 "If you have built VirtualBox yourself, make sure that you do "
411 "not have the vboxdrv kernel module from a different install loaded.");
412# endif
413#endif
414 break;
415 case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
416 case VERR_VM_DRIVER_NOT_INSTALLED:
417#ifdef RT_OS_LINUX
418 pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
419 "was either not loaded or /dev/vboxdrv was not created for some "
420 "reason. Re-setup the kernel module by executing "
421 "'/etc/init.d/vboxdrv setup' as root");
422#else
423 pszError = N_("VirtualBox kernel driver not installed");
424#endif
425 break;
426 case VERR_NO_MEMORY:
427 pszError = N_("VirtualBox support library out of memory");
428 break;
429 case VERR_VERSION_MISMATCH:
430 case VERR_VM_DRIVER_VERSION_MISMATCH:
431 pszError = N_("The VirtualBox support driver which is running is from a different "
432 "version of VirtualBox. You can correct this by stopping all "
433 "running instances of VirtualBox and reinstalling the software.");
434 break;
435 default:
436 pszError = N_("Unknown error initializing kernel driver");
437 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
438 }
439 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
440 }
441 }
442
443 /* cleanup */
444 vmR3DestroyUVM(pUVM, 2000);
445 LogFlow(("VMR3Create: returns %Rrc\n", rc));
446 return rc;
447}
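/*
 * Illustrative usage sketch (not part of the original file).  Names prefixed
 * 'my' are hypothetical; error handling, the at-error callback and VM
 * destruction are omitted for brevity.
 *
 *      static DECLCALLBACK(int) myCfgmConstructor(PVM pVM, void *pvUser)
 *      {
 *          // Build the configuration tree under CFGMR3GetRoot(pVM) here.
 *          NOREF(pvUser);
 *          return VINF_SUCCESS;
 *      }
 *
 *      // In the frontend, on any thread.  Arguments: cCpus, pVmm2UserMethods,
 *      // pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM.
 *      PVM pVM;
 *      int rc = VMR3Create(1, NULL, NULL, NULL, myCfgmConstructor, NULL, &pVM);
 *      if (RT_SUCCESS(rc))
 *          rc = VMR3PowerOn(pVM);
 */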
448
449
450/**
451 * Creates the UVM.
452 *
453 * This will not initialize the support library, even though vmR3DestroyUVM
454 * will terminate it.
455 *
456 * @returns VBox status code.
457 * @param cCpus Number of virtual CPUs
458 * @param pVmm2UserMethods Pointer to the optional VMM -> User method
459 * table.
460 * @param ppUVM Where to store the UVM pointer.
461 */
462static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
463{
464 uint32_t i;
465
466 /*
467 * Create and initialize the UVM.
468 */
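    /* RT_OFFSETOF(UVM, aCpus[cCpus]) sizes the allocation up to and including
       cCpus entries of the variable-length aCpus array at the end of UVM, i.e.
       RT_OFFSETOF(UVM, aCpus) + cCpus * sizeof(UVMCPU). */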
469 PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
470 AssertReturn(pUVM, VERR_NO_MEMORY);
471 pUVM->u32Magic = UVM_MAGIC;
472 pUVM->cCpus = cCpus;
473 pUVM->pVmm2UserMethods = pVmm2UserMethods;
474
475 AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
476
477 pUVM->vm.s.cUvmRefs = 1;
478 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
479 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
480 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
481
482 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
483 RTUuidClear(&pUVM->vm.s.Uuid);
484
485 /* Initialize the VMCPU array in the UVM. */
486 for (i = 0; i < cCpus; i++)
487 {
488 pUVM->aCpus[i].pUVM = pUVM;
489 pUVM->aCpus[i].idCpu = i;
490 }
491
492 /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
493 int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
494 AssertRC(rc);
495 if (RT_SUCCESS(rc))
496 {
497 /* Allocate a halt method event semaphore for each VCPU. */
498 for (i = 0; i < cCpus; i++)
499 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
500 for (i = 0; i < cCpus; i++)
501 {
502 rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
503 if (RT_FAILURE(rc))
504 break;
505 }
506 if (RT_SUCCESS(rc))
507 {
508 rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
509 if (RT_SUCCESS(rc))
510 {
511 rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
512 if (RT_SUCCESS(rc))
513 {
514 /*
515 * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
516 */
517 rc = STAMR3InitUVM(pUVM);
518 if (RT_SUCCESS(rc))
519 {
520 rc = MMR3InitUVM(pUVM);
521 if (RT_SUCCESS(rc))
522 {
523 rc = PDMR3InitUVM(pUVM);
524 if (RT_SUCCESS(rc))
525 {
526 /*
527 * Start the emulation threads for all VMCPUs.
528 */
529 for (i = 0; i < cCpus; i++)
530 {
531 rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
532 RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
533 cCpus > 1 ? "EMT-%u" : "EMT", i);
534 if (RT_FAILURE(rc))
535 break;
536
537 pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
538 }
539
540 if (RT_SUCCESS(rc))
541 {
542 *ppUVM = pUVM;
543 return VINF_SUCCESS;
544 }
545
546 /* bail out. */
547 while (i-- > 0)
548 {
549 /** @todo rainy day: terminate the EMTs. */
550 }
551 PDMR3TermUVM(pUVM);
552 }
553 MMR3TermUVM(pUVM);
554 }
555 STAMR3TermUVM(pUVM);
556 }
557 RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
558 }
559 RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
560 }
561 }
562 for (i = 0; i < cCpus; i++)
563 {
564 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
565 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
566 }
567 RTTlsFree(pUVM->vm.s.idxTLS);
568 }
569 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
570 return rc;
571}
572
573
574/**
575 * Creates and initializes the VM.
576 *
577 * @thread EMT
578 */
579static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
580{
581 /*
582 * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
583 */
584 int rc = PDMR3LdrLoadVMMR0U(pUVM);
585 if (RT_FAILURE(rc))
586 {
587 /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
588 * bird: what about moving the message down here? Main picks the first message, right? */
589 if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
590 return rc; /* proper error message set later on */
591 return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
592 }
593
594 /*
595 * Request GVMM to create a new VM for us.
596 */
597 GVMMCREATEVMREQ CreateVMReq;
598 CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
599 CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
600 CreateVMReq.pSession = pUVM->vm.s.pSession;
601 CreateVMReq.pVMR0 = NIL_RTR0PTR;
602 CreateVMReq.pVMR3 = NULL;
603 CreateVMReq.cCpus = cCpus;
604 rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
605 if (RT_SUCCESS(rc))
606 {
607 PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
608 AssertRelease(VALID_PTR(pVM));
609 AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
610 AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
611 AssertRelease(pVM->cCpus == cCpus);
612 AssertRelease(pVM->uCpuExecutionCap == 100);
613 AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
614 AssertCompileMemberAlignment(VM, cpum, 64);
615 AssertCompileMemberAlignment(VM, tm, 64);
616 AssertCompileMemberAlignment(VM, aCpus, PAGE_SIZE);
617
618 Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
619 pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));
620
621 /*
622 * Initialize the VM structure and our internal data (VMINT).
623 */
624 pVM->pUVM = pUVM;
625
626 for (VMCPUID i = 0; i < pVM->cCpus; i++)
627 {
628 pVM->aCpus[i].pUVCpu = &pUVM->aCpus[i];
629 pVM->aCpus[i].idCpu = i;
630 pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
631 Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
632 /* hNativeThreadR0 is initialized on EMT registration. */
633 pUVM->aCpus[i].pVCpu = &pVM->aCpus[i];
634 pUVM->aCpus[i].pVM = pVM;
635 }
636
637
638 /*
639 * Init the configuration.
640 */
641 rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
642 if (RT_SUCCESS(rc))
643 {
644 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
645 rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
646 if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
647 pVM->fHWACCMEnabled = true;
648
649 /*
650 * If executing in fake suplib mode disable RR3 and RR0 in the config.
651 */
652 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
653 if (psz && !strcmp(psz, "fake"))
654 {
655 CFGMR3RemoveValue(pRoot, "RawR3Enabled");
656 CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
657 CFGMR3RemoveValue(pRoot, "RawR0Enabled");
658 CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
659 }
660
661 /*
662 * Make sure the CPU count in the config data matches.
663 */
664 if (RT_SUCCESS(rc))
665 {
666 uint32_t cCPUsCfg;
667 rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
668 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
669 if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
670 {
671 AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
672 cCPUsCfg, cCpus));
673 rc = VERR_INVALID_PARAMETER;
674 }
675 }
676
677 /*
678 * Get the CPU execution cap.
679 */
680 if (RT_SUCCESS(rc))
681 {
682 rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
683 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc));
684 }
685
686 /*
687 * Get the VM name and UUID.
688 */
689 if (RT_SUCCESS(rc))
690 {
691 rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
692 AssertLogRelMsg(RT_SUCCESS(rc), ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc));
693 }
694
695 if (RT_SUCCESS(rc))
696 {
697 rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
698 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
699 rc = VINF_SUCCESS;
700 AssertLogRelMsg(RT_SUCCESS(rc), ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc));
701 }
702
703 if (RT_SUCCESS(rc))
704 {
705 /*
706 * Init the ring-3 components and ring-3 per cpu data, finishing it off
707 * by a relocation round (intermediate context finalization will do this).
708 */
709 rc = vmR3InitRing3(pVM, pUVM);
710 if (RT_SUCCESS(rc))
711 {
712 rc = PGMR3FinalizeMappings(pVM);
713 if (RT_SUCCESS(rc))
714 {
715
716 LogFlow(("Ring-3 init succeeded\n"));
717
718 /*
719 * Init the Ring-0 components.
720 */
721 rc = vmR3InitRing0(pVM);
722 if (RT_SUCCESS(rc))
723 {
724 /* Relocate again, because some switcher fixups depend on R0 init results. */
725 VMR3Relocate(pVM, 0);
726
727#ifdef VBOX_WITH_DEBUGGER
728 /*
729 * Init the tcp debugger console if we're building
730 * with debugger support.
731 */
732 void *pvUser = NULL;
733 rc = DBGCTcpCreate(pVM, &pvUser);
734 if ( RT_SUCCESS(rc)
735 || rc == VERR_NET_ADDRESS_IN_USE)
736 {
737 pUVM->vm.s.pvDBGC = pvUser;
738#endif
739 /*
740 * Init the Guest Context components.
741 */
742 rc = vmR3InitGC(pVM);
743 if (RT_SUCCESS(rc))
744 {
745 /*
746 * Now we can safely set the VM halt method to default.
747 */
748 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
749 if (RT_SUCCESS(rc))
750 {
751 /*
752 * Set the state and link into the global list.
753 */
754 vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
755 pUVM->pNext = g_pUVMsHead;
756 g_pUVMsHead = pUVM;
757
758#ifdef LOG_ENABLED
759 RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
760#endif
761 return VINF_SUCCESS;
762 }
763 }
764#ifdef VBOX_WITH_DEBUGGER
765 DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
766 pUVM->vm.s.pvDBGC = NULL;
767 }
768#endif
769 //..
770 }
771 }
772 vmR3Destroy(pVM);
773 }
774 }
775 //..
776
777 /* Clean CFGM. */
778 int rc2 = CFGMR3Term(pVM);
779 AssertRC(rc2);
780 }
781
782 /*
783 * Do automatic cleanups while the VM structure is still alive and all
784 * references to it are still working.
785 */
786 PDMR3CritSectTerm(pVM);
787
788 /*
789 * Drop all references to VM and the VMCPU structures, then
790 * tell GVMM to destroy the VM.
791 */
792 pUVM->pVM = NULL;
793 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
794 {
795 pUVM->aCpus[i].pVM = NULL;
796 pUVM->aCpus[i].pVCpu = NULL;
797 }
798 Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
799
800 if (pUVM->cCpus > 1)
801 {
802 /* Poke the other EMTs since they may have stale pVM and pVCpu references
803 on the stack (see VMR3WaitU for instance) if they've been awakened after
804 VM creation. */
805 for (VMCPUID i = 1; i < pUVM->cCpus; i++)
806 VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
807 RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
808 }
809
810 int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
811 AssertRC(rc2);
812 }
813 else
814 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));
815
816 LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
817 return rc;
818}
819
820
821/**
822 * Register the calling EMT with GVM.
823 *
824 * @returns VBox status code.
825 * @param pVM The VM handle.
826 * @param idCpu The Virtual CPU ID.
827 */
828static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
829{
830 Assert(VMMGetCpuId(pVM) == idCpu);
831 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
832 if (RT_FAILURE(rc))
833 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
834 return rc;
835}
836
837
838/**
839 * Initializes all R3 components of the VM
840 */
841static int vmR3InitRing3(PVM pVM, PUVM pUVM)
842{
843 int rc;
844
845 /*
846 * Register the other EMTs with GVM.
847 */
848 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
849 {
850 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
851 if (RT_FAILURE(rc))
852 return rc;
853 }
854
855 /*
856 * Init all R3 components, the order here might be important.
857 */
858 rc = MMR3Init(pVM);
859 if (RT_SUCCESS(rc))
860 {
861 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
862 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
863 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
864 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
865 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
866 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
867 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
868 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
869 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
870 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
871 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
872 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
873 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
874 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
875
876 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
877 {
878 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
879 AssertRC(rc);
880 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
881 AssertRC(rc);
882 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/VM/CPU%d/Halt/BlockOverslept", idCpu);
883 AssertRC(rc);
884 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning too early.", "/PROF/VM/CPU%d/Halt/BlockInsomnia", idCpu);
885 AssertRC(rc);
886 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/VM/CPU%d/Halt/BlockOnTime", idCpu);
887 AssertRC(rc);
888 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
889 AssertRC(rc);
890 }
891
892 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
893 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
894 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
895 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
896 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
897 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
898 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
899 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
900
901 rc = CPUMR3Init(pVM);
902 if (RT_SUCCESS(rc))
903 {
904 rc = HWACCMR3Init(pVM);
905 if (RT_SUCCESS(rc))
906 {
907 rc = PGMR3Init(pVM);
908 if (RT_SUCCESS(rc))
909 {
910 rc = REMR3Init(pVM);
911 if (RT_SUCCESS(rc))
912 {
913 rc = MMR3InitPaging(pVM);
914 if (RT_SUCCESS(rc))
915 rc = TMR3Init(pVM);
916 if (RT_SUCCESS(rc))
917 {
918 rc = FTMR3Init(pVM);
919 if (RT_SUCCESS(rc))
920 {
921 rc = VMMR3Init(pVM);
922 if (RT_SUCCESS(rc))
923 {
924 rc = SELMR3Init(pVM);
925 if (RT_SUCCESS(rc))
926 {
927 rc = TRPMR3Init(pVM);
928 if (RT_SUCCESS(rc))
929 {
930 rc = CSAMR3Init(pVM);
931 if (RT_SUCCESS(rc))
932 {
933 rc = PATMR3Init(pVM);
934 if (RT_SUCCESS(rc))
935 {
936 rc = IOMR3Init(pVM);
937 if (RT_SUCCESS(rc))
938 {
939 rc = EMR3Init(pVM);
940 if (RT_SUCCESS(rc))
941 {
942 rc = IEMR3Init(pVM);
943 if (RT_SUCCESS(rc))
944 {
945 rc = DBGFR3Init(pVM);
946 if (RT_SUCCESS(rc))
947 {
948 rc = PDMR3Init(pVM);
949 if (RT_SUCCESS(rc))
950 {
951 rc = PGMR3InitDynMap(pVM);
952 if (RT_SUCCESS(rc))
953 rc = MMR3HyperInitFinalize(pVM);
954 if (RT_SUCCESS(rc))
955 rc = PATMR3InitFinalize(pVM);
956 if (RT_SUCCESS(rc))
957 rc = PGMR3InitFinalize(pVM);
958 if (RT_SUCCESS(rc))
959 rc = SELMR3InitFinalize(pVM);
960 if (RT_SUCCESS(rc))
961 rc = TMR3InitFinalize(pVM);
962 if (RT_SUCCESS(rc))
963 rc = REMR3InitFinalize(pVM);
964 if (RT_SUCCESS(rc))
965 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
966 if (RT_SUCCESS(rc))
967 {
968 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
969 return VINF_SUCCESS;
970 }
971
972 int rc2 = PDMR3Term(pVM);
973 AssertRC(rc2);
974 }
975 int rc2 = DBGFR3Term(pVM);
976 AssertRC(rc2);
977 }
978 int rc2 = IEMR3Term(pVM);
979 AssertRC(rc2);
980 }
981 int rc2 = EMR3Term(pVM);
982 AssertRC(rc2);
983 }
984 int rc2 = IOMR3Term(pVM);
985 AssertRC(rc2);
986 }
987 int rc2 = PATMR3Term(pVM);
988 AssertRC(rc2);
989 }
990 int rc2 = CSAMR3Term(pVM);
991 AssertRC(rc2);
992 }
993 int rc2 = TRPMR3Term(pVM);
994 AssertRC(rc2);
995 }
996 int rc2 = SELMR3Term(pVM);
997 AssertRC(rc2);
998 }
999 int rc2 = VMMR3Term(pVM);
1000 AssertRC(rc2);
1001 }
1002 int rc2 = FTMR3Term(pVM);
1003 AssertRC(rc2);
1004 }
1005 int rc2 = TMR3Term(pVM);
1006 AssertRC(rc2);
1007 }
1008 int rc2 = REMR3Term(pVM);
1009 AssertRC(rc2);
1010 }
1011 int rc2 = PGMR3Term(pVM);
1012 AssertRC(rc2);
1013 }
1014 int rc2 = HWACCMR3Term(pVM);
1015 AssertRC(rc2);
1016 }
1017 //int rc2 = CPUMR3Term(pVM);
1018 //AssertRC(rc2);
1019 }
1020 /* MMR3Term is not called here because it'll kill the heap. */
1021 }
1022
1023 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
1024 return rc;
1025}
1026
1027
1028/**
1029 * Initializes all R0 components of the VM
1030 */
1031static int vmR3InitRing0(PVM pVM)
1032{
1033 LogFlow(("vmR3InitRing0:\n"));
1034
1035 /*
1036 * Check for FAKE suplib mode.
1037 */
1038 int rc = VINF_SUCCESS;
1039 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1040 if (!psz || strcmp(psz, "fake"))
1041 {
1042 /*
1043 * Call the VMMR0 component and let it do the init.
1044 */
1045 rc = VMMR3InitR0(pVM);
1046 }
1047 else
1048 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1049
1050 /*
1051 * Do notifications and return.
1052 */
1053 if (RT_SUCCESS(rc))
1054 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1055 if (RT_SUCCESS(rc))
1056 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HWACCM);
1057
1058 /** @todo Move this to the VMINITCOMPLETED_HWACCM notification handler. */
1059 if (RT_SUCCESS(rc))
1060 CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
1061
1062 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1063 return rc;
1064}
1065
1066
1067/**
1068 * Initializes all GC components of the VM
1069 */
1070static int vmR3InitGC(PVM pVM)
1071{
1072 LogFlow(("vmR3InitGC:\n"));
1073
1074 /*
1075 * Check for FAKE suplib mode.
1076 */
1077 int rc = VINF_SUCCESS;
1078 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1079 if (!psz || strcmp(psz, "fake"))
1080 {
1081 /*
1082 * Call the VMMR0 component and let it do the init.
1083 */
1084 rc = VMMR3InitRC(pVM);
1085 }
1086 else
1087 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1088
1089 /*
1090 * Do notifications and return.
1091 */
1092 if (RT_SUCCESS(rc))
1093 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1094 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1095 return rc;
1096}
1097
1098
1099/**
1100 * Do init completed notifications.
1101 *
1102 * @returns VBox status code.
1103 * @param pVM The VM handle.
1104 * @param enmWhat What's completed.
1105 */
1106static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1107{
1108 int rc = VMMR3InitCompleted(pVM, enmWhat);
1109 if (RT_SUCCESS(rc))
1110 rc = HWACCMR3InitCompleted(pVM, enmWhat);
1111 if (RT_SUCCESS(rc))
1112 rc = PGMR3InitCompleted(pVM, enmWhat);
1113 return rc;
1114}
1115
1116
1117/**
1118 * Logger callback for inserting a custom prefix.
1119 *
1120 * @returns Number of chars written.
1121 * @param pLogger The logger.
1122 * @param pchBuf The output buffer.
1123 * @param cchBuf The output buffer size.
1124 * @param pvUser Pointer to the UVM structure.
1125 */
1126static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1127{
1128 AssertReturn(cchBuf >= 2, 0);
1129 PUVM pUVM = (PUVM)pvUser;
1130 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1131 if (pUVCpu)
1132 {
1133 static const char s_szHex[17] = "0123456789abcdef";
1134 VMCPUID const idCpu = pUVCpu->idCpu;
1135 pchBuf[1] = s_szHex[ idCpu & 15];
1136 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1137 }
1138 else
1139 {
1140 pchBuf[0] = 'x';
1141 pchBuf[1] = 'y';
1142 }
1143
1144 return 2;
1145}
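/*
 * Note: vmR3CreateU registers this callback via
 * RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM) when
 * LOG_ENABLED is defined, so every log line starts with the two hex digits of
 * the calling EMT's virtual CPU id ("00", "01", ...) or with "xy" for threads
 * that have no UVMCPU pointer in the TLS slot.
 */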
1146
1147
1148/**
1149 * Calls the relocation functions for all VMM components so they can update
1150 * any GC pointers. When this function is called all the basic VM members
1151 * have been updated and the actual memory relocation has been done
1152 * by the PGM/MM.
1153 *
1154 * This is used both on init and on runtime relocations.
1155 *
1156 * @param pVM VM handle.
1157 * @param offDelta Relocation delta relative to old location.
1158 */
1159VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1160{
1161 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1162
1163 /*
1164 * The order here is very important!
1165 */
1166 PGMR3Relocate(pVM, offDelta);
1167 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1168 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1169 CPUMR3Relocate(pVM);
1170 HWACCMR3Relocate(pVM);
1171 SELMR3Relocate(pVM);
1172 VMMR3Relocate(pVM, offDelta);
1173 SELMR3Relocate(pVM); /* !hack! fix stack! */
1174 TRPMR3Relocate(pVM, offDelta);
1175 PATMR3Relocate(pVM);
1176 CSAMR3Relocate(pVM, offDelta);
1177 IOMR3Relocate(pVM, offDelta);
1178 EMR3Relocate(pVM);
1179 TMR3Relocate(pVM, offDelta);
1180 IEMR3Relocate(pVM);
1181 DBGFR3Relocate(pVM, offDelta);
1182 PDMR3Relocate(pVM, offDelta);
1183}
1184
1185
1186/**
1187 * EMT rendezvous worker for VMR3PowerOn.
1188 *
1189 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1190 * code, see FNVMMEMTRENDEZVOUS.)
1191 *
1192 * @param pVM The VM handle.
1193 * @param pVCpu The VMCPU handle of the EMT.
1194 * @param pvUser Ignored.
1195 */
1196static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1197{
1198 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1199 Assert(!pvUser); NOREF(pvUser);
1200
1201 /*
1202 * The first thread thru here tries to change the state. We shouldn't be
1203 * called again if this fails.
1204 */
1205 if (pVCpu->idCpu == pVM->cCpus - 1)
1206 {
1207 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1208 if (RT_FAILURE(rc))
1209 return rc;
1210 }
1211
1212 VMSTATE enmVMState = VMR3GetState(pVM);
1213 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1214 ("%s\n", VMR3GetStateName(enmVMState)),
1215 VERR_INTERNAL_ERROR_4);
1216
1217 /*
1218 * All EMTs change their state to started.
1219 */
1220 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1221
1222 /*
1223 * EMT(0) is last thru here and it will make the notification calls
1224 * and advance the state.
1225 */
1226 if (pVCpu->idCpu == 0)
1227 {
1228 PDMR3PowerOn(pVM);
1229 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1230 }
1231
1232 return VINF_SUCCESS;
1233}
1234
1235
1236/**
1237 * Powers on the virtual machine.
1238 *
1239 * @returns VBox status code.
1240 *
1241 * @param pVM The VM to power on.
1242 *
1243 * @thread Any thread.
1244 * @vmstate Created
1245 * @vmstateto PoweringOn+Running
1246 */
1247VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1248{
1249 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1250 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1251
1252 /*
1253 * Gather all the EMTs to reduce the init TSC drift and keep
1254 * the state changing APIs a bit uniform.
1255 */
1256 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1257 vmR3PowerOn, NULL);
1258 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1259 return rc;
1260}
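/*
 * The power-on, suspend and resume workers above and below all follow the same
 * descending-order rendezvous pattern: VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING
 * runs the worker on the EMTs one at a time from the highest CPU id down to
 * EMT(0), so the last CPU is the first one through (and attempts the state
 * transition) while EMT(0) is the last one through (and does the actual work).
 * A minimal sketch of such a worker (illustrative only, not part of the
 * original file):
 *
 *      static DECLCALLBACK(VBOXSTRICTRC) myStateChangeWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *      {
 *          if (pVCpu->idCpu == pVM->cCpus - 1)     // first one through: change the state
 *          {
 *              int rc = vmR3TrySetState(pVM, "myOperation", 1, VMSTATE_SUSPENDING, VMSTATE_RUNNING);
 *              if (RT_FAILURE(rc))
 *                  return rc;
 *          }
 *          if (pVCpu->idCpu == 0)                  // last one through: do the work
 *              vmR3SuspendDoWork(pVM);
 *          return VINF_EM_SUSPEND;
 *      }
 */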
1261
1262
1263/**
1264 * Does the suspend notifications.
1265 *
1266 * @param pVM The VM handle.
1267 * @thread EMT(0)
1268 */
1269static void vmR3SuspendDoWork(PVM pVM)
1270{
1271 PDMR3Suspend(pVM);
1272}
1273
1274
1275/**
1276 * EMT rendezvous worker for VMR3Suspend.
1277 *
1278 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1279 * return code, see FNVMMEMTRENDEZVOUS.)
1280 *
1281 * @param pVM The VM handle.
1282 * @param pVCpu The VMCPU handle of the EMT.
1283 * @param pvUser Ignored.
1284 */
1285static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1286{
1287 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1288 Assert(!pvUser); NOREF(pvUser);
1289
1290 /*
1291 * The first EMT switches the state to suspending. If this fails because
1292 * something was racing us in one way or the other, there will be no more
1293 * calls and thus the state assertion below is not going to annoy anyone.
1294 */
1295 if (pVCpu->idCpu == pVM->cCpus - 1)
1296 {
1297 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1298 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1299 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1300 if (RT_FAILURE(rc))
1301 return rc;
1302 }
1303
1304 VMSTATE enmVMState = VMR3GetState(pVM);
1305 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1306 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1307 ("%s\n", VMR3GetStateName(enmVMState)),
1308 VERR_INTERNAL_ERROR_4);
1309
1310 /*
1311 * EMT(0) does the actual suspending *after* all the other CPUs have
1312 * been thru here.
1313 */
1314 if (pVCpu->idCpu == 0)
1315 {
1316 vmR3SuspendDoWork(pVM);
1317
1318 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1319 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1320 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1321 if (RT_FAILURE(rc))
1322 return VERR_INTERNAL_ERROR_3;
1323 }
1324
1325 return VINF_EM_SUSPEND;
1326}
1327
1328
1329/**
1330 * Suspends a running VM.
1331 *
1332 * @returns VBox status code. When called on EMT, this will be a strict status
1333 * code that has to be propagated up the call stack.
1334 *
1335 * @param pVM The VM to suspend.
1336 *
1337 * @thread Any thread.
1338 * @vmstate Running or RunningLS
1339 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1340 */
1341VMMR3DECL(int) VMR3Suspend(PVM pVM)
1342{
1343 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1344 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1345
1346 /*
1347 * Gather all the EMTs to make sure there are no races before
1348 * changing the VM state.
1349 */
1350 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1351 vmR3Suspend, NULL);
1352 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1353 return rc;
1354}
1355
1356
1357/**
1358 * EMT rendezvous worker for VMR3Resume.
1359 *
1360 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1361 * return code, see FNVMMEMTRENDEZVOUS.)
1362 *
1363 * @param pVM The VM handle.
1364 * @param pVCpu The VMCPU handle of the EMT.
1365 * @param pvUser Ignored.
1366 */
1367static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1368{
1369 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1370 Assert(!pvUser); NOREF(pvUser);
1371
1372 /*
1373 * The first thread thru here tries to change the state. We shouldn't be
1374 * called again if this fails.
1375 */
1376 if (pVCpu->idCpu == pVM->cCpus - 1)
1377 {
1378 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1379 if (RT_FAILURE(rc))
1380 return rc;
1381 }
1382
1383 VMSTATE enmVMState = VMR3GetState(pVM);
1384 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1385 ("%s\n", VMR3GetStateName(enmVMState)),
1386 VERR_INTERNAL_ERROR_4);
1387
1388#if 0
1389 /*
1390 * All EMTs changes their state to started.
1391 */
1392 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1393#endif
1394
1395 /*
1396 * EMT(0) is last thru here and it will make the notification calls
1397 * and advance the state.
1398 */
1399 if (pVCpu->idCpu == 0)
1400 {
1401 PDMR3Resume(pVM);
1402 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1403 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1404 }
1405
1406 return VINF_EM_RESUME;
1407}
1408
1409
1410/**
1411 * Resume VM execution.
1412 *
1413 * @returns VBox status code. When called on EMT, this will be a strict status
1414 * code that has to be propagated up the call stack.
1415 *
1416 * @param pVM The VM to resume.
1417 *
1418 * @thread Any thread.
1419 * @vmstate Suspended
1420 * @vmstateto Running
1421 */
1422VMMR3DECL(int) VMR3Resume(PVM pVM)
1423{
1424 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1425 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1426
1427 /*
1428 * Gather all the EMTs to make sure there are no races before
1429 * changing the VM state.
1430 */
1431 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1432 vmR3Resume, NULL);
1433 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1434 return rc;
1435}
1436
1437
1438/**
1439 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1440 * after the live step has been completed.
1441 *
1442 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1443 * return code, see FNVMMEMTRENDEZVOUS.)
1444 *
1445 * @param pVM The VM handle.
1446 * @param pVCpu The VMCPU handle of the EMT.
1447 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1448 */
1449static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1450{
1451 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1452 bool *pfSuspended = (bool *)pvUser;
1453
1454 /*
1455 * The first thread thru here tries to change the state. We shouldn't be
1456 * called again if this fails.
1457 */
1458 if (pVCpu->idCpu == pVM->cCpus - 1U)
1459 {
1460 PUVM pUVM = pVM->pUVM;
1461 int rc;
1462
1463 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1464 VMSTATE enmVMState = pVM->enmVMState;
1465 switch (enmVMState)
1466 {
1467 case VMSTATE_RUNNING_LS:
1468 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
1469 rc = VINF_SUCCESS;
1470 break;
1471
1472 case VMSTATE_SUSPENDED_EXT_LS:
1473 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1474 rc = VINF_SUCCESS;
1475 break;
1476
1477 case VMSTATE_DEBUGGING_LS:
1478 rc = VERR_TRY_AGAIN;
1479 break;
1480
1481 case VMSTATE_OFF_LS:
1482 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS);
1483 rc = VERR_SSM_LIVE_POWERED_OFF;
1484 break;
1485
1486 case VMSTATE_FATAL_ERROR_LS:
1487 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS);
1488 rc = VERR_SSM_LIVE_FATAL_ERROR;
1489 break;
1490
1491 case VMSTATE_GURU_MEDITATION_LS:
1492 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS);
1493 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1494 break;
1495
1496 case VMSTATE_POWERING_OFF_LS:
1497 case VMSTATE_SUSPENDING_EXT_LS:
1498 case VMSTATE_RESETTING_LS:
1499 default:
1500 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1501 rc = VERR_INTERNAL_ERROR_3;
1502 break;
1503 }
1504 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1505 if (RT_FAILURE(rc))
1506 {
1507 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1508 return rc;
1509 }
1510 }
1511
1512 VMSTATE enmVMState = VMR3GetState(pVM);
1513 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1514 ("%s\n", VMR3GetStateName(enmVMState)),
1515 VERR_INTERNAL_ERROR_4);
1516
1517 /*
1518 * Only EMT(0) has work to do since it's last thru here.
1519 */
1520 if (pVCpu->idCpu == 0)
1521 {
1522 vmR3SuspendDoWork(pVM);
1523 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1524 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1525 if (RT_FAILURE(rc))
1526 return VERR_INTERNAL_ERROR_3;
1527
1528 *pfSuspended = true;
1529 }
1530
1531 return VINF_EM_SUSPEND;
1532}
1533
1534
1535/**
1536 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1537 * SSMR3LiveDoStep1 failure.
1538 *
1539 * Doing this as a rendezvous operation avoids all annoying transition
1540 * states.
1541 *
1542 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1543 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1544 *
1545 * @param pVM The VM handle.
1546 * @param pVCpu The VMCPU handle of the EMT.
1547 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1548 */
1549static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1550{
1551 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1552 bool *pfSuspended = (bool *)pvUser;
1553 NOREF(pVCpu);
1554
1555 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1556 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1557 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1558 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1559 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1560 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1561 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1562 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1563 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1564 if (rc == 1)
1565 rc = VERR_SSM_LIVE_POWERED_OFF;
1566 else if (rc == 2)
1567 rc = VERR_SSM_LIVE_FATAL_ERROR;
1568 else if (rc == 3)
1569 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1570 else if (rc == 4)
1571 {
1572 *pfSuspended = true;
1573 rc = VINF_SUCCESS;
1574 }
1575 else if (rc > 0)
1576 rc = VINF_SUCCESS;
1577 return rc;
1578}
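/*
 * Note on the rc == 1..4 mapping above: vmR3TrySetState() takes cTransitions
 * (new, old) state pairs and, on success, returns the 1-based index of the
 * pair that matched.  So the OFF_LS -> OFF transition yields 1, the
 * FATAL_ERROR_LS -> FATAL_ERROR transition yields 2, and so on.  vmR3Save()
 * below relies on the same behaviour with two pairs.
 */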
1579
1580
1581/**
1582 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1583 *
1584 * @returns VBox status code.
1585 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1586 *
1587 * @param pVM The VM handle.
1588 * @param pSSM The handle of saved state operation.
1589 *
1590 * @thread EMT(0)
1591 */
1592static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1593{
1594 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1595 VM_ASSERT_EMT0(pVM);
1596
1597 /*
1598 * Advance the state and mark if VMR3Suspend was called.
1599 */
1600 int rc = VINF_SUCCESS;
1601 VMSTATE enmVMState = VMR3GetState(pVM);
1602 if (enmVMState == VMSTATE_SUSPENDED_LS)
1603 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1604 else
1605 {
1606 if (enmVMState != VMSTATE_SAVING)
1607 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1608 rc = VINF_SSM_LIVE_SUSPENDED;
1609 }
1610
1611 /*
1612 * Finish up and release the handle. Careful with the status codes.
1613 */
1614 int rc2 = SSMR3LiveDoStep2(pSSM);
1615 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1616 rc = rc2;
1617
1618 rc2 = SSMR3LiveDone(pSSM);
1619 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1620 rc = rc2;
1621
1622 /*
1623 * Advance to the final state and return.
1624 */
1625 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1626 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1627 return rc;
1628}
1629
1630
1631/**
1632 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1633 * SSMR3LiveSave.
1634 *
1635 * @returns VBox status code.
1636 *
1637 * @param pVM The VM handle.
1638 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1639 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1640 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1641 * @param pvStreamOpsUser The user argument to the stream methods.
1642 * @param enmAfter What to do afterwards.
1643 * @param pfnProgress Progress callback. Optional.
1644 * @param pvProgressUser User argument for the progress callback.
1645 * @param ppSSM Where to return the saved state handle in case of a
1646 * live snapshot scenario.
1647 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1648 *
1649 * @thread EMT
1650 */
1651static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1652 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM,
1653 bool fSkipStateChanges)
1654{
1655 int rc = VINF_SUCCESS;
1656
1657 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1658 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1659
1660 /*
1661 * Validate input.
1662 */
1663 AssertPtrNull(pszFilename);
1664 AssertPtrNull(pStreamOps);
1665 AssertPtr(pVM);
1666 Assert( enmAfter == SSMAFTER_DESTROY
1667 || enmAfter == SSMAFTER_CONTINUE
1668 || enmAfter == SSMAFTER_TELEPORT);
1669 AssertPtr(ppSSM);
1670 *ppSSM = NULL;
1671
1672 /*
1673 * Change the state and perform/start the saving.
1674 */
1675 if (!fSkipStateChanges)
1676 {
1677 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1678 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1679 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1680 }
1681 else
1682 {
1683 Assert(enmAfter != SSMAFTER_TELEPORT);
1684 rc = 1;
1685 }
1686
1687 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1688 {
1689 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1690 if (!fSkipStateChanges)
1691 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1692 }
1693 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1694 {
1695 Assert(!fSkipStateChanges);
1696 if (enmAfter == SSMAFTER_TELEPORT)
1697 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1698 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1699 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1700 /* (We're not subject to cancellation just yet.) */
1701 }
1702 else
1703 Assert(RT_FAILURE(rc));
1704 return rc;
1705}
1706
1707
1708/**
1709 * Common worker for VMR3Save and VMR3Teleport.
1710 *
1711 * @returns VBox status code.
1712 *
1713 * @param pVM The VM handle.
1714 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1715 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1716 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1717 * @param pvStreamOpsUser The user argument to the stream methods.
1718 * @param enmAfter What to do afterwards.
1719 * @param pfnProgress Progress callback. Optional.
1720 * @param pvProgressUser User argument for the progress callback.
1721 * @param pfSuspended Set if we suspended the VM.
1722 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1723 *
1724 * @thread Non-EMT
1725 */
1726static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1727 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1728 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended,
1729 bool fSkipStateChanges)
1730{
1731 /*
1732 * Request the operation in EMT(0).
1733 */
1734 PSSMHANDLE pSSM;
1735 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/,
1736 (PFNRT)vmR3Save, 10, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1737 enmAfter, pfnProgress, pvProgressUser, &pSSM, fSkipStateChanges);
1738 if ( RT_SUCCESS(rc)
1739 && pSSM)
1740 {
1741 Assert(!fSkipStateChanges);
1742
1743 /*
1744 * Live snapshot.
1745 *
1746 * The state handling here is kind of tricky, doing it on EMT(0) helps
1747 * a bit. See the VMSTATE diagram for details.
1748 */
1749 rc = SSMR3LiveDoStep1(pSSM);
1750 if (RT_SUCCESS(rc))
1751 {
1752 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1753 for (;;)
1754 {
1755 /* Try suspend the VM. */
1756 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1757 vmR3LiveDoSuspend, pfSuspended);
1758 if (rc != VERR_TRY_AGAIN)
1759 break;
1760
1761 /* Wait for the state to change. */
1762 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1763 }
1764 if (RT_SUCCESS(rc))
1765 rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1766 else
1767 {
1768 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1769 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1770 }
1771 }
1772 else
1773 {
1774 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1775 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1776
1777 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1778 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1779 rc = rc2;
1780 }
1781 }
1782
1783 return rc;
1784}
1785
1786
1787/**
1788 * Save current VM state.
1789 *
1790 * Can be used for both saving the state and creating snapshots.
1791 *
1792 * When called for a VM in the Running state, the saved state is created live
1793 * and the VM is only suspended when the final part of the saving is performed.
1794 * The VM state will not be restored to Running in this case and it's up to the
1795 * caller to call VMR3Resume if this is desirable. (The rationale is that the
1796 * caller probably wishes to reconfigure the disks before resuming the VM.)
1797 *
1798 * @returns VBox status code.
1799 *
1800 * @param pVM The VM which state should be saved.
1801 * @param pszFilename The name of the save state file.
1804 * @param   fContinueAfterwards Whether to continue execution afterwards or not.
1805 * When in doubt, set this to true.
1806 * @param pfnProgress Progress callback. Optional.
1807 * @param pvUser User argument for the progress callback.
1808 * @param pfSuspended Set if we suspended the VM.
1809 *
1810 * @thread Non-EMT.
1811 * @vmstate Suspended or Running
1812 * @vmstateto Saving+Suspended or
1813 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
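 *
 * A minimal usage sketch from a non-EMT thread (illustrative only; the file
 * name is made up and error handling plus the progress callback are omitted):
 * @code
 *      bool fSuspended = false;
 *      int rc = VMR3Save(pVM, "/tmp/mybox.sav", true,  // fContinueAfterwards
 *                        NULL, NULL,                   // pfnProgress, pvUser
 *                        &fSuspended);
 *      if (RT_SUCCESS(rc) && fSuspended)
 *          rc = VMR3Resume(pVM);                       // resuming is the caller's decision
 * @endcode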
1814 */
1815VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
1816{
1817 LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1818 pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1819
1820 /*
1821 * Validate input.
1822 */
1823 AssertPtr(pfSuspended);
1824 *pfSuspended = false;
1825 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1826 VM_ASSERT_OTHER_THREAD(pVM);
1827 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1828 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1829 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1830
1831 /*
1832 * Join paths with VMR3Teleport.
1833 */
1834 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1835 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1836 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1837 enmAfter, pfnProgress, pvUser, pfSuspended,
1838 false /* fSkipStateChanges */);
1839 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1840 return rc;
1841}
1842
1843/**
1844 * Save current VM state (used by FTM)
1845 *
1846 * Can be used for both saving the state and creating snapshots.
1847 *
1848 * When called for a VM in the Running state, the saved state is created live
1849 * and the VM is only suspended when the final part of the saving is performed.
1850 * The VM state will not be restored to Running in this case and it's up to the
1851 * caller to call VMR3Resume if this is desirable. (The rationale is that the
1852 * caller probably wishes to reconfigure the disks before resuming the VM.)
1853 *
1854 * @returns VBox status code.
1855 *
1856 * @param pVM The VM which state should be saved.
1857 * @param pStreamOps The stream methods.
1858 * @param pvStreamOpsUser The user argument to the stream methods.
1859 * @param pfSuspended Set if we suspended the VM.
1860 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1861 *
1862 * @thread Any
1863 * @vmstate Suspended or Running
1864 * @vmstateto Saving+Suspended or
1865 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1866 */
1867VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended,
1868 bool fSkipStateChanges)
1869{
1870    LogFlow(("VMR3SaveFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfSuspended=%p\n",
1871 pVM, pStreamOps, pvStreamOpsUser, pfSuspended));
1872
1873 /*
1874 * Validate input.
1875 */
1876 AssertPtr(pfSuspended);
1877 *pfSuspended = false;
1878 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1879 AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
1880
1881 /*
1882 * Join paths with VMR3Teleport.
1883 */
1884 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1885 NULL, pStreamOps, pvStreamOpsUser,
1886 SSMAFTER_CONTINUE, NULL, NULL, pfSuspended,
1887 fSkipStateChanges);
1888 LogFlow(("VMR3SaveFT: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1889 return rc;
1890}
1891
1892
1893/**
1894 * Teleport the VM (aka live migration).
1895 *
1896 * @returns VBox status code.
1897 *
1898 * @param pVM The VM which state should be saved.
1899 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1900 * @param pStreamOps The stream methods.
1901 * @param pvStreamOpsUser The user argument to the stream methods.
1902 * @param pfnProgress Progress callback. Optional.
1903 * @param pvProgressUser User argument for the progress callback.
1904 * @param pfSuspended Set if we suspended the VM.
1905 *
1906 * @thread Non-EMT.
1907 * @vmstate Suspended or Running
1908 * @vmstateto Saving+Suspended or
1909 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
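 *
 * Illustrative sketch only; the stream ops instance and connection context are
 * assumed to be set up elsewhere by the caller (both names are made up):
 * @code
 *      bool fSuspended = false;
 *      int rc = VMR3Teleport(pVM, 250,                 // cMsMaxDowntime
 *                            &MyTcpStreamOps, pvMyConnection,
 *                            NULL, NULL,               // pfnProgress, pvProgressUser
 *                            &fSuspended);
 * @endcode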
1910 */
1911VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1912 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1913{
1914    LogFlow(("VMR3Teleport: pVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
1915 pVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1916
1917 /*
1918 * Validate input.
1919 */
1920 AssertPtr(pfSuspended);
1921 *pfSuspended = false;
1922 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1923 VM_ASSERT_OTHER_THREAD(pVM);
1924 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1925 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1926
1927 /*
1928 * Join paths with VMR3Save.
1929 */
1930 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
1931 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1932 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended,
1933 false /* fSkipStateChanges */);
1934 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1935 return rc;
1936}
1937
1938
1939
1940/**
1941 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
1942 *
1943 * @returns VBox status code.
1944 *
1945 * @param pVM The VM handle.
1946 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1947 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1948 * @param pvStreamOpsUser The user argument to the stream methods.
1949 * @param pfnProgress Progress callback. Optional.
1950 * @param   pvProgressUser  User argument for the progress callback.
1951 * @param fTeleporting Indicates whether we're teleporting or not.
1952 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1953 *
1954 * @thread EMT.
1955 */
1956static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1957 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
1958 bool fSkipStateChanges)
1959{
1960 int rc = VINF_SUCCESS;
1961
1962 LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
1963 pVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
1964
1965 /*
1966 * Validate input (paranoia).
1967 */
1968 AssertPtr(pVM);
1969 AssertPtrNull(pszFilename);
1970 AssertPtrNull(pStreamOps);
1971 AssertPtrNull(pfnProgress);
1972
1973 if (!fSkipStateChanges)
1974 {
1975 /*
1976 * Change the state and perform the load.
1977 *
1978 * Always perform a relocation round afterwards to make sure hypervisor
1979 * selectors and such are correct.
1980 */
1981 rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1982 VMSTATE_LOADING, VMSTATE_CREATED,
1983 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1984 if (RT_FAILURE(rc))
1985 return rc;
1986 }
1987 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
1988
1989 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pVM);
1990 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
1991 if (RT_SUCCESS(rc))
1992 {
1993 VMR3Relocate(pVM, 0 /*offDelta*/);
1994 if (!fSkipStateChanges)
1995 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1996 }
1997 else
1998 {
1999 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
2000 if (!fSkipStateChanges)
2001 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
2002
2003 if (cErrorsPriorToSave == VMR3GetErrorCount(pVM))
2004 rc = VMSetError(pVM, rc, RT_SRC_POS,
2005 N_("Unable to restore the virtual machine's saved state from '%s'. "
2006 "It may be damaged or from an older version of VirtualBox. "
2007 "Please discard the saved state before starting the virtual machine"),
2008 pszFilename);
2009 }
2010
2011 return rc;
2012}
2013
2014
2015/**
2016 * Loads a VM state into a newly created VM or one that is suspended.
2017 *
2018 * To restore a saved state on VM startup, call this function and then resume
2019 * the VM instead of powering it on.
2020 *
2021 * @returns VBox status code.
2022 *
2023 * @param pVM The VM handle.
2024 * @param pszFilename The name of the save state file.
2025 * @param pfnProgress Progress callback. Optional.
2026 * @param pvUser User argument for the progress callback.
2027 *
2028 * @thread Any thread.
2029 * @vmstate Created, Suspended
2030 * @vmstateto Loading+Suspended
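 *
 * A minimal restore-at-startup sketch (illustrative only; the file name is
 * made up and error handling is omitted):
 * @code
 *      int rc = VMR3LoadFromFile(pVM, "/tmp/mybox.sav", NULL, NULL); // no progress callback
 *      if (RT_SUCCESS(rc))
 *          rc = VMR3Resume(pVM);   // resume instead of powering the VM on
 * @endcode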
2031 */
2032VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
2033{
2034 LogFlow(("VMR3LoadFromFile: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
2035 pVM, pszFilename, pszFilename, pfnProgress, pvUser));
2036
2037 /*
2038 * Validate input.
2039 */
2040 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2041 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
2042
2043 /*
2044     * Forward the request to EMT(0). No need to set up a rendezvous here
2045 * since there is no execution taking place when this call is allowed.
2046 */
2047 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2048 pVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
2049 false /*fTeleporting*/, false /* fSkipStateChanges */);
2050 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
2051 return rc;
2052}
2053
2054
2055/**
2056 * VMR3LoadFromFile for arbitrary file streams.
2057 *
2058 * @returns VBox status code.
2059 *
2060 * @param pVM The VM handle.
2061 * @param pStreamOps The stream methods.
2062 * @param pvStreamOpsUser The user argument to the stream methods.
2063 * @param pfnProgress Progress callback. Optional.
2064 * @param pvProgressUser User argument for the progress callback.
2065 *
2066 * @thread Any thread.
2067 * @vmstate Created, Suspended
2068 * @vmstateto Loading+Suspended
2069 */
2070VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2071 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
2072{
2073 LogFlow(("VMR3LoadFromStream: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
2074 pVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
2075
2076 /*
2077 * Validate input.
2078 */
2079 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2080 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2081
2082 /*
2083     * Forward the request to EMT(0). No need to set up a rendezvous here
2084 * since there is no execution taking place when this call is allowed.
2085 */
2086 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2087 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
2088 true /*fTeleporting*/, false /* fSkipStateChanges */);
2089 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2090 return rc;
2091}
2092
2093
2094/**
2095 * VMR3LoadFromStream variant for FTM (fault tolerance), reading from arbitrary file streams.
2096 *
2097 * @returns VBox status code.
2098 *
2099 * @param pVM The VM handle.
2100 * @param pStreamOps The stream methods.
2101 * @param pvStreamOpsUser The user argument to the stream methods.
2104 *
2105 * @thread Any thread.
2106 * @vmstate Created, Suspended
2107 * @vmstateto Loading+Suspended
2108 */
2109VMMR3DECL(int) VMR3LoadFromStreamFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
2110{
2111 LogFlow(("VMR3LoadFromStreamFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p\n",
2112 pVM, pStreamOps, pvStreamOpsUser));
2113
2114 /*
2115 * Validate input.
2116 */
2117 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2118 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2119
2120 /*
2121     * Forward the request to EMT(0). No need to set up a rendezvous here
2122 * since there is no execution taking place when this call is allowed.
2123 */
2124 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2125 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
2126 true /*fTeleporting*/, true /* fSkipStateChanges */);
2127    LogFlow(("VMR3LoadFromStreamFT: returns %Rrc\n", rc));
2128 return rc;
2129}
2130
2131/**
2132 * EMT rendezvous worker for VMR3PowerOff.
2133 *
2134 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
2135 * return code, see FNVMMEMTRENDEZVOUS.)
2136 *
2137 * @param pVM The VM handle.
2138 * @param pVCpu The VMCPU handle of the EMT.
2139 * @param pvUser Ignored.
2140 */
2141static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
2142{
2143 LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
2144 Assert(!pvUser); NOREF(pvUser);
2145
2146 /*
2147 * The first EMT thru here will change the state to PoweringOff.
2148 */
2149 if (pVCpu->idCpu == pVM->cCpus - 1)
2150 {
2151 int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
2152 VMSTATE_POWERING_OFF, VMSTATE_RUNNING, /* 1 */
2153 VMSTATE_POWERING_OFF, VMSTATE_SUSPENDED, /* 2 */
2154 VMSTATE_POWERING_OFF, VMSTATE_DEBUGGING, /* 3 */
2155 VMSTATE_POWERING_OFF, VMSTATE_LOAD_FAILURE, /* 4 */
2156 VMSTATE_POWERING_OFF, VMSTATE_GURU_MEDITATION, /* 5 */
2157 VMSTATE_POWERING_OFF, VMSTATE_FATAL_ERROR, /* 6 */
2158 VMSTATE_POWERING_OFF, VMSTATE_CREATED, /* 7 */ /** @todo update the diagram! */
2159 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS, /* 8 */
2160 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS, /* 9 */
2161 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
2162 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS); /* 11 */
2163 if (RT_FAILURE(rc))
2164 return rc;
2165 if (rc >= 7)
2166 SSMR3Cancel(pVM);
2167 }
2168
2169 /*
2170 * Check the state.
2171 */
2172 VMSTATE enmVMState = VMR3GetState(pVM);
2173 AssertMsgReturn( enmVMState == VMSTATE_POWERING_OFF
2174 || enmVMState == VMSTATE_POWERING_OFF_LS,
2175 ("%s\n", VMR3GetStateName(enmVMState)),
2176 VERR_VM_INVALID_VM_STATE);
2177
2178 /*
2179 * EMT(0) does the actual power off work here *after* all the other EMTs
2180 * have been thru and entered the STOPPED state.
2181 */
2182 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
2183 if (pVCpu->idCpu == 0)
2184 {
2185 /*
2186 * For debugging purposes, we will log a summary of the guest state at this point.
2187 */
2188 if (enmVMState != VMSTATE_GURU_MEDITATION)
2189 {
2190 /** @todo SMP support? */
2191 /** @todo make the state dumping at VMR3PowerOff optional. */
2192 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2193 RTLogRelPrintf("****************** Guest state at power off ******************\n");
2194 DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
2195 RTLogRelPrintf("***\n");
2196 DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
2197 RTLogRelPrintf("***\n");
2198 DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
2199 RTLogRelPrintf("***\n");
2200 DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
2201 /** @todo dump guest call stack. */
2202#if 1 // "temporary" while debugging #1589
2203 RTLogRelPrintf("***\n");
2204 uint32_t esp = CPUMGetGuestESP(pVCpu);
2205 if ( CPUMGetGuestSS(pVCpu) == 0
2206 && esp < _64K)
2207 {
2208 uint8_t abBuf[PAGE_SIZE];
2209 RTLogRelPrintf("***\n"
2210 "ss:sp=0000:%04x ", esp);
2211 uint32_t Start = esp & ~(uint32_t)63;
2212 int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
2213 if (RT_SUCCESS(rc))
2214 RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
2215 "%.*Rhxd\n",
2216 Start, Start + 0x100 - 1,
2217 0x100, abBuf);
2218 else
2219 RTLogRelPrintf("rc=%Rrc\n", rc);
2220
2221 /* grub ... */
2222 if (esp < 0x2000 && esp > 0x1fc0)
2223 {
2224 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
2225 if (RT_SUCCESS(rc))
2226 RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
2227 "%.*Rhxd\n",
2228 0x800, abBuf);
2229 }
2230 /* microsoft cdrom hang ... */
2231 if (true)
2232 {
2233 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
2234 if (RT_SUCCESS(rc))
2235 RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
2236 "%.*Rhxd\n",
2237 0x200, abBuf);
2238 }
2239 }
2240#endif
2241 RTLogRelSetBuffering(fOldBuffered);
2242 RTLogRelPrintf("************** End of Guest state at power off ***************\n");
2243 }
2244
2245 /*
2246 * Perform the power off notifications and advance the state to
2247 * Off or OffLS.
2248 */
2249 PDMR3PowerOff(pVM);
2250
2251 PUVM pUVM = pVM->pUVM;
2252 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2253 enmVMState = pVM->enmVMState;
2254 if (enmVMState == VMSTATE_POWERING_OFF_LS)
2255 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
2256 else
2257 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF);
2258 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2259 }
2260 return VINF_EM_OFF;
2261}
2262
2263
2264/**
2265 * Power off the VM.
2266 *
2267 * @returns VBox status code. When called on EMT, this will be a strict status
2268 * code that has to be propagated up the call stack.
2269 *
2270 * @param pVM The handle of the VM to be powered off.
2271 *
2272 * @thread Any thread.
2273 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2274 * @vmstateto Off or OffLS
2275 */
2276VMMR3DECL(int) VMR3PowerOff(PVM pVM)
2277{
2278 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
2279 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2280
2281 /*
2282 * Gather all the EMTs to make sure there are no races before
2283 * changing the VM state.
2284 */
2285 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2286 vmR3PowerOff, NULL);
2287 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2288 return rc;
2289}
2290
2291
2292/**
2293 * Destroys the VM.
2294 *
2295 * The VM must be powered off (or never really powered on) to call this
2296 * function. The VM handle is destroyed and can no longer be used upon successful
2297 * return.
2298 *
2299 * @returns VBox status code.
2300 *
2301 * @param pVM The handle of the VM which should be destroyed.
2302 *
2303 * @thread      Any non-emulation thread.
2304 * @vmstate Off, Created
2305 * @vmstateto N/A
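 *
 * Typical shutdown sequence sketch (illustrative only, error handling kept to
 * a minimum):
 * @code
 *      int rc = VMR3PowerOff(pVM);
 *      if (RT_SUCCESS(rc))
 *          rc = VMR3Destroy(pVM);  // pVM must not be used again if this succeeds
 * @endcode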
2306 */
2307VMMR3DECL(int) VMR3Destroy(PVM pVM)
2308{
2309 LogFlow(("VMR3Destroy: pVM=%p\n", pVM));
2310
2311 /*
2312 * Validate input.
2313 */
2314 if (!pVM)
2315 return VERR_INVALID_VM_HANDLE;
2316 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2317 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2318
2319 /*
2320 * Change VM state to destroying and unlink the VM.
2321 */
2322 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2323 if (RT_FAILURE(rc))
2324 return rc;
2325
2326 /** @todo lock this when we start having multiple machines in a process... */
2327 PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
2328 if (g_pUVMsHead == pUVM)
2329 g_pUVMsHead = pUVM->pNext;
2330 else
2331 {
2332 PUVM pPrev = g_pUVMsHead;
2333 while (pPrev && pPrev->pNext != pUVM)
2334 pPrev = pPrev->pNext;
2335 AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);
2336
2337 pPrev->pNext = pUVM->pNext;
2338 }
2339 pUVM->pNext = NULL;
2340
2341 /*
2342     * Notify the registered at-destruction listeners.
2343 */
2344 vmR3AtDtor(pVM);
2345
2346 /*
2347 * Call vmR3Destroy on each of the EMTs ending with EMT(0) doing the bulk
2348 * of the cleanup.
2349 */
2350 /* vmR3Destroy on all EMTs, ending with EMT(0). */
2351 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2352 AssertLogRelRC(rc);
2353
2354 /* Wait for EMTs and destroy the UVM. */
2355 vmR3DestroyUVM(pUVM, 30000);
2356
2357 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2358 return VINF_SUCCESS;
2359}
2360
2361
2362/**
2363 * Internal destruction worker.
2364 *
2365 * This is either called from VMR3Destroy via VMR3ReqCallU or from
2366 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
2367 * VMR3Destroy().
2368 *
2369 * When called on EMT(0), it will perform the great bulk of the destruction.
2370 * When called on the other EMTs, they will do nothing and the whole purpose is
2371 * to return VINF_EM_TERMINATE so they break out of their run loops.
2372 *
2373 * @returns VINF_EM_TERMINATE.
2374 * @param pVM The VM handle.
2375 */
2376DECLCALLBACK(int) vmR3Destroy(PVM pVM)
2377{
2378 PUVM pUVM = pVM->pUVM;
2379 PVMCPU pVCpu = VMMGetCpu(pVM);
2380 Assert(pVCpu);
2381 LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
2382
2383 /*
2384 * Only VCPU 0 does the full cleanup (last).
2385 */
2386 if (pVCpu->idCpu == 0)
2387 {
2388 /*
2389 * Dump statistics to the log.
2390 */
2391#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
2392 RTLogFlags(NULL, "nodisabled nobuffered");
2393#endif
2394#ifdef VBOX_WITH_STATISTICS
2395 STAMR3Dump(pVM, "*");
2396#else
2397 LogRel(("************************* Statistics *************************\n"));
2398 STAMR3DumpToReleaseLog(pVM, "*");
2399 LogRel(("********************* End of statistics **********************\n"));
2400#endif
2401
2402 /*
2403 * Destroy the VM components.
2404 */
2405 int rc = TMR3Term(pVM);
2406 AssertRC(rc);
2407#ifdef VBOX_WITH_DEBUGGER
2408 rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
2409 pUVM->vm.s.pvDBGC = NULL;
2410#endif
2411 AssertRC(rc);
2412 rc = FTMR3Term(pVM);
2413 AssertRC(rc);
2414 rc = DBGFR3Term(pVM);
2415 AssertRC(rc);
2416 rc = PDMR3Term(pVM);
2417 AssertRC(rc);
2418 rc = IEMR3Term(pVM);
2419 AssertRC(rc);
2420 rc = EMR3Term(pVM);
2421 AssertRC(rc);
2422 rc = IOMR3Term(pVM);
2423 AssertRC(rc);
2424 rc = CSAMR3Term(pVM);
2425 AssertRC(rc);
2426 rc = PATMR3Term(pVM);
2427 AssertRC(rc);
2428 rc = TRPMR3Term(pVM);
2429 AssertRC(rc);
2430 rc = SELMR3Term(pVM);
2431 AssertRC(rc);
2432 rc = REMR3Term(pVM);
2433 AssertRC(rc);
2434 rc = HWACCMR3Term(pVM);
2435 AssertRC(rc);
2436 rc = PGMR3Term(pVM);
2437 AssertRC(rc);
2438 rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
2439 AssertRC(rc);
2440 rc = CPUMR3Term(pVM);
2441 AssertRC(rc);
2442 SSMR3Term(pVM);
2443 rc = PDMR3CritSectTerm(pVM);
2444 AssertRC(rc);
2445 rc = MMR3Term(pVM);
2446 AssertRC(rc);
2447
2448 /*
2449 * We're done, tell the other EMTs to quit.
2450 */
2451 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2452 ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2453 LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
2454 }
2455 return VINF_EM_TERMINATE;
2456}
2457
2458
2459/**
2460 * Destroys the UVM portion.
2461 *
2462 * This is called as the final step in the VM destruction or as the cleanup
2463 * in case of a creation failure.
2464 *
2465 * @param   pUVM            Pointer to the user mode VM structure.
2466 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2467 * threads.
2468 */
2469static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2470{
2471 /*
2472     * Signal termination of each of the emulation threads and
2473 * wait for them to complete.
2474 */
2475 /* Signal them. */
2476 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2477 if (pUVM->pVM)
2478 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2479 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2480 {
2481 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2482 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2483 }
2484
2485 /* Wait for them. */
2486 uint64_t NanoTS = RTTimeNanoTS();
2487 RTTHREAD hSelf = RTThreadSelf();
2488 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2489 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2490 {
2491 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2492 if ( hThread != NIL_RTTHREAD
2493 && hThread != hSelf)
2494 {
2495 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2496 int rc2 = RTThreadWait(hThread,
2497 cMilliesElapsed < cMilliesEMTWait
2498 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2499 : 2000,
2500 NULL);
2501 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2502 rc2 = RTThreadWait(hThread, 1000, NULL);
2503 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2504 if (RT_SUCCESS(rc2))
2505                pUVM->aCpus[i].vm.s.ThreadEMT = NIL_RTTHREAD; /* clear the handle of the thread we just waited for */
2506 }
2507 }
2508
2509 /* Cleanup the semaphores. */
2510 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2511 {
2512 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2513 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2514 }
2515
2516 /*
2517 * Free the event semaphores associated with the request packets.
2518 */
2519 unsigned cReqs = 0;
2520 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2521 {
2522 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2523 pUVM->vm.s.apReqFree[i] = NULL;
2524 for (; pReq; pReq = pReq->pNext, cReqs++)
2525 {
2526 pReq->enmState = VMREQSTATE_INVALID;
2527 RTSemEventDestroy(pReq->EventSem);
2528 }
2529 }
2530 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2531
2532 /*
2533 * Kill all queued requests. (There really shouldn't be any!)
2534 */
2535 for (unsigned i = 0; i < 10; i++)
2536 {
2537 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pPriorityReqs, NULL, PVMREQ);
2538 if (!pReqHead)
2539 {
2540 pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pNormalReqs, NULL, PVMREQ);
2541 if (!pReqHead)
2542 break;
2543 }
2544 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2545
2546 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2547 {
2548 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_INTERNAL_ERROR);
2549 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2550 RTSemEventSignal(pReq->EventSem);
2551 RTThreadSleep(2);
2552 RTSemEventDestroy(pReq->EventSem);
2553 }
2554 /* give them a chance to respond before we free the request memory. */
2555 RTThreadSleep(32);
2556 }
2557
2558 /*
2559 * Now all queued VCPU requests (again, there shouldn't be any).
2560 */
2561 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2562 {
2563 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2564
2565 for (unsigned i = 0; i < 10; i++)
2566 {
2567 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pPriorityReqs, NULL, PVMREQ);
2568 if (!pReqHead)
2569 {
2570 pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pNormalReqs, NULL, PVMREQ);
2571 if (!pReqHead)
2572 break;
2573 }
2574 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2575
2576 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2577 {
2578 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_INTERNAL_ERROR);
2579 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2580 RTSemEventSignal(pReq->EventSem);
2581 RTThreadSleep(2);
2582 RTSemEventDestroy(pReq->EventSem);
2583 }
2584 /* give them a chance to respond before we free the request memory. */
2585 RTThreadSleep(32);
2586 }
2587 }
2588
2589 /*
2590 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2591 */
2592 PDMR3TermUVM(pUVM);
2593
2594 /*
2595 * Terminate the support library if initialized.
2596 */
2597 if (pUVM->vm.s.pSession)
2598 {
2599 int rc = SUPR3Term(false /*fForced*/);
2600 AssertRC(rc);
2601 pUVM->vm.s.pSession = NIL_RTR0PTR;
2602 }
2603
2604 /*
2605 * Release the UVM structure reference.
2606 */
2607 VMR3ReleaseUVM(pUVM);
2608
2609 /*
2610 * Clean up and flush logs.
2611 */
2612#ifdef LOG_ENABLED
2613 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2614#endif
2615 RTLogFlush(NULL);
2616}
2617
2618
2619/**
2620 * Enumerates the VMs in this process.
2621 *
2622 * @returns Pointer to the next VM.
2623 * @returns NULL when no more VMs.
2624 * @param   pVMPrev     The previous VM.
2625 * Use NULL to start the enumeration.
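 *
 * Enumeration sketch (illustrative; see the caveat in the implementation about
 * VMs being destroyed while enumerating):
 * @code
 *      for (PVM pVMCur = VMR3EnumVMs(NULL); pVMCur; pVMCur = VMR3EnumVMs(pVMCur))
 *          LogRel(("VM %p is in state %s\n", pVMCur, VMR3GetStateName(VMR3GetState(pVMCur))));
 * @endcode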
2626 */
2627VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2628{
2629 /*
2630     * This is quick and dirty. It has issues with a VM being
2631     * destroyed during the enumeration.
2632 */
2633 PUVM pNext;
2634 if (pVMPrev)
2635 pNext = pVMPrev->pUVM->pNext;
2636 else
2637 pNext = g_pUVMsHead;
2638 return pNext ? pNext->pVM : NULL;
2639}
2640
2641
2642/**
2643 * Registers an at VM destruction callback.
2644 *
2645 * @returns VBox status code.
2646 * @param pfnAtDtor Pointer to callback.
2647 * @param pvUser User argument.
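 *
 * Registration sketch (illustrative; the callback name is made up and the
 * callback is assumed to match FNVMATDTOR, i.e. taking the VM handle and the
 * user argument and returning void):
 * @code
 *      static DECLCALLBACK(void) myAtDtorCallback(PVM pVM, void *pvUser)
 *      {
 *          NOREF(pvUser);
 *          LogRel(("VM %p is about to be destroyed\n", pVM));
 *      }
 *      ...
 *      int rc = VMR3AtDtorRegister(myAtDtorCallback, NULL);
 * @endcode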
2648 */
2649VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2650{
2651 /*
2652 * Check if already registered.
2653 */
2654 VM_ATDTOR_LOCK();
2655 PVMATDTOR pCur = g_pVMAtDtorHead;
2656 while (pCur)
2657 {
2658 if (pfnAtDtor == pCur->pfnAtDtor)
2659 {
2660 VM_ATDTOR_UNLOCK();
2661 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2662 return VERR_INVALID_PARAMETER;
2663 }
2664
2665 /* next */
2666 pCur = pCur->pNext;
2667 }
2668 VM_ATDTOR_UNLOCK();
2669
2670 /*
2671 * Allocate new entry.
2672 */
2673 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2674 if (!pVMAtDtor)
2675 return VERR_NO_MEMORY;
2676
2677 VM_ATDTOR_LOCK();
2678 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2679 pVMAtDtor->pvUser = pvUser;
2680 pVMAtDtor->pNext = g_pVMAtDtorHead;
2681 g_pVMAtDtorHead = pVMAtDtor;
2682 VM_ATDTOR_UNLOCK();
2683
2684 return VINF_SUCCESS;
2685}
2686
2687
2688/**
2689 * Deregisters an at VM destruction callback.
2690 *
2691 * @returns VBox status code.
2692 * @param pfnAtDtor Pointer to callback.
2693 */
2694VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2695{
2696 /*
2697 * Find it, unlink it and free it.
2698 */
2699 VM_ATDTOR_LOCK();
2700 PVMATDTOR pPrev = NULL;
2701 PVMATDTOR pCur = g_pVMAtDtorHead;
2702 while (pCur)
2703 {
2704 if (pfnAtDtor == pCur->pfnAtDtor)
2705 {
2706 if (pPrev)
2707 pPrev->pNext = pCur->pNext;
2708 else
2709 g_pVMAtDtorHead = pCur->pNext;
2710 pCur->pNext = NULL;
2711 VM_ATDTOR_UNLOCK();
2712
2713 RTMemFree(pCur);
2714 return VINF_SUCCESS;
2715 }
2716
2717 /* next */
2718 pPrev = pCur;
2719 pCur = pCur->pNext;
2720 }
2721 VM_ATDTOR_UNLOCK();
2722
2723 return VERR_INVALID_PARAMETER;
2724}
2725
2726
2727/**
2728 * Walks the list of at VM destructor callbacks.
2729 * @param pVM The VM which is about to be destroyed.
2730 */
2731static void vmR3AtDtor(PVM pVM)
2732{
2733 /*
2734     * Walk the list and invoke each registered callback.
2735 */
2736 VM_ATDTOR_LOCK();
2737 for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
2738 pCur->pfnAtDtor(pVM, pCur->pvUser);
2739 VM_ATDTOR_UNLOCK();
2740}
2741
2742
2743/**
2744 * Worker which checks integrity of some internal structures.
2745 * This is yet another attempt to track down that AVL tree crash.
2746 */
2747static void vmR3CheckIntegrity(PVM pVM)
2748{
2749#ifdef VBOX_STRICT
2750 int rc = PGMR3CheckIntegrity(pVM);
2751 AssertReleaseRC(rc);
2752#endif
2753}
2754
2755
2756/**
2757 * EMT rendezvous worker for VMR3Reset.
2758 *
2759 * This is called by the emulation threads as a response to the reset request
2760 * issued by VMR3Reset().
2761 *
2762 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2763 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2764 *
2765 * @param pVM The VM handle.
2766 * @param pVCpu The VMCPU handle of the EMT.
2767 * @param pvUser Ignored.
2768 */
2769static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2770{
2771 Assert(!pvUser); NOREF(pvUser);
2772
2773 /*
2774     * The first EMT will try to change the state to resetting. If this fails,
2775 * we won't get called for the other EMTs.
2776 */
2777 if (pVCpu->idCpu == pVM->cCpus - 1)
2778 {
2779 int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
2780 VMSTATE_RESETTING, VMSTATE_RUNNING,
2781 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
2782 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
2783 if (RT_FAILURE(rc))
2784 return rc;
2785 }
2786
2787 /*
2788 * Check the state.
2789 */
2790 VMSTATE enmVMState = VMR3GetState(pVM);
2791 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2792 || enmVMState == VMSTATE_RESETTING_LS,
2793 ("%s\n", VMR3GetStateName(enmVMState)),
2794 VERR_INTERNAL_ERROR_4);
2795
2796 /*
2797     * EMT(0) does the full cleanup *after* all the other EMTs have been
2798 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2799 *
2800     * Because there are per-cpu reset routines and the order may be important,
2801 * the following sequence looks a bit ugly...
2802 */
2803 if (pVCpu->idCpu == 0)
2804 vmR3CheckIntegrity(pVM);
2805
2806 /* Reset the VCpu state. */
2807 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2808
2809 /* Clear all pending forced actions. */
2810 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2811
2812 /*
2813 * Reset the VM components.
2814 */
2815 if (pVCpu->idCpu == 0)
2816 {
2817 PATMR3Reset(pVM);
2818 CSAMR3Reset(pVM);
2819 PGMR3Reset(pVM); /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
2820 * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
2821/** @todo PGMR3Reset should be called after PDMR3Reset really, because we'll trash OS <-> hardware
2822 * communication structures residing in RAM when done in the other order. I.e. the device must be
2823 * quiesced first, then we clear the memory and plan tables. Probably have to make these things
2824 * explicit in some way, some memory setup pass or something.
2825 * (Example: DevAHCI may assert if memory is zeroed before it has read the FIS.)
2826 *
2827 * @bugref{4467}
2828 */
2829 MMR3Reset(pVM);
2830 PDMR3Reset(pVM);
2831 SELMR3Reset(pVM);
2832 TRPMR3Reset(pVM);
2833 REMR3Reset(pVM);
2834 IOMR3Reset(pVM);
2835 CPUMR3Reset(pVM);
2836 }
2837 CPUMR3ResetCpu(pVCpu);
2838 if (pVCpu->idCpu == 0)
2839 {
2840 TMR3Reset(pVM);
2841 EMR3Reset(pVM);
2842 HWACCMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2843
2844#ifdef LOG_ENABLED
2845 /*
2846 * Debug logging.
2847 */
2848 RTLogPrintf("\n\nThe VM was reset:\n");
2849 DBGFR3Info(pVM, "cpum", "verbose", NULL);
2850#endif
2851
2852 /*
2853 * Since EMT(0) is the last to go thru here, it will advance the state.
2854 * When a live save is active, we will move on to SuspendingLS but
2855 * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
2856 */
2857 PUVM pUVM = pVM->pUVM;
2858 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2859 enmVMState = pVM->enmVMState;
2860 if (enmVMState == VMSTATE_RESETTING)
2861 {
2862 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2863 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
2864 else
2865 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING);
2866 }
2867 else
2868 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS);
2869 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2870
2871 vmR3CheckIntegrity(pVM);
2872
2873 /*
2874 * Do the suspend bit as well.
2875 * It only requires some EMT(0) work at present.
2876 */
2877 if (enmVMState != VMSTATE_RESETTING)
2878 {
2879 vmR3SuspendDoWork(pVM);
2880 vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
2881 }
2882 }
2883
2884 return enmVMState == VMSTATE_RESETTING
2885 ? VINF_EM_RESET
2886 : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
2887}
2888
2889
2890/**
2891 * Reset the current VM.
2892 *
2893 * @returns VBox status code.
2894 * @param pVM VM to reset.
2895 */
2896VMMR3DECL(int) VMR3Reset(PVM pVM)
2897{
2898 LogFlow(("VMR3Reset:\n"));
2899 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2900
2901 /*
2902 * Gather all the EMTs to make sure there are no races before
2903 * changing the VM state.
2904 */
2905 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2906 vmR3Reset, NULL);
2907 LogFlow(("VMR3Reset: returns %Rrc\n", rc));
2908 return rc;
2909}
2910
2911
2912/**
2913 * Gets the user mode VM structure pointer given the VM handle.
2914 *
2915 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
2916 * invalid (asserted).
2917 * @param pVM The VM handle.
2918 * @sa VMR3GetVM, VMR3RetainUVM
2919 */
2920VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
2921{
2922 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
2923 return pVM->pUVM;
2924}
2925
2926
2927/**
2928 * Gets the shared VM structure pointer given the pointer to the user mode VM
2929 * structure.
2930 *
2931 * @returns Pointer to the shared VM structure.
2932 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
2933 * is currently associated with it.
2934 * @param pUVM The user mode VM handle.
2935 * @sa VMR3GetUVM
2936 */
2937VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
2938{
2939 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2940 return pUVM->pVM;
2941}
2942
2943
2944/**
2945 * Retain the user mode VM handle.
2946 *
2947 * @returns Reference count.
2948 * UINT32_MAX if @a pUVM is invalid.
2949 *
2950 * @param pUVM The user mode VM handle.
2951 * @sa VMR3ReleaseUVM
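 *
 * Usage sketch (illustrative only): hold the user mode VM structure while it
 * is in use outside the normal VM lifetime:
 * @code
 *      uint32_t cRefs = VMR3RetainUVM(pUVM);
 *      ...
 *      VMR3ReleaseUVM(pUVM);   // do not touch pUVM after the final release
 * @endcode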
2952 */
2953VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
2954{
2955 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2956 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
2957 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
2958 return cRefs;
2959}
2960
2961
2962/**
2963 * Does the final release of the UVM structure.
2964 *
2965 * @param pUVM The user mode VM handle.
2966 */
2967static void vmR3DoReleaseUVM(PUVM pUVM)
2968{
2969 /*
2970 * Free the UVM.
2971 */
2972 Assert(!pUVM->pVM);
2973
2974 MMR3TermUVM(pUVM);
2975 STAMR3TermUVM(pUVM);
2976
2977 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2978 RTTlsFree(pUVM->vm.s.idxTLS);
2979 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
2980}
2981
2982
2983/**
2984 * Releases a reference to the user mode VM handle.
2985 *
2986 * @returns The new reference count, 0 if destroyed.
2987 * UINT32_MAX if @a pUVM is invalid.
2988 *
2989 * @param pUVM The user mode VM handle.
2990 * @sa VMR3RetainUVM
2991 */
2992VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
2993{
2994 if (!pUVM)
2995 return 0;
2996 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2997 uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
2998 if (!cRefs)
2999 vmR3DoReleaseUVM(pUVM);
3000 else
3001 AssertMsg(cRefs < _64K, ("%u\n", cRefs));
3002 return cRefs;
3003}
3004
3005
3006/**
3007 * Gets the VM name.
3008 *
3009 * @returns Pointer to a read-only string containing the name. NULL if called
3010 * too early.
3011 * @param pUVM The user mode VM handle.
3012 */
3013VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
3014{
3015 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3016 return pUVM->vm.s.pszName;
3017}
3018
3019
3020/**
3021 * Gets the VM UUID.
3022 *
3023 * @returns pUuid on success, NULL on failure.
3024 * @param pUVM The user mode VM handle.
3025 * @param pUuid Where to store the UUID.
3026 */
3027VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
3028{
3029 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3030 AssertPtrReturn(pUuid, NULL);
3031
3032 *pUuid = pUVM->vm.s.Uuid;
3033 return pUuid;
3034}
3035
3036
3037/**
3038 * Gets the current VM state.
3039 *
3040 * @returns The current VM state.
3041 * @param pVM VM handle.
3042 * @thread Any
3043 */
3044VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
3045{
3046 VM_ASSERT_VALID_EXT_RETURN(pVM, VMSTATE_TERMINATED);
3047 return pVM->enmVMState;
3048}
3049
3050
3051/**
3052 * Gets the current VM state.
3053 *
3054 * @returns The current VM state.
3055 * @param pUVM The user-mode VM handle.
3056 * @thread Any
3057 */
3058VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
3059{
3060 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
3061 if (RT_UNLIKELY(!pUVM->pVM))
3062 return VMSTATE_TERMINATED;
3063 return pUVM->pVM->enmVMState;
3064}
3065
3066
3067/**
3068 * Gets the state name string for a VM state.
3069 *
3070 * @returns Pointer to the state name. (readonly)
3071 * @param enmState The state.
3072 */
3073VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
3074{
3075 switch (enmState)
3076 {
3077 case VMSTATE_CREATING: return "CREATING";
3078 case VMSTATE_CREATED: return "CREATED";
3079 case VMSTATE_LOADING: return "LOADING";
3080 case VMSTATE_POWERING_ON: return "POWERING_ON";
3081 case VMSTATE_RESUMING: return "RESUMING";
3082 case VMSTATE_RUNNING: return "RUNNING";
3083 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
3084 case VMSTATE_RUNNING_FT: return "RUNNING_FT";
3085 case VMSTATE_RESETTING: return "RESETTING";
3086 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
3087 case VMSTATE_SUSPENDED: return "SUSPENDED";
3088 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
3089 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
3090 case VMSTATE_SUSPENDING: return "SUSPENDING";
3091 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
3092 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
3093 case VMSTATE_SAVING: return "SAVING";
3094 case VMSTATE_DEBUGGING: return "DEBUGGING";
3095 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
3096 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
3097 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
3098 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
3099 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
3100 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
3101 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
3102 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
3103 case VMSTATE_OFF: return "OFF";
3104 case VMSTATE_OFF_LS: return "OFF_LS";
3105 case VMSTATE_DESTROYING: return "DESTROYING";
3106 case VMSTATE_TERMINATED: return "TERMINATED";
3107
3108 default:
3109 AssertMsgFailed(("Unknown state %d\n", enmState));
3110 return "Unknown!\n";
3111 }
3112}
3113
3114
3115/**
3116 * Validates the state transition in strict builds.
3117 *
3118 * @returns true if valid, false if not.
3119 *
3120 * @param enmStateOld The old (current) state.
3121 * @param enmStateNew The proposed new state.
3122 *
3123 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
3124 * diagram (under State Machine Diagram).
3125 */
3126static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
3127{
3128#ifdef VBOX_STRICT
3129 switch (enmStateOld)
3130 {
3131 case VMSTATE_CREATING:
3132 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3133 break;
3134
3135 case VMSTATE_CREATED:
3136 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
3137 || enmStateNew == VMSTATE_POWERING_ON
3138 || enmStateNew == VMSTATE_POWERING_OFF
3139 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3140 break;
3141
3142 case VMSTATE_LOADING:
3143 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3144 || enmStateNew == VMSTATE_LOAD_FAILURE
3145 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3146 break;
3147
3148 case VMSTATE_POWERING_ON:
3149 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3150 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3151 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3152 break;
3153
3154 case VMSTATE_RESUMING:
3155 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3156 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3157 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3158 break;
3159
3160 case VMSTATE_RUNNING:
3161 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3162 || enmStateNew == VMSTATE_SUSPENDING
3163 || enmStateNew == VMSTATE_RESETTING
3164 || enmStateNew == VMSTATE_RUNNING_LS
3165 || enmStateNew == VMSTATE_RUNNING_FT
3166 || enmStateNew == VMSTATE_DEBUGGING
3167 || enmStateNew == VMSTATE_FATAL_ERROR
3168 || enmStateNew == VMSTATE_GURU_MEDITATION
3169 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3170 break;
3171
3172 case VMSTATE_RUNNING_LS:
3173 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
3174 || enmStateNew == VMSTATE_SUSPENDING_LS
3175 || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
3176 || enmStateNew == VMSTATE_RESETTING_LS
3177 || enmStateNew == VMSTATE_RUNNING
3178 || enmStateNew == VMSTATE_DEBUGGING_LS
3179 || enmStateNew == VMSTATE_FATAL_ERROR_LS
3180 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
3181 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3182 break;
3183
3184 case VMSTATE_RUNNING_FT:
3185 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3186 || enmStateNew == VMSTATE_FATAL_ERROR
3187 || enmStateNew == VMSTATE_GURU_MEDITATION
3188 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3189 break;
3190
3191 case VMSTATE_RESETTING:
3192 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3193 break;
3194
3195 case VMSTATE_RESETTING_LS:
3196 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
3197 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3198 break;
3199
3200 case VMSTATE_SUSPENDING:
3201 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3202 break;
3203
3204 case VMSTATE_SUSPENDING_LS:
3205 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3206 || enmStateNew == VMSTATE_SUSPENDED_LS
3207 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3208 break;
3209
3210 case VMSTATE_SUSPENDING_EXT_LS:
3211 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3212 || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
3213 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3214 break;
3215
3216 case VMSTATE_SUSPENDED:
3217 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3218 || enmStateNew == VMSTATE_SAVING
3219 || enmStateNew == VMSTATE_RESETTING
3220 || enmStateNew == VMSTATE_RESUMING
3221 || enmStateNew == VMSTATE_LOADING
3222 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3223 break;
3224
3225 case VMSTATE_SUSPENDED_LS:
3226 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3227 || enmStateNew == VMSTATE_SAVING
3228 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3229 break;
3230
3231 case VMSTATE_SUSPENDED_EXT_LS:
3232 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3233 || enmStateNew == VMSTATE_SAVING
3234 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3235 break;
3236
3237 case VMSTATE_SAVING:
3238 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3239 break;
3240
3241 case VMSTATE_DEBUGGING:
3242 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3243 || enmStateNew == VMSTATE_POWERING_OFF
3244 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3245 break;
3246
3247 case VMSTATE_DEBUGGING_LS:
3248 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3249 || enmStateNew == VMSTATE_RUNNING_LS
3250 || enmStateNew == VMSTATE_POWERING_OFF_LS
3251 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3252 break;
3253
3254 case VMSTATE_POWERING_OFF:
3255 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3256 break;
3257
3258 case VMSTATE_POWERING_OFF_LS:
3259 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3260 || enmStateNew == VMSTATE_OFF_LS
3261 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3262 break;
3263
3264 case VMSTATE_OFF:
3265 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3266 break;
3267
3268 case VMSTATE_OFF_LS:
3269 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3270 break;
3271
3272 case VMSTATE_FATAL_ERROR:
3273 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3274 break;
3275
3276 case VMSTATE_FATAL_ERROR_LS:
3277 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
3278 || enmStateNew == VMSTATE_POWERING_OFF_LS
3279 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3280 break;
3281
3282 case VMSTATE_GURU_MEDITATION:
3283 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3284 || enmStateNew == VMSTATE_POWERING_OFF
3285 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3286 break;
3287
3288 case VMSTATE_GURU_MEDITATION_LS:
3289 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
3290 || enmStateNew == VMSTATE_DEBUGGING_LS
3291 || enmStateNew == VMSTATE_POWERING_OFF_LS
3292 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3293 break;
3294
3295 case VMSTATE_LOAD_FAILURE:
3296 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3297 break;
3298
3299 case VMSTATE_DESTROYING:
3300 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3301 break;
3302
3303 case VMSTATE_TERMINATED:
3304 default:
3305 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3306 break;
3307 }
3308#endif /* VBOX_STRICT */
3309 return true;
3310}
3311
3312
3313/**
3314 * Does the state change callouts.
3315 *
3316 * The caller owns the AtStateCritSect.
3317 *
3318 * @param pVM The VM handle.
3319 * @param pUVM The UVM handle.
3320 * @param   enmStateNew     The new state.
3321 * @param enmStateOld The old state.
3322 */
3323static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3324{
3325 LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3326
3327 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3328 {
3329 pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
3330 if ( enmStateNew != VMSTATE_DESTROYING
3331 && pVM->enmVMState == VMSTATE_DESTROYING)
3332 break;
3333 AssertMsg(pVM->enmVMState == enmStateNew,
3334 ("You are not allowed to change the state while in the change callback, except "
3335 "from destroying the VM. There are restrictions in the way the state changes "
3336 "are propagated up to the EM execution loop and it makes the program flow very "
3337 "difficult to follow. (%s, expected %s, old %s)\n",
3338 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3339 VMR3GetStateName(enmStateOld)));
3340 }
3341}
3342
3343
3344/**
3345 * Sets the current VM state, with the AtStateCritSect already entered.
3346 *
3347 * @param pVM The VM handle.
3348 * @param pUVM The UVM handle.
3349 * @param enmStateNew The new state.
3350 * @param enmStateOld The old state.
3351 */
3352static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3353{
3354 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3355
3356 AssertMsg(pVM->enmVMState == enmStateOld,
3357 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3358 pUVM->vm.s.enmPrevVMState = enmStateOld;
3359 pVM->enmVMState = enmStateNew;
3360 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3361
3362 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3363}
3364
3365
3366/**
3367 * Sets the current VM state.
3368 *
3369 * @param pVM VM handle.
3370 * @param enmStateNew The new state.
3371 * @param enmStateOld The old state (for asserting only).
3372 */
3373static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3374{
3375 PUVM pUVM = pVM->pUVM;
3376 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3377
3378 AssertMsg(pVM->enmVMState == enmStateOld,
3379 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3380 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);
3381
3382 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3383}
3384
3385
3386/**
3387 * Tries to perform a state transition.
3388 *
3389 * @returns The 1-based ordinal of the succeeding transition.
3390 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3391 *
3392 * @param pVM The VM handle.
3393 * @param pszWho Who is trying to change it.
3394 * @param cTransitions The number of transitions in the ellipsis.
3395 * @param ... Transition pairs; new, old.
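 *
 * Example of how the transition pairs are passed; this mirrors the call made
 * by vmR3Save above:
 * @code
 *      rc = vmR3TrySetState(pVM, "VMR3Save", 2,
 *                           VMSTATE_SAVING, VMSTATE_SUSPENDED,     // pair 1: new, old
 *                           VMSTATE_RUNNING_LS, VMSTATE_RUNNING);  // pair 2: new, old
 *      // On success rc is 1 or 2 (the 1-based index of the matching pair),
 *      // otherwise it is VERR_VM_INVALID_VM_STATE.
 * @endcode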
3396 */
3397static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3398{
3399 va_list va;
3400 VMSTATE enmStateNew = VMSTATE_CREATED;
3401 VMSTATE enmStateOld = VMSTATE_CREATED;
3402
3403#ifdef VBOX_STRICT
3404 /*
3405 * Validate the input first.
3406 */
3407 va_start(va, cTransitions);
3408 for (unsigned i = 0; i < cTransitions; i++)
3409 {
3410 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3411 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3412 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3413 }
3414 va_end(va);
3415#endif
3416
3417 /*
3418 * Grab the lock and see if any of the proposed transitions works out.
3419 */
3420 va_start(va, cTransitions);
3421 int rc = VERR_VM_INVALID_VM_STATE;
3422 PUVM pUVM = pVM->pUVM;
3423 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3424
3425 VMSTATE enmStateCur = pVM->enmVMState;
3426
3427 for (unsigned i = 0; i < cTransitions; i++)
3428 {
3429 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3430 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3431 if (enmStateCur == enmStateOld)
3432 {
3433 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
3434 rc = i + 1;
3435 break;
3436 }
3437 }
3438
3439 if (RT_FAILURE(rc))
3440 {
3441 /*
3442 * Complain about it.
3443 */
3444 if (cTransitions == 1)
3445 {
3446 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3447 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3448 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3449 N_("%s failed because the VM state is %s instead of %s"),
3450 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3451 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3452 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3453 }
3454 else
3455 {
3456 va_end(va);
3457 va_start(va, cTransitions);
3458 LogRel(("%s:\n", pszWho));
3459 for (unsigned i = 0; i < cTransitions; i++)
3460 {
3461 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3462 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3463 LogRel(("%s%s -> %s",
3464 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3465 }
3466 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3467 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3468 N_("%s failed because the current VM state, %s, was not found in the state transition table"),
3469 pszWho, VMR3GetStateName(enmStateCur));
3470 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3471 pszWho, VMR3GetStateName(enmStateCur)));
3472 }
3473 }
3474
3475 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3476 va_end(va);
3477 Assert(rc > 0 || rc < 0);
3478 return rc;
3479}
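
/*
 * Illustrative sketch (not part of the original file): how a caller hands
 * vmR3TrySetState a list of new/old transition pairs and uses the 1-based
 * return value to tell which pair matched.  The suspend transitions and the
 * helper name below are examples of the calling convention only; the real
 * callers (VMR3Suspend and friends) live elsewhere in this file.
 */
#if 0 /* example only */
static int exampleSuspendTransition(PVM pVM)
{
    int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
                             VMSTATE_SUSPENDING,    VMSTATE_RUNNING,
                             VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
    if (RT_FAILURE(rc))
        return rc;
    if (rc == 2) /* the second pair matched, i.e. a live save is in progress */
        Log(("Suspending while a live save is active\n"));
    return VINF_SUCCESS;
}
#endif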
3480
3481
3482/**
3483 * Flag a guru meditation ... a hack.
3484 *
3485 * @param pVM The VM handle
3486 *
3487 * @todo Rewrite this part. The guru meditation should be flagged
3488 * immediately by the VMM and not by VMEmt.cpp when it's all over.
3489 */
3490void vmR3SetGuruMeditation(PVM pVM)
3491{
3492 PUVM pUVM = pVM->pUVM;
3493 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3494
3495 VMSTATE enmStateCur = pVM->enmVMState;
3496 if (enmStateCur == VMSTATE_RUNNING)
3497 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING);
3498 else if (enmStateCur == VMSTATE_RUNNING_LS)
3499 {
3500 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
3501 SSMR3Cancel(pVM);
3502 }
3503
3504 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3505}
3506
3507
3508/**
3509 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3510 *
3511 * @param pVM The VM handle.
3512 */
3513void vmR3SetTerminated(PVM pVM)
3514{
3515 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3516}
3517
3518
3519/**
3520 * Checks if the VM was teleported and hasn't been fully resumed yet.
3521 *
3522 * This applies to both sides of the teleportation since we may leave a working
3523 * clone behind and the user is allowed to resume this...
3524 *
3525 * @returns true / false.
3526 * @param pVM The VM handle.
3527 * @thread Any thread.
3528 */
3529VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3530{
3531 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3532 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3533}
3534
3535
3536/**
3537 * Registers a VM state change callback.
3538 *
3539 * You are not allowed to call any function which changes the VM state from a
3540 * state callback.
3541 *
3542 * @returns VBox status code.
3543 * @param pVM VM handle.
3544 * @param pfnAtState Pointer to callback.
3545 * @param pvUser User argument.
3546 * @thread Any.
3547 */
3548VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3549{
3550 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3551
3552 /*
3553 * Validate input.
3554 */
3555 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3556 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3557
3558 /*
3559 * Allocate a new record.
3560 */
3561 PUVM pUVM = pVM->pUVM;
3562 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3563 if (!pNew)
3564 return VERR_NO_MEMORY;
3565
3566 /* fill */
3567 pNew->pfnAtState = pfnAtState;
3568 pNew->pvUser = pvUser;
3569
3570 /* insert */
3571 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3572 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3573 *pUVM->vm.s.ppAtStateNext = pNew;
3574 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3575 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3576
3577 return VINF_SUCCESS;
3578}
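
/*
 * Illustrative sketch (not part of the original file): registering a state
 * change callback.  The callback signature follows the way pfnAtState is
 * invoked by vmR3DoAtState above; the function names and the NULL user
 * argument are hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(void) exampleAtState(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
{
    NOREF(pVM); NOREF(pvUser);
    /* Do not change the VM state from inside the callback, see vmR3DoAtState. */
    LogRel(("exampleAtState: %s -> %s\n", VMR3GetStateName(enmOldState), VMR3GetStateName(enmState)));
}

static int exampleRegisterAtState(PVM pVM)
{
    return VMR3AtStateRegister(pVM, exampleAtState, NULL /*pvUser*/);
}
#endif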
3579
3580
3581/**
3582 * Deregisters a VM state change callback.
3583 *
3584 * @returns VBox status code.
3585 * @param pVM VM handle.
3586 * @param pfnAtState Pointer to callback.
3587 * @param pvUser User argument.
3588 * @thread Any.
3589 */
3590VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3591{
3592 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3593
3594 /*
3595 * Validate input.
3596 */
3597 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3598 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3599
3600 PUVM pUVM = pVM->pUVM;
3601 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3602
3603 /*
3604 * Search the list for the entry.
3605 */
3606 PVMATSTATE pPrev = NULL;
3607 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3608 while ( pCur
3609 && ( pCur->pfnAtState != pfnAtState
3610 || pCur->pvUser != pvUser))
3611 {
3612 pPrev = pCur;
3613 pCur = pCur->pNext;
3614 }
3615 if (!pCur)
3616 {
3617 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3618 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3619 return VERR_FILE_NOT_FOUND;
3620 }
3621
3622 /*
3623 * Unlink it.
3624 */
3625 if (pPrev)
3626 {
3627 pPrev->pNext = pCur->pNext;
3628 if (!pCur->pNext)
3629 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3630 }
3631 else
3632 {
3633 pUVM->vm.s.pAtState = pCur->pNext;
3634 if (!pCur->pNext)
3635 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3636 }
3637
3638 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3639
3640 /*
3641 * Free it.
3642 */
3643 pCur->pfnAtState = NULL;
3644 pCur->pNext = NULL;
3645 MMR3HeapFree(pCur);
3646
3647 return VINF_SUCCESS;
3648}
3649
3650
3651/**
3652 * Registers a VM error callback.
3653 *
3654 * @returns VBox status code.
3655 * @param pVM The VM handle.
3656 * @param pfnAtError Pointer to callback.
3657 * @param pvUser User argument.
3658 * @thread Any.
3659 */
3660VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3661{
3662 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3663 return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
3664}
3665
3666
3667/**
3668 * Registers a VM error callback.
3669 *
3670 * @returns VBox status code.
3671 * @param pUVM The user mode VM handle.
3672 * @param pfnAtError Pointer to callback.
3673 * @param pvUser User argument.
3674 * @thread Any.
3675 */
3676VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3677{
3678 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3679
3680 /*
3681 * Validate input.
3682 */
3683 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3684 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3685
3686 /*
3687 * Allocate a new record.
3688 */
3689 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3690 if (!pNew)
3691 return VERR_NO_MEMORY;
3692
3693 /* fill */
3694 pNew->pfnAtError = pfnAtError;
3695 pNew->pvUser = pvUser;
3696
3697 /* insert */
3698 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3699 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3700 *pUVM->vm.s.ppAtErrorNext = pNew;
3701 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3702 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3703
3704 return VINF_SUCCESS;
3705}
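
/*
 * Illustrative sketch (not part of the original file): an error callback with
 * the signature pfnAtError is invoked with above (VM, user data, status code,
 * source position, format string plus va_list), and its registration.  The
 * names are hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(void) exampleAtError(PVM pVM, void *pvUser, int rc, RT_SRC_POS_DECL,
                                         const char *pszFormat, va_list va)
{
    NOREF(pVM); NOREF(pvUser);
    LogRel(("exampleAtError: rc=%Rrc at %s(%d) %s\n", rc, pszFile, iLine, pszFunction));
    LogRel(("exampleAtError: %N\n", pszFormat, &va));
}

static int exampleRegisterAtError(PUVM pUVM)
{
    return VMR3AtErrorRegisterU(pUVM, exampleAtError, NULL /*pvUser*/);
}
#endif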
3706
3707
3708/**
3709 * Deregisters a VM error callback.
3710 *
3711 * @returns VBox status code.
3712 * @param pVM The VM handle.
3713 * @param pfnAtError Pointer to callback.
3714 * @param pvUser User argument.
3715 * @thread Any.
3716 */
3717VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3718{
3719 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3720
3721 /*
3722 * Validate input.
3723 */
3724 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3725 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3726
3727 PUVM pUVM = pVM->pUVM;
3728 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3729
3730 /*
3731 * Search the list for the entry.
3732 */
3733 PVMATERROR pPrev = NULL;
3734 PVMATERROR pCur = pUVM->vm.s.pAtError;
3735 while ( pCur
3736 && ( pCur->pfnAtError != pfnAtError
3737 || pCur->pvUser != pvUser))
3738 {
3739 pPrev = pCur;
3740 pCur = pCur->pNext;
3741 }
3742 if (!pCur)
3743 {
3744 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3745 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3746 return VERR_FILE_NOT_FOUND;
3747 }
3748
3749 /*
3750 * Unlink it.
3751 */
3752 if (pPrev)
3753 {
3754 pPrev->pNext = pCur->pNext;
3755 if (!pCur->pNext)
3756 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3757 }
3758 else
3759 {
3760 pUVM->vm.s.pAtError = pCur->pNext;
3761 if (!pCur->pNext)
3762 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3763 }
3764
3765 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3766
3767 /*
3768 * Free it.
3769 */
3770 pCur->pfnAtError = NULL;
3771 pCur->pNext = NULL;
3772 MMR3HeapFree(pCur);
3773
3774 return VINF_SUCCESS;
3775}
3776
3777
3778/**
3779 * Ellipsis to va_list wrapper for calling pfnAtError.
3780 */
3781static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3782{
3783 va_list va;
3784 va_start(va, pszFormat);
3785 pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
3786 va_end(va);
3787}
3788
3789
3790/**
3791 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3792 * The message is found in VMINT.
3793 *
3794 * @param pVM The VM handle.
3795 * @thread EMT.
3796 */
3797VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3798{
3799 VM_ASSERT_EMT(pVM);
3800 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Congrats!\n"));
3801
3802 /*
3803 * Unpack the error (if we managed to format one).
3804 */
3805 PVMERROR pErr = pVM->vm.s.pErrorR3;
3806 const char *pszFile = NULL;
3807 const char *pszFunction = NULL;
3808 uint32_t iLine = 0;
3809 const char *pszMessage;
3810 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3811 if (pErr)
3812 {
3813 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3814 if (pErr->offFile)
3815 pszFile = (const char *)pErr + pErr->offFile;
3816 iLine = pErr->iLine;
3817 if (pErr->offFunction)
3818 pszFunction = (const char *)pErr + pErr->offFunction;
3819 if (pErr->offMessage)
3820 pszMessage = (const char *)pErr + pErr->offMessage;
3821 else
3822 pszMessage = "No message!";
3823 }
3824 else
3825 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3826
3827 /*
3828 * Call the at error callbacks.
3829 */
3830 PUVM pUVM = pVM->pUVM;
3831 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3832 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3833 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3834 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3835 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3836}
3837
3838
3839/**
3840 * Gets the number of errors raised via VMSetError.
3841 *
3842 * This can be used to avoid double error messages.
3843 *
3844 * @returns The error count.
3845 * @param pVM The VM handle.
3846 */
3847VMMR3DECL(uint32_t) VMR3GetErrorCount(PVM pVM)
3848{
3849 AssertPtrReturn(pVM, 0);
3850 return VMR3GetErrorCountU(pVM->pUVM);
3851}
3852
3853
3854/**
3855 * Gets the number of errors raised via VMSetError.
3856 *
3857 * This can be used to avoid double error messages.
3858 *
3859 * @returns The error count.
3860 * @param pUVM The user mode VM handle.
3861 */
3862VMMR3DECL(uint32_t) VMR3GetErrorCountU(PUVM pUVM)
3863{
3864 AssertPtrReturn(pUVM, 0);
3865 AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
3866 return pUVM->vm.s.cErrors;
3867}
3868
3869
3870/**
3871 * Creation time wrapper for vmR3SetErrorUV.
3872 *
3873 * @returns rc.
3874 * @param pUVM Pointer to the user mode VM structure.
3875 * @param rc The VBox status code.
3876 * @param RT_SRC_POS_DECL The source position of this error.
3877 * @param pszFormat Format string.
3878 * @param ... The arguments.
3879 * @thread Any thread.
3880 */
3881static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3882{
3883 va_list va;
3884 va_start(va, pszFormat);
3885 vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
3886 va_end(va);
3887 return rc;
3888}
3889
3890
3891/**
3892 * Worker which calls everyone listening to the VM error messages.
3893 *
3894 * @param pUVM Pointer to the user mode VM structure.
3895 * @param rc The VBox status code.
3896 * @param RT_SRC_POS_DECL The source position of this error.
3897 * @param pszFormat Format string.
3898 * @param pArgs Pointer to the format arguments.
3899 * @thread EMT
3900 */
3901DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3902{
3903 /*
3904 * Log the error.
3905 */
3906 va_list va3;
3907 va_copy(va3, *pArgs);
3908 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3909 "VMSetError: %N\n",
3910 pszFile, iLine, pszFunction, rc,
3911 pszFormat, &va3);
3912 va_end(va3);
3913
3914#ifdef LOG_ENABLED
3915 va_copy(va3, *pArgs);
3916 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3917 "%N\n",
3918 pszFile, iLine, pszFunction, rc,
3919 pszFormat, &va3);
3920 va_end(va3);
3921#endif
3922
3923 /*
3924 * Make a copy of the message.
3925 */
3926 if (pUVM->pVM)
3927 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3928
3929 /*
3930 * Call the at error callbacks.
3931 */
3932 bool fCalledSomeone = false;
3933 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3934 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3935 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3936 {
3937 va_list va2;
3938 va_copy(va2, *pArgs);
3939 pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3940 va_end(va2);
3941 fCalledSomeone = true;
3942 }
3943 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3944}
3945
3946
3947/**
3948 * Registers a VM runtime error callback.
3949 *
3950 * @returns VBox status code.
3951 * @param pVM The VM handle.
3952 * @param pfnAtRuntimeError Pointer to callback.
3953 * @param pvUser User argument.
3954 * @thread Any.
3955 */
3956VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3957{
3958 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3959
3960 /*
3961 * Validate input.
3962 */
3963 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3964 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3965
3966 /*
3967 * Allocate a new record.
3968 */
3969 PUVM pUVM = pVM->pUVM;
3970 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3971 if (!pNew)
3972 return VERR_NO_MEMORY;
3973
3974 /* fill */
3975 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3976 pNew->pvUser = pvUser;
3977
3978 /* insert */
3979 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3980 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3981 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3982 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3983 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3984
3985 return VINF_SUCCESS;
3986}
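
/*
 * Illustrative sketch (not part of the original file): a runtime error
 * callback matching the way pfnAtRuntimeError is invoked in
 * vmR3SetRuntimeErrorCommon below (flags, error ID, format string plus
 * va_list), and its registration.  The names are hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(void) exampleAtRuntimeError(PVM pVM, void *pvUser, uint32_t fFlags,
                                                const char *pszErrorId, const char *pszFormat, va_list va)
{
    NOREF(pVM); NOREF(pvUser);
    LogRel(("exampleAtRuntimeError: %s (fFlags=%#x)\n", pszErrorId, fFlags));
    LogRel(("exampleAtRuntimeError: %N\n", pszFormat, &va));
}

static int exampleRegisterAtRuntimeError(PVM pVM)
{
    return VMR3AtRuntimeErrorRegister(pVM, exampleAtRuntimeError, NULL /*pvUser*/);
}
#endif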
3987
3988
3989/**
3990 * Deregisters a VM runtime error callback.
3991 *
3992 * @returns VBox status code.
3993 * @param pVM The VM handle.
3994 * @param pfnAtRuntimeError Pointer to callback.
3995 * @param pvUser User argument.
3996 * @thread Any.
3997 */
3998VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3999{
4000 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
4001
4002 /*
4003 * Validate input.
4004 */
4005 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
4006 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4007
4008 PUVM pUVM = pVM->pUVM;
4009 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4010
4011 /*
4012 * Search the list for the entry.
4013 */
4014 PVMATRUNTIMEERROR pPrev = NULL;
4015 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
4016 while ( pCur
4017 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
4018 || pCur->pvUser != pvUser))
4019 {
4020 pPrev = pCur;
4021 pCur = pCur->pNext;
4022 }
4023 if (!pCur)
4024 {
4025 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
4026 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4027 return VERR_FILE_NOT_FOUND;
4028 }
4029
4030 /*
4031 * Unlink it.
4032 */
4033 if (pPrev)
4034 {
4035 pPrev->pNext = pCur->pNext;
4036 if (!pCur->pNext)
4037 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
4038 }
4039 else
4040 {
4041 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
4042 if (!pCur->pNext)
4043 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
4044 }
4045
4046 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4047
4048 /*
4049 * Free it.
4050 */
4051 pCur->pfnAtRuntimeError = NULL;
4052 pCur->pNext = NULL;
4053 MMR3HeapFree(pCur);
4054
4055 return VINF_SUCCESS;
4056}
4057
4058
4059/**
4060 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
4061 * the state to FatalError(LS).
4062 *
4063 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
4064 * return code, see FNVMMEMTRENDEZVOUS.)
4065 *
4066 * @param pVM The VM handle.
4067 * @param pVCpu The VMCPU handle of the EMT.
4068 * @param pvUser Ignored.
4069 */
4070static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
4071{
4072 NOREF(pVCpu);
4073 Assert(!pvUser); NOREF(pvUser);
4074
4075 /*
4076 * The first EMT thru here changes the state.
4077 */
4078 if (pVCpu->idCpu == pVM->cCpus - 1)
4079 {
4080 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
4081 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
4082 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
4083 if (RT_FAILURE(rc))
4084 return rc;
4085 if (rc == 2)
4086 SSMR3Cancel(pVM);
4087
4088 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
4089 }
4090
4091 /* This'll make sure we get out of wherever we are (e.g. REM). */
4092 return VINF_EM_SUSPEND;
4093}
4094
4095
4096/**
4097 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
4098 *
4099 * This does the common parts after the error has been saved / retrieved.
4100 *
4101 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4102 *
4103 * @param pVM The VM handle.
4104 * @param fFlags The error flags.
4105 * @param pszErrorId Error ID string.
4106 * @param pszFormat Format string.
4107 * @param pVa Pointer to the format arguments.
4108 */
4109static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4110{
4111 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
4112
4113 /*
4114 * Take actions before the call.
4115 */
4116 int rc;
4117 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4118 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4119 vmR3SetRuntimeErrorChangeState, NULL);
4120 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4121 rc = VMR3Suspend(pVM);
4122 else
4123 rc = VINF_SUCCESS;
4124
4125 /*
4126 * Do the callback round.
4127 */
4128 PUVM pUVM = pVM->pUVM;
4129 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4130 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4131 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4132 {
4133 va_list va;
4134 va_copy(va, *pVa);
4135 pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4136 va_end(va);
4137 }
4138 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4139
4140 return rc;
4141}
4142
4143
4144/**
4145 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
4146 */
4147static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
4148{
4149 va_list va;
4150 va_start(va, pszFormat);
4151 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
4152 va_end(va);
4153 return rc;
4154}
4155
4156
4157/**
4158 * This is a worker function for RC and Ring-0 calls to VMSetRuntimeError and
4159 * VMSetRuntimeErrorV.
4160 *
4161 * The message is found in VMINT.
4162 *
4163 * @returns VBox status code, see VMSetRuntimeError.
4164 * @param pVM The VM handle.
4165 * @thread EMT.
4166 */
4167VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4168{
4169 VM_ASSERT_EMT(pVM);
4170 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4171
4172 /*
4173 * Unpack the error (if we managed to format one).
4174 */
4175 const char *pszErrorId = "SetRuntimeError";
4176 const char *pszMessage = "No message!";
4177 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4178 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4179 if (pErr)
4180 {
4181 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4182 if (pErr->offErrorId)
4183 pszErrorId = (const char *)pErr + pErr->offErrorId;
4184 if (pErr->offMessage)
4185 pszMessage = (const char *)pErr + pErr->offMessage;
4186 fFlags = pErr->fFlags;
4187 }
4188
4189 /*
4190 * Join cause with vmR3SetRuntimeErrorV.
4191 */
4192 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4193}
4194
4195
4196/**
4197 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4198 *
4199 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4200 *
4201 * @param pVM The VM handle.
4202 * @param fFlags The error flags.
4203 * @param pszErrorId Error ID string.
4204 * @param pszMessage The error message residing in the MM heap.
4205 *
4206 * @thread EMT
4207 */
4208DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
4209{
4210#if 0 /** @todo make copy of the error msg. */
4211 /*
4212 * Make a copy of the message.
4213 */
4214 va_list va2;
4215 va_copy(va2, *pVa);
4216 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4217 va_end(va2);
4218#endif
4219
4220 /*
4221 * Join paths with VMR3SetRuntimeErrorWorker.
4222 */
4223 int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4224 MMR3HeapFree(pszMessage);
4225 return rc;
4226}
4227
4228
4229/**
4230 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4231 *
4232 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4233 *
4234 * @param pVM The VM handle.
4235 * @param fFlags The error flags.
4236 * @param pszErrorId Error ID string.
4237 * @param pszFormat Format string.
4238 * @param pVa Pointer to the format arguments.
4239 *
4240 * @thread EMT
4241 */
4242DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4243{
4244 /*
4245 * Make a copy of the message.
4246 */
4247 va_list va2;
4248 va_copy(va2, *pVa);
4249 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4250 va_end(va2);
4251
4252 /*
4253 * Join paths with VMR3SetRuntimeErrorWorker.
4254 */
4255 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4256}
4257
4258
4259/**
4260 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4261 *
4262 * This can be used to avoid double error messages.
4263 *
4264 * @returns The runtime error count.
4265 * @param pVM The VM handle.
4266 */
4267VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
4268{
4269 return pVM->pUVM->vm.s.cRuntimeErrors;
4270}
4271
4272
4273/**
4274 * Gets the ID of the virtual CPU associated with the calling thread.
4275 *
4276 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
4277 *
4278 * @param pVM The VM handle.
4279 */
4280VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
4281{
4282 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4283 return pUVCpu
4284 ? pUVCpu->idCpu
4285 : NIL_VMCPUID;
4286}
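
/*
 * Illustrative sketch (not part of the original file): using the NIL_VMCPUID
 * return value to tell whether the calling thread is an EMT.  The helper name
 * is hypothetical.
 */
#if 0 /* example only */
static bool exampleIsCallerEmt(PVM pVM)
{
    return VMR3GetVMCPUId(pVM) != NIL_VMCPUID;
}
#endif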
4287
4288
4289/**
4290 * Returns the native handle of the current EMT VMCPU thread.
4291 *
4292 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4293 * @param pVM The VM handle.
4294 * @thread EMT
4295 */
4296VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4297{
4298 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4299
4300 if (!pUVCpu)
4301 return NIL_RTNATIVETHREAD;
4302
4303 return pUVCpu->vm.s.NativeThreadEMT;
4304}
4305
4306
4307/**
4308 * Returns the native handle of the current EMT VMCPU thread.
4309 *
4310 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4311 * @param pUVM The user mode VM handle.
4312 * @thread EMT
4313 */
4314VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4315{
4316 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4317
4318 if (!pUVCpu)
4319 return NIL_RTNATIVETHREAD;
4320
4321 return pUVCpu->vm.s.NativeThreadEMT;
4322}
4323
4324
4325/**
4326 * Returns the handle of the current EMT VMCPU thread.
4327 *
4328 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
4329 * @param pVM The VM handle.
4330 * @thread EMT
4331 */
4332VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
4333{
4334 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4335
4336 if (!pUVCpu)
4337 return NIL_RTTHREAD;
4338
4339 return pUVCpu->vm.s.ThreadEMT;
4340}
4341
4342
4343/**
4344 * Returns the handle of the current EMT VMCPU thread.
4345 *
4346 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
4347 * @param pUVM The user mode VM handle.
4348 * @thread EMT
4349 */
4350VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
4351{
4352 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4353
4354 if (!pUVCpu)
4355 return NIL_RTTHREAD;
4356
4357 return pUVCpu->vm.s.ThreadEMT;
4358}
4359
4360
4361/**
4362 * Return the package and core id of a CPU.
4363 *
4364 * @returns VBox status code.
4365 * @param pVM The VM to operate on.
4366 * @param idCpu Virtual CPU to get the ID from.
4367 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4368 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4369 *
4370 */
4371VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PVM pVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4372{
4373 /*
4374 * Validate input.
4375 */
4376 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4377 AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
4378 AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
4379 if (idCpu >= pVM->cCpus)
4380 return VERR_INVALID_CPU_ID;
4381
4382 /*
4383 * Set return values.
4384 */
4385#ifdef VBOX_WITH_MULTI_CORE
4386 *pidCpuCore = idCpu;
4387 *pidCpuPackage = 0;
4388#else
4389 *pidCpuCore = 0;
4390 *pidCpuPackage = idCpu;
4391#endif
4392
4393 return VINF_SUCCESS;
4394}
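
/*
 * Illustrative sketch (not part of the original file): querying the core and
 * package ID of the first virtual CPU.  The helper name is hypothetical.
 */
#if 0 /* example only */
static void exampleLogCpuTopology(PVM pVM)
{
    uint32_t idCore    = 0;
    uint32_t idPackage = 0;
    int rc = VMR3GetCpuCoreAndPackageIdFromCpuId(pVM, 0 /*idCpu*/, &idCore, &idPackage);
    if (RT_SUCCESS(rc))
        LogRel(("VCPU #0: core=%u package=%u\n", idCore, idPackage));
}
#endif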
4395
4396
4397/**
4398 * Worker for VMR3HotUnplugCpu.
4399 *
4400 * @returns VINF_EM_WAIT_SIPI (strict status code).
4401 * @param pVM The VM handle.
4402 * @param idCpu The current CPU.
4403 */
4404static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4405{
4406 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
4407 VMCPU_ASSERT_EMT(pVCpu);
4408
4409 /*
4410 * Reset per CPU resources.
4411 *
4412 * Actually only needed for VT-x because the CPU seems to be still in some
4413 * paged mode and startup fails after a new hot plug event. SVM works fine
4414 * even without this.
4415 */
4416 Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
4417 PGMR3ResetUnpluggedCpu(pVM, pVCpu);
4418 PDMR3ResetCpu(pVCpu);
4419 TRPMR3ResetCpu(pVCpu);
4420 CPUMR3ResetCpu(pVCpu);
4421 EMR3ResetCpu(pVCpu);
4422 HWACCMR3ResetCpu(pVCpu);
4423 return VINF_EM_WAIT_SIPI;
4424}
4425
4426
4427/**
4428 * Hot-unplugs a CPU from the guest.
4429 *
4430 * @returns VBox status code.
4431 * @param pVM The VM to operate on.
4432 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4433 */
4434VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4435{
4436 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4437 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4438
4439 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4440 * broadcast requests. Just note down somewhere that the CPU is
4441 * offline and send it to SIPI wait. Maybe modify VMCPUSTATE and push
4442 * it out of the EM loops when offline. */
4443 return VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4444}
4445
4446
4447/**
4448 * Hot-plugs a CPU on the guest.
4449 *
4450 * @returns VBox status code.
4451 * @param pVM The VM to operate on.
4452 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4453 */
4454VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
4455{
4456 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4457 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4458
4459 /** @todo r=bird: Just mark it online and make sure it waits on SIPI. */
4460 return VINF_SUCCESS;
4461}
4462
4463
4464/**
4465 * Changes the VMM execution cap.
4466 *
4467 * @returns VBox status code.
4468 * @param pVM The VM to operate on.
4469 * @param uCpuExecutionCap New CPU execution cap in percent, 1-100. Where
4470 * 100 is max performance (default).
4471 */
4472VMMR3DECL(int) VMR3SetCpuExecutionCap(PVM pVM, uint32_t uCpuExecutionCap)
4473{
4474 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4475 AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
4476
4477 Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
4478 /* Note: not called from EMT. */
4479 pVM->uCpuExecutionCap = uCpuExecutionCap;
4480 return VINF_SUCCESS;
4481}
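
/*
 * Illustrative sketch (not part of the original file): throttling the guest
 * to roughly half the host CPU time via the execution cap.  The 50% value and
 * the helper name are examples only.
 */
#if 0 /* example only */
static int exampleThrottleGuest(PVM pVM)
{
    int rc = VMR3SetCpuExecutionCap(pVM, 50 /* percent */);
    AssertRC(rc);
    return rc;
}
#endif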
4482