VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 40940

Last change on this file since 40940 was 40274, checked in by vboxsync, 13 years ago

Introduced VBOX_WITH_REM in Config.kmk and the VMM.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 157.7 KB
Line 
1/* $Id: VM.cpp 40274 2012-02-28 13:17:35Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing request for execution in EMT (serialization purposes
23 * mostly) and for reporting error back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake, all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
30 * time when the VMM bit was a kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
 * However, as it turned out, VMM(.cpp) is almost empty; all it provides in
 * ring-3 is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straight forward
36 * refactoring, the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41/*******************************************************************************
42* Header Files *
43*******************************************************************************/
44#define LOG_GROUP LOG_GROUP_VM
45#include <VBox/vmm/cfgm.h>
46#include <VBox/vmm/vmm.h>
47#include <VBox/vmm/gvmm.h>
48#include <VBox/vmm/mm.h>
49#include <VBox/vmm/cpum.h>
50#include <VBox/vmm/selm.h>
51#include <VBox/vmm/trpm.h>
52#include <VBox/vmm/dbgf.h>
53#include <VBox/vmm/pgm.h>
54#include <VBox/vmm/pdmapi.h>
55#include <VBox/vmm/pdmcritsect.h>
56#include <VBox/vmm/em.h>
57#include <VBox/vmm/iem.h>
58#ifdef VBOX_WITH_REM
59# include <VBox/vmm/rem.h>
60#endif
61#include <VBox/vmm/tm.h>
62#include <VBox/vmm/stam.h>
63#include <VBox/vmm/patm.h>
64#include <VBox/vmm/csam.h>
65#include <VBox/vmm/iom.h>
66#include <VBox/vmm/ssm.h>
67#include <VBox/vmm/ftm.h>
68#include <VBox/vmm/hwaccm.h>
69#include "VMInternal.h"
70#include <VBox/vmm/vm.h>
71#include <VBox/vmm/uvm.h>
72
73#include <VBox/sup.h>
74#include <VBox/dbg.h>
75#include <VBox/err.h>
76#include <VBox/param.h>
77#include <VBox/log.h>
78#include <iprt/assert.h>
79#include <iprt/alloc.h>
80#include <iprt/asm.h>
81#include <iprt/env.h>
82#include <iprt/string.h>
83#include <iprt/time.h>
84#include <iprt/semaphore.h>
85#include <iprt/thread.h>
86#include <iprt/uuid.h>
87
88
89/*******************************************************************************
90* Structures and Typedefs *
91*******************************************************************************/
92/**
93 * VM destruction callback registration record.
94 */
95typedef struct VMATDTOR
96{
97 /** Pointer to the next record in the list. */
98 struct VMATDTOR *pNext;
99 /** Pointer to the callback function. */
100 PFNVMATDTOR pfnAtDtor;
101 /** The user argument. */
102 void *pvUser;
103} VMATDTOR;
104/** Pointer to a VM destruction callback registration record. */
105typedef VMATDTOR *PVMATDTOR;
106
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111/** Pointer to the list of VMs. */
112static PUVM g_pUVMsHead = NULL;
113
114/** Pointer to the list of at VM destruction callbacks. */
115static PVMATDTOR g_pVMAtDtorHead = NULL;
116/** Lock the g_pVMAtDtorHead list. */
117#define VM_ATDTOR_LOCK() do { } while (0)
118/** Unlock the g_pVMAtDtorHead list. */
119#define VM_ATDTOR_UNLOCK() do { } while (0)
120
121
122/*******************************************************************************
123* Internal Functions *
124*******************************************************************************/
125static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
126static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
127static int vmR3InitRing3(PVM pVM, PUVM pUVM);
128static int vmR3InitRing0(PVM pVM);
129static int vmR3InitGC(PVM pVM);
130static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
131#ifdef LOG_ENABLED
132static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
133#endif
134static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
135static void vmR3AtDtor(PVM pVM);
136static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
137static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
138static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
139static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
140static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
141static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
142
143
144/**
145 * Do global VMM init.
146 *
147 * @returns VBox status code.
148 */
149VMMR3DECL(int) VMR3GlobalInit(void)
150{
151 /*
152 * Only once.
153 */
154 static bool volatile s_fDone = false;
155 if (s_fDone)
156 return VINF_SUCCESS;
157
158 /*
159 * We're done.
160 */
161 s_fDone = true;
162 return VINF_SUCCESS;
163}
164
165
166
/**
 * Creates a virtual machine by calling the supplied configuration constructor.
 *
 * On successful return the VM has been created (VMSTATE_CREATED), i.e.
 * VMR3PowerOn() should be called to start the execution.
 *
 * @returns 0 on success.
 * @returns VBox error code on failure.
 * @param   cCpus               Number of virtual CPUs for the new VM.
 * @param   pVmm2UserMethods    An optional method table that the VMM can use
 *                              to make the user perform various action, like
 *                              for instance state saving.
 * @param   pfnVMAtError        Pointer to callback function for setting VM
 *                              errors. This was added as an implicit call to
 *                              VMR3AtErrorRegister() since there is no way the
 *                              caller can get to the VM handle early enough to
 *                              do this on its own.
 *                              This is called in the context of an EMT.
 * @param   pvUserVM            The user argument passed to pfnVMAtError.
 * @param   pfnCFGMConstructor  Pointer to callback function for constructing the VM configuration tree.
 *                              This is called in the context of an EMT0.
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 * @param   ppVM                Where to store the 'handle' of the created VM.
 */
VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
                          PFNVMATERROR pfnVMAtError, void *pvUserVM,
                          PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
                          PVM *ppVM)
{
    LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
             cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));

    /*
     * Validate the optional VMM -> user method table: both magics, the
     * version, and every callback pointer that may be NULL or valid.
     */
    if (pVmm2UserMethods)
    {
        AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
        AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
    }
    AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
    AssertPtrReturn(ppVM, VERR_INVALID_POINTER);

    /*
     * Because of the current hackiness of the applications
     * we'll have to initialize global stuff from here.
     * Later the applications will take care of this in a proper way.
     */
    static bool fGlobalInitDone = false;
    if (!fGlobalInitDone)
    {
        int rc = VMR3GlobalInit();
        if (RT_FAILURE(rc))
            return rc;
        fGlobalInitDone = true;
    }

    /*
     * Validate input.
     */
    AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);

    /*
     * Create the UVM so we can register the at-error callback
     * and consolidate a bit of cleanup code.
     */
    PUVM pUVM = NULL;                   /* shuts up gcc */
    int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
    if (RT_FAILURE(rc))
        return rc;
    if (pfnVMAtError)
        rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the support library creating the session for this VM.
         */
        rc = SUPR3Init(&pUVM->vm.s.pSession);
        if (RT_SUCCESS(rc))
        {
            /*
             * Call vmR3CreateU in the EMT thread and wait for it to finish.
             *
             * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
             *       submitting a request to a specific VCPU without a pVM. So, to make
             *       sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
             *       that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
             */
            PVMREQ pReq;
            rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
                              (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
            if (RT_SUCCESS(rc))
            {
                /* The request succeeded; the interesting status is the one
                   vmR3CreateU produced on the EMT. */
                rc = pReq->iStatus;
                VMR3ReqFree(pReq);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Success!
                     */
                    *ppVM = pUVM->pVM;
                    LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
                    return VINF_SUCCESS;
                }
            }
            else
                AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));

            /*
             * An error occurred during VM creation. Set the error message directly
             * using the initial callback, as the callback list might not exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VMX_IN_VMX_ROOT_MODE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't operate in VMX root mode. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
#endif
                    break;

#ifndef RT_OS_DARWIN
                case VERR_HWACCM_CONFIG_MISMATCH:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "This hardware extension is required by the VM configuration");
                    break;
#endif

                case VERR_SVM_IN_USE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't enable the AMD-V extension. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
#endif
                    break;

#ifdef RT_OS_LINUX
                case VERR_SUPDRV_COMPONENT_NOT_FOUND:
                    pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
                                  "that no kernel modules from an older version of VirtualBox exist. "
                                  "Then try to recompile and reload the kernel modules by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
                    break;
#endif

                case VERR_RAW_MODE_INVALID_SMP:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "VirtualBox requires this hardware extension to emulate more than one "
                                  "guest CPU");
                    break;

                case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
#ifdef RT_OS_LINUX
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
                                  "the VT-x extension in the VM settings. Note that without VT-x you have "
                                  "to reduce the number of guest CPUs to one");
#else
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel or disable the VT-x extension in the "
                                  "VM settings. Note that without VT-x you have to reduce the number of guest "
                                  "CPUs to one");
#endif
                    break;

                case VERR_PDM_DEVICE_NOT_FOUND:
                    pszError = N_("A virtual device is configured in the VM settings but the device "
                                  "implementation is missing.\n"
                                  "A possible reason for this error is a missing extension pack. Note "
                                  "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
                                  "support and remote desktop) are only available from an 'extension "
                                  "pack' which must be downloaded and installed separately");
                    break;

                case VERR_PCI_PASSTHROUGH_NO_HWACCM:
                    pszError = N_("PCI passthrough requires VT-x/AMD-V");
                    break;

                case VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING:
                    pszError = N_("PCI passthrough requires nested paging");
                    break;

                default:
                    /* Only synthesize a message when no at-error callback has
                       produced one already (count is tracked by the UVM). */
                    if (VMR3GetErrorCountU(pUVM) == 0)
                        pszError = RTErrGetFull(rc);
                    else
                        pszError = NULL; /* already set. */
                    break;
            }
            if (pszError)
                /* NOTE(review): pszError is passed as the format string; in the
                 * default case it comes from RTErrGetFull(), which is assumed to
                 * contain no '%' specifiers (the trailing rc argument is then
                 * unused) -- TODO confirm. */
                vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
        else
        {
            /*
             * An error occurred at support library initialization time (before the
             * VM could be created). Set the error message directly using the
             * initial callback, as the callback list doesn't exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VM_DRIVER_LOAD_ERROR:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv is not set up properly. "
                                  "Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not loaded");
#endif
                    break;
                case VERR_VM_DRIVER_OPEN_ERROR:
                    pszError = N_("VirtualBox kernel driver cannot be opened");
                    break;
                case VERR_VM_DRIVER_NOT_ACCESSIBLE:
#ifdef VBOX_WITH_HARDENING
                    /* This should only happen if the executable wasn't hardened - bad code/build. */
                    pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
                                  "Re-install VirtualBox. If you are building it yourself, you "
                                  "should make sure it installed correctly and that the setuid "
                                  "bit is set on the executables calling VMR3Create.");
#else
                    /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
# if defined(RT_OS_DARWIN)
                    pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do not "
                                  "have the vboxdrv KEXT from a different build or installation loaded.");
# elif defined(RT_OS_LINUX)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different build or "
                                  "installation loaded. Also, make sure the vboxdrv udev rule gives "
                                  "you the permission you need to access the device.");
# elif defined(RT_OS_WINDOWS)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
# else /* solaris, freebsd, ++. */
                    pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different install loaded.");
# endif
#endif
                    break;
                case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
                case VERR_VM_DRIVER_NOT_INSTALLED:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv was not created for some "
                                  "reason. Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not installed");
#endif
                    break;
                case VERR_NO_MEMORY:
                    pszError = N_("VirtualBox support library out of memory");
                    break;
                case VERR_VERSION_MISMATCH:
                case VERR_VM_DRIVER_VERSION_MISMATCH:
                    pszError = N_("The VirtualBox support driver which is running is from a different "
                                  "version of VirtualBox.  You can correct this by stopping all "
                                  "running instances of VirtualBox and reinstalling the software.");
                    break;
                default:
                    pszError = N_("Unknown error initializing kernel driver");
                    AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
            }
            vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
    }

    /* cleanup */
    vmR3DestroyUVM(pUVM, 2000);
    LogFlow(("VMR3Create: returns %Rrc\n", rc));
    return rc;
}
452
453
/**
 * Creates the UVM.
 *
 * Note: this function does not initialize the support library, even though
 * vmR3DestroyUVM will terminate it.
 *
 * @returns VBox status code.
 * @param   cCpus               Number of virtual CPUs
 * @param   pVmm2UserMethods    Pointer to the optional VMM -> User method
 *                              table.
 * @param   ppUVM               Where to store the UVM pointer.
 */
static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
{
    uint32_t i;

    /*
     * Create and initialize the UVM.
     */
    /* The allocation is sized for the flexible aCpus[cCpus] tail and is
       zero-initialized by RTMemPageAllocZ. */
    PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
    AssertReturn(pUVM, VERR_NO_MEMORY);
    pUVM->u32Magic = UVM_MAGIC;
    pUVM->cCpus = cCpus;
    pUVM->pVmm2UserMethods = pVmm2UserMethods;

    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));

    pUVM->vm.s.cUvmRefs = 1;
    /* Prime the tail pointers of the three notification lists so that
       registration appends in order. */
    pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;

    pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
    RTUuidClear(&pUVM->vm.s.Uuid);

    /* Initialize the VMCPU array in the UVM. */
    for (i = 0; i < cCpus; i++)
    {
        pUVM->aCpus[i].pUVM = pUVM;
        pUVM->aCpus[i].idCpu = i;
    }

    /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
    int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* Allocate a halt method event semaphore for each VCPU. */
        /* Pre-set all handles to NIL first so the failure-path destroy loop
           below is safe even when creation stops part way through. */
        for (i = 0; i < cCpus; i++)
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        for (i = 0; i < cCpus; i++)
        {
            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
                     */
                    rc = STAMR3InitUVM(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = MMR3InitUVM(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = PDMR3InitUVM(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                /*
                                 * Start the emulation threads for all VMCPUs.
                                 */
                                for (i = 0; i < cCpus; i++)
                                {
                                    /* Thread names: "EMT" for UP, "EMT-<n>" for SMP. */
                                    rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
                                                         RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
                                                         cCpus > 1 ? "EMT-%u" : "EMT", i);
                                    if (RT_FAILURE(rc))
                                        break;

                                    pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
                                }

                                if (RT_SUCCESS(rc))
                                {
                                    *ppUVM = pUVM;
                                    return VINF_SUCCESS;
                                }

                                /* bail out. */
                                /* Any EMTs already started are currently leaked
                                   here (see the todo below). */
                                while (i-- > 0)
                                {
                                    /** @todo rainy day: terminate the EMTs. */
                                }
                                PDMR3TermUVM(pUVM);
                            }
                            MMR3TermUVM(pUVM);
                        }
                        STAMR3TermUVM(pUVM);
                    }
                    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
                }
                RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
            }
        }
        /* Unwind: destroy whatever event semaphores were created (NIL
           handles were pre-set above, so this loop is always safe). */
        for (i = 0; i < cCpus; i++)
        {
            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        }
        RTTlsFree(pUVM->vm.s.idxTLS);
    }
    RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
    return rc;
}
576
577
/**
 * Creates and initializes the VM.
 *
 * Loads VMMR0, asks GVMM for a new shared VM structure, links it to the UVM,
 * runs the CFGM constructor and then the ring-3/ring-0/GC init phases. On
 * failure everything created here is torn down again before returning.
 *
 * @returns VBox status code.
 * @param   pUVM                Pointer to the user mode VM structure.
 * @param   cCpus               Number of virtual CPUs.
 * @param   pfnCFGMConstructor  Configuration-tree constructor callback.
 * @param   pvUserCFGM          User argument for pfnCFGMConstructor.
 *
 * @thread EMT
 */
static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
{
    /*
     * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
     */
    int rc = PDMR3LdrLoadVMMR0U(pUVM);
    if (RT_FAILURE(rc))
    {
        /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
         * bird: what about moving the message down here? Main picks the first message, right? */
        if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
            return rc;  /* proper error message set later on */
        return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
    }

    /*
     * Request GVMM to create a new VM for us.
     */
    GVMMCREATEVMREQ CreateVMReq;
    CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    CreateVMReq.Hdr.cbReq    = sizeof(CreateVMReq);
    CreateVMReq.pSession     = pUVM->vm.s.pSession;
    CreateVMReq.pVMR0        = NIL_RTR0PTR;
    CreateVMReq.pVMR3        = NULL;
    CreateVMReq.cCpus        = cCpus;
    rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
    if (RT_SUCCESS(rc))
    {
        /* Sanity check the ring-3 mapping GVMM handed back. */
        PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
        AssertRelease(VALID_PTR(pVM));
        AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
        AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
        AssertRelease(pVM->cCpus == cCpus);
        AssertRelease(pVM->uCpuExecutionCap == 100);
        AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
        AssertCompileMemberAlignment(VM, cpum, 64);
        AssertCompileMemberAlignment(VM, tm, 64);
        AssertCompileMemberAlignment(VM, aCpus, PAGE_SIZE);

        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
             pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));

        /*
         * Initialize the VM structure and our internal data (VMINT).
         */
        pVM->pUVM = pUVM;

        /* Cross-link the shared VMCPU structures with the UVM ones. */
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            pVM->aCpus[i].pUVCpu        = &pUVM->aCpus[i];
            pVM->aCpus[i].idCpu         = i;
            pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
            Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
            /* hNativeThreadR0 is initialized on EMT registration. */
            pUVM->aCpus[i].pVCpu        = &pVM->aCpus[i];
            pUVM->aCpus[i].pVM          = pVM;
        }


        /*
         * Init the configuration.
         */
        rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
        if (RT_SUCCESS(rc))
        {
            PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
            rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
            if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
                pVM->fHWACCMEnabled = true;

            /*
             * If executing in fake suplib mode disable RR3 and RR0 in the config.
             */
            const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
            if (psz && !strcmp(psz, "fake"))
            {
                CFGMR3RemoveValue(pRoot, "RawR3Enabled");
                CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
                CFGMR3RemoveValue(pRoot, "RawR0Enabled");
                CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
            }

            /*
             * Make sure the CPU count in the config data matches.
             */
            if (RT_SUCCESS(rc))
            {
                uint32_t cCPUsCfg;
                rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
                AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
                if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
                {
                    AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
                                           cCPUsCfg, cCpus));
                    rc = VERR_INVALID_PARAMETER;
                }
            }

            /*
             * Get the CPU execution cap.
             */
            if (RT_SUCCESS(rc))
            {
                rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
                AssertLogRelMsgRC(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc));
            }

            /*
             * Get the VM name and UUID.
             */
            if (RT_SUCCESS(rc))
            {
                rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
                AssertLogRelMsg(RT_SUCCESS(rc), ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc));
            }

            if (RT_SUCCESS(rc))
            {
                rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
                /* A missing UUID is fine (Uuid was cleared in vmR3CreateUVM). */
                if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                    rc = VINF_SUCCESS;
                AssertLogRelMsg(RT_SUCCESS(rc), ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc));
            }

            if (RT_SUCCESS(rc))
            {
                /*
                 * Init the ring-3 components and ring-3 per cpu data, finishing it off
                 * by a relocation round (intermediate context finalization will do this).
                 */
                rc = vmR3InitRing3(pVM, pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = PGMR3FinalizeMappings(pVM);
                    if (RT_SUCCESS(rc))
                    {

                        LogFlow(("Ring-3 init succeeded\n"));

                        /*
                         * Init the Ring-0 components.
                         */
                        rc = vmR3InitRing0(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            /* Relocate again, because some switcher fixups depends on R0 init results. */
                            VMR3Relocate(pVM, 0);

#ifdef VBOX_WITH_DEBUGGER
                            /*
                             * Init the tcp debugger console if we're building
                             * with debugger support.
                             */
                            /* Note: the '{' opened here is closed inside the
                               matching #ifdef block further down. */
                            void *pvUser = NULL;
                            rc = DBGCTcpCreate(pVM, &pvUser);
                            if (    RT_SUCCESS(rc)
                                ||  rc == VERR_NET_ADDRESS_IN_USE)
                            {
                                pUVM->vm.s.pvDBGC = pvUser;
#endif
                                /*
                                 * Init the Guest Context components.
                                 */
                                rc = vmR3InitGC(pVM);
                                if (RT_SUCCESS(rc))
                                {
                                    /*
                                     * Now we can safely set the VM halt method to default.
                                     */
                                    rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
                                    if (RT_SUCCESS(rc))
                                    {
                                        /*
                                         * Set the state and link into the global list.
                                         */
                                        vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
                                        pUVM->pNext = g_pUVMsHead;
                                        g_pUVMsHead = pUVM;

#ifdef LOG_ENABLED
                                        RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
#endif
                                        return VINF_SUCCESS;
                                    }
                                }
#ifdef VBOX_WITH_DEBUGGER
                                DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
                                pUVM->vm.s.pvDBGC = NULL;
                            }
#endif
                            /* ring-0 teardown is handled by vmR3Destroy below */
                        }
                    }
                    vmR3Destroy(pVM);
                }
            }
            /* fall through to CFGM cleanup on any failure above */

            /* Clean CFGM. */
            int rc2 = CFGMR3Term(pVM);
            AssertRC(rc2);
        }

        /*
         * Do automatic cleanups while the VM structure is still alive and all
         * references to it are still working.
         */
        PDMR3CritSectTerm(pVM);

        /*
         * Drop all references to VM and the VMCPU structures, then
         * tell GVMM to destroy the VM.
         */
        pUVM->pVM = NULL;
        for (VMCPUID i = 0; i < pUVM->cCpus; i++)
        {
            pUVM->aCpus[i].pVM = NULL;
            pUVM->aCpus[i].pVCpu = NULL;
        }
        Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);

        if (pUVM->cCpus > 1)
        {
            /* Poke the other EMTs since they may have stale pVM and pVCpu references
               on the stack (see VMR3WaitU for instance) if they've been awakened after
               VM creation. */
            for (VMCPUID i = 1; i < pUVM->cCpus; i++)
                VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
            RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
        }

        int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertRC(rc2);
    }
    else
        vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));

    LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
    return rc;
}
823
824
825/**
826 * Register the calling EMT with GVM.
827 *
828 * @returns VBox status code.
829 * @param pVM The VM handle.
830 * @param idCpu The Virtual CPU ID.
831 */
832static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
833{
834 Assert(VMMGetCpuId(pVM) == idCpu);
835 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
836 if (RT_FAILURE(rc))
837 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
838 return rc;
839}
840
841
842/**
843 * Initializes all R3 components of the VM
844 */
845static int vmR3InitRing3(PVM pVM, PUVM pUVM)
846{
847 int rc;
848
849 /*
850 * Register the other EMTs with GVM.
851 */
852 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
853 {
854 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
855 if (RT_FAILURE(rc))
856 return rc;
857 }
858
859 /*
860 * Init all R3 components, the order here might be important.
861 */
862 rc = MMR3Init(pVM);
863 if (RT_SUCCESS(rc))
864 {
865 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
866 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
867 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
868 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
869 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
870 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
871 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
872 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
873 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
874 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
875 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
876 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
877 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
878 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
879
880 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
881 {
882 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
883 AssertRC(rc);
884 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
885 AssertRC(rc);
886 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/VM/CPU%d/Halt/BlockOverslept", idCpu);
887 AssertRC(rc);
888 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/VM/CPU%d/Halt/BlockInsomnia", idCpu);
889 AssertRC(rc);
890 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/VM/CPU%d/Halt/BlockOnTime", idCpu);
891 AssertRC(rc);
892 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
893 AssertRC(rc);
894 }
895
896 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
897 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
898 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
899 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
900 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
901 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
902 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
903 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
904
905 rc = CPUMR3Init(pVM);
906 if (RT_SUCCESS(rc))
907 {
908 rc = HWACCMR3Init(pVM);
909 if (RT_SUCCESS(rc))
910 {
911 rc = PGMR3Init(pVM);
912 if (RT_SUCCESS(rc))
913 {
914#ifdef VBOX_WITH_REM
915 rc = REMR3Init(pVM);
916#endif
917 if (RT_SUCCESS(rc))
918 {
919 rc = MMR3InitPaging(pVM);
920 if (RT_SUCCESS(rc))
921 rc = TMR3Init(pVM);
922 if (RT_SUCCESS(rc))
923 {
924 rc = FTMR3Init(pVM);
925 if (RT_SUCCESS(rc))
926 {
927 rc = VMMR3Init(pVM);
928 if (RT_SUCCESS(rc))
929 {
930 rc = SELMR3Init(pVM);
931 if (RT_SUCCESS(rc))
932 {
933 rc = TRPMR3Init(pVM);
934 if (RT_SUCCESS(rc))
935 {
936 rc = CSAMR3Init(pVM);
937 if (RT_SUCCESS(rc))
938 {
939 rc = PATMR3Init(pVM);
940 if (RT_SUCCESS(rc))
941 {
942 rc = IOMR3Init(pVM);
943 if (RT_SUCCESS(rc))
944 {
945 rc = EMR3Init(pVM);
946 if (RT_SUCCESS(rc))
947 {
948 rc = IEMR3Init(pVM);
949 if (RT_SUCCESS(rc))
950 {
951 rc = DBGFR3Init(pVM);
952 if (RT_SUCCESS(rc))
953 {
954 rc = PDMR3Init(pVM);
955 if (RT_SUCCESS(rc))
956 {
957 rc = PGMR3InitDynMap(pVM);
958 if (RT_SUCCESS(rc))
959 rc = MMR3HyperInitFinalize(pVM);
960 if (RT_SUCCESS(rc))
961 rc = PATMR3InitFinalize(pVM);
962 if (RT_SUCCESS(rc))
963 rc = PGMR3InitFinalize(pVM);
964 if (RT_SUCCESS(rc))
965 rc = SELMR3InitFinalize(pVM);
966 if (RT_SUCCESS(rc))
967 rc = TMR3InitFinalize(pVM);
968#ifdef VBOX_WITH_REM
969 if (RT_SUCCESS(rc))
970 rc = REMR3InitFinalize(pVM);
971#endif
972 if (RT_SUCCESS(rc))
973 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
974 if (RT_SUCCESS(rc))
975 {
976 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
977 return VINF_SUCCESS;
978 }
979
980 int rc2 = PDMR3Term(pVM);
981 AssertRC(rc2);
982 }
983 int rc2 = DBGFR3Term(pVM);
984 AssertRC(rc2);
985 }
986 int rc2 = IEMR3Term(pVM);
987 AssertRC(rc2);
988 }
989 int rc2 = EMR3Term(pVM);
990 AssertRC(rc2);
991 }
992 int rc2 = IOMR3Term(pVM);
993 AssertRC(rc2);
994 }
995 int rc2 = PATMR3Term(pVM);
996 AssertRC(rc2);
997 }
998 int rc2 = CSAMR3Term(pVM);
999 AssertRC(rc2);
1000 }
1001 int rc2 = TRPMR3Term(pVM);
1002 AssertRC(rc2);
1003 }
1004 int rc2 = SELMR3Term(pVM);
1005 AssertRC(rc2);
1006 }
1007 int rc2 = VMMR3Term(pVM);
1008 AssertRC(rc2);
1009 }
1010 int rc2 = FTMR3Term(pVM);
1011 AssertRC(rc2);
1012 }
1013 int rc2 = TMR3Term(pVM);
1014 AssertRC(rc2);
1015 }
1016#ifdef VBOX_WITH_REM
1017 int rc2 = REMR3Term(pVM);
1018 AssertRC(rc2);
1019#endif
1020 }
1021 int rc2 = PGMR3Term(pVM);
1022 AssertRC(rc2);
1023 }
1024 int rc2 = HWACCMR3Term(pVM);
1025 AssertRC(rc2);
1026 }
1027 //int rc2 = CPUMR3Term(pVM);
1028 //AssertRC(rc2);
1029 }
1030 /* MMR3Term is not called here because it'll kill the heap. */
1031 }
1032
1033 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
1034 return rc;
1035}
1036
1037
1038/**
1039 * Initializes all R0 components of the VM
1040 */
1041static int vmR3InitRing0(PVM pVM)
1042{
1043 LogFlow(("vmR3InitRing0:\n"));
1044
1045 /*
1046 * Check for FAKE suplib mode.
1047 */
1048 int rc = VINF_SUCCESS;
1049 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1050 if (!psz || strcmp(psz, "fake"))
1051 {
1052 /*
1053 * Call the VMMR0 component and let it do the init.
1054 */
1055 rc = VMMR3InitR0(pVM);
1056 }
1057 else
1058 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1059
1060 /*
1061 * Do notifications and return.
1062 */
1063 if (RT_SUCCESS(rc))
1064 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1065 if (RT_SUCCESS(rc))
1066 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HWACCM);
1067
1068 /** @todo Move this to the VMINITCOMPLETED_HWACCM notification handler. */
1069 if (RT_SUCCESS(rc))
1070 CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
1071
1072 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1073 return rc;
1074}
1075
1076
1077/**
1078 * Initializes all GC components of the VM
1079 */
1080static int vmR3InitGC(PVM pVM)
1081{
1082 LogFlow(("vmR3InitGC:\n"));
1083
1084 /*
1085 * Check for FAKE suplib mode.
1086 */
1087 int rc = VINF_SUCCESS;
1088 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1089 if (!psz || strcmp(psz, "fake"))
1090 {
1091 /*
1092 * Call the VMMR0 component and let it do the init.
1093 */
1094 rc = VMMR3InitRC(pVM);
1095 }
1096 else
1097 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1098
1099 /*
1100 * Do notifications and return.
1101 */
1102 if (RT_SUCCESS(rc))
1103 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1104 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1105 return rc;
1106}
1107
1108
1109/**
1110 * Do init completed notifications.
1111 *
1112 * @returns VBox status code.
1113 * @param pVM The VM handle.
1114 * @param enmWhat What's completed.
1115 */
1116static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1117{
1118 int rc = VMMR3InitCompleted(pVM, enmWhat);
1119 if (RT_SUCCESS(rc))
1120 rc = HWACCMR3InitCompleted(pVM, enmWhat);
1121 if (RT_SUCCESS(rc))
1122 rc = PGMR3InitCompleted(pVM, enmWhat);
1123 return rc;
1124}
1125
1126
#ifdef LOG_ENABLED
/**
 * Logger callback for inserting a custom prefix.
 *
 * Emits the current EMT's virtual CPU id as two hex digits, or "xy" when the
 * calling thread is not associated with a VCPU.
 *
 * @returns Number of chars written.
 * @param   pLogger     The logger.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The output buffer size.
 * @param   pvUser      Pointer to the UVM structure.
 */
static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    AssertReturn(cchBuf >= 2, 0);
    PUVM    pUVM   = (PUVM)pvUser;
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
    if (!pUVCpu)
    {
        /* Not an EMT: use a fixed marker instead of a CPU id. */
        pchBuf[0] = 'x';
        pchBuf[1] = 'y';
    }
    else
    {
        /* Two hex digits of the virtual CPU id, high nibble first. */
        static const char s_achDigits[17] = "0123456789abcdef";
        VMCPUID const     idCpu           = pUVCpu->idCpu;
        pchBuf[0] = s_achDigits[(idCpu >> 4) & 15];
        pchBuf[1] = s_achDigits[ idCpu       & 15];
    }

    NOREF(pLogger);
    return 2;
}
#endif /* LOG_ENABLED */
1159
1160
1161/**
1162 * Calls the relocation functions for all VMM components so they can update
1163 * any GC pointers. When this function is called all the basic VM members
1164 * have been updated and the actual memory relocation have been done
1165 * by the PGM/MM.
1166 *
1167 * This is used both on init and on runtime relocations.
1168 *
1169 * @param pVM VM handle.
1170 * @param offDelta Relocation delta relative to old location.
1171 */
1172VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1173{
1174 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1175
1176 /*
1177 * The order here is very important!
1178 */
1179 PGMR3Relocate(pVM, offDelta);
1180 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1181 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1182 CPUMR3Relocate(pVM);
1183 HWACCMR3Relocate(pVM);
1184 SELMR3Relocate(pVM);
1185 VMMR3Relocate(pVM, offDelta);
1186 SELMR3Relocate(pVM); /* !hack! fix stack! */
1187 TRPMR3Relocate(pVM, offDelta);
1188 PATMR3Relocate(pVM);
1189 CSAMR3Relocate(pVM, offDelta);
1190 IOMR3Relocate(pVM, offDelta);
1191 EMR3Relocate(pVM);
1192 TMR3Relocate(pVM, offDelta);
1193 IEMR3Relocate(pVM);
1194 DBGFR3Relocate(pVM, offDelta);
1195 PDMR3Relocate(pVM, offDelta);
1196}
1197
1198
1199/**
1200 * EMT rendezvous worker for VMR3PowerOn.
1201 *
1202 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1203 * code, see FNVMMEMTRENDEZVOUS.)
1204 *
1205 * @param pVM The VM handle.
1206 * @param pVCpu The VMCPU handle of the EMT.
1207 * @param pvUser Ignored.
1208 */
1209static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1210{
1211 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1212 Assert(!pvUser); NOREF(pvUser);
1213
1214 /*
1215 * The first thread thru here tries to change the state. We shouldn't be
1216 * called again if this fails.
1217 */
1218 if (pVCpu->idCpu == pVM->cCpus - 1)
1219 {
1220 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1221 if (RT_FAILURE(rc))
1222 return rc;
1223 }
1224
1225 VMSTATE enmVMState = VMR3GetState(pVM);
1226 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1227 ("%s\n", VMR3GetStateName(enmVMState)),
1228 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1229
1230 /*
1231 * All EMTs changes their state to started.
1232 */
1233 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1234
1235 /*
1236 * EMT(0) is last thru here and it will make the notification calls
1237 * and advance the state.
1238 */
1239 if (pVCpu->idCpu == 0)
1240 {
1241 PDMR3PowerOn(pVM);
1242 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1243 }
1244
1245 return VINF_SUCCESS;
1246}
1247
1248
1249/**
1250 * Powers on the virtual machine.
1251 *
1252 * @returns VBox status code.
1253 *
1254 * @param pVM The VM to power on.
1255 *
1256 * @thread Any thread.
1257 * @vmstate Created
1258 * @vmstateto PoweringOn+Running
1259 */
1260VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1261{
1262 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1263 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1264
1265 /*
1266 * Gather all the EMTs to reduce the init TSC drift and keep
1267 * the state changing APIs a bit uniform.
1268 */
1269 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1270 vmR3PowerOn, NULL);
1271 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1272 return rc;
1273}
1274
1275
1276/**
1277 * Does the suspend notifications.
1278 *
1279 * @param pVM The VM handle.
1280 * @thread EMT(0)
1281 */
1282static void vmR3SuspendDoWork(PVM pVM)
1283{
1284 PDMR3Suspend(pVM);
1285}
1286
1287
1288/**
1289 * EMT rendezvous worker for VMR3Suspend.
1290 *
1291 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1292 * return code, see FNVMMEMTRENDEZVOUS.)
1293 *
1294 * @param pVM The VM handle.
1295 * @param pVCpu The VMCPU handle of the EMT.
1296 * @param pvUser Ignored.
1297 */
1298static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1299{
1300 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1301 Assert(!pvUser); NOREF(pvUser);
1302
1303 /*
1304 * The first EMT switches the state to suspending. If this fails because
1305 * something was racing us in one way or the other, there will be no more
1306 * calls and thus the state assertion below is not going to annoy anyone.
1307 */
1308 if (pVCpu->idCpu == pVM->cCpus - 1)
1309 {
1310 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1311 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1312 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1313 if (RT_FAILURE(rc))
1314 return rc;
1315 }
1316
1317 VMSTATE enmVMState = VMR3GetState(pVM);
1318 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1319 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1320 ("%s\n", VMR3GetStateName(enmVMState)),
1321 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1322
1323 /*
1324 * EMT(0) does the actually suspending *after* all the other CPUs have
1325 * been thru here.
1326 */
1327 if (pVCpu->idCpu == 0)
1328 {
1329 vmR3SuspendDoWork(pVM);
1330
1331 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1332 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1333 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1334 if (RT_FAILURE(rc))
1335 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1336 }
1337
1338 return VINF_EM_SUSPEND;
1339}
1340
1341
1342/**
1343 * Suspends a running VM.
1344 *
1345 * @returns VBox status code. When called on EMT, this will be a strict status
1346 * code that has to be propagated up the call stack.
1347 *
1348 * @param pVM The VM to suspend.
1349 *
1350 * @thread Any thread.
1351 * @vmstate Running or RunningLS
1352 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1353 */
1354VMMR3DECL(int) VMR3Suspend(PVM pVM)
1355{
1356 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1357 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1358
1359 /*
1360 * Gather all the EMTs to make sure there are no races before
1361 * changing the VM state.
1362 */
1363 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1364 vmR3Suspend, NULL);
1365 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1366 return rc;
1367}
1368
1369
1370/**
1371 * EMT rendezvous worker for VMR3Resume.
1372 *
1373 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1374 * return code, see FNVMMEMTRENDEZVOUS.)
1375 *
1376 * @param pVM The VM handle.
1377 * @param pVCpu The VMCPU handle of the EMT.
1378 * @param pvUser Ignored.
1379 */
1380static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1381{
1382 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1383 Assert(!pvUser); NOREF(pvUser);
1384
1385 /*
1386 * The first thread thru here tries to change the state. We shouldn't be
1387 * called again if this fails.
1388 */
1389 if (pVCpu->idCpu == pVM->cCpus - 1)
1390 {
1391 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1392 if (RT_FAILURE(rc))
1393 return rc;
1394 }
1395
1396 VMSTATE enmVMState = VMR3GetState(pVM);
1397 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1398 ("%s\n", VMR3GetStateName(enmVMState)),
1399 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1400
1401#if 0
1402 /*
1403 * All EMTs changes their state to started.
1404 */
1405 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1406#endif
1407
1408 /*
1409 * EMT(0) is last thru here and it will make the notification calls
1410 * and advance the state.
1411 */
1412 if (pVCpu->idCpu == 0)
1413 {
1414 PDMR3Resume(pVM);
1415 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1416 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1417 }
1418
1419 return VINF_EM_RESUME;
1420}
1421
1422
1423/**
1424 * Resume VM execution.
1425 *
1426 * @returns VBox status code. When called on EMT, this will be a strict status
1427 * code that has to be propagated up the call stack.
1428 *
1429 * @param pVM The VM to resume.
1430 *
1431 * @thread Any thread.
1432 * @vmstate Suspended
1433 * @vmstateto Running
1434 */
1435VMMR3DECL(int) VMR3Resume(PVM pVM)
1436{
1437 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1438 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1439
1440 /*
1441 * Gather all the EMTs to make sure there are no races before
1442 * changing the VM state.
1443 */
1444 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1445 vmR3Resume, NULL);
1446 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1447 return rc;
1448}
1449
1450
1451/**
1452 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1453 * after the live step has been completed.
1454 *
1455 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1456 * return code, see FNVMMEMTRENDEZVOUS.)
1457 *
1458 * @param pVM The VM handle.
1459 * @param pVCpu The VMCPU handle of the EMT.
1460 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1461 */
1462static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1463{
1464 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1465 bool *pfSuspended = (bool *)pvUser;
1466
1467 /*
1468 * The first thread thru here tries to change the state. We shouldn't be
1469 * called again if this fails.
1470 */
1471 if (pVCpu->idCpu == pVM->cCpus - 1U)
1472 {
1473 PUVM pUVM = pVM->pUVM;
1474 int rc;
1475
1476 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1477 VMSTATE enmVMState = pVM->enmVMState;
1478 switch (enmVMState)
1479 {
1480 case VMSTATE_RUNNING_LS:
1481 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
1482 rc = VINF_SUCCESS;
1483 break;
1484
1485 case VMSTATE_SUSPENDED_EXT_LS:
1486 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1487 rc = VINF_SUCCESS;
1488 break;
1489
1490 case VMSTATE_DEBUGGING_LS:
1491 rc = VERR_TRY_AGAIN;
1492 break;
1493
1494 case VMSTATE_OFF_LS:
1495 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS);
1496 rc = VERR_SSM_LIVE_POWERED_OFF;
1497 break;
1498
1499 case VMSTATE_FATAL_ERROR_LS:
1500 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS);
1501 rc = VERR_SSM_LIVE_FATAL_ERROR;
1502 break;
1503
1504 case VMSTATE_GURU_MEDITATION_LS:
1505 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS);
1506 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1507 break;
1508
1509 case VMSTATE_POWERING_OFF_LS:
1510 case VMSTATE_SUSPENDING_EXT_LS:
1511 case VMSTATE_RESETTING_LS:
1512 default:
1513 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1514 rc = VERR_VM_UNEXPECTED_VM_STATE;
1515 break;
1516 }
1517 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1518 if (RT_FAILURE(rc))
1519 {
1520 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1521 return rc;
1522 }
1523 }
1524
1525 VMSTATE enmVMState = VMR3GetState(pVM);
1526 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1527 ("%s\n", VMR3GetStateName(enmVMState)),
1528 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1529
1530 /*
1531 * Only EMT(0) have work to do since it's last thru here.
1532 */
1533 if (pVCpu->idCpu == 0)
1534 {
1535 vmR3SuspendDoWork(pVM);
1536 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1537 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1538 if (RT_FAILURE(rc))
1539 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1540
1541 *pfSuspended = true;
1542 }
1543
1544 return VINF_EM_SUSPEND;
1545}
1546
1547
1548/**
1549 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1550 * SSMR3LiveDoStep1 failure.
1551 *
1552 * Doing this as a rendezvous operation avoids all annoying transition
1553 * states.
1554 *
1555 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1556 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1557 *
1558 * @param pVM The VM handle.
1559 * @param pVCpu The VMCPU handle of the EMT.
1560 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1561 */
1562static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1563{
1564 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1565 bool *pfSuspended = (bool *)pvUser;
1566 NOREF(pVCpu);
1567
1568 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1569 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1570 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1571 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1572 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1573 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1574 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1575 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1576 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1577 if (rc == 1)
1578 rc = VERR_SSM_LIVE_POWERED_OFF;
1579 else if (rc == 2)
1580 rc = VERR_SSM_LIVE_FATAL_ERROR;
1581 else if (rc == 3)
1582 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1583 else if (rc == 4)
1584 {
1585 *pfSuspended = true;
1586 rc = VINF_SUCCESS;
1587 }
1588 else if (rc > 0)
1589 rc = VINF_SUCCESS;
1590 return rc;
1591}
1592
1593
1594/**
1595 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1596 *
1597 * @returns VBox status code.
1598 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1599 *
1600 * @param pVM The VM handle.
1601 * @param pSSM The handle of saved state operation.
1602 *
1603 * @thread EMT(0)
1604 */
1605static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1606{
1607 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1608 VM_ASSERT_EMT0(pVM);
1609
1610 /*
1611 * Advance the state and mark if VMR3Suspend was called.
1612 */
1613 int rc = VINF_SUCCESS;
1614 VMSTATE enmVMState = VMR3GetState(pVM);
1615 if (enmVMState == VMSTATE_SUSPENDED_LS)
1616 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1617 else
1618 {
1619 if (enmVMState != VMSTATE_SAVING)
1620 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1621 rc = VINF_SSM_LIVE_SUSPENDED;
1622 }
1623
1624 /*
1625 * Finish up and release the handle. Careful with the status codes.
1626 */
1627 int rc2 = SSMR3LiveDoStep2(pSSM);
1628 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1629 rc = rc2;
1630
1631 rc2 = SSMR3LiveDone(pSSM);
1632 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1633 rc = rc2;
1634
1635 /*
1636 * Advance to the final state and return.
1637 */
1638 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1639 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1640 return rc;
1641}
1642
1643
1644/**
1645 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1646 * SSMR3LiveSave.
1647 *
1648 * @returns VBox status code.
1649 *
1650 * @param pVM The VM handle.
1651 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1652 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1653 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1654 * @param pvStreamOpsUser The user argument to the stream methods.
1655 * @param enmAfter What to do afterwards.
1656 * @param pfnProgress Progress callback. Optional.
1657 * @param pvProgressUser User argument for the progress callback.
1658 * @param ppSSM Where to return the saved state handle in case of a
1659 * live snapshot scenario.
1660 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1661 *
1662 * @thread EMT
1663 */
1664static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1665 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM,
1666 bool fSkipStateChanges)
1667{
1668 int rc = VINF_SUCCESS;
1669
1670 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1671 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1672
1673 /*
1674 * Validate input.
1675 */
1676 AssertPtrNull(pszFilename);
1677 AssertPtrNull(pStreamOps);
1678 AssertPtr(pVM);
1679 Assert( enmAfter == SSMAFTER_DESTROY
1680 || enmAfter == SSMAFTER_CONTINUE
1681 || enmAfter == SSMAFTER_TELEPORT);
1682 AssertPtr(ppSSM);
1683 *ppSSM = NULL;
1684
1685 /*
1686 * Change the state and perform/start the saving.
1687 */
1688 if (!fSkipStateChanges)
1689 {
1690 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1691 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1692 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1693 }
1694 else
1695 {
1696 Assert(enmAfter != SSMAFTER_TELEPORT);
1697 rc = 1;
1698 }
1699
1700 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1701 {
1702 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1703 if (!fSkipStateChanges)
1704 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1705 }
1706 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1707 {
1708 Assert(!fSkipStateChanges);
1709 if (enmAfter == SSMAFTER_TELEPORT)
1710 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1711 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1712 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1713 /* (We're not subject to cancellation just yet.) */
1714 }
1715 else
1716 Assert(RT_FAILURE(rc));
1717 return rc;
1718}
1719
1720
1721/**
1722 * Common worker for VMR3Save and VMR3Teleport.
1723 *
1724 * @returns VBox status code.
1725 *
1726 * @param pVM The VM handle.
1727 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1728 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1729 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1730 * @param pvStreamOpsUser The user argument to the stream methods.
1731 * @param enmAfter What to do afterwards.
1732 * @param pfnProgress Progress callback. Optional.
1733 * @param pvProgressUser User argument for the progress callback.
1734 * @param pfSuspended Set if we suspended the VM.
1735 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1736 *
1737 * @thread Non-EMT
1738 */
1739static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1740 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1741 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended,
1742 bool fSkipStateChanges)
1743{
1744 /*
1745 * Request the operation in EMT(0).
1746 */
1747 PSSMHANDLE pSSM;
1748 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/,
1749 (PFNRT)vmR3Save, 10, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1750 enmAfter, pfnProgress, pvProgressUser, &pSSM, fSkipStateChanges);
1751 if ( RT_SUCCESS(rc)
1752 && pSSM)
1753 {
1754 Assert(!fSkipStateChanges);
1755
1756 /*
1757 * Live snapshot.
1758 *
1759 * The state handling here is kind of tricky, doing it on EMT(0) helps
1760 * a bit. See the VMSTATE diagram for details.
1761 */
1762 rc = SSMR3LiveDoStep1(pSSM);
1763 if (RT_SUCCESS(rc))
1764 {
1765 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1766 for (;;)
1767 {
1768 /* Try suspend the VM. */
1769 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1770 vmR3LiveDoSuspend, pfSuspended);
1771 if (rc != VERR_TRY_AGAIN)
1772 break;
1773
1774 /* Wait for the state to change. */
1775 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1776 }
1777 if (RT_SUCCESS(rc))
1778 rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1779 else
1780 {
1781 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1782 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc)); NOREF(rc2);
1783 }
1784 }
1785 else
1786 {
1787 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1788 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1789
1790 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1791 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1792 rc = rc2;
1793 }
1794 }
1795
1796 return rc;
1797}
1798
1799
1800/**
1801 * Save current VM state.
1802 *
1803 * Can be used for both saving the state and creating snapshots.
1804 *
1805 * When called for a VM in the Running state, the saved state is created live
1806 * and the VM is only suspended when the final part of the saving is preformed.
1807 * The VM state will not be restored to Running in this case and it's up to the
1808 * caller to call VMR3Resume if this is desirable. (The rational is that the
1809 * caller probably wish to reconfigure the disks before resuming the VM.)
1810 *
1811 * @returns VBox status code.
1812 *
1813 * @param pVM The VM which state should be saved.
1814 * @param pszFilename The name of the save state file.
1815 * @param pStreamOps The stream methods.
1816 * @param pvStreamOpsUser The user argument to the stream methods.
1817 * @param fContinueAfterwards Whether continue execution afterwards or not.
1818 * When in doubt, set this to true.
1819 * @param pfnProgress Progress callback. Optional.
1820 * @param pvUser User argument for the progress callback.
1821 * @param pfSuspended Set if we suspended the VM.
1822 *
1823 * @thread Non-EMT.
1824 * @vmstate Suspended or Running
1825 * @vmstateto Saving+Suspended or
1826 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1827 */
1828VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
1829{
1830 LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1831 pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1832
1833 /*
1834 * Validate input.
1835 */
1836 AssertPtr(pfSuspended);
1837 *pfSuspended = false;
1838 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1839 VM_ASSERT_OTHER_THREAD(pVM);
1840 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1841 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1842 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1843
1844 /*
1845 * Join paths with VMR3Teleport.
1846 */
1847 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1848 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1849 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1850 enmAfter, pfnProgress, pvUser, pfSuspended,
1851 false /* fSkipStateChanges */);
1852 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1853 return rc;
1854}
1855
1856/**
1857 * Save current VM state (used by FTM)
1858 *
1859 * Can be used for both saving the state and creating snapshots.
1860 *
1861 * When called for a VM in the Running state, the saved state is created live
1862 * and the VM is only suspended when the final part of the saving is preformed.
1863 * The VM state will not be restored to Running in this case and it's up to the
1864 * caller to call VMR3Resume if this is desirable. (The rational is that the
1865 * caller probably wish to reconfigure the disks before resuming the VM.)
1866 *
1867 * @returns VBox status code.
1868 *
1869 * @param pVM The VM which state should be saved.
1870 * @param pStreamOps The stream methods.
1871 * @param pvStreamOpsUser The user argument to the stream methods.
1872 * @param pfSuspended Set if we suspended the VM.
1873 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1874 *
1875 * @thread Any
1876 * @vmstate Suspended or Running
1877 * @vmstateto Saving+Suspended or
1878 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1879 */
1880VMMR3DECL(int) VMR3SaveFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended,
1881 bool fSkipStateChanges)
1882{
1883 LogFlow(("VMR3SaveFT: pVM=%p pStreamOps=%p pvSteamOpsUser=%p pfSuspended=%p\n",
1884 pVM, pStreamOps, pvStreamOpsUser, pfSuspended));
1885
1886 /*
1887 * Validate input.
1888 */
1889 AssertPtr(pfSuspended);
1890 *pfSuspended = false;
1891 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1892 AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
1893
1894 /*
1895 * Join paths with VMR3Teleport.
1896 */
1897 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1898 NULL, pStreamOps, pvStreamOpsUser,
1899 SSMAFTER_CONTINUE, NULL, NULL, pfSuspended,
1900 fSkipStateChanges);
1901 LogFlow(("VMR3SaveFT: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1902 return rc;
1903}
1904
1905
1906/**
1907 * Teleport the VM (aka live migration).
1908 *
1909 * @returns VBox status code.
1910 *
1911 * @param pVM The VM which state should be saved.
1912 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1913 * @param pStreamOps The stream methods.
1914 * @param pvStreamOpsUser The user argument to the stream methods.
1915 * @param pfnProgress Progress callback. Optional.
1916 * @param pvProgressUser User argument for the progress callback.
1917 * @param pfSuspended Set if we suspended the VM.
1918 *
1919 * @thread Non-EMT.
1920 * @vmstate Suspended or Running
1921 * @vmstateto Saving+Suspended or
1922 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1923 */
1924VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1925 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1926{
1927 LogFlow(("VMR3Teleport: pVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
1928 pVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1929
1930 /*
1931 * Validate input.
1932 */
1933 AssertPtr(pfSuspended);
1934 *pfSuspended = false;
1935 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1936 VM_ASSERT_OTHER_THREAD(pVM);
1937 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1938 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1939
1940 /*
1941 * Join paths with VMR3Save.
1942 */
1943 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
1944 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1945 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended,
1946 false /* fSkipStateChanges */);
1947 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1948 return rc;
1949}
1950
1951
1952
1953/**
1954 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
1955 *
1956 * @returns VBox status code.
1957 *
1958 * @param pVM The VM handle.
1959 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1960 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1961 * @param pvStreamOpsUser The user argument to the stream methods.
1962 * @param pfnProgress Progress callback. Optional.
1963 * @param pvUser User argument for the progress callback.
1964 * @param fTeleporting Indicates whether we're teleporting or not.
1965 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1966 *
1967 * @thread EMT.
1968 */
1969static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1970 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
1971 bool fSkipStateChanges)
1972{
1973 int rc = VINF_SUCCESS;
1974
1975 LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
1976 pVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
1977
1978 /*
1979 * Validate input (paranoia).
1980 */
1981 AssertPtr(pVM);
1982 AssertPtrNull(pszFilename);
1983 AssertPtrNull(pStreamOps);
1984 AssertPtrNull(pfnProgress);
1985
1986 if (!fSkipStateChanges)
1987 {
1988 /*
1989 * Change the state and perform the load.
1990 *
1991 * Always perform a relocation round afterwards to make sure hypervisor
1992 * selectors and such are correct.
1993 */
1994 rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1995 VMSTATE_LOADING, VMSTATE_CREATED,
1996 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1997 if (RT_FAILURE(rc))
1998 return rc;
1999 }
2000 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
2001
2002 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pVM);
2003 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
2004 if (RT_SUCCESS(rc))
2005 {
2006 VMR3Relocate(pVM, 0 /*offDelta*/);
2007 if (!fSkipStateChanges)
2008 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
2009 }
2010 else
2011 {
2012 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
2013 if (!fSkipStateChanges)
2014 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
2015
2016 if (cErrorsPriorToSave == VMR3GetErrorCount(pVM))
2017 rc = VMSetError(pVM, rc, RT_SRC_POS,
2018 N_("Unable to restore the virtual machine's saved state from '%s'. "
2019 "It may be damaged or from an older version of VirtualBox. "
2020 "Please discard the saved state before starting the virtual machine"),
2021 pszFilename);
2022 }
2023
2024 return rc;
2025}
2026
2027
2028/**
2029 * Loads a VM state into a newly created VM or a one that is suspended.
2030 *
2031 * To restore a saved state on VM startup, call this function and then resume
2032 * the VM instead of powering it on.
2033 *
2034 * @returns VBox status code.
2035 *
2036 * @param pVM The VM handle.
2037 * @param pszFilename The name of the save state file.
2038 * @param pfnProgress Progress callback. Optional.
2039 * @param pvUser User argument for the progress callback.
2040 *
2041 * @thread Any thread.
2042 * @vmstate Created, Suspended
2043 * @vmstateto Loading+Suspended
2044 */
2045VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
2046{
2047 LogFlow(("VMR3LoadFromFile: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
2048 pVM, pszFilename, pszFilename, pfnProgress, pvUser));
2049
2050 /*
2051 * Validate input.
2052 */
2053 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2054 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
2055
2056 /*
2057 * Forward the request to EMT(0). No need to setup a rendezvous here
2058 * since there is no execution taking place when this call is allowed.
2059 */
2060 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2061 pVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
2062 false /*fTeleporting*/, false /* fSkipStateChanges */);
2063 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
2064 return rc;
2065}
2066
2067
2068/**
2069 * VMR3LoadFromFile for arbitrary file streams.
2070 *
2071 * @returns VBox status code.
2072 *
2073 * @param pVM The VM handle.
2074 * @param pStreamOps The stream methods.
2075 * @param pvStreamOpsUser The user argument to the stream methods.
2076 * @param pfnProgress Progress callback. Optional.
2077 * @param pvProgressUser User argument for the progress callback.
2078 *
2079 * @thread Any thread.
2080 * @vmstate Created, Suspended
2081 * @vmstateto Loading+Suspended
2082 */
2083VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2084 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
2085{
2086 LogFlow(("VMR3LoadFromStream: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
2087 pVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
2088
2089 /*
2090 * Validate input.
2091 */
2092 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2093 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2094
2095 /*
2096 * Forward the request to EMT(0). No need to setup a rendezvous here
2097 * since there is no execution taking place when this call is allowed.
2098 */
2099 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2100 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
2101 true /*fTeleporting*/, false /* fSkipStateChanges */);
2102 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2103 return rc;
2104}
2105
2106
2107/**
2108 * VMR3LoadFromFileFT for arbitrary file streams.
2109 *
2110 * @returns VBox status code.
2111 *
2112 * @param pVM The VM handle.
2113 * @param pStreamOps The stream methods.
2114 * @param pvStreamOpsUser The user argument to the stream methods.
2115 * @param pfnProgress Progress callback. Optional.
2116 * @param pvProgressUser User argument for the progress callback.
2117 *
2118 * @thread Any thread.
2119 * @vmstate Created, Suspended
2120 * @vmstateto Loading+Suspended
2121 */
2122VMMR3DECL(int) VMR3LoadFromStreamFT(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
2123{
2124 LogFlow(("VMR3LoadFromStreamFT: pVM=%p pStreamOps=%p pvStreamOpsUser=%p\n",
2125 pVM, pStreamOps, pvStreamOpsUser));
2126
2127 /*
2128 * Validate input.
2129 */
2130 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2131 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2132
2133 /*
2134 * Forward the request to EMT(0). No need to setup a rendezvous here
2135 * since there is no execution taking place when this call is allowed.
2136 */
2137 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2138 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
2139 true /*fTeleporting*/, true /* fSkipStateChanges */);
2140 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2141 return rc;
2142}
2143
2144/**
2145 * EMT rendezvous worker for VMR3PowerOff.
2146 *
2147 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
2148 * return code, see FNVMMEMTRENDEZVOUS.)
2149 *
2150 * @param pVM The VM handle.
2151 * @param pVCpu The VMCPU handle of the EMT.
2152 * @param pvUser Ignored.
2153 */
2154static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
2155{
2156 LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
2157 Assert(!pvUser); NOREF(pvUser);
2158
2159 /*
2160 * The first EMT thru here will change the state to PoweringOff.
2161 */
2162 if (pVCpu->idCpu == pVM->cCpus - 1)
2163 {
2164 int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
2165 VMSTATE_POWERING_OFF, VMSTATE_RUNNING, /* 1 */
2166 VMSTATE_POWERING_OFF, VMSTATE_SUSPENDED, /* 2 */
2167 VMSTATE_POWERING_OFF, VMSTATE_DEBUGGING, /* 3 */
2168 VMSTATE_POWERING_OFF, VMSTATE_LOAD_FAILURE, /* 4 */
2169 VMSTATE_POWERING_OFF, VMSTATE_GURU_MEDITATION, /* 5 */
2170 VMSTATE_POWERING_OFF, VMSTATE_FATAL_ERROR, /* 6 */
2171 VMSTATE_POWERING_OFF, VMSTATE_CREATED, /* 7 */ /** @todo update the diagram! */
2172 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS, /* 8 */
2173 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS, /* 9 */
2174 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
2175 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS); /* 11 */
2176 if (RT_FAILURE(rc))
2177 return rc;
2178 if (rc >= 7)
2179 SSMR3Cancel(pVM);
2180 }
2181
2182 /*
2183 * Check the state.
2184 */
2185 VMSTATE enmVMState = VMR3GetState(pVM);
2186 AssertMsgReturn( enmVMState == VMSTATE_POWERING_OFF
2187 || enmVMState == VMSTATE_POWERING_OFF_LS,
2188 ("%s\n", VMR3GetStateName(enmVMState)),
2189 VERR_VM_INVALID_VM_STATE);
2190
2191 /*
2192 * EMT(0) does the actual power off work here *after* all the other EMTs
2193 * have been thru and entered the STOPPED state.
2194 */
2195 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
2196 if (pVCpu->idCpu == 0)
2197 {
2198 /*
2199 * For debugging purposes, we will log a summary of the guest state at this point.
2200 */
2201 if (enmVMState != VMSTATE_GURU_MEDITATION)
2202 {
2203 /** @todo SMP support? */
2204 /** @todo make the state dumping at VMR3PowerOff optional. */
2205 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2206 RTLogRelPrintf("****************** Guest state at power off ******************\n");
2207 DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
2208 RTLogRelPrintf("***\n");
2209 DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
2210 RTLogRelPrintf("***\n");
2211 DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
2212 RTLogRelPrintf("***\n");
2213 DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
2214 /** @todo dump guest call stack. */
2215#if 1 // "temporary" while debugging #1589
2216 RTLogRelPrintf("***\n");
2217 uint32_t esp = CPUMGetGuestESP(pVCpu);
2218 if ( CPUMGetGuestSS(pVCpu) == 0
2219 && esp < _64K)
2220 {
2221 uint8_t abBuf[PAGE_SIZE];
2222 RTLogRelPrintf("***\n"
2223 "ss:sp=0000:%04x ", esp);
2224 uint32_t Start = esp & ~(uint32_t)63;
2225 int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
2226 if (RT_SUCCESS(rc))
2227 RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
2228 "%.*Rhxd\n",
2229 Start, Start + 0x100 - 1,
2230 0x100, abBuf);
2231 else
2232 RTLogRelPrintf("rc=%Rrc\n", rc);
2233
2234 /* grub ... */
2235 if (esp < 0x2000 && esp > 0x1fc0)
2236 {
2237 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
2238 if (RT_SUCCESS(rc))
2239 RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
2240 "%.*Rhxd\n",
2241 0x800, abBuf);
2242 }
2243 /* microsoft cdrom hang ... */
2244 if (true)
2245 {
2246 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
2247 if (RT_SUCCESS(rc))
2248 RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
2249 "%.*Rhxd\n",
2250 0x200, abBuf);
2251 }
2252 }
2253#endif
2254 RTLogRelSetBuffering(fOldBuffered);
2255 RTLogRelPrintf("************** End of Guest state at power off ***************\n");
2256 }
2257
2258 /*
2259 * Perform the power off notifications and advance the state to
2260 * Off or OffLS.
2261 */
2262 PDMR3PowerOff(pVM);
2263
2264 PUVM pUVM = pVM->pUVM;
2265 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2266 enmVMState = pVM->enmVMState;
2267 if (enmVMState == VMSTATE_POWERING_OFF_LS)
2268 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
2269 else
2270 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF);
2271 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2272 }
2273 return VINF_EM_OFF;
2274}
2275
2276
2277/**
2278 * Power off the VM.
2279 *
2280 * @returns VBox status code. When called on EMT, this will be a strict status
2281 * code that has to be propagated up the call stack.
2282 *
2283 * @param pVM The handle of the VM to be powered off.
2284 *
2285 * @thread Any thread.
2286 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2287 * @vmstateto Off or OffLS
2288 */
2289VMMR3DECL(int) VMR3PowerOff(PVM pVM)
2290{
2291 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
2292 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2293
2294 /*
2295 * Gather all the EMTs to make sure there are no races before
2296 * changing the VM state.
2297 */
2298 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2299 vmR3PowerOff, NULL);
2300 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2301 return rc;
2302}
2303
2304
2305/**
2306 * Destroys the VM.
2307 *
2308 * The VM must be powered off (or never really powered on) to call this
2309 * function. The VM handle is destroyed and can no longer be used up successful
2310 * return.
2311 *
2312 * @returns VBox status code.
2313 *
2314 * @param pVM The handle of the VM which should be destroyed.
2315 *
2316 * @thread Any none emulation thread.
2317 * @vmstate Off, Created
2318 * @vmstateto N/A
2319 */
2320VMMR3DECL(int) VMR3Destroy(PVM pVM)
2321{
2322 LogFlow(("VMR3Destroy: pVM=%p\n", pVM));
2323
2324 /*
2325 * Validate input.
2326 */
2327 if (!pVM)
2328 return VERR_INVALID_VM_HANDLE;
2329 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2330 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2331
2332 /*
2333 * Change VM state to destroying and unlink the VM.
2334 */
2335 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2336 if (RT_FAILURE(rc))
2337 return rc;
2338
2339 /** @todo lock this when we start having multiple machines in a process... */
2340 PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
2341 if (g_pUVMsHead == pUVM)
2342 g_pUVMsHead = pUVM->pNext;
2343 else
2344 {
2345 PUVM pPrev = g_pUVMsHead;
2346 while (pPrev && pPrev->pNext != pUVM)
2347 pPrev = pPrev->pNext;
2348 AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);
2349
2350 pPrev->pNext = pUVM->pNext;
2351 }
2352 pUVM->pNext = NULL;
2353
2354 /*
2355 * Notify registered at destruction listeners.
2356 */
2357 vmR3AtDtor(pVM);
2358
2359 /*
2360 * Call vmR3Destroy on each of the EMTs ending with EMT(0) doing the bulk
2361 * of the cleanup.
2362 */
2363 /* vmR3Destroy on all EMTs, ending with EMT(0). */
2364 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2365 AssertLogRelRC(rc);
2366
2367 /* Wait for EMTs and destroy the UVM. */
2368 vmR3DestroyUVM(pUVM, 30000);
2369
2370 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2371 return VINF_SUCCESS;
2372}
2373
2374
2375/**
2376 * Internal destruction worker.
2377 *
2378 * This is either called from VMR3Destroy via VMR3ReqCallU or from
2379 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
2380 * VMR3Destroy().
2381 *
2382 * When called on EMT(0), it will performed the great bulk of the destruction.
2383 * When called on the other EMTs, they will do nothing and the whole purpose is
2384 * to return VINF_EM_TERMINATE so they break out of their run loops.
2385 *
2386 * @returns VINF_EM_TERMINATE.
2387 * @param pVM The VM handle.
2388 */
2389DECLCALLBACK(int) vmR3Destroy(PVM pVM)
2390{
2391 PUVM pUVM = pVM->pUVM;
2392 PVMCPU pVCpu = VMMGetCpu(pVM);
2393 Assert(pVCpu);
2394 LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
2395
2396 /*
2397 * Only VCPU 0 does the full cleanup (last).
2398 */
2399 if (pVCpu->idCpu == 0)
2400 {
2401 /*
2402 * Dump statistics to the log.
2403 */
2404#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
2405 RTLogFlags(NULL, "nodisabled nobuffered");
2406#endif
2407#ifdef VBOX_WITH_STATISTICS
2408 STAMR3Dump(pVM, "*");
2409#else
2410 LogRel(("************************* Statistics *************************\n"));
2411 STAMR3DumpToReleaseLog(pVM, "*");
2412 LogRel(("********************* End of statistics **********************\n"));
2413#endif
2414
2415 /*
2416 * Destroy the VM components.
2417 */
2418 int rc = TMR3Term(pVM);
2419 AssertRC(rc);
2420#ifdef VBOX_WITH_DEBUGGER
2421 rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
2422 pUVM->vm.s.pvDBGC = NULL;
2423#endif
2424 AssertRC(rc);
2425 rc = FTMR3Term(pVM);
2426 AssertRC(rc);
2427 rc = DBGFR3Term(pVM);
2428 AssertRC(rc);
2429 rc = PDMR3Term(pVM);
2430 AssertRC(rc);
2431 rc = IEMR3Term(pVM);
2432 AssertRC(rc);
2433 rc = EMR3Term(pVM);
2434 AssertRC(rc);
2435 rc = IOMR3Term(pVM);
2436 AssertRC(rc);
2437 rc = CSAMR3Term(pVM);
2438 AssertRC(rc);
2439 rc = PATMR3Term(pVM);
2440 AssertRC(rc);
2441 rc = TRPMR3Term(pVM);
2442 AssertRC(rc);
2443 rc = SELMR3Term(pVM);
2444 AssertRC(rc);
2445#ifdef VBOX_WITH_REM
2446 rc = REMR3Term(pVM);
2447 AssertRC(rc);
2448#endif
2449 rc = HWACCMR3Term(pVM);
2450 AssertRC(rc);
2451 rc = PGMR3Term(pVM);
2452 AssertRC(rc);
2453 rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
2454 AssertRC(rc);
2455 rc = CPUMR3Term(pVM);
2456 AssertRC(rc);
2457 SSMR3Term(pVM);
2458 rc = PDMR3CritSectTerm(pVM);
2459 AssertRC(rc);
2460 rc = MMR3Term(pVM);
2461 AssertRC(rc);
2462
2463 /*
2464 * We're done, tell the other EMTs to quit.
2465 */
2466 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2467 ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2468 LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
2469 }
2470 return VINF_EM_TERMINATE;
2471}
2472
2473
2474/**
2475 * Destroys the UVM portion.
2476 *
2477 * This is called as the final step in the VM destruction or as the cleanup
2478 * in case of a creation failure.
2479 *
2480 * @param pVM VM Handle.
2481 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2482 * threads.
2483 */
2484static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2485{
2486 /*
2487 * Signal termination of each the emulation threads and
2488 * wait for them to complete.
2489 */
2490 /* Signal them. */
2491 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2492 if (pUVM->pVM)
2493 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2494 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2495 {
2496 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2497 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2498 }
2499
2500 /* Wait for them. */
2501 uint64_t NanoTS = RTTimeNanoTS();
2502 RTTHREAD hSelf = RTThreadSelf();
2503 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2504 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2505 {
2506 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2507 if ( hThread != NIL_RTTHREAD
2508 && hThread != hSelf)
2509 {
2510 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2511 int rc2 = RTThreadWait(hThread,
2512 cMilliesElapsed < cMilliesEMTWait
2513 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2514 : 2000,
2515 NULL);
2516 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2517 rc2 = RTThreadWait(hThread, 1000, NULL);
2518 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2519 if (RT_SUCCESS(rc2))
2520 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2521 }
2522 }
2523
2524 /* Cleanup the semaphores. */
2525 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2526 {
2527 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2528 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2529 }
2530
2531 /*
2532 * Free the event semaphores associated with the request packets.
2533 */
2534 unsigned cReqs = 0;
2535 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2536 {
2537 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2538 pUVM->vm.s.apReqFree[i] = NULL;
2539 for (; pReq; pReq = pReq->pNext, cReqs++)
2540 {
2541 pReq->enmState = VMREQSTATE_INVALID;
2542 RTSemEventDestroy(pReq->EventSem);
2543 }
2544 }
2545 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2546
2547 /*
2548 * Kill all queued requests. (There really shouldn't be any!)
2549 */
2550 for (unsigned i = 0; i < 10; i++)
2551 {
2552 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pPriorityReqs, NULL, PVMREQ);
2553 if (!pReqHead)
2554 {
2555 pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pNormalReqs, NULL, PVMREQ);
2556 if (!pReqHead)
2557 break;
2558 }
2559 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2560
2561 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2562 {
2563 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
2564 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2565 RTSemEventSignal(pReq->EventSem);
2566 RTThreadSleep(2);
2567 RTSemEventDestroy(pReq->EventSem);
2568 }
2569 /* give them a chance to respond before we free the request memory. */
2570 RTThreadSleep(32);
2571 }
2572
2573 /*
2574 * Now all queued VCPU requests (again, there shouldn't be any).
2575 */
2576 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2577 {
2578 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2579
2580 for (unsigned i = 0; i < 10; i++)
2581 {
2582 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pPriorityReqs, NULL, PVMREQ);
2583 if (!pReqHead)
2584 {
2585 pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pNormalReqs, NULL, PVMREQ);
2586 if (!pReqHead)
2587 break;
2588 }
2589 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2590
2591 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2592 {
2593 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
2594 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2595 RTSemEventSignal(pReq->EventSem);
2596 RTThreadSleep(2);
2597 RTSemEventDestroy(pReq->EventSem);
2598 }
2599 /* give them a chance to respond before we free the request memory. */
2600 RTThreadSleep(32);
2601 }
2602 }
2603
2604 /*
2605 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2606 */
2607 PDMR3TermUVM(pUVM);
2608
2609 /*
2610 * Terminate the support library if initialized.
2611 */
2612 if (pUVM->vm.s.pSession)
2613 {
2614 int rc = SUPR3Term(false /*fForced*/);
2615 AssertRC(rc);
2616 pUVM->vm.s.pSession = NIL_RTR0PTR;
2617 }
2618
2619 /*
2620 * Release the UVM structure reference.
2621 */
2622 VMR3ReleaseUVM(pUVM);
2623
2624 /*
2625 * Clean up and flush logs.
2626 */
2627#ifdef LOG_ENABLED
2628 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2629#endif
2630 RTLogFlush(NULL);
2631}
2632
2633
2634/**
2635 * Enumerates the VMs in this process.
2636 *
2637 * @returns Pointer to the next VM.
2638 * @returns NULL when no more VMs.
2639 * @param pVMPrev The previous VM
2640 * Use NULL to start the enumeration.
2641 */
2642VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2643{
2644 /*
2645 * This is quick and dirty. It has issues with VM being
2646 * destroyed during the enumeration.
2647 */
2648 PUVM pNext;
2649 if (pVMPrev)
2650 pNext = pVMPrev->pUVM->pNext;
2651 else
2652 pNext = g_pUVMsHead;
2653 return pNext ? pNext->pVM : NULL;
2654}
2655
2656
2657/**
2658 * Registers an at VM destruction callback.
2659 *
2660 * @returns VBox status code.
2661 * @param pfnAtDtor Pointer to callback.
2662 * @param pvUser User argument.
2663 */
2664VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2665{
2666 /*
2667 * Check if already registered.
2668 */
2669 VM_ATDTOR_LOCK();
2670 PVMATDTOR pCur = g_pVMAtDtorHead;
2671 while (pCur)
2672 {
2673 if (pfnAtDtor == pCur->pfnAtDtor)
2674 {
2675 VM_ATDTOR_UNLOCK();
2676 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2677 return VERR_INVALID_PARAMETER;
2678 }
2679
2680 /* next */
2681 pCur = pCur->pNext;
2682 }
2683 VM_ATDTOR_UNLOCK();
2684
2685 /*
2686 * Allocate new entry.
2687 */
2688 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2689 if (!pVMAtDtor)
2690 return VERR_NO_MEMORY;
2691
2692 VM_ATDTOR_LOCK();
2693 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2694 pVMAtDtor->pvUser = pvUser;
2695 pVMAtDtor->pNext = g_pVMAtDtorHead;
2696 g_pVMAtDtorHead = pVMAtDtor;
2697 VM_ATDTOR_UNLOCK();
2698
2699 return VINF_SUCCESS;
2700}
2701
2702
2703/**
2704 * Deregisters an at VM destruction callback.
2705 *
2706 * @returns VBox status code.
2707 * @param pfnAtDtor Pointer to callback.
2708 */
2709VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2710{
2711 /*
2712 * Find it, unlink it and free it.
2713 */
2714 VM_ATDTOR_LOCK();
2715 PVMATDTOR pPrev = NULL;
2716 PVMATDTOR pCur = g_pVMAtDtorHead;
2717 while (pCur)
2718 {
2719 if (pfnAtDtor == pCur->pfnAtDtor)
2720 {
2721 if (pPrev)
2722 pPrev->pNext = pCur->pNext;
2723 else
2724 g_pVMAtDtorHead = pCur->pNext;
2725 pCur->pNext = NULL;
2726 VM_ATDTOR_UNLOCK();
2727
2728 RTMemFree(pCur);
2729 return VINF_SUCCESS;
2730 }
2731
2732 /* next */
2733 pPrev = pCur;
2734 pCur = pCur->pNext;
2735 }
2736 VM_ATDTOR_UNLOCK();
2737
2738 return VERR_INVALID_PARAMETER;
2739}
2740
2741
2742/**
2743 * Walks the list of at VM destructor callbacks.
2744 * @param pVM The VM which is about to be destroyed.
2745 */
2746static void vmR3AtDtor(PVM pVM)
2747{
2748 /*
2749 * Find it, unlink it and free it.
2750 */
2751 VM_ATDTOR_LOCK();
2752 for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
2753 pCur->pfnAtDtor(pVM, pCur->pvUser);
2754 VM_ATDTOR_UNLOCK();
2755}
2756
2757
2758/**
2759 * Worker which checks integrity of some internal structures.
2760 * This is yet another attempt to track down that AVL tree crash.
2761 */
2762static void vmR3CheckIntegrity(PVM pVM)
2763{
2764#ifdef VBOX_STRICT
2765 int rc = PGMR3CheckIntegrity(pVM);
2766 AssertReleaseRC(rc);
2767#endif
2768}
2769
2770
2771/**
2772 * EMT rendezvous worker for VMR3Reset.
2773 *
2774 * This is called by the emulation threads as a response to the reset request
2775 * issued by VMR3Reset().
2776 *
2777 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2778 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2779 *
2780 * @param pVM The VM handle.
2781 * @param pVCpu The VMCPU handle of the EMT.
2782 * @param pvUser Ignored.
2783 */
2784static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2785{
2786 Assert(!pvUser); NOREF(pvUser);
2787
2788 /*
2789 * The first EMT will try change the state to resetting. If this fails,
2790 * we won't get called for the other EMTs.
2791 */
2792 if (pVCpu->idCpu == pVM->cCpus - 1)
2793 {
2794 int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
2795 VMSTATE_RESETTING, VMSTATE_RUNNING,
2796 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
2797 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
2798 if (RT_FAILURE(rc))
2799 return rc;
2800 }
2801
2802 /*
2803 * Check the state.
2804 */
2805 VMSTATE enmVMState = VMR3GetState(pVM);
2806 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2807 || enmVMState == VMSTATE_RESETTING_LS,
2808 ("%s\n", VMR3GetStateName(enmVMState)),
2809 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
2810
2811 /*
2812 * EMT(0) does the full cleanup *after* all the other EMTs has been
2813 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2814 *
2815 * Because there are per-cpu reset routines and order may/is important,
2816 * the following sequence looks a bit ugly...
2817 */
2818 if (pVCpu->idCpu == 0)
2819 vmR3CheckIntegrity(pVM);
2820
2821 /* Reset the VCpu state. */
2822 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2823
2824 /* Clear all pending forced actions. */
2825 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2826
2827 /*
2828 * Reset the VM components.
2829 */
2830 if (pVCpu->idCpu == 0)
2831 {
2832 PATMR3Reset(pVM);
2833 CSAMR3Reset(pVM);
2834 PGMR3Reset(pVM); /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
2835 * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
2836/** @todo PGMR3Reset should be called after PDMR3Reset really, because we'll trash OS <-> hardware
2837 * communication structures residing in RAM when done in the other order. I.e. the device must be
2838 * quiesced first, then we clear the memory and plan tables. Probably have to make these things
2839 * explicit in some way, some memory setup pass or something.
2840 * (Example: DevAHCI may assert if memory is zeroed before it has read the FIS.)
2841 *
2842 * @bugref{4467}
2843 */
2844 PDMR3Reset(pVM);
2845 SELMR3Reset(pVM);
2846 TRPMR3Reset(pVM);
2847#ifdef VBOX_WITH_REM
2848 REMR3Reset(pVM);
2849#endif
2850 IOMR3Reset(pVM);
2851 CPUMR3Reset(pVM);
2852 }
2853 CPUMR3ResetCpu(pVCpu);
2854 if (pVCpu->idCpu == 0)
2855 {
2856 TMR3Reset(pVM);
2857 EMR3Reset(pVM);
2858 HWACCMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2859
2860#ifdef LOG_ENABLED
2861 /*
2862 * Debug logging.
2863 */
2864 RTLogPrintf("\n\nThe VM was reset:\n");
2865 DBGFR3Info(pVM, "cpum", "verbose", NULL);
2866#endif
2867
2868 /*
2869 * Since EMT(0) is the last to go thru here, it will advance the state.
2870 * When a live save is active, we will move on to SuspendingLS but
2871 * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
2872 */
2873 PUVM pUVM = pVM->pUVM;
2874 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2875 enmVMState = pVM->enmVMState;
2876 if (enmVMState == VMSTATE_RESETTING)
2877 {
2878 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2879 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
2880 else
2881 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING);
2882 }
2883 else
2884 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS);
2885 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2886
2887 vmR3CheckIntegrity(pVM);
2888
2889 /*
2890 * Do the suspend bit as well.
2891 * It only requires some EMT(0) work at present.
2892 */
2893 if (enmVMState != VMSTATE_RESETTING)
2894 {
2895 vmR3SuspendDoWork(pVM);
2896 vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
2897 }
2898 }
2899
2900 return enmVMState == VMSTATE_RESETTING
2901 ? VINF_EM_RESET
2902 : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
2903}
2904
2905
2906/**
2907 * Reset the current VM.
2908 *
2909 * @returns VBox status code.
2910 * @param pVM VM to reset.
2911 */
2912VMMR3DECL(int) VMR3Reset(PVM pVM)
2913{
2914 LogFlow(("VMR3Reset:\n"));
2915 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2916
2917 /*
2918 * Gather all the EMTs to make sure there are no races before
2919 * changing the VM state.
2920 */
2921 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2922 vmR3Reset, NULL);
2923 LogFlow(("VMR3Reset: returns %Rrc\n", rc));
2924 return rc;
2925}
2926
2927
2928/**
2929 * Gets the user mode VM structure pointer given the VM handle.
2930 *
2931 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
2932 * invalid (asserted).
2933 * @param pVM The VM handle.
2934 * @sa VMR3GetVM, VMR3RetainUVM
2935 */
2936VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
2937{
2938 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
2939 return pVM->pUVM;
2940}
2941
2942
2943/**
2944 * Gets the shared VM structure pointer given the pointer to the user mode VM
2945 * structure.
2946 *
2947 * @returns Pointer to the shared VM structure.
2948 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
2949 * is currently associated with it.
2950 * @param pUVM The user mode VM handle.
2951 * @sa VMR3GetUVM
2952 */
2953VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
2954{
2955 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2956 return pUVM->pVM;
2957}
2958
2959
2960/**
2961 * Retain the user mode VM handle.
2962 *
2963 * @returns Reference count.
2964 * UINT32_MAX if @a pUVM is invalid.
2965 *
2966 * @param pUVM The user mode VM handle.
2967 * @sa VMR3ReleaseUVM
2968 */
2969VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
2970{
2971 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2972 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
2973 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
2974 return cRefs;
2975}
2976
2977
/**
 * Does the final release of the UVM structure.
 *
 * Called by VMR3ReleaseUVM when the reference count reaches zero.  Tears
 * down the remaining per-UVM services and frees the structure itself.
 *
 * @param   pUVM                The user mode VM handle.
 */
static void vmR3DoReleaseUVM(PUVM pUVM)
{
    /*
     * Free the UVM.
     */
    Assert(!pUVM->pVM); /* The shared VM structure must already be gone. */

    MMR3TermUVM(pUVM);
    STAMR3TermUVM(pUVM);

    /* Invalidate the magic so stale handles are caught by the UVM asserts,
       free the TLS index, and finally free the page-based UVM allocation
       (sized to include the trailing per-CPU array). */
    ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
    RTTlsFree(pUVM->vm.s.idxTLS);
    RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
}
2997
2998
/**
 * Releases a reference to the user mode VM handle.
 *
 * @returns The new reference count, 0 if destroyed.
 *          UINT32_MAX if @a pUVM is invalid.
 *
 * @param   pUVM            The user mode VM handle.
 * @sa      VMR3RetainUVM
 */
VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
{
    if (!pUVM)
        return 0; /* NULL is tolerated, like free(). */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
    uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
    if (!cRefs)
        vmR3DoReleaseUVM(pUVM); /* Last reference - tear the structure down. */
    else
        AssertMsg(cRefs < _64K, ("%u\n", cRefs)); /* catch underflow wrap */
    return cRefs;
}
3020
3021
3022/**
3023 * Gets the VM name.
3024 *
3025 * @returns Pointer to a read-only string containing the name. NULL if called
3026 * too early.
3027 * @param pUVM The user mode VM handle.
3028 */
3029VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
3030{
3031 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3032 return pUVM->vm.s.pszName;
3033}
3034
3035
3036/**
3037 * Gets the VM UUID.
3038 *
3039 * @returns pUuid on success, NULL on failure.
3040 * @param pUVM The user mode VM handle.
3041 * @param pUuid Where to store the UUID.
3042 */
3043VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
3044{
3045 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3046 AssertPtrReturn(pUuid, NULL);
3047
3048 *pUuid = pUVM->vm.s.Uuid;
3049 return pUuid;
3050}
3051
3052
3053/**
3054 * Gets the current VM state.
3055 *
3056 * @returns The current VM state.
3057 * @param pVM VM handle.
3058 * @thread Any
3059 */
3060VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
3061{
3062 AssertMsgReturn(RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE), ("%p\n", pVM), VMSTATE_TERMINATED);
3063 VMSTATE enmVMState = pVM->enmVMState;
3064 return enmVMState >= VMSTATE_CREATING && enmVMState <= VMSTATE_TERMINATED ? enmVMState : VMSTATE_TERMINATED;
3065}
3066
3067
3068/**
3069 * Gets the current VM state.
3070 *
3071 * @returns The current VM state.
3072 * @param pUVM The user-mode VM handle.
3073 * @thread Any
3074 */
3075VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
3076{
3077 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
3078 if (RT_UNLIKELY(!pUVM->pVM))
3079 return VMSTATE_TERMINATED;
3080 return pUVM->pVM->enmVMState;
3081}
3082
3083
3084/**
3085 * Gets the state name string for a VM state.
3086 *
3087 * @returns Pointer to the state name. (readonly)
3088 * @param enmState The state.
3089 */
3090VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
3091{
3092 switch (enmState)
3093 {
3094 case VMSTATE_CREATING: return "CREATING";
3095 case VMSTATE_CREATED: return "CREATED";
3096 case VMSTATE_LOADING: return "LOADING";
3097 case VMSTATE_POWERING_ON: return "POWERING_ON";
3098 case VMSTATE_RESUMING: return "RESUMING";
3099 case VMSTATE_RUNNING: return "RUNNING";
3100 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
3101 case VMSTATE_RUNNING_FT: return "RUNNING_FT";
3102 case VMSTATE_RESETTING: return "RESETTING";
3103 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
3104 case VMSTATE_SUSPENDED: return "SUSPENDED";
3105 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
3106 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
3107 case VMSTATE_SUSPENDING: return "SUSPENDING";
3108 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
3109 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
3110 case VMSTATE_SAVING: return "SAVING";
3111 case VMSTATE_DEBUGGING: return "DEBUGGING";
3112 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
3113 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
3114 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
3115 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
3116 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
3117 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
3118 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
3119 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
3120 case VMSTATE_OFF: return "OFF";
3121 case VMSTATE_OFF_LS: return "OFF_LS";
3122 case VMSTATE_DESTROYING: return "DESTROYING";
3123 case VMSTATE_TERMINATED: return "TERMINATED";
3124
3125 default:
3126 AssertMsgFailed(("Unknown state %d\n", enmState));
3127 return "Unknown!\n";
3128 }
3129}
3130
3131
/**
 * Validates the state transition in strict builds.
 *
 * @returns true if valid, false if not.
 *
 * @param   enmStateOld         The old (current) state.
 * @param   enmStateNew         The proposed new state.
 *
 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
 *          diagram (under State Machine Diagram).
 *
 * @remarks In non-strict builds the whole switch is compiled out and the
 *          function unconditionally returns true.
 */
static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
{
#ifdef VBOX_STRICT
    /* Each case enumerates the complete set of states that may legally follow
       enmStateOld; anything else asserts and returns false. */
    switch (enmStateOld)
    {
        case VMSTATE_CREATING:
            AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_CREATED:
            AssertMsgReturn(   enmStateNew == VMSTATE_LOADING
                            || enmStateNew == VMSTATE_POWERING_ON
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_LOADING:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_LOAD_FAILURE
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_ON:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESUMING:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_RESETTING
                            || enmStateNew == VMSTATE_RUNNING_LS
                            || enmStateNew == VMSTATE_RUNNING_FT
                            || enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_GURU_MEDITATION
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF_LS
                            || enmStateNew == VMSTATE_SUSPENDING_LS
                            || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
                            || enmStateNew == VMSTATE_RESETTING_LS
                            || enmStateNew == VMSTATE_RUNNING
                            || enmStateNew == VMSTATE_DEBUGGING_LS
                            || enmStateNew == VMSTATE_FATAL_ERROR_LS
                            || enmStateNew == VMSTATE_GURU_MEDITATION_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING_FT:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_GURU_MEDITATION
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESETTING:
            AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESETTING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING:
            AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_SUSPENDED_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING_EXT_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_SAVING
                            || enmStateNew == VMSTATE_RESETTING
                            || enmStateNew == VMSTATE_RESUMING
                            || enmStateNew == VMSTATE_LOADING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_SAVING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED_EXT_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_SAVING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SAVING:
            AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DEBUGGING:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DEBUGGING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_RUNNING_LS
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_OFF:
            AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_OFF_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_OFF:
            AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_OFF_LS:
            AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_FATAL_ERROR:
            AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_FATAL_ERROR_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_GURU_MEDITATION:
            AssertMsgReturn(   enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_GURU_MEDITATION_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_GURU_MEDITATION
                            || enmStateNew == VMSTATE_DEBUGGING_LS
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_LOAD_FAILURE:
            AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DESTROYING:
            AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        /* TERMINATED is final: no transition out of it is ever valid. */
        case VMSTATE_TERMINATED:
        default:
            AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;
    }
#endif /* VBOX_STRICT */
    return true;
}
3328
3329
3330/**
3331 * Does the state change callouts.
3332 *
3333 * The caller owns the AtStateCritSect.
3334 *
3335 * @param pVM The VM handle.
3336 * @param pUVM The UVM handle.
3337 * @param enmStateNew The New state.
3338 * @param enmStateOld The old state.
3339 */
3340static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3341{
3342 LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3343
3344 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3345 {
3346 pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
3347 if ( enmStateNew != VMSTATE_DESTROYING
3348 && pVM->enmVMState == VMSTATE_DESTROYING)
3349 break;
3350 AssertMsg(pVM->enmVMState == enmStateNew,
3351 ("You are not allowed to change the state while in the change callback, except "
3352 "from destroying the VM. There are restrictions in the way the state changes "
3353 "are propagated up to the EM execution loop and it makes the program flow very "
3354 "difficult to follow. (%s, expected %s, old %s)\n",
3355 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3356 VMR3GetStateName(enmStateOld)));
3357 }
3358}
3359
3360
/**
 * Sets the current VM state, with the AtStatCritSect already entered.
 *
 * The caller must own pUVM->vm.s.AtStateCritSect.
 *
 * @param   pVM                 The VM handle.
 * @param   pUVM                The UVM handle.
 * @param   enmStateNew         The new state.
 * @param   enmStateOld         The old state.
 */
static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
{
    vmR3ValidateStateTransition(enmStateOld, enmStateNew);

    AssertMsg(pVM->enmVMState == enmStateOld,
              ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
    /* Record the previous state so callers like vmR3Reset can decide whether
       to return to RUNNING or SUSPENDED afterwards. */
    pUVM->vm.s.enmPrevVMState = enmStateOld;
    pVM->enmVMState = enmStateNew;
    /* The state is consistent again; clear the check flag before notifying. */
    VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);

    vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
}
3381
3382
/**
 * Sets the current VM state.
 *
 * Takes the AtStateCritSect and forwards to vmR3SetStateLocked.
 *
 * @param   pVM             VM handle.
 * @param   enmStateNew     The new state.
 * @param   enmStateOld     The old state (for asserting only).
 */
static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
{
    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);

    AssertMsg(pVM->enmVMState == enmStateOld,
              ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
    /* Deliberately passes the *actual* current state rather than enmStateOld,
       so release builds stay internally consistent even if the assertion
       above would have fired. */
    vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);

    RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
}
3401
3402
3403/**
3404 * Tries to perform a state transition.
3405 *
3406 * @returns The 1-based ordinal of the succeeding transition.
3407 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3408 *
3409 * @param pVM The VM handle.
3410 * @param pszWho Who is trying to change it.
3411 * @param cTransitions The number of transitions in the ellipsis.
3412 * @param ... Transition pairs; new, old.
3413 */
3414static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3415{
3416 va_list va;
3417 VMSTATE enmStateNew = VMSTATE_CREATED;
3418 VMSTATE enmStateOld = VMSTATE_CREATED;
3419
3420#ifdef VBOX_STRICT
3421 /*
3422 * Validate the input first.
3423 */
3424 va_start(va, cTransitions);
3425 for (unsigned i = 0; i < cTransitions; i++)
3426 {
3427 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3428 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3429 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3430 }
3431 va_end(va);
3432#endif
3433
3434 /*
3435 * Grab the lock and see if any of the proposed transitions works out.
3436 */
3437 va_start(va, cTransitions);
3438 int rc = VERR_VM_INVALID_VM_STATE;
3439 PUVM pUVM = pVM->pUVM;
3440 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3441
3442 VMSTATE enmStateCur = pVM->enmVMState;
3443
3444 for (unsigned i = 0; i < cTransitions; i++)
3445 {
3446 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3447 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3448 if (enmStateCur == enmStateOld)
3449 {
3450 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
3451 rc = i + 1;
3452 break;
3453 }
3454 }
3455
3456 if (RT_FAILURE(rc))
3457 {
3458 /*
3459 * Complain about it.
3460 */
3461 if (cTransitions == 1)
3462 {
3463 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3464 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3465 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3466 N_("%s failed because the VM state is %s instead of %s"),
3467 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3468 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3469 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3470 }
3471 else
3472 {
3473 va_end(va);
3474 va_start(va, cTransitions);
3475 LogRel(("%s:\n", pszWho));
3476 for (unsigned i = 0; i < cTransitions; i++)
3477 {
3478 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3479 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3480 LogRel(("%s%s -> %s",
3481 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3482 }
3483 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3484 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3485 N_("%s failed because the current VM state, %s, was not found in the state transition table"),
3486 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3487 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3488 pszWho, VMR3GetStateName(enmStateCur)));
3489 }
3490 }
3491
3492 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3493 va_end(va);
3494 Assert(rc > 0 || rc < 0);
3495 return rc;
3496}
3497
3498
3499/**
3500 * Flag a guru meditation ... a hack.
3501 *
3502 * @param pVM The VM handle
3503 *
3504 * @todo Rewrite this part. The guru meditation should be flagged
3505 * immediately by the VMM and not by VMEmt.cpp when it's all over.
3506 */
3507void vmR3SetGuruMeditation(PVM pVM)
3508{
3509 PUVM pUVM = pVM->pUVM;
3510 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3511
3512 VMSTATE enmStateCur = pVM->enmVMState;
3513 if (enmStateCur == VMSTATE_RUNNING)
3514 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING);
3515 else if (enmStateCur == VMSTATE_RUNNING_LS)
3516 {
3517 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
3518 SSMR3Cancel(pVM);
3519 }
3520
3521 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3522}
3523
3524
/**
 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
 *
 * Performs the final DESTROYING -> TERMINATED state transition.
 *
 * @param   pVM             The VM handle.
 */
void vmR3SetTerminated(PVM pVM)
{
    vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
}
3534
3535
3536/**
3537 * Checks if the VM was teleported and hasn't been fully resumed yet.
3538 *
3539 * This applies to both sides of the teleportation since we may leave a working
3540 * clone behind and the user is allowed to resume this...
3541 *
3542 * @returns true / false.
3543 * @param pVM The VM handle.
3544 * @thread Any thread.
3545 */
3546VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3547{
3548 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3549 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3550}
3551
3552
3553/**
3554 * Registers a VM state change callback.
3555 *
3556 * You are not allowed to call any function which changes the VM state from a
3557 * state callback.
3558 *
3559 * @returns VBox status code.
3560 * @param pVM VM handle.
3561 * @param pfnAtState Pointer to callback.
3562 * @param pvUser User argument.
3563 * @thread Any.
3564 */
3565VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3566{
3567 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3568
3569 /*
3570 * Validate input.
3571 */
3572 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3573 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3574
3575 /*
3576 * Allocate a new record.
3577 */
3578 PUVM pUVM = pVM->pUVM;
3579 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3580 if (!pNew)
3581 return VERR_NO_MEMORY;
3582
3583 /* fill */
3584 pNew->pfnAtState = pfnAtState;
3585 pNew->pvUser = pvUser;
3586
3587 /* insert */
3588 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3589 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3590 *pUVM->vm.s.ppAtStateNext = pNew;
3591 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3592 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3593
3594 return VINF_SUCCESS;
3595}
3596
3597
3598/**
3599 * Deregisters a VM state change callback.
3600 *
3601 * @returns VBox status code.
3602 * @param pVM VM handle.
3603 * @param pfnAtState Pointer to callback.
3604 * @param pvUser User argument.
3605 * @thread Any.
3606 */
3607VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3608{
3609 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3610
3611 /*
3612 * Validate input.
3613 */
3614 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3615 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3616
3617 PUVM pUVM = pVM->pUVM;
3618 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3619
3620 /*
3621 * Search the list for the entry.
3622 */
3623 PVMATSTATE pPrev = NULL;
3624 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3625 while ( pCur
3626 && ( pCur->pfnAtState != pfnAtState
3627 || pCur->pvUser != pvUser))
3628 {
3629 pPrev = pCur;
3630 pCur = pCur->pNext;
3631 }
3632 if (!pCur)
3633 {
3634 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3635 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3636 return VERR_FILE_NOT_FOUND;
3637 }
3638
3639 /*
3640 * Unlink it.
3641 */
3642 if (pPrev)
3643 {
3644 pPrev->pNext = pCur->pNext;
3645 if (!pCur->pNext)
3646 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3647 }
3648 else
3649 {
3650 pUVM->vm.s.pAtState = pCur->pNext;
3651 if (!pCur->pNext)
3652 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3653 }
3654
3655 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3656
3657 /*
3658 * Free it.
3659 */
3660 pCur->pfnAtState = NULL;
3661 pCur->pNext = NULL;
3662 MMR3HeapFree(pCur);
3663
3664 return VINF_SUCCESS;
3665}
3666
3667
/**
 * Registers a VM error callback.
 *
 * Thin wrapper that forwards to VMR3AtErrorRegisterU using the user mode
 * VM handle.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pfnAtError      Pointer to callback.
 * @param   pvUser          User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
}
3682
3683
3684/**
3685 * Registers a VM error callback.
3686 *
3687 * @returns VBox status code.
3688 * @param pUVM The VM handle.
3689 * @param pfnAtError Pointer to callback.
3690 * @param pvUser User argument.
3691 * @thread Any.
3692 */
3693VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3694{
3695 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3696
3697 /*
3698 * Validate input.
3699 */
3700 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3701 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3702
3703 /*
3704 * Allocate a new record.
3705 */
3706 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3707 if (!pNew)
3708 return VERR_NO_MEMORY;
3709
3710 /* fill */
3711 pNew->pfnAtError = pfnAtError;
3712 pNew->pvUser = pvUser;
3713
3714 /* insert */
3715 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3716 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3717 *pUVM->vm.s.ppAtErrorNext = pNew;
3718 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3719 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3720
3721 return VINF_SUCCESS;
3722}
3723
3724
3725/**
3726 * Deregisters a VM error callback.
3727 *
3728 * @returns VBox status code.
3729 * @param pVM The VM handle.
3730 * @param pfnAtError Pointer to callback.
3731 * @param pvUser User argument.
3732 * @thread Any.
3733 */
3734VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3735{
3736 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3737
3738 /*
3739 * Validate input.
3740 */
3741 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3742 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3743
3744 PUVM pUVM = pVM->pUVM;
3745 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3746
3747 /*
3748 * Search the list for the entry.
3749 */
3750 PVMATERROR pPrev = NULL;
3751 PVMATERROR pCur = pUVM->vm.s.pAtError;
3752 while ( pCur
3753 && ( pCur->pfnAtError != pfnAtError
3754 || pCur->pvUser != pvUser))
3755 {
3756 pPrev = pCur;
3757 pCur = pCur->pNext;
3758 }
3759 if (!pCur)
3760 {
3761 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3762 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3763 return VERR_FILE_NOT_FOUND;
3764 }
3765
3766 /*
3767 * Unlink it.
3768 */
3769 if (pPrev)
3770 {
3771 pPrev->pNext = pCur->pNext;
3772 if (!pCur->pNext)
3773 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3774 }
3775 else
3776 {
3777 pUVM->vm.s.pAtError = pCur->pNext;
3778 if (!pCur->pNext)
3779 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3780 }
3781
3782 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3783
3784 /*
3785 * Free it.
3786 */
3787 pCur->pfnAtError = NULL;
3788 pCur->pNext = NULL;
3789 MMR3HeapFree(pCur);
3790
3791 return VINF_SUCCESS;
3792}
3793
3794
/**
 * Ellipsis to va_list wrapper for calling pfnAtError.
 *
 * @param   pVM             The VM handle.
 * @param   pCur            The registration record whose callback to invoke.
 * @param   rc              The VBox status code being reported.
 * @param   RT_SRC_POS_DECL The source position of the error.
 * @param   pszFormat       The message format string.
 * @param   ...             Format arguments.
 */
static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
    va_end(va);
}
3805
3806
3807/**
3808 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3809 * The message is found in VMINT.
3810 *
3811 * @param pVM The VM handle.
3812 * @thread EMT.
3813 */
3814VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3815{
3816 VM_ASSERT_EMT(pVM);
3817 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Contracts!\n"));
3818
3819 /*
3820 * Unpack the error (if we managed to format one).
3821 */
3822 PVMERROR pErr = pVM->vm.s.pErrorR3;
3823 const char *pszFile = NULL;
3824 const char *pszFunction = NULL;
3825 uint32_t iLine = 0;
3826 const char *pszMessage;
3827 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3828 if (pErr)
3829 {
3830 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3831 if (pErr->offFile)
3832 pszFile = (const char *)pErr + pErr->offFile;
3833 iLine = pErr->iLine;
3834 if (pErr->offFunction)
3835 pszFunction = (const char *)pErr + pErr->offFunction;
3836 if (pErr->offMessage)
3837 pszMessage = (const char *)pErr + pErr->offMessage;
3838 else
3839 pszMessage = "No message!";
3840 }
3841 else
3842 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3843
3844 /*
3845 * Call the at error callbacks.
3846 */
3847 PUVM pUVM = pVM->pUVM;
3848 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3849 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3850 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3851 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3852 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3853}
3854
3855
/**
 * Gets the number of errors raised via VMSetError.
 *
 * This can be used to avoid double error messages.
 *
 * @returns The error count.
 * @param   pVM             The VM handle.
 */
VMMR3DECL(uint32_t) VMR3GetErrorCount(PVM pVM)
{
    AssertPtrReturn(pVM, 0);
    return VMR3GetErrorCountU(pVM->pUVM);
}
3869
3870
/**
 * Gets the number of errors raised via VMSetError.
 *
 * This can be used to avoid double error messages.
 *
 * @returns The error count.
 * @param   pUVM            The user mode VM handle.
 */
VMMR3DECL(uint32_t) VMR3GetErrorCountU(PUVM pUVM)
{
    AssertPtrReturn(pUVM, 0);
    AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
    return pUVM->vm.s.cErrors;
}
3885
3886
/**
 * Creation time wrapper for vmR3SetErrorUV.
 *
 * @returns rc.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   RT_SRC_POS_DECL The source position of this error.
 * @param   pszFormat       Format string.
 * @param   ...             The arguments.
 * @thread  Any thread.
 */
static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
    va_end(va);
    return rc; /* Returned unchanged so callers can write 'return vmR3SetErrorU(...)'. */
}
3906
3907
3908/**
3909 * Worker which calls everyone listening to the VM error messages.
3910 *
3911 * @param pUVM Pointer to the user mode VM structure.
3912 * @param rc The VBox status code.
3913 * @param RT_SRC_POS_DECL The source position of this error.
3914 * @param pszFormat Format string.
3915 * @param pArgs Pointer to the format arguments.
3916 * @thread EMT
3917 */
3918DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3919{
3920 /*
3921 * Log the error.
3922 */
3923 va_list va3;
3924 va_copy(va3, *pArgs);
3925 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3926 "VMSetError: %N\n",
3927 pszFile, iLine, pszFunction, rc,
3928 pszFormat, &va3);
3929 va_end(va3);
3930
3931#ifdef LOG_ENABLED
3932 va_copy(va3, *pArgs);
3933 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3934 "%N\n",
3935 pszFile, iLine, pszFunction, rc,
3936 pszFormat, &va3);
3937 va_end(va3);
3938#endif
3939
3940 /*
3941 * Make a copy of the message.
3942 */
3943 if (pUVM->pVM)
3944 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3945
3946 /*
3947 * Call the at error callbacks.
3948 */
3949 bool fCalledSomeone = false;
3950 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3951 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3952 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3953 {
3954 va_list va2;
3955 va_copy(va2, *pArgs);
3956 pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3957 va_end(va2);
3958 fCalledSomeone = true;
3959 }
3960 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3961}
3962
3963
3964/**
3965 * Registers a VM runtime error callback.
3966 *
3967 * @returns VBox status code.
3968 * @param pVM The VM handle.
3969 * @param pfnAtRuntimeError Pointer to callback.
3970 * @param pvUser User argument.
3971 * @thread Any.
3972 */
3973VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3974{
3975 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3976
3977 /*
3978 * Validate input.
3979 */
3980 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3981 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3982
3983 /*
3984 * Allocate a new record.
3985 */
3986 PUVM pUVM = pVM->pUVM;
3987 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3988 if (!pNew)
3989 return VERR_NO_MEMORY;
3990
3991 /* fill */
3992 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3993 pNew->pvUser = pvUser;
3994
3995 /* insert */
3996 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3997 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3998 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3999 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
4000 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4001
4002 return VINF_SUCCESS;
4003}
4004
4005
4006/**
4007 * Deregisters a VM runtime error callback.
4008 *
4009 * @returns VBox status code.
4010 * @param pVM The VM handle.
4011 * @param pfnAtRuntimeError Pointer to callback.
4012 * @param pvUser User argument.
4013 * @thread Any.
4014 */
4015VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
4016{
4017 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
4018
4019 /*
4020 * Validate input.
4021 */
4022 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
4023 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4024
4025 PUVM pUVM = pVM->pUVM;
4026 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4027
4028 /*
4029 * Search the list for the entry.
4030 */
4031 PVMATRUNTIMEERROR pPrev = NULL;
4032 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
4033 while ( pCur
4034 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
4035 || pCur->pvUser != pvUser))
4036 {
4037 pPrev = pCur;
4038 pCur = pCur->pNext;
4039 }
4040 if (!pCur)
4041 {
4042 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
4043 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4044 return VERR_FILE_NOT_FOUND;
4045 }
4046
4047 /*
4048 * Unlink it.
4049 */
4050 if (pPrev)
4051 {
4052 pPrev->pNext = pCur->pNext;
4053 if (!pCur->pNext)
4054 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
4055 }
4056 else
4057 {
4058 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
4059 if (!pCur->pNext)
4060 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
4061 }
4062
4063 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4064
4065 /*
4066 * Free it.
4067 */
4068 pCur->pfnAtRuntimeError = NULL;
4069 pCur->pNext = NULL;
4070 MMR3HeapFree(pCur);
4071
4072 return VINF_SUCCESS;
4073}
4074
4075
4076/**
4077 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
4078 * the state to FatalError(LS).
4079 *
4080 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
4081 * return code, see FNVMMEMTRENDEZVOUS.)
4082 *
4083 * @param pVM The VM handle.
4084 * @param pVCpu The VMCPU handle of the EMT.
4085 * @param pvUser Ignored.
4086 */
4087static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
4088{
4089 NOREF(pVCpu);
4090 Assert(!pvUser); NOREF(pvUser);
4091
4092 /*
4093 * The first EMT thru here changes the state.
4094 */
4095 if (pVCpu->idCpu == pVM->cCpus - 1)
4096 {
4097 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
4098 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
4099 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
4100 if (RT_FAILURE(rc))
4101 return rc;
4102 if (rc == 2)
4103 SSMR3Cancel(pVM);
4104
4105 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
4106 }
4107
4108 /* This'll make sure we get out of whereever we are (e.g. REM). */
4109 return VINF_EM_SUSPEND;
4110}
4111
4112
4113/**
4114 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
4115 *
4116 * This does the common parts after the error has been saved / retrieved.
4117 *
4118 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4119 *
4120 * @param pVM The VM handle.
4121 * @param fFlags The error flags.
4122 * @param pszErrorId Error ID string.
4123 * @param pszFormat Format string.
4124 * @param pVa Pointer to the format arguments.
4125 */
4126static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4127{
4128 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
4129
4130 /*
4131 * Take actions before the call.
4132 */
4133 int rc;
4134 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4135 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4136 vmR3SetRuntimeErrorChangeState, NULL);
4137 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4138 rc = VMR3Suspend(pVM);
4139 else
4140 rc = VINF_SUCCESS;
4141
4142 /*
4143 * Do the callback round.
4144 */
4145 PUVM pUVM = pVM->pUVM;
4146 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4147 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4148 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4149 {
4150 va_list va;
4151 va_copy(va, *pVa);
4152 pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4153 va_end(va);
4154 }
4155 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4156
4157 return rc;
4158}
4159
4160
4161/**
4162 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
4163 */
4164static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
4165{
4166 va_list va;
4167 va_start(va, pszFormat);
4168 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
4169 va_end(va);
4170 return rc;
4171}
4172
4173
/**
 * This is a worker function for RC and Ring-0 calls to VMSetRuntimeError and
 * VMSetRuntimeErrorV.
 *
 * The message is found in VMINT.
 *
 * @returns VBox status code, see VMSetRuntimeError.
 * @param   pVM             The VM handle.
 * @thread  EMT.
 */
VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    /* Not implemented yet - triggers the moment RC/R0 actually raises one. */
    AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));

    /*
     * Unpack the error (if we managed to format one).
     */
    const char     *pszErrorId = "SetRuntimeError";
    const char     *pszMessage = "No message!";
    uint32_t        fFlags     = VMSETRTERR_FLAGS_FATAL;
    PVMRUNTIMEERROR pErr       = pVM->vm.s.pRuntimeErrorR3;
    if (pErr)
    {
        /* The id/message strings are stored as byte offsets from the start of the record. */
        AssertCompile(sizeof(const char) == sizeof(uint8_t));
        if (pErr->offErrorId)
            pszErrorId = (const char *)pErr + pErr->offErrorId;
        if (pErr->offMessage)
            pszMessage = (const char *)pErr + pErr->offMessage;
        fFlags = pErr->fFlags;
    }

    /*
     * Join cause with vmR3SetRuntimeErrorV.
     */
    return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
}
4211
4212
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The VM handle.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszMessage      The error message residing the MM heap.  Ownership
 *                          is transferred to this function, which frees it.
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
{
#if 0 /** @todo make copy of the error msg. */
    /*
     * Make a copy of the message.
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);
#endif
    /* NOTE(review): the #if 0 block above references pVa/pszFormat, which are
       not parameters of this function - it cannot compile as-is if enabled. */

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
    MMR3HeapFree(pszMessage);   /* we own the MM heap message; free it here */
    return rc;
}
4244
4245
4246/**
4247 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4248 *
4249 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4250 *
4251 * @param pVM The VM handle.
4252 * @param fFlags The error flags.
4253 * @param pszErrorId Error ID string.
4254 * @param pszFormat Format string.
4255 * @param pVa Pointer to the format arguments.
4256 *
4257 * @thread EMT
4258 */
4259DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4260{
4261 /*
4262 * Make a copy of the message.
4263 */
4264 va_list va2;
4265 va_copy(va2, *pVa);
4266 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4267 va_end(va2);
4268
4269 /*
4270 * Join paths with VMR3SetRuntimeErrorWorker.
4271 */
4272 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4273}
4274
4275
4276/**
4277 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4278 *
4279 * This can be used avoid double error messages.
4280 *
4281 * @returns The runtime error count.
4282 * @param pVM The VM handle.
4283 */
4284VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
4285{
4286 return pVM->pUVM->vm.s.cRuntimeErrors;
4287}
4288
4289
/**
 * Gets the ID of the virtual CPU associated with the calling thread.
 *
 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
 *
 * @param   pVM             The VM handle.
 * @remarks Declared as returning RTCPUID while the value is a VMCPUID
 *          (pUVCpu->idCpu) -- presumably interchangeable here; verify
 *          against the type definitions.
 */
VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
{
    /* The EMT stores its per-CPU structure in TLS; non-EMT threads have none. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
    return pUVCpu
         ? pUVCpu->idCpu
         : NIL_VMCPUID;
}
4304
4305
4306/**
4307 * Returns the native handle of the current EMT VMCPU thread.
4308 *
4309 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4310 * @param pVM The VM handle.
4311 * @thread EMT
4312 */
4313VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4314{
4315 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4316
4317 if (!pUVCpu)
4318 return NIL_RTNATIVETHREAD;
4319
4320 return pUVCpu->vm.s.NativeThreadEMT;
4321}
4322
4323
/**
 * Returns the native handle of the current EMT VMCPU thread.
 *
 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
 * @param   pUVM            Pointer to the user mode VM structure.
 * @thread  EMT
 */
VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
{
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    /* No TLS entry means the caller isn't an EMT. */
    if (!pUVCpu)
        return NIL_RTNATIVETHREAD;

    return pUVCpu->vm.s.NativeThreadEMT;
}
4340
4341
/**
 * Returns the handle of the current EMT VMCPU thread.
 *
 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
 * @param   pVM             The VM handle.
 * @thread  EMT
 */
VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
{
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);

    /* No TLS entry means the caller isn't an EMT. */
    if (!pUVCpu)
        return NIL_RTTHREAD;

    return pUVCpu->vm.s.ThreadEMT;
}
4358
4359
/**
 * Returns the handle of the current EMT VMCPU thread.
 *
 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
 * @param   pUVM            Pointer to the user mode VM structure.
 * @thread  EMT
 */
VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
{
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    /* No TLS entry means the caller isn't an EMT. */
    if (!pUVCpu)
        return NIL_RTTHREAD;

    return pUVCpu->vm.s.ThreadEMT;
}
4376
4377
4378/**
4379 * Return the package and core id of a CPU.
4380 *
4381 * @returns VBOX status code.
4382 * @param pVM The VM to operate on.
4383 * @param idCpu Virtual CPU to get the ID from.
4384 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4385 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4386 *
4387 */
4388VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PVM pVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4389{
4390 /*
4391 * Validate input.
4392 */
4393 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4394 AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
4395 AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
4396 if (idCpu >= pVM->cCpus)
4397 return VERR_INVALID_CPU_ID;
4398
4399 /*
4400 * Set return values.
4401 */
4402#ifdef VBOX_WITH_MULTI_CORE
4403 *pidCpuCore = idCpu;
4404 *pidCpuPackage = 0;
4405#else
4406 *pidCpuCore = 0;
4407 *pidCpuPackage = idCpu;
4408#endif
4409
4410 return VINF_SUCCESS;
4411}
4412
4413
/**
 * Worker for VMR3HotUnplugCpu.
 *
 * @returns VINF_EM_WAIT_SIPI (strict status code).
 * @param   pVM             The VM handle.
 * @param   idCpu           The current CPU.
 */
static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
{
    PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Reset per CPU resources.
     *
     * Actually only needed for VT-x because the CPU seems to be still in some
     * paged mode and startup fails after a new hot plug event. SVM works fine
     * even without this.
     */
    Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
    PGMR3ResetUnpluggedCpu(pVM, pVCpu);
    PDMR3ResetCpu(pVCpu);
    TRPMR3ResetCpu(pVCpu);
    CPUMR3ResetCpu(pVCpu);
    EMR3ResetCpu(pVCpu);
    HWACCMR3ResetCpu(pVCpu);
    /* Park the VCPU waiting for a startup IPI (SIPI). */
    return VINF_EM_WAIT_SIPI;
}
4442
4443
4444/**
4445 * Hot-unplugs a CPU from the guest.
4446 *
4447 * @returns VBox status code.
4448 * @param pVM The VM to operate on.
4449 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4450 */
4451VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4452{
4453 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4454 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4455
4456 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4457 * broadcast requests. Just note down somewhere that the CPU is
4458 * offline and send it to SPIP wait. Maybe modify VMCPUSTATE and push
4459 * it out of the EM loops when offline. */
4460 return VMR3ReqCallNoWait(pVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4461}
4462
4463
4464/**
4465 * Hot-plugs a CPU on the guest.
4466 *
4467 * @returns VBox status code.
4468 * @param pVM The VM to operate on.
4469 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4470 */
4471VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
4472{
4473 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4474 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4475
4476 /** @todo r-bird: Just mark it online and make sure it waits on SPIP. */
4477 return VINF_SUCCESS;
4478}
4479
4480
/**
 * Changes the VMM execution cap.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM to operate on.
 * @param   uCpuExecutionCap    New CPU execution cap in percent, 1-100. Where
 *                              100 is max performance (default).
 */
VMMR3DECL(int) VMR3SetCpuExecutionCap(PVM pVM, uint32_t uCpuExecutionCap)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);

    Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
    /* Note: not called from EMT; plain store of the new cap value. */
    pVM->uCpuExecutionCap = uCpuExecutionCap;
    return VINF_SUCCESS;
}
4499
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette