VirtualBox

source: vbox/trunk/src/VBox/VMM/VM.cpp@ 31768

Last change on this file since 31768 was 31737, checked in by vboxsync, 14 years ago

FT updates

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 144.2 KB
Line 
1/* $Id: VM.cpp 31737 2010-08-17 14:51:12Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing request for execution in EMT (serialization purposes
23 * mostly) and for reporting error back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake, all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
 * time when the VMM bit was kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
 * However as it turned out, VMM(.cpp) is almost empty; all it provides in ring-3
 * is some minor functionality and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straight forward
36 * refactoring, the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41/*******************************************************************************
42* Header Files *
43*******************************************************************************/
44#define LOG_GROUP LOG_GROUP_VM
45#include <VBox/cfgm.h>
46#include <VBox/vmm.h>
47#include <VBox/gvmm.h>
48#include <VBox/mm.h>
49#include <VBox/cpum.h>
50#include <VBox/selm.h>
51#include <VBox/trpm.h>
52#include <VBox/dbgf.h>
53#include <VBox/pgm.h>
54#include <VBox/pdmapi.h>
55#include <VBox/pdmcritsect.h>
56#include <VBox/em.h>
57#include <VBox/rem.h>
58#include <VBox/tm.h>
59#include <VBox/stam.h>
60#include <VBox/patm.h>
61#include <VBox/csam.h>
62#include <VBox/iom.h>
63#include <VBox/ssm.h>
64#include <VBox/ftm.h>
65#include <VBox/hwaccm.h>
66#include "VMInternal.h"
67#include <VBox/vm.h>
68#include <VBox/uvm.h>
69
70#include <VBox/sup.h>
71#include <VBox/dbg.h>
72#include <VBox/err.h>
73#include <VBox/param.h>
74#include <VBox/log.h>
75#include <iprt/assert.h>
76#include <iprt/alloc.h>
77#include <iprt/asm.h>
78#include <iprt/env.h>
79#include <iprt/string.h>
80#include <iprt/time.h>
81#include <iprt/semaphore.h>
82#include <iprt/thread.h>
83
84
85/*******************************************************************************
86* Structures and Typedefs *
87*******************************************************************************/
88/**
89 * VM destruction callback registration record.
90 */
91typedef struct VMATDTOR
92{
93 /** Pointer to the next record in the list. */
94 struct VMATDTOR *pNext;
95 /** Pointer to the callback function. */
96 PFNVMATDTOR pfnAtDtor;
97 /** The user argument. */
98 void *pvUser;
99} VMATDTOR;
100/** Pointer to a VM destruction callback registration record. */
101typedef VMATDTOR *PVMATDTOR;
102
103
104/*******************************************************************************
105* Global Variables *
106*******************************************************************************/
107/** Pointer to the list of VMs. */
108static PUVM g_pUVMsHead = NULL;
109
110/** Pointer to the list of at VM destruction callbacks. */
111static PVMATDTOR g_pVMAtDtorHead = NULL;
112/** Lock the g_pVMAtDtorHead list. */
113#define VM_ATDTOR_LOCK() do { } while (0)
114/** Unlock the g_pVMAtDtorHead list. */
115#define VM_ATDTOR_UNLOCK() do { } while (0)
116
117
118/*******************************************************************************
119* Internal Functions *
120*******************************************************************************/
121static int vmR3CreateUVM(uint32_t cCpus, PUVM *ppUVM);
122static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
123static int vmR3InitRing3(PVM pVM, PUVM pUVM);
124static int vmR3InitVMCpu(PVM pVM);
125static int vmR3InitRing0(PVM pVM);
126static int vmR3InitGC(PVM pVM);
127static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
128static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
129static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
130static void vmR3AtDtor(PVM pVM);
131static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
132static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
133static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
134static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
135static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
136static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
137
138
139/**
140 * Do global VMM init.
141 *
142 * @returns VBox status code.
143 */
144VMMR3DECL(int) VMR3GlobalInit(void)
145{
146 /*
147 * Only once.
148 */
149 static bool volatile s_fDone = false;
150 if (s_fDone)
151 return VINF_SUCCESS;
152
153 /*
154 * We're done.
155 */
156 s_fDone = true;
157 return VINF_SUCCESS;
158}
159
160
161
162/**
163 * Creates a virtual machine by calling the supplied configuration constructor.
164 *
165 * On successful returned the VM is powered, i.e. VMR3PowerOn() should be
166 * called to start the execution.
167 *
168 * @returns 0 on success.
169 * @returns VBox error code on failure.
170 * @param cCpus Number of virtual CPUs for the new VM.
171 * @param pfnVMAtError Pointer to callback function for setting VM
172 * errors. This was added as an implicit call to
173 * VMR3AtErrorRegister() since there is no way the
174 * caller can get to the VM handle early enough to
175 * do this on its own.
176 * This is called in the context of an EMT.
177 * @param pvUserVM The user argument passed to pfnVMAtError.
178 * @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
179 * This is called in the context of an EMT0.
180 * @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
181 * @param ppVM Where to store the 'handle' of the created VM.
182 */
183VMMR3DECL(int) VMR3Create(uint32_t cCpus, PFNVMATERROR pfnVMAtError, void *pvUserVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM, PVM *ppVM)
184{
185 LogFlow(("VMR3Create: cCpus=%RU32 pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
186 cCpus, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
187
188 /*
189 * Because of the current hackiness of the applications
190 * we'll have to initialize global stuff from here.
191 * Later the applications will take care of this in a proper way.
192 */
193 static bool fGlobalInitDone = false;
194 if (!fGlobalInitDone)
195 {
196 int rc = VMR3GlobalInit();
197 if (RT_FAILURE(rc))
198 return rc;
199 fGlobalInitDone = true;
200 }
201
202 /*
203 * Validate input.
204 */
205 AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
206
207 /*
208 * Create the UVM so we can register the at-error callback
209 * and consoliate a bit of cleanup code.
210 */
211 PUVM pUVM = NULL; /* shuts up gcc */
212 int rc = vmR3CreateUVM(cCpus, &pUVM);
213 if (RT_FAILURE(rc))
214 return rc;
215 if (pfnVMAtError)
216 rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
217 if (RT_SUCCESS(rc))
218 {
219 /*
220 * Initialize the support library creating the session for this VM.
221 */
222 rc = SUPR3Init(&pUVM->vm.s.pSession);
223 if (RT_SUCCESS(rc))
224 {
225 /*
226 * Call vmR3CreateU in the EMT thread and wait for it to finish.
227 *
228 * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
229 * submitting a request to a specific VCPU without a pVM. So, to make
230 * sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
231 * that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
232 */
233 PVMREQ pReq;
234 rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
235 (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
236 if (RT_SUCCESS(rc))
237 {
238 rc = pReq->iStatus;
239 VMR3ReqFree(pReq);
240 if (RT_SUCCESS(rc))
241 {
242 /*
243 * Success!
244 */
245 *ppVM = pUVM->pVM;
246 LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
247 return VINF_SUCCESS;
248 }
249 }
250 else
251 AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));
252
253 /*
254 * An error occurred during VM creation. Set the error message directly
255 * using the initial callback, as the callback list doesn't exist yet.
256 */
257 const char *pszError = NULL;
258 switch (rc)
259 {
260 case VERR_VMX_IN_VMX_ROOT_MODE:
261#ifdef RT_OS_LINUX
262 pszError = N_("VirtualBox can't operate in VMX root mode. "
263 "Please disable the KVM kernel extension, recompile your kernel and reboot");
264#else
265 pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
266#endif
267 break;
268
269 case VERR_SVM_IN_USE:
270#ifdef RT_OS_LINUX
271 pszError = N_("VirtualBox can't enable the AMD-V extension. "
272 "Please disable the KVM kernel extension, recompile your kernel and reboot");
273#else
274 pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
275#endif
276 break;
277
278 case VERR_VERSION_MISMATCH:
279 pszError = N_("VMMR0 driver version mismatch. Please terminate all VMs, make sure that "
280 "VBoxNetDHCP is not running and try again. If you still get this error, "
281 "re-install VirtualBox");
282 break;
283
284#ifdef RT_OS_LINUX
285 case VERR_SUPDRV_COMPONENT_NOT_FOUND:
286 pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
287 "that no kernel modules from an older version of VirtualBox exist. "
288 "Then try to recompile and reload the kernel modules by executing "
289 "'/etc/init.d/vboxdrv setup' as root");
290 break;
291#endif
292
293 case VERR_RAW_MODE_INVALID_SMP:
294 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
295 "VirtualBox requires this hardware extension to emulate more than one "
296 "guest CPU");
297 break;
298
299 case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
300#ifdef RT_OS_LINUX
301 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
302 "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
303 "the VT-x extension in the VM settings. Note that without VT-x you have "
304 "to reduce the number of guest CPUs to one");
305#else
306 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
307 "extension. Either upgrade your kernel or disable the VT-x extension in the "
308 "VM settings. Note that without VT-x you have to reduce the number of guest "
309 "CPUs to one");
310#endif
311 break;
312
313 default:
314 pszError = N_("Unknown error creating VM");
315 break;
316 }
317 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
318 }
319 else
320 {
321 /*
322 * An error occurred at support library initialization time (before the
323 * VM could be created). Set the error message directly using the
324 * initial callback, as the callback list doesn't exist yet.
325 */
326 const char *pszError;
327 switch (rc)
328 {
329 case VERR_VM_DRIVER_LOAD_ERROR:
330#ifdef RT_OS_LINUX
331 pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
332 "was either not loaded or /dev/vboxdrv is not set up properly. "
333 "Re-setup the kernel module by executing "
334 "'/etc/init.d/vboxdrv setup' as root");
335#else
336 pszError = N_("VirtualBox kernel driver not loaded");
337#endif
338 break;
339 case VERR_VM_DRIVER_OPEN_ERROR:
340 pszError = N_("VirtualBox kernel driver cannot be opened");
341 break;
342 case VERR_VM_DRIVER_NOT_ACCESSIBLE:
343#ifdef VBOX_WITH_HARDENING
344 /* This should only happen if the executable wasn't hardened - bad code/build. */
345 pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
346 "Re-install VirtualBox. If you are building it yourself, you "
347 "should make sure it installed correctly and that the setuid "
348 "bit is set on the executables calling VMR3Create.");
349#else
350 /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
351# if defined(RT_OS_DARWIN)
352 pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
353 "If you have built VirtualBox yourself, make sure that you do not "
354 "have the vboxdrv KEXT from a different build or installation loaded.");
355# elif defined(RT_OS_LINUX)
356 pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
357 "If you have built VirtualBox yourself, make sure that you do "
358 "not have the vboxdrv kernel module from a different build or "
359 "installation loaded. Also, make sure the vboxdrv udev rule gives "
360 "you the permission you need to access the device.");
361# elif defined(RT_OS_WINDOWS)
362 pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
363# else /* solaris, freebsd, ++. */
364 pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
365 "If you have built VirtualBox yourself, make sure that you do "
366 "not have the vboxdrv kernel module from a different install loaded.");
367# endif
368#endif
369 break;
370 case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
371 case VERR_VM_DRIVER_NOT_INSTALLED:
372#ifdef RT_OS_LINUX
373 pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
374 "was either not loaded or /dev/vboxdrv was not created for some "
375 "reason. Re-setup the kernel module by executing "
376 "'/etc/init.d/vboxdrv setup' as root");
377#else
378 pszError = N_("VirtualBox kernel driver not installed");
379#endif
380 break;
381 case VERR_NO_MEMORY:
382 pszError = N_("VirtualBox support library out of memory");
383 break;
384 case VERR_VERSION_MISMATCH:
385 case VERR_VM_DRIVER_VERSION_MISMATCH:
386 pszError = N_("The VirtualBox support driver which is running is from a different "
387 "version of VirtualBox. You can correct this by stopping all "
388 "running instances of VirtualBox and reinstalling the software.");
389 break;
390 default:
391 pszError = N_("Unknown error initializing kernel driver");
392 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
393 }
394 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
395 }
396 }
397
398 /* cleanup */
399 vmR3DestroyUVM(pUVM, 2000);
400 LogFlow(("VMR3Create: returns %Rrc\n", rc));
401 return rc;
402}
403
404
405/**
406 * Creates the UVM.
407 *
408 * This will not initialize the support library even if vmR3DestroyUVM
409 * will terminate that.
410 *
411 * @returns VBox status code.
412 * @param cCpus Number of virtual CPUs
413 * @param ppUVM Where to store the UVM pointer.
414 */
415static int vmR3CreateUVM(uint32_t cCpus, PUVM *ppUVM)
416{
417 uint32_t i;
418
419 /*
420 * Create and initialize the UVM.
421 */
422 PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
423 AssertReturn(pUVM, VERR_NO_MEMORY);
424 pUVM->u32Magic = UVM_MAGIC;
425 pUVM->cCpus = cCpus;
426
427 AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));
428
429 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
430 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
431 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
432
433 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
434
435 /* Initialize the VMCPU array in the UVM. */
436 for (i = 0; i < cCpus; i++)
437 {
438 pUVM->aCpus[i].pUVM = pUVM;
439 pUVM->aCpus[i].idCpu = i;
440 }
441
442 /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
443 int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
444 AssertRC(rc);
445 if (RT_SUCCESS(rc))
446 {
447 /* Allocate a halt method event semaphore for each VCPU. */
448 for (i = 0; i < cCpus; i++)
449 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
450 for (i = 0; i < cCpus; i++)
451 {
452 rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
453 if (RT_FAILURE(rc))
454 break;
455 }
456 if (RT_SUCCESS(rc))
457 {
458 rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
459 if (RT_SUCCESS(rc))
460 {
461 rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
462 if (RT_SUCCESS(rc))
463 {
464 /*
465 * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
466 */
467 rc = STAMR3InitUVM(pUVM);
468 if (RT_SUCCESS(rc))
469 {
470 rc = MMR3InitUVM(pUVM);
471 if (RT_SUCCESS(rc))
472 {
473 rc = PDMR3InitUVM(pUVM);
474 if (RT_SUCCESS(rc))
475 {
476 /*
477 * Start the emulation threads for all VMCPUs.
478 */
479 for (i = 0; i < cCpus; i++)
480 {
481 rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
482 RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
483 cCpus > 1 ? "EMT-%u" : "EMT", i);
484 if (RT_FAILURE(rc))
485 break;
486
487 pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
488 }
489
490 if (RT_SUCCESS(rc))
491 {
492 *ppUVM = pUVM;
493 return VINF_SUCCESS;
494 }
495
496 /* bail out. */
497 while (i-- > 0)
498 {
499 /** @todo rainy day: terminate the EMTs. */
500 }
501 PDMR3TermUVM(pUVM);
502 }
503 MMR3TermUVM(pUVM);
504 }
505 STAMR3TermUVM(pUVM);
506 }
507 RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
508 }
509 RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
510 }
511 }
512 for (i = 0; i < cCpus; i++)
513 {
514 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
515 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
516 }
517 RTTlsFree(pUVM->vm.s.idxTLS);
518 }
519 RTMemPageFree(pUVM, sizeof(*pUVM));
520 return rc;
521}
522
523
524/**
525 * Creates and initializes the VM.
526 *
527 * @thread EMT
528 */
529static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
530{
531 int rc = VINF_SUCCESS;
532
533 /*
534 * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
535 */
536 rc = PDMR3LdrLoadVMMR0U(pUVM);
537 if (RT_FAILURE(rc))
538 {
539 /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
540 * bird: what about moving the message down here? Main picks the first message, right? */
541 if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
542 return rc; /* proper error message set later on */
543 return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
544 }
545
546 /*
547 * Request GVMM to create a new VM for us.
548 */
549 GVMMCREATEVMREQ CreateVMReq;
550 CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
551 CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
552 CreateVMReq.pSession = pUVM->vm.s.pSession;
553 CreateVMReq.pVMR0 = NIL_RTR0PTR;
554 CreateVMReq.pVMR3 = NULL;
555 CreateVMReq.cCpus = cCpus;
556 rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
557 if (RT_SUCCESS(rc))
558 {
559 PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
560 AssertRelease(VALID_PTR(pVM));
561 AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
562 AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
563 AssertRelease(pVM->cCpus == cCpus);
564 AssertRelease(pVM->uCpuPriority == 100);
565 AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
566
567 Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
568 pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));
569
570 /*
571 * Initialize the VM structure and our internal data (VMINT).
572 */
573 pVM->pUVM = pUVM;
574
575 for (VMCPUID i = 0; i < pVM->cCpus; i++)
576 {
577 pVM->aCpus[i].pUVCpu = &pUVM->aCpus[i];
578 pVM->aCpus[i].idCpu = i;
579 pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
580 Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
581 /* hNativeThreadR0 is initialized on EMT registration. */
582 pUVM->aCpus[i].pVCpu = &pVM->aCpus[i];
583 pUVM->aCpus[i].pVM = pVM;
584 }
585
586
587 /*
588 * Init the configuration.
589 */
590 rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
591 if (RT_SUCCESS(rc))
592 {
593 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
594 rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
595 if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
596 pVM->fHWACCMEnabled = true;
597
598 /*
599 * If executing in fake suplib mode disable RR3 and RR0 in the config.
600 */
601 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
602 if (psz && !strcmp(psz, "fake"))
603 {
604 CFGMR3RemoveValue(pRoot, "RawR3Enabled");
605 CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
606 CFGMR3RemoveValue(pRoot, "RawR0Enabled");
607 CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
608 }
609
610 /*
611 * Make sure the CPU count in the config data matches.
612 */
613 if (RT_SUCCESS(rc))
614 {
615 uint32_t cCPUsCfg;
616 rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
617 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
618 if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
619 {
620 AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
621 cCPUsCfg, cCpus));
622 rc = VERR_INVALID_PARAMETER;
623 }
624 }
625 if (RT_SUCCESS(rc))
626 {
627 rc = CFGMR3QueryU32Def(pRoot, "CpuPriority", &pVM->uCpuPriority, 100);
628 AssertLogRelMsgRC(rc, ("Configuration error: Querying \"CpuPriority\" as integer failed, rc=%Rrc\n", rc));
629
630 /*
631 * Init the ring-3 components and ring-3 per cpu data, finishing it off
632 * by a relocation round (intermediate context finalization will do this).
633 */
634 rc = vmR3InitRing3(pVM, pUVM);
635 if (RT_SUCCESS(rc))
636 {
637 rc = vmR3InitVMCpu(pVM);
638 if (RT_SUCCESS(rc))
639 rc = PGMR3FinalizeMappings(pVM);
640 if (RT_SUCCESS(rc))
641 {
642
643 LogFlow(("Ring-3 init succeeded\n"));
644
645 /*
646 * Init the Ring-0 components.
647 */
648 rc = vmR3InitRing0(pVM);
649 if (RT_SUCCESS(rc))
650 {
651 /* Relocate again, because some switcher fixups depends on R0 init results. */
652 VMR3Relocate(pVM, 0);
653
654#ifdef VBOX_WITH_DEBUGGER
655 /*
656 * Init the tcp debugger console if we're building
657 * with debugger support.
658 */
659 void *pvUser = NULL;
660 rc = DBGCTcpCreate(pVM, &pvUser);
661 if ( RT_SUCCESS(rc)
662 || rc == VERR_NET_ADDRESS_IN_USE)
663 {
664 pUVM->vm.s.pvDBGC = pvUser;
665#endif
666 /*
667 * Init the Guest Context components.
668 */
669 rc = vmR3InitGC(pVM);
670 if (RT_SUCCESS(rc))
671 {
672 /*
673 * Now we can safely set the VM halt method to default.
674 */
675 rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
676 if (RT_SUCCESS(rc))
677 {
678 /*
679 * Set the state and link into the global list.
680 */
681 vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
682 pUVM->pNext = g_pUVMsHead;
683 g_pUVMsHead = pUVM;
684
685#ifdef LOG_ENABLED
686 RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
687#endif
688 return VINF_SUCCESS;
689 }
690 }
691#ifdef VBOX_WITH_DEBUGGER
692 DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
693 pUVM->vm.s.pvDBGC = NULL;
694 }
695#endif
696 //..
697 }
698 }
699 vmR3Destroy(pVM);
700 }
701 }
702 //..
703
704 /* Clean CFGM. */
705 int rc2 = CFGMR3Term(pVM);
706 AssertRC(rc2);
707 }
708
709 /*
710 * Do automatic cleanups while the VM structure is still alive and all
711 * references to it are still working.
712 */
713 PDMR3CritSectTerm(pVM);
714
715 /*
716 * Drop all references to VM and the VMCPU structures, then
717 * tell GVMM to destroy the VM.
718 */
719 pUVM->pVM = NULL;
720 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
721 {
722 pUVM->aCpus[i].pVM = NULL;
723 pUVM->aCpus[i].pVCpu = NULL;
724 }
725 Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
726
727 if (pUVM->cCpus > 1)
728 {
729 /* Poke the other EMTs since they may have stale pVM and pVCpu references
730 on the stack (see VMR3WaitU for instance) if they've been awakened after
731 VM creation. */
732 for (VMCPUID i = 1; i < pUVM->cCpus; i++)
733 VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
734 RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
735 }
736
737 int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
738 AssertRC(rc2);
739 }
740 else
741 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));
742
743 LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
744 return rc;
745}
746
747
748/**
749 * Register the calling EMT with GVM.
750 *
751 * @returns VBox status code.
752 * @param pVM The VM handle.
753 * @param idCpu The Virtual CPU ID.
754 */
755static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
756{
757 Assert(VMMGetCpuId(pVM) == idCpu);
758 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
759 if (RT_FAILURE(rc))
760 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
761 return rc;
762}
763
764
765/**
766 * Initializes all R3 components of the VM
767 */
768static int vmR3InitRing3(PVM pVM, PUVM pUVM)
769{
770 int rc;
771
772 /*
773 * Register the other EMTs with GVM.
774 */
775 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
776 {
777 rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
778 if (RT_FAILURE(rc))
779 return rc;
780 }
781
782 /*
783 * Init all R3 components, the order here might be important.
784 */
785 rc = MMR3Init(pVM);
786 if (RT_SUCCESS(rc))
787 {
788 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
789 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
790 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
791 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
792 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
793 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
794 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
795 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
796 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
797 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
798 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
799 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
800 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
801 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
802
803 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
804 {
805 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
806 AssertRC(rc);
807 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
808 AssertRC(rc);
809 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
810 AssertRC(rc);
811 }
812
813 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
814 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
815 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
816 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
817 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
818 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
819 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
820 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
821
822 rc = CPUMR3Init(pVM);
823 if (RT_SUCCESS(rc))
824 {
825 rc = HWACCMR3Init(pVM);
826 if (RT_SUCCESS(rc))
827 {
828 rc = PGMR3Init(pVM);
829 if (RT_SUCCESS(rc))
830 {
831 rc = REMR3Init(pVM);
832 if (RT_SUCCESS(rc))
833 {
834 rc = MMR3InitPaging(pVM);
835 if (RT_SUCCESS(rc))
836 rc = TMR3Init(pVM);
837 if (RT_SUCCESS(rc))
838 {
839 rc = FTMR3Init(pVM);
840 if (RT_SUCCESS(rc))
841 {
842 rc = VMMR3Init(pVM);
843 if (RT_SUCCESS(rc))
844 {
845 rc = SELMR3Init(pVM);
846 if (RT_SUCCESS(rc))
847 {
848 rc = TRPMR3Init(pVM);
849 if (RT_SUCCESS(rc))
850 {
851 rc = CSAMR3Init(pVM);
852 if (RT_SUCCESS(rc))
853 {
854 rc = PATMR3Init(pVM);
855 if (RT_SUCCESS(rc))
856 {
857 rc = IOMR3Init(pVM);
858 if (RT_SUCCESS(rc))
859 {
860 rc = EMR3Init(pVM);
861 if (RT_SUCCESS(rc))
862 {
863 rc = DBGFR3Init(pVM);
864 if (RT_SUCCESS(rc))
865 {
866 rc = PDMR3Init(pVM);
867 if (RT_SUCCESS(rc))
868 {
869 rc = PGMR3InitDynMap(pVM);
870 if (RT_SUCCESS(rc))
871 rc = MMR3HyperInitFinalize(pVM);
872 if (RT_SUCCESS(rc))
873 rc = PATMR3InitFinalize(pVM);
874 if (RT_SUCCESS(rc))
875 rc = PGMR3InitFinalize(pVM);
876 if (RT_SUCCESS(rc))
877 rc = SELMR3InitFinalize(pVM);
878 if (RT_SUCCESS(rc))
879 rc = TMR3InitFinalize(pVM);
880 if (RT_SUCCESS(rc))
881 rc = VMMR3InitFinalize(pVM);
882 if (RT_SUCCESS(rc))
883 rc = REMR3InitFinalize(pVM);
884 if (RT_SUCCESS(rc))
885 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
886 if (RT_SUCCESS(rc))
887 {
888 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
889 return VINF_SUCCESS;
890 }
891 int rc2 = PDMR3Term(pVM);
892 AssertRC(rc2);
893 }
894 int rc2 = DBGFR3Term(pVM);
895 AssertRC(rc2);
896 }
897 int rc2 = EMR3Term(pVM);
898 AssertRC(rc2);
899 }
900 int rc2 = IOMR3Term(pVM);
901 AssertRC(rc2);
902 }
903 int rc2 = PATMR3Term(pVM);
904 AssertRC(rc2);
905 }
906 int rc2 = CSAMR3Term(pVM);
907 AssertRC(rc2);
908 }
909 int rc2 = TRPMR3Term(pVM);
910 AssertRC(rc2);
911 }
912 int rc2 = SELMR3Term(pVM);
913 AssertRC(rc2);
914 }
915 int rc2 = VMMR3Term(pVM);
916 AssertRC(rc2);
917 }
918 int rc2 = FTMR3Term(pVM);
919 AssertRC(rc2);
920 }
921 int rc2 = TMR3Term(pVM);
922 AssertRC(rc2);
923 }
924 int rc2 = REMR3Term(pVM);
925 AssertRC(rc2);
926 }
927 int rc2 = PGMR3Term(pVM);
928 AssertRC(rc2);
929 }
930 int rc2 = HWACCMR3Term(pVM);
931 AssertRC(rc2);
932 }
933 //int rc2 = CPUMR3Term(pVM);
934 //AssertRC(rc2);
935 }
936 /* MMR3Term is not called here because it'll kill the heap. */
937 }
938
939 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
940 return rc;
941}
942
943
944/**
945 * Initializes all VM CPU components of the VM
946 */
947static int vmR3InitVMCpu(PVM pVM)
948{
949 int rc = VINF_SUCCESS;
950 int rc2;
951
952 rc = CPUMR3InitCPU(pVM);
953 if (RT_SUCCESS(rc))
954 {
955 rc = HWACCMR3InitCPU(pVM);
956 if (RT_SUCCESS(rc))
957 {
958 rc = PGMR3InitCPU(pVM);
959 if (RT_SUCCESS(rc))
960 {
961 rc = TMR3InitCPU(pVM);
962 if (RT_SUCCESS(rc))
963 {
964 rc = VMMR3InitCPU(pVM);
965 if (RT_SUCCESS(rc))
966 {
967 rc = EMR3InitCPU(pVM);
968 if (RT_SUCCESS(rc))
969 {
970 LogFlow(("vmR3InitVMCpu: returns %Rrc\n", VINF_SUCCESS));
971 return VINF_SUCCESS;
972 }
973
974 rc2 = VMMR3TermCPU(pVM);
975 AssertRC(rc2);
976 }
977 rc2 = TMR3TermCPU(pVM);
978 AssertRC(rc2);
979 }
980 rc2 = PGMR3TermCPU(pVM);
981 AssertRC(rc2);
982 }
983 rc2 = HWACCMR3TermCPU(pVM);
984 AssertRC(rc2);
985 }
986 rc2 = CPUMR3TermCPU(pVM);
987 AssertRC(rc2);
988 }
989 LogFlow(("vmR3InitVMCpu: returns %Rrc\n", rc));
990 return rc;
991}
992
993
994/**
995 * Initializes all R0 components of the VM
996 */
997static int vmR3InitRing0(PVM pVM)
998{
999 LogFlow(("vmR3InitRing0:\n"));
1000
1001 /*
1002 * Check for FAKE suplib mode.
1003 */
1004 int rc = VINF_SUCCESS;
1005 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1006 if (!psz || strcmp(psz, "fake"))
1007 {
1008 /*
1009 * Call the VMMR0 component and let it do the init.
1010 */
1011 rc = VMMR3InitR0(pVM);
1012 }
1013 else
1014 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1015
1016 /*
1017 * Do notifications and return.
1018 */
1019 if (RT_SUCCESS(rc))
1020 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1021
1022 /** @todo Move this to the VMINITCOMPLETED_RING0 notification handler. */
1023 if (RT_SUCCESS(rc))
1024 {
1025 rc = HWACCMR3InitFinalizeR0(pVM);
1026 CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
1027 }
1028
1029 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1030 return rc;
1031}
1032
1033
1034/**
1035 * Initializes all GC components of the VM
1036 */
1037static int vmR3InitGC(PVM pVM)
1038{
1039 LogFlow(("vmR3InitGC:\n"));
1040
1041 /*
1042 * Check for FAKE suplib mode.
1043 */
1044 int rc = VINF_SUCCESS;
1045 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1046 if (!psz || strcmp(psz, "fake"))
1047 {
1048 /*
1049 * Call the VMMR0 component and let it do the init.
1050 */
1051 rc = VMMR3InitRC(pVM);
1052 }
1053 else
1054 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1055
1056 /*
1057 * Do notifications and return.
1058 */
1059 if (RT_SUCCESS(rc))
1060 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1061 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1062 return rc;
1063}
1064
1065
1066/**
1067 * Do init completed notifications.
1068 * This notifications can fail.
1069 *
1070 * @param pVM The VM handle.
1071 * @param enmWhat What's completed.
1072 */
1073static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1074{
1075 return VINF_SUCCESS;
1076}
1077
1078
1079/**
1080 * Logger callback for inserting a custom prefix.
1081 *
1082 * @returns Number of chars written.
1083 * @param pLogger The logger.
1084 * @param pchBuf The output buffer.
1085 * @param cchBuf The output buffer size.
1086 * @param pvUser Pointer to the UVM structure.
1087 */
1088static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1089{
1090 AssertReturn(cchBuf >= 2, 0);
1091 PUVM pUVM = (PUVM)pvUser;
1092 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1093 if (pUVCpu)
1094 {
1095 static const char s_szHex[17] = "0123456789abcdef";
1096 VMCPUID const idCpu = pUVCpu->idCpu;
1097 pchBuf[1] = s_szHex[ idCpu & 15];
1098 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1099 }
1100 else
1101 {
1102 pchBuf[0] = 'x';
1103 pchBuf[1] = 'y';
1104 }
1105
1106 return 2;
1107}
1108
1109
1110/**
1111 * Calls the relocation functions for all VMM components so they can update
1112 * any GC pointers. When this function is called all the basic VM members
1113 * have been updated and the actual memory relocation have been done
1114 * by the PGM/MM.
1115 *
1116 * This is used both on init and on runtime relocations.
1117 *
1118 * @param pVM VM handle.
1119 * @param offDelta Relocation delta relative to old location.
1120 */
1121VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1122{
1123 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1124
1125 /*
1126 * The order here is very important!
1127 */
1128 PGMR3Relocate(pVM, offDelta);
1129 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1130 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1131 CPUMR3Relocate(pVM);
1132 HWACCMR3Relocate(pVM);
1133 SELMR3Relocate(pVM);
1134 VMMR3Relocate(pVM, offDelta);
1135 SELMR3Relocate(pVM); /* !hack! fix stack! */
1136 TRPMR3Relocate(pVM, offDelta);
1137 PATMR3Relocate(pVM);
1138 CSAMR3Relocate(pVM, offDelta);
1139 IOMR3Relocate(pVM, offDelta);
1140 EMR3Relocate(pVM);
1141 TMR3Relocate(pVM, offDelta);
1142 DBGFR3Relocate(pVM, offDelta);
1143 PDMR3Relocate(pVM, offDelta);
1144}
1145
1146
1147/**
1148 * EMT rendezvous worker for VMR3PowerOn.
1149 *
1150 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1151 * code, see FNVMMEMTRENDEZVOUS.)
1152 *
1153 * @param pVM The VM handle.
1154 * @param pVCpu The VMCPU handle of the EMT.
1155 * @param pvUser Ignored.
1156 */
1157static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1158{
1159 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1160 Assert(!pvUser); NOREF(pvUser);
1161
1162 /*
1163 * The first thread thru here tries to change the state. We shouldn't be
1164 * called again if this fails.
1165 */
1166 if (pVCpu->idCpu == pVM->cCpus - 1)
1167 {
1168 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1169 if (RT_FAILURE(rc))
1170 return rc;
1171 }
1172
1173 VMSTATE enmVMState = VMR3GetState(pVM);
1174 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1175 ("%s\n", VMR3GetStateName(enmVMState)),
1176 VERR_INTERNAL_ERROR_4);
1177
1178 /*
1179 * All EMTs changes their state to started.
1180 */
1181 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1182
1183 /*
1184 * EMT(0) is last thru here and it will make the notification calls
1185 * and advance the state.
1186 */
1187 if (pVCpu->idCpu == 0)
1188 {
1189 PDMR3PowerOn(pVM);
1190 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1191 }
1192
1193 return VINF_SUCCESS;
1194}
1195
1196
1197/**
1198 * Powers on the virtual machine.
1199 *
1200 * @returns VBox status code.
1201 *
1202 * @param pVM The VM to power on.
1203 *
1204 * @thread Any thread.
1205 * @vmstate Created
1206 * @vmstateto PoweringOn+Running
1207 */
1208VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1209{
1210 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1211 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1212
1213 /*
1214 * Gather all the EMTs to reduce the init TSC drift and keep
1215 * the state changing APIs a bit uniform.
1216 */
1217 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1218 vmR3PowerOn, NULL);
1219 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1220 return rc;
1221}
1222
1223
1224/**
1225 * Does the suspend notifications.
1226 *
1227 * @param pVM The VM handle.
1228 * @thread EMT(0)
1229 */
1230static void vmR3SuspendDoWork(PVM pVM)
1231{
1232 PDMR3Suspend(pVM);
1233}
1234
1235
1236/**
1237 * EMT rendezvous worker for VMR3Suspend.
1238 *
1239 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1240 * return code, see FNVMMEMTRENDEZVOUS.)
1241 *
1242 * @param pVM The VM handle.
1243 * @param pVCpu The VMCPU handle of the EMT.
1244 * @param pvUser Ignored.
1245 */
1246static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1247{
1248 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1249 Assert(!pvUser); NOREF(pvUser);
1250
1251 /*
1252 * The first EMT switches the state to suspending. If this fails because
1253 * something was racing us in one way or the other, there will be no more
1254 * calls and thus the state assertion below is not going to annoy anyone.
1255 */
1256 if (pVCpu->idCpu == pVM->cCpus - 1)
1257 {
1258 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1259 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1260 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1261 if (RT_FAILURE(rc))
1262 return rc;
1263 }
1264
1265 VMSTATE enmVMState = VMR3GetState(pVM);
1266 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1267 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1268 ("%s\n", VMR3GetStateName(enmVMState)),
1269 VERR_INTERNAL_ERROR_4);
1270
1271 /*
1272 * EMT(0) does the actually suspending *after* all the other CPUs have
1273 * been thru here.
1274 */
1275 if (pVCpu->idCpu == 0)
1276 {
1277 vmR3SuspendDoWork(pVM);
1278
1279 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1280 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1281 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1282 if (RT_FAILURE(rc))
1283 return VERR_INTERNAL_ERROR_3;
1284 }
1285
1286 return VINF_EM_SUSPEND;
1287}
1288
1289
1290/**
1291 * Suspends a running VM.
1292 *
1293 * @returns VBox status code. When called on EMT, this will be a strict status
1294 * code that has to be propagated up the call stack.
1295 *
1296 * @param pVM The VM to suspend.
1297 *
1298 * @thread Any thread.
1299 * @vmstate Running or RunningLS
1300 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1301 */
1302VMMR3DECL(int) VMR3Suspend(PVM pVM)
1303{
1304 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1305 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1306
1307 /*
1308 * Gather all the EMTs to make sure there are no races before
1309 * changing the VM state.
1310 */
1311 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1312 vmR3Suspend, NULL);
1313 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1314 return rc;
1315}
1316
1317
1318/**
1319 * EMT rendezvous worker for VMR3Resume.
1320 *
1321 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1322 * return code, see FNVMMEMTRENDEZVOUS.)
1323 *
1324 * @param pVM The VM handle.
1325 * @param pVCpu The VMCPU handle of the EMT.
1326 * @param pvUser Ignored.
1327 */
1328static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1329{
1330 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1331 Assert(!pvUser); NOREF(pvUser);
1332
1333 /*
1334 * The first thread thru here tries to change the state. We shouldn't be
1335 * called again if this fails.
1336 */
1337 if (pVCpu->idCpu == pVM->cCpus - 1)
1338 {
1339 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1340 if (RT_FAILURE(rc))
1341 return rc;
1342 }
1343
1344 VMSTATE enmVMState = VMR3GetState(pVM);
1345 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1346 ("%s\n", VMR3GetStateName(enmVMState)),
1347 VERR_INTERNAL_ERROR_4);
1348
1349#if 0
1350 /*
1351 * All EMTs changes their state to started.
1352 */
1353 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1354#endif
1355
1356 /*
1357 * EMT(0) is last thru here and it will make the notification calls
1358 * and advance the state.
1359 */
1360 if (pVCpu->idCpu == 0)
1361 {
1362 PDMR3Resume(pVM);
1363 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1364 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1365 }
1366
1367 return VINF_EM_RESUME;
1368}
1369
1370
1371/**
1372 * Resume VM execution.
1373 *
1374 * @returns VBox status code. When called on EMT, this will be a strict status
1375 * code that has to be propagated up the call stack.
1376 *
1377 * @param pVM The VM to resume.
1378 *
1379 * @thread Any thread.
1380 * @vmstate Suspended
1381 * @vmstateto Running
1382 */
1383VMMR3DECL(int) VMR3Resume(PVM pVM)
1384{
1385 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1386 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1387
1388 /*
1389 * Gather all the EMTs to make sure there are no races before
1390 * changing the VM state.
1391 */
1392 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1393 vmR3Resume, NULL);
1394 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1395 return rc;
1396}
1397
1398
1399/**
1400 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1401 * after the live step has been completed.
1402 *
1403 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1404 * return code, see FNVMMEMTRENDEZVOUS.)
1405 *
1406 * @param pVM The VM handle.
1407 * @param pVCpu The VMCPU handle of the EMT.
1408 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1409 */
1410static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1411{
1412 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1413 bool *pfSuspended = (bool *)pvUser;
1414
1415 /*
1416 * The first thread thru here tries to change the state. We shouldn't be
1417 * called again if this fails.
1418 */
1419 if (pVCpu->idCpu == pVM->cCpus - 1U)
1420 {
1421 PUVM pUVM = pVM->pUVM;
1422 int rc;
1423
1424 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1425 VMSTATE enmVMState = pVM->enmVMState;
1426 switch (enmVMState)
1427 {
1428 case VMSTATE_RUNNING_LS:
1429 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS);
1430 rc = VINF_SUCCESS;
1431 break;
1432
1433 case VMSTATE_SUSPENDED_EXT_LS:
1434 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1435 rc = VINF_SUCCESS;
1436 break;
1437
1438 case VMSTATE_DEBUGGING_LS:
1439 rc = VERR_TRY_AGAIN;
1440 break;
1441
1442 case VMSTATE_OFF_LS:
1443 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS);
1444 rc = VERR_SSM_LIVE_POWERED_OFF;
1445 break;
1446
1447 case VMSTATE_FATAL_ERROR_LS:
1448 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS);
1449 rc = VERR_SSM_LIVE_FATAL_ERROR;
1450 break;
1451
1452 case VMSTATE_GURU_MEDITATION_LS:
1453 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS);
1454 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1455 break;
1456
1457 case VMSTATE_POWERING_OFF_LS:
1458 case VMSTATE_SUSPENDING_EXT_LS:
1459 case VMSTATE_RESETTING_LS:
1460 default:
1461 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1462 rc = VERR_INTERNAL_ERROR_3;
1463 break;
1464 }
1465 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1466 if (RT_FAILURE(rc))
1467 {
1468 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1469 return rc;
1470 }
1471 }
1472
1473 VMSTATE enmVMState = VMR3GetState(pVM);
1474 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1475 ("%s\n", VMR3GetStateName(enmVMState)),
1476 VERR_INTERNAL_ERROR_4);
1477
1478 /*
1479 * Only EMT(0) have work to do since it's last thru here.
1480 */
1481 if (pVCpu->idCpu == 0)
1482 {
1483 vmR3SuspendDoWork(pVM);
1484 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1485 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1486 if (RT_FAILURE(rc))
1487 return VERR_INTERNAL_ERROR_3;
1488
1489 *pfSuspended = true;
1490 }
1491
1492 return VINF_EM_SUSPEND;
1493}
1494
1495
1496/**
1497 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1498 * SSMR3LiveDoStep1 failure.
1499 *
1500 * Doing this as a rendezvous operation avoids all annoying transition
1501 * states.
1502 *
1503 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1504 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1505 *
1506 * @param pVM The VM handle.
1507 * @param pVCpu The VMCPU handle of the EMT.
1508 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1509 */
1510static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1511{
1512 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1513 bool *pfSuspended = (bool *)pvUser;
1514 NOREF(pVCpu);
1515
1516 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1517 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1518 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1519 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1520 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1521 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1522 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1523 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1524 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1525 if (rc == 1)
1526 rc = VERR_SSM_LIVE_POWERED_OFF;
1527 else if (rc == 2)
1528 rc = VERR_SSM_LIVE_FATAL_ERROR;
1529 else if (rc == 3)
1530 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1531 else if (rc == 4)
1532 {
1533 *pfSuspended = true;
1534 rc = VINF_SUCCESS;
1535 }
1536 else if (rc > 0)
1537 rc = VINF_SUCCESS;
1538 return rc;
1539}
1540
1541
1542/**
1543 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1544 *
1545 * @returns VBox status code.
1546 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1547 *
1548 * @param pVM The VM handle.
1549 * @param pSSM The handle of saved state operation.
1550 *
1551 * @thread EMT(0)
1552 */
1553static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1554{
1555 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1556 VM_ASSERT_EMT0(pVM);
1557
1558 /*
1559 * Advance the state and mark if VMR3Suspend was called.
1560 */
1561 int rc = VINF_SUCCESS;
1562 VMSTATE enmVMState = VMR3GetState(pVM);
1563 if (enmVMState == VMSTATE_SUSPENDED_LS)
1564 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1565 else
1566 {
1567 if (enmVMState != VMSTATE_SAVING)
1568 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1569 rc = VINF_SSM_LIVE_SUSPENDED;
1570 }
1571
1572 /*
1573 * Finish up and release the handle. Careful with the status codes.
1574 */
1575 int rc2 = SSMR3LiveDoStep2(pSSM);
1576 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1577 rc = rc2;
1578
1579 rc2 = SSMR3LiveDone(pSSM);
1580 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1581 rc = rc2;
1582
1583 /*
1584 * Advance to the final state and return.
1585 */
1586 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1587 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1588 return rc;
1589}
1590
1591
1592/**
1593 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1594 * SSMR3LiveSave.
1595 *
1596 * @returns VBox status code.
1597 *
1598 * @param pVM The VM handle.
1599 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1600 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1601 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1602 * @param pvStreamOpsUser The user argument to the stream methods.
1603 * @param enmAfter What to do afterwards.
1604 * @param pfnProgress Progress callback. Optional.
1605 * @param pvProgressUser User argument for the progress callback.
1606 * @param ppSSM Where to return the saved state handle in case of a
1607 * live snapshot scenario.
1608 * @thread EMT
1609 */
1610static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1611 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM)
1612{
1613 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1614 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1615
1616 /*
1617 * Validate input.
1618 */
1619 AssertPtrNull(pszFilename);
1620 AssertPtrNull(pStreamOps);
1621 AssertPtr(pVM);
1622 Assert( enmAfter == SSMAFTER_DESTROY
1623 || enmAfter == SSMAFTER_CONTINUE
1624 || enmAfter == SSMAFTER_TELEPORT);
1625 AssertPtr(ppSSM);
1626 *ppSSM = NULL;
1627
1628 /*
1629 * Change the state and perform/start the saving.
1630 */
1631 int rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1632 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1633 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1634 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1635 {
1636 Assert(!pStreamOps);
1637 rc = SSMR3Save(pVM, pszFilename, enmAfter, pfnProgress, pvProgressUser);
1638 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1639 }
1640 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1641 {
1642 if (enmAfter == SSMAFTER_TELEPORT)
1643 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1644 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1645 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1646 /* (We're not subject to cancellation just yet.) */
1647 }
1648 else
1649 Assert(RT_FAILURE(rc));
1650 return rc;
1651}
1652
1653
1654/**
1655 * Commmon worker for VMR3Save and VMR3Teleport.
1656 *
1657 * @returns VBox status code.
1658 *
1659 * @param pVM The VM handle.
1660 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1661 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1662 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1663 * @param pvStreamOpsUser The user argument to the stream methods.
1664 * @param enmAfter What to do afterwards.
1665 * @param pfnProgress Progress callback. Optional.
1666 * @param pvProgressUser User argument for the progress callback.
1667 * @param pfSuspended Set if we suspended the VM.
1668 *
1669 * @thread Non-EMT
1670 */
1671static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1672 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1673 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1674{
1675 /*
1676 * Request the operation in EMT(0).
1677 */
1678 PSSMHANDLE pSSM;
1679 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
1680 (PFNRT)vmR3Save, 9, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1681 enmAfter, pfnProgress, pvProgressUser, &pSSM);
1682 if ( RT_SUCCESS(rc)
1683 && pSSM)
1684 {
1685 /*
1686 * Live snapshot.
1687 *
1688 * The state handling here is kind of tricky, doing it on EMT(0) helps
1689 * a bit. See the VMSTATE diagram for details.
1690 */
1691 rc = SSMR3LiveDoStep1(pSSM);
1692 if (RT_SUCCESS(rc))
1693 {
1694 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1695 for (;;)
1696 {
1697 /* Try suspend the VM. */
1698 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1699 vmR3LiveDoSuspend, pfSuspended);
1700 if (rc != VERR_TRY_AGAIN)
1701 break;
1702
1703 /* Wait for the state to change. */
1704 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1705 }
1706 if (RT_SUCCESS(rc))
1707 rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1708 else
1709 {
1710 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1711 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1712 }
1713 }
1714 else
1715 {
1716 int rc2 = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1717 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1718
1719 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1720 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1721 rc = rc2;
1722 }
1723 }
1724
1725 return rc;
1726}
1727
1728
1729/**
1730 * Save current VM state.
1731 *
1732 * Can be used for both saving the state and creating snapshots.
1733 *
1734 * When called for a VM in the Running state, the saved state is created live
1735 * and the VM is only suspended when the final part of the saving is preformed.
1736 * The VM state will not be restored to Running in this case and it's up to the
1737 * caller to call VMR3Resume if this is desirable. (The rational is that the
1738 * caller probably wish to reconfigure the disks before resuming the VM.)
1739 *
1740 * @returns VBox status code.
1741 *
1742 * @param pVM The VM which state should be saved.
1743 * @param pszFilename The name of the save state file.
1744 * @param fContinueAfterwards Whether continue execution afterwards or not.
1745 * When in doubt, set this to true.
1746 * @param pfnProgress Progress callback. Optional.
1747 * @param pvUser User argument for the progress callback.
1748 * @param pfSuspended Set if we suspended the VM.
1749 *
1750 * @thread Non-EMT.
1751 * @vmstate Suspended or Running
1752 * @vmstateto Saving+Suspended or
1753 * RunningLS+SuspeningLS+SuspendedLS+Saving+Suspended.
1754 */
1755VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards,
1756 PFNVMPROGRESS pfnProgress, void *pvUser, bool *pfSuspended)
1757{
1758 LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1759 pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1760
1761 /*
1762 * Validate input.
1763 */
1764 AssertPtr(pfSuspended);
1765 *pfSuspended = false;
1766 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1767 VM_ASSERT_OTHER_THREAD(pVM);
1768 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1769 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1770 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1771
1772 /*
1773 * Join paths with VMR3Teleport.
1774 */
1775 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1776 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1777 pszFilename, NULL /*pStreamOps*/, NULL /*pvStreamOpsUser*/,
1778 enmAfter, pfnProgress, pvUser, pfSuspended);
1779 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1780 return rc;
1781}
1782
1783
1784/**
1785 * Teleport the VM (aka live migration).
1786 *
1787 * @returns VBox status code.
1788 *
1789 * @param pVM The VM which state should be saved.
1790 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1791 * @param pStreamOps The stream methods.
1792 * @param pvStreamOpsUser The user argument to the stream methods.
1793 * @param pfnProgress Progress callback. Optional.
1794 * @param pvProgressUser User argument for the progress callback.
1795 * @param pfSuspended Set if we suspended the VM.
1796 *
1797 * @thread Non-EMT.
1798 * @vmstate Suspended or Running
1799 * @vmstateto Saving+Suspended or
1800 * RunningLS+SuspeningLS+SuspendedLS+Saving+Suspended.
1801 */
1802VMMR3DECL(int) VMR3Teleport(PVM pVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1803 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1804{
1805 LogFlow(("VMR3Teleport: pVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
1806 pVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1807
1808 /*
1809 * Validate input.
1810 */
1811 AssertPtr(pfSuspended);
1812 *pfSuspended = false;
1813 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1814 VM_ASSERT_OTHER_THREAD(pVM);
1815 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1816 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1817
1818 /*
1819 * Join paths with VMR3Save.
1820 */
1821 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
1822 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1823 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended);
1824 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1825 return rc;
1826}
1827
1828
1829
1830/**
1831 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
1832 *
1833 * @returns VBox status code.
1834 *
1835 * @param pVM The VM handle.
1836 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1837 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1838 * @param pvStreamOpsUser The user argument to the stream methods.
1839 * @param pfnProgress Progress callback. Optional.
1840 * @param pvUser User argument for the progress callback.
1841 * @param fTeleporting Indicates whether we're teleporting or not.
1842 *
1843 * @thread EMT.
1844 */
1845static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1846 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting)
1847{
1848 LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
1849 pVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
1850
1851 /*
1852 * Validate input (paranoia).
1853 */
1854 AssertPtr(pVM);
1855 AssertPtrNull(pszFilename);
1856 AssertPtrNull(pStreamOps);
1857 AssertPtrNull(pfnProgress);
1858
1859 /*
1860 * Change the state and perform the load.
1861 *
1862 * Always perform a relocation round afterwards to make sure hypervisor
1863 * selectors and such are correct.
1864 */
1865 int rc = vmR3TrySetState(pVM, "VMR3Load", 2,
1866 VMSTATE_LOADING, VMSTATE_CREATED,
1867 VMSTATE_LOADING, VMSTATE_SUSPENDED);
1868 if (RT_FAILURE(rc))
1869 return rc;
1870 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
1871
1872 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pVM);
1873 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
1874 if (RT_SUCCESS(rc))
1875 {
1876 VMR3Relocate(pVM, 0 /*offDelta*/);
1877 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1878 }
1879 else
1880 {
1881 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1882 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
1883 if (cErrorsPriorToSave == VMR3GetErrorCount(pVM))
1884 rc = VMSetError(pVM, rc, RT_SRC_POS,
1885 N_("Unable to restore the virtual machine's saved state from '%s'. "
1886 "It may be damaged or from an older version of VirtualBox. "
1887 "Please discard the saved state before starting the virtual machine"),
1888 pszFilename);
1889 }
1890
1891 return rc;
1892}
1893
1894
1895/**
1896 * Loads a VM state into a newly created VM or a one that is suspended.
1897 *
1898 * To restore a saved state on VM startup, call this function and then resume
1899 * the VM instead of powering it on.
1900 *
1901 * @returns VBox status code.
1902 *
1903 * @param pVM The VM handle.
1904 * @param pszFilename The name of the save state file.
1905 * @param pfnProgress Progress callback. Optional.
1906 * @param pvUser User argument for the progress callback.
1907 *
1908 * @thread Any thread.
1909 * @vmstate Created, Suspended
1910 * @vmstateto Loading+Suspended
1911 */
1912VMMR3DECL(int) VMR3LoadFromFile(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
1913{
1914 LogFlow(("VMR3LoadFromFile: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
1915 pVM, pszFilename, pszFilename, pfnProgress, pvUser));
1916
1917 /*
1918 * Validate input.
1919 */
1920 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1921 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1922
1923 /*
1924 * Forward the request to EMT(0). No need to setup a rendezvous here
1925 * since there is no execution taking place when this call is allowed.
1926 */
1927 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
1928 pVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
1929 false /*fTeleporting*/);
1930 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
1931 return rc;
1932}
1933
1934
1935/**
1936 * VMR3LoadFromFile for arbritrary file streams.
1937 *
1938 * @returns VBox status code.
1939 *
1940 * @param pVM The VM handle.
1941 * @param pStreamOps The stream methods.
1942 * @param pvStreamOpsUser The user argument to the stream methods.
1943 * @param pfnProgress Progress callback. Optional.
1944 * @param pvProgressUser User argument for the progress callback.
1945 *
1946 * @thread Any thread.
1947 * @vmstate Created, Suspended
1948 * @vmstateto Loading+Suspended
1949 */
1950VMMR3DECL(int) VMR3LoadFromStream(PVM pVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1951 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
1952{
1953 LogFlow(("VMR3LoadFromStream: pVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
1954 pVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1955
1956 /*
1957 * Validate input.
1958 */
1959 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1960 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1961
1962 /*
1963 * Forward the request to EMT(0). No need to setup a rendezvous here
1964 * since there is no execution taking place when this call is allowed.
1965 */
1966 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
1967 pVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
1968 true /*fTeleporting*/);
1969 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
1970 return rc;
1971}
1972
1973
/**
 * EMT rendezvous worker for VMR3PowerOff.
 *
 * Invoked on every EMT.  The rendezvous is of the descending type (see
 * VMR3PowerOff), so the EMT with the highest CPU id arrives first and
 * performs the state transition, while EMT(0) arrives last and does the
 * actual power off work once all the others are in the STOPPED state.
 *
 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
 *          return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM             The VM handle.
 * @param   pVCpu           The VMCPU handle of the EMT.
 * @param   pvUser          Ignored.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT thru here will change the state to PoweringOff.
     * (Descending rendezvous: that is the EMT with the highest CPU id.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
                                 VMSTATE_POWERING_OFF,    VMSTATE_RUNNING,           /* 1 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_SUSPENDED,         /* 2 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_DEBUGGING,         /* 3 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_LOAD_FAILURE,      /* 4 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_GURU_MEDITATION,   /* 5 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_FATAL_ERROR,       /* 6 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_CREATED,           /* 7 */   /** @todo update the diagram! */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS,        /* 8 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS,      /* 9 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS);   /* 11 */
        if (RT_FAILURE(rc))
            return rc;
        /* On success rc is the 1-based index of the state pair that matched.
           7 and up covers Created plus the live-save (LS) states, where a
           pending SSM operation may need cancelling.  NOTE(review): confirm
           pair 7 (Created) is intentionally included in the cancel range. */
        if (rc >= 7)
            SSMR3Cancel(pVM);
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertMsgReturn(   enmVMState == VMSTATE_POWERING_OFF
                    || enmVMState == VMSTATE_POWERING_OFF_LS,
                    ("%s\n", VMR3GetStateName(enmVMState)),
                    VERR_VM_INVALID_VM_STATE);

    /*
     * EMT(0) does the actual power off work here *after* all the other EMTs
     * have been thru and entered the STOPPED state.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
    if (pVCpu->idCpu == 0)
    {
        /*
         * For debugging purposes, we will log a summary of the guest state at this point.
         */
        if (enmVMState != VMSTATE_GURU_MEDITATION)
        {
            /** @todo SMP support? */
            /** @todo make the state dumping at VMR3PowerOff optional. */
            RTLogRelPrintf("****************** Guest state at power off ******************\n");
            DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
            /** @todo dump guest call stack. */
#if 1 // "temporary" while debugging #1589
            RTLogRelPrintf("***\n");
            uint32_t esp = CPUMGetGuestESP(pVCpu);
            /* Only dump the stack when it lives in real-mode-style low memory
               (SS=0, SP below 64K), so the physical read below is meaningful. */
            if (    CPUMGetGuestSS(pVCpu) == 0
                &&  esp < _64K)
            {
                uint8_t abBuf[PAGE_SIZE];
                RTLogRelPrintf("***\n"
                               "ss:sp=0000:%04x ", esp);
                uint32_t Start = esp & ~(uint32_t)63;   /* align down to a 64 byte boundary */
                int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
                if (RT_SUCCESS(rc))
                    RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
                                   "%.*Rhxd\n",
                                   Start, Start + 0x100 - 1,
                                   0x100, abBuf);
                else
                    RTLogRelPrintf("rc=%Rrc\n", rc);

                /* grub ... */
                if (esp < 0x2000 && esp > 0x1fc0)
                {
                    rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
                    if (RT_SUCCESS(rc))
                        RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
                                       "%.*Rhxd\n",
                                       0x800, abBuf);
                }
                /* microsoft cdrom hang ... */
                /* NOTE(review): the label below says 2000:0000 (phys 0x20000) but
                   the read is from GCPhys 0x8000 -- one of the two looks wrong,
                   confirm the intended address. */
                if (true)
                {
                    rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
                    if (RT_SUCCESS(rc))
                        RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
                                       "%.*Rhxd\n",
                                       0x200, abBuf);
                }
            }
#endif
            RTLogRelPrintf("************** End of Guest state at power off ***************\n");
        }

        /*
         * Perform the power off notifications and advance the state to
         * Off or OffLS.
         */
        PDMR3PowerOff(pVM);

        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_POWERING_OFF_LS)
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }
    return VINF_EM_OFF;
}
2103
2104
2105/**
2106 * Power off the VM.
2107 *
2108 * @returns VBox status code. When called on EMT, this will be a strict status
2109 * code that has to be propagated up the call stack.
2110 *
2111 * @param pVM The handle of the VM to be powered off.
2112 *
2113 * @thread Any thread.
2114 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2115 * @vmstateto Off or OffLS
2116 */
2117VMMR3DECL(int) VMR3PowerOff(PVM pVM)
2118{
2119 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
2120 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2121
2122 /*
2123 * Gather all the EMTs to make sure there are no races before
2124 * changing the VM state.
2125 */
2126 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2127 vmR3PowerOff, NULL);
2128 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2129 return rc;
2130}
2131
2132
2133/**
2134 * Destroys the VM.
2135 *
2136 * The VM must be powered off (or never really powered on) to call this
2137 * function. The VM handle is destroyed and can no longer be used up successful
2138 * return.
2139 *
2140 * @returns VBox status code.
2141 *
2142 * @param pVM The handle of the VM which should be destroyed.
2143 *
2144 * @thread Any none emulation thread.
2145 * @vmstate Off, Created
2146 * @vmstateto N/A
2147 */
2148VMMR3DECL(int) VMR3Destroy(PVM pVM)
2149{
2150 LogFlow(("VMR3Destroy: pVM=%p\n", pVM));
2151
2152 /*
2153 * Validate input.
2154 */
2155 if (!pVM)
2156 return VERR_INVALID_PARAMETER;
2157 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2158 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2159
2160 /*
2161 * Change VM state to destroying and unlink the VM.
2162 */
2163 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2164 if (RT_FAILURE(rc))
2165 return rc;
2166
2167 /** @todo lock this when we start having multiple machines in a process... */
2168 PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
2169 if (g_pUVMsHead == pUVM)
2170 g_pUVMsHead = pUVM->pNext;
2171 else
2172 {
2173 PUVM pPrev = g_pUVMsHead;
2174 while (pPrev && pPrev->pNext != pUVM)
2175 pPrev = pPrev->pNext;
2176 AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);
2177
2178 pPrev->pNext = pUVM->pNext;
2179 }
2180 pUVM->pNext = NULL;
2181
2182 /*
2183 * Notify registered at destruction listeners.
2184 */
2185 vmR3AtDtor(pVM);
2186
2187 /*
2188 * Call vmR3Destroy on each of the EMTs ending with EMT(0) doing the bulk
2189 * of the cleanup.
2190 */
2191 /* vmR3Destroy on all EMTs, ending with EMT(0). */
2192 rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2193 AssertLogRelRC(rc);
2194
2195 /* Wait for EMTs and destroy the UVM. */
2196 vmR3DestroyUVM(pUVM, 30000);
2197
2198 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2199 return VINF_SUCCESS;
2200}
2201
2202
/**
 * Internal destruction worker.
 *
 * This is either called from VMR3Destroy via VMR3ReqCallU or from
 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
 * VMR3Destroy().
 *
 * When called on EMT(0), it performs the great bulk of the destruction.
 * When called on the other EMTs, they will do nothing and the whole purpose is
 * to return VINF_EM_TERMINATE so they break out of their run loops.
 *
 * @returns VINF_EM_TERMINATE.
 * @param   pVM     The VM handle.
 */
DECLCALLBACK(int) vmR3Destroy(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu);
    LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));

    /*
     * Only VCPU 0 does the full cleanup (last).
     */
    if (pVCpu->idCpu == 0)
    {
        /*
         * Dump statistics to the log.
         */
#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
        RTLogFlags(NULL, "nodisabled nobuffered");
#endif
#ifdef VBOX_WITH_STATISTICS
        STAMR3Dump(pVM, "*");
#else
        LogRel(("************************* Statistics *************************\n"));
        STAMR3DumpToReleaseLog(pVM, "*");
        LogRel(("********************* End of statistics **********************\n"));
#endif

        /*
         * Destroy the VM components in reverse dependency order; the order
         * below matters (e.g. VMMR3Term takes down the ring-0 side, so it
         * must come after everything that still talks to ring-0).
         */
        int rc = TMR3Term(pVM);
        AssertRC(rc);
#ifdef VBOX_WITH_DEBUGGER
        rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
        pUVM->vm.s.pvDBGC = NULL;
#endif
        /* NOTE(review): this AssertRC sits outside the #ifdef; without
           VBOX_WITH_DEBUGGER it merely re-checks the TMR3Term status, which
           is harmless but was probably meant to be inside the block. */
        AssertRC(rc);
        rc = FTMR3Term(pVM);
        AssertRC(rc);
        rc = DBGFR3Term(pVM);
        AssertRC(rc);
        rc = PDMR3Term(pVM);
        AssertRC(rc);
        rc = EMR3Term(pVM);
        AssertRC(rc);
        rc = IOMR3Term(pVM);
        AssertRC(rc);
        rc = CSAMR3Term(pVM);
        AssertRC(rc);
        rc = PATMR3Term(pVM);
        AssertRC(rc);
        rc = TRPMR3Term(pVM);
        AssertRC(rc);
        rc = SELMR3Term(pVM);
        AssertRC(rc);
        rc = REMR3Term(pVM);
        AssertRC(rc);
        rc = HWACCMR3Term(pVM);
        AssertRC(rc);
        rc = PGMR3Term(pVM);
        AssertRC(rc);
        rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
        AssertRC(rc);
        rc = CPUMR3Term(pVM);
        AssertRC(rc);
        SSMR3Term(pVM);
        rc = PDMR3CritSectTerm(pVM);
        AssertRC(rc);
        rc = MMR3Term(pVM);
        AssertRC(rc);

        /*
         * We're done, tell the other EMTs to quit.
         */
        ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
        ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
        LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
    }
    return VINF_EM_TERMINATE;
}
2296
2297
2298/**
2299 * Destroys the UVM portion.
2300 *
2301 * This is called as the final step in the VM destruction or as the cleanup
2302 * in case of a creation failure.
2303 *
2304 * @param pVM VM Handle.
2305 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2306 * threads.
2307 */
2308static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2309{
2310 /*
2311 * Signal termination of each the emulation threads and
2312 * wait for them to complete.
2313 */
2314 /* Signal them. */
2315 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2316 if (pUVM->pVM)
2317 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2318 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2319 {
2320 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2321 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2322 }
2323
2324 /* Wait for them. */
2325 uint64_t NanoTS = RTTimeNanoTS();
2326 RTTHREAD hSelf = RTThreadSelf();
2327 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2328 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2329 {
2330 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2331 if ( hThread != NIL_RTTHREAD
2332 && hThread != hSelf)
2333 {
2334 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2335 int rc2 = RTThreadWait(hThread,
2336 cMilliesElapsed < cMilliesEMTWait
2337 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2338 : 2000,
2339 NULL);
2340 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2341 rc2 = RTThreadWait(hThread, 1000, NULL);
2342 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2343 if (RT_SUCCESS(rc2))
2344 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2345 }
2346 }
2347
2348 /* Cleanup the semaphores. */
2349 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2350 {
2351 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2352 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2353 }
2354
2355 /*
2356 * Free the event semaphores associated with the request packets.
2357 */
2358 unsigned cReqs = 0;
2359 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2360 {
2361 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2362 pUVM->vm.s.apReqFree[i] = NULL;
2363 for (; pReq; pReq = pReq->pNext, cReqs++)
2364 {
2365 pReq->enmState = VMREQSTATE_INVALID;
2366 RTSemEventDestroy(pReq->EventSem);
2367 }
2368 }
2369 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2370
2371 /*
2372 * Kill all queued requests. (There really shouldn't be any!)
2373 */
2374 for (unsigned i = 0; i < 10; i++)
2375 {
2376 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pReqs, NULL, PVMREQ);
2377 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2378 if (!pReqHead)
2379 break;
2380 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2381 {
2382 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2383 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2384 RTSemEventSignal(pReq->EventSem);
2385 RTThreadSleep(2);
2386 RTSemEventDestroy(pReq->EventSem);
2387 }
2388 /* give them a chance to respond before we free the request memory. */
2389 RTThreadSleep(32);
2390 }
2391
2392 /*
2393 * Now all queued VCPU requests (again, there shouldn't be any).
2394 */
2395 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2396 {
2397 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2398
2399 for (unsigned i = 0; i < 10; i++)
2400 {
2401 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pReqs, NULL, PVMREQ);
2402 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2403 if (!pReqHead)
2404 break;
2405 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2406 {
2407 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2408 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2409 RTSemEventSignal(pReq->EventSem);
2410 RTThreadSleep(2);
2411 RTSemEventDestroy(pReq->EventSem);
2412 }
2413 /* give them a chance to respond before we free the request memory. */
2414 RTThreadSleep(32);
2415 }
2416 }
2417
2418 /*
2419 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2420 */
2421 PDMR3TermUVM(pUVM);
2422
2423 /*
2424 * Terminate the support library if initialized.
2425 */
2426 if (pUVM->vm.s.pSession)
2427 {
2428 int rc = SUPR3Term(false /*fForced*/);
2429 AssertRC(rc);
2430 pUVM->vm.s.pSession = NIL_RTR0PTR;
2431 }
2432
2433 /*
2434 * Destroy the MM heap and free the UVM structure.
2435 */
2436 MMR3TermUVM(pUVM);
2437 STAMR3TermUVM(pUVM);
2438
2439#ifdef LOG_ENABLED
2440 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2441#endif
2442 RTTlsFree(pUVM->vm.s.idxTLS);
2443
2444 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2445 RTMemPageFree(pUVM, sizeof(*pUVM));
2446
2447 RTLogFlush(NULL);
2448}
2449
2450
2451/**
2452 * Enumerates the VMs in this process.
2453 *
2454 * @returns Pointer to the next VM.
2455 * @returns NULL when no more VMs.
2456 * @param pVMPrev The previous VM
2457 * Use NULL to start the enumeration.
2458 */
2459VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2460{
2461 /*
2462 * This is quick and dirty. It has issues with VM being
2463 * destroyed during the enumeration.
2464 */
2465 PUVM pNext;
2466 if (pVMPrev)
2467 pNext = pVMPrev->pUVM->pNext;
2468 else
2469 pNext = g_pUVMsHead;
2470 return pNext ? pNext->pVM : NULL;
2471}
2472
2473
2474/**
2475 * Registers an at VM destruction callback.
2476 *
2477 * @returns VBox status code.
2478 * @param pfnAtDtor Pointer to callback.
2479 * @param pvUser User argument.
2480 */
2481VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2482{
2483 /*
2484 * Check if already registered.
2485 */
2486 VM_ATDTOR_LOCK();
2487 PVMATDTOR pCur = g_pVMAtDtorHead;
2488 while (pCur)
2489 {
2490 if (pfnAtDtor == pCur->pfnAtDtor)
2491 {
2492 VM_ATDTOR_UNLOCK();
2493 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2494 return VERR_INVALID_PARAMETER;
2495 }
2496
2497 /* next */
2498 pCur = pCur->pNext;
2499 }
2500 VM_ATDTOR_UNLOCK();
2501
2502 /*
2503 * Allocate new entry.
2504 */
2505 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2506 if (!pVMAtDtor)
2507 return VERR_NO_MEMORY;
2508
2509 VM_ATDTOR_LOCK();
2510 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2511 pVMAtDtor->pvUser = pvUser;
2512 pVMAtDtor->pNext = g_pVMAtDtorHead;
2513 g_pVMAtDtorHead = pVMAtDtor;
2514 VM_ATDTOR_UNLOCK();
2515
2516 return VINF_SUCCESS;
2517}
2518
2519
2520/**
2521 * Deregisters an at VM destruction callback.
2522 *
2523 * @returns VBox status code.
2524 * @param pfnAtDtor Pointer to callback.
2525 */
2526VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2527{
2528 /*
2529 * Find it, unlink it and free it.
2530 */
2531 VM_ATDTOR_LOCK();
2532 PVMATDTOR pPrev = NULL;
2533 PVMATDTOR pCur = g_pVMAtDtorHead;
2534 while (pCur)
2535 {
2536 if (pfnAtDtor == pCur->pfnAtDtor)
2537 {
2538 if (pPrev)
2539 pPrev->pNext = pCur->pNext;
2540 else
2541 g_pVMAtDtorHead = pCur->pNext;
2542 pCur->pNext = NULL;
2543 VM_ATDTOR_UNLOCK();
2544
2545 RTMemFree(pCur);
2546 return VINF_SUCCESS;
2547 }
2548
2549 /* next */
2550 pPrev = pCur;
2551 pCur = pCur->pNext;
2552 }
2553 VM_ATDTOR_UNLOCK();
2554
2555 return VERR_INVALID_PARAMETER;
2556}
2557
2558
2559/**
2560 * Walks the list of at VM destructor callbacks.
2561 * @param pVM The VM which is about to be destroyed.
2562 */
2563static void vmR3AtDtor(PVM pVM)
2564{
2565 /*
2566 * Find it, unlink it and free it.
2567 */
2568 VM_ATDTOR_LOCK();
2569 for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
2570 pCur->pfnAtDtor(pVM, pCur->pvUser);
2571 VM_ATDTOR_UNLOCK();
2572}
2573
2574
2575/**
2576 * Worker which checks integrity of some internal structures.
2577 * This is yet another attempt to track down that AVL tree crash.
2578 */
2579static void vmR3CheckIntegrity(PVM pVM)
2580{
2581#ifdef VBOX_STRICT
2582 int rc = PGMR3CheckIntegrity(pVM);
2583 AssertReleaseRC(rc);
2584#endif
2585}
2586
2587
/**
 * EMT rendezvous worker for VMR3Reset.
 *
 * This is called by the emulation threads as a response to the reset request
 * issued by VMR3Reset().  The rendezvous is of the descending type, so the
 * EMT with the highest CPU id arrives first and performs the state change,
 * while EMT(0) arrives last and performs the actual component resets.
 *
 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
 *          is a strict return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle of the EMT.
 * @param   pvUser  Ignored.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3Reset(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT will try change the state to resetting.  If this fails,
     * we won't get called for the other EMTs.
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3Reset", 3,
                                 VMSTATE_RESETTING, VMSTATE_RUNNING,
                                 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
                                 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertLogRelMsgReturn(   enmVMState == VMSTATE_RESETTING
                          || enmVMState == VMSTATE_RESETTING_LS,
                          ("%s\n", VMR3GetStateName(enmVMState)),
                          VERR_INTERNAL_ERROR_4);

    /*
     * EMT(0) does the full cleanup *after* all the other EMTs has been
     * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
     *
     * Because there are per-cpu reset routines and order may/is important,
     * the following sequence looks a bit ugly...
     */
    if (pVCpu->idCpu == 0)
        vmR3CheckIntegrity(pVM);

    /* Reset the VCpu state. */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);

    /* Clear all pending forced actions, except requests which must survive the reset. */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);

    /*
     * Reset the VM components.
     */
    if (pVCpu->idCpu == 0)
    {
        PATMR3Reset(pVM);
        CSAMR3Reset(pVM);
        PGMR3Reset(pVM);                /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
                                         * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
/** @todo PGMR3Reset should be called after PDMR3Reset really, because we'll trash OS <-> hardware
 * communication structures residing in RAM when done in the other order.  I.e. the device must be
 * quiesced first, then we clear the memory and plan tables. Probably have to make these things
 * explicit in some way, some memory setup pass or something.
 * (Example: DevAHCI may assert if memory is zeroed before it've read the FIS.)
 *
 * @bugref{4467}
 */
        MMR3Reset(pVM);
        PDMR3Reset(pVM);
        SELMR3Reset(pVM);
        TRPMR3Reset(pVM);
        REMR3Reset(pVM);
        IOMR3Reset(pVM);
        CPUMR3Reset(pVM);
    }
    CPUMR3ResetCpu(pVCpu);              /* per-CPU CPUM reset, runs on every EMT */
    if (pVCpu->idCpu == 0)
    {
        TMR3Reset(pVM);
        EMR3Reset(pVM);
        HWACCMR3Reset(pVM);             /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */

#ifdef LOG_ENABLED
        /*
         * Debug logging.
         */
        RTLogPrintf("\n\nThe VM was reset:\n");
        DBGFR3Info(pVM, "cpum", "verbose", NULL);
#endif

        /*
         * Since EMT(0) is the last to go thru here, it will advance the state.
         * When a live save is active, we will move on to SuspendingLS but
         * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
         */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_RESETTING)
        {
            /* A reset from Suspended goes back to Suspended; from Running back to Running. */
            if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
            else
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING);
        }
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);

        vmR3CheckIntegrity(pVM);

        /*
         * Do the suspend bit as well.
         * It only requires some EMT(0) work at present.
         */
        if (enmVMState != VMSTATE_RESETTING)
        {
            vmR3SuspendDoWork(pVM);
            vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
        }
    }

    /* Note: on EMTs other than 0, enmVMState is the value read before EMT(0)
       advanced the state; both reads agree on Resetting vs. ResettingLS. */
    return enmVMState == VMSTATE_RESETTING
         ? VINF_EM_RESET
         : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
}
2720
2721
2722/**
2723 * Reset the current VM.
2724 *
2725 * @returns VBox status code.
2726 * @param pVM VM to reset.
2727 */
2728VMMR3DECL(int) VMR3Reset(PVM pVM)
2729{
2730 LogFlow(("VMR3Reset:\n"));
2731 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2732
2733 /*
2734 * Gather all the EMTs to make sure there are no races before
2735 * changing the VM state.
2736 */
2737 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2738 vmR3Reset, NULL);
2739 LogFlow(("VMR3Reset: returns %Rrc\n", rc));
2740 return rc;
2741}
2742
2743
2744/**
2745 * Gets the current VM state.
2746 *
2747 * @returns The current VM state.
2748 * @param pVM VM handle.
2749 * @thread Any
2750 */
2751VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
2752{
2753 return pVM->enmVMState;
2754}
2755
2756
/**
 * Gets the state name string for a VM state.
 *
 * Pure 1:1 mapping from VMSTATE values to constant strings; used mainly for
 * logging and assertion messages.
 *
 * @returns Pointer to the state name. (readonly)
 * @param   enmState        The state.
 */
VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
{
    switch (enmState)
    {
        case VMSTATE_CREATING: return "CREATING";
        case VMSTATE_CREATED: return "CREATED";
        case VMSTATE_LOADING: return "LOADING";
        case VMSTATE_POWERING_ON: return "POWERING_ON";
        case VMSTATE_RESUMING: return "RESUMING";
        case VMSTATE_RUNNING: return "RUNNING";
        case VMSTATE_RUNNING_LS: return "RUNNING_LS";
        case VMSTATE_RUNNING_FT: return "RUNNING_FT";
        case VMSTATE_RESETTING: return "RESETTING";
        case VMSTATE_RESETTING_LS: return "RESETTING_LS";
        case VMSTATE_SUSPENDED: return "SUSPENDED";
        case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
        case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
        case VMSTATE_SUSPENDING: return "SUSPENDING";
        case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
        case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
        case VMSTATE_SAVING: return "SAVING";
        case VMSTATE_DEBUGGING: return "DEBUGGING";
        case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
        case VMSTATE_POWERING_OFF: return "POWERING_OFF";
        case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
        case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
        case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
        case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
        case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
        case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
        case VMSTATE_OFF: return "OFF";
        case VMSTATE_OFF_LS: return "OFF_LS";
        case VMSTATE_DESTROYING: return "DESTROYING";
        case VMSTATE_TERMINATED: return "TERMINATED";

        default:
            AssertMsgFailed(("Unknown state %d\n", enmState));
            /* NOTE(review): the trailing newline in this fallback string is
               inconsistent with the other names -- confirm it is intended. */
            return "Unknown!\n";
    }
}
2803
2804
2805/**
2806 * Validates the state transition in strict builds.
2807 *
2808 * @returns true if valid, false if not.
2809 *
2810 * @param enmStateOld The old (current) state.
2811 * @param enmStateNew The proposed new state.
2812 *
2813 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
2814 * diagram (under State Machine Diagram).
2815 */
2816static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
2817{
2818#ifdef VBOX_STRICT
2819 switch (enmStateOld)
2820 {
2821 case VMSTATE_CREATING:
2822 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2823 break;
2824
2825 case VMSTATE_CREATED:
2826 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
2827 || enmStateNew == VMSTATE_POWERING_ON
2828 || enmStateNew == VMSTATE_POWERING_OFF
2829 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2830 break;
2831
2832 case VMSTATE_LOADING:
2833 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
2834 || enmStateNew == VMSTATE_LOAD_FAILURE
2835 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2836 break;
2837
2838 case VMSTATE_POWERING_ON:
2839 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2840 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
2841 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2842 break;
2843
2844 case VMSTATE_RESUMING:
2845 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2846 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
2847 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2848 break;
2849
2850 case VMSTATE_RUNNING:
2851 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2852 || enmStateNew == VMSTATE_SUSPENDING
2853 || enmStateNew == VMSTATE_RESETTING
2854 || enmStateNew == VMSTATE_RUNNING_LS
2855 || enmStateNew == VMSTATE_RUNNING_FT
2856 || enmStateNew == VMSTATE_DEBUGGING
2857 || enmStateNew == VMSTATE_FATAL_ERROR
2858 || enmStateNew == VMSTATE_GURU_MEDITATION
2859 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2860 break;
2861
2862 case VMSTATE_RUNNING_LS:
2863 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
2864 || enmStateNew == VMSTATE_SUSPENDING_LS
2865 || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
2866 || enmStateNew == VMSTATE_RESETTING_LS
2867 || enmStateNew == VMSTATE_RUNNING
2868 || enmStateNew == VMSTATE_DEBUGGING_LS
2869 || enmStateNew == VMSTATE_FATAL_ERROR_LS
2870 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
2871 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2872 break;
2873
2874 case VMSTATE_RUNNING_FT:
2875 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2876 || enmStateNew == VMSTATE_FATAL_ERROR
2877 || enmStateNew == VMSTATE_GURU_MEDITATION
2878 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2879 break;
2880
2881 case VMSTATE_RESETTING:
2882 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2883 break;
2884
2885 case VMSTATE_RESETTING_LS:
2886 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
2887 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2888 break;
2889
2890 case VMSTATE_SUSPENDING:
2891 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2892 break;
2893
2894 case VMSTATE_SUSPENDING_LS:
2895 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
2896 || enmStateNew == VMSTATE_SUSPENDED_LS
2897 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2898 break;
2899
2900 case VMSTATE_SUSPENDING_EXT_LS:
2901 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
2902 || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
2903 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2904 break;
2905
2906 case VMSTATE_SUSPENDED:
2907 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2908 || enmStateNew == VMSTATE_SAVING
2909 || enmStateNew == VMSTATE_RESETTING
2910 || enmStateNew == VMSTATE_RESUMING
2911 || enmStateNew == VMSTATE_LOADING
2912 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2913 break;
2914
2915 case VMSTATE_SUSPENDED_LS:
2916 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
2917 || enmStateNew == VMSTATE_SAVING
2918 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2919 break;
2920
2921 case VMSTATE_SUSPENDED_EXT_LS:
2922 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
2923 || enmStateNew == VMSTATE_SAVING
2924 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2925 break;
2926
2927 case VMSTATE_SAVING:
2928 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2929 break;
2930
2931 case VMSTATE_DEBUGGING:
2932 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2933 || enmStateNew == VMSTATE_POWERING_OFF
2934 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2935 break;
2936
2937 case VMSTATE_DEBUGGING_LS:
2938 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
2939 || enmStateNew == VMSTATE_RUNNING_LS
2940 || enmStateNew == VMSTATE_POWERING_OFF_LS
2941 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2942 break;
2943
2944 case VMSTATE_POWERING_OFF:
2945 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2946 break;
2947
2948 case VMSTATE_POWERING_OFF_LS:
2949 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2950 || enmStateNew == VMSTATE_OFF_LS
2951 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2952 break;
2953
2954 case VMSTATE_OFF:
2955 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2956 break;
2957
2958 case VMSTATE_OFF_LS:
2959 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2960 break;
2961
2962 case VMSTATE_FATAL_ERROR:
2963 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2964 break;
2965
2966 case VMSTATE_FATAL_ERROR_LS:
2967 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
2968 || enmStateNew == VMSTATE_POWERING_OFF_LS
2969 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2970 break;
2971
2972 case VMSTATE_GURU_MEDITATION:
2973 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
2974 || enmStateNew == VMSTATE_POWERING_OFF
2975 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2976 break;
2977
2978 case VMSTATE_GURU_MEDITATION_LS:
2979 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
2980 || enmStateNew == VMSTATE_DEBUGGING_LS
2981 || enmStateNew == VMSTATE_POWERING_OFF_LS
2982 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2983 break;
2984
2985 case VMSTATE_LOAD_FAILURE:
2986 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2987 break;
2988
2989 case VMSTATE_DESTROYING:
2990 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2991 break;
2992
2993 case VMSTATE_TERMINATED:
2994 default:
2995 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2996 break;
2997 }
2998#endif /* VBOX_STRICT */
2999 return true;
3000}
3001
3002
3003/**
3004 * Does the state change callouts.
3005 *
3006 * The caller owns the AtStateCritSect.
3007 *
3008 * @param pVM The VM handle.
3009 * @param pUVM The UVM handle.
3010 * @param enmStateNew The New state.
3011 * @param enmStateOld The old state.
3012 */
3013static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3014{
3015 LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3016
3017 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3018 {
3019 pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
3020 if ( enmStateNew != VMSTATE_DESTROYING
3021 && pVM->enmVMState == VMSTATE_DESTROYING)
3022 break;
3023 AssertMsg(pVM->enmVMState == enmStateNew,
3024 ("You are not allowed to change the state while in the change callback, except "
3025 "from destroying the VM. There are restrictions in the way the state changes "
3026 "are propagated up to the EM execution loop and it makes the program flow very "
3027 "difficult to follow. (%s, expected %s, old %s)\n",
3028 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3029 VMR3GetStateName(enmStateOld)));
3030 }
3031}
3032
3033
/**
 * Sets the current VM state, with the AtStatCritSect already entered.
 *
 * Validates the transition (strict builds), asserts that the caller's idea of
 * the old state matches reality, records the previous state in the UVM,
 * publishes the new state, clears the state-check force-action flag (set e.g.
 * by vmR3SetRuntimeErrorChangeState) and finally runs the registered at-state
 * callbacks.  The statement order here matters: the new state must be visible
 * before the callbacks are invoked.
 *
 * @param   pVM             The VM handle.
 * @param   pUVM            The UVM handle.
 * @param   enmStateNew     The new state.
 * @param   enmStateOld     The old state.
 */
static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
{
    vmR3ValidateStateTransition(enmStateOld, enmStateNew);

    AssertMsg(pVM->enmVMState == enmStateOld,
              ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
    pUVM->vm.s.enmPrevVMState = enmStateOld;
    pVM->enmVMState = enmStateNew;
    VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);

    vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
}
3054
3055
/**
 * Sets the current VM state.
 *
 * Takes the AtStateCritSect and defers to vmR3SetStateLocked.  Note that the
 * current state (pVM->enmVMState) is what gets forwarded as the old state;
 * enmStateOld is only used for the assertion here, so a mismatch is reported
 * but does not change which transition is performed.
 *
 * @param   pVM             VM handle.
 * @param   enmStateNew     The new state.
 * @param   enmStateOld     The old state (for asserting only).
 */
static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
{
    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);

    AssertMsg(pVM->enmVMState == enmStateOld,
              ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
    vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);

    RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
}
3074
3075
3076/**
3077 * Tries to perform a state transition.
3078 *
3079 * @returns The 1-based ordinal of the succeeding transition.
3080 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3081 *
3082 * @param pVM The VM handle.
3083 * @param pszWho Who is trying to change it.
3084 * @param cTransitions The number of transitions in the ellipsis.
3085 * @param ... Transition pairs; new, old.
3086 */
3087static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3088{
3089 va_list va;
3090 VMSTATE enmStateNew = VMSTATE_CREATED;
3091 VMSTATE enmStateOld = VMSTATE_CREATED;
3092
3093#ifdef VBOX_STRICT
3094 /*
3095 * Validate the input first.
3096 */
3097 va_start(va, cTransitions);
3098 for (unsigned i = 0; i < cTransitions; i++)
3099 {
3100 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3101 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3102 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3103 }
3104 va_end(va);
3105#endif
3106
3107 /*
3108 * Grab the lock and see if any of the proposed transisions works out.
3109 */
3110 va_start(va, cTransitions);
3111 int rc = VERR_VM_INVALID_VM_STATE;
3112 PUVM pUVM = pVM->pUVM;
3113 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3114
3115 VMSTATE enmStateCur = pVM->enmVMState;
3116
3117 for (unsigned i = 0; i < cTransitions; i++)
3118 {
3119 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3120 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3121 if (enmStateCur == enmStateOld)
3122 {
3123 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
3124 rc = i + 1;
3125 break;
3126 }
3127 }
3128
3129 if (RT_FAILURE(rc))
3130 {
3131 /*
3132 * Complain about it.
3133 */
3134 if (cTransitions == 1)
3135 {
3136 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3137 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3138 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3139 N_("%s failed because the VM state is %s instead of %s"),
3140 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3141 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3142 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3143 }
3144 else
3145 {
3146 va_end(va);
3147 va_start(va, cTransitions);
3148 LogRel(("%s:\n", pszWho));
3149 for (unsigned i = 0; i < cTransitions; i++)
3150 {
3151 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3152 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3153 LogRel(("%s%s -> %s",
3154 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3155 }
3156 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3157 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3158 N_("%s failed because the current VM state, %s, was not found in the state transition table"),
3159 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3160 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3161 pszWho, VMR3GetStateName(enmStateCur)));
3162 }
3163 }
3164
3165 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3166 va_end(va);
3167 Assert(rc > 0 || rc < 0);
3168 return rc;
3169}
3170
3171
3172/**
3173 * Flag a guru meditation ... a hack.
3174 *
3175 * @param pVM The VM handle
3176 *
3177 * @todo Rewrite this part. The guru meditation should be flagged
3178 * immediately by the VMM and not by VMEmt.cpp when it's all over.
3179 */
3180void vmR3SetGuruMeditation(PVM pVM)
3181{
3182 PUVM pUVM = pVM->pUVM;
3183 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3184
3185 VMSTATE enmStateCur = pVM->enmVMState;
3186 if (enmStateCur == VMSTATE_RUNNING)
3187 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING);
3188 else if (enmStateCur == VMSTATE_RUNNING_LS)
3189 {
3190 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS);
3191 SSMR3Cancel(pVM);
3192 }
3193
3194 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3195}
3196
3197
/**
 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
 *
 * Performs the final DESTROYING -> TERMINATED state transition.
 *
 * @param   pVM     The VM handle.
 */
void vmR3SetTerminated(PVM pVM)
{
    vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
}
3207
3208
/**
 * Checks if the VM was teleported and hasn't been fully resumed yet.
 *
 * This applies to both sides of the teleportation since we may leave a working
 * clone behind and the user is allowed to resume this...
 *
 * @returns true / false.
 * @param   pVM     The VM handle.
 * @thread  Any thread.
 */
VMMR3DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
}
3224
3225
3226/**
3227 * Registers a VM state change callback.
3228 *
3229 * You are not allowed to call any function which changes the VM state from a
3230 * state callback.
3231 *
3232 * @returns VBox status code.
3233 * @param pVM VM handle.
3234 * @param pfnAtState Pointer to callback.
3235 * @param pvUser User argument.
3236 * @thread Any.
3237 */
3238VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3239{
3240 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3241
3242 /*
3243 * Validate input.
3244 */
3245 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3246 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3247
3248 /*
3249 * Allocate a new record.
3250 */
3251 PUVM pUVM = pVM->pUVM;
3252 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3253 if (!pNew)
3254 return VERR_NO_MEMORY;
3255
3256 /* fill */
3257 pNew->pfnAtState = pfnAtState;
3258 pNew->pvUser = pvUser;
3259
3260 /* insert */
3261 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3262 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3263 *pUVM->vm.s.ppAtStateNext = pNew;
3264 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3265 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3266
3267 return VINF_SUCCESS;
3268}
3269
3270
3271/**
3272 * Deregisters a VM state change callback.
3273 *
3274 * @returns VBox status code.
3275 * @param pVM VM handle.
3276 * @param pfnAtState Pointer to callback.
3277 * @param pvUser User argument.
3278 * @thread Any.
3279 */
3280VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
3281{
3282 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3283
3284 /*
3285 * Validate input.
3286 */
3287 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3288 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3289
3290 PUVM pUVM = pVM->pUVM;
3291 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3292
3293 /*
3294 * Search the list for the entry.
3295 */
3296 PVMATSTATE pPrev = NULL;
3297 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3298 while ( pCur
3299 && ( pCur->pfnAtState != pfnAtState
3300 || pCur->pvUser != pvUser))
3301 {
3302 pPrev = pCur;
3303 pCur = pCur->pNext;
3304 }
3305 if (!pCur)
3306 {
3307 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3308 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3309 return VERR_FILE_NOT_FOUND;
3310 }
3311
3312 /*
3313 * Unlink it.
3314 */
3315 if (pPrev)
3316 {
3317 pPrev->pNext = pCur->pNext;
3318 if (!pCur->pNext)
3319 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3320 }
3321 else
3322 {
3323 pUVM->vm.s.pAtState = pCur->pNext;
3324 if (!pCur->pNext)
3325 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3326 }
3327
3328 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3329
3330 /*
3331 * Free it.
3332 */
3333 pCur->pfnAtState = NULL;
3334 pCur->pNext = NULL;
3335 MMR3HeapFree(pCur);
3336
3337 return VINF_SUCCESS;
3338}
3339
3340
/**
 * Registers a VM error callback.
 *
 * Thin wrapper that forwards to VMR3AtErrorRegisterU using the user mode VM
 * structure associated with pVM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pfnAtError  Pointer to callback.
 * @param   pvUser      User argument.
 * @thread  Any.
 */
VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
{
    return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
}
3354
3355
3356/**
3357 * Registers a VM error callback.
3358 *
3359 * @returns VBox status code.
3360 * @param pUVM The VM handle.
3361 * @param pfnAtError Pointer to callback.
3362 * @param pvUser User argument.
3363 * @thread Any.
3364 */
3365VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3366{
3367 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3368
3369 /*
3370 * Validate input.
3371 */
3372 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3373 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3374
3375 /*
3376 * Allocate a new record.
3377 */
3378 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3379 if (!pNew)
3380 return VERR_NO_MEMORY;
3381
3382 /* fill */
3383 pNew->pfnAtError = pfnAtError;
3384 pNew->pvUser = pvUser;
3385
3386 /* insert */
3387 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3388 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3389 *pUVM->vm.s.ppAtErrorNext = pNew;
3390 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3391 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3392
3393 return VINF_SUCCESS;
3394}
3395
3396
3397/**
3398 * Deregisters a VM error callback.
3399 *
3400 * @returns VBox status code.
3401 * @param pVM The VM handle.
3402 * @param pfnAtError Pointer to callback.
3403 * @param pvUser User argument.
3404 * @thread Any.
3405 */
3406VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3407{
3408 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3409
3410 /*
3411 * Validate input.
3412 */
3413 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3414 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3415
3416 PUVM pUVM = pVM->pUVM;
3417 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3418
3419 /*
3420 * Search the list for the entry.
3421 */
3422 PVMATERROR pPrev = NULL;
3423 PVMATERROR pCur = pUVM->vm.s.pAtError;
3424 while ( pCur
3425 && ( pCur->pfnAtError != pfnAtError
3426 || pCur->pvUser != pvUser))
3427 {
3428 pPrev = pCur;
3429 pCur = pCur->pNext;
3430 }
3431 if (!pCur)
3432 {
3433 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3434 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3435 return VERR_FILE_NOT_FOUND;
3436 }
3437
3438 /*
3439 * Unlink it.
3440 */
3441 if (pPrev)
3442 {
3443 pPrev->pNext = pCur->pNext;
3444 if (!pCur->pNext)
3445 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3446 }
3447 else
3448 {
3449 pUVM->vm.s.pAtError = pCur->pNext;
3450 if (!pCur->pNext)
3451 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3452 }
3453
3454 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3455
3456 /*
3457 * Free it.
3458 */
3459 pCur->pfnAtError = NULL;
3460 pCur->pNext = NULL;
3461 MMR3HeapFree(pCur);
3462
3463 return VINF_SUCCESS;
3464}
3465
3466
/**
 * Ellipsis to va_list wrapper for calling pfnAtError.
 *
 * @param   pVM         The VM handle.
 * @param   pCur        The callback registration record to invoke.
 * @param   rc          The VBox status code to report.
 * @param   RT_SRC_POS_DECL     The source position of the error.
 * @param   pszFormat   Format string.
 * @param   ...         Format arguments.
 */
static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
    va_end(va);
}
3477
3478
3479/**
3480 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3481 * The message is found in VMINT.
3482 *
3483 * @param pVM The VM handle.
3484 * @thread EMT.
3485 */
3486VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3487{
3488 VM_ASSERT_EMT(pVM);
3489 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Contrats!\n"));
3490
3491 /*
3492 * Unpack the error (if we managed to format one).
3493 */
3494 PVMERROR pErr = pVM->vm.s.pErrorR3;
3495 const char *pszFile = NULL;
3496 const char *pszFunction = NULL;
3497 uint32_t iLine = 0;
3498 const char *pszMessage;
3499 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3500 if (pErr)
3501 {
3502 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3503 if (pErr->offFile)
3504 pszFile = (const char *)pErr + pErr->offFile;
3505 iLine = pErr->iLine;
3506 if (pErr->offFunction)
3507 pszFunction = (const char *)pErr + pErr->offFunction;
3508 if (pErr->offMessage)
3509 pszMessage = (const char *)pErr + pErr->offMessage;
3510 else
3511 pszMessage = "No message!";
3512 }
3513 else
3514 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3515
3516 /*
3517 * Call the at error callbacks.
3518 */
3519 PUVM pUVM = pVM->pUVM;
3520 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3521 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3522 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3523 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3524 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3525}
3526
3527
3528/**
3529 * Gets the number of errors raised via VMSetError.
3530 *
3531 * This can be used avoid double error messages.
3532 *
3533 * @returns The error count.
3534 * @param pVM The VM handle.
3535 */
3536VMMR3DECL(uint32_t) VMR3GetErrorCount(PVM pVM)
3537{
3538 return pVM->pUVM->vm.s.cErrors;
3539}
3540
3541
/**
 * Creation time wrapper for vmR3SetErrorUV.
 *
 * Converts the ellipsis into a va_list and forwards everything, including the
 * source position (pszFile/iLine/pszFunction, expanded from RT_SRC_POS_DECL).
 *
 * @returns rc.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   RT_SRC_POS_DECL The source position of this error.
 * @param   pszFormat       Format string.
 * @param   ...             The arguments.
 * @thread  Any thread.
 */
static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
    va_end(va);
    return rc;
}
3561
3562
3563/**
3564 * Worker which calls everyone listening to the VM error messages.
3565 *
3566 * @param pUVM Pointer to the user mode VM structure.
3567 * @param rc The VBox status code.
3568 * @param RT_SRC_POS_DECL The source position of this error.
3569 * @param pszFormat Format string.
3570 * @param pArgs Pointer to the format arguments.
3571 * @thread EMT
3572 */
3573DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3574{
3575 /*
3576 * Log the error.
3577 */
3578 va_list va3;
3579 va_copy(va3, *pArgs);
3580 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3581 "VMSetError: %N\n",
3582 pszFile, iLine, pszFunction, rc,
3583 pszFormat, &va3);
3584 va_end(va3);
3585
3586#ifdef LOG_ENABLED
3587 va_copy(va3, *pArgs);
3588 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
3589 "%N\n",
3590 pszFile, iLine, pszFunction, rc,
3591 pszFormat, &va3);
3592 va_end(va3);
3593#endif
3594
3595 /*
3596 * Make a copy of the message.
3597 */
3598 if (pUVM->pVM)
3599 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3600
3601 /*
3602 * Call the at error callbacks.
3603 */
3604 bool fCalledSomeone = false;
3605 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3606 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
3607 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3608 {
3609 va_list va2;
3610 va_copy(va2, *pArgs);
3611 pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3612 va_end(va2);
3613 fCalledSomeone = true;
3614 }
3615 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3616}
3617
3618
3619/**
3620 * Registers a VM runtime error callback.
3621 *
3622 * @returns VBox status code.
3623 * @param pVM The VM handle.
3624 * @param pfnAtRuntimeError Pointer to callback.
3625 * @param pvUser User argument.
3626 * @thread Any.
3627 */
3628VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3629{
3630 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3631
3632 /*
3633 * Validate input.
3634 */
3635 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3636 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3637
3638 /*
3639 * Allocate a new record.
3640 */
3641 PUVM pUVM = pVM->pUVM;
3642 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3643 if (!pNew)
3644 return VERR_NO_MEMORY;
3645
3646 /* fill */
3647 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3648 pNew->pvUser = pvUser;
3649
3650 /* insert */
3651 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3652 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3653 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3654 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3655 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3656
3657 return VINF_SUCCESS;
3658}
3659
3660
3661/**
3662 * Deregisters a VM runtime error callback.
3663 *
3664 * @returns VBox status code.
3665 * @param pVM The VM handle.
3666 * @param pfnAtRuntimeError Pointer to callback.
3667 * @param pvUser User argument.
3668 * @thread Any.
3669 */
3670VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3671{
3672 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3673
3674 /*
3675 * Validate input.
3676 */
3677 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3678 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3679
3680 PUVM pUVM = pVM->pUVM;
3681 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3682
3683 /*
3684 * Search the list for the entry.
3685 */
3686 PVMATRUNTIMEERROR pPrev = NULL;
3687 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3688 while ( pCur
3689 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3690 || pCur->pvUser != pvUser))
3691 {
3692 pPrev = pCur;
3693 pCur = pCur->pNext;
3694 }
3695 if (!pCur)
3696 {
3697 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
3698 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3699 return VERR_FILE_NOT_FOUND;
3700 }
3701
3702 /*
3703 * Unlink it.
3704 */
3705 if (pPrev)
3706 {
3707 pPrev->pNext = pCur->pNext;
3708 if (!pCur->pNext)
3709 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
3710 }
3711 else
3712 {
3713 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
3714 if (!pCur->pNext)
3715 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
3716 }
3717
3718 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3719
3720 /*
3721 * Free it.
3722 */
3723 pCur->pfnAtRuntimeError = NULL;
3724 pCur->pNext = NULL;
3725 MMR3HeapFree(pCur);
3726
3727 return VINF_SUCCESS;
3728}
3729
3730
3731/**
3732 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
3733 * the state to FatalError(LS).
3734 *
3735 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPENED. (This is a strict
3736 * return code, see FNVMMEMTRENDEZVOUS.)
3737 *
3738 * @param pVM The VM handle.
3739 * @param pVCpu The VMCPU handle of the EMT.
3740 * @param pvUser Ignored.
3741 */
3742static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
3743{
3744 NOREF(pVCpu);
3745 Assert(!pvUser); NOREF(pvUser);
3746
3747 /*
3748 * The first EMT thru here changes the state.
3749 */
3750 if (pVCpu->idCpu == pVM->cCpus - 1)
3751 {
3752 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
3753 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
3754 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
3755 if (RT_FAILURE(rc))
3756 return rc;
3757 if (rc == 2)
3758 SSMR3Cancel(pVM);
3759
3760 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3761 }
3762
3763 /* This'll make sure we get out of whereever we are (e.g. REM). */
3764 return VINF_EM_SUSPEND;
3765}
3766
3767
3768/**
3769 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
3770 *
3771 * This does the common parts after the error has been saved / retrieved.
3772 *
3773 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
3774 *
3775 * @param pVM The VM handle.
3776 * @param fFlags The error flags.
3777 * @param pszErrorId Error ID string.
3778 * @param pszFormat Format string.
3779 * @param pVa Pointer to the format arguments.
3780 */
3781static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
3782{
3783 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
3784
3785 /*
3786 * Take actions before the call.
3787 */
3788 int rc;
3789 if (fFlags & VMSETRTERR_FLAGS_FATAL)
3790 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
3791 vmR3SetRuntimeErrorChangeState, NULL);
3792 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
3793 rc = VMR3Suspend(pVM);
3794 else
3795 rc = VINF_SUCCESS;
3796
3797 /*
3798 * Do the callback round.
3799 */
3800 PUVM pUVM = pVM->pUVM;
3801 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3802 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3803 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
3804 {
3805 va_list va;
3806 va_copy(va, *pVa);
3807 pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
3808 va_end(va);
3809 }
3810 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3811
3812 return rc;
3813}
3814
3815
/**
 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
 *
 * @returns VBox status code from vmR3SetRuntimeErrorCommon.
 * @param   pVM         The VM handle.
 * @param   fFlags      The error flags.
 * @param   pszErrorId  Error ID string.
 * @param   pszFormat   Format string.
 * @param   ...         Format arguments.
 */
static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
    va_end(va);
    return rc;
}
3827
3828
/**
 * This is a worker function for RC and Ring-0 calls to VMSetError and
 * VMSetErrorV.
 *
 * The message is found in VMINT.
 *
 * Unpacks the packed runtime error record (pRuntimeErrorR3) and forwards it
 * to the common runtime error path.
 *
 * @returns VBox status code, see VMSetRuntimeError.
 * @param   pVM     The VM handle.
 * @thread  EMT.
 */
VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));

    /*
     * Unpack the error (if we managed to format one).
     */
    const char     *pszErrorId = "SetRuntimeError";
    const char     *pszMessage = "No message!";
    uint32_t        fFlags     = VMSETRTERR_FLAGS_FATAL;
    PVMRUNTIMEERROR pErr       = pVM->vm.s.pRuntimeErrorR3;
    if (pErr)
    {
        AssertCompile(sizeof(const char) == sizeof(uint8_t));
        /* The strings are stored as byte offsets relative to the record
           itself; a zero offset means the field is absent. */
        if (pErr->offErrorId)
            pszErrorId = (const char *)pErr + pErr->offErrorId;
        if (pErr->offMessage)
            pszMessage = (const char *)pErr + pErr->offMessage;
        fFlags = pErr->fFlags;
    }

    /*
     * Join cause with vmR3SetRuntimeErrorV.
     */
    return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
}
3866
3867
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * This variant receives an already formatted message allocated on the MM heap
 * and frees it once the callbacks have run.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM         The VM handle.
 * @param   fFlags      The error flags.
 * @param   pszErrorId  Error ID string.
 * @param   pszMessage  The error message residing the MM heap.  Ownership is
 *                      taken by this function (freed before returning).
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
{
#if 0 /** @todo make copy of the error msg. */
    /*
     * Make a copy of the message.
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);
#endif

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
    MMR3HeapFree(pszMessage);
    return rc;
}
3899
3900
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * Copies the message into the VM structure (via vmSetRuntimeErrorCopy) and
 * then runs the common runtime error path with the original argument list.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM         The VM handle.
 * @param   fFlags      The error flags.
 * @param   pszErrorId  Error ID string.
 * @param   pszFormat   Format string.
 * @param   pVa         Pointer to the format arguments.
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
{
    /*
     * Make a copy of the message.
     * (va_copy is used so *pVa stays valid for the common worker below.)
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
}
3929
3930
3931/**
3932 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
3933 *
3934 * This can be used avoid double error messages.
3935 *
3936 * @returns The runtime error count.
3937 * @param pVM The VM handle.
3938 */
3939VMMR3DECL(uint32_t) VMR3GetRuntimeErrorCount(PVM pVM)
3940{
3941 return pVM->pUVM->vm.s.cRuntimeErrors;
3942}
3943
3944
3945/**
3946 * Gets the ID virtual of the virtual CPU assoicated with the calling thread.
3947 *
3948 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
3949 *
3950 * @param pVM The VM handle.
3951 */
3952VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
3953{
3954 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
3955 return pUVCpu
3956 ? pUVCpu->idCpu
3957 : NIL_VMCPUID;
3958}
3959
3960
3961/**
3962 * Returns the native handle of the current EMT VMCPU thread.
3963 *
3964 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
3965 * @param pVM The VM handle.
3966 * @thread EMT
3967 */
3968VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
3969{
3970 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
3971
3972 if (!pUVCpu)
3973 return NIL_RTNATIVETHREAD;
3974
3975 return pUVCpu->vm.s.NativeThreadEMT;
3976}
3977
3978
3979/**
3980 * Returns the native handle of the current EMT VMCPU thread.
3981 *
3982 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
3983 * @param pVM The VM handle.
3984 * @thread EMT
3985 */
3986VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
3987{
3988 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
3989
3990 if (!pUVCpu)
3991 return NIL_RTNATIVETHREAD;
3992
3993 return pUVCpu->vm.s.NativeThreadEMT;
3994}
3995
3996
3997/**
3998 * Returns the handle of the current EMT VMCPU thread.
3999 *
4000 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4001 * @param pVM The VM handle.
4002 * @thread EMT
4003 */
4004VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
4005{
4006 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4007
4008 if (!pUVCpu)
4009 return NIL_RTTHREAD;
4010
4011 return pUVCpu->vm.s.ThreadEMT;
4012}
4013
4014
4015/**
4016 * Returns the handle of the current EMT VMCPU thread.
4017 *
4018 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4019 * @param pVM The VM handle.
4020 * @thread EMT
4021 */
4022VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
4023{
4024 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4025
4026 if (!pUVCpu)
4027 return NIL_RTTHREAD;
4028
4029 return pUVCpu->vm.s.ThreadEMT;
4030}
4031
4032
4033/**
4034 * Return the package and core id of a CPU.
4035 *
4036 * @returns VBOX status code.
4037 * @param pVM The VM to operate on.
4038 * @param idCpu Virtual CPU to get the ID from.
4039 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4040 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4041 *
4042 */
4043VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PVM pVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4044{
4045 if (idCpu >= pVM->cCpus)
4046 return VERR_INVALID_CPU_ID;
4047
4048#ifdef VBOX_WITH_MULTI_CORE
4049 *pidCpuCore = idCpu;
4050 *pidCpuPackage = 0;
4051#else
4052 *pidCpuCore = 0;
4053 *pidCpuPackage = idCpu;
4054#endif
4055
4056 return VINF_SUCCESS;
4057}
4058
4059
4060/**
4061 * Worker for VMR3HotUnplugCpu.
4062 *
4063 * @returns VINF_EM_WAIT_SPIP (strict status code).
4064 * @param pVM The VM handle.
4065 * @param idCpu The current CPU.
4066 */
4067static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4068{
4069 PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
4070 VMCPU_ASSERT_EMT(pVCpu);
4071
4072 /*
4073 * Reset per CPU resources.
4074 *
4075 * Actually only needed for VT-x because the CPU seems to be still in some
4076 * paged mode and startup fails after a new hot plug event. SVM works fine
4077 * even without this.
4078 */
4079 Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
4080 PGMR3ResetUnpluggedCpu(pVM, pVCpu);
4081 PDMR3ResetCpu(pVCpu);
4082 TRPMR3ResetCpu(pVCpu);
4083 CPUMR3ResetCpu(pVCpu);
4084 EMR3ResetCpu(pVCpu);
4085 HWACCMR3ResetCpu(pVCpu);
4086 return VINF_EM_WAIT_SIPI;
4087}
4088
4089
4090/**
4091 * Hot-unplugs a CPU from the guest.
4092 *
4093 * @returns VBox status code.
4094 * @param pVM The VM to operate on.
4095 * @param idCpu Virtual CPU to perform the hot unplugging operation on.
4096 */
4097VMMR3DECL(int) VMR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
4098{
4099 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4100
4101 /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
4102 * broadcast requests. Just note down somewhere that the CPU is
4103 * offline and send it to SPIP wait. Maybe modify VMCPUSTATE and push
4104 * it out of the EM loops when offline. */
4105 return VMR3ReqCallNoWaitU(pVM->pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
4106}
4107
4108
4109/**
4110 * Hot-plugs a CPU on the guest.
4111 *
4112 * @returns VBox status code.
4113 * @param pVM The VM to operate on.
4114 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4115 */
4116VMMR3DECL(int) VMR3HotPlugCpu(PVM pVM, VMCPUID idCpu)
4117{
4118 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4119
4120 /** @todo r-bird: Just mark it online and make sure it waits on SPIP. */
4121 return VINF_SUCCESS;
4122}
4123
4124
4125/**
4126 * Changes the VCPU priority.
4127 *
4128 * @returns VBox status code.
4129 * @param pVM The VM to operate on.
4130 * @param ulCpuPriority New CPU priority
4131 */
4132VMMR3DECL(int) VMR3SetCpuPriority(PVM pVM, unsigned ulCpuPriority)
4133{
4134 AssertReturn(ulCpuPriority > 0 && ulCpuPriority <= 100, VERR_INVALID_PARAMETER);
4135
4136 Log(("VMR3SetCpuPriority: new priority = %d\n", ulCpuPriority));
4137 /* Note: not called from EMT. */
4138 pVM->uCpuPriority = ulCpuPriority;
4139 return VINF_SUCCESS;
4140}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette