VirtualBox

source: vbox/trunk/src/VBox/VMM/VM.cpp@ 23011

Last change on this file since 23011 was 23011, checked in by vboxsync, 15 years ago

VMM,VMMDev: Some VMMR3ReqCall refactoring.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 123.0 KB
Line 
1/* $Id: VM.cpp 23011 2009-09-14 15:57:38Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_vm VM API
23 *
24 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
25 * use to create a VMM instance for running a guest in. It also provides
26 * facilities for queuing request for execution in EMT (serialization purposes
27 * mostly) and for reporting error back to the VMM user (Main/VBoxBFE).
28 *
29 *
30 * @section sec_vm_design Design Critique / Things To Do
31 *
 * In hindsight this component is a big design mistake; all this stuff really
 * belongs in the VMM component.  It just seemed like a kind of OK idea at a
 * time when the VMM bit was still somewhat vague.  'VM' also happened to be
 * the name of the per-VM instance structure (see vm.h), so it kind of made
 * sense.  However, as it turned out, VMM(.cpp) is almost empty; all it
 * provides in ring-3 is some minor functionality and some "routing" services.
38 *
39 * Fixing this is just a matter of some more or less straight forward
40 * refactoring, the question is just when someone will get to it. Moving the EMT
41 * would be a good start.
42 *
43 */
44
45/*******************************************************************************
46* Header Files *
47*******************************************************************************/
48#define LOG_GROUP LOG_GROUP_VM
49#include <VBox/cfgm.h>
50#include <VBox/vmm.h>
51#include <VBox/gvmm.h>
52#include <VBox/mm.h>
53#include <VBox/cpum.h>
54#include <VBox/selm.h>
55#include <VBox/trpm.h>
56#include <VBox/dbgf.h>
57#include <VBox/pgm.h>
58#include <VBox/pdmapi.h>
59#include <VBox/pdmcritsect.h>
60#include <VBox/em.h>
61#include <VBox/rem.h>
62#include <VBox/tm.h>
63#include <VBox/stam.h>
64#include <VBox/patm.h>
65#ifdef VBOX_WITH_VMI
66# include <VBox/parav.h>
67#endif
68#include <VBox/csam.h>
69#include <VBox/iom.h>
70#include <VBox/ssm.h>
71#include <VBox/hwaccm.h>
72#include "VMInternal.h"
73#include <VBox/vm.h>
74#include <VBox/uvm.h>
75
76#include <VBox/sup.h>
77#include <VBox/dbg.h>
78#include <VBox/err.h>
79#include <VBox/param.h>
80#include <VBox/log.h>
81#include <iprt/assert.h>
82#include <iprt/alloc.h>
83#include <iprt/asm.h>
84#include <iprt/env.h>
85#include <iprt/string.h>
86#include <iprt/time.h>
87#include <iprt/semaphore.h>
88#include <iprt/thread.h>
89
90
91/*******************************************************************************
92* Structures and Typedefs *
93*******************************************************************************/
/**
 * VM destruction callback registration record.
 *
 * Records are kept on the singly linked g_pVMAtDtorHead list, guarded by
 * the VM_ATDTOR_LOCK()/VM_ATDTOR_UNLOCK() macros; presumably walked by
 * vmR3AtDtor() when a VM is destroyed (body not visible here — confirm).
 */
typedef struct VMATDTOR
{
    /** Pointer to the next record in the list (NULL terminates the list). */
    struct VMATDTOR *pNext;
    /** Pointer to the callback function invoked at VM destruction. */
    PFNVMATDTOR pfnAtDtor;
    /** The user argument passed unchanged to pfnAtDtor. */
    void *pvUser;
} VMATDTOR;
/** Pointer to a VM destruction callback registration record. */
typedef VMATDTOR *PVMATDTOR;
108
109
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Pointer to the list of VMs (head of the UVM list, linked via UVM::pNext). */
static PUVM g_pUVMsHead = NULL;

/** Pointer to the list of at VM destruction callbacks. */
static PVMATDTOR g_pVMAtDtorHead = NULL;
/** Lock the g_pVMAtDtorHead list.
 * @note Currently expands to nothing, i.e. list access is NOT actually
 *       serialized — safe only while registration happens single-threaded. */
#define VM_ATDTOR_LOCK() do { } while (0)
/** Unlock the g_pVMAtDtorHead list.
 * @note No-op counterpart of VM_ATDTOR_LOCK(). */
#define VM_ATDTOR_UNLOCK() do { } while (0)
122
123
124/*******************************************************************************
125* Internal Functions *
126*******************************************************************************/
127static int vmR3CreateUVM(uint32_t cCpus, PUVM *ppUVM);
128static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
129static int vmR3InitRing3(PVM pVM, PUVM pUVM);
130static int vmR3InitVMCpu(PVM pVM);
131static int vmR3InitRing0(PVM pVM);
132static int vmR3InitGC(PVM pVM);
133static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
134static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
135static DECLCALLBACK(int) vmR3PowerOff(PVM pVM);
136static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
137static void vmR3AtDtor(PVM pVM);
138static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
139static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
140static bool vmR3TrySetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
141static unsigned vmR3TrySetState2(PVM pVM, VMSTATE enmStateNew1, VMSTATE enmStateOld1, VMSTATE enmStateNew2, VMSTATE enmStateOld2);
142static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
143static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
144static unsigned vmR3SetState2(PVM pVM, VMSTATE enmStateNew1, VMSTATE enmStateOld1, VMSTATE enmStateNew2, VMSTATE enmStateOld2);
145static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...);
146
147
148/**
149 * Do global VMM init.
150 *
151 * @returns VBox status code.
152 */
153VMMR3DECL(int) VMR3GlobalInit(void)
154{
155 /*
156 * Only once.
157 */
158 static bool volatile s_fDone = false;
159 if (s_fDone)
160 return VINF_SUCCESS;
161
162 /*
163 * We're done.
164 */
165 s_fDone = true;
166 return VINF_SUCCESS;
167}
168
169
170
/**
 * Creates a virtual machine by calling the supplied configuration constructor.
 *
 * On successful returned the VM is powered, i.e. VMR3PowerOn() should be
 * called to start the execution.
 *
 * @returns 0 on success.
 * @returns VBox error code on failure.
 * @param   cCpus               Number of virtual CPUs for the new VM.
 * @param   pfnVMAtError        Pointer to callback function for setting VM
 *                              errors. This was added as an implicit call to
 *                              VMR3AtErrorRegister() since there is no way the
 *                              caller can get to the VM handle early enough to
 *                              do this on its own.
 *                              This is called in the context of an EMT.
 * @param   pvUserVM            The user argument passed to pfnVMAtError.
 * @param   pfnCFGMConstructor  Pointer to callback function for constructing the VM configuration tree.
 *                              This is called in the context of an EMT0.
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 * @param   ppVM                Where to store the 'handle' of the created VM.
 */
VMMR3DECL(int) VMR3Create(uint32_t cCpus, PFNVMATERROR pfnVMAtError, void *pvUserVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM, PVM *ppVM)
{
    LogFlow(("VMR3Create: cCpus=%RU32 pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n",
             cCpus, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));

    /*
     * Because of the current hackiness of the applications
     * we'll have to initialize global stuff from here.
     * Later the applications will take care of this in a proper way.
     *
     * Note: this latch is not thread safe; it relies on the first VM being
     * created from a single thread (same assumption as VMR3GlobalInit).
     */
    static bool fGlobalInitDone = false;
    if (!fGlobalInitDone)
    {
        int rc = VMR3GlobalInit();
        if (RT_FAILURE(rc))
            return rc;
        fGlobalInitDone = true;
    }

    /*
     * Validate input.
     */
    AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);

    /*
     * Create the UVM so we can register the at-error callback
     * and consolidate a bit of cleanup code.
     */
    PUVM pUVM = NULL;                   /* shuts up gcc */
    int rc = vmR3CreateUVM(cCpus, &pUVM);
    if (RT_FAILURE(rc))
        return rc;
    if (pfnVMAtError)
        rc = VMR3AtErrorRegisterU(pUVM, pfnVMAtError, pvUserVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the support library creating the session for this VM.
         */
        rc = SUPR3Init(&pUVM->vm.s.pSession);
        if (RT_SUCCESS(rc))
        {
            /*
             * Call vmR3CreateU in the EMT thread and wait for it to finish.
             *
             * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
             *       submitting a request to a specific VCPU without a pVM. So, to make
             *       sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
             *       that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
             */
            PVMREQ pReq;
            rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
                              (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
            if (RT_SUCCESS(rc))
            {
                /* The request succeeded; the interesting status is the one
                   vmR3CreateU produced on the EMT. */
                rc = pReq->iStatus;
                VMR3ReqFree(pReq);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Success!
                     */
                    *ppVM = pUVM->pVM;
                    LogFlow(("VMR3Create: returns VINF_SUCCESS *ppVM=%p\n", *ppVM));
                    return VINF_SUCCESS;
                }
            }
            else
                AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));

            /*
             * An error occurred during VM creation. Set the error message directly
             * using the initial callback, as the callback list doesn't exist yet.
             */
            const char *pszError = NULL;
            switch (rc)
            {
                case VERR_VMX_IN_VMX_ROOT_MODE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't operate in VMX root mode. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
#endif
                    break;

                case VERR_VERSION_MISMATCH:
                    pszError = N_("VMMR0 driver version mismatch. Please terminate all VMs, make sure that "
                                  "VBoxNetDHCP is not running and try again. If you still get this error, "
                                  "re-install VirtualBox");
                    break;

#ifdef RT_OS_LINUX
                case VERR_SUPDRV_COMPONENT_NOT_FOUND:
                    pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
                                  "that no kernel modules from an older version of VirtualBox exist. "
                                  "Then try to recompile and reload the kernel modules by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
                    break;
#endif

                case VERR_RAW_MODE_INVALID_SMP:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "VirtualBox requires this hardware extension to emulate more than one "
                                  "guest CPU");
                    break;

                case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
#ifdef RT_OS_LINUX
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
                                  "the VT-x extension in the VM settings. Note that without VT-x you have "
                                  "to reduce the number of guest CPUs to one");
#else
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel or disable the VT-x extension in the "
                                  "VM settings. Note that without VT-x you have to reduce the number of guest "
                                  "CPUs to one");
#endif
                    break;

                default:
                    pszError = N_("Unknown error creating VM");
                    break;
            }
            /* The trailing rc argument is harmless for messages without format
               specifiers; vmR3SetErrorU treats pszError as a format string. */
            vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
        else
        {
            /*
             * An error occurred at support library initialization time (before the
             * VM could be created). Set the error message directly using the
             * initial callback, as the callback list doesn't exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VM_DRIVER_LOAD_ERROR:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv is not set up properly. "
                                  "Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not loaded");
#endif
                    break;
                case VERR_VM_DRIVER_OPEN_ERROR:
                    pszError = N_("VirtualBox kernel driver cannot be opened");
                    break;
                case VERR_VM_DRIVER_NOT_ACCESSIBLE:
#ifdef VBOX_WITH_HARDENING
                    /* This should only happen if the executable wasn't hardened - bad code/build. */
                    pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
                                  "Re-install VirtualBox. If you are building it yourself, you "
                                  "should make sure it installed correctly and that the setuid "
                                  "bit is set on the executables calling VMR3Create.");
#else
                    /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
# if defined(RT_OS_DARWIN)
                    pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do not "
                                  "have the vboxdrv KEXT from a different build or installation loaded.");
# elif defined(RT_OS_LINUX)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different build or "
                                  "installation loaded. Also, make sure the vboxdrv udev rule gives "
                                  "you the permission you need to access the device.");
# elif defined(RT_OS_WINDOWS)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
# else /* solaris, freebsd, ++. */
                    pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different install loaded.");
# endif
#endif
                    break;
                case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
                case VERR_VM_DRIVER_NOT_INSTALLED:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv was not created for some "
                                  "reason. Re-setup the kernel module by executing "
                                  "'/etc/init.d/vboxdrv setup' as root");
#else
                    pszError = N_("VirtualBox kernel driver not installed");
#endif
                    break;
                case VERR_NO_MEMORY:
                    pszError = N_("VirtualBox support library out of memory");
                    break;
                case VERR_VERSION_MISMATCH:
                case VERR_VM_DRIVER_VERSION_MISMATCH:
                    pszError = N_("The VirtualBox support driver which is running is from a different "
                                  "version of VirtualBox. You can correct this by stopping all "
                                  "running instances of VirtualBox and reinstalling the software.");
                    break;
                default:
                    pszError = N_("Unknown error initializing kernel driver");
                    AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
            }
            vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
    }

    /* cleanup: tear down the UVM, waiting up to 2 seconds for the EMTs. */
    vmR3DestroyUVM(pUVM, 2000);
    LogFlow(("VMR3Create: returns %Rrc\n", rc));
    return rc;
}
403
404
/**
 * Creates the UVM.
 *
 * This will not initialize the support library even if vmR3DestroyUVM
 * will terminate that.
 *
 * Resources are acquired in order (TLS entry, per-VCPU event semaphores,
 * critical sections, STAM/MM/PDM UVM parts, EMT threads) and the nested
 * failure paths release them in exactly the reverse order.
 *
 * @returns VBox status code.
 * @param   cCpus   Number of virtual CPUs
 * @param   ppUVM   Where to store the UVM pointer.
 */
static int vmR3CreateUVM(uint32_t cCpus, PUVM *ppUVM)
{
    uint32_t i;

    /*
     * Create and initialize the UVM.
     * The structure has a variable sized aCpus tail, hence the RT_OFFSETOF.
     */
    PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
    AssertReturn(pUVM, VERR_NO_MEMORY);
    pUVM->u32Magic = UVM_MAGIC;
    pUVM->cCpus = cCpus;

    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));

    pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;

    pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;

    /* Initialize the VMCPU array in the UVM. */
    for (i = 0; i < cCpus; i++)
    {
        pUVM->aCpus[i].pUVM = pUVM;
        pUVM->aCpus[i].idCpu = i;
    }

    /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
    int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* Allocate a halt method event semaphore for each VCPU.
           Pre-set all slots to NIL so the cleanup loop below is safe even
           when semaphore creation fails part way through. */
        for (i = 0; i < cCpus; i++)
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        for (i = 0; i < cCpus; i++)
        {
            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
                     */
                    rc = STAMR3InitUVM(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = MMR3InitUVM(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = PDMR3InitUVM(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                /*
                                 * Start the emulation threads for all VMCPUs.
                                 */
                                for (i = 0; i < cCpus; i++)
                                {
                                    rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
                                                         RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
                                                         cCpus > 1 ? "EMT-%u" : "EMT", i);
                                    if (RT_FAILURE(rc))
                                        break;

                                    pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
                                }

                                if (RT_SUCCESS(rc))
                                {
                                    *ppUVM = pUVM;
                                    return VINF_SUCCESS;
                                }

                                /* bail out. (Started EMTs are currently leaked here.) */
                                while (i-- > 0)
                                {
                                    /** @todo rainy day: terminate the EMTs. */
                                }
                                PDMR3TermUVM(pUVM);
                            }
                            MMR3TermUVM(pUVM);
                        }
                        STAMR3TermUVM(pUVM);
                    }
                    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
                }
                RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
            }
        }
        /* Destroy whatever semaphores were created (NIL slots are harmless). */
        for (i = 0; i < cCpus; i++)
        {
            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        }
        RTTlsFree(pUVM->vm.s.idxTLS);
    }
    RTMemPageFree(pUVM);
    return rc;
}
522
523
/**
 * Creates and initializes the VM.
 *
 * Loads VMMR0.r0, asks GVMM (ring-0) to create the shared VM structure,
 * wires up the UVM/VM cross links, builds the configuration tree and then
 * runs the ring-3 / ring-0 / GC init phases.  On any failure the VM is torn
 * down again and GVMM is told to destroy the shared structure.
 *
 * @returns VBox status code.
 * @param   pUVM                The user-mode VM structure.
 * @param   cCpus               Number of virtual CPUs; must match the
 *                              "NumCPUs" value in the configuration.
 * @param   pfnCFGMConstructor  Callback constructing the configuration tree.
 * @param   pvUserCFGM          User argument for pfnCFGMConstructor.
 *
 * @thread EMT
 */
static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
{
    int rc = VINF_SUCCESS;

    /*
     * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
     */
    rc = PDMR3LdrLoadVMMR0U(pUVM);
    if (RT_FAILURE(rc))
    {
        /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
         * bird: what about moving the message down here? Main picks the first message, right? */
        if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
            return rc;  /* proper error message set later on */
        return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
    }

    /*
     * Request GVMM to create a new VM for us.
     */
    GVMMCREATEVMREQ CreateVMReq;
    CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
    CreateVMReq.pSession = pUVM->vm.s.pSession;
    CreateVMReq.pVMR0 = NIL_RTR0PTR;
    CreateVMReq.pVMR3 = NULL;
    CreateVMReq.cCpus = cCpus;
    rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
    if (RT_SUCCESS(rc))
    {
        PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
        AssertRelease(VALID_PTR(pVM));
        AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
        AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
        AssertRelease(pVM->cCpus == cCpus);
        AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));

        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
             pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));

        /*
         * Initialize the VM structure and our internal data (VMINT).
         * Cross-link the per-CPU structures in both directions.
         */
        pVM->pUVM = pUVM;

        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            pVM->aCpus[i].pUVCpu = &pUVM->aCpus[i];
            pVM->aCpus[i].idCpu = i;
            pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
            Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);

            pUVM->aCpus[i].pVCpu = &pVM->aCpus[i];
            pUVM->aCpus[i].pVM = pVM;
        }


        /*
         * Init the configuration.
         */
        rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
        if (RT_SUCCESS(rc))
        {
            rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
            if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
                pVM->fHWACCMEnabled = true;

            /*
             * If executing in fake suplib mode disable RR3 and RR0 in the config.
             */
            const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
            if (psz && !strcmp(psz, "fake"))
            {
                CFGMR3RemoveValue(CFGMR3GetRoot(pVM), "RawR3Enabled");
                CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RawR3Enabled", 0);
                CFGMR3RemoveValue(CFGMR3GetRoot(pVM), "RawR0Enabled");
                CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RawR0Enabled", 0);
            }

            /*
             * Make sure the CPU count in the config data matches.
             */
            if (RT_SUCCESS(rc))
            {
                uint32_t cCPUsCfg;
                rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "NumCPUs", &cCPUsCfg, 1);
                AssertLogRelMsgRC(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc));
                if (RT_SUCCESS(rc) && cCPUsCfg != cCpus)
                {
                    AssertLogRelMsgFailed(("Configuration error: \"NumCPUs\"=%RU32 and VMR3CreateVM::cCpus=%RU32 does not match!\n",
                                           cCPUsCfg, cCpus));
                    rc = VERR_INVALID_PARAMETER;
                }
            }
            if (RT_SUCCESS(rc))
            {
                /*
                 * Init the ring-3 components and ring-3 per cpu data, finishing it off
                 * by a relocation round (intermediate context finalization will do this).
                 */
                rc = vmR3InitRing3(pVM, pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = vmR3InitVMCpu(pVM);
                    if (RT_SUCCESS(rc))
                        rc = PGMR3FinalizeMappings(pVM);
                    if (RT_SUCCESS(rc))
                    {

                        LogFlow(("Ring-3 init succeeded\n"));

                        /*
                         * Init the Ring-0 components.
                         */
                        rc = vmR3InitRing0(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            /* Relocate again, because some switcher fixups depends on R0 init results. */
                            VMR3Relocate(pVM, 0);

#ifdef VBOX_WITH_DEBUGGER
                            /*
                             * Init the tcp debugger console if we're building
                             * with debugger support.
                             *
                             * Note! The opening brace of this if is closed inside
                             *       the second VBOX_WITH_DEBUGGER block below, so
                             *       the braces stay balanced in both build modes.
                             */
                            void *pvUser = NULL;
                            rc = DBGCTcpCreate(pVM, &pvUser);
                            if (    RT_SUCCESS(rc)
                                ||  rc == VERR_NET_ADDRESS_IN_USE)
                            {
                                pUVM->vm.s.pvDBGC = pvUser;
#endif
                                /*
                                 * Init the Guest Context components.
                                 */
                                rc = vmR3InitGC(pVM);
                                if (RT_SUCCESS(rc))
                                {
                                    /*
                                     * Now we can safely set the VM halt method to default.
                                     */
                                    rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
                                    if (RT_SUCCESS(rc))
                                    {
                                        /*
                                         * Set the state and link into the global list.
                                         */
                                        vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
                                        pUVM->pNext = g_pUVMsHead;
                                        g_pUVMsHead = pUVM;

#ifdef LOG_ENABLED
                                        RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
#endif
                                        return VINF_SUCCESS;
                                    }
                                }
#ifdef VBOX_WITH_DEBUGGER
                                DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
                                pUVM->vm.s.pvDBGC = NULL;
                            }
#endif
                            //..
                        }
                    }
                    vmR3Destroy(pVM);
                }
            }
            //..

            /* Clean CFGM. */
            int rc2 = CFGMR3Term(pVM);
            AssertRC(rc2);
        }

        /*
         * Drop all references to VM and the VMCPU structures, then
         * tell GVMM to destroy the VM.
         */
        pUVM->pVM = NULL;
        for (VMCPUID i = 0; i < pUVM->cCpus; i++)
        {
            pUVM->aCpus[i].pVM = NULL;
            pUVM->aCpus[i].pVCpu = NULL;
        }
        Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);

        if (pUVM->cCpus > 1)
        {
            /* Poke the other EMTs since they may have stale pVM and pVCpu references
               on the stack (see VMR3WaitU for instance) if they've been awakened after
               VM creation. */
            for (VMCPUID i = 1; i < pUVM->cCpus; i++)
                VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
            RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
        }

        int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertRC(rc2);
    }
    else
        vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));

    LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
    return rc;
}
735
736
737/**
738 * Register the calling EMT with GVM.
739 *
740 * @returns VBox status code.
741 * @param pVM The VM handle.
742 * @param idCpu The Virtual CPU ID.
743 */
744static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
745{
746 Assert(VMMGetCpuId(pVM) == idCpu);
747 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
748 if (RT_FAILURE(rc))
749 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
750 return rc;
751}
752
753
/**
 * Initializes all R3 components of the VM
 *
 * Registers the secondary EMTs with GVM, then initializes every ring-3
 * subsystem in dependency order.  The nested structure guarantees that on
 * failure the already-initialized subsystems are terminated in exactly the
 * reverse order of their initialization.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pUVM    The user-mode VM structure.
 */
static int vmR3InitRing3(PVM pVM, PUVM pUVM)
{
    int rc;

    /*
     * Register the other EMTs with GVM.
     * EMT(0) is the calling thread and already registered by VM creation.
     */
    for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
    {
        rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Init all R3 components, the order here might be important.
     */
    rc = MMR3Init(pVM);
    if (RT_SUCCESS(rc))
    {
        /* Register the VM-global and world-switcher profiling counters. */
        STAM_REG(pVM, &pVM->StatTotalInGC,          STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC",          STAMUNIT_TICKS_PER_CALL,    "Profiling the total time spent in GC.");
        STAM_REG(pVM, &pVM->StatSwitcherToGC,       STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC",    STAMUNIT_TICKS_PER_CALL,    "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherToHC,       STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC",    STAMUNIT_TICKS_PER_CALL,    "Profiling switching to HC.");
        STAM_REG(pVM, &pVM->StatSwitcherSaveRegs,   STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherSysEnter,   STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherDebug,      STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug",    STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherCR0,        STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0",  STAMUNIT_TICKS_PER_CALL,    "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherCR4,        STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4",  STAMUNIT_TICKS_PER_CALL,    "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherLgdt,       STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL,    "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherLidt,       STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL,    "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherLldt,       STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL,    "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherTSS,        STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS",  STAMUNIT_TICKS_PER_CALL,    "Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherJmpCR3,     STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3",   STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
        STAM_REG(pVM, &pVM->StatSwitcherRstrRegs,   STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");

        /* Per-VCPU halt-state profiling counters. */
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
            AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
            AssertRC(rc);
            rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
            AssertRC(rc);
        }

        STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew,   STAMTYPE_COUNTER, "/VM/Req/AllocNew",       STAMUNIT_OCCURENCES,        "Number of VMR3ReqAlloc returning a new packet.");
        STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces",     STAMUNIT_OCCURENCES,        "Number of VMR3ReqAlloc causing races.");
        STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES,      "Number of VMR3ReqAlloc returning a recycled packet.");
        STAM_REG(pVM, &pUVM->vm.s.StatReqFree,       STAMTYPE_COUNTER, "/VM/Req/Free",           STAMUNIT_OCCURENCES,        "Number of VMR3ReqFree calls.");
        STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES,        "Number of times the request was actually freed.");

        rc = CPUMR3Init(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = HWACCMR3Init(pVM);
            if (RT_SUCCESS(rc))
            {
                rc = PGMR3Init(pVM);
                if (RT_SUCCESS(rc))
                {
                    rc = REMR3Init(pVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = MMR3InitPaging(pVM);
                        if (RT_SUCCESS(rc))
                            rc = TMR3Init(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = VMMR3Init(pVM);
                            if (RT_SUCCESS(rc))
                            {
                                rc = SELMR3Init(pVM);
                                if (RT_SUCCESS(rc))
                                {
                                    rc = TRPMR3Init(pVM);
                                    if (RT_SUCCESS(rc))
                                    {
                                        rc = CSAMR3Init(pVM);
                                        if (RT_SUCCESS(rc))
                                        {
                                            rc = PATMR3Init(pVM);
                                            if (RT_SUCCESS(rc))
                                            {
#ifdef VBOX_WITH_VMI
                                                rc = PARAVR3Init(pVM);
                                                if (RT_SUCCESS(rc))
                                                {
#endif
                                                    rc = IOMR3Init(pVM);
                                                    if (RT_SUCCESS(rc))
                                                    {
                                                        rc = EMR3Init(pVM);
                                                        if (RT_SUCCESS(rc))
                                                        {
                                                            rc = DBGFR3Init(pVM);
                                                            if (RT_SUCCESS(rc))
                                                            {
                                                                rc = PDMR3Init(pVM);
                                                                if (RT_SUCCESS(rc))
                                                                {
                                                                    /* Finalization round: these run only after
                                                                       every subsystem's first-stage init is done. */
                                                                    rc = PGMR3InitDynMap(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = MMR3HyperInitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = PATMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = PGMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = SELMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = TMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = VMMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = REMR3InitFinalize(pVM);
                                                                    if (RT_SUCCESS(rc))
                                                                        rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
                                                                    if (RT_SUCCESS(rc))
                                                                    {
                                                                        LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
                                                                        return VINF_SUCCESS;
                                                                    }
                                                                    int rc2 = PDMR3Term(pVM);
                                                                    AssertRC(rc2);
                                                                }
                                                                int rc2 = DBGFR3Term(pVM);
                                                                AssertRC(rc2);
                                                            }
                                                            int rc2 = EMR3Term(pVM);
                                                            AssertRC(rc2);
                                                        }
                                                        int rc2 = IOMR3Term(pVM);
                                                        AssertRC(rc2);
                                                    }
#ifdef VBOX_WITH_VMI
                                                    int rc2 = PARAVR3Term(pVM);
                                                    AssertRC(rc2);
                                                }
#endif
                                                int rc2 = PATMR3Term(pVM);
                                                AssertRC(rc2);
                                            }
                                            int rc2 = CSAMR3Term(pVM);
                                            AssertRC(rc2);
                                        }
                                        int rc2 = TRPMR3Term(pVM);
                                        AssertRC(rc2);
                                    }
                                    int rc2 = SELMR3Term(pVM);
                                    AssertRC(rc2);
                                }
                                int rc2 = VMMR3Term(pVM);
                                AssertRC(rc2);
                            }
                            int rc2 = TMR3Term(pVM);
                            AssertRC(rc2);
                        }
                        int rc2 = REMR3Term(pVM);
                        AssertRC(rc2);
                    }
                    int rc2 = PGMR3Term(pVM);
                    AssertRC(rc2);
                }
                int rc2 = HWACCMR3Term(pVM);
                AssertRC(rc2);
            }
            //int rc2 = CPUMR3Term(pVM);
            //AssertRC(rc2);
        }
        /* MMR3Term is not called here because it'll kill the heap. */
    }

    LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
    return rc;
}
932
933
934/**
935 * Initializes all VM CPU components of the VM
936 */
937static int vmR3InitVMCpu(PVM pVM)
938{
939 int rc = VINF_SUCCESS;
940 int rc2;
941
942 rc = CPUMR3InitCPU(pVM);
943 if (RT_SUCCESS(rc))
944 {
945 rc = HWACCMR3InitCPU(pVM);
946 if (RT_SUCCESS(rc))
947 {
948 rc = PGMR3InitCPU(pVM);
949 if (RT_SUCCESS(rc))
950 {
951 rc = TMR3InitCPU(pVM);
952 if (RT_SUCCESS(rc))
953 {
954 rc = VMMR3InitCPU(pVM);
955 if (RT_SUCCESS(rc))
956 {
957 rc = EMR3InitCPU(pVM);
958 if (RT_SUCCESS(rc))
959 {
960 LogFlow(("vmR3InitVMCpu: returns %Rrc\n", VINF_SUCCESS));
961 return VINF_SUCCESS;
962 }
963
964 rc2 = VMMR3TermCPU(pVM);
965 AssertRC(rc2);
966 }
967 rc2 = TMR3TermCPU(pVM);
968 AssertRC(rc2);
969 }
970 rc2 = PGMR3TermCPU(pVM);
971 AssertRC(rc2);
972 }
973 rc2 = HWACCMR3TermCPU(pVM);
974 AssertRC(rc2);
975 }
976 rc2 = CPUMR3TermCPU(pVM);
977 AssertRC(rc2);
978 }
979 LogFlow(("vmR3InitVMCpu: returns %Rrc\n", rc));
980 return rc;
981}
982
983
984/**
985 * Initializes all R0 components of the VM
986 */
987static int vmR3InitRing0(PVM pVM)
988{
989 LogFlow(("vmR3InitRing0:\n"));
990
991 /*
992 * Check for FAKE suplib mode.
993 */
994 int rc = VINF_SUCCESS;
995 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
996 if (!psz || strcmp(psz, "fake"))
997 {
998 /*
999 * Call the VMMR0 component and let it do the init.
1000 */
1001 rc = VMMR3InitR0(pVM);
1002 }
1003 else
1004 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1005
1006 /*
1007 * Do notifications and return.
1008 */
1009 if (RT_SUCCESS(rc))
1010 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1011
1012 /** todo: move this to the VMINITCOMPLETED_RING0 notification handler once implemented */
1013 if (RT_SUCCESS(rc))
1014 rc = HWACCMR3InitFinalizeR0(pVM);
1015
1016 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1017 return rc;
1018}
1019
1020
1021/**
1022 * Initializes all GC components of the VM
1023 */
1024static int vmR3InitGC(PVM pVM)
1025{
1026 LogFlow(("vmR3InitGC:\n"));
1027
1028 /*
1029 * Check for FAKE suplib mode.
1030 */
1031 int rc = VINF_SUCCESS;
1032 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1033 if (!psz || strcmp(psz, "fake"))
1034 {
1035 /*
1036 * Call the VMMR0 component and let it do the init.
1037 */
1038 rc = VMMR3InitRC(pVM);
1039 }
1040 else
1041 Log(("vmR3InitGC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1042
1043 /*
1044 * Do notifications and return.
1045 */
1046 if (RT_SUCCESS(rc))
1047 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_GC);
1048 LogFlow(("vmR3InitGC: returns %Rrc\n", rc));
1049 return rc;
1050}
1051
1052
/**
 * Do init completed notifications.
 *
 * These notifications can fail, although this stub currently always
 * reports success and performs no work for any phase.
 *
 * @returns VBox status code (currently always VINF_SUCCESS).
 * @param   pVM         The VM handle.
 * @param   enmWhat     What's completed (ring-3, ring-0 or GC init phase).
 */
static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    return VINF_SUCCESS;
}
1064
1065
1066/**
1067 * Logger callback for inserting a custom prefix.
1068 *
1069 * @returns Number of chars written.
1070 * @param pLogger The logger.
1071 * @param pchBuf The output buffer.
1072 * @param cchBuf The output buffer size.
1073 * @param pvUser Pointer to the UVM structure.
1074 */
1075static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1076{
1077 AssertReturn(cchBuf >= 2, 0);
1078 PUVM pUVM = (PUVM)pvUser;
1079 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1080 if (pUVCpu)
1081 {
1082 static const char s_szHex[17] = "0123456789abcdef";
1083 VMCPUID const idCpu = pUVCpu->idCpu;
1084 pchBuf[1] = s_szHex[ idCpu & 15];
1085 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1086 }
1087 else
1088 {
1089 pchBuf[0] = 'x';
1090 pchBuf[1] = 'y';
1091 }
1092
1093 return 2;
1094}
1095
1096
/**
 * Calls the relocation functions for all VMM components so they can update
 * any GC pointers. When this function is called all the basic VM members
 * have been updated and the actual memory relocation has been done
 * by the PGM/MM.
 *
 * This is used both on init and on runtime relocations.
 *
 * @param   pVM         VM handle.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));

    /*
     * The order here is very important!  Do not reorder these calls without
     * understanding the init/relocation dependencies between the components.
     */
    PGMR3Relocate(pVM, offDelta);
    PDMR3LdrRelocateU(pVM->pUVM, offDelta); /* Modules must be relocated before the components using them. */
    PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
    CPUMR3Relocate(pVM);
    HWACCMR3Relocate(pVM);
    SELMR3Relocate(pVM);
    VMMR3Relocate(pVM, offDelta);
    SELMR3Relocate(pVM); /* !hack! fix stack! */
    TRPMR3Relocate(pVM, offDelta);
    PATMR3Relocate(pVM);
    CSAMR3Relocate(pVM, offDelta);
    IOMR3Relocate(pVM, offDelta);
    EMR3Relocate(pVM);
    TMR3Relocate(pVM, offDelta);
    DBGFR3Relocate(pVM, offDelta);
    PDMR3Relocate(pVM, offDelta);
}
1132
1133
1134/**
1135 * Power on the virtual machine.
1136 *
1137 * @returns 0 on success.
1138 * @returns VBox error code on failure.
1139 * @param pVM VM to power on.
1140 * @thread EMT
1141 */
1142static DECLCALLBACK(int) vmR3PowerOn(PVM pVM)
1143{
1144 LogFlow(("vmR3PowerOn: pVM=%p\n", pVM));
1145
1146 /*
1147 * EMT(0) does the actual power on work *before* the other EMTs
1148 * get here, they just need to set their state to STARTED so they
1149 * get out of the EMT loop and into EM.
1150 */
1151 PVMCPU pVCpu = VMMGetCpu(pVM);
1152 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1153 if (pVCpu->idCpu != 0)
1154 return VINF_SUCCESS;
1155
1156 /*
1157 * Try change the state.
1158 */
1159 AssertMsgReturn(vmR3TrySetState(pVM, VMSTATE_POWERING_ON, VMSTATE_CREATED),
1160 ("%s\n", VMR3GetStateName(pVM->enmVMState)),
1161 VERR_VM_INVALID_VM_STATE);
1162
1163 /*
1164 * Change the state, notify the components and resume the execution.
1165 */
1166 PDMR3PowerOn(pVM);
1167 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1168
1169 return VINF_SUCCESS;
1170}
1171
1172
1173/**
1174 * Powers on the virtual machine.
1175 *
1176 * @returns VBox status code.
1177 *
1178 * @param pVM The VM to power on.
1179 *
1180 * @thread Any thread.
1181 * @vmstate Created
1182 * @vmstateto PoweringOn, Running
1183 */
1184VMMR3DECL(int) VMR3PowerOn(PVM pVM)
1185{
1186 LogFlow(("VMR3PowerOn: pVM=%p\n", pVM));
1187 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1188
1189 /*
1190 * Forward the request to the EMTs (EMT(0) first as it does all the
1191 * work upfront).
1192 */
1193 int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL, (PFNRT)vmR3PowerOn, 1, pVM);
1194 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1195 return rc;
1196}
1197
1198
1199/**
1200 * EMT worker for vmR3SuspendCommon.
1201 *
1202 * @returns VBox strict status code.
1203 * @retval VINF_EM_SUSPEND.
1204 * @retval VERR_VM_INVALID_VM_STATE.
1205 *
1206 * @param pVM VM to suspend.
1207 * @param fFatal Whether it's a fatal error or normal suspend.
1208 *
1209 * @thread EMT
1210 */
1211static DECLCALLBACK(int) vmR3Suspend(PVM pVM, bool fFatal)
1212{
1213 LogFlow(("vmR3Suspend: pVM=%p\n", pVM));
1214
1215 /*
1216 * The first EMT switches the state to suspending.
1217 */
1218 PVMCPU pVCpu = VMMGetCpu(pVM);
1219 if (pVCpu->idCpu == pVM->cCpus - 1)
1220 AssertMsgReturn(vmR3TrySetState2(pVM,
1221 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1222 VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS),
1223 ("%s\n", VMR3GetStateName(pVM->enmVMState)),
1224 VERR_VM_INVALID_VM_STATE);
1225
1226 VMSTATE enmVMState = pVM->enmVMState;
1227 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1228 || enmVMState == VMSTATE_SUSPENDING_LS,
1229 ("%s\n", VMR3GetStateName(enmVMState)),
1230 VERR_INTERNAL_ERROR_5);
1231
1232 /*
1233 * EMT(0) does the actually suspending *after* all the other CPUs has
1234 * been thru here.
1235 */
1236 if (pVCpu->idCpu == 0)
1237 {
1238 /* Perform suspend notification. */
1239 PDMR3Suspend(pVM);
1240
1241 /* Change to the final state. */
1242 if (pVM->enmVMState != VMSTATE_SUSPENDING_LS)
1243 vmR3SetState(pVM, fFatal ? VMSTATE_FATAL_ERROR : VMSTATE_SUSPENDED, VMSTATE_SUSPENDING);
1244 else if (!fFatal)
1245 vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1246 else
1247 {
1248 vmR3SetState(pVM, VMSTATE_FATAL_ERROR_LS, VMSTATE_SUSPENDING_LS);
1249 SSMR3Cancel(pVM);
1250 }
1251 }
1252
1253 return VINF_EM_SUSPEND;
1254}
1255
1256
1257/**
1258 * Common worker for VMR3Suspend and vmR3SetRuntimeErrorCommon.
1259 *
1260 * They both suspends the VM, but the latter ends up in the VMSTATE_FATAL_ERROR
1261 * instead of VMSTATE_SUSPENDED.
1262 *
1263 * @returns VBox strict status code.
1264 * @param pVM The VM handle.
1265 * @param fFatal Whether it's a fatal error or not.
1266 *
1267 * @thread Any thread.
1268 * @vmstate Running or RunningLS
1269 * @vmstateto Suspending + Suspended/FatalError or SuspendingLS +
1270 * SuspendedLS/FatalErrorLS
1271 */
1272static int vmR3SuspendCommon(PVM pVM, bool fFatal)
1273{
1274 /*
1275 * Forward the operation to EMT in reverse order so EMT(0) can do the
1276 * actual suspending after the other ones have stopped running guest code.
1277 */
1278 return VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Suspend, 2, pVM, fFatal);
1279}
1280
1281
1282/**
1283 * Suspends a running VM.
1284 *
1285 * @returns VBox status code. When called on EMT, this will be a strict status
1286 * code that has to be propagated up the call stack.
1287 *
1288 * @param pVM The VM to suspend.
1289 *
1290 * @thread Any thread.
1291 * @vmstate Running or RunningLS
1292 * @vmstateto Suspending + Suspended or SuspendingLS + SuspendedLS
1293 */
1294VMMR3DECL(int) VMR3Suspend(PVM pVM)
1295{
1296 LogFlow(("VMR3Suspend: pVM=%p\n", pVM));
1297 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1298 int rc = vmR3SuspendCommon(pVM, false /*fFatal*/);
1299 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1300 return rc;
1301}
1302
1303
1304/**
1305 * Resume VM execution.
1306 *
1307 * @returns 0 on success.
1308 * @returns VBox error code on failure.
1309 * @param pVM The VM to resume.
1310 * @thread EMT
1311 */
1312static DECLCALLBACK(int) vmR3Resume(PVM pVM)
1313{
1314 LogFlow(("vmR3Resume: pVM=%p\n", pVM));
1315
1316 /*
1317 * EMT(0) does all the work *before* the others wake up.
1318 */
1319 PVMCPU pVCpu = VMMGetCpu(pVM);
1320 if (pVCpu->idCpu == 0)
1321 {
1322 AssertMsgReturn(vmR3TrySetState(pVM, VMSTATE_RESUMING, VMSTATE_SUSPENDED),
1323 ("%s\n", VMR3GetStateName(pVM->enmVMState)),
1324 VERR_VM_INVALID_VM_STATE);
1325
1326 /* Perform resume notifications. */
1327 PDMR3Resume(pVM);
1328
1329 /* Advance to the final state. */
1330 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1331 }
1332
1333 /** @todo there is a race here: Someone could suspend, power off, raise a fatal
1334 * error (both kinds), save the vm, or start a live save operation before
1335 * we get here on all CPUs. Only safe way is a cross call, or to make
1336 * the last thread flip the state from Resuming to Running. While the
1337 * latter seems easy and perhaps more attractive, the former might be
1338 * better wrt TSC/TM... */
1339 AssertMsgReturn(pVM->enmVMState == VMSTATE_RUNNING, ("%s\n", VMR3GetStateName(pVM->enmVMState)), VERR_VM_INVALID_VM_STATE);
1340 return VINF_EM_RESUME;
1341}
1342
1343
1344
1345
1346/**
1347 * Resume VM execution.
1348 *
1349 * @returns VBox status code. When called on EMT, this will be a strict status
1350 * code that has to be propagated up the call stack.
1351 *
1352 * @param pVM The VM to resume.
1353 *
1354 * @thread Any thread.
1355 * @vmstate Suspended
1356 * @vmstateto Running
1357 */
1358VMMR3DECL(int) VMR3Resume(PVM pVM)
1359{
1360 LogFlow(("VMR3Resume: pVM=%p\n", pVM));
1361 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1362
1363 /*
1364 * Forward the request to the EMTs (EMT(0) first as it does all the
1365 * work upfront).
1366 */
1367 int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL, (PFNRT)vmR3Resume, 1, pVM);
1368 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1369 return rc;
1370}
1371
1372
1373/**
1374 * Worker for VMR3Save that validates the state and calls SSMR3Save.
1375 *
1376 * @returns VBox status code.
1377 *
1378 * @param pVM The VM handle.
1379 * @param pszFilename The name of the save state file.
1380 * @param enmAfter What to do afterwards.
1381 * @param pfnProgress Progress callback. Optional.
1382 * @param pvUser User argument for the progress callback.
1383 * @param ppSSM Where to return the saved state handle in case of a
1384 * live snapshot scenario.
1385 * @thread EMT
1386 */
1387static DECLCALLBACK(int) vmR3Save(PVM pVM, const char *pszFilename, SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvUser, PSSMHANDLE *ppSSM)
1388{
1389 LogFlow(("vmR3Save: pVM=%p pszFilename=%p:{%s} enmAfter=%d pfnProgress=%p pvUser=%p ppSSM=%p\n", pVM, pszFilename, pszFilename, enmAfter, pfnProgress, pvUser, ppSSM));
1390
1391 /*
1392 * Validate input.
1393 */
1394 AssertPtr(pszFilename);
1395 AssertPtr(pVM);
1396 Assert(enmAfter == SSMAFTER_DESTROY || enmAfter == SSMAFTER_CONTINUE);
1397 AssertPtr(ppSSM);
1398 *ppSSM = NULL;
1399
1400 /*
1401 * Change the state and perform/start the saving.
1402 */
1403 int rc;
1404 if (vmR3TrySetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED))
1405 {
1406 rc = SSMR3Save(pVM, pszFilename, enmAfter, pfnProgress, pvUser);
1407 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1408 }
1409 else if (vmR3TrySetState(pVM, VMSTATE_RUNNING, VMSTATE_RUNNING_LS))
1410 {
1411 rc = SSMR3LiveToFile(pVM, pszFilename, enmAfter, pfnProgress, pvUser, ppSSM);
1412 /* (We're not subject to cancellation just yet.) */
1413 }
1414
1415 return rc;
1416}
1417
1418
1419/**
1420 * Worker for VMR3Save continues a live save on EMT(0).
1421 *
1422 * @returns VBox status code.
1423 *
1424 * @param pVM The VM handle.
1425 * @param pSSM The handle of saved state operation.
1426 * @thread EMT(0)
1427 */
1428static DECLCALLBACK(int) vmR3SaveLiveStep2(PVM pVM, PSSMHANDLE pSSM)
1429{
1430 LogFlow(("vmR3SaveLiveStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1431 VM_ASSERT_EMT0(pVM);
1432 Assert(pVM->enmVMState == VMSTATE_SUSPENDED_LS);
1433
1434 int rc = SSMR3LiveDoStep2(pSSM);
1435 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS);
1436
1437 return rc;
1438}
1439
1440
/**
 * Save current VM state.
 *
 * Can be used for both saving the state and creating snapshots.
 *
 * When called for a VM in the Running state, the saved state is created live
 * and the VM is only suspended when the final part of the saving is performed.
 * The VM state will not be restored to Running in this case and it's up to the
 * caller to call VMR3Resume if this is desirable. (The rationale is that the
 * caller probably wishes to reconfigure the disks before resuming the VM.)
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM which state should be saved.
 * @param   pszFilename         The name of the save state file.
 * @param   fContinueAfterwards Whether to continue execution afterwards or not.
 *                              When in doubt, set this to true.
 * @param   pfnProgress         Progress callback. Optional.
 * @param   pvUser              User argument for the progress callback.
 *
 * @thread      Non-EMT.
 * @vmstate     Suspended or Running
 * @vmstateto   Saving+Suspended or
 *              RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
 */
VMMR3DECL(int) VMR3Save(PVM pVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser)
{
    LogFlow(("VMR3Save: pVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p\n",
             pVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser));

    /*
     * Validate input.
     */
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_OTHER_THREAD(pVM);
    AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
    AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);

    /*
     * Request the operation in EMT(0). A non-NULL pSSM on success means a
     * live save was started and must be completed here.
     */
    SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
    PSSMHANDLE pSSM;
    int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
                              (PFNRT)vmR3Save, 6, pVM, pszFilename, enmAfter, pfnProgress, pvUser, &pSSM);
    if (    RT_SUCCESS(rc)
        &&  pSSM)
    {
        /* NOTE(review): the disabled block below references VMSTATE_SUSPEND and
           VMSTATE_SUSPEND_LS - presumably VMSTATE_SUSPENDED(_LS) were meant, and
           the AssertLogRelMsg formats an enum with %s; verify before enabling. */
#if 0 /** @todo later*/
        /*
         * Live snapshot.
         */
        rc = SSMR3LiveDoStep1(pSSM);
        if (RT_SUCCESS(rc))
        {
            rc = vmR3SuspendCommon(pVM, false /*fFatal*/);
            if (RT_SUCCESS(rc))
                rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3SaveLiveStep2, 2, pVM, pSSM);
        }
        else
            AssertLogRelMsg(   pVM->enmVMState == VMSTATE_RUNNING_LS
                            || pVM->enmVMState == VMSTATE_RESETTING_LS
                            || pVM->enmVMState == VMSTATE_POWERING_OFF_LS
                            || pVM->enmVMState == VMSTATE_FATAL_ERROR_LS
                            || pVM->enmVMState == VMSTATE_GURU_MEDITATION_LS,
                            ("%s rc=%Rrc\n", pVM->enmVMState, rc));

        int rc2 = SSMR3LiveDone(pSSM);
        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
            rc = rc2;

        /*
         * Work the state.
         */
        if (pVM->enmVMState == VMSTATE_SUSPENDED)
            Log(("VMR3Save: Suspended; rc=%Rrc\n", rc));
        else if (vmR3TrySetState(pVM, VMSTATE_RUNNING, VMSTATE_RUNNING_LS))
            Log(("VMR3Save: Cancelled while running; rc=%Rrc\n", rc));
        else if (vmR3TrySetState(pVM, VMSTATE_SUSPEND, VMSTATE_SUSPEND_LS))
            Log(("VMR3Save: Cancelled while suspended; rc=%Rrc\n", rc));
        else if (vmR3TrySetState(pVM, VMSTATE_POWERING_OFF, VMSTATE_POWERING_OFF_LS))
        {
            /** @todo needs more work. */
            Log(("VMR3Save: Powering off; rc=%Rrc\n", rc));
        }
        else if (vmR3TrySetState(pVM, VMSTATE_RESETTING, VMSTATE_RESETTING_LS))
        {
            /** @todo needs more work. */
            Log(("VMR3Save: Resetting; rc=%Rrc\n", rc));
        }
        else if (vmR3TrySetState(pVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS))
            Log(("VMR3Save: Fatal error; rc=%Rrc\n", rc));
        else if (vmR3TrySetState(pVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS))
            Log(("VMR3Save: Guru meditation; rc=%Rrc\n", rc));
        else
        {
            AssertLogRelMsgFailed(("%s rc=%Rrc\n", pVM->enmVMState, rc));
            rc = VERR_INTERNAL_ERROR_4;
        }
#else
        rc = VERR_NOT_IMPLEMENTED;
        SSMR3LiveDone(pSSM);
#endif
    }

    LogFlow(("VMR3Save: returns %Rrc\n", rc));
    return rc;
}
1550
1551
1552/**
1553 * Loads a new VM state.
1554 *
1555 * To restore a saved state on VM startup, call this function and then
1556 * resume the VM instead of powering it on.
1557 *
1558 * @returns VBox status code.
1559 * @param pVM The VM handle.
1560 * @param pszFilename The name of the save state file.
1561 * @param pfnProgress Progress callback. Optional.
1562 * @param pvUser User argument for the progress callback.
1563 * @thread EMT.
1564 */
1565static DECLCALLBACK(int) vmR3Load(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
1566{
1567 LogFlow(("vmR3Load: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n", pVM, pszFilename, pszFilename, pfnProgress, pvUser));
1568
1569 /*
1570 * Validate input (paranoia).
1571 */
1572 AssertPtr(pVM);
1573 AssertPtr(pszFilename);
1574
1575 /*
1576 * Change the state and perform the load.
1577 *
1578 * Always perform a relocation round afterwards to make sure hypervisor
1579 * selectors and such are correct.
1580 */
1581 if (!vmR3TrySetState2(pVM,
1582 VMSTATE_LOADING, VMSTATE_CREATED,
1583 VMSTATE_LOADING, VMSTATE_SUSPENDED))
1584 return VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS, N_("Invalid VM state (%s) for restoring state from '%s'"),
1585 VMR3GetStateName(pVM->enmVMState), pszFilename);
1586
1587 int rc = SSMR3Load(pVM, pszFilename, SSMAFTER_RESUME, pfnProgress, pvUser);
1588 if (RT_SUCCESS(rc))
1589 {
1590 VMR3Relocate(pVM, 0 /*offDelta*/);
1591 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
1592 }
1593 else
1594 {
1595 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
1596 rc = VMSetError(pVM, rc, RT_SRC_POS,
1597 N_("Unable to restore the virtual machine's saved state from '%s'. It may be damaged or from an older version of VirtualBox. Please discard the saved state before starting the virtual machine"),
1598 pszFilename);
1599 }
1600
1601 return rc;
1602}
1603
1604
1605/**
1606 * Loads a VM state into a newly created VM or a one that is suspended.
1607 *
1608 * To restore a saved state on VM startup, call this function and then resume
1609 * the VM instead of powering it on.
1610 *
1611 * @returns VBox status code.
1612 *
1613 * @param pVM The VM handle.
1614 * @param pszFilename The name of the save state file.
1615 * @param pfnProgress Progress callback. Optional.
1616 * @param pvUser User argument for the progress callback.
1617 *
1618 * @thread Any thread.
1619 * @vmstate Created, Suspended
1620 * @vmstateto Loading+Suspended
1621 */
1622VMMR3DECL(int) VMR3Load(PVM pVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
1623{
1624 LogFlow(("VMR3Load: pVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n", pVM, pszFilename, pszFilename, pfnProgress, pvUser));
1625
1626 /*
1627 * Validate input.
1628 */
1629 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1630 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1631
1632 /*
1633 * Request the operation in EMT.
1634 */
1635 int rc = VMR3ReqCallWaitU(pVM->pUVM, 0 /*idDstCpu*/,
1636 (PFNRT)vmR3Load, 4, pVM, pszFilename, pfnProgress, pvUser);
1637 LogFlow(("VMR3Load: returns %Rrc\n", rc));
1638 return rc;
1639}
1640
1641
1642/**
1643 * Worker for VMR3PowerOff that does the actually powering off on EMT(0) after
1644 * cycling thru the other EMTs first.
1645 *
1646 * @returns VBox status code.
1647 *
1648 * @param pVM The VM handle.
1649 *
1650 * @thread EMT.
1651 */
1652static DECLCALLBACK(int) vmR3PowerOff(PVM pVM)
1653{
1654 LogFlow(("vmR3PowerOff: pVM=%p\n", pVM));
1655
1656 /*
1657 * The first EMT thru here will change the state to PoweringOff.
1658 */
1659 PVMCPU pVCpu = VMMGetCpu(pVM);
1660 if (pVCpu->idCpu == pVM->cCpus - 1)
1661 {
1662 PUVM pUVM = pVM->pUVM;
1663 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1664
1665 VMSTATE enmStateNew;
1666 VMSTATE enmStateOld = pVM->enmVMState;
1667 switch (enmStateOld)
1668 {
1669 case VMSTATE_RUNNING:
1670 case VMSTATE_SUSPENDED:
1671 case VMSTATE_DEBUGGING:
1672 case VMSTATE_LOAD_FAILURE:
1673 case VMSTATE_GURU_MEDITATION:
1674 case VMSTATE_FATAL_ERROR:
1675 enmStateNew = VMSTATE_POWERING_OFF;
1676 break;
1677
1678 case VMSTATE_RUNNING_LS:
1679 case VMSTATE_DEBUGGING_LS:
1680 case VMSTATE_GURU_MEDITATION_LS:
1681 case VMSTATE_FATAL_ERROR_LS:
1682 enmStateNew = VMSTATE_POWERING_OFF_LS;
1683 break;
1684
1685 default:
1686 AssertLogRelMsgFailed(("%s\n", VMR3GetStateName(enmStateOld)));
1687 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1688 return VERR_VM_INVALID_VM_STATE;
1689 }
1690
1691 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld);
1692 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1693 }
1694
1695 /*
1696 * Check the state.
1697 */
1698 VMSTATE const enmVMState = pVM->enmVMState;
1699 AssertMsgReturn( enmVMState == VMSTATE_POWERING_OFF
1700 || enmVMState == VMSTATE_POWERING_OFF_LS,
1701 ("%s\n", VMR3GetStateName(enmVMState)),
1702 VERR_VM_INVALID_VM_STATE);
1703
1704 /*
1705 * EMT(0) does the actual power off work here *after* all the other EMTs
1706 * have been thru and entered the STOPPED state.
1707 */
1708 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
1709 if (pVCpu->idCpu == 0)
1710 {
1711 /*
1712 * For debugging purposes, we will log a summary of the guest state at this point.
1713 */
1714 if (enmVMState != VMSTATE_GURU_MEDITATION)
1715 {
1716 /** @todo SMP support? */
1717 PVMCPU pVCpu = VMMGetCpu(pVM);
1718
1719 /** @todo make the state dumping at VMR3PowerOff optional. */
1720 RTLogRelPrintf("****************** Guest state at power off ******************\n");
1721 DBGFR3Info(pVM, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
1722 RTLogRelPrintf("***\n");
1723 DBGFR3Info(pVM, "mode", NULL, DBGFR3InfoLogRelHlp());
1724 RTLogRelPrintf("***\n");
1725 DBGFR3Info(pVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
1726 RTLogRelPrintf("***\n");
1727 DBGFR3Info(pVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
1728 /** @todo dump guest call stack. */
1729#if 1 // "temporary" while debugging #1589
1730 RTLogRelPrintf("***\n");
1731 uint32_t esp = CPUMGetGuestESP(pVCpu);
1732 if ( CPUMGetGuestSS(pVCpu) == 0
1733 && esp < _64K)
1734 {
1735 uint8_t abBuf[PAGE_SIZE];
1736 RTLogRelPrintf("***\n"
1737 "ss:sp=0000:%04x ", esp);
1738 uint32_t Start = esp & ~(uint32_t)63;
1739 int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, Start, 0x100);
1740 if (RT_SUCCESS(rc))
1741 RTLogRelPrintf("0000:%04x TO 0000:%04x:\n"
1742 "%.*Rhxd\n",
1743 Start, Start + 0x100 - 1,
1744 0x100, abBuf);
1745 else
1746 RTLogRelPrintf("rc=%Rrc\n", rc);
1747
1748 /* grub ... */
1749 if (esp < 0x2000 && esp > 0x1fc0)
1750 {
1751 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x800);
1752 if (RT_SUCCESS(rc))
1753 RTLogRelPrintf("0000:8000 TO 0000:87ff:\n"
1754 "%.*Rhxd\n",
1755 0x800, abBuf);
1756 }
1757 /* microsoft cdrom hang ... */
1758 if (true)
1759 {
1760 rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, 0x8000, 0x200);
1761 if (RT_SUCCESS(rc))
1762 RTLogRelPrintf("2000:0000 TO 2000:01ff:\n"
1763 "%.*Rhxd\n",
1764 0x200, abBuf);
1765 }
1766 }
1767#endif
1768 RTLogRelPrintf("************** End of Guest state at power off ***************\n");
1769 }
1770
1771 /*
1772 * Perform the power off notifications and advance the state to
1773 * Off or OffLS.
1774 */
1775 PDMR3PowerOff(pVM);
1776 vmR3SetState2(pVM,
1777 VMSTATE_OFF, VMSTATE_POWERING_OFF,
1778 VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS);
1779 }
1780 return VINF_EM_OFF;
1781}
1782
1783
1784/**
1785 * Power Off the VM.
1786 *
1787 * @returns 0 on success.
1788 * @returns VBox error code on failure.
1789 * @param pVM VM which should be destroyed.
1790 * @thread Any thread.
1791 * @vmstate Suspended, Running, Guru Meditation, Load Failure
1792 * @vmstateto Off
1793 */
1794VMMR3DECL(int) VMR3PowerOff(PVM pVM)
1795{
1796 LogFlow(("VMR3PowerOff: pVM=%p\n", pVM));
1797
1798 /*
1799 * Validate input.
1800 */
1801 if (!pVM)
1802 {
1803 AssertMsgFailed(("Invalid VM pointer\n"));
1804 return VERR_INVALID_PARAMETER;
1805 }
1806
1807 /*
1808 * Request the operation in EMT. (in reverse order as VCPU 0 does the actual work)
1809 */
1810 int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3PowerOff, 1, pVM);
1811 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
1812 return rc;
1813}
1814
1815
/**
 * Destroys the VM.
 *
 * The VM must be powered off (or never really powered on) to call this
 * function. The VM handle is destroyed and can no longer be used upon
 * successful return.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The handle of the VM which should be destroyed.
 *
 * @thread      EMT(0) or any non-emulation thread.
 * @vmstate     Off, Created
 * @vmstateto   N/A
 */
VMMR3DECL(int) VMR3Destroy(PVM pVM)
{
    LogFlow(("VMR3Destroy: pVM=%p\n", pVM));

    /*
     * Validate input.
     */
    if (!pVM)
        return VERR_INVALID_PARAMETER;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    Assert(VMMGetCpuId(pVM) == 0 || VMMGetCpuId(pVM) == NIL_VMCPUID);

    /*
     * Change VM state to destroying and unlink the VM.
     */
    AssertLogRelMsgReturn(vmR3TrySetState(pVM, VMSTATE_DESTROYING, VMSTATE_OFF),
                          ("%s\n", VMR3GetStateName(pVM->enmVMState)),
                          VERR_VM_INVALID_VM_STATE);

    /** @todo lock this when we start having multiple machines in a process... */
    /* Unlink this UVM from the singly linked global list (g_pUVMsHead). */
    PUVM pUVM = pVM->pUVM; AssertPtr(pUVM);
    if (g_pUVMsHead == pUVM)
        g_pUVMsHead = pUVM->pNext;
    else
    {
        /* Walk the list to find the predecessor of pUVM. */
        PUVM pPrev = g_pUVMsHead;
        while (pPrev && pPrev->pNext != pUVM)
            pPrev = pPrev->pNext;
        AssertMsgReturn(pPrev, ("pUVM=%p / pVM=%p is INVALID!\n", pUVM, pVM), VERR_INVALID_PARAMETER);

        pPrev->pNext = pUVM->pNext;
    }
    pUVM->pNext = NULL;

    /*
     * Notify registered at destruction listeners.
     */
    vmR3AtDtor(pVM);

    /*
     * EMT(0) does the final cleanup, so if we're it calling VMR3Destroy then
     * we'll have to postpone parts of it till later. Otherwise, call
     * vmR3Destroy on each of the EMTs in ending with EMT(0) doing the bulk
     * of the cleanup.
     */
    if (VMMGetCpuId(pVM) == 0)
    {
        /* Called on EMT(0): flag deferred cleanup and tell everyone to quit. */
        pUVM->vm.s.fEMTDoesTheCleanup = true;
        pUVM->vm.s.fTerminateEMT = true;
        VM_FF_SET(pVM, VM_FF_TERMINATE);

        /* Terminate the other EMTs. */
        for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
        {
            int rc = VMR3ReqCallWaitU(pUVM, idCpu, (PFNRT)vmR3Destroy, 1, pVM);
            AssertLogRelRC(rc);
        }
    }
    else
    {
        /* vmR3Destroy on all EMTs, ending with EMT(0). */
        int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
        AssertLogRelRC(rc);

        /* Wait for EMTs and destroy the UVM. */
        vmR3DestroyUVM(pUVM, 30000);
    }

    LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
1902
1903
1904/**
1905 * Internal destruction worker.
1906 *
1907 * This is either called from VMR3Destroy via VMR3ReqCallU or from
1908 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
1909 * VMR3Destroy().
1910 *
1911 * When called on EMT(0), it will performed the great bulk of the destruction.
1912 * When called on the other EMTs, they will do nothing and the whole purpose is
1913 * to return VINF_EM_TERMINATE so they break out of their run loops.
1914 *
1915 * @returns VINF_EM_TERMINATE.
1916 * @param pVM The VM handle.
1917 */
1918DECLCALLBACK(int) vmR3Destroy(PVM pVM)
1919{
1920 PUVM pUVM = pVM->pUVM;
1921 PVMCPU pVCpu = VMMGetCpu(pVM);
1922 Assert(pVCpu);
1923 LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
1924
1925 /*
1926 * Only VCPU 0 does the full cleanup.
1927 */
1928 if (pVCpu->idCpu == 0)
1929 {
1930
1931 /*
1932 * Dump statistics to the log.
1933 */
1934#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
1935 RTLogFlags(NULL, "nodisabled nobuffered");
1936#endif
1937#ifdef VBOX_WITH_STATISTICS
1938 STAMR3Dump(pVM, "*");
1939#else
1940 LogRel(("************************* Statistics *************************\n"));
1941 STAMR3DumpToReleaseLog(pVM, "*");
1942 LogRel(("********************* End of statistics **********************\n"));
1943#endif
1944
1945 /*
1946 * Destroy the VM components.
1947 */
1948 int rc = TMR3Term(pVM);
1949 AssertRC(rc);
1950#ifdef VBOX_WITH_DEBUGGER
1951 rc = DBGCTcpTerminate(pVM, pUVM->vm.s.pvDBGC);
1952 pUVM->vm.s.pvDBGC = NULL;
1953#endif
1954 AssertRC(rc);
1955 rc = DBGFR3Term(pVM);
1956 AssertRC(rc);
1957 rc = PDMR3Term(pVM);
1958 AssertRC(rc);
1959 rc = EMR3Term(pVM);
1960 AssertRC(rc);
1961 rc = IOMR3Term(pVM);
1962 AssertRC(rc);
1963 rc = CSAMR3Term(pVM);
1964 AssertRC(rc);
1965 rc = PATMR3Term(pVM);
1966 AssertRC(rc);
1967 rc = TRPMR3Term(pVM);
1968 AssertRC(rc);
1969 rc = SELMR3Term(pVM);
1970 AssertRC(rc);
1971 rc = REMR3Term(pVM);
1972 AssertRC(rc);
1973 rc = HWACCMR3Term(pVM);
1974 AssertRC(rc);
1975 rc = PGMR3Term(pVM);
1976 AssertRC(rc);
1977 rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
1978 AssertRC(rc);
1979 rc = CPUMR3Term(pVM);
1980 AssertRC(rc);
1981 SSMR3Term(pVM);
1982 rc = PDMR3CritSectTerm(pVM);
1983 AssertRC(rc);
1984 rc = MMR3Term(pVM);
1985 AssertRC(rc);
1986
1987 /*
1988 * We're done in this thread (EMT).
1989 */
1990 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
1991 ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_TERMINATE);
1992 LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
1993 }
1994 return VINF_EM_TERMINATE;
1995}
1996
1997
1998/**
1999 * Called at the end of the EMT procedure to take care of the final cleanup.
2000 *
2001 * Currently only EMT(0) will do work here. It will destroy the shared VM
2002 * structure if it is still around. If EMT(0) was the caller of VMR3Destroy it
2003 * will destroy UVM and nothing will be left behind upon exit. But if some
2004 * other thread is calling VMR3Destroy, they will clean up UVM after all EMTs
2005 * has exitted.
2006 *
2007 * @param pUVM The UVM handle.
2008 * @param idCpu The virtual CPU id.
2009 */
2010void vmR3DestroyFinalBitFromEMT(PUVM pUVM, VMCPUID idCpu)
2011{
2012 /*
2013 * Only EMT(0) has work to do here.
2014 */
2015 if (idCpu != 0)
2016 return;
2017 Assert( !pUVM->pVM
2018 || VMMGetCpuId(pUVM->pVM) == 0);
2019
2020 /*
2021 * If we have a shared VM structure, change its state to Terminated and
2022 * tell GVMM to destroy it.
2023 */
2024 if (pUVM->pVM)
2025 {
2026 vmR3SetState(pUVM->pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
2027 int rc = SUPR3CallVMMR0Ex(pUVM->pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
2028 AssertLogRelRC(rc);
2029 pUVM->pVM = NULL;
2030 }
2031
2032 /*
2033 * If EMT(0) called VMR3Destroy, then it will destroy UVM as well.
2034 */
2035 if (pUVM->vm.s.fEMTDoesTheCleanup)
2036 vmR3DestroyUVM(pUVM, 30000);
2037}
2038
2039
2040/**
2041 * Destroys the UVM portion.
2042 *
2043 * This is called as the final step in the VM destruction or as the cleanup
2044 * in case of a creation failure. If EMT(0) called VMR3Destroy, meaning
2045 * VMINTUSERPERVM::fEMTDoesTheCleanup is true, it will call this as
2046 * vmR3DestroyFinalBitFromEMT completes.
2047 *
2048 * @param pVM VM Handle.
2049 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2050 * threads.
2051 */
2052static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2053{
2054 /*
2055 * Signal termination of each the emulation threads and
2056 * wait for them to complete.
2057 */
2058 /* Signal them. */
2059 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2060 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2061 {
2062 ASMAtomicUoWriteBool(&pUVM->aCpus[i].vm.s.fTerminateEMT, true);
2063 if (pUVM->pVM)
2064 VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
2065 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2066 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2067 }
2068
2069 /* Wait for them. */
2070 uint64_t NanoTS = RTTimeNanoTS();
2071 RTTHREAD hSelf = RTThreadSelf();
2072 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2073 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2074 {
2075 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2076 if ( hThread != NIL_RTTHREAD
2077 && hThread != hSelf)
2078 {
2079 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2080 int rc2 = RTThreadWait(hThread,
2081 cMilliesElapsed < cMilliesEMTWait
2082 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2083 : 2000,
2084 NULL);
2085 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2086 rc2 = RTThreadWait(hThread, 1000, NULL);
2087 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2088 if (RT_SUCCESS(rc2))
2089 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2090 }
2091 }
2092
2093 /* Cleanup the semaphores. */
2094 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2095 {
2096 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2097 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2098 }
2099
2100 /*
2101 * Free the event semaphores associated with the request packets.
2102 */
2103 unsigned cReqs = 0;
2104 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2105 {
2106 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2107 pUVM->vm.s.apReqFree[i] = NULL;
2108 for (; pReq; pReq = pReq->pNext, cReqs++)
2109 {
2110 pReq->enmState = VMREQSTATE_INVALID;
2111 RTSemEventDestroy(pReq->EventSem);
2112 }
2113 }
2114 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2115
2116 /*
2117 * Kill all queued requests. (There really shouldn't be any!)
2118 */
2119 for (unsigned i = 0; i < 10; i++)
2120 {
2121 PVMREQ pReqHead = (PVMREQ)ASMAtomicXchgPtr((void *volatile *)&pUVM->vm.s.pReqs, NULL);
2122 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2123 if (!pReqHead)
2124 break;
2125 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2126 {
2127 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2128 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2129 RTSemEventSignal(pReq->EventSem);
2130 RTThreadSleep(2);
2131 RTSemEventDestroy(pReq->EventSem);
2132 }
2133 /* give them a chance to respond before we free the request memory. */
2134 RTThreadSleep(32);
2135 }
2136
2137 /*
2138 * Now all queued VCPU requests (again, there shouldn't be any).
2139 */
2140 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2141 {
2142 PUVMCPU pUVCpu = &pUVM->aCpus[i];
2143
2144 for (unsigned i = 0; i < 10; i++)
2145 {
2146 PVMREQ pReqHead = (PVMREQ)ASMAtomicXchgPtr((void *volatile *)&pUVCpu->vm.s.pReqs, NULL);
2147 AssertMsg(!pReqHead, ("This isn't supposed to happen! VMR3Destroy caller has to serialize this.\n"));
2148 if (!pReqHead)
2149 break;
2150 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2151 {
2152 ASMAtomicUoWriteSize(&pReq->iStatus, VERR_INTERNAL_ERROR);
2153 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2154 RTSemEventSignal(pReq->EventSem);
2155 RTThreadSleep(2);
2156 RTSemEventDestroy(pReq->EventSem);
2157 }
2158 /* give them a chance to respond before we free the request memory. */
2159 RTThreadSleep(32);
2160 }
2161 }
2162
2163 /*
2164 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2165 */
2166 PDMR3TermUVM(pUVM);
2167
2168 /*
2169 * Terminate the support library if initialized.
2170 */
2171 if (pUVM->vm.s.pSession)
2172 {
2173 int rc = SUPR3Term(false /*fForced*/);
2174 AssertRC(rc);
2175 pUVM->vm.s.pSession = NIL_RTR0PTR;
2176 }
2177
2178 /*
2179 * Destroy the MM heap and free the UVM structure.
2180 */
2181 MMR3TermUVM(pUVM);
2182 STAMR3TermUVM(pUVM);
2183
2184#ifdef LOG_ENABLED
2185 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2186#endif
2187 RTTlsFree(pUVM->vm.s.idxTLS);
2188
2189 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
2190 RTMemPageFree(pUVM);
2191
2192 RTLogFlush(NULL);
2193}
2194
2195
2196/**
2197 * Enumerates the VMs in this process.
2198 *
2199 * @returns Pointer to the next VM.
2200 * @returns NULL when no more VMs.
2201 * @param pVMPrev The previous VM
2202 * Use NULL to start the enumeration.
2203 */
2204VMMR3DECL(PVM) VMR3EnumVMs(PVM pVMPrev)
2205{
2206 /*
2207 * This is quick and dirty. It has issues with VM being
2208 * destroyed during the enumeration.
2209 */
2210 PUVM pNext;
2211 if (pVMPrev)
2212 pNext = pVMPrev->pUVM->pNext;
2213 else
2214 pNext = g_pUVMsHead;
2215 return pNext ? pNext->pVM : NULL;
2216}
2217
2218
2219/**
2220 * Registers an at VM destruction callback.
2221 *
2222 * @returns VBox status code.
2223 * @param pfnAtDtor Pointer to callback.
2224 * @param pvUser User argument.
2225 */
2226VMMR3DECL(int) VMR3AtDtorRegister(PFNVMATDTOR pfnAtDtor, void *pvUser)
2227{
2228 /*
2229 * Check if already registered.
2230 */
2231 VM_ATDTOR_LOCK();
2232 PVMATDTOR pCur = g_pVMAtDtorHead;
2233 while (pCur)
2234 {
2235 if (pfnAtDtor == pCur->pfnAtDtor)
2236 {
2237 VM_ATDTOR_UNLOCK();
2238 AssertMsgFailed(("Already registered at destruction callback %p!\n", pfnAtDtor));
2239 return VERR_INVALID_PARAMETER;
2240 }
2241
2242 /* next */
2243 pCur = pCur->pNext;
2244 }
2245 VM_ATDTOR_UNLOCK();
2246
2247 /*
2248 * Allocate new entry.
2249 */
2250 PVMATDTOR pVMAtDtor = (PVMATDTOR)RTMemAlloc(sizeof(*pVMAtDtor));
2251 if (!pVMAtDtor)
2252 return VERR_NO_MEMORY;
2253
2254 VM_ATDTOR_LOCK();
2255 pVMAtDtor->pfnAtDtor = pfnAtDtor;
2256 pVMAtDtor->pvUser = pvUser;
2257 pVMAtDtor->pNext = g_pVMAtDtorHead;
2258 g_pVMAtDtorHead = pVMAtDtor;
2259 VM_ATDTOR_UNLOCK();
2260
2261 return VINF_SUCCESS;
2262}
2263
2264
2265/**
2266 * Deregisters an at VM destruction callback.
2267 *
2268 * @returns VBox status code.
2269 * @param pfnAtDtor Pointer to callback.
2270 */
2271VMMR3DECL(int) VMR3AtDtorDeregister(PFNVMATDTOR pfnAtDtor)
2272{
2273 /*
2274 * Find it, unlink it and free it.
2275 */
2276 VM_ATDTOR_LOCK();
2277 PVMATDTOR pPrev = NULL;
2278 PVMATDTOR pCur = g_pVMAtDtorHead;
2279 while (pCur)
2280 {
2281 if (pfnAtDtor == pCur->pfnAtDtor)
2282 {
2283 if (pPrev)
2284 pPrev->pNext = pCur->pNext;
2285 else
2286 g_pVMAtDtorHead = pCur->pNext;
2287 pCur->pNext = NULL;
2288 VM_ATDTOR_UNLOCK();
2289
2290 RTMemFree(pCur);
2291 return VINF_SUCCESS;
2292 }
2293
2294 /* next */
2295 pPrev = pCur;
2296 pCur = pCur->pNext;
2297 }
2298 VM_ATDTOR_UNLOCK();
2299
2300 return VERR_INVALID_PARAMETER;
2301}
2302
2303
2304/**
2305 * Walks the list of at VM destructor callbacks.
2306 * @param pVM The VM which is about to be destroyed.
2307 */
2308static void vmR3AtDtor(PVM pVM)
2309{
2310 /*
2311 * Find it, unlink it and free it.
2312 */
2313 VM_ATDTOR_LOCK();
2314 for (PVMATDTOR pCur = g_pVMAtDtorHead; pCur; pCur = pCur->pNext)
2315 pCur->pfnAtDtor(pVM, pCur->pvUser);
2316 VM_ATDTOR_UNLOCK();
2317}
2318
2319
2320/**
2321 * Worker which checks integrity of some internal structures.
2322 * This is yet another attempt to track down that AVL tree crash.
2323 */
2324static void vmR3CheckIntegrity(PVM pVM)
2325{
2326#ifdef VBOX_STRICT
2327 int rc = PGMR3CheckIntegrity(pVM);
2328 AssertReleaseRC(rc);
2329#endif
2330}
2331
2332
2333/**
2334 * Reset request processor.
2335 *
2336 * This is called by the emulation threads as a response to the
2337 * reset request issued by VMR3Reset().
2338 *
2339 * @returns VBox status code.
2340 * @param pVM VM to reset.
2341 */
2342static DECLCALLBACK(int) vmR3Reset(PVM pVM)
2343{
2344 PVMCPU pVCpu = VMMGetCpu(pVM);
2345
2346 /*
2347 * The first EMT will try change the state to resetting.
2348 */
2349 if (pVCpu->idCpu == pVM->cCpus - 1)
2350 {
2351 PUVM pUVM = pVM->pUVM;
2352 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2353 VMSTATE enmStateOld = pVM->enmVMState;
2354 switch (enmStateOld)
2355 {
2356 case VMSTATE_RUNNING:
2357 case VMSTATE_SUSPENDED:
2358 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RESETTING, enmStateOld);
2359 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2360 break;
2361
2362 case VMSTATE_RUNNING_LS:
2363 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RESETTING_LS, enmStateOld);
2364 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2365
2366 SSMR3Cancel(pVM);
2367 break;
2368
2369 default:
2370 AssertLogRelMsgFailed(("%s\n", VMR3GetStateName(enmStateOld)));
2371 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2372 return VERR_VM_INVALID_VM_STATE;
2373 }
2374 }
2375
2376 /*
2377 * Check the state.
2378 */
2379 VMSTATE enmVMState = pVM->enmVMState;
2380 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2381 || enmVMState == VMSTATE_RESETTING_LS,
2382 ("%s\n", VMR3GetStateName(enmVMState)),
2383 VERR_VM_INVALID_VM_STATE);
2384
2385 /*
2386 * EMT(0) does the full cleanup *after* all the other EMTs has been
2387 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2388 *
2389 * Because there are per-cpu reset routines and order may/is important,
2390 * the following sequence looks a bit ugly...
2391 */
2392 if (pVCpu->idCpu == 0)
2393 vmR3CheckIntegrity(pVM);
2394
2395 /* Reset the VCpu state. */
2396 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2397
2398 /* Clear all pending forced actions. */
2399 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2400
2401 /*
2402 * Reset the VM components.
2403 */
2404 if (pVCpu->idCpu == 0)
2405 {
2406 PATMR3Reset(pVM);
2407 CSAMR3Reset(pVM);
2408 PGMR3Reset(pVM); /* We clear VM RAM in PGMR3Reset. It's vital PDMR3Reset is executed
2409 * _afterwards_. E.g. ACPI sets up RAM tables during init/reset. */
2410 MMR3Reset(pVM);
2411 PDMR3Reset(pVM);
2412 SELMR3Reset(pVM);
2413 TRPMR3Reset(pVM);
2414 REMR3Reset(pVM);
2415 IOMR3Reset(pVM);
2416 CPUMR3Reset(pVM);
2417 }
2418 CPUMR3ResetCpu(pVCpu);
2419 if (pVCpu->idCpu == 0)
2420 {
2421 TMR3Reset(pVM);
2422 EMR3Reset(pVM);
2423 HWACCMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2424
2425#ifdef LOG_ENABLED
2426 /*
2427 * Debug logging.
2428 */
2429 RTLogPrintf("\n\nThe VM was reset:\n");
2430 DBGFR3Info(pVM, "cpum", "verbose", NULL);
2431#endif
2432
2433 /*
2434 * Since EMT(0) is the last to go thru here, it will advance the state.
2435 */
2436 PUVM pUVM = pVM->pUVM;
2437 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2438 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2439 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING);
2440 else
2441 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, pVM->enmVMState);
2442 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2443
2444 vmR3CheckIntegrity(pVM);
2445 }
2446
2447 return VINF_EM_RESET;
2448}
2449
2450
2451/**
2452 * Reset the current VM.
2453 *
2454 * @returns VBox status code.
2455 * @param pVM VM to reset.
2456 */
2457VMMR3DECL(int) VMR3Reset(PVM pVM)
2458{
2459 LogFlow(("VMR3Reset:\n"));
2460 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2461
2462 /*
2463 * Forward the query on
2464 * Queue reset request to the emulation thread
2465 * and wait for it to be processed. (in reverse order as VCPU 0 does the real cleanup)
2466 */
2467 int rc = VMR3ReqCallWaitU(pVM->pUVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Reset, 1, pVM);
2468 AssertLogRelRC(rc);
2469 return rc;
2470}
2471
2472
2473/**
2474 * Gets the current VM state.
2475 *
2476 * @returns The current VM state.
2477 * @param pVM VM handle.
2478 * @thread Any
2479 */
2480VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
2481{
2482 return pVM->enmVMState;
2483}
2484
2485
2486/**
2487 * Gets the state name string for a VM state.
2488 *
2489 * @returns Pointer to the state name. (readonly)
2490 * @param enmState The state.
2491 */
2492VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
2493{
2494 switch (enmState)
2495 {
2496 case VMSTATE_CREATING: return "CREATING";
2497 case VMSTATE_CREATED: return "CREATED";
2498 case VMSTATE_LOADING: return "LOADING";
2499 case VMSTATE_POWERING_ON: return "POWERING_ON";
2500 case VMSTATE_RESUMING: return "RESUMING";
2501 case VMSTATE_RUNNING: return "RUNNING";
2502 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
2503 case VMSTATE_RESETTING: return "RESETTING";
2504 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
2505 case VMSTATE_SUSPENDED: return "SUSPENDED";
2506 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
2507 case VMSTATE_SUSPENDING: return "SUSPENDING";
2508 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
2509 case VMSTATE_SAVING: return "SAVING";
2510 case VMSTATE_DEBUGGING: return "DEBUGGING";
2511 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
2512 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
2513 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
2514 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
2515 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
2516 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
2517 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
2518 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
2519 case VMSTATE_OFF: return "OFF";
2520 case VMSTATE_DESTROYING: return "DESTROYING";
2521 case VMSTATE_TERMINATED: return "TERMINATED";
2522
2523 default:
2524 AssertMsgFailed(("Unknown state %d\n", enmState));
2525 return "Unknown!\n";
2526 }
2527}
2528
2529
2530/**
2531 * Validates the state transition in strict builds.
2532 *
2533 * @returns true if valid, false if not.
2534 *
2535 * @param enmStateOld The old (current) state.
2536 * @param enmStateNew The proposed new state.
2537 */
2538static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
2539{
2540#ifdef /*VBOX_STRICT*/ DEBUG_bird
2541 switch (enmStateOld)
2542 {
2543 case VMSTATE_CREATING:
2544 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2545 break;
2546
2547 case VMSTATE_CREATED:
2548 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
2549 || enmStateNew == VMSTATE_POWERING_ON
2550 || enmStateNew == VMSTATE_POWERING_OFF
2551 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2552 break;
2553
2554 case VMSTATE_LOADING:
2555 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
2556 || enmStateNew == VMSTATE_LOAD_FAILURE
2557 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2558 break;
2559
2560 case VMSTATE_POWERING_ON:
2561 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2562 || enmStateNew == VMSTATE_FATAL_ERROR /*?*/
2563 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2564 break;
2565
2566 case VMSTATE_RESUMING:
2567 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2568 || enmStateNew == VMSTATE_FATAL_ERROR /*?*/
2569 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2570 break;
2571
2572 case VMSTATE_RUNNING:
2573 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2574 || enmStateNew == VMSTATE_SUSPENDING
2575 || enmStateNew == VMSTATE_RESETTING
2576 || enmStateNew == VMSTATE_RUNNING_LS
2577 || enmStateNew == VMSTATE_DEBUGGING
2578 || enmStateNew == VMSTATE_FATAL_ERROR
2579 || enmStateNew == VMSTATE_GURU_MEDITATION
2580 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2581 break;
2582
2583 case VMSTATE_RUNNING_LS:
2584 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
2585 || enmStateNew == VMSTATE_SUSPENDING_LS
2586 || enmStateNew == VMSTATE_RESETTING_LS
2587 || enmStateNew == VMSTATE_RUNNING
2588 || enmStateNew == VMSTATE_DEBUGGING_LS
2589 || enmStateNew == VMSTATE_FATAL_ERROR_LS
2590 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
2591 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2592 break;
2593
2594 case VMSTATE_RESETTING:
2595 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2596 break;
2597
2598 case VMSTATE_RESETTING_LS:
2599 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2600 break;
2601
2602 case VMSTATE_SUSPENDING:
2603 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2604 break;
2605
2606 case VMSTATE_SUSPENDING_LS:
2607 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED_LS, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2608 break;
2609
2610 case VMSTATE_SUSPENDED:
2611 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2612 || enmStateNew == VMSTATE_SAVING
2613 || enmStateNew == VMSTATE_RESETTING
2614 || enmStateNew == VMSTATE_RESUMING
2615 || enmStateNew == VMSTATE_LOADING
2616 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2617 break;
2618
2619 case VMSTATE_SUSPENDED_LS:
2620 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED_LS
2621 || enmStateNew == VMSTATE_SUSPENDED
2622 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2623 break;
2624
2625 case VMSTATE_SAVING:
2626 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2627 break;
2628
2629 case VMSTATE_DEBUGGING:
2630 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2631 || enmStateNew == VMSTATE_POWERING_OFF
2632 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2633 break;
2634
2635 case VMSTATE_DEBUGGING_LS:
2636 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING_LS
2637 || enmStateNew == VMSTATE_POWERING_OFF_LS
2638 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2639 break;
2640
2641 case VMSTATE_POWERING_OFF:
2642 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2643 break;
2644
2645 case VMSTATE_POWERING_OFF_LS:
2646 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2647 || enmStateNew == VMSTATE_OFF_LS
2648 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2649 break;
2650
2651 case VMSTATE_OFF:
2652 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2653 break;
2654
2655 case VMSTATE_OFF_LS:
2656 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2657 break;
2658
2659 case VMSTATE_FATAL_ERROR:
2660 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2661 break;
2662
2663 case VMSTATE_FATAL_ERROR_LS:
2664 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
2665 || enmStateNew == VMSTATE_POWERING_OFF_LS
2666 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2667 break;
2668
2669 case VMSTATE_GURU_MEDITATION:
2670 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
2671 || enmStateNew == VMSTATE_POWERING_OFF
2672 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2673 break;
2674
2675 case VMSTATE_GURU_MEDITATION_LS:
2676 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
2677 || enmStateNew == VMSTATE_POWERING_OFF_LS
2678 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2679 break;
2680
2681 case VMSTATE_LOAD_FAILURE:
2682 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2683 break;
2684
2685 case VMSTATE_DESTROYING:
2686 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2687 break;
2688
2689 case VMSTATE_TERMINATED:
2690 default:
2691 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2692 break;
2693 }
2694#endif /* VBOX_STRICT */
2695 return true;
2696}
2697
2698
2699/**
2700 * Does the state change callouts.
2701 *
2702 * The caller owns the AtStateCritSect.
2703 *
2704 * @param pVM The VM handle.
2705 * @param pUVM The UVM handle.
2706 * @param enmStateNew The New state.
2707 * @param enmStateOld The old state.
2708 */
2709static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
2710{
2711 LogRel(("Changing the VM state from '%s' to '%s'.\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
2712
2713 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
2714 {
2715 pCur->pfnAtState(pVM, enmStateNew, enmStateOld, pCur->pvUser);
2716 if ( enmStateNew != VMSTATE_DESTROYING
2717 && pVM->enmVMState == VMSTATE_DESTROYING)
2718 break;
2719 AssertMsg(pVM->enmVMState == enmStateNew,
2720 ("You are not allowed to change the state while in the change callback, except "
2721 "from destroying the VM. There are restrictions in the way the state changes "
2722 "are propagated up to the EM execution loop and it makes the program flow very "
2723 "difficult to follow.\n"));
2724 }
2725}
2726
2727
2728/**
2729 * Sets the current VM state, with the AtStatCritSect already entered.
2730 *
2731 * @param pVM The VM handle.
2732 * @param pUVM The UVM handle.
2733 * @param enmStateNew The new state.
2734 * @param enmStateOld The old state.
2735 */
2736static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
2737{
2738 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
2739
2740 Assert(pVM->enmVMState == enmStateOld);
2741 pUVM->vm.s.enmPrevVMState = enmStateOld;
2742 pVM->enmVMState = enmStateNew;
2743
2744 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
2745}
2746
2747
2748/**
2749 * Tries to set the VM state.
2750 *
2751 * @returns true on success, false on failure.
2752 * @param pVM VM handle.
2753 * @param enmStateNew The new state.
2754 */
2755static bool vmR3TrySetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
2756{
2757 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
2758
2759 bool fRc = true;
2760 PUVM pUVM = pVM->pUVM;
2761 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2762 if (pVM->enmVMState == enmStateOld)
2763 {
2764 pVM->enmVMState = enmStateNew;
2765 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
2766 }
2767 else
2768 {
2769 Log(("vmR3TrySetState: failed enmVMState=%s, enmStateOld=%s, enmStateNew=%s\n",
2770 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
2771 fRc = false;
2772 }
2773 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2774 return fRc;
2775}
2776
2777
2778/**
2779 * Tries to set the VM state give two alternative transisions.
2780 *
2781 * @returns 1 if the first alternative, 2 if the second, and 0 on failure.
2782 * @param pVM The VM handle.
2783 * @param enmStateNew1 New state, alternative 1.
2784 * @param enmStateOld1 Old state, alternative 1.
2785 * @param enmStateNew2 New state, alternative 2.
2786 * @param enmStateOld2 Old state, alternative 2.
2787 */
2788static unsigned vmR3TrySetState2(PVM pVM, VMSTATE enmStateNew1, VMSTATE enmStateOld1, VMSTATE enmStateNew2, VMSTATE enmStateOld2)
2789{
2790 vmR3ValidateStateTransition(enmStateOld1, enmStateNew1);
2791 vmR3ValidateStateTransition(enmStateOld2, enmStateNew2);
2792
2793 PUVM pUVM = pVM->pUVM;
2794 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2795
2796 unsigned rc;
2797 VMSTATE enmStateOld = pVM->enmVMState;
2798 if (enmStateOld == enmStateOld1)
2799 {
2800 vmR3SetStateLocked(pVM, pUVM, enmStateNew1, enmStateOld1);
2801 rc = 1;
2802 }
2803 else if (enmStateOld == enmStateOld2)
2804 {
2805 vmR3SetStateLocked(pVM, pUVM, enmStateNew2, enmStateOld2);
2806 rc = 2;
2807 }
2808 else
2809 rc = 0;
2810
2811 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2812 return rc;
2813}
2814
2815
2816/**
2817 * Sets the current VM state.
2818 *
2819 * @param pVM VM handle.
2820 * @param enmStateNew The new state.
2821 * @param enmStateOld The old state (for asserting only).
2822 */
2823static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
2824{
2825 PUVM pUVM = pVM->pUVM;
2826 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2827
2828 AssertMsg(enmStateOld == pVM->enmVMState, ("\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(pVM->enmVMState)));
2829 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState);
2830
2831 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2832}
2833
2834
2835/**
2836 * Sets the current VM state to one of the two specified new states depending on
2837 * the what the current (old) state is.
2838 *
2839 * @returns 1 if the first alternative, 2 if the second.
2840 * @param pVM The VM handle.
2841 * @param enmStateNew1 New state, alternative 1.
2842 * @param enmStateOld1 Old state, alternative 1.
2843 * @param enmStateNew2 New state, alternative 2.
2844 * @param enmStateOld2 Old state, alternative 2.
2845 */
2846static unsigned vmR3SetState2(PVM pVM, VMSTATE enmStateNew1, VMSTATE enmStateOld1, VMSTATE enmStateNew2, VMSTATE enmStateOld2)
2847{
2848 vmR3ValidateStateTransition(enmStateOld1, enmStateNew1);
2849 vmR3ValidateStateTransition(enmStateOld2, enmStateNew2);
2850
2851 PUVM pUVM = pVM->pUVM;
2852 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2853
2854 unsigned rc;
2855 VMSTATE enmStateOld = pVM->enmVMState;
2856 if (enmStateOld == enmStateOld1)
2857 {
2858 vmR3SetStateLocked(pVM, pUVM, enmStateNew1, enmStateOld1);
2859 rc = 1;
2860 }
2861 else
2862 {
2863 AssertLogRelMsg(enmStateOld == enmStateOld2,
2864 ("%s, expected %s or %s (-> %s or %s)\n",
2865 VMR3GetStateName(enmStateOld),
2866 VMR3GetStateName(enmStateOld1),
2867 VMR3GetStateName(enmStateOld2),
2868 VMR3GetStateName(enmStateNew1),
2869 VMR3GetStateName(enmStateNew2)));
2870 vmR3SetStateLocked(pVM, pUVM, enmStateNew2, enmStateOld2);
2871 rc = 2;
2872 }
2873
2874 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2875 return rc;
2876}
2877
2878
2879/**
2880 * Flag a guru meditation ... a hack.
2881 *
2882 * @param pVM The VM handle
2883 *
2884 * @todo Rewrite this part. The guru meditation should be flagged
2885 * immediately by the VMM and not by VMEmt.cpp when it's all over.
2886 */
2887void vmR3SetGuruMeditation(PVM pVM)
2888{
2889 if (vmR3TrySetState(pVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING))
2890 return;
2891 if (vmR3TrySetState(pVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS))
2892 SSMR3Cancel(pVM);
2893}
2894
2895
2896/**
2897 * Registers a VM state change callback.
2898 *
2899 * You are not allowed to call any function which changes the VM state from a
2900 * state callback, except VMR3Destroy().
2901 *
2902 * @returns VBox status code.
2903 * @param pVM VM handle.
2904 * @param pfnAtState Pointer to callback.
2905 * @param pvUser User argument.
2906 * @thread Any.
2907 */
2908VMMR3DECL(int) VMR3AtStateRegister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
2909{
2910 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
2911
2912 /*
2913 * Validate input.
2914 */
2915 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
2916 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2917
2918 /*
2919 * Allocate a new record.
2920 */
2921 PUVM pUVM = pVM->pUVM;
2922 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
2923 if (!pNew)
2924 return VERR_NO_MEMORY;
2925
2926 /* fill */
2927 pNew->pfnAtState = pfnAtState;
2928 pNew->pvUser = pvUser;
2929
2930 /* insert */
2931 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2932 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
2933 *pUVM->vm.s.ppAtStateNext = pNew;
2934 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
2935 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2936
2937 return VINF_SUCCESS;
2938}
2939
2940
2941/**
2942 * Deregisters a VM state change callback.
2943 *
2944 * @returns VBox status code.
2945 * @param pVM VM handle.
2946 * @param pfnAtState Pointer to callback.
2947 * @param pvUser User argument.
2948 * @thread Any.
2949 */
2950VMMR3DECL(int) VMR3AtStateDeregister(PVM pVM, PFNVMATSTATE pfnAtState, void *pvUser)
2951{
2952 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
2953
2954 /*
2955 * Validate input.
2956 */
2957 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
2958 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2959
2960 PUVM pUVM = pVM->pUVM;
2961 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2962
2963 /*
2964 * Search the list for the entry.
2965 */
2966 PVMATSTATE pPrev = NULL;
2967 PVMATSTATE pCur = pUVM->vm.s.pAtState;
2968 while ( pCur
2969 && ( pCur->pfnAtState != pfnAtState
2970 || pCur->pvUser != pvUser))
2971 {
2972 pPrev = pCur;
2973 pCur = pCur->pNext;
2974 }
2975 if (!pCur)
2976 {
2977 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
2978 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2979 return VERR_FILE_NOT_FOUND;
2980 }
2981
2982 /*
2983 * Unlink it.
2984 */
2985 if (pPrev)
2986 {
2987 pPrev->pNext = pCur->pNext;
2988 if (!pCur->pNext)
2989 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
2990 }
2991 else
2992 {
2993 pUVM->vm.s.pAtState = pCur->pNext;
2994 if (!pCur->pNext)
2995 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
2996 }
2997
2998 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2999
3000 /*
3001 * Free it.
3002 */
3003 pCur->pfnAtState = NULL;
3004 pCur->pNext = NULL;
3005 MMR3HeapFree(pCur);
3006
3007 return VINF_SUCCESS;
3008}
3009
3010
3011/**
3012 * Registers a VM error callback.
3013 *
3014 * @returns VBox status code.
3015 * @param pVM The VM handle.
3016 * @param pfnAtError Pointer to callback.
3017 * @param pvUser User argument.
3018 * @thread Any.
3019 */
3020VMMR3DECL(int) VMR3AtErrorRegister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3021{
3022 return VMR3AtErrorRegisterU(pVM->pUVM, pfnAtError, pvUser);
3023}
3024
3025
3026/**
3027 * Registers a VM error callback.
3028 *
3029 * @returns VBox status code.
3030 * @param pUVM The VM handle.
3031 * @param pfnAtError Pointer to callback.
3032 * @param pvUser User argument.
3033 * @thread Any.
3034 */
3035VMMR3DECL(int) VMR3AtErrorRegisterU(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3036{
3037 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3038
3039 /*
3040 * Validate input.
3041 */
3042 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3043 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3044
3045 /*
3046 * Allocate a new record.
3047 */
3048 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3049 if (!pNew)
3050 return VERR_NO_MEMORY;
3051
3052 /* fill */
3053 pNew->pfnAtError = pfnAtError;
3054 pNew->pvUser = pvUser;
3055
3056 /* insert */
3057 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3058 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3059 *pUVM->vm.s.ppAtErrorNext = pNew;
3060 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3061 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3062
3063 return VINF_SUCCESS;
3064}
3065
3066
3067/**
3068 * Deregisters a VM error callback.
3069 *
3070 * @returns VBox status code.
3071 * @param pVM The VM handle.
3072 * @param pfnAtError Pointer to callback.
3073 * @param pvUser User argument.
3074 * @thread Any.
3075 */
3076VMMR3DECL(int) VMR3AtErrorDeregister(PVM pVM, PFNVMATERROR pfnAtError, void *pvUser)
3077{
3078 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3079
3080 /*
3081 * Validate input.
3082 */
3083 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3084 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3085
3086 PUVM pUVM = pVM->pUVM;
3087 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3088
3089 /*
3090 * Search the list for the entry.
3091 */
3092 PVMATERROR pPrev = NULL;
3093 PVMATERROR pCur = pUVM->vm.s.pAtError;
3094 while ( pCur
3095 && ( pCur->pfnAtError != pfnAtError
3096 || pCur->pvUser != pvUser))
3097 {
3098 pPrev = pCur;
3099 pCur = pCur->pNext;
3100 }
3101 if (!pCur)
3102 {
3103 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3104 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3105 return VERR_FILE_NOT_FOUND;
3106 }
3107
3108 /*
3109 * Unlink it.
3110 */
3111 if (pPrev)
3112 {
3113 pPrev->pNext = pCur->pNext;
3114 if (!pCur->pNext)
3115 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3116 }
3117 else
3118 {
3119 pUVM->vm.s.pAtError = pCur->pNext;
3120 if (!pCur->pNext)
3121 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3122 }
3123
3124 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3125
3126 /*
3127 * Free it.
3128 */
3129 pCur->pfnAtError = NULL;
3130 pCur->pNext = NULL;
3131 MMR3HeapFree(pCur);
3132
3133 return VINF_SUCCESS;
3134}
3135
3136
3137/**
3138 * Ellipsis to va_list wrapper for calling pfnAtError.
3139 */
3140static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3141{
3142 va_list va;
3143 va_start(va, pszFormat);
3144 pCur->pfnAtError(pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
3145 va_end(va);
3146}
3147
3148
3149/**
3150 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3151 * The message is found in VMINT.
3152 *
3153 * @param pVM The VM handle.
3154 * @thread EMT.
3155 */
3156VMMR3DECL(void) VMR3SetErrorWorker(PVM pVM)
3157{
3158 VM_ASSERT_EMT(pVM);
3159 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Contrats!\n"));
3160
3161 /*
3162 * Unpack the error (if we managed to format one).
3163 */
3164 PVMERROR pErr = pVM->vm.s.pErrorR3;
3165 const char *pszFile = NULL;
3166 const char *pszFunction = NULL;
3167 uint32_t iLine = 0;
3168 const char *pszMessage;
3169 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3170 if (pErr)
3171 {
3172 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3173 if (pErr->offFile)
3174 pszFile = (const char *)pErr + pErr->offFile;
3175 iLine = pErr->iLine;
3176 if (pErr->offFunction)
3177 pszFunction = (const char *)pErr + pErr->offFunction;
3178 if (pErr->offMessage)
3179 pszMessage = (const char *)pErr + pErr->offMessage;
3180 else
3181 pszMessage = "No message!";
3182 }
3183 else
3184 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3185
3186 /*
3187 * Call the at error callbacks.
3188 */
3189 PUVM pUVM = pVM->pUVM;
3190 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3191 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3192 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3193 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3194}
3195
3196
/**
 * Creation time wrapper for vmR3SetErrorUV.
 *
 * @returns rc.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   RT_SRC_POS_DECL The source position of this error.
 * @param   pszFormat       Format string.
 * @param   ...             The arguments.
 * @thread  Any thread.
 */
static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    /* pszFile, iLine and pszFunction are the parameter names RT_SRC_POS_DECL expands to. */
    vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
    va_end(va);
    return rc;
}
3216
3217
3218/**
3219 * Worker which calls everyone listening to the VM error messages.
3220 *
3221 * @param pUVM Pointer to the user mode VM structure.
3222 * @param rc The VBox status code.
3223 * @param RT_SRC_POS_DECL The source position of this error.
3224 * @param pszFormat Format string.
3225 * @param pArgs Pointer to the format arguments.
3226 * @thread EMT
3227 */
3228DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
3229{
3230#ifdef LOG_ENABLED
3231 /*
3232 * Log the error.
3233 */
3234 RTLogPrintf("VMSetError: %s(%d) %s\n", pszFile, iLine, pszFunction);
3235 va_list va3;
3236 va_copy(va3, *pArgs);
3237 RTLogPrintfV(pszFormat, va3);
3238 va_end(va3);
3239 RTLogPrintf("\n");
3240#endif
3241
3242 /*
3243 * Make a copy of the message.
3244 */
3245 if (pUVM->pVM)
3246 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
3247
3248 /*
3249 * Call the at error callbacks.
3250 */
3251 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3252 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3253 {
3254 va_list va2;
3255 va_copy(va2, *pArgs);
3256 pCur->pfnAtError(pUVM->pVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
3257 va_end(va2);
3258 }
3259 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3260}
3261
3262
3263/**
3264 * Registers a VM runtime error callback.
3265 *
3266 * @returns VBox status code.
3267 * @param pVM The VM handle.
3268 * @param pfnAtRuntimeError Pointer to callback.
3269 * @param pvUser User argument.
3270 * @thread Any.
3271 */
3272VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3273{
3274 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3275
3276 /*
3277 * Validate input.
3278 */
3279 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3280 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3281
3282 /*
3283 * Allocate a new record.
3284 */
3285 PUVM pUVM = pVM->pUVM;
3286 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3287 if (!pNew)
3288 return VERR_NO_MEMORY;
3289
3290 /* fill */
3291 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3292 pNew->pvUser = pvUser;
3293
3294 /* insert */
3295 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3296 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3297 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3298 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3299 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3300
3301 return VINF_SUCCESS;
3302}
3303
3304
3305/**
3306 * Deregisters a VM runtime error callback.
3307 *
3308 * @returns VBox status code.
3309 * @param pVM The VM handle.
3310 * @param pfnAtRuntimeError Pointer to callback.
3311 * @param pvUser User argument.
3312 * @thread Any.
3313 */
3314VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PVM pVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3315{
3316 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3317
3318 /*
3319 * Validate input.
3320 */
3321 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3322 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3323
3324 PUVM pUVM = pVM->pUVM;
3325 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3326
3327 /*
3328 * Search the list for the entry.
3329 */
3330 PVMATRUNTIMEERROR pPrev = NULL;
3331 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3332 while ( pCur
3333 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3334 || pCur->pvUser != pvUser))
3335 {
3336 pPrev = pCur;
3337 pCur = pCur->pNext;
3338 }
3339 if (!pCur)
3340 {
3341 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
3342 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3343 return VERR_FILE_NOT_FOUND;
3344 }
3345
3346 /*
3347 * Unlink it.
3348 */
3349 if (pPrev)
3350 {
3351 pPrev->pNext = pCur->pNext;
3352 if (!pCur->pNext)
3353 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
3354 }
3355 else
3356 {
3357 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
3358 if (!pCur->pNext)
3359 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
3360 }
3361
3362 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3363
3364 /*
3365 * Free it.
3366 */
3367 pCur->pfnAtRuntimeError = NULL;
3368 pCur->pNext = NULL;
3369 MMR3HeapFree(pCur);
3370
3371 return VINF_SUCCESS;
3372}
3373
3374
3375/**
3376 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
3377 *
3378 * This does the common parts after the error has been saved / retrieved.
3379 *
3380 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
3381 *
3382 * @param pVM The VM handle.
3383 * @param fFlags The error flags.
3384 * @param pszErrorId Error ID string.
3385 * @param pszFormat Format string.
3386 * @param pVa Pointer to the format arguments.
3387 */
3388static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
3389{
3390 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
3391
3392 /*
3393 * Take actions before the call.
3394 */
3395 int rc = VINF_SUCCESS;
3396 if (fFlags & VMSETRTERR_FLAGS_FATAL)
3397 rc = vmR3SuspendCommon(pVM, true /*fFatal*/);
3398 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
3399 rc = vmR3SuspendCommon(pVM, false /*fFatal*/);
3400
3401 /*
3402 * Do the callback round.
3403 */
3404 PUVM pUVM = pVM->pUVM;
3405 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3406 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
3407 {
3408 va_list va;
3409 va_copy(va, *pVa);
3410 pCur->pfnAtRuntimeError(pVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
3411 va_end(va);
3412 }
3413 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3414
3415 return rc;
3416}
3417
3418
3419/**
3420 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
3421 */
3422static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
3423{
3424 va_list va;
3425 va_start(va, pszFormat);
3426 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
3427 va_end(va);
3428 return rc;
3429}
3430
3431
3432/**
3433 * This is a worker function for RC and Ring-0 calls to VMSetError and
3434 * VMSetErrorV.
3435 *
3436 * The message is found in VMINT.
3437 *
3438 * @returns VBox status code, see VMSetRuntimeError.
3439 * @param pVM The VM handle.
3440 * @thread EMT.
3441 */
3442VMMR3DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
3443{
3444 VM_ASSERT_EMT(pVM);
3445 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
3446
3447 /*
3448 * Unpack the error (if we managed to format one).
3449 */
3450 const char *pszErrorId = "SetRuntimeError";
3451 const char *pszMessage = "No message!";
3452 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
3453 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
3454 if (pErr)
3455 {
3456 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3457 if (pErr->offErrorId)
3458 pszErrorId = (const char *)pErr + pErr->offErrorId;
3459 if (pErr->offMessage)
3460 pszMessage = (const char *)pErr + pErr->offMessage;
3461 fFlags = pErr->fFlags;
3462 }
3463
3464 /*
3465 * Join cause with vmR3SetRuntimeErrorV.
3466 */
3467 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
3468}
3469
3470
3471/**
3472 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
3473 *
3474 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
3475 *
3476 * @param pVM The VM handle.
3477 * @param fFlags The error flags.
3478 * @param pszErrorId Error ID string.
3479 * @param pszMessage The error message residing the MM heap.
3480 *
3481 * @thread EMT
3482 */
3483DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
3484{
3485#if 0 /** @todo make copy of the error msg. */
3486 /*
3487 * Make a copy of the message.
3488 */
3489 va_list va2;
3490 va_copy(va2, *pVa);
3491 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
3492 va_end(va2);
3493#endif
3494
3495 /*
3496 * Join paths with VMR3SetRuntimeErrorWorker.
3497 */
3498 int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
3499 MMR3HeapFree(pszMessage);
3500 return rc;
3501}
3502
3503
3504/**
3505 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
3506 *
3507 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
3508 *
3509 * @param pVM The VM handle.
3510 * @param fFlags The error flags.
3511 * @param pszErrorId Error ID string.
3512 * @param pszFormat Format string.
3513 * @param pVa Pointer to the format arguments.
3514 *
3515 * @thread EMT
3516 */
3517DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
3518{
3519 /*
3520 * Make a copy of the message.
3521 */
3522 va_list va2;
3523 va_copy(va2, *pVa);
3524 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
3525 va_end(va2);
3526
3527 /*
3528 * Join paths with VMR3SetRuntimeErrorWorker.
3529 */
3530 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
3531}
3532
3533
/**
 * Gets the ID of the virtual CPU associated with the calling thread.
 *
 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
 *
 * @param   pVM             The VM handle.
 */
VMMR3DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
{
    /* The per-VM TLS slot holds the caller's UVMCPU pointer iff it is an EMT. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
    return pUVCpu
        ? pUVCpu->idCpu
        : NIL_VMCPUID;
}
3548
3549
3550/**
3551 * Returns the native handle of the current EMT VMCPU thread.
3552 *
3553 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
3554 * @param pVM The VM handle.
3555 * @thread EMT
3556 */
3557VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
3558{
3559 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
3560
3561 if (!pUVCpu)
3562 return NIL_RTNATIVETHREAD;
3563
3564 return pUVCpu->vm.s.NativeThreadEMT;
3565}
3566
3567
/**
 * Returns the native handle of the current EMT VMCPU thread.
 *
 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
 * @param   pUVM            Pointer to the user mode VM structure.
 * @thread  EMT
 */
VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
{
    /* The per-VM TLS slot holds the caller's UVMCPU pointer iff it is an EMT. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    if (!pUVCpu)
        return NIL_RTNATIVETHREAD;

    return pUVCpu->vm.s.NativeThreadEMT;
}
3584
3585
3586/**
3587 * Returns the handle of the current EMT VMCPU thread.
3588 *
3589 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
3590 * @param pVM The VM handle.
3591 * @thread EMT
3592 */
3593VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PVM pVM)
3594{
3595 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
3596
3597 if (!pUVCpu)
3598 return NIL_RTTHREAD;
3599
3600 return pUVCpu->vm.s.ThreadEMT;
3601}
3602
3603
/**
 * Returns the handle of the current EMT VMCPU thread.
 *
 * @returns Handle if this is an EMT thread; NIL_RTTHREAD otherwise
 * @param   pUVM            Pointer to the user mode VM structure.
 * @thread  EMT
 */
VMMR3DECL(RTTHREAD) VMR3GetVMCPUThreadU(PUVM pUVM)
{
    /* The per-VM TLS slot holds the caller's UVMCPU pointer iff it is an EMT. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    if (!pUVCpu)
        return NIL_RTTHREAD;

    return pUVCpu->vm.s.ThreadEMT;
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette