VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 78157

Last change on this file since 78157 was 77196, checked in by vboxsync, 6 years ago

Help text: explain about EFI Secure Boot and signing.
ticketref:18312: Unable to build Virtualbox 6.0 on Fedora 29
When a user tries to use VirtualBox on a Linux system with EFI Secure Boot
enabled, it sometimes fails in a non-obvious way due to unsigned kernel
modules. As it is rather tricky to detect this properly, mention it in the
help text which is shown when there are problems with the modules.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 170.6 KB
Line 
1/* $Id: VM.cpp 77196 2019-02-07 13:33:50Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing request for execution in EMT (serialization purposes
23 * mostly) and for reporting error back to the VMM user (Main/VBoxBFE).
 24 * mostly) and for reporting errors back to the VMM user (Main/VBoxBFE).
 25 *
 26 *
 27 * @section sec_vm_design Design Critique / Things To Do
 28 *
 29 * In hindsight this component is a big design mistake, all this stuff really
 30 * belongs in the VMM component. It just seemed like a kind of ok idea at a
 31 * time when the VMM bit was still rather vague. 'VM' also happened to be the
 32 * name of the per-VM instance structure (see vm.h), so it kind of made sense.
 33 * However as it turned out, VMM(.cpp) is almost empty; all it provides in
 34 * ring-3 is some minor functionality and some "routing" services.
 35 *
 36 * Fixing this is just a matter of some more or less straight forward
 37 * refactoring, the question is just when someone will get to it. Moving the EMT
 38 * would be a good start.
 39 *
 40 */
40
41
42/*********************************************************************************************************************************
43* Header Files *
44*********************************************************************************************************************************/
45#define LOG_GROUP LOG_GROUP_VM
46#include <VBox/vmm/cfgm.h>
47#include <VBox/vmm/vmm.h>
48#include <VBox/vmm/gvmm.h>
49#include <VBox/vmm/mm.h>
50#include <VBox/vmm/cpum.h>
51#include <VBox/vmm/selm.h>
52#include <VBox/vmm/trpm.h>
53#include <VBox/vmm/dbgf.h>
54#include <VBox/vmm/pgm.h>
55#include <VBox/vmm/pdmapi.h>
56#include <VBox/vmm/pdmdev.h>
57#include <VBox/vmm/pdmcritsect.h>
58#include <VBox/vmm/em.h>
59#include <VBox/vmm/iem.h>
60#ifdef VBOX_WITH_REM
61# include <VBox/vmm/rem.h>
62#endif
63#include <VBox/vmm/nem.h>
64#include <VBox/vmm/apic.h>
65#include <VBox/vmm/tm.h>
66#include <VBox/vmm/stam.h>
67#include <VBox/vmm/patm.h>
68#include <VBox/vmm/csam.h>
69#include <VBox/vmm/iom.h>
70#include <VBox/vmm/ssm.h>
71#include <VBox/vmm/ftm.h>
72#include <VBox/vmm/hm.h>
73#include <VBox/vmm/gim.h>
74#include "VMInternal.h"
75#include <VBox/vmm/vm.h>
76#include <VBox/vmm/uvm.h>
77
78#include <VBox/sup.h>
79#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
80# include <VBox/VBoxTpG.h>
81#endif
82#include <VBox/dbg.h>
83#include <VBox/err.h>
84#include <VBox/param.h>
85#include <VBox/log.h>
86#include <iprt/assert.h>
87#include <iprt/alloc.h>
88#include <iprt/asm.h>
89#include <iprt/env.h>
90#include <iprt/string.h>
91#include <iprt/time.h>
92#include <iprt/semaphore.h>
93#include <iprt/thread.h>
94#include <iprt/uuid.h>
95
96
97/*********************************************************************************************************************************
98* Internal Functions *
99*********************************************************************************************************************************/
100static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
101static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
102static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus);
103static int vmR3InitRing3(PVM pVM, PUVM pUVM);
104static int vmR3InitRing0(PVM pVM);
105#ifdef VBOX_WITH_RAW_MODE
106static int vmR3InitRC(PVM pVM);
107#endif
108static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
109static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
110static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
111static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
112static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
113static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF);
114static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
115static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...) RT_IPRT_FORMAT_ATTR(6, 7);
116
117
118/**
119 * Creates a virtual machine by calling the supplied configuration constructor.
120 *
121 * On successful returned the VM is powered, i.e. VMR3PowerOn() should be
122 * called to start the execution.
123 *
124 * @returns 0 on success.
125 * @returns VBox error code on failure.
126 * @param cCpus Number of virtual CPUs for the new VM.
127 * @param pVmm2UserMethods An optional method table that the VMM can use
128 * to make the user perform various action, like
129 * for instance state saving.
130 * @param pfnVMAtError Pointer to callback function for setting VM
131 * errors. This was added as an implicit call to
132 * VMR3AtErrorRegister() since there is no way the
133 * caller can get to the VM handle early enough to
134 * do this on its own.
135 * This is called in the context of an EMT.
136 * @param pvUserVM The user argument passed to pfnVMAtError.
137 * @param pfnCFGMConstructor Pointer to callback function for constructing the VM configuration tree.
138 * This is called in the context of an EMT0.
139 * @param pvUserCFGM The user argument passed to pfnCFGMConstructor.
140 * @param ppVM Where to optionally store the 'handle' of the
141 * created VM.
142 * @param ppUVM Where to optionally store the user 'handle' of
143 * the created VM, this includes one reference as
144 * if VMR3RetainUVM() was called. The caller
145 * *MUST* remember to pass the returned value to
146 * VMR3ReleaseUVM() once done with the handle.
147 */
148VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
149 PFNVMATERROR pfnVMAtError, void *pvUserVM,
150 PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
151 PVM *ppVM, PUVM *ppUVM)
152{
153 LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p ppUVM=%p\n",
154 cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM, ppUVM));
155
156 if (pVmm2UserMethods)
157 {
158 AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
159 AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
160 AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
161 AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
162 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
163 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
164 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
165 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
166 AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff, VERR_INVALID_POINTER);
167 AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
168 }
169 AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
170 AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
171 AssertPtrNullReturn(ppVM, VERR_INVALID_POINTER);
172 AssertPtrNullReturn(ppUVM, VERR_INVALID_POINTER);
173 AssertReturn(ppVM || ppUVM, VERR_INVALID_PARAMETER);
174
175 /*
176 * Validate input.
177 */
178 AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);
179
180 /*
181 * Create the UVM so we can register the at-error callback
182 * and consolidate a bit of cleanup code.
183 */
184 PUVM pUVM = NULL; /* shuts up gcc */
185 int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
186 if (RT_FAILURE(rc))
187 return rc;
188 if (pfnVMAtError)
189 rc = VMR3AtErrorRegister(pUVM, pfnVMAtError, pvUserVM);
190 if (RT_SUCCESS(rc))
191 {
192 /*
193 * Initialize the support library creating the session for this VM.
194 */
195 rc = SUPR3Init(&pUVM->vm.s.pSession);
196 if (RT_SUCCESS(rc))
197 {
198#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
199 /* Now that we've opened the device, we can register trace probes. */
200 static bool s_fRegisteredProbes = false;
201 if (ASMAtomicCmpXchgBool(&s_fRegisteredProbes, true, false))
202 SUPR3TracerRegisterModule(~(uintptr_t)0, "VBoxVMM", &g_VTGObjHeader, (uintptr_t)&g_VTGObjHeader,
203 SUP_TRACER_UMOD_FLAGS_SHARED);
204#endif
205
206 /*
207 * Call vmR3CreateU in the EMT thread and wait for it to finish.
208 *
209 * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
210 * submitting a request to a specific VCPU without a pVM. So, to make
211 * sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
212 * that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
213 */
214 PVMREQ pReq;
215 rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
216 (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
217 if (RT_SUCCESS(rc))
218 {
219 rc = pReq->iStatus;
220 VMR3ReqFree(pReq);
221 if (RT_SUCCESS(rc))
222 {
223 /*
224 * Success!
225 */
226 if (ppVM)
227 *ppVM = pUVM->pVM;
228 if (ppUVM)
229 {
230 VMR3RetainUVM(pUVM);
231 *ppUVM = pUVM;
232 }
233 LogFlow(("VMR3Create: returns VINF_SUCCESS (pVM=%p, pUVM=%p\n", pUVM->pVM, pUVM));
234 return VINF_SUCCESS;
235 }
236 }
237 else
238 AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));
239
240 /*
241 * An error occurred during VM creation. Set the error message directly
242 * using the initial callback, as the callback list might not exist yet.
243 */
244 const char *pszError;
245 switch (rc)
246 {
247 case VERR_VMX_IN_VMX_ROOT_MODE:
248#ifdef RT_OS_LINUX
249 pszError = N_("VirtualBox can't operate in VMX root mode. "
250 "Please disable the KVM kernel extension, recompile your kernel and reboot");
251#else
252 pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
253#endif
254 break;
255
256#ifndef RT_OS_DARWIN
257 case VERR_HM_CONFIG_MISMATCH:
258 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
259 "This hardware extension is required by the VM configuration");
260 break;
261#endif
262
263 case VERR_SVM_IN_USE:
264#ifdef RT_OS_LINUX
265 pszError = N_("VirtualBox can't enable the AMD-V extension. "
266 "Please disable the KVM kernel extension, recompile your kernel and reboot");
267#else
268 pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
269#endif
270 break;
271
272#ifdef RT_OS_LINUX
273 case VERR_SUPDRV_COMPONENT_NOT_FOUND:
274 pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
275 "that VirtualBox is correctly installed, and if you are using EFI "
276 "Secure Boot that the modules are signed if necessary in the right "
277 "way for your host system. Then try to recompile and reload the "
278 "kernel modules by executing "
279 "'/sbin/vboxconfig' as root");
280 break;
281#endif
282
283 case VERR_RAW_MODE_INVALID_SMP:
284 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
285 "VirtualBox requires this hardware extension to emulate more than one "
286 "guest CPU");
287 break;
288
289 case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
290#ifdef RT_OS_LINUX
291 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
292 "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
293 "the VT-x extension in the VM settings. Note that without VT-x you have "
294 "to reduce the number of guest CPUs to one");
295#else
296 pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
297 "extension. Either upgrade your kernel or disable the VT-x extension in the "
298 "VM settings. Note that without VT-x you have to reduce the number of guest "
299 "CPUs to one");
300#endif
301 break;
302
303 case VERR_PDM_DEVICE_NOT_FOUND:
304 pszError = N_("A virtual device is configured in the VM settings but the device "
305 "implementation is missing.\n"
306 "A possible reason for this error is a missing extension pack. Note "
307 "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
308 "support and remote desktop) are only available from an 'extension "
309 "pack' which must be downloaded and installed separately");
310 break;
311
312 case VERR_PCI_PASSTHROUGH_NO_HM:
313 pszError = N_("PCI passthrough requires VT-x/AMD-V");
314 break;
315
316 case VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING:
317 pszError = N_("PCI passthrough requires nested paging");
318 break;
319
320 default:
321 if (VMR3GetErrorCount(pUVM) == 0)
322 pszError = RTErrGetFull(rc);
323 else
324 pszError = NULL; /* already set. */
325 break;
326 }
327 if (pszError)
328 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
329 }
330 else
331 {
332 /*
333 * An error occurred at support library initialization time (before the
334 * VM could be created). Set the error message directly using the
335 * initial callback, as the callback list doesn't exist yet.
336 */
337 const char *pszError;
338 switch (rc)
339 {
340 case VERR_VM_DRIVER_LOAD_ERROR:
341#ifdef RT_OS_LINUX
342 pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
343 "was either not loaded, /dev/vboxdrv is not set up properly, "
344 "or you are using EFI Secure Boot and the module is not signed "
345 "in the right way for your system. If necessary, try setting up "
346 "the kernel module again by executing "
347 "'/sbin/vboxconfig' as root");
348#else
349 pszError = N_("VirtualBox kernel driver not loaded");
350#endif
351 break;
352 case VERR_VM_DRIVER_OPEN_ERROR:
353 pszError = N_("VirtualBox kernel driver cannot be opened");
354 break;
355 case VERR_VM_DRIVER_NOT_ACCESSIBLE:
356#ifdef VBOX_WITH_HARDENING
357 /* This should only happen if the executable wasn't hardened - bad code/build. */
358 pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
359 "Re-install VirtualBox. If you are building it yourself, you "
360 "should make sure it installed correctly and that the setuid "
361 "bit is set on the executables calling VMR3Create.");
362#else
363 /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
364# if defined(RT_OS_DARWIN)
365 pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
366 "If you have built VirtualBox yourself, make sure that you do not "
367 "have the vboxdrv KEXT from a different build or installation loaded.");
368# elif defined(RT_OS_LINUX)
369 pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
370 "If you have built VirtualBox yourself, make sure that you do "
371 "not have the vboxdrv kernel module from a different build or "
372 "installation loaded. Also, make sure the vboxdrv udev rule gives "
373 "you the permission you need to access the device.");
374# elif defined(RT_OS_WINDOWS)
375 pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
376# else /* solaris, freebsd, ++. */
377 pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
378 "If you have built VirtualBox yourself, make sure that you do "
379 "not have the vboxdrv kernel module from a different install loaded.");
380# endif
381#endif
382 break;
383 case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
384 case VERR_VM_DRIVER_NOT_INSTALLED:
385#ifdef RT_OS_LINUX
386 pszError = N_("VirtualBox kernel driver not Installed. The vboxdrv kernel module "
387 "was either not loaded, /dev/vboxdrv is not set up properly, "
388 "or you are using EFI Secure Boot and the module is not signed "
389 "in the right way for your system. If necessary, try setting up "
390 "the kernel module again by executing "
391 "'/sbin/vboxconfig' as root");
392#else
393 pszError = N_("VirtualBox kernel driver not installed");
394#endif
395 break;
396 case VERR_NO_MEMORY:
397 pszError = N_("VirtualBox support library out of memory");
398 break;
399 case VERR_VERSION_MISMATCH:
400 case VERR_VM_DRIVER_VERSION_MISMATCH:
401 pszError = N_("The VirtualBox support driver which is running is from a different "
402 "version of VirtualBox. You can correct this by stopping all "
403 "running instances of VirtualBox and reinstalling the software.");
404 break;
405 default:
406 pszError = N_("Unknown error initializing kernel driver");
407 AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
408 }
409 vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
410 }
411 }
412
413 /* cleanup */
414 vmR3DestroyUVM(pUVM, 2000);
415 LogFlow(("VMR3Create: returns %Rrc\n", rc));
416 return rc;
417}
418
419
/**
 * Creates the UVM (user mode VM structure).
 *
 * This will not initialize the support library even if vmR3DestroyUVM
 * will terminate that.
 *
 * On failure everything allocated here is torn down again before returning;
 * on success ownership of the UVM (with one reference) passes to the caller.
 *
 * @returns VBox status code.
 * @param   cCpus               Number of virtual CPUs
 * @param   pVmm2UserMethods    Pointer to the optional VMM -> User method
 *                              table.
 * @param   ppUVM               Where to store the UVM pointer.
 */
static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
{
    uint32_t i;

    /*
     * Create and initialize the UVM.
     */
    /* Page allocation is zero-initialized; the UVM has a variable sized aCpus tail. */
    PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_UOFFSETOF_DYN(UVM, aCpus[cCpus]));
    AssertReturn(pUVM, VERR_NO_MEMORY);
    pUVM->u32Magic = UVM_MAGIC;
    pUVM->cCpus = cCpus;
    pUVM->pVmm2UserMethods = pVmm2UserMethods;

    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));

    pUVM->vm.s.cUvmRefs = 1;
    /* The at-state/at-error/at-runtime-error callback lists start out empty;
       the 'next' pointers are primed to point at the list heads. */
    pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;

    /* Bootstrap halt method until vmR3SetHaltMethodU picks the real one later. */
    pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
    RTUuidClear(&pUVM->vm.s.Uuid);

    /* Initialize the VMCPU array in the UVM. */
    for (i = 0; i < cCpus; i++)
    {
        pUVM->aCpus[i].pUVM = pUVM;
        pUVM->aCpus[i].idCpu = i;
    }

    /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
    int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* Allocate a halt method event semaphore for each VCPU. */
        /* All slots are set to NIL first so the failure path below can
           destroy every slot unconditionally. */
        for (i = 0; i < cCpus; i++)
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        for (i = 0; i < cCpus; i++)
        {
            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
                     */
                    rc = PDMR3InitUVM(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = STAMR3InitUVM(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = MMR3InitUVM(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                /*
                                 * Start the emulation threads for all VMCPUs.
                                 */
                                for (i = 0; i < cCpus; i++)
                                {
                                    rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i],
                                                         _1M, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
                                                         cCpus > 1 ? "EMT-%u" : "EMT", i);
                                    if (RT_FAILURE(rc))
                                        break;

                                    pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
                                }

                                if (RT_SUCCESS(rc))
                                {
                                    /* Success: hand the UVM (and its single reference) to the caller. */
                                    *ppUVM = pUVM;
                                    return VINF_SUCCESS;
                                }

                                /* bail out. */
                                while (i-- > 0)
                                {
                                    /** @todo rainy day: terminate the EMTs. */
                                }
                                MMR3TermUVM(pUVM);
                            }
                            STAMR3TermUVM(pUVM);
                        }
                        PDMR3TermUVM(pUVM);
                    }
                    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
                }
                RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
            }
        }
        /* Destroy whatever semaphores were created (NIL ones are harmless). */
        for (i = 0; i < cCpus; i++)
        {
            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        }
        RTTlsFree(pUVM->vm.s.idxTLS);
    }
    RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
    return rc;
}
542
543
/**
 * Creates and initializes the VM.
 *
 * Loads VMMR0, asks GVMM to create the shared VM structure, then drives the
 * staged init (CFGM, ring-3, ring-0 and optionally raw-mode context).  On any
 * failure the already-initialized stages are unwound and the GVMM VM is
 * destroyed again before returning.
 *
 * @returns VBox status code.
 * @param   pUVM                The user mode VM structure.
 * @param   cCpus               Number of virtual CPUs.
 * @param   pfnCFGMConstructor  Callback for constructing the configuration
 *                              tree, passed on to CFGMR3Init.
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 * @thread EMT
 */
static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
{
#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
    /*
     * Require SSE2 to be present (already checked for in supdrv, so we
     * shouldn't ever really get here).
     */
    if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
    {
        LogRel(("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1)));
        return VERR_UNSUPPORTED_CPU;
    }
#endif

    /*
     * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
     */
    int rc = PDMR3LdrLoadVMMR0U(pUVM);
    if (RT_FAILURE(rc))
    {
        /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
         * bird: what about moving the message down here? Main picks the first message, right? */
        if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
            return rc; /* proper error message set later on */
        return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
    }

    /*
     * Request GVMM to create a new VM for us.
     */
    GVMMCREATEVMREQ CreateVMReq;
    CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
    CreateVMReq.pSession = pUVM->vm.s.pSession;
    CreateVMReq.pVMR0 = NIL_RTR0PTR;
    CreateVMReq.pVMR3 = NULL;
    CreateVMReq.cCpus = cCpus;
    rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
    if (RT_SUCCESS(rc))
    {
        /* Sanity-check what ring-0 handed back before trusting it. */
        PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
        AssertRelease(VALID_PTR(pVM));
        AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
        AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
        AssertRelease(pVM->cCpus == cCpus);
        AssertRelease(pVM->uCpuExecutionCap == 100);
        AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
        AssertCompileMemberAlignment(VM, cpum, 64);
        AssertCompileMemberAlignment(VM, tm, 64);
        AssertCompileMemberAlignment(VM, aCpus, PAGE_SIZE);

        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
             pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));

        /*
         * Initialize the VM structure and our internal data (VMINT).
         */
        pVM->pUVM = pUVM;

        /* Cross-link the shared VMCPU structures with the UVM per-cpu data. */
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            pVM->aCpus[i].pUVCpu = &pUVM->aCpus[i];
            pVM->aCpus[i].idCpu = i;
            pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
            Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
            /* hNativeThreadR0 is initialized on EMT registration. */
            pUVM->aCpus[i].pVCpu = &pVM->aCpus[i];
            pUVM->aCpus[i].pVM = pVM;
        }


        /*
         * Init the configuration.
         */
        rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
        if (RT_SUCCESS(rc))
        {
            rc = vmR3ReadBaseConfig(pVM, pUVM, cCpus);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Init the ring-3 components and ring-3 per cpu data, finishing it off
                 * by a relocation round (intermediate context finalization will do this).
                 */
                rc = vmR3InitRing3(pVM, pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = PGMR3FinalizeMappings(pVM);
                    if (RT_SUCCESS(rc))
                    {

                        LogFlow(("Ring-3 init succeeded\n"));

                        /*
                         * Init the Ring-0 components.
                         */
                        rc = vmR3InitRing0(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            /* Relocate again, because some switcher fixups depends on R0 init results. */
                            VMR3Relocate(pVM, 0 /* offDelta */);

#ifdef VBOX_WITH_DEBUGGER
                            /*
                             * Init the tcp debugger console if we're building
                             * with debugger support.
                             */
                            void *pvUser = NULL;
                            rc = DBGCTcpCreate(pUVM, &pvUser);
                            /* VERR_NET_ADDRESS_IN_USE is tolerated: a console is
                               nice to have but not required for the VM to run. */
                            if (   RT_SUCCESS(rc)
                                || rc == VERR_NET_ADDRESS_IN_USE)
                            {
                                pUVM->vm.s.pvDBGC = pvUser;
#endif
                                /*
                                 * Init the Raw-Mode Context components.
                                 */
#ifdef VBOX_WITH_RAW_MODE
                                rc = vmR3InitRC(pVM);
                                if (RT_SUCCESS(rc))
#endif
                                {
                                    /*
                                     * Now we can safely set the VM halt method to default.
                                     */
                                    rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
                                    if (RT_SUCCESS(rc))
                                    {
                                        /*
                                         * Set the state and we're done.
                                         */
                                        vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
                                        return VINF_SUCCESS;
                                    }
                                }
#ifdef VBOX_WITH_DEBUGGER
                                DBGCTcpTerminate(pUVM, pUVM->vm.s.pvDBGC);
                                pUVM->vm.s.pvDBGC = NULL;
                            }
#endif
                            /* (ring-0 init succeeded but a later stage failed) */
                        }
                    }
                    /* Ring-3 components were initialized, so tear them down again. */
                    vmR3Destroy(pVM);
                }
            }
            /* (base config / ring-3 init failed; CFGM is still up) */

            /* Clean CFGM. */
            int rc2 = CFGMR3Term(pVM);
            AssertRC(rc2);
        }

        /*
         * Do automatic cleanups while the VM structure is still alive and all
         * references to it are still working.
         */
        PDMR3CritSectBothTerm(pVM);

        /*
         * Drop all references to VM and the VMCPU structures, then
         * tell GVMM to destroy the VM.
         */
        pUVM->pVM = NULL;
        for (VMCPUID i = 0; i < pUVM->cCpus; i++)
        {
            pUVM->aCpus[i].pVM = NULL;
            pUVM->aCpus[i].pVCpu = NULL;
        }
        Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);

        if (pUVM->cCpus > 1)
        {
            /* Poke the other EMTs since they may have stale pVM and pVCpu references
               on the stack (see VMR3WaitU for instance) if they've been awakened after
               VM creation. */
            for (VMCPUID i = 1; i < pUVM->cCpus; i++)
                VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
            RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
        }

        int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertRC(rc2);
    }
    else
        vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));

    LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
    return rc;
}
739
740
741/**
742 * Reads the base configuation from CFGM.
743 *
744 * @returns VBox status code.
745 * @param pVM The cross context VM structure.
746 * @param pUVM The user mode VM structure.
747 * @param cCpus The CPU count given to VMR3Create.
748 */
749static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus)
750{
751 int rc;
752 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
753
754 /*
755 * If executing in fake suplib mode disable RR3 and RR0 in the config.
756 */
757 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
758 if (psz && !strcmp(psz, "fake"))
759 {
760 CFGMR3RemoveValue(pRoot, "RawR3Enabled");
761 CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
762 CFGMR3RemoveValue(pRoot, "RawR0Enabled");
763 CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
764 }
765
766 /*
767 * Base EM and HM config properties.
768 */
769 /** @todo We don't need to read any of this here. The relevant modules reads
770 * them again and will be in a better position to set them correctly. */
771 Assert(pVM->fRecompileUser == false); /* ASSUMES all zeros at this point */
772 bool fEnabled;
773 rc = CFGMR3QueryBoolDef(pRoot, "RawR3Enabled", &fEnabled, false); AssertRCReturn(rc, rc);
774 pVM->fRecompileUser = !fEnabled;
775 rc = CFGMR3QueryBoolDef(pRoot, "RawR0Enabled", &fEnabled, false); AssertRCReturn(rc, rc);
776 pVM->fRecompileSupervisor = !fEnabled;
777#ifdef VBOX_WITH_RAW_MODE
778# ifdef VBOX_WITH_RAW_RING1
779 rc = CFGMR3QueryBoolDef(pRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
780# endif
781 rc = CFGMR3QueryBoolDef(pRoot, "PATMEnabled", &pVM->fPATMEnabled, true); AssertRCReturn(rc, rc);
782 rc = CFGMR3QueryBoolDef(pRoot, "CSAMEnabled", &pVM->fCSAMEnabled, true); AssertRCReturn(rc, rc);
783 rc = CFGMR3QueryBoolDef(pRoot, "HMEnabled", &pVM->fHMEnabled, true); AssertRCReturn(rc, rc);
784#else
785 pVM->fHMEnabled = true;
786#endif
787 LogRel(("VM: fHMEnabled=%RTbool (configured) fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n"
788 "VM: fRawRing1Enabled=%RTbool CSAM=%RTbool PATM=%RTbool\n",
789 pVM->fHMEnabled, pVM->fRecompileUser, pVM->fRecompileSupervisor,
790 pVM->fRawRing1Enabled, pVM->fCSAMEnabled, pVM->fPATMEnabled));
791
792 /*
793 * Make sure the CPU count in the config data matches.
794 */
795 uint32_t cCPUsCfg;
796 rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
797 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc), rc);
798 AssertLogRelMsgReturn(cCPUsCfg == cCpus,
799 ("Configuration error: \"NumCPUs\"=%RU32 and VMR3Create::cCpus=%RU32 does not match!\n",
800 cCPUsCfg, cCpus),
801 VERR_INVALID_PARAMETER);
802
803 /*
804 * Get the CPU execution cap.
805 */
806 rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
807 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc), rc);
808
809 /*
810 * Get the VM name and UUID.
811 */
812 rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
813 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc), rc);
814
815 rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
816 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
817 rc = VINF_SUCCESS;
818 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc), rc);
819
820 rc = CFGMR3QueryBoolDef(pRoot, "PowerOffInsteadOfReset", &pVM->vm.s.fPowerOffInsteadOfReset, false);
821 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"PowerOffInsteadOfReset\" failed, rc=%Rrc\n", rc), rc);
822
823 return VINF_SUCCESS;
824}
825
826
827/**
828 * Register the calling EMT with GVM.
829 *
830 * @returns VBox status code.
831 * @param pVM The cross context VM structure.
832 * @param idCpu The Virtual CPU ID.
833 */
834static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
835{
836 Assert(VMMGetCpuId(pVM) == idCpu);
837 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
838 if (RT_FAILURE(rc))
839 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
840 return rc;
841}
842
843
844/**
845 * Initializes all R3 components of the VM
846 */
847static int vmR3InitRing3(PVM pVM, PUVM pUVM)
848{
849 int rc;
850
851 /*
852 * Register the other EMTs with GVM.
853 */
854 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
855 {
856 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
857 if (RT_FAILURE(rc))
858 return rc;
859 }
860
861 /*
862 * Register statistics.
863 */
864 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
865 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
866 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
867 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
868 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
869 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
870 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
871 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
872 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
873 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
874 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
875 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
876 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
877 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
878
879 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
880 {
881 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/CPU%d/VM/Halt/Yield", idCpu);
882 AssertRC(rc);
883 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/CPU%d/VM/Halt/Block", idCpu);
884 AssertRC(rc);
885 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/CPU%d/VM/Halt/BlockOverslept", idCpu);
886 AssertRC(rc);
887 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/CPU%d/VM/Halt/BlockInsomnia", idCpu);
888 AssertRC(rc);
889 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/CPU%d/VM/Halt/BlockOnTime", idCpu);
890 AssertRC(rc);
891 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/CPU%d/VM/Halt/Timers", idCpu);
892 AssertRC(rc);
893 }
894
895 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
896 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
897 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
898 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
899 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
900 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
901 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
902 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
903
904 /*
905 * Init all R3 components, the order here might be important.
906 * NEM and HM shall be initialized first!
907 */
908 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
909 rc = NEMR3InitConfig(pVM);
910 if (RT_SUCCESS(rc))
911 rc = HMR3Init(pVM);
912 if (RT_SUCCESS(rc))
913 {
914 ASMCompilerBarrier(); /* HMR3Init will have modified bMainExecutionEngine */
915#ifdef VBOX_WITH_RAW_MODE
916 Assert( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT
917 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_RAW_MODE
918 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API);
919#else
920 Assert( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT
921 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API);
922#endif
923 rc = MMR3Init(pVM);
924 if (RT_SUCCESS(rc))
925 {
926 rc = CPUMR3Init(pVM);
927 if (RT_SUCCESS(rc))
928 {
929 rc = NEMR3InitAfterCPUM(pVM);
930 if (RT_SUCCESS(rc))
931 rc = PGMR3Init(pVM);
932 if (RT_SUCCESS(rc))
933 {
934#ifdef VBOX_WITH_REM
935 rc = REMR3Init(pVM);
936#endif
937 if (RT_SUCCESS(rc))
938 {
939 rc = MMR3InitPaging(pVM);
940 if (RT_SUCCESS(rc))
941 rc = TMR3Init(pVM);
942 if (RT_SUCCESS(rc))
943 {
944 rc = FTMR3Init(pVM);
945 if (RT_SUCCESS(rc))
946 {
947 rc = VMMR3Init(pVM);
948 if (RT_SUCCESS(rc))
949 {
950 rc = SELMR3Init(pVM);
951 if (RT_SUCCESS(rc))
952 {
953 rc = TRPMR3Init(pVM);
954 if (RT_SUCCESS(rc))
955 {
956#ifdef VBOX_WITH_RAW_MODE
957 rc = CSAMR3Init(pVM);
958 if (RT_SUCCESS(rc))
959 {
960 rc = PATMR3Init(pVM);
961 if (RT_SUCCESS(rc))
962 {
963#endif
964 rc = IOMR3Init(pVM);
965 if (RT_SUCCESS(rc))
966 {
967 rc = EMR3Init(pVM);
968 if (RT_SUCCESS(rc))
969 {
970 rc = IEMR3Init(pVM);
971 if (RT_SUCCESS(rc))
972 {
973 rc = DBGFR3Init(pVM);
974 if (RT_SUCCESS(rc))
975 {
976 /* GIM must be init'd before PDM, gimdevR3Construct()
977 requires GIM provider to be setup. */
978 rc = GIMR3Init(pVM);
979 if (RT_SUCCESS(rc))
980 {
981 rc = PDMR3Init(pVM);
982 if (RT_SUCCESS(rc))
983 {
984 rc = PGMR3InitDynMap(pVM);
985 if (RT_SUCCESS(rc))
986 rc = MMR3HyperInitFinalize(pVM);
987#ifdef VBOX_WITH_RAW_MODE
988 if (RT_SUCCESS(rc))
989 rc = PATMR3InitFinalize(pVM);
990#endif
991 if (RT_SUCCESS(rc))
992 rc = PGMR3InitFinalize(pVM);
993 if (RT_SUCCESS(rc))
994 rc = SELMR3InitFinalize(pVM);
995 if (RT_SUCCESS(rc))
996 rc = TMR3InitFinalize(pVM);
997#ifdef VBOX_WITH_REM
998 if (RT_SUCCESS(rc))
999 rc = REMR3InitFinalize(pVM);
1000#endif
1001 if (RT_SUCCESS(rc))
1002 {
1003 PGMR3MemSetup(pVM, false /*fAtReset*/);
1004 PDMR3MemSetup(pVM, false /*fAtReset*/);
1005 }
1006 if (RT_SUCCESS(rc))
1007 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
1008 if (RT_SUCCESS(rc))
1009 {
1010 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
1011 return VINF_SUCCESS;
1012 }
1013
1014 int rc2 = PDMR3Term(pVM);
1015 AssertRC(rc2);
1016 }
1017 int rc2 = GIMR3Term(pVM);
1018 AssertRC(rc2);
1019 }
1020 int rc2 = DBGFR3Term(pVM);
1021 AssertRC(rc2);
1022 }
1023 int rc2 = IEMR3Term(pVM);
1024 AssertRC(rc2);
1025 }
1026 int rc2 = EMR3Term(pVM);
1027 AssertRC(rc2);
1028 }
1029 int rc2 = IOMR3Term(pVM);
1030 AssertRC(rc2);
1031 }
1032#ifdef VBOX_WITH_RAW_MODE
1033 int rc2 = PATMR3Term(pVM);
1034 AssertRC(rc2);
1035 }
1036 int rc2 = CSAMR3Term(pVM);
1037 AssertRC(rc2);
1038 }
1039#endif
1040 int rc2 = TRPMR3Term(pVM);
1041 AssertRC(rc2);
1042 }
1043 int rc2 = SELMR3Term(pVM);
1044 AssertRC(rc2);
1045 }
1046 int rc2 = VMMR3Term(pVM);
1047 AssertRC(rc2);
1048 }
1049 int rc2 = FTMR3Term(pVM);
1050 AssertRC(rc2);
1051 }
1052 int rc2 = TMR3Term(pVM);
1053 AssertRC(rc2);
1054 }
1055#ifdef VBOX_WITH_REM
1056 int rc2 = REMR3Term(pVM);
1057 AssertRC(rc2);
1058#endif
1059 }
1060 int rc2 = PGMR3Term(pVM);
1061 AssertRC(rc2);
1062 }
1063 //int rc2 = CPUMR3Term(pVM);
1064 //AssertRC(rc2);
1065 }
1066 /* MMR3Term is not called here because it'll kill the heap. */
1067 }
1068 int rc2 = HMR3Term(pVM);
1069 AssertRC(rc2);
1070 }
1071 NEMR3Term(pVM);
1072
1073 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
1074 return rc;
1075}
1076
1077
1078/**
1079 * Initializes all R0 components of the VM.
1080 */
1081static int vmR3InitRing0(PVM pVM)
1082{
1083 LogFlow(("vmR3InitRing0:\n"));
1084
1085 /*
1086 * Check for FAKE suplib mode.
1087 */
1088 int rc = VINF_SUCCESS;
1089 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1090 if (!psz || strcmp(psz, "fake"))
1091 {
1092 /*
1093 * Call the VMMR0 component and let it do the init.
1094 */
1095 rc = VMMR3InitR0(pVM);
1096 }
1097 else
1098 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1099
1100 /*
1101 * Do notifications and return.
1102 */
1103 if (RT_SUCCESS(rc))
1104 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1105 if (RT_SUCCESS(rc))
1106 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HM);
1107
1108 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1109 return rc;
1110}
1111
1112
#ifdef VBOX_WITH_RAW_MODE
/**
 * Initializes all RC components of the VM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int vmR3InitRC(PVM pVM)
{
    LogFlow(("vmR3InitRC:\n"));

    /*
     * The RC init is skipped entirely when the support library is faked.
     */
    int rc = VINF_SUCCESS;
    const char *pszFake = RTEnvGet("VBOX_SUPLIB_FAKE");
    if (pszFake && !strcmp(pszFake, "fake"))
        Log(("vmR3InitRC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
    else
    {
        /* Call the VMMR0 component and let it do the init. */
        rc = VMMR3InitRC(pVM);
    }

    /*
     * Do notifications and return.
     */
    if (RT_SUCCESS(rc))
        rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RC);
    LogFlow(("vmR3InitRC: returns %Rrc\n", rc));
    return rc;
}
#endif /* VBOX_WITH_RAW_MODE */
1145
1146
1147/**
1148 * Do init completed notifications.
1149 *
1150 * @returns VBox status code.
1151 * @param pVM The cross context VM structure.
1152 * @param enmWhat What's completed.
1153 */
1154static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1155{
1156 int rc = VMMR3InitCompleted(pVM, enmWhat);
1157 if (RT_SUCCESS(rc))
1158 rc = HMR3InitCompleted(pVM, enmWhat);
1159 if (RT_SUCCESS(rc))
1160 rc = NEMR3InitCompleted(pVM, enmWhat);
1161 if (RT_SUCCESS(rc))
1162 rc = PGMR3InitCompleted(pVM, enmWhat);
1163 if (RT_SUCCESS(rc))
1164 rc = CPUMR3InitCompleted(pVM, enmWhat);
1165 if (RT_SUCCESS(rc))
1166 rc = EMR3InitCompleted(pVM, enmWhat);
1167 if (enmWhat == VMINITCOMPLETED_RING3)
1168 {
1169#ifndef VBOX_WITH_RAW_MODE
1170 if (RT_SUCCESS(rc))
1171 rc = SSMR3RegisterStub(pVM, "CSAM", 0);
1172 if (RT_SUCCESS(rc))
1173 rc = SSMR3RegisterStub(pVM, "PATM", 0);
1174#endif
1175#ifndef VBOX_WITH_REM
1176 if (RT_SUCCESS(rc))
1177 rc = SSMR3RegisterStub(pVM, "rem", 1);
1178#endif
1179 }
1180 if (RT_SUCCESS(rc))
1181 rc = PDMR3InitCompleted(pVM, enmWhat);
1182 return rc;
1183}
1184
1185
1186/**
1187 * Calls the relocation functions for all VMM components so they can update
1188 * any GC pointers. When this function is called all the basic VM members
1189 * have been updated and the actual memory relocation have been done
1190 * by the PGM/MM.
1191 *
1192 * This is used both on init and on runtime relocations.
1193 *
1194 * @param pVM The cross context VM structure.
1195 * @param offDelta Relocation delta relative to old location.
1196 */
1197VMMR3_INT_DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1198{
1199 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1200
1201 /*
1202 * The order here is very important!
1203 */
1204 PGMR3Relocate(pVM, offDelta);
1205 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1206 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1207 CPUMR3Relocate(pVM);
1208 HMR3Relocate(pVM);
1209 SELMR3Relocate(pVM);
1210 VMMR3Relocate(pVM, offDelta);
1211 SELMR3Relocate(pVM); /* !hack! fix stack! */
1212 TRPMR3Relocate(pVM, offDelta);
1213#ifdef VBOX_WITH_RAW_MODE
1214 PATMR3Relocate(pVM, (RTRCINTPTR)offDelta);
1215 CSAMR3Relocate(pVM, offDelta);
1216#endif
1217 IOMR3Relocate(pVM, offDelta);
1218 EMR3Relocate(pVM);
1219 TMR3Relocate(pVM, offDelta);
1220 IEMR3Relocate(pVM);
1221 DBGFR3Relocate(pVM, offDelta);
1222 PDMR3Relocate(pVM, offDelta);
1223 GIMR3Relocate(pVM, offDelta);
1224}
1225
1226
1227/**
1228 * EMT rendezvous worker for VMR3PowerOn.
1229 *
1230 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1231 * code, see FNVMMEMTRENDEZVOUS.)
1232 *
1233 * @param pVM The cross context VM structure.
1234 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1235 * @param pvUser Ignored.
1236 */
1237static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1238{
1239 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1240 Assert(!pvUser); NOREF(pvUser);
1241
1242 /*
1243 * The first thread thru here tries to change the state. We shouldn't be
1244 * called again if this fails.
1245 */
1246 if (pVCpu->idCpu == pVM->cCpus - 1)
1247 {
1248 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1249 if (RT_FAILURE(rc))
1250 return rc;
1251 }
1252
1253 VMSTATE enmVMState = VMR3GetState(pVM);
1254 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1255 ("%s\n", VMR3GetStateName(enmVMState)),
1256 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1257
1258 /*
1259 * All EMTs changes their state to started.
1260 */
1261 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1262
1263 /*
1264 * EMT(0) is last thru here and it will make the notification calls
1265 * and advance the state.
1266 */
1267 if (pVCpu->idCpu == 0)
1268 {
1269 PDMR3PowerOn(pVM);
1270 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1271 }
1272
1273 return VINF_SUCCESS;
1274}
1275
1276
1277/**
1278 * Powers on the virtual machine.
1279 *
1280 * @returns VBox status code.
1281 *
1282 * @param pUVM The VM to power on.
1283 *
1284 * @thread Any thread.
1285 * @vmstate Created
1286 * @vmstateto PoweringOn+Running
1287 */
1288VMMR3DECL(int) VMR3PowerOn(PUVM pUVM)
1289{
1290 LogFlow(("VMR3PowerOn: pUVM=%p\n", pUVM));
1291 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1292 PVM pVM = pUVM->pVM;
1293 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1294
1295 /*
1296 * Gather all the EMTs to reduce the init TSC drift and keep
1297 * the state changing APIs a bit uniform.
1298 */
1299 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1300 vmR3PowerOn, NULL);
1301 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1302 return rc;
1303}
1304
1305
1306/**
1307 * Does the suspend notifications.
1308 *
1309 * @param pVM The cross context VM structure.
1310 * @thread EMT(0)
1311 */
1312static void vmR3SuspendDoWork(PVM pVM)
1313{
1314 PDMR3Suspend(pVM);
1315}
1316
1317
1318/**
1319 * EMT rendezvous worker for VMR3Suspend.
1320 *
1321 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1322 * return code, see FNVMMEMTRENDEZVOUS.)
1323 *
1324 * @param pVM The cross context VM structure.
1325 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1326 * @param pvUser Ignored.
1327 */
1328static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1329{
1330 VMSUSPENDREASON enmReason = (VMSUSPENDREASON)(uintptr_t)pvUser;
1331 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1332
1333 /*
1334 * The first EMT switches the state to suspending. If this fails because
1335 * something was racing us in one way or the other, there will be no more
1336 * calls and thus the state assertion below is not going to annoy anyone.
1337 */
1338 if (pVCpu->idCpu == pVM->cCpus - 1)
1339 {
1340 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1341 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1342 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1343 if (RT_FAILURE(rc))
1344 return rc;
1345 pVM->pUVM->vm.s.enmSuspendReason = enmReason;
1346 }
1347
1348 VMSTATE enmVMState = VMR3GetState(pVM);
1349 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1350 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1351 ("%s\n", VMR3GetStateName(enmVMState)),
1352 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1353
1354 /*
1355 * EMT(0) does the actually suspending *after* all the other CPUs have
1356 * been thru here.
1357 */
1358 if (pVCpu->idCpu == 0)
1359 {
1360 vmR3SuspendDoWork(pVM);
1361
1362 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1363 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1364 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1365 if (RT_FAILURE(rc))
1366 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1367 }
1368
1369 return VINF_EM_SUSPEND;
1370}
1371
1372
1373/**
1374 * Suspends a running VM.
1375 *
1376 * @returns VBox status code. When called on EMT, this will be a strict status
1377 * code that has to be propagated up the call stack.
1378 *
1379 * @param pUVM The VM to suspend.
1380 * @param enmReason The reason for suspending.
1381 *
1382 * @thread Any thread.
1383 * @vmstate Running or RunningLS
1384 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1385 */
1386VMMR3DECL(int) VMR3Suspend(PUVM pUVM, VMSUSPENDREASON enmReason)
1387{
1388 LogFlow(("VMR3Suspend: pUVM=%p\n", pUVM));
1389 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1390 AssertReturn(enmReason > VMSUSPENDREASON_INVALID && enmReason < VMSUSPENDREASON_END, VERR_INVALID_PARAMETER);
1391
1392 /*
1393 * Gather all the EMTs to make sure there are no races before
1394 * changing the VM state.
1395 */
1396 int rc = VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1397 vmR3Suspend, (void *)(uintptr_t)enmReason);
1398 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1399 return rc;
1400}
1401
1402
1403/**
1404 * Retrieves the reason for the most recent suspend.
1405 *
1406 * @returns Suspend reason. VMSUSPENDREASON_INVALID if no suspend has been done
1407 * or the handle is invalid.
1408 * @param pUVM The user mode VM handle.
1409 */
1410VMMR3DECL(VMSUSPENDREASON) VMR3GetSuspendReason(PUVM pUVM)
1411{
1412 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSUSPENDREASON_INVALID);
1413 return pUVM->vm.s.enmSuspendReason;
1414}
1415
1416
1417/**
1418 * EMT rendezvous worker for VMR3Resume.
1419 *
1420 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1421 * return code, see FNVMMEMTRENDEZVOUS.)
1422 *
1423 * @param pVM The cross context VM structure.
1424 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1425 * @param pvUser Reason.
1426 */
1427static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1428{
1429 VMRESUMEREASON enmReason = (VMRESUMEREASON)(uintptr_t)pvUser;
1430 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1431
1432 /*
1433 * The first thread thru here tries to change the state. We shouldn't be
1434 * called again if this fails.
1435 */
1436 if (pVCpu->idCpu == pVM->cCpus - 1)
1437 {
1438 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1439 if (RT_FAILURE(rc))
1440 return rc;
1441 pVM->pUVM->vm.s.enmResumeReason = enmReason;
1442 }
1443
1444 VMSTATE enmVMState = VMR3GetState(pVM);
1445 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1446 ("%s\n", VMR3GetStateName(enmVMState)),
1447 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1448
1449#if 0
1450 /*
1451 * All EMTs changes their state to started.
1452 */
1453 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1454#endif
1455
1456 /*
1457 * EMT(0) is last thru here and it will make the notification calls
1458 * and advance the state.
1459 */
1460 if (pVCpu->idCpu == 0)
1461 {
1462 PDMR3Resume(pVM);
1463 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1464 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1465 }
1466
1467 return VINF_EM_RESUME;
1468}
1469
1470
1471/**
1472 * Resume VM execution.
1473 *
1474 * @returns VBox status code. When called on EMT, this will be a strict status
1475 * code that has to be propagated up the call stack.
1476 *
1477 * @param pUVM The user mode VM handle.
1478 * @param enmReason The reason we're resuming.
1479 *
1480 * @thread Any thread.
1481 * @vmstate Suspended
1482 * @vmstateto Running
1483 */
1484VMMR3DECL(int) VMR3Resume(PUVM pUVM, VMRESUMEREASON enmReason)
1485{
1486 LogFlow(("VMR3Resume: pUVM=%p\n", pUVM));
1487 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1488 PVM pVM = pUVM->pVM;
1489 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1490 AssertReturn(enmReason > VMRESUMEREASON_INVALID && enmReason < VMRESUMEREASON_END, VERR_INVALID_PARAMETER);
1491
1492 /*
1493 * Gather all the EMTs to make sure there are no races before
1494 * changing the VM state.
1495 */
1496 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1497 vmR3Resume, (void *)(uintptr_t)enmReason);
1498 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1499 return rc;
1500}
1501
1502
1503/**
1504 * Retrieves the reason for the most recent resume.
1505 *
1506 * @returns Resume reason. VMRESUMEREASON_INVALID if no suspend has been
1507 * done or the handle is invalid.
1508 * @param pUVM The user mode VM handle.
1509 */
1510VMMR3DECL(VMRESUMEREASON) VMR3GetResumeReason(PUVM pUVM)
1511{
1512 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMRESUMEREASON_INVALID);
1513 return pUVM->vm.s.enmResumeReason;
1514}
1515
1516
1517/**
1518 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1519 * after the live step has been completed.
1520 *
1521 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1522 * return code, see FNVMMEMTRENDEZVOUS.)
1523 *
1524 * @param pVM The cross context VM structure.
1525 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1526 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1527 */
1528static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1529{
1530 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1531 bool *pfSuspended = (bool *)pvUser;
1532
1533 /*
1534 * The first thread thru here tries to change the state. We shouldn't be
1535 * called again if this fails.
1536 */
1537 if (pVCpu->idCpu == pVM->cCpus - 1U)
1538 {
1539 PUVM pUVM = pVM->pUVM;
1540 int rc;
1541
1542 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1543 VMSTATE enmVMState = pVM->enmVMState;
1544 switch (enmVMState)
1545 {
1546 case VMSTATE_RUNNING_LS:
1547 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS, false /*fSetRatherThanClearFF*/);
1548 rc = VINF_SUCCESS;
1549 break;
1550
1551 case VMSTATE_SUSPENDED_EXT_LS:
1552 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1553 rc = VINF_SUCCESS;
1554 break;
1555
1556 case VMSTATE_DEBUGGING_LS:
1557 rc = VERR_TRY_AGAIN;
1558 break;
1559
1560 case VMSTATE_OFF_LS:
1561 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS, false /*fSetRatherThanClearFF*/);
1562 rc = VERR_SSM_LIVE_POWERED_OFF;
1563 break;
1564
1565 case VMSTATE_FATAL_ERROR_LS:
1566 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, false /*fSetRatherThanClearFF*/);
1567 rc = VERR_SSM_LIVE_FATAL_ERROR;
1568 break;
1569
1570 case VMSTATE_GURU_MEDITATION_LS:
1571 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, false /*fSetRatherThanClearFF*/);
1572 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1573 break;
1574
1575 case VMSTATE_POWERING_OFF_LS:
1576 case VMSTATE_SUSPENDING_EXT_LS:
1577 case VMSTATE_RESETTING_LS:
1578 default:
1579 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1580 rc = VERR_VM_UNEXPECTED_VM_STATE;
1581 break;
1582 }
1583 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1584 if (RT_FAILURE(rc))
1585 {
1586 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1587 return rc;
1588 }
1589 }
1590
1591 VMSTATE enmVMState = VMR3GetState(pVM);
1592 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1593 ("%s\n", VMR3GetStateName(enmVMState)),
1594 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1595
1596 /*
1597 * Only EMT(0) have work to do since it's last thru here.
1598 */
1599 if (pVCpu->idCpu == 0)
1600 {
1601 vmR3SuspendDoWork(pVM);
1602 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1603 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1604 if (RT_FAILURE(rc))
1605 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1606
1607 *pfSuspended = true;
1608 }
1609
1610 return VINF_EM_SUSPEND;
1611}
1612
1613
1614/**
1615 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1616 * SSMR3LiveDoStep1 failure.
1617 *
1618 * Doing this as a rendezvous operation avoids all annoying transition
1619 * states.
1620 *
1621 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1622 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1623 *
1624 * @param pVM The cross context VM structure.
1625 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1626 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1627 */
1628static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1629{
1630 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1631 bool *pfSuspended = (bool *)pvUser;
1632 NOREF(pVCpu);
1633
1634 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1635 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1636 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1637 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1638 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1639 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1640 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1641 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1642 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1643 if (rc == 1)
1644 rc = VERR_SSM_LIVE_POWERED_OFF;
1645 else if (rc == 2)
1646 rc = VERR_SSM_LIVE_FATAL_ERROR;
1647 else if (rc == 3)
1648 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1649 else if (rc == 4)
1650 {
1651 *pfSuspended = true;
1652 rc = VINF_SUCCESS;
1653 }
1654 else if (rc > 0)
1655 rc = VINF_SUCCESS;
1656 return rc;
1657}
1658
1659
1660/**
1661 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1662 *
1663 * @returns VBox status code.
1664 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1665 *
1666 * @param pVM The cross context VM structure.
1667 * @param pSSM The handle of saved state operation.
1668 *
1669 * @thread EMT(0)
1670 */
1671static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1672{
1673 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1674 VM_ASSERT_EMT0(pVM);
1675
1676 /*
1677 * Advance the state and mark if VMR3Suspend was called.
1678 */
1679 int rc = VINF_SUCCESS;
1680 VMSTATE enmVMState = VMR3GetState(pVM);
1681 if (enmVMState == VMSTATE_SUSPENDED_LS)
1682 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1683 else
1684 {
1685 if (enmVMState != VMSTATE_SAVING)
1686 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1687 rc = VINF_SSM_LIVE_SUSPENDED;
1688 }
1689
1690 /*
1691 * Finish up and release the handle. Careful with the status codes.
1692 */
1693 int rc2 = SSMR3LiveDoStep2(pSSM);
1694 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1695 rc = rc2;
1696
1697 rc2 = SSMR3LiveDone(pSSM);
1698 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1699 rc = rc2;
1700
1701 /*
1702 * Advance to the final state and return.
1703 */
1704 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1705 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1706 return rc;
1707}
1708
1709
1710/**
1711 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1712 * SSMR3LiveSave.
1713 *
1714 * @returns VBox status code.
1715 *
1716 * @param pVM The cross context VM structure.
1717 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1718 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1719 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1720 * @param pvStreamOpsUser The user argument to the stream methods.
1721 * @param enmAfter What to do afterwards.
1722 * @param pfnProgress Progress callback. Optional.
1723 * @param pvProgressUser User argument for the progress callback.
1724 * @param ppSSM Where to return the saved state handle in case of a
1725 * live snapshot scenario.
1726 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1727 *
1728 * @thread EMT
1729 */
1730static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1731 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM,
1732 bool fSkipStateChanges)
1733{
1734 int rc = VINF_SUCCESS;
1735
1736 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1737 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1738
1739 /*
1740 * Validate input.
1741 */
1742 AssertPtrNull(pszFilename);
1743 AssertPtrNull(pStreamOps);
1744 AssertPtr(pVM);
1745 Assert( enmAfter == SSMAFTER_DESTROY
1746 || enmAfter == SSMAFTER_CONTINUE
1747 || enmAfter == SSMAFTER_TELEPORT);
1748 AssertPtr(ppSSM);
1749 *ppSSM = NULL;
1750
1751 /*
1752 * Change the state and perform/start the saving.
1753 */
1754 if (!fSkipStateChanges)
1755 {
1756 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1757 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1758 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1759 }
1760 else
1761 {
1762 Assert(enmAfter != SSMAFTER_TELEPORT);
1763 rc = 1;
1764 }
1765
1766 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1767 {
1768 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1769 if (!fSkipStateChanges)
1770 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1771 }
1772 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1773 {
1774 Assert(!fSkipStateChanges);
1775 if (enmAfter == SSMAFTER_TELEPORT)
1776 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1777 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1778 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1779 /* (We're not subject to cancellation just yet.) */
1780 }
1781 else
1782 Assert(RT_FAILURE(rc));
1783 return rc;
1784}
1785
1786
1787/**
1788 * Common worker for VMR3Save and VMR3Teleport.
1789 *
1790 * @returns VBox status code.
1791 *
1792 * @param pVM The cross context VM structure.
1793 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1794 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1795 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1796 * @param pvStreamOpsUser The user argument to the stream methods.
1797 * @param enmAfter What to do afterwards.
1798 * @param pfnProgress Progress callback. Optional.
1799 * @param pvProgressUser User argument for the progress callback.
1800 * @param pfSuspended Set if we suspended the VM.
1801 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1802 *
1803 * @thread Non-EMT
1804 */
1805static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1806 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1807 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended,
1808 bool fSkipStateChanges)
1809{
1810 /*
1811 * Request the operation in EMT(0).
1812 */
1813 PSSMHANDLE pSSM;
1814 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/,
1815 (PFNRT)vmR3Save, 10, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1816 enmAfter, pfnProgress, pvProgressUser, &pSSM, fSkipStateChanges);
1817 if ( RT_SUCCESS(rc)
1818 && pSSM)
1819 {
1820 Assert(!fSkipStateChanges);
1821
1822 /*
1823 * Live snapshot.
1824 *
1825 * The state handling here is kind of tricky, doing it on EMT(0) helps
1826 * a bit. See the VMSTATE diagram for details.
1827 */
1828 rc = SSMR3LiveDoStep1(pSSM);
1829 if (RT_SUCCESS(rc))
1830 {
1831 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1832 for (;;)
1833 {
1834 /* Try suspend the VM. */
1835 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1836 vmR3LiveDoSuspend, pfSuspended);
1837 if (rc != VERR_TRY_AGAIN)
1838 break;
1839
1840 /* Wait for the state to change. */
1841 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1842 }
1843 if (RT_SUCCESS(rc))
1844 rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1845 else
1846 {
1847 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1848 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc)); NOREF(rc2);
1849 }
1850 }
1851 else
1852 {
1853 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1854 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1855
1856 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1857 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1858 rc = rc2;
1859 }
1860 }
1861
1862 return rc;
1863}
1864
1865
1866/**
1867 * Save current VM state.
1868 *
1869 * Can be used for both saving the state and creating snapshots.
1870 *
1871 * When called for a VM in the Running state, the saved state is created live
1872 * and the VM is only suspended when the final part of the saving is preformed.
1873 * The VM state will not be restored to Running in this case and it's up to the
1874 * caller to call VMR3Resume if this is desirable. (The rational is that the
1875 * caller probably wish to reconfigure the disks before resuming the VM.)
1876 *
1877 * @returns VBox status code.
1878 *
1879 * @param pUVM The VM which state should be saved.
1880 * @param pszFilename The name of the save state file.
1881 * @param fContinueAfterwards Whether continue execution afterwards or not.
1882 * When in doubt, set this to true.
1883 * @param pfnProgress Progress callback. Optional.
1884 * @param pvUser User argument for the progress callback.
1885 * @param pfSuspended Set if we suspended the VM.
1886 *
1887 * @thread Non-EMT.
1888 * @vmstate Suspended or Running
1889 * @vmstateto Saving+Suspended or
1890 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1891 */
1892VMMR3DECL(int) VMR3Save(PUVM pUVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser,
1893 bool *pfSuspended)
1894{
1895 LogFlow(("VMR3Save: pUVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1896 pUVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1897
1898 /*
1899 * Validate input.
1900 */
1901 AssertPtr(pfSuspended);
1902 *pfSuspended = false;
1903 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1904 PVM pVM = pUVM->pVM;
1905 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1906 VM_ASSERT_OTHER_THREAD(pVM);
1907 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1908 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1909 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1910
1911 /*
1912 * Join paths with VMR3Teleport.
1913 */
1914 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1915 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1916 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1917 enmAfter, pfnProgress, pvUser, pfSuspended,
1918 false /* fSkipStateChanges */);
1919 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1920 return rc;
1921}
1922
1923/**
1924 * Save current VM state (used by FTM)
1925 *
1926 *
1927 * @returns VBox status code.
1928 *
1929 * @param pUVM The user mode VM handle.
1930 * @param pStreamOps The stream methods.
1931 * @param pvStreamOpsUser The user argument to the stream methods.
1932 * @param pfSuspended Set if we suspended the VM.
1933 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1934 *
1935 * @thread Any
1936 * @vmstate Suspended or Running
1937 * @vmstateto Saving+Suspended or
1938 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1939 */
1940VMMR3_INT_DECL(int) VMR3SaveFT(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended, bool fSkipStateChanges)
1941{
1942 LogFlow(("VMR3SaveFT: pUVM=%p pStreamOps=%p pvSteamOpsUser=%p pfSuspended=%p\n",
1943 pUVM, pStreamOps, pvStreamOpsUser, pfSuspended));
1944
1945 /*
1946 * Validate input.
1947 */
1948 AssertPtr(pfSuspended);
1949 *pfSuspended = false;
1950 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1951 PVM pVM = pUVM->pVM;
1952 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1953 AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
1954
1955 /*
1956 * Join paths with VMR3Teleport.
1957 */
1958 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1959 NULL, pStreamOps, pvStreamOpsUser,
1960 SSMAFTER_CONTINUE, NULL, NULL, pfSuspended,
1961 fSkipStateChanges);
1962 LogFlow(("VMR3SaveFT: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1963 return rc;
1964}
1965
1966
1967/**
1968 * Teleport the VM (aka live migration).
1969 *
1970 * @returns VBox status code.
1971 *
1972 * @param pUVM The VM which state should be saved.
1973 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1974 * @param pStreamOps The stream methods.
1975 * @param pvStreamOpsUser The user argument to the stream methods.
1976 * @param pfnProgress Progress callback. Optional.
1977 * @param pvProgressUser User argument for the progress callback.
1978 * @param pfSuspended Set if we suspended the VM.
1979 *
1980 * @thread Non-EMT.
1981 * @vmstate Suspended or Running
1982 * @vmstateto Saving+Suspended or
1983 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1984 */
1985VMMR3DECL(int) VMR3Teleport(PUVM pUVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1986 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1987{
1988 LogFlow(("VMR3Teleport: pUVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
1989 pUVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1990
1991 /*
1992 * Validate input.
1993 */
1994 AssertPtr(pfSuspended);
1995 *pfSuspended = false;
1996 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1997 PVM pVM = pUVM->pVM;
1998 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1999 VM_ASSERT_OTHER_THREAD(pVM);
2000 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2001 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
2002
2003 /*
2004 * Join paths with VMR3Save.
2005 */
2006 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
2007 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
2008 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended,
2009 false /* fSkipStateChanges */);
2010 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
2011 return rc;
2012}
2013
2014
2015
2016/**
2017 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
2018 *
2019 * @returns VBox status code.
2020 *
2021 * @param pUVM Pointer to the VM.
2022 * @param pszFilename The name of the file. NULL if pStreamOps is used.
2023 * @param pStreamOps The stream methods. NULL if pszFilename is used.
2024 * @param pvStreamOpsUser The user argument to the stream methods.
2025 * @param pfnProgress Progress callback. Optional.
2026 * @param pvProgressUser User argument for the progress callback.
2027 * @param fTeleporting Indicates whether we're teleporting or not.
2028 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
2029 *
2030 * @thread EMT.
2031 */
2032static DECLCALLBACK(int) vmR3Load(PUVM pUVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2033 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
2034 bool fSkipStateChanges)
2035{
2036 int rc = VINF_SUCCESS;
2037
2038 LogFlow(("vmR3Load: pUVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
2039 pUVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));
2040
2041 /*
2042 * Validate input (paranoia).
2043 */
2044 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2045 PVM pVM = pUVM->pVM;
2046 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2047 AssertPtrNull(pszFilename);
2048 AssertPtrNull(pStreamOps);
2049 AssertPtrNull(pfnProgress);
2050
2051 if (!fSkipStateChanges)
2052 {
2053 /*
2054 * Change the state and perform the load.
2055 *
2056 * Always perform a relocation round afterwards to make sure hypervisor
2057 * selectors and such are correct.
2058 */
2059 rc = vmR3TrySetState(pVM, "VMR3Load", 2,
2060 VMSTATE_LOADING, VMSTATE_CREATED,
2061 VMSTATE_LOADING, VMSTATE_SUSPENDED);
2062 if (RT_FAILURE(rc))
2063 return rc;
2064 }
2065 pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;
2066
2067 uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pUVM);
2068 rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
2069 if (RT_SUCCESS(rc))
2070 {
2071 VMR3Relocate(pVM, 0 /*offDelta*/);
2072 if (!fSkipStateChanges)
2073 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
2074 }
2075 else
2076 {
2077 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
2078 if (!fSkipStateChanges)
2079 vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);
2080
2081 if (cErrorsPriorToSave == VMR3GetErrorCount(pUVM))
2082 rc = VMSetError(pVM, rc, RT_SRC_POS,
2083 N_("Unable to restore the virtual machine's saved state from '%s'. "
2084 "It may be damaged or from an older version of VirtualBox. "
2085 "Please discard the saved state before starting the virtual machine"),
2086 pszFilename);
2087 }
2088
2089 return rc;
2090}
2091
2092
2093/**
2094 * Loads a VM state into a newly created VM or a one that is suspended.
2095 *
2096 * To restore a saved state on VM startup, call this function and then resume
2097 * the VM instead of powering it on.
2098 *
2099 * @returns VBox status code.
2100 *
2101 * @param pUVM The user mode VM structure.
2102 * @param pszFilename The name of the save state file.
2103 * @param pfnProgress Progress callback. Optional.
2104 * @param pvUser User argument for the progress callback.
2105 *
2106 * @thread Any thread.
2107 * @vmstate Created, Suspended
2108 * @vmstateto Loading+Suspended
2109 */
2110VMMR3DECL(int) VMR3LoadFromFile(PUVM pUVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
2111{
2112 LogFlow(("VMR3LoadFromFile: pUVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
2113 pUVM, pszFilename, pszFilename, pfnProgress, pvUser));
2114
2115 /*
2116 * Validate input.
2117 */
2118 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2119 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
2120
2121 /*
2122 * Forward the request to EMT(0). No need to setup a rendezvous here
2123 * since there is no execution taking place when this call is allowed.
2124 */
2125 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2126 pUVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
2127 false /*fTeleporting*/, false /* fSkipStateChanges */);
2128 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
2129 return rc;
2130}
2131
2132
2133/**
2134 * VMR3LoadFromFile for arbitrary file streams.
2135 *
2136 * @returns VBox status code.
2137 *
2138 * @param pUVM Pointer to the VM.
2139 * @param pStreamOps The stream methods.
2140 * @param pvStreamOpsUser The user argument to the stream methods.
2141 * @param pfnProgress Progress callback. Optional.
2142 * @param pvProgressUser User argument for the progress callback.
2143 *
2144 * @thread Any thread.
2145 * @vmstate Created, Suspended
2146 * @vmstateto Loading+Suspended
2147 */
2148VMMR3DECL(int) VMR3LoadFromStream(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2149 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
2150{
2151 LogFlow(("VMR3LoadFromStream: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
2152 pUVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
2153
2154 /*
2155 * Validate input.
2156 */
2157 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2158 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2159
2160 /*
2161 * Forward the request to EMT(0). No need to setup a rendezvous here
2162 * since there is no execution taking place when this call is allowed.
2163 */
2164 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2165 pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
2166 true /*fTeleporting*/, false /* fSkipStateChanges */);
2167 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2168 return rc;
2169}
2170
2171
2172/**
2173 * Special version for the FT component, it skips state changes.
2174 *
2175 * @returns VBox status code.
2176 *
2177 * @param pUVM The VM handle.
2178 * @param pStreamOps The stream methods.
2179 * @param pvStreamOpsUser The user argument to the stream methods.
2180 *
2181 * @thread Any thread.
2182 * @vmstate Created, Suspended
2183 * @vmstateto Loading+Suspended
2184 */
2185VMMR3_INT_DECL(int) VMR3LoadFromStreamFT(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
2186{
2187 LogFlow(("VMR3LoadFromStreamFT: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p\n", pUVM, pStreamOps, pvStreamOpsUser));
2188
2189 /*
2190 * Validate input.
2191 */
2192 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2193 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2194
2195 /*
2196 * Forward the request to EMT(0). No need to setup a rendezvous here
2197 * since there is no execution taking place when this call is allowed.
2198 */
2199 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2200 pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
2201 true /*fTeleporting*/, true /* fSkipStateChanges */);
2202 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2203 return rc;
2204}
2205
/**
 * EMT rendezvous worker for VMR3PowerOff.
 *
 * The EMTs arrive in descending order (see VMR3PowerOff): the EMT with the
 * highest id changes the VM state first, and EMT(0) - the last one through -
 * performs the actual power off work after all the others have stopped.
 *
 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
 *          return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser  Ignored.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT thru here will change the state to PoweringOff.
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
                                 VMSTATE_POWERING_OFF, VMSTATE_RUNNING, /* 1 */
                                 VMSTATE_POWERING_OFF, VMSTATE_SUSPENDED, /* 2 */
                                 VMSTATE_POWERING_OFF, VMSTATE_DEBUGGING, /* 3 */
                                 VMSTATE_POWERING_OFF, VMSTATE_LOAD_FAILURE, /* 4 */
                                 VMSTATE_POWERING_OFF, VMSTATE_GURU_MEDITATION, /* 5 */
                                 VMSTATE_POWERING_OFF, VMSTATE_FATAL_ERROR, /* 6 */
                                 VMSTATE_POWERING_OFF, VMSTATE_CREATED, /* 7 */ /** @todo update the diagram! */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS, /* 8 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS, /* 9 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS); /* 11 */
        if (RT_FAILURE(rc))
            return rc;
        /* On success rc is the 1-based index of the transition pair that
           matched (see the numbered comments above); for pair 7 and later a
           save/teleport operation may be pending, so cancel it. */
        if (rc >= 7)
            SSMR3Cancel(pVM->pUVM);
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertMsgReturn(   enmVMState == VMSTATE_POWERING_OFF
                    || enmVMState == VMSTATE_POWERING_OFF_LS,
                    ("%s\n", VMR3GetStateName(enmVMState)),
                    VERR_VM_INVALID_VM_STATE);

    /*
     * EMT(0) does the actual power off work here *after* all the other EMTs
     * have been thru and entered the STOPPED state.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
    if (pVCpu->idCpu == 0)
    {
        /*
         * For debugging purposes, we will log a summary of the guest state at this point.
         */
        if (enmVMState != VMSTATE_GURU_MEDITATION)
        {
            /** @todo make the state dumping at VMR3PowerOff optional. */
            bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
            RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
            DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM->pUVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM->pUVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
            /** @todo dump guest call stack. */
            RTLogRelSetBuffering(fOldBuffered);
            RTLogRelPrintf("************** End of Guest state at power off ***************\n");
        }

        /*
         * Perform the power off notifications and advance the state to
         * Off or OffLS.
         */
        PDMR3PowerOff(pVM);
        DBGFR3PowerOff(pVM);

        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_POWERING_OFF_LS)
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS, false /*fSetRatherThanClearFF*/);
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF, false /*fSetRatherThanClearFF*/);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }
    else if (enmVMState != VMSTATE_GURU_MEDITATION)
    {
        /* Non-zero EMTs only dump their own per-CPU state summary. */
        /** @todo make the state dumping at VMR3PowerOff optional. */
        bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
        RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
        DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
        RTLogRelPrintf("***\n");
        DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
        RTLogRelPrintf("***\n");
        DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
        RTLogRelPrintf("***\n");
        RTLogRelSetBuffering(fOldBuffered);
        RTLogRelPrintf("************** End of Guest state at power off for VCpu %u ***************\n", pVCpu->idCpu);
    }

    return VINF_EM_OFF;
}
2315
2316
2317/**
2318 * Power off the VM.
2319 *
2320 * @returns VBox status code. When called on EMT, this will be a strict status
2321 * code that has to be propagated up the call stack.
2322 *
2323 * @param pUVM The handle of the VM to be powered off.
2324 *
2325 * @thread Any thread.
2326 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2327 * @vmstateto Off or OffLS
2328 */
2329VMMR3DECL(int) VMR3PowerOff(PUVM pUVM)
2330{
2331 LogFlow(("VMR3PowerOff: pUVM=%p\n", pUVM));
2332 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2333 PVM pVM = pUVM->pVM;
2334 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2335
2336 /*
2337 * Gather all the EMTs to make sure there are no races before
2338 * changing the VM state.
2339 */
2340 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2341 vmR3PowerOff, NULL);
2342 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2343 return rc;
2344}
2345
2346
2347/**
2348 * Destroys the VM.
2349 *
2350 * The VM must be powered off (or never really powered on) to call this
2351 * function. The VM handle is destroyed and can no longer be used up successful
2352 * return.
2353 *
2354 * @returns VBox status code.
2355 *
2356 * @param pUVM The user mode VM handle.
2357 *
2358 * @thread Any none emulation thread.
2359 * @vmstate Off, Created
2360 * @vmstateto N/A
2361 */
2362VMMR3DECL(int) VMR3Destroy(PUVM pUVM)
2363{
2364 LogFlow(("VMR3Destroy: pUVM=%p\n", pUVM));
2365
2366 /*
2367 * Validate input.
2368 */
2369 if (!pUVM)
2370 return VERR_INVALID_VM_HANDLE;
2371 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2372 PVM pVM = pUVM->pVM;
2373 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2374 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2375
2376 /*
2377 * Change VM state to destroying and aall vmR3Destroy on each of the EMTs
2378 * ending with EMT(0) doing the bulk of the cleanup.
2379 */
2380 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2381 if (RT_FAILURE(rc))
2382 return rc;
2383
2384 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2385 AssertLogRelRC(rc);
2386
2387 /*
2388 * Wait for EMTs to quit and destroy the UVM.
2389 */
2390 vmR3DestroyUVM(pUVM, 30000);
2391
2392 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2393 return VINF_SUCCESS;
2394}
2395
2396
/**
 * Internal destruction worker.
 *
 * This is either called from VMR3Destroy via VMR3ReqCallU or from
 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
 * VMR3Destroy().
 *
 * When called on EMT(0), it will performed the great bulk of the destruction.
 * When called on the other EMTs, they will do nothing and the whole purpose is
 * to return VINF_EM_TERMINATE so they break out of their run loops.
 *
 * @returns VINF_EM_TERMINATE.
 * @param   pVM     The cross context VM structure.
 */
DECLCALLBACK(int) vmR3Destroy(PVM pVM)
{
    PUVM   pUVM  = pVM->pUVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu);
    LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));

    /*
     * Only VCPU 0 does the full cleanup (last).
     */
    if (pVCpu->idCpu == 0)
    {
        /*
         * Dump statistics to the log.
         */
#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
        RTLogFlags(NULL, "nodisabled nobuffered");
#endif
//#ifdef VBOX_WITH_STATISTICS
//        STAMR3Dump(pUVM, "*");
//#else
        LogRel(("************************* Statistics *************************\n"));
        STAMR3DumpToReleaseLog(pUVM, "*");
        LogRel(("********************* End of statistics **********************\n"));
//#endif

        /*
         * Destroy the VM components.
         *
         * NOTE: the termination order below is significant - components are
         * torn down roughly in reverse order of their dependencies, ending
         * with VMM (which shuts down ring-0), CPUM, SSM and finally MM.
         */
        int rc = TMR3Term(pVM);
        AssertRC(rc);
#ifdef VBOX_WITH_DEBUGGER
        rc = DBGCTcpTerminate(pUVM, pUVM->vm.s.pvDBGC);
        pUVM->vm.s.pvDBGC = NULL;
#endif
        AssertRC(rc);
        rc = FTMR3Term(pVM);
        AssertRC(rc);
        rc = PDMR3Term(pVM);
        AssertRC(rc);
        rc = GIMR3Term(pVM);
        AssertRC(rc);
        rc = DBGFR3Term(pVM);
        AssertRC(rc);
        rc = IEMR3Term(pVM);
        AssertRC(rc);
        rc = EMR3Term(pVM);
        AssertRC(rc);
        rc = IOMR3Term(pVM);
        AssertRC(rc);
#ifdef VBOX_WITH_RAW_MODE
        rc = CSAMR3Term(pVM);
        AssertRC(rc);
        rc = PATMR3Term(pVM);
        AssertRC(rc);
#endif
        rc = TRPMR3Term(pVM);
        AssertRC(rc);
        rc = SELMR3Term(pVM);
        AssertRC(rc);
#ifdef VBOX_WITH_REM
        rc = REMR3Term(pVM);
        AssertRC(rc);
#endif
        rc = HMR3Term(pVM);
        AssertRC(rc);
        rc = NEMR3Term(pVM);
        AssertRC(rc);
        rc = PGMR3Term(pVM);
        AssertRC(rc);
        rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
        AssertRC(rc);
        rc = CPUMR3Term(pVM);
        AssertRC(rc);
        SSMR3Term(pVM);
        rc = PDMR3CritSectBothTerm(pVM);
        AssertRC(rc);
        rc = MMR3Term(pVM);
        AssertRC(rc);

        /*
         * We're done, tell the other EMTs to quit.
         */
        ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
        ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
        LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
    }

    /*
     * Decrement the active EMT count here.
     */
    PUVMCPU pUVCpu = &pUVM->aCpus[pVCpu->idCpu];
    if (!pUVCpu->vm.s.fBeenThruVmDestroy)
    {
        pUVCpu->vm.s.fBeenThruVmDestroy = true;
        ASMAtomicDecU32(&pUVM->vm.s.cActiveEmts);
    }
    else
        /* Each EMT must come through here exactly once. */
        AssertFailed();

    return VINF_EM_TERMINATE;
}
2513
2514
/**
 * Destroys the UVM portion.
 *
 * This is called as the final step in the VM destruction or as the cleanup
 * in case of a creation failure.
 *
 * @param   pUVM            The user mode VM structure.
 * @param   cMilliesEMTWait The number of milliseconds to wait for the emulation
 *                          threads.
 */
static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
{
    /*
     * Signal termination of each the emulation threads and
     * wait for them to complete.
     */
    /* Signal them - in reverse order since EMT(0) waits for the others. */
    ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
    if (pUVM->pVM)
        VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
    VMCPUID iCpu = pUVM->cCpus;
    while (iCpu-- > 0)
    {
        VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
        RTSemEventSignal(pUVM->aCpus[iCpu].vm.s.EventSemWait);
    }

    /* Wait for EMT(0), it in turn waits for the rest. */
    ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);

    RTTHREAD const hSelf = RTThreadSelf();
    RTTHREAD hThread = pUVM->aCpus[0].vm.s.ThreadEMT;
    if (   hThread != NIL_RTTHREAD
        && hThread != hSelf)
    {
        /* Wait at least 2 seconds; retry once on timeout so a debugger
           pause doesn't immediately trigger the assertion below. */
        int rc2 = RTThreadWait(hThread, RT_MAX(cMilliesEMTWait, 2000), NULL);
        if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
            rc2 = RTThreadWait(hThread, 1000, NULL);
        AssertLogRelMsgRC(rc2, ("iCpu=0 rc=%Rrc\n", rc2));
        if (RT_SUCCESS(rc2))
            pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
    }

    /* Just in case we're in a weird failure situation w/o EMT(0) to do the
       waiting, wait the other EMTs too. */
    for (iCpu = 1; iCpu < pUVM->cCpus; iCpu++)
    {
        ASMAtomicXchgHandle(&pUVM->aCpus[iCpu].vm.s.ThreadEMT, NIL_RTTHREAD, &hThread);
        if (hThread != NIL_RTTHREAD)
        {
            if (hThread != hSelf)
            {
                int rc2 = RTThreadWait(hThread, 250 /*ms*/, NULL);
                AssertLogRelMsgRC(rc2, ("iCpu=%u rc=%Rrc\n", iCpu, rc2));
                if (RT_SUCCESS(rc2))
                    continue;
            }
            /* Wait failed (or it's our own thread): put the handle back. */
            pUVM->aCpus[iCpu].vm.s.ThreadEMT = hThread;
        }
    }

    /* Cleanup the semaphores. */
    iCpu = pUVM->cCpus;
    while (iCpu-- > 0)
    {
        RTSemEventDestroy(pUVM->aCpus[iCpu].vm.s.EventSemWait);
        pUVM->aCpus[iCpu].vm.s.EventSemWait = NIL_RTSEMEVENT;
    }

    /*
     * Free the event semaphores associated with the request packets.
     */
    unsigned cReqs = 0;
    for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
    {
        PVMREQ pReq = pUVM->vm.s.apReqFree[i];
        pUVM->vm.s.apReqFree[i] = NULL;
        for (; pReq; pReq = pReq->pNext, cReqs++)
        {
            pReq->enmState = VMREQSTATE_INVALID;
            RTSemEventDestroy(pReq->EventSem);
        }
    }
    Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);

    /*
     * Kill all queued requests. (There really shouldn't be any!)
     */
    for (unsigned i = 0; i < 10; i++)
    {
        PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pPriorityReqs, NULL, PVMREQ);
        if (!pReqHead)
        {
            pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pNormalReqs, NULL, PVMREQ);
            if (!pReqHead)
                break;
        }
        AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));

        for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
        {
            ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
            ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
            RTSemEventSignal(pReq->EventSem);
            RTThreadSleep(2);
            RTSemEventDestroy(pReq->EventSem);
        }
        /* give them a chance to respond before we free the request memory. */
        RTThreadSleep(32);
    }

    /*
     * Now all queued VCPU requests (again, there shouldn't be any).
     */
    for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
    {
        PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];

        for (unsigned i = 0; i < 10; i++)
        {
            PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pPriorityReqs, NULL, PVMREQ);
            if (!pReqHead)
            {
                pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pNormalReqs, NULL, PVMREQ);
                if (!pReqHead)
                    break;
            }
            AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));

            for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
            {
                ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
                ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
                RTSemEventSignal(pReq->EventSem);
                RTThreadSleep(2);
                RTSemEventDestroy(pReq->EventSem);
            }
            /* give them a chance to respond before we free the request memory. */
            RTThreadSleep(32);
        }
    }

    /*
     * Make sure the VMMR0.r0 module and whatever else is unloaded.
     */
    PDMR3TermUVM(pUVM);

    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
    RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);

    /*
     * Terminate the support library if initialized.
     */
    if (pUVM->vm.s.pSession)
    {
        int rc = SUPR3Term(false /*fForced*/);
        AssertRC(rc);
        pUVM->vm.s.pSession = NIL_RTR0PTR;
    }

    /*
     * Release the UVM structure reference.
     */
    VMR3ReleaseUVM(pUVM);

    /*
     * Clean up and flush logs.
     */
    RTLogFlush(NULL);
}
2685
2686
/**
 * Worker which checks integrity of some internal structures.
 * This is yet another attempt to track down that AVL tree crash.
 *
 * Only does real work in VBOX_STRICT builds; otherwise it is a no-op.
 *
 * @param   pVM     The cross context VM structure.
 */
static void vmR3CheckIntegrity(PVM pVM)
{
#ifdef VBOX_STRICT
    /* Any inconsistency found by PGM is treated as fatal. */
    int rc = PGMR3CheckIntegrity(pVM);
    AssertReleaseRC(rc);
#else
    RT_NOREF_PV(pVM);
#endif
}
2700
2701
/**
 * EMT rendezvous worker for VMR3ResetFF for doing soft/warm reset.
 *
 * The EMT with the highest id performs the state change; EMT(0), being the
 * last one through, does the actual component reset and advances the state
 * back again.
 *
 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESCHEDULE.
 *          (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser  The reset flags.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3SoftReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    uint32_t fResetFlags = *(uint32_t *)pvUser;


    /*
     * The first EMT will try change the state to resetting.  If this fails,
     * we won't get called for the other EMTs.
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "vmR3ResetSoft", 3,
                                 VMSTATE_SOFT_RESETTING, VMSTATE_RUNNING,
                                 VMSTATE_SOFT_RESETTING, VMSTATE_SUSPENDED,
                                 VMSTATE_SOFT_RESETTING_LS, VMSTATE_RUNNING_LS);
        if (RT_FAILURE(rc))
            return rc;
        pVM->vm.s.cResets++;
        pVM->vm.s.cSoftResets++;
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertLogRelMsgReturn(   enmVMState == VMSTATE_SOFT_RESETTING
                          || enmVMState == VMSTATE_SOFT_RESETTING_LS,
                          ("%s\n", VMR3GetStateName(enmVMState)),
                          VERR_VM_UNEXPECTED_UNSTABLE_STATE);

    /*
     * EMT(0) does the full cleanup *after* all the other EMTs has been
     * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
     *
     * Because there are per-cpu reset routines and order may/is important,
     * the following sequence looks a bit ugly...
     */

    /* Reset the VCpu state. */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);

    /*
     * Soft reset the VM components.
     */
    if (pVCpu->idCpu == 0)
    {
#ifdef VBOX_WITH_REM
        REMR3Reset(pVM);
#endif
        PDMR3SoftReset(pVM, fResetFlags);
        TRPMR3Reset(pVM);
        CPUMR3Reset(pVM);               /* This must come *after* PDM (due to APIC base MSR caching). */
        EMR3Reset(pVM);
        HMR3Reset(pVM);                 /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
        NEMR3Reset(pVM);

        /*
         * Since EMT(0) is the last to go thru here, it will advance the state.
         * (Unlike vmR3HardReset we won't be doing any suspending of live
         * migration VMs here since memory is unchanged.)
         */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_SOFT_RESETTING)
        {
            if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
            else
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
        }
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING_LS, VMSTATE_SOFT_RESETTING_LS, false /*fSetRatherThanClearFF*/);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }

    return VINF_EM_RESCHEDULE;
}
2790
2791
2792/**
2793 * EMT rendezvous worker for VMR3Reset and VMR3ResetFF.
2794 *
2795 * This is called by the emulation threads as a response to the reset request
2796 * issued by VMR3Reset().
2797 *
2798 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2799 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2800 *
2801 * @param pVM The cross context VM structure.
2802 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2803 * @param pvUser Ignored.
2804 */
2805static DECLCALLBACK(VBOXSTRICTRC) vmR3HardReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
2806{
2807 Assert(!pvUser); NOREF(pvUser);
2808
2809 /*
2810 * The first EMT will try change the state to resetting. If this fails,
2811 * we won't get called for the other EMTs.
2812 */
2813 if (pVCpu->idCpu == pVM->cCpus - 1)
2814 {
2815 int rc = vmR3TrySetState(pVM, "vmR3HardReset", 3,
2816 VMSTATE_RESETTING, VMSTATE_RUNNING,
2817 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
2818 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
2819 if (RT_FAILURE(rc))
2820 return rc;
2821 pVM->vm.s.cResets++;
2822 pVM->vm.s.cHardResets++;
2823 }
2824
2825 /*
2826 * Check the state.
2827 */
2828 VMSTATE enmVMState = VMR3GetState(pVM);
2829 AssertLogRelMsgReturn( enmVMState == VMSTATE_RESETTING
2830 || enmVMState == VMSTATE_RESETTING_LS,
2831 ("%s\n", VMR3GetStateName(enmVMState)),
2832 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
2833
2834 /*
2835 * EMT(0) does the full cleanup *after* all the other EMTs has been
2836 * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
2837 *
2838 * Because there are per-cpu reset routines and order may/is important,
2839 * the following sequence looks a bit ugly...
2840 */
2841 if (pVCpu->idCpu == 0)
2842 vmR3CheckIntegrity(pVM);
2843
2844 /* Reset the VCpu state. */
2845 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
2846
2847 /* Clear all pending forced actions. */
2848 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);
2849
2850 /*
2851 * Reset the VM components.
2852 */
2853 if (pVCpu->idCpu == 0)
2854 {
2855#ifdef VBOX_WITH_RAW_MODE
2856 PATMR3Reset(pVM);
2857 CSAMR3Reset(pVM);
2858#endif
2859 GIMR3Reset(pVM); /* This must come *before* PDM and TM. */
2860 PDMR3Reset(pVM);
2861 PGMR3Reset(pVM);
2862 SELMR3Reset(pVM);
2863 TRPMR3Reset(pVM);
2864#ifdef VBOX_WITH_REM
2865 REMR3Reset(pVM);
2866#endif
2867 IOMR3Reset(pVM);
2868 CPUMR3Reset(pVM); /* This must come *after* PDM (due to APIC base MSR caching). */
2869 TMR3Reset(pVM);
2870 EMR3Reset(pVM);
2871 HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
2872 NEMR3Reset(pVM);
2873
2874 /*
2875 * Do memory setup.
2876 */
2877 PGMR3MemSetup(pVM, true /*fAtReset*/);
2878 PDMR3MemSetup(pVM, true /*fAtReset*/);
2879
2880 /*
2881 * Since EMT(0) is the last to go thru here, it will advance the state.
2882 * When a live save is active, we will move on to SuspendingLS but
2883 * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
2884 */
2885 PUVM pUVM = pVM->pUVM;
2886 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
2887 enmVMState = pVM->enmVMState;
2888 if (enmVMState == VMSTATE_RESETTING)
2889 {
2890 if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
2891 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
2892 else
2893 vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
2894 }
2895 else
2896 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS, false /*fSetRatherThanClearFF*/);
2897 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
2898
2899 vmR3CheckIntegrity(pVM);
2900
2901 /*
2902 * Do the suspend bit as well.
2903 * It only requires some EMT(0) work at present.
2904 */
2905 if (enmVMState != VMSTATE_RESETTING)
2906 {
2907 vmR3SuspendDoWork(pVM);
2908 vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
2909 }
2910 }
2911
2912 return enmVMState == VMSTATE_RESETTING
2913 ? VINF_EM_RESET
2914 : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
2915}
2916
2917
2918/**
2919 * Internal worker for VMR3Reset, VMR3ResetFF, VMR3TripleFault.
2920 *
2921 * @returns VBox status code.
2922 * @param pVM The cross context VM structure.
2923 * @param fHardReset Whether it's a hard reset or not.
2924 * @param fResetFlags The reset flags (PDMVMRESET_F_XXX).
2925 */
2926static VBOXSTRICTRC vmR3ResetCommon(PVM pVM, bool fHardReset, uint32_t fResetFlags)
2927{
2928 LogFlow(("vmR3ResetCommon: fHardReset=%RTbool fResetFlags=%#x\n", fHardReset, fResetFlags));
2929 int rc;
2930 if (fHardReset)
2931 {
2932 /*
2933 * Hard reset.
2934 */
2935 /* Check whether we're supposed to power off instead of resetting. */
2936 if (pVM->vm.s.fPowerOffInsteadOfReset)
2937 {
2938 PUVM pUVM = pVM->pUVM;
2939 if ( pUVM->pVmm2UserMethods
2940 && pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff)
2941 pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff(pUVM->pVmm2UserMethods, pUVM);
2942 return VMR3PowerOff(pUVM);
2943 }
2944
2945 /* Gather all the EMTs to make sure there are no races before changing
2946 the VM state. */
2947 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2948 vmR3HardReset, NULL);
2949 }
2950 else
2951 {
2952 /*
2953 * Soft reset. Since we only support this with a single CPU active,
2954 * we must be on EMT #0 here.
2955 */
2956 VM_ASSERT_EMT0(pVM);
2957 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2958 vmR3SoftReset, &fResetFlags);
2959 }
2960
2961 LogFlow(("vmR3ResetCommon: returns %Rrc\n", rc));
2962 return rc;
2963}
2964
2965
2966
2967/**
2968 * Reset the current VM.
2969 *
2970 * @returns VBox status code.
2971 * @param pUVM The VM to reset.
2972 */
2973VMMR3DECL(int) VMR3Reset(PUVM pUVM)
2974{
2975 LogFlow(("VMR3Reset:\n"));
2976 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2977 PVM pVM = pUVM->pVM;
2978 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2979
2980 return VBOXSTRICTRC_VAL(vmR3ResetCommon(pVM, true, 0));
2981}
2982
2983
2984/**
2985 * Handle the reset force flag or triple fault.
2986 *
2987 * This handles both soft and hard resets (see PDMVMRESET_F_XXX).
2988 *
2989 * @returns VBox status code.
2990 * @param pVM The cross context VM structure.
2991 * @thread EMT
2992 *
2993 * @remarks Caller is expected to clear the VM_FF_RESET force flag.
2994 */
2995VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetFF(PVM pVM)
2996{
2997 LogFlow(("VMR3ResetFF:\n"));
2998
2999 /*
3000 * First consult the firmware on whether this is a hard or soft reset.
3001 */
3002 uint32_t fResetFlags;
3003 bool fHardReset = PDMR3GetResetInfo(pVM, 0 /*fOverride*/, &fResetFlags);
3004 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
3005}
3006
3007
3008/**
3009 * For handling a CPU reset on triple fault.
3010 *
3011 * According to one mainboard manual, a CPU triple fault causes the 286 CPU to
3012 * send a SHUTDOWN signal to the chipset. The chipset responds by sending a
3013 * RESET signal to the CPU. So, it should be very similar to a soft/warm reset.
3014 *
3015 * @returns VBox status code.
3016 * @param pVM The cross context VM structure.
3017 * @thread EMT
3018 */
3019VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetTripleFault(PVM pVM)
3020{
3021 LogFlow(("VMR3ResetTripleFault:\n"));
3022
3023 /*
3024 * First consult the firmware on whether this is a hard or soft reset.
3025 */
3026 uint32_t fResetFlags;
3027 bool fHardReset = PDMR3GetResetInfo(pVM, PDMVMRESET_F_TRIPLE_FAULT, &fResetFlags);
3028 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
3029}
3030
3031
3032/**
3033 * Gets the user mode VM structure pointer given Pointer to the VM.
3034 *
3035 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
3036 * invalid (asserted).
3037 * @param pVM The cross context VM structure.
3038 * @sa VMR3GetVM, VMR3RetainUVM
3039 */
3040VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
3041{
3042 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
3043 return pVM->pUVM;
3044}
3045
3046
3047/**
3048 * Gets the shared VM structure pointer given the pointer to the user mode VM
3049 * structure.
3050 *
3051 * @returns Pointer to the VM.
3052 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
3053 * is currently associated with it.
3054 * @param pUVM The user mode VM handle.
3055 * @sa VMR3GetUVM
3056 */
3057VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
3058{
3059 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3060 return pUVM->pVM;
3061}
3062
3063
3064/**
3065 * Retain the user mode VM handle.
3066 *
3067 * @returns Reference count.
3068 * UINT32_MAX if @a pUVM is invalid.
3069 *
3070 * @param pUVM The user mode VM handle.
3071 * @sa VMR3ReleaseUVM
3072 */
3073VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
3074{
3075 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
3076 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
3077 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
3078 return cRefs;
3079}
3080
3081
3082/**
3083 * Does the final release of the UVM structure.
3084 *
3085 * @param pUVM The user mode VM handle.
3086 */
3087static void vmR3DoReleaseUVM(PUVM pUVM)
3088{
3089 /*
3090 * Free the UVM.
3091 */
3092 Assert(!pUVM->pVM);
3093
3094 MMR3HeapFree(pUVM->vm.s.pszName);
3095 pUVM->vm.s.pszName = NULL;
3096
3097 MMR3TermUVM(pUVM);
3098 STAMR3TermUVM(pUVM);
3099
3100 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
3101 RTTlsFree(pUVM->vm.s.idxTLS);
3102 RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
3103}
3104
3105
3106/**
3107 * Releases a refernece to the mode VM handle.
3108 *
3109 * @returns The new reference count, 0 if destroyed.
3110 * UINT32_MAX if @a pUVM is invalid.
3111 *
3112 * @param pUVM The user mode VM handle.
3113 * @sa VMR3RetainUVM
3114 */
3115VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
3116{
3117 if (!pUVM)
3118 return 0;
3119 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
3120 uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
3121 if (!cRefs)
3122 vmR3DoReleaseUVM(pUVM);
3123 else
3124 AssertMsg(cRefs < _64K, ("%u\n", cRefs));
3125 return cRefs;
3126}
3127
3128
3129/**
3130 * Gets the VM name.
3131 *
3132 * @returns Pointer to a read-only string containing the name. NULL if called
3133 * too early.
3134 * @param pUVM The user mode VM handle.
3135 */
3136VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
3137{
3138 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3139 return pUVM->vm.s.pszName;
3140}
3141
3142
3143/**
3144 * Gets the VM UUID.
3145 *
3146 * @returns pUuid on success, NULL on failure.
3147 * @param pUVM The user mode VM handle.
3148 * @param pUuid Where to store the UUID.
3149 */
3150VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
3151{
3152 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3153 AssertPtrReturn(pUuid, NULL);
3154
3155 *pUuid = pUVM->vm.s.Uuid;
3156 return pUuid;
3157}
3158
3159
3160/**
3161 * Gets the current VM state.
3162 *
3163 * @returns The current VM state.
3164 * @param pVM The cross context VM structure.
3165 * @thread Any
3166 */
3167VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
3168{
3169 AssertMsgReturn(RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE), ("%p\n", pVM), VMSTATE_TERMINATED);
3170 VMSTATE enmVMState = pVM->enmVMState;
3171 return enmVMState >= VMSTATE_CREATING && enmVMState <= VMSTATE_TERMINATED ? enmVMState : VMSTATE_TERMINATED;
3172}
3173
3174
3175/**
3176 * Gets the current VM state.
3177 *
3178 * @returns The current VM state.
3179 * @param pUVM The user-mode VM handle.
3180 * @thread Any
3181 */
3182VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
3183{
3184 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
3185 if (RT_UNLIKELY(!pUVM->pVM))
3186 return VMSTATE_TERMINATED;
3187 return pUVM->pVM->enmVMState;
3188}
3189
3190
3191/**
3192 * Gets the state name string for a VM state.
3193 *
3194 * @returns Pointer to the state name. (readonly)
3195 * @param enmState The state.
3196 */
3197VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
3198{
3199 switch (enmState)
3200 {
3201 case VMSTATE_CREATING: return "CREATING";
3202 case VMSTATE_CREATED: return "CREATED";
3203 case VMSTATE_LOADING: return "LOADING";
3204 case VMSTATE_POWERING_ON: return "POWERING_ON";
3205 case VMSTATE_RESUMING: return "RESUMING";
3206 case VMSTATE_RUNNING: return "RUNNING";
3207 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
3208 case VMSTATE_RUNNING_FT: return "RUNNING_FT";
3209 case VMSTATE_RESETTING: return "RESETTING";
3210 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
3211 case VMSTATE_SOFT_RESETTING: return "SOFT_RESETTING";
3212 case VMSTATE_SOFT_RESETTING_LS: return "SOFT_RESETTING_LS";
3213 case VMSTATE_SUSPENDED: return "SUSPENDED";
3214 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
3215 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
3216 case VMSTATE_SUSPENDING: return "SUSPENDING";
3217 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
3218 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
3219 case VMSTATE_SAVING: return "SAVING";
3220 case VMSTATE_DEBUGGING: return "DEBUGGING";
3221 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
3222 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
3223 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
3224 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
3225 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
3226 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
3227 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
3228 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
3229 case VMSTATE_OFF: return "OFF";
3230 case VMSTATE_OFF_LS: return "OFF_LS";
3231 case VMSTATE_DESTROYING: return "DESTROYING";
3232 case VMSTATE_TERMINATED: return "TERMINATED";
3233
3234 default:
3235 AssertMsgFailed(("Unknown state %d\n", enmState));
3236 return "Unknown!\n";
3237 }
3238}
3239
3240
3241/**
3242 * Validates the state transition in strict builds.
3243 *
3244 * @returns true if valid, false if not.
3245 *
3246 * @param enmStateOld The old (current) state.
3247 * @param enmStateNew The proposed new state.
3248 *
3249 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
3250 * diagram (under State Machine Diagram).
3251 */
3252static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
3253{
3254#ifndef VBOX_STRICT
3255 RT_NOREF2(enmStateOld, enmStateNew);
3256#else
3257 switch (enmStateOld)
3258 {
3259 case VMSTATE_CREATING:
3260 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3261 break;
3262
3263 case VMSTATE_CREATED:
3264 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
3265 || enmStateNew == VMSTATE_POWERING_ON
3266 || enmStateNew == VMSTATE_POWERING_OFF
3267 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3268 break;
3269
3270 case VMSTATE_LOADING:
3271 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3272 || enmStateNew == VMSTATE_LOAD_FAILURE
3273 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3274 break;
3275
3276 case VMSTATE_POWERING_ON:
3277 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3278 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3279 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3280 break;
3281
3282 case VMSTATE_RESUMING:
3283 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3284 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
3285 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3286 break;
3287
3288 case VMSTATE_RUNNING:
3289 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3290 || enmStateNew == VMSTATE_SUSPENDING
3291 || enmStateNew == VMSTATE_RESETTING
3292 || enmStateNew == VMSTATE_SOFT_RESETTING
3293 || enmStateNew == VMSTATE_RUNNING_LS
3294 || enmStateNew == VMSTATE_RUNNING_FT
3295 || enmStateNew == VMSTATE_DEBUGGING
3296 || enmStateNew == VMSTATE_FATAL_ERROR
3297 || enmStateNew == VMSTATE_GURU_MEDITATION
3298 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3299 break;
3300
3301 case VMSTATE_RUNNING_LS:
3302 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
3303 || enmStateNew == VMSTATE_SUSPENDING_LS
3304 || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
3305 || enmStateNew == VMSTATE_RESETTING_LS
3306 || enmStateNew == VMSTATE_SOFT_RESETTING_LS
3307 || enmStateNew == VMSTATE_RUNNING
3308 || enmStateNew == VMSTATE_DEBUGGING_LS
3309 || enmStateNew == VMSTATE_FATAL_ERROR_LS
3310 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
3311 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3312 break;
3313
3314 case VMSTATE_RUNNING_FT:
3315 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3316 || enmStateNew == VMSTATE_FATAL_ERROR
3317 || enmStateNew == VMSTATE_GURU_MEDITATION
3318 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3319 break;
3320
3321 case VMSTATE_RESETTING:
3322 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3323 break;
3324
3325 case VMSTATE_SOFT_RESETTING:
3326 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3327 break;
3328
3329 case VMSTATE_RESETTING_LS:
3330 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
3331 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3332 break;
3333
3334 case VMSTATE_SOFT_RESETTING_LS:
3335 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING_LS
3336 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3337 break;
3338
3339 case VMSTATE_SUSPENDING:
3340 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3341 break;
3342
3343 case VMSTATE_SUSPENDING_LS:
3344 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3345 || enmStateNew == VMSTATE_SUSPENDED_LS
3346 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3347 break;
3348
3349 case VMSTATE_SUSPENDING_EXT_LS:
3350 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3351 || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
3352 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3353 break;
3354
3355 case VMSTATE_SUSPENDED:
3356 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3357 || enmStateNew == VMSTATE_SAVING
3358 || enmStateNew == VMSTATE_RESETTING
3359 || enmStateNew == VMSTATE_SOFT_RESETTING
3360 || enmStateNew == VMSTATE_RESUMING
3361 || enmStateNew == VMSTATE_LOADING
3362 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3363 break;
3364
3365 case VMSTATE_SUSPENDED_LS:
3366 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3367 || enmStateNew == VMSTATE_SAVING
3368 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3369 break;
3370
3371 case VMSTATE_SUSPENDED_EXT_LS:
3372 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3373 || enmStateNew == VMSTATE_SAVING
3374 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3375 break;
3376
3377 case VMSTATE_SAVING:
3378 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3379 break;
3380
3381 case VMSTATE_DEBUGGING:
3382 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3383 || enmStateNew == VMSTATE_POWERING_OFF
3384 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3385 break;
3386
3387 case VMSTATE_DEBUGGING_LS:
3388 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3389 || enmStateNew == VMSTATE_RUNNING_LS
3390 || enmStateNew == VMSTATE_POWERING_OFF_LS
3391 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3392 break;
3393
3394 case VMSTATE_POWERING_OFF:
3395 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3396 break;
3397
3398 case VMSTATE_POWERING_OFF_LS:
3399 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3400 || enmStateNew == VMSTATE_OFF_LS
3401 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3402 break;
3403
3404 case VMSTATE_OFF:
3405 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3406 break;
3407
3408 case VMSTATE_OFF_LS:
3409 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3410 break;
3411
3412 case VMSTATE_FATAL_ERROR:
3413 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3414 break;
3415
3416 case VMSTATE_FATAL_ERROR_LS:
3417 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
3418 || enmStateNew == VMSTATE_POWERING_OFF_LS
3419 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3420 break;
3421
3422 case VMSTATE_GURU_MEDITATION:
3423 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3424 || enmStateNew == VMSTATE_POWERING_OFF
3425 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3426 break;
3427
3428 case VMSTATE_GURU_MEDITATION_LS:
3429 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
3430 || enmStateNew == VMSTATE_DEBUGGING_LS
3431 || enmStateNew == VMSTATE_POWERING_OFF_LS
3432 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3433 break;
3434
3435 case VMSTATE_LOAD_FAILURE:
3436 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3437 break;
3438
3439 case VMSTATE_DESTROYING:
3440 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3441 break;
3442
3443 case VMSTATE_TERMINATED:
3444 default:
3445 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3446 break;
3447 }
3448#endif /* VBOX_STRICT */
3449 return true;
3450}
3451
3452
3453/**
3454 * Does the state change callouts.
3455 *
3456 * The caller owns the AtStateCritSect.
3457 *
3458 * @param pVM The cross context VM structure.
3459 * @param pUVM The UVM handle.
3460 * @param enmStateNew The New state.
3461 * @param enmStateOld The old state.
3462 */
3463static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3464{
3465 LogRel(("Changing the VM state from '%s' to '%s'\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3466
3467 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3468 {
3469 pCur->pfnAtState(pUVM, enmStateNew, enmStateOld, pCur->pvUser);
3470 if ( enmStateNew != VMSTATE_DESTROYING
3471 && pVM->enmVMState == VMSTATE_DESTROYING)
3472 break;
3473 AssertMsg(pVM->enmVMState == enmStateNew,
3474 ("You are not allowed to change the state while in the change callback, except "
3475 "from destroying the VM. There are restrictions in the way the state changes "
3476 "are propagated up to the EM execution loop and it makes the program flow very "
3477 "difficult to follow. (%s, expected %s, old %s)\n",
3478 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3479 VMR3GetStateName(enmStateOld)));
3480 }
3481}
3482
3483
3484/**
3485 * Sets the current VM state, with the AtStatCritSect already entered.
3486 *
3487 * @param pVM The cross context VM structure.
3488 * @param pUVM The UVM handle.
3489 * @param enmStateNew The new state.
3490 * @param enmStateOld The old state.
3491 * @param fSetRatherThanClearFF The usual behavior is to clear the
3492 * VM_FF_CHECK_VM_STATE force flag, but for
3493 * some transitions (-> guru) we need to kick
3494 * the other EMTs to stop what they're doing.
3495 */
3496static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF)
3497{
3498 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3499
3500 AssertMsg(pVM->enmVMState == enmStateOld,
3501 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3502
3503 pUVM->vm.s.enmPrevVMState = enmStateOld;
3504 pVM->enmVMState = enmStateNew;
3505
3506 if (!fSetRatherThanClearFF)
3507 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3508 else if (pVM->cCpus > 0)
3509 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3510
3511 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3512}
3513
3514
3515/**
3516 * Sets the current VM state.
3517 *
3518 * @param pVM The cross context VM structure.
3519 * @param enmStateNew The new state.
3520 * @param enmStateOld The old state (for asserting only).
3521 */
3522static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3523{
3524 PUVM pUVM = pVM->pUVM;
3525 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3526
3527 RT_NOREF_PV(enmStateOld);
3528 AssertMsg(pVM->enmVMState == enmStateOld,
3529 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3530 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState, false /*fSetRatherThanClearFF*/);
3531
3532 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3533}
3534
3535
3536/**
3537 * Tries to perform a state transition.
3538 *
3539 * @returns The 1-based ordinal of the succeeding transition.
3540 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3541 *
3542 * @param pVM The cross context VM structure.
3543 * @param pszWho Who is trying to change it.
3544 * @param cTransitions The number of transitions in the ellipsis.
3545 * @param ... Transition pairs; new, old.
3546 */
3547static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
3548{
3549 va_list va;
3550 VMSTATE enmStateNew = VMSTATE_CREATED;
3551 VMSTATE enmStateOld = VMSTATE_CREATED;
3552
3553#ifdef VBOX_STRICT
3554 /*
3555 * Validate the input first.
3556 */
3557 va_start(va, cTransitions);
3558 for (unsigned i = 0; i < cTransitions; i++)
3559 {
3560 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3561 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3562 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3563 }
3564 va_end(va);
3565#endif
3566
3567 /*
3568 * Grab the lock and see if any of the proposed transitions works out.
3569 */
3570 va_start(va, cTransitions);
3571 int rc = VERR_VM_INVALID_VM_STATE;
3572 PUVM pUVM = pVM->pUVM;
3573 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3574
3575 VMSTATE enmStateCur = pVM->enmVMState;
3576
3577 for (unsigned i = 0; i < cTransitions; i++)
3578 {
3579 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3580 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3581 if (enmStateCur == enmStateOld)
3582 {
3583 vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld, false /*fSetRatherThanClearFF*/);
3584 rc = i + 1;
3585 break;
3586 }
3587 }
3588
3589 if (RT_FAILURE(rc))
3590 {
3591 /*
3592 * Complain about it.
3593 */
3594 if (cTransitions == 1)
3595 {
3596 LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
3597 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3598 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3599 N_("%s failed because the VM state is %s instead of %s"),
3600 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3601 AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
3602 pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
3603 }
3604 else
3605 {
3606 va_end(va);
3607 va_start(va, cTransitions);
3608 LogRel(("%s:\n", pszWho));
3609 for (unsigned i = 0; i < cTransitions; i++)
3610 {
3611 enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3612 enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
3613 LogRel(("%s%s -> %s",
3614 i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3615 }
3616 LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
3617 VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
3618 N_("%s failed because the current VM state, %s, was not found in the state transition table (old state %s)"),
3619 pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
3620 AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
3621 pszWho, VMR3GetStateName(enmStateCur)));
3622 }
3623 }
3624
3625 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3626 va_end(va);
3627 Assert(rc > 0 || rc < 0);
3628 return rc;
3629}
3630
3631
3632/**
3633 * Interface used by EM to signal that it's entering the guru meditation state.
3634 *
3635 * This will notifying other threads.
3636 *
3637 * @returns true if the state changed to Guru, false if no state change.
3638 * @param pVM The cross context VM structure.
3639 */
3640VMMR3_INT_DECL(bool) VMR3SetGuruMeditation(PVM pVM)
3641{
3642 PUVM pUVM = pVM->pUVM;
3643 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3644
3645 VMSTATE enmStateCur = pVM->enmVMState;
3646 bool fRc = true;
3647 if (enmStateCur == VMSTATE_RUNNING)
3648 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING, true /*fSetRatherThanClearFF*/);
3649 else if (enmStateCur == VMSTATE_RUNNING_LS)
3650 {
3651 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS, true /*fSetRatherThanClearFF*/);
3652 SSMR3Cancel(pUVM);
3653 }
3654 else
3655 fRc = false;
3656
3657 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3658 return fRc;
3659}
3660
3661
3662/**
3663 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3664 *
3665 * @param pVM The cross context VM structure.
3666 */
3667void vmR3SetTerminated(PVM pVM)
3668{
3669 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3670}
3671
3672
/**
 * Checks if the VM was teleported and hasn't been fully resumed yet.
 *
 * This applies to both sides of the teleportation since we may leave a working
 * clone behind and the user is allowed to resume this...
 *
 * @returns true / false.
 * @param   pVM     The cross context VM structure.
 * @thread  Any thread.
 */
VMMR3_INT_DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    /* Flag is set during teleportation and cleared once the VM is properly resumed. */
    return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
}
3688
3689
3690/**
3691 * Registers a VM state change callback.
3692 *
3693 * You are not allowed to call any function which changes the VM state from a
3694 * state callback.
3695 *
3696 * @returns VBox status code.
3697 * @param pUVM The VM handle.
3698 * @param pfnAtState Pointer to callback.
3699 * @param pvUser User argument.
3700 * @thread Any.
3701 */
3702VMMR3DECL(int) VMR3AtStateRegister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3703{
3704 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3705
3706 /*
3707 * Validate input.
3708 */
3709 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3710 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3711
3712 /*
3713 * Allocate a new record.
3714 */
3715 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3716 if (!pNew)
3717 return VERR_NO_MEMORY;
3718
3719 /* fill */
3720 pNew->pfnAtState = pfnAtState;
3721 pNew->pvUser = pvUser;
3722
3723 /* insert */
3724 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3725 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3726 *pUVM->vm.s.ppAtStateNext = pNew;
3727 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3728 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3729
3730 return VINF_SUCCESS;
3731}
3732
3733
3734/**
3735 * Deregisters a VM state change callback.
3736 *
3737 * @returns VBox status code.
3738 * @param pUVM The VM handle.
3739 * @param pfnAtState Pointer to callback.
3740 * @param pvUser User argument.
3741 * @thread Any.
3742 */
3743VMMR3DECL(int) VMR3AtStateDeregister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3744{
3745 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3746
3747 /*
3748 * Validate input.
3749 */
3750 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3751 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3752
3753 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3754
3755 /*
3756 * Search the list for the entry.
3757 */
3758 PVMATSTATE pPrev = NULL;
3759 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3760 while ( pCur
3761 && ( pCur->pfnAtState != pfnAtState
3762 || pCur->pvUser != pvUser))
3763 {
3764 pPrev = pCur;
3765 pCur = pCur->pNext;
3766 }
3767 if (!pCur)
3768 {
3769 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3770 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3771 return VERR_FILE_NOT_FOUND;
3772 }
3773
3774 /*
3775 * Unlink it.
3776 */
3777 if (pPrev)
3778 {
3779 pPrev->pNext = pCur->pNext;
3780 if (!pCur->pNext)
3781 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3782 }
3783 else
3784 {
3785 pUVM->vm.s.pAtState = pCur->pNext;
3786 if (!pCur->pNext)
3787 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3788 }
3789
3790 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3791
3792 /*
3793 * Free it.
3794 */
3795 pCur->pfnAtState = NULL;
3796 pCur->pNext = NULL;
3797 MMR3HeapFree(pCur);
3798
3799 return VINF_SUCCESS;
3800}
3801
3802
3803/**
3804 * Registers a VM error callback.
3805 *
3806 * @returns VBox status code.
3807 * @param pUVM The VM handle.
3808 * @param pfnAtError Pointer to callback.
3809 * @param pvUser User argument.
3810 * @thread Any.
3811 */
3812VMMR3DECL(int) VMR3AtErrorRegister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3813{
3814 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3815
3816 /*
3817 * Validate input.
3818 */
3819 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3820 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3821
3822 /*
3823 * Allocate a new record.
3824 */
3825 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3826 if (!pNew)
3827 return VERR_NO_MEMORY;
3828
3829 /* fill */
3830 pNew->pfnAtError = pfnAtError;
3831 pNew->pvUser = pvUser;
3832
3833 /* insert */
3834 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3835 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3836 *pUVM->vm.s.ppAtErrorNext = pNew;
3837 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3838 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3839
3840 return VINF_SUCCESS;
3841}
3842
3843
3844/**
3845 * Deregisters a VM error callback.
3846 *
3847 * @returns VBox status code.
3848 * @param pUVM The VM handle.
3849 * @param pfnAtError Pointer to callback.
3850 * @param pvUser User argument.
3851 * @thread Any.
3852 */
3853VMMR3DECL(int) VMR3AtErrorDeregister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3854{
3855 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3856
3857 /*
3858 * Validate input.
3859 */
3860 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3861 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3862
3863 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3864
3865 /*
3866 * Search the list for the entry.
3867 */
3868 PVMATERROR pPrev = NULL;
3869 PVMATERROR pCur = pUVM->vm.s.pAtError;
3870 while ( pCur
3871 && ( pCur->pfnAtError != pfnAtError
3872 || pCur->pvUser != pvUser))
3873 {
3874 pPrev = pCur;
3875 pCur = pCur->pNext;
3876 }
3877 if (!pCur)
3878 {
3879 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3880 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3881 return VERR_FILE_NOT_FOUND;
3882 }
3883
3884 /*
3885 * Unlink it.
3886 */
3887 if (pPrev)
3888 {
3889 pPrev->pNext = pCur->pNext;
3890 if (!pCur->pNext)
3891 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3892 }
3893 else
3894 {
3895 pUVM->vm.s.pAtError = pCur->pNext;
3896 if (!pCur->pNext)
3897 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3898 }
3899
3900 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3901
3902 /*
3903 * Free it.
3904 */
3905 pCur->pfnAtError = NULL;
3906 pCur->pNext = NULL;
3907 MMR3HeapFree(pCur);
3908
3909 return VINF_SUCCESS;
3910}
3911
3912
/**
 * Ellipsis to va_list wrapper for calling pfnAtError.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The at-error callback record to invoke.
 * @param   rc          The VBox status code to pass on.
 * @param   SRC_POS     The source position of the error.
 * @param   pszFormat   Format string.
 * @param   ...         Format arguments (forwarded as a va_list).
 */
static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    pCur->pfnAtError(pVM->pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
    va_end(va);
}
3923
3924
3925/**
3926 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3927 * The message is found in VMINT.
3928 *
3929 * @param pVM The cross context VM structure.
3930 * @thread EMT.
3931 */
3932VMMR3_INT_DECL(void) VMR3SetErrorWorker(PVM pVM)
3933{
3934 VM_ASSERT_EMT(pVM);
3935 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Congrats!\n"));
3936
3937 /*
3938 * Unpack the error (if we managed to format one).
3939 */
3940 PVMERROR pErr = pVM->vm.s.pErrorR3;
3941 const char *pszFile = NULL;
3942 const char *pszFunction = NULL;
3943 uint32_t iLine = 0;
3944 const char *pszMessage;
3945 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3946 if (pErr)
3947 {
3948 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3949 if (pErr->offFile)
3950 pszFile = (const char *)pErr + pErr->offFile;
3951 iLine = pErr->iLine;
3952 if (pErr->offFunction)
3953 pszFunction = (const char *)pErr + pErr->offFunction;
3954 if (pErr->offMessage)
3955 pszMessage = (const char *)pErr + pErr->offMessage;
3956 else
3957 pszMessage = "No message!";
3958 }
3959 else
3960 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3961
3962 /*
3963 * Call the at error callbacks.
3964 */
3965 PUVM pUVM = pVM->pUVM;
3966 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3967 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3968 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3969 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3970 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3971}
3972
3973
/**
 * Gets the number of errors raised via VMSetError.
 *
 * This can be used avoid double error messages.
 *
 * @returns The error count; 0 if the handle is invalid.
 * @param   pUVM    The VM handle.
 */
VMMR3_INT_DECL(uint32_t) VMR3GetErrorCount(PUVM pUVM)
{
    AssertPtrReturn(pUVM, 0);
    AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
    return pUVM->vm.s.cErrors;
}
3988
3989
/**
 * Creation time wrapper for vmR3SetErrorUV.
 *
 * Ellipsis-to-va_list adapter; forwards everything to vmR3SetErrorUV and
 * returns the given status code so callers can 'return vmR3SetErrorU(...)'.
 *
 * @returns rc.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   SRC_POS         The source position of this error.
 * @param   pszFormat       Format string.
 * @param   ...             The arguments.
 * @thread  Any thread.
 */
static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
    va_end(va);
    return rc;
}
4009
4010
4011/**
4012 * Worker which calls everyone listening to the VM error messages.
4013 *
4014 * @param pUVM Pointer to the user mode VM structure.
4015 * @param rc The VBox status code.
4016 * @param SRC_POS The source position of this error.
4017 * @param pszFormat Format string.
4018 * @param pArgs Pointer to the format arguments.
4019 * @thread EMT
4020 */
4021DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
4022{
4023 /*
4024 * Log the error.
4025 */
4026 va_list va3;
4027 va_copy(va3, *pArgs);
4028 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
4029 "VMSetError: %N\n",
4030 pszFile, iLine, pszFunction, rc,
4031 pszFormat, &va3);
4032 va_end(va3);
4033
4034#ifdef LOG_ENABLED
4035 va_copy(va3, *pArgs);
4036 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
4037 "%N\n",
4038 pszFile, iLine, pszFunction, rc,
4039 pszFormat, &va3);
4040 va_end(va3);
4041#endif
4042
4043 /*
4044 * Make a copy of the message.
4045 */
4046 if (pUVM->pVM)
4047 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
4048
4049 /*
4050 * Call the at error callbacks.
4051 */
4052 bool fCalledSomeone = false;
4053 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4054 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
4055 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
4056 {
4057 va_list va2;
4058 va_copy(va2, *pArgs);
4059 pCur->pfnAtError(pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
4060 va_end(va2);
4061 fCalledSomeone = true;
4062 }
4063 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4064}
4065
4066
/**
 * Sets the error message.
 *
 * Ellipsis wrapper for VMR3SetErrorV.
 *
 * @returns rc. Meaning you can do:
 *    @code
 *    return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
 *    @endcode
 * @param   pUVM            The user mode VM handle.
 * @param   rc              VBox status code.
 * @param   SRC_POS         Use RT_SRC_POS.
 * @param   pszFormat       Error message format string.
 * @param   ...             Error message arguments.
 * @thread  Any
 */
VMMR3DECL(int) VMR3SetError(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    int rcRet = VMR3SetErrorV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
    va_end(va);
    return rcRet;
}
4089
4090
/**
 * Sets the error message.
 *
 * @returns rc. Meaning you can do:
 *    @code
 *    return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
 *    @endcode
 * @param   pUVM            The user mode VM handle.
 * @param   rc              VBox status code.
 * @param   SRC_POS         Use RT_SRC_POS.
 * @param   pszFormat       Error message format string.
 * @param   va              Error message arguments.
 * @thread  Any
 */
VMMR3DECL(int) VMR3SetErrorV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);

    /* Take shortcut when called on EMT, skipping VM handle requirement + validation. */
    /* (A non-NIL thread handle means the calling thread is an EMT of this VM.) */
    if (VMR3GetVMCPUThread(pUVM) != NIL_RTTHREAD)
    {
        va_list vaCopy;
        va_copy(vaCopy, va);
        vmR3SetErrorUV(pUVM, rc, RT_SRC_POS_ARGS, pszFormat, &vaCopy);
        va_end(vaCopy);
        return rc;
    }

    /* Otherwise go through the shared VM structure, which must exist. */
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    return VMSetErrorV(pUVM->pVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
}
4122
4123
4124
4125/**
4126 * Registers a VM runtime error callback.
4127 *
4128 * @returns VBox status code.
4129 * @param pUVM The user mode VM structure.
4130 * @param pfnAtRuntimeError Pointer to callback.
4131 * @param pvUser User argument.
4132 * @thread Any.
4133 */
4134VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
4135{
4136 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
4137
4138 /*
4139 * Validate input.
4140 */
4141 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
4142 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4143
4144 /*
4145 * Allocate a new record.
4146 */
4147 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
4148 if (!pNew)
4149 return VERR_NO_MEMORY;
4150
4151 /* fill */
4152 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
4153 pNew->pvUser = pvUser;
4154
4155 /* insert */
4156 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4157 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
4158 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
4159 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
4160 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4161
4162 return VINF_SUCCESS;
4163}
4164
4165
4166/**
4167 * Deregisters a VM runtime error callback.
4168 *
4169 * @returns VBox status code.
4170 * @param pUVM The user mode VM handle.
4171 * @param pfnAtRuntimeError Pointer to callback.
4172 * @param pvUser User argument.
4173 * @thread Any.
4174 */
4175VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
4176{
4177 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
4178
4179 /*
4180 * Validate input.
4181 */
4182 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
4183 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4184
4185 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4186
4187 /*
4188 * Search the list for the entry.
4189 */
4190 PVMATRUNTIMEERROR pPrev = NULL;
4191 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
4192 while ( pCur
4193 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
4194 || pCur->pvUser != pvUser))
4195 {
4196 pPrev = pCur;
4197 pCur = pCur->pNext;
4198 }
4199 if (!pCur)
4200 {
4201 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
4202 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4203 return VERR_FILE_NOT_FOUND;
4204 }
4205
4206 /*
4207 * Unlink it.
4208 */
4209 if (pPrev)
4210 {
4211 pPrev->pNext = pCur->pNext;
4212 if (!pCur->pNext)
4213 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
4214 }
4215 else
4216 {
4217 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
4218 if (!pCur->pNext)
4219 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
4220 }
4221
4222 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4223
4224 /*
4225 * Free it.
4226 */
4227 pCur->pfnAtRuntimeError = NULL;
4228 pCur->pNext = NULL;
4229 MMR3HeapFree(pCur);
4230
4231 return VINF_SUCCESS;
4232}
4233
4234
4235/**
4236 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
4237 * the state to FatalError(LS).
4238 *
4239 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
4240 * return code, see FNVMMEMTRENDEZVOUS.)
4241 *
4242 * @param pVM The cross context VM structure.
4243 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4244 * @param pvUser Ignored.
4245 */
4246static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
4247{
4248 NOREF(pVCpu);
4249 Assert(!pvUser); NOREF(pvUser);
4250
4251 /*
4252 * The first EMT thru here changes the state.
4253 */
4254 if (pVCpu->idCpu == pVM->cCpus - 1)
4255 {
4256 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
4257 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
4258 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
4259 if (RT_FAILURE(rc))
4260 return rc;
4261 if (rc == 2)
4262 SSMR3Cancel(pVM->pUVM);
4263
4264 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
4265 }
4266
4267 /* This'll make sure we get out of whereever we are (e.g. REM). */
4268 return VINF_EM_SUSPEND;
4269}
4270
4271
/**
 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
 *
 * This does the common parts after the error has been saved / retrieved:
 * takes the requested pre-callback action (fatal state change or suspend),
 * then invokes all registered runtime-error callbacks.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszFormat       Format string.
 * @param   pVa             Pointer to the format arguments.
 */
static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
{
    LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
    PUVM pUVM = pVM->pUVM;

    /*
     * Take actions before the call.
     * FATAL takes precedence over SUSPEND when both flags are given.
     */
    int rc;
    if (fFlags & VMSETRTERR_FLAGS_FATAL)
        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
                                vmR3SetRuntimeErrorChangeState, NULL);
    else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
        rc = VMR3Suspend(pUVM, VMSUSPENDREASON_RUNTIME_ERROR);
    else
        rc = VINF_SUCCESS;

    /*
     * Do the callback round.  Each callback gets its own va_list copy.
     */
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
    ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
    for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
    {
        va_list va;
        va_copy(va, *pVa);
        pCur->pfnAtRuntimeError(pUVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
        va_end(va);
    }
    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);

    return rc;
}
4318
4319
/**
 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
 *
 * @returns VBox status code from vmR3SetRuntimeErrorCommon.
 * @param   pVM             The cross context VM structure.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszFormat       Format string.
 * @param   ...             Format arguments.
 */
static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
    va_end(va);
    return rc;
}
4331
4332
4333/**
4334 * This is a worker function for RC and Ring-0 calls to VMSetError and
4335 * VMSetErrorV.
4336 *
4337 * The message is found in VMINT.
4338 *
4339 * @returns VBox status code, see VMSetRuntimeError.
4340 * @param pVM The cross context VM structure.
4341 * @thread EMT.
4342 */
4343VMMR3_INT_DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4344{
4345 VM_ASSERT_EMT(pVM);
4346 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4347
4348 /*
4349 * Unpack the error (if we managed to format one).
4350 */
4351 const char *pszErrorId = "SetRuntimeError";
4352 const char *pszMessage = "No message!";
4353 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4354 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4355 if (pErr)
4356 {
4357 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4358 if (pErr->offErrorId)
4359 pszErrorId = (const char *)pErr + pErr->offErrorId;
4360 if (pErr->offMessage)
4361 pszMessage = (const char *)pErr + pErr->offMessage;
4362 fFlags = pErr->fFlags;
4363 }
4364
4365 /*
4366 * Join cause with vmR3SetRuntimeErrorV.
4367 */
4368 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4369}
4370
4371
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * Takes ownership of @a pszMessage and frees it before returning.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszMessage      The error message residing the MM heap.
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
{
#if 0 /** @todo make copy of the error msg. */
    /*
     * Make a copy of the message.
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);
#endif

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
    MMR3HeapFree(pszMessage); /* We own the message; release it after the callbacks are done. */
    return rc;
}
4403
4404
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszFormat       Format string.
 * @param   pVa             Pointer to the format arguments.
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
{
    /*
     * Make a copy of the message.
     * (Use a va_list copy here so *pVa stays usable for the common worker below.)
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
}
4433
4434
4435/**
4436 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4437 *
4438 * This can be used avoid double error messages.
4439 *
4440 * @returns The runtime error count.
4441 * @param pUVM The user mode VM handle.
4442 */
4443VMMR3_INT_DECL(uint32_t) VMR3GetRuntimeErrorCount(PUVM pUVM)
4444{
4445 return pUVM->vm.s.cRuntimeErrors;
4446}
4447
4448
/**
 * Gets the ID of the virtual CPU associated with the calling thread.
 *
 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
{
    /* The per-EMT UVMCPU pointer is stashed in thread-local storage. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
    return pUVCpu
         ? pUVCpu->idCpu
         : NIL_VMCPUID;
}
4463
4464
4465/**
4466 * Checks if the VM is long-mode (64-bit) capable or not.
4467 *
4468 * @returns true if VM can operate in long-mode, false otherwise.
4469 * @param pVM The cross context VM structure.
4470 */
4471VMMR3_INT_DECL(bool) VMR3IsLongModeAllowed(PVM pVM)
4472{
4473 switch (pVM->bMainExecutionEngine)
4474 {
4475 case VM_EXEC_ENGINE_HW_VIRT:
4476 return HMIsLongModeAllowed(pVM);
4477
4478 case VM_EXEC_ENGINE_NATIVE_API:
4479#ifndef IN_RC
4480 return NEMHCIsLongModeAllowed(pVM);
4481#else
4482 return false;
4483#endif
4484
4485 case VM_EXEC_ENGINE_NOT_SET:
4486 AssertFailed();
4487 RT_FALL_THRU();
4488 default:
4489 return false;
4490 }
4491}
4492
4493
4494/**
4495 * Returns the native ID of the current EMT VMCPU thread.
4496 *
4497 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4498 * @param pVM The cross context VM structure.
4499 * @thread EMT
4500 */
4501VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4502{
4503 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4504
4505 if (!pUVCpu)
4506 return NIL_RTNATIVETHREAD;
4507
4508 return pUVCpu->vm.s.NativeThreadEMT;
4509}
4510
4511
4512/**
4513 * Returns the native ID of the current EMT VMCPU thread.
4514 *
4515 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4516 * @param pUVM The user mode VM structure.
4517 * @thread EMT
4518 */
4519VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4520{
4521 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4522
4523 if (!pUVCpu)
4524 return NIL_RTNATIVETHREAD;
4525
4526 return pUVCpu->vm.s.NativeThreadEMT;
4527}
4528
4529
4530/**
4531 * Returns the handle of the current EMT VMCPU thread.
4532 *
4533 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4534 * @param pUVM The user mode VM handle.
4535 * @thread EMT
4536 */
4537VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PUVM pUVM)
4538{
4539 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4540
4541 if (!pUVCpu)
4542 return NIL_RTTHREAD;
4543
4544 return pUVCpu->vm.s.ThreadEMT;
4545}
4546
4547
/**
 * Returns the handle of the current EMT VMCPU thread.
 *
 * Plain accessor; the caller supplies the per-CPU structure directly.
 *
 * @returns The IPRT thread handle.
 * @param   pUVCpu  The user mode CPU handle.
 * @thread  EMT
 */
VMMR3_INT_DECL(RTTHREAD) VMR3GetThreadHandle(PUVMCPU pUVCpu)
{
    return pUVCpu->vm.s.ThreadEMT;
}
4559
4560
/**
 * Return the package and core ID of a CPU.
 *
 * The mapping depends on the VBOX_WITH_MULTI_CORE build option: with it, each
 * virtual CPU is a core of a single package; without it, each virtual CPU is
 * its own package.
 *
 * @returns VBOX status code.
 * @param   pUVM             The user mode VM handle.
 * @param   idCpu            Virtual CPU to get the ID from.
 * @param   pidCpuCore       Where to store the core ID of the virtual CPU.
 * @param   pidCpuPackage    Where to store the package ID of the virtual CPU.
 *
 */
VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PUVM pUVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
{
    /*
     * Validate input.
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
    AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
    if (idCpu >= pVM->cCpus)
        return VERR_INVALID_CPU_ID;

    /*
     * Set return values.
     */
#ifdef VBOX_WITH_MULTI_CORE
    *pidCpuCore    = idCpu;
    *pidCpuPackage = 0;
#else
    *pidCpuCore    = 0;
    *pidCpuPackage = idCpu;
#endif

    return VINF_SUCCESS;
}
4597
4598
/**
 * Worker for VMR3HotUnplugCpu.
 *
 * @returns VINF_EM_WAIT_SIPI (strict status code).
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The current CPU.
 */
static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
{
    PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Reset per CPU resources.
     *
     * Actually only needed for VT-x because the CPU seems to be still in some
     * paged mode and startup fails after a new hot plug event. SVM works fine
     * even without this.
     */
    Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
    PGMR3ResetCpu(pVM, pVCpu);
    PDMR3ResetCpu(pVCpu);
    TRPMR3ResetCpu(pVCpu);
    CPUMR3ResetCpu(pVM, pVCpu);
    EMR3ResetCpu(pVCpu);
    HMR3ResetCpu(pVCpu);
    NEMR3ResetCpu(pVCpu, false /*fInitIpi*/);
    /* Park the CPU until it receives a startup IPI. */
    return VINF_EM_WAIT_SIPI;
}
4628
4629
/**
 * Hot-unplugs a CPU from the guest.
 *
 * The work is queued to the target CPU's EMT without waiting for completion.
 *
 * @returns VBox status code.
 * @param   pUVM    The user mode VM handle.
 * @param   idCpu   Virtual CPU to perform the hot unplugging operation on.
 */
VMMR3DECL(int) VMR3HotUnplugCpu(PUVM pUVM, VMCPUID idCpu)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);

    /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
     *        broadcast requests.  Just note down somewhere that the CPU is
     *        offline and send it to SPIP wait.  Maybe modify VMCPUSTATE and push
     *        it out of the EM loops when offline. */
    return VMR3ReqCallNoWaitU(pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
}
4650
4651
/**
 * Hot-plugs a CPU on the guest.
 *
 * Currently only validates the input; the plugging itself is not implemented
 * (see the todo below).
 *
 * @returns VBox status code.
 * @param   pUVM    The user mode VM handle.
 * @param   idCpu   Virtual CPU to perform the hot plugging operation on.
 */
VMMR3DECL(int) VMR3HotPlugCpu(PUVM pUVM, VMCPUID idCpu)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);

    /** @todo r-bird: Just mark it online and make sure it waits on SPIP. */
    return VINF_SUCCESS;
}
4669
4670
/**
 * Changes the VMM execution cap.
 *
 * @returns VBox status code.
 * @param   pUVM                The user mode VM structure.
 * @param   uCpuExecutionCap    New CPU execution cap in percent, 1-100. Where
 *                              100 is max performance (default).
 */
VMMR3DECL(int) VMR3SetCpuExecutionCap(PUVM pUVM, uint32_t uCpuExecutionCap)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);

    Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
    /* Note: not called from EMT. */
    pVM->uCpuExecutionCap = uCpuExecutionCap;
    return VINF_SUCCESS;
}
4691
4692
/**
 * Control whether the VM should power off when resetting.
 *
 * @returns VBox status code.
 * @param   pUVM                The user mode VM handle.
 * @param   fPowerOffInsteadOfReset    Flag whether the VM should power off when
 *                              resetting.
 */
VMMR3DECL(int) VMR3SetPowerOffInsteadOfReset(PUVM pUVM, bool fPowerOffInsteadOfReset)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* Note: not called from EMT. */
    pVM->vm.s.fPowerOffInsteadOfReset = fPowerOffInsteadOfReset;
    return VINF_SUCCESS;
}
4711
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette