VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 93661

Last change on this file since 93661 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 157.9 KB
Line 
1/* $Id: VM.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing request for execution in EMT (serialization purposes
23 * mostly) and for reporting error back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake, all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
30 * time when the VMM bit was a kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
32 * However as it turned out, VMM(.cpp) is almost empty all it provides in ring-3
33 * is some minor functionally and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straight forward
36 * refactoring, the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41
42/*********************************************************************************************************************************
43* Header Files *
44*********************************************************************************************************************************/
45#define LOG_GROUP LOG_GROUP_VM
46#include <VBox/vmm/cfgm.h>
47#include <VBox/vmm/vmm.h>
48#include <VBox/vmm/gvmm.h>
49#include <VBox/vmm/mm.h>
50#include <VBox/vmm/cpum.h>
51#include <VBox/vmm/selm.h>
52#include <VBox/vmm/trpm.h>
53#include <VBox/vmm/dbgf.h>
54#include <VBox/vmm/pgm.h>
55#include <VBox/vmm/pdmapi.h>
56#include <VBox/vmm/pdmdev.h>
57#include <VBox/vmm/pdmcritsect.h>
58#include <VBox/vmm/em.h>
59#include <VBox/vmm/iem.h>
60#include <VBox/vmm/nem.h>
61#include <VBox/vmm/apic.h>
62#include <VBox/vmm/tm.h>
63#include <VBox/vmm/stam.h>
64#include <VBox/vmm/iom.h>
65#include <VBox/vmm/ssm.h>
66#include <VBox/vmm/hm.h>
67#include <VBox/vmm/gim.h>
68#include "VMInternal.h"
69#include <VBox/vmm/vmcc.h>
70
71#include <VBox/sup.h>
72#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
73# include <VBox/VBoxTpG.h>
74#endif
75#include <VBox/dbg.h>
76#include <VBox/err.h>
77#include <VBox/param.h>
78#include <VBox/log.h>
79#include <iprt/assert.h>
80#include <iprt/alloca.h>
81#include <iprt/asm.h>
82#include <iprt/env.h>
83#include <iprt/mem.h>
84#include <iprt/semaphore.h>
85#include <iprt/string.h>
86#ifdef RT_OS_DARWIN
87# include <iprt/system.h>
88#endif
89#include <iprt/time.h>
90#include <iprt/thread.h>
91#include <iprt/uuid.h>
92
93
94/*********************************************************************************************************************************
95* Internal Functions *
96*********************************************************************************************************************************/
97static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
98static DECLCALLBACK(int) vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
99static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus);
100static int vmR3InitRing3(PVM pVM, PUVM pUVM);
101static int vmR3InitRing0(PVM pVM);
102static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
103static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
104static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
105static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
106static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
107static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF);
108static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
109static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...) RT_IPRT_FORMAT_ATTR(6, 7);
110
111
/**
 * Creates a virtual machine by calling the supplied configuration constructor.
 *
 * On successful return the VM has been created (state VMSTATE_CREATED);
 * VMR3PowerOn() should be called to start the execution.
 *
 * @returns 0 on success.
 * @returns VBox error code on failure.
 * @param   cCpus               Number of virtual CPUs for the new VM.
 * @param   pVmm2UserMethods    An optional method table that the VMM can use
 *                              to make the user perform various action, like
 *                              for instance state saving.
 * @param   pfnVMAtError        Pointer to callback function for setting VM
 *                              errors. This was added as an implicit call to
 *                              VMR3AtErrorRegister() since there is no way the
 *                              caller can get to the VM handle early enough to
 *                              do this on its own.
 *                              This is called in the context of an EMT.
 * @param   pvUserVM            The user argument passed to pfnVMAtError.
 * @param   pfnCFGMConstructor  Pointer to callback function for constructing the VM configuration tree.
 *                              This is called in the context of an EMT0.
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 * @param   ppVM                Where to optionally store the 'handle' of the
 *                              created VM.
 * @param   ppUVM               Where to optionally store the user 'handle' of
 *                              the created VM, this includes one reference as
 *                              if VMR3RetainUVM() was called. The caller
 *                              *MUST* remember to pass the returned value to
 *                              VMR3ReleaseUVM() once done with the handle.
 */
VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
                          PFNVMATERROR pfnVMAtError, void *pvUserVM,
                          PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
                          PVM *ppVM, PUVM *ppUVM)
{
    LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p ppUVM=%p\n",
             cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM, ppUVM));

    /*
     * Validate the optional VMM -> user method table (magic, version and
     * the individual optional callback pointers).
     */
    if (pVmm2UserMethods)
    {
        AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
        AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
    }
    AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
    AssertPtrNullReturn(ppVM, VERR_INVALID_POINTER);
    AssertPtrNullReturn(ppUVM, VERR_INVALID_POINTER);
    /* The caller must ask for at least one of the two handles. */
    AssertReturn(ppVM || ppUVM, VERR_INVALID_PARAMETER);

    /*
     * Validate input.
     */
    AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);

    /*
     * Create the UVM so we can register the at-error callback
     * and consolidate a bit of cleanup code.
     */
    PUVM pUVM = NULL;                   /* shuts up gcc */
    int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
    if (RT_FAILURE(rc))
        return rc;
    if (pfnVMAtError)
        rc = VMR3AtErrorRegister(pUVM, pfnVMAtError, pvUserVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the support library creating the session for this VM.
         */
        rc = SUPR3Init(&pUVM->vm.s.pSession);
        if (RT_SUCCESS(rc))
        {
#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
            /* Now that we've opened the device, we can register trace probes.
               The CmpXchg makes sure only the first VM in the process does this. */
            static bool s_fRegisteredProbes = false;
            if (!SUPR3IsDriverless() && ASMAtomicCmpXchgBool(&s_fRegisteredProbes, true, false))
                SUPR3TracerRegisterModule(~(uintptr_t)0, "VBoxVMM", &g_VTGObjHeader, (uintptr_t)&g_VTGObjHeader,
                                          SUP_TRACER_UMOD_FLAGS_SHARED);
#endif

            /*
             * Call vmR3CreateU in the EMT thread and wait for it to finish.
             *
             * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
             *       submitting a request to a specific VCPU without a pVM. So, to make
             *       sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
             *       that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
             */
            PVMREQ pReq;
            rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
                              (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
            if (RT_SUCCESS(rc))
            {
                rc = pReq->iStatus;
                VMR3ReqFree(pReq);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Success! Hand out the requested handle(s); ppUVM gets an
                     * extra reference which the caller must release.
                     */
                    if (ppVM)
                        *ppVM = pUVM->pVM;
                    if (ppUVM)
                    {
                        VMR3RetainUVM(pUVM);
                        *ppUVM = pUVM;
                    }
                    LogFlow(("VMR3Create: returns VINF_SUCCESS (pVM=%p, pUVM=%p\n", pUVM->pVM, pUVM));
                    return VINF_SUCCESS;
                }
            }
            else
                AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));

            /*
             * An error occurred during VM creation. Set the error message directly
             * using the initial callback, as the callback list might not exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VMX_IN_VMX_ROOT_MODE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't operate in VMX root mode. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
#endif
                    break;

#ifndef RT_OS_DARWIN
                case VERR_HM_CONFIG_MISMATCH:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "This hardware extension is required by the VM configuration");
                    break;
#endif

                case VERR_SVM_IN_USE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't enable the AMD-V extension. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
#endif
                    break;

#ifdef RT_OS_LINUX
                case VERR_SUPDRV_COMPONENT_NOT_FOUND:
                    pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
                                  "that VirtualBox is correctly installed, and if you are using EFI "
                                  "Secure Boot that the modules are signed if necessary in the right "
                                  "way for your host system. Then try to recompile and reload the "
                                  "kernel modules by executing "
                                  "'/sbin/vboxconfig' as root");
                    break;
#endif

                case VERR_RAW_MODE_INVALID_SMP:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "VirtualBox requires this hardware extension to emulate more than one "
                                  "guest CPU");
                    break;

                case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
#ifdef RT_OS_LINUX
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
                                  "the VT-x extension in the VM settings. Note that without VT-x you have "
                                  "to reduce the number of guest CPUs to one");
#else
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel or disable the VT-x extension in the "
                                  "VM settings. Note that without VT-x you have to reduce the number of guest "
                                  "CPUs to one");
#endif
                    break;

                case VERR_PDM_DEVICE_NOT_FOUND:
                    pszError = N_("A virtual device is configured in the VM settings but the device "
                                  "implementation is missing.\n"
                                  "A possible reason for this error is a missing extension pack. Note "
                                  "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
                                  "support and remote desktop) are only available from an 'extension "
                                  "pack' which must be downloaded and installed separately");
                    break;

                case VERR_PCI_PASSTHROUGH_NO_HM:
                    pszError = N_("PCI passthrough requires VT-x/AMD-V");
                    break;

                case VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING:
                    pszError = N_("PCI passthrough requires nested paging");
                    break;

                default:
                    /* Only synthesize a message if nobody set one via the
                       at-error callback chain already. */
                    if (VMR3GetErrorCount(pUVM) == 0)
                    {
                        pszError = (char *)alloca(1024);
                        RTErrQueryMsgFull(rc, (char *)pszError, 1024, false /*fFailIfUnknown*/);
                    }
                    else
                        pszError = NULL; /* already set. */
                    break;
            }
            if (pszError)
                vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
        else
        {
            /*
             * An error occurred at support library initialization time (before the
             * VM could be created). Set the error message directly using the
             * initial callback, as the callback list doesn't exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VM_DRIVER_LOAD_ERROR:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
                                  "was either not loaded, /dev/vboxdrv is not set up properly, "
                                  "or you are using EFI Secure Boot and the module is not signed "
                                  "in the right way for your system. If necessary, try setting up "
                                  "the kernel module again by executing "
                                  "'/sbin/vboxconfig' as root");
#else
                    pszError = N_("VirtualBox kernel driver not loaded");
#endif
                    break;
                case VERR_VM_DRIVER_OPEN_ERROR:
                    pszError = N_("VirtualBox kernel driver cannot be opened");
                    break;
                case VERR_VM_DRIVER_NOT_ACCESSIBLE:
#ifdef VBOX_WITH_HARDENING
                    /* This should only happen if the executable wasn't hardened - bad code/build. */
                    pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
                                  "Re-install VirtualBox. If you are building it yourself, you "
                                  "should make sure it installed correctly and that the setuid "
                                  "bit is set on the executables calling VMR3Create.");
#else
                    /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
# if defined(RT_OS_DARWIN)
                    pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do not "
                                  "have the vboxdrv KEXT from a different build or installation loaded.");
# elif defined(RT_OS_LINUX)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different build or "
                                  "installation loaded. Also, make sure the vboxdrv udev rule gives "
                                  "you the permission you need to access the device.");
# elif defined(RT_OS_WINDOWS)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
# else /* solaris, freebsd, ++. */
                    pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different install loaded.");
# endif
#endif
                    break;
                case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
                case VERR_VM_DRIVER_NOT_INSTALLED:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not Installed. The vboxdrv kernel module "
                                  "was either not loaded, /dev/vboxdrv is not set up properly, "
                                  "or you are using EFI Secure Boot and the module is not signed "
                                  "in the right way for your system. If necessary, try setting up "
                                  "the kernel module again by executing "
                                  "'/sbin/vboxconfig' as root");
#else
                    pszError = N_("VirtualBox kernel driver not installed");
#endif
                    break;
                case VERR_NO_MEMORY:
                    pszError = N_("VirtualBox support library out of memory");
                    break;
                case VERR_VERSION_MISMATCH:
                case VERR_VM_DRIVER_VERSION_MISMATCH:
                    pszError = N_("The VirtualBox support driver which is running is from a different "
                                  "version of VirtualBox. You can correct this by stopping all "
                                  "running instances of VirtualBox and reinstalling the software.");
                    break;
                default:
                    pszError = N_("Unknown error initializing kernel driver");
                    AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
            }
            vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
    }

    /* cleanup */
    vmR3DestroyUVM(pUVM, 2000);
    LogFlow(("VMR3Create: returns %Rrc\n", rc));
    return rc;
}
415
416
/**
 * Creates the UVM.
 *
 * Note that while this function does not initialize the support library,
 * vmR3DestroyUVM will nevertheless terminate it.
 *
 * @returns VBox status code.
 * @param   cCpus               Number of virtual CPUs
 * @param   pVmm2UserMethods    Pointer to the optional VMM -> User method
 *                              table.
 * @param   ppUVM               Where to store the UVM pointer.
 */
static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
{
    uint32_t i;

    /*
     * Create and initialize the UVM.
     *
     * The structure is page allocated (zeroed) with a variable sized aCpus
     * array at the end; freed with RTMemPageFree using the same size below.
     */
    PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_UOFFSETOF_DYN(UVM, aCpus[cCpus]));
    AssertReturn(pUVM, VERR_NO_MEMORY);
    pUVM->u32Magic = UVM_MAGIC;
    pUVM->cCpus = cCpus;
    pUVM->pVmm2UserMethods = pVmm2UserMethods;

    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));

    pUVM->vm.s.cUvmRefs = 1;
    /* Point the list tail pointers at the (empty) list heads. */
    pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;

    pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
    RTUuidClear(&pUVM->vm.s.Uuid);

    /* Initialize the VMCPU array in the UVM. */
    for (i = 0; i < cCpus; i++)
    {
        pUVM->aCpus[i].pUVM   = pUVM;
        pUVM->aCpus[i].idCpu  = i;
    }

    /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
    int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* Allocate a halt method event semaphore for each VCPU.
           All handles are set to NIL first so the cleanup loop below is safe
           even if semaphore creation fails part way through. */
        for (i = 0; i < cCpus; i++)
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        for (i = 0; i < cCpus; i++)
        {
            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
                     */
                    rc = PDMR3InitUVM(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = STAMR3InitUVM(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = MMR3InitUVM(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                /*
                                 * Start the emulation threads for all VMCPUs.
                                 */
                                for (i = 0; i < cCpus; i++)
                                {
                                    rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i],
                                                         _1M, RTTHREADTYPE_EMULATION,
                                                         RTTHREADFLAGS_WAITABLE | RTTHREADFLAGS_COM_MTA | RTTHREADFLAGS_NO_SIGNALS,
                                                         cCpus > 1 ? "EMT-%u" : "EMT", i);
                                    if (RT_FAILURE(rc))
                                        break;

                                    pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
                                }

                                if (RT_SUCCESS(rc))
                                {
                                    *ppUVM = pUVM;
                                    return VINF_SUCCESS;
                                }

                                /* bail out. (Cleanup runs in the exact reverse order of construction.) */
                                while (i-- > 0)
                                {
                                    /** @todo rainy day: terminate the EMTs. */
                                }
                                MMR3TermUVM(pUVM);
                            }
                            STAMR3TermUVM(pUVM);
                        }
                        PDMR3TermUVM(pUVM);
                    }
                    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
                }
                RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
            }
        }
        /* Destroying a NIL semaphore is harmless, so this covers partial creation too. */
        for (i = 0; i < cCpus; i++)
        {
            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        }
        RTTlsFree(pUVM->vm.s.idxTLS);
    }
    RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
    return rc;
}
540
541
/**
 * Creates and initializes the VM.
 *
 * @returns VBox status code.
 * @param   pUVM                The user mode VM structure.
 * @param   cCpus               Number of virtual CPUs (must match the UVM).
 * @param   pfnCFGMConstructor  Optional callback for constructing the VM
 *                              configuration tree (see VMR3Create).
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 * @thread  EMT
 */
static DECLCALLBACK(int) vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
{
#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
    /*
     * Require SSE2 to be present (already checked for in supdrv, so we
     * shouldn't ever really get here).
     */
    if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
    {
        LogRel(("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1)));
        return VERR_UNSUPPORTED_CPU;
    }
#endif


    /*
     * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
     */
    if (!SUPR3IsDriverless())
    {
        int rc = PDMR3LdrLoadVMMR0U(pUVM);
        if (RT_FAILURE(rc))
        {
            /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
             * bird: what about moving the message down here? Main picks the first message, right? */
            if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
                return rc; /* proper error message set later on */
            return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
        }
    }

    /*
     * Request GVMM to create a new VM for us.
     */
    RTR0PTR pVMR0;
    int rc = GVMMR3CreateVM(pUVM, cCpus, pUVM->vm.s.pSession, &pUVM->pVM, &pVMR0);
    if (RT_SUCCESS(rc))
    {
        PVM pVM = pUVM->pVM;
        AssertRelease(RT_VALID_PTR(pVM));
        AssertRelease(pVM->pVMR0ForCall == pVMR0);
        AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
        AssertRelease(pVM->cCpus == cCpus);
        AssertRelease(pVM->uCpuExecutionCap == 100);
        AssertCompileMemberAlignment(VM, cpum, 64);
        AssertCompileMemberAlignment(VM, tm, 64);

        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n", pUVM, pVM, pVMR0, pVM->hSelf, pVM->cCpus));

        /*
         * Initialize the VM structure and our internal data (VMINT).
         * Cross-link the UVM per-CPU structures with their VM counterparts.
         */
        pVM->pUVM = pUVM;

        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            PVMCPU pVCpu = pVM->apCpusR3[i];
            pVCpu->pUVCpu            = &pUVM->aCpus[i];
            pVCpu->idCpu             = i;
            pVCpu->hNativeThread     = pUVM->aCpus[i].vm.s.NativeThreadEMT;
            pVCpu->hThread           = pUVM->aCpus[i].vm.s.ThreadEMT;
            Assert(pVCpu->hNativeThread != NIL_RTNATIVETHREAD);
            /* hNativeThreadR0 is initialized on EMT registration. */
            pUVM->aCpus[i].pVCpu     = pVCpu;
            pUVM->aCpus[i].pVM       = pVM;
        }

        /*
         * Init the configuration.
         */
        rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
        if (RT_SUCCESS(rc))
        {
            rc = vmR3ReadBaseConfig(pVM, pUVM, cCpus);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Init the ring-3 components and ring-3 per cpu data, finishing it off
                 * by a relocation round (intermediate context finalization will do this).
                 */
                rc = vmR3InitRing3(pVM, pUVM);
                if (RT_SUCCESS(rc))
                {
                    LogFlow(("Ring-3 init succeeded\n"));

                    /*
                     * Init the Ring-0 components.
                     */
                    rc = vmR3InitRing0(pVM);
                    if (RT_SUCCESS(rc))
                    {
                        /* Relocate again, because some switcher fixups depends on R0 init results. */
                        VMR3Relocate(pVM, 0 /* offDelta */);

#ifdef VBOX_WITH_DEBUGGER
                        /*
                         * Init the tcp debugger console if we're building
                         * with debugger support.
                         */
                        void *pvUser = NULL;
                        rc = DBGCIoCreate(pUVM, &pvUser);
                        if (    RT_SUCCESS(rc)
                            ||  rc == VERR_NET_ADDRESS_IN_USE) /* non-fatal: another VM holds the port */
                        {
                            pUVM->vm.s.pvDBGC = pvUser;
#endif
                            /*
                             * Now we can safely set the VM halt method to default.
                             */
                            rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
                            if (RT_SUCCESS(rc))
                            {
                                /*
                                 * Set the state and we're done.
                                 */
                                vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
                                return VINF_SUCCESS;
                            }
#ifdef VBOX_WITH_DEBUGGER
                            DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
                            pUVM->vm.s.pvDBGC = NULL;
                        }
#endif
                        //..
                    }
                    vmR3Destroy(pVM);
                }
            }
            //..

            /* Clean CFGM. */
            int rc2 = CFGMR3Term(pVM);
            AssertRC(rc2);
        }

        /*
         * Do automatic cleanups while the VM structure is still alive and all
         * references to it are still working.
         */
        PDMR3CritSectBothTerm(pVM);

        /*
         * Drop all references to VM and the VMCPU structures, then
         * tell GVMM to destroy the VM.
         */
        pUVM->pVM = NULL;
        for (VMCPUID i = 0; i < pUVM->cCpus; i++)
        {
            pUVM->aCpus[i].pVM = NULL;
            pUVM->aCpus[i].pVCpu = NULL;
        }
        Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);

        if (pUVM->cCpus > 1)
        {
            /* Poke the other EMTs since they may have stale pVM and pVCpu references
               on the stack (see VMR3WaitU for instance) if they've been awakened after
               VM creation. */
            for (VMCPUID i = 1; i < pUVM->cCpus; i++)
                VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
            RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
        }

        int rc2 = GVMMR3DestroyVM(pUVM, pVM);
        AssertRC(rc2);
    }
    else
        vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));

    LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
    return rc;
}
719
720
721/**
722 * Reads the base configuation from CFGM.
723 *
724 * @returns VBox status code.
725 * @param pVM The cross context VM structure.
726 * @param pUVM The user mode VM structure.
727 * @param cCpus The CPU count given to VMR3Create.
728 */
729static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus)
730{
731 int rc;
732 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
733
734 /*
735 * Base EM and HM config properties.
736 */
737 pVM->fHMEnabled = true;
738
739 /*
740 * Make sure the CPU count in the config data matches.
741 */
742 uint32_t cCPUsCfg;
743 rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
744 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc), rc);
745 AssertLogRelMsgReturn(cCPUsCfg == cCpus,
746 ("Configuration error: \"NumCPUs\"=%RU32 and VMR3Create::cCpus=%RU32 does not match!\n",
747 cCPUsCfg, cCpus),
748 VERR_INVALID_PARAMETER);
749
750 /*
751 * Get the CPU execution cap.
752 */
753 rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
754 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc), rc);
755
756 /*
757 * Get the VM name and UUID.
758 */
759 rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
760 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc), rc);
761
762 rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
763 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
764 rc = VINF_SUCCESS;
765 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc), rc);
766
767 rc = CFGMR3QueryBoolDef(pRoot, "PowerOffInsteadOfReset", &pVM->vm.s.fPowerOffInsteadOfReset, false);
768 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"PowerOffInsteadOfReset\" failed, rc=%Rrc\n", rc), rc);
769
770 return VINF_SUCCESS;
771}
772
773
774/**
775 * Initializes all R3 components of the VM
776 */
777static int vmR3InitRing3(PVM pVM, PUVM pUVM)
778{
779 int rc;
780
781 /*
782 * Register the other EMTs with GVM.
783 */
784 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
785 {
786 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)GVMMR3RegisterVCpu, 2, pVM, idCpu);
787 if (RT_FAILURE(rc))
788 return rc;
789 }
790
791 /*
792 * Register statistics.
793 */
794 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
795 {
796 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/CPU%d/VM/Halt/Yield", idCpu);
797 AssertRC(rc);
798 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/CPU%d/VM/Halt/Block", idCpu);
799 AssertRC(rc);
800 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/CPU%d/VM/Halt/BlockOverslept", idCpu);
801 AssertRC(rc);
802 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/CPU%d/VM/Halt/BlockInsomnia", idCpu);
803 AssertRC(rc);
804 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/CPU%d/VM/Halt/BlockOnTime", idCpu);
805 AssertRC(rc);
806 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/CPU%d/VM/Halt/Timers", idCpu);
807 AssertRC(rc);
808 }
809
810 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
811 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
812 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
813 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
814 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
815 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
816 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
817 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
818
819 /* Statistics for ring-0 components: */
820 STAM_REL_REG(pVM, &pVM->R0Stats.gmm.cChunkTlbHits, STAMTYPE_COUNTER, "/GMM/ChunkTlbHits", STAMUNIT_OCCURENCES, "GMMR0PageIdToVirt chunk TBL hits");
821 STAM_REL_REG(pVM, &pVM->R0Stats.gmm.cChunkTlbMisses, STAMTYPE_COUNTER, "/GMM/ChunkTlbMisses", STAMUNIT_OCCURENCES, "GMMR0PageIdToVirt chunk TBL misses");
822
823 /*
824 * Init all R3 components, the order here might be important.
825 * NEM and HM shall be initialized first!
826 */
827 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NOT_SET);
828 rc = NEMR3InitConfig(pVM);
829 if (RT_SUCCESS(rc))
830 rc = HMR3Init(pVM);
831 if (RT_SUCCESS(rc))
832 {
833 ASMCompilerBarrier(); /* HMR3Init will have modified bMainExecutionEngine */
834 Assert( pVM->bMainExecutionEngine == VM_EXEC_ENGINE_HW_VIRT
835 || pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API);
836 rc = MMR3Init(pVM);
837 if (RT_SUCCESS(rc))
838 {
839 rc = CPUMR3Init(pVM);
840 if (RT_SUCCESS(rc))
841 {
842 rc = NEMR3InitAfterCPUM(pVM);
843 if (RT_SUCCESS(rc))
844 rc = PGMR3Init(pVM);
845 if (RT_SUCCESS(rc))
846 {
847 rc = MMR3InitPaging(pVM);
848 if (RT_SUCCESS(rc))
849 rc = TMR3Init(pVM);
850 if (RT_SUCCESS(rc))
851 {
852 rc = VMMR3Init(pVM);
853 if (RT_SUCCESS(rc))
854 {
855 rc = SELMR3Init(pVM);
856 if (RT_SUCCESS(rc))
857 {
858 rc = TRPMR3Init(pVM);
859 if (RT_SUCCESS(rc))
860 {
861 rc = SSMR3RegisterStub(pVM, "CSAM", 0);
862 if (RT_SUCCESS(rc))
863 {
864 rc = SSMR3RegisterStub(pVM, "PATM", 0);
865 if (RT_SUCCESS(rc))
866 {
867 rc = IOMR3Init(pVM);
868 if (RT_SUCCESS(rc))
869 {
870 rc = EMR3Init(pVM);
871 if (RT_SUCCESS(rc))
872 {
873 rc = IEMR3Init(pVM);
874 if (RT_SUCCESS(rc))
875 {
876 rc = DBGFR3Init(pVM);
877 if (RT_SUCCESS(rc))
878 {
879 /* GIM must be init'd before PDM, gimdevR3Construct()
880 requires GIM provider to be setup. */
881 rc = GIMR3Init(pVM);
882 if (RT_SUCCESS(rc))
883 {
884 rc = PDMR3Init(pVM);
885 if (RT_SUCCESS(rc))
886 {
887 rc = MMR3HyperInitFinalize(pVM);
888 if (RT_SUCCESS(rc))
889 rc = PGMR3InitFinalize(pVM);
890 if (RT_SUCCESS(rc))
891 rc = TMR3InitFinalize(pVM);
892 if (RT_SUCCESS(rc))
893 {
894 PGMR3MemSetup(pVM, false /*fAtReset*/);
895 PDMR3MemSetup(pVM, false /*fAtReset*/);
896 }
897 if (RT_SUCCESS(rc))
898 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
899 if (RT_SUCCESS(rc))
900 {
901 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
902 return VINF_SUCCESS;
903 }
904
905 int rc2 = PDMR3Term(pVM);
906 AssertRC(rc2);
907 }
908 int rc2 = GIMR3Term(pVM);
909 AssertRC(rc2);
910 }
911 int rc2 = DBGFR3Term(pVM);
912 AssertRC(rc2);
913 }
914 int rc2 = IEMR3Term(pVM);
915 AssertRC(rc2);
916 }
917 int rc2 = EMR3Term(pVM);
918 AssertRC(rc2);
919 }
920 int rc2 = IOMR3Term(pVM);
921 AssertRC(rc2);
922 }
923 }
924 }
925 int rc2 = TRPMR3Term(pVM);
926 AssertRC(rc2);
927 }
928 int rc2 = SELMR3Term(pVM);
929 AssertRC(rc2);
930 }
931 int rc2 = VMMR3Term(pVM);
932 AssertRC(rc2);
933 }
934 int rc2 = TMR3Term(pVM);
935 AssertRC(rc2);
936 }
937 int rc2 = PGMR3Term(pVM);
938 AssertRC(rc2);
939 }
940 //int rc2 = CPUMR3Term(pVM);
941 //AssertRC(rc2);
942 }
943 /* MMR3Term is not called here because it'll kill the heap. */
944 }
945 int rc2 = HMR3Term(pVM);
946 AssertRC(rc2);
947 }
948 NEMR3Term(pVM);
949
950 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
951 return rc;
952}
953
954
955/**
956 * Initializes all R0 components of the VM.
957 */
958static int vmR3InitRing0(PVM pVM)
959{
960 LogFlow(("vmR3InitRing0:\n"));
961
962 /*
963 * Check for FAKE suplib mode.
964 */
965 int rc = VINF_SUCCESS;
966 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
967 if (!psz || strcmp(psz, "fake"))
968 {
969 /*
970 * Call the VMMR0 component and let it do the init.
971 */
972 rc = VMMR3InitR0(pVM);
973 }
974 else
975 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
976
977 /*
978 * Do notifications and return.
979 */
980 if (RT_SUCCESS(rc))
981 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
982 if (RT_SUCCESS(rc))
983 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HM);
984
985 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
986 return rc;
987}
988
989
990/**
991 * Do init completed notifications.
992 *
993 * @returns VBox status code.
994 * @param pVM The cross context VM structure.
995 * @param enmWhat What's completed.
996 */
997static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
998{
999 int rc = VMMR3InitCompleted(pVM, enmWhat);
1000 if (RT_SUCCESS(rc))
1001 rc = HMR3InitCompleted(pVM, enmWhat);
1002 if (RT_SUCCESS(rc))
1003 rc = NEMR3InitCompleted(pVM, enmWhat);
1004 if (RT_SUCCESS(rc))
1005 rc = PGMR3InitCompleted(pVM, enmWhat);
1006 if (RT_SUCCESS(rc))
1007 rc = CPUMR3InitCompleted(pVM, enmWhat);
1008 if (RT_SUCCESS(rc))
1009 rc = EMR3InitCompleted(pVM, enmWhat);
1010 if (enmWhat == VMINITCOMPLETED_RING3)
1011 {
1012 if (RT_SUCCESS(rc))
1013 rc = SSMR3RegisterStub(pVM, "rem", 1);
1014 }
1015 if (RT_SUCCESS(rc))
1016 rc = PDMR3InitCompleted(pVM, enmWhat);
1017
1018 /* IOM *must* come after PDM, as device (DevPcArch) may register some final
1019 handlers in their init completion method. */
1020 if (RT_SUCCESS(rc))
1021 rc = IOMR3InitCompleted(pVM, enmWhat);
1022 return rc;
1023}
1024
1025
/**
 * Calls the relocation functions for all VMM components so they can update
 * any GC pointers. When this function is called all the basic VM members
 * have been updated and the actual memory relocation have been done
 * by the PGM/MM.
 *
 * This is used both on init and on runtime relocations.
 *
 * @param   pVM         The cross context VM structure.
 * @param   offDelta    Relocation delta relative to old location.
 */
VMMR3_INT_DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
{
    LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));

    /*
     * The order here is very important!
     */
    PGMR3Relocate(pVM, offDelta);
    PDMR3LdrRelocateU(pVM->pUVM, offDelta); /* presumably relocates loaded modules ('Ldr'); PGM is re-run below - confirm */
    PGMR3Relocate(pVM, 0);              /* Repeat after PDM relocation. */
    CPUMR3Relocate(pVM);
    HMR3Relocate(pVM);
    SELMR3Relocate(pVM);
    VMMR3Relocate(pVM, offDelta);
    SELMR3Relocate(pVM);                /* !hack! fix stack! (deliberately relocated a second time after VMM) */
    TRPMR3Relocate(pVM, offDelta);
    IOMR3Relocate(pVM, offDelta);
    EMR3Relocate(pVM);
    TMR3Relocate(pVM, offDelta);
    IEMR3Relocate(pVM);
    DBGFR3Relocate(pVM, offDelta);
    PDMR3Relocate(pVM, offDelta);
    GIMR3Relocate(pVM, offDelta);
}
1061
1062
1063/**
1064 * EMT rendezvous worker for VMR3PowerOn.
1065 *
1066 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1067 * code, see FNVMMEMTRENDEZVOUS.)
1068 *
1069 * @param pVM The cross context VM structure.
1070 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1071 * @param pvUser Ignored.
1072 */
1073static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1074{
1075 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1076 Assert(!pvUser); NOREF(pvUser);
1077
1078 /*
1079 * The first thread thru here tries to change the state. We shouldn't be
1080 * called again if this fails.
1081 */
1082 if (pVCpu->idCpu == pVM->cCpus - 1)
1083 {
1084 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1085 if (RT_FAILURE(rc))
1086 return rc;
1087 }
1088
1089 VMSTATE enmVMState = VMR3GetState(pVM);
1090 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1091 ("%s\n", VMR3GetStateName(enmVMState)),
1092 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1093
1094 /*
1095 * All EMTs changes their state to started.
1096 */
1097 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1098
1099 /*
1100 * EMT(0) is last thru here and it will make the notification calls
1101 * and advance the state.
1102 */
1103 if (pVCpu->idCpu == 0)
1104 {
1105 PDMR3PowerOn(pVM);
1106 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1107 }
1108
1109 return VINF_SUCCESS;
1110}
1111
1112
1113/**
1114 * Powers on the virtual machine.
1115 *
1116 * @returns VBox status code.
1117 *
1118 * @param pUVM The VM to power on.
1119 *
1120 * @thread Any thread.
1121 * @vmstate Created
1122 * @vmstateto PoweringOn+Running
1123 */
1124VMMR3DECL(int) VMR3PowerOn(PUVM pUVM)
1125{
1126 LogFlow(("VMR3PowerOn: pUVM=%p\n", pUVM));
1127 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1128 PVM pVM = pUVM->pVM;
1129 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1130
1131 /*
1132 * Gather all the EMTs to reduce the init TSC drift and keep
1133 * the state changing APIs a bit uniform.
1134 */
1135 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1136 vmR3PowerOn, NULL);
1137 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1138 return rc;
1139}
1140
1141
/**
 * Does the suspend notifications.
 *
 * Shared by vmR3Suspend and vmR3LiveDoSuspend; currently this only means
 * notifying PDM.
 *
 * @param  pVM  The cross context VM structure.
 * @thread EMT(0)
 */
static void vmR3SuspendDoWork(PVM pVM)
{
    PDMR3Suspend(pVM);
}
1152
1153
1154/**
1155 * EMT rendezvous worker for VMR3Suspend.
1156 *
1157 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1158 * return code, see FNVMMEMTRENDEZVOUS.)
1159 *
1160 * @param pVM The cross context VM structure.
1161 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1162 * @param pvUser Ignored.
1163 */
1164static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1165{
1166 VMSUSPENDREASON enmReason = (VMSUSPENDREASON)(uintptr_t)pvUser;
1167 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1168
1169 /*
1170 * The first EMT switches the state to suspending. If this fails because
1171 * something was racing us in one way or the other, there will be no more
1172 * calls and thus the state assertion below is not going to annoy anyone.
1173 */
1174 if (pVCpu->idCpu == pVM->cCpus - 1)
1175 {
1176 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1177 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1178 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1179 if (RT_FAILURE(rc))
1180 return rc;
1181 pVM->pUVM->vm.s.enmSuspendReason = enmReason;
1182 }
1183
1184 VMSTATE enmVMState = VMR3GetState(pVM);
1185 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1186 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1187 ("%s\n", VMR3GetStateName(enmVMState)),
1188 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1189
1190 /*
1191 * EMT(0) does the actually suspending *after* all the other CPUs have
1192 * been thru here.
1193 */
1194 if (pVCpu->idCpu == 0)
1195 {
1196 vmR3SuspendDoWork(pVM);
1197
1198 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1199 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1200 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1201 if (RT_FAILURE(rc))
1202 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1203 }
1204
1205 return VINF_EM_SUSPEND;
1206}
1207
1208
1209/**
1210 * Suspends a running VM.
1211 *
1212 * @returns VBox status code. When called on EMT, this will be a strict status
1213 * code that has to be propagated up the call stack.
1214 *
1215 * @param pUVM The VM to suspend.
1216 * @param enmReason The reason for suspending.
1217 *
1218 * @thread Any thread.
1219 * @vmstate Running or RunningLS
1220 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1221 */
1222VMMR3DECL(int) VMR3Suspend(PUVM pUVM, VMSUSPENDREASON enmReason)
1223{
1224 LogFlow(("VMR3Suspend: pUVM=%p\n", pUVM));
1225 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1226 AssertReturn(enmReason > VMSUSPENDREASON_INVALID && enmReason < VMSUSPENDREASON_END, VERR_INVALID_PARAMETER);
1227
1228 /*
1229 * Gather all the EMTs to make sure there are no races before
1230 * changing the VM state.
1231 */
1232 int rc = VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1233 vmR3Suspend, (void *)(uintptr_t)enmReason);
1234 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1235 return rc;
1236}
1237
1238
/**
 * Retrieves the reason for the most recent suspend.
 *
 * @returns Suspend reason. VMSUSPENDREASON_INVALID if no suspend has been done
 *          or the handle is invalid.
 * @param   pUVM        The user mode VM handle.
 */
VMMR3DECL(VMSUSPENDREASON) VMR3GetSuspendReason(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSUSPENDREASON_INVALID);
    /* Plain unsynchronized read; the field is written by the suspend rendezvous. */
    return pUVM->vm.s.enmSuspendReason;
}
1251
1252
1253/**
1254 * EMT rendezvous worker for VMR3Resume.
1255 *
1256 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1257 * return code, see FNVMMEMTRENDEZVOUS.)
1258 *
1259 * @param pVM The cross context VM structure.
1260 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1261 * @param pvUser Reason.
1262 */
1263static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1264{
1265 VMRESUMEREASON enmReason = (VMRESUMEREASON)(uintptr_t)pvUser;
1266 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1267
1268 /*
1269 * The first thread thru here tries to change the state. We shouldn't be
1270 * called again if this fails.
1271 */
1272 if (pVCpu->idCpu == pVM->cCpus - 1)
1273 {
1274 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1275 if (RT_FAILURE(rc))
1276 return rc;
1277 pVM->pUVM->vm.s.enmResumeReason = enmReason;
1278 }
1279
1280 VMSTATE enmVMState = VMR3GetState(pVM);
1281 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1282 ("%s\n", VMR3GetStateName(enmVMState)),
1283 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1284
1285#if 0
1286 /*
1287 * All EMTs changes their state to started.
1288 */
1289 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1290#endif
1291
1292 /*
1293 * EMT(0) is last thru here and it will make the notification calls
1294 * and advance the state.
1295 */
1296 if (pVCpu->idCpu == 0)
1297 {
1298 PDMR3Resume(pVM);
1299 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1300 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1301 }
1302
1303 return VINF_EM_RESUME;
1304}
1305
1306
1307/**
1308 * Resume VM execution.
1309 *
1310 * @returns VBox status code. When called on EMT, this will be a strict status
1311 * code that has to be propagated up the call stack.
1312 *
1313 * @param pUVM The user mode VM handle.
1314 * @param enmReason The reason we're resuming.
1315 *
1316 * @thread Any thread.
1317 * @vmstate Suspended
1318 * @vmstateto Running
1319 */
1320VMMR3DECL(int) VMR3Resume(PUVM pUVM, VMRESUMEREASON enmReason)
1321{
1322 LogFlow(("VMR3Resume: pUVM=%p\n", pUVM));
1323 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1324 PVM pVM = pUVM->pVM;
1325 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1326 AssertReturn(enmReason > VMRESUMEREASON_INVALID && enmReason < VMRESUMEREASON_END, VERR_INVALID_PARAMETER);
1327
1328 /*
1329 * Gather all the EMTs to make sure there are no races before
1330 * changing the VM state.
1331 */
1332 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1333 vmR3Resume, (void *)(uintptr_t)enmReason);
1334 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1335 return rc;
1336}
1337
1338
/**
 * Retrieves the reason for the most recent resume.
 *
 * @returns Resume reason. VMRESUMEREASON_INVALID if no resume has been
 *          done or the handle is invalid.
 * @param   pUVM        The user mode VM handle.
 */
VMMR3DECL(VMRESUMEREASON) VMR3GetResumeReason(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMRESUMEREASON_INVALID);
    /* Plain unsynchronized read; the field is written by the resume rendezvous. */
    return pUVM->vm.s.enmResumeReason;
}
1351
1352
/**
 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
 * after the live step has been completed.
 *
 * @returns VERR_VM_INVALID_VM_STATE, VERR_TRY_AGAIN, one of the VERR_SSM_LIVE
 *          statuses, or VINF_EM_SUSPEND.  (This is a strict return code, see
 *          FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser  The pfSuspended argument of vmR3SaveTeleport.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
    bool *pfSuspended = (bool *)pvUser;

    /*
     * The first thread thru here tries to change the state. We shouldn't be
     * called again if this fails.  The state is inspected and changed under
     * the AtState critical section since we are racing other sources of
     * state changes (power off, errors, reset, ...).
     */
    if (pVCpu->idCpu == pVM->cCpus - 1U)
    {
        PUVM pUVM = pVM->pUVM;
        int rc;

        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        VMSTATE enmVMState = pVM->enmVMState;
        switch (enmVMState)
        {
            /* Still running live: move to suspending-live-save. */
            case VMSTATE_RUNNING_LS:
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS, false /*fSetRatherThanClearFF*/);
                rc = VINF_SUCCESS;
                break;

            /* Already suspended by other means; nothing more to change here. */
            case VMSTATE_SUSPENDED_EXT_LS:
            case VMSTATE_SUSPENDED_LS:          /* (via reset) */
                rc = VINF_SUCCESS;
                break;

            /* A debugger holds the VM; the caller polls and retries. */
            case VMSTATE_DEBUGGING_LS:
                rc = VERR_TRY_AGAIN;
                break;

            /* Powered off during the live phase: drop the -LS suffix and fail the save. */
            case VMSTATE_OFF_LS:
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS, false /*fSetRatherThanClearFF*/);
                rc = VERR_SSM_LIVE_POWERED_OFF;
                break;

            case VMSTATE_FATAL_ERROR_LS:
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, false /*fSetRatherThanClearFF*/);
                rc = VERR_SSM_LIVE_FATAL_ERROR;
                break;

            case VMSTATE_GURU_MEDITATION_LS:
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, false /*fSetRatherThanClearFF*/);
                rc = VERR_SSM_LIVE_GURU_MEDITATION;
                break;

            /* Transitional states we do not expect to observe here. */
            case VMSTATE_POWERING_OFF_LS:
            case VMSTATE_SUSPENDING_EXT_LS:
            case VMSTATE_RESETTING_LS:
            default:
                AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
                rc = VERR_VM_UNEXPECTED_VM_STATE;
                break;
        }
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
        if (RT_FAILURE(rc))
        {
            LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
            return rc;
        }
    }

    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
                    ("%s\n", VMR3GetStateName(enmVMState)),
                    VERR_VM_UNEXPECTED_UNSTABLE_STATE);

    /*
     * Only EMT(0) have work to do since it's last thru here.
     */
    if (pVCpu->idCpu == 0)
    {
        vmR3SuspendDoWork(pVM);
        int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
                                 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
        if (RT_FAILURE(rc))
            return VERR_VM_UNEXPECTED_UNSTABLE_STATE;

        /* Tell the caller that the VM was suspended by us. */
        *pfSuspended = true;
    }

    return VINF_EM_SUSPEND;
}
1448
1449
1450/**
1451 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1452 * SSMR3LiveDoStep1 failure.
1453 *
1454 * Doing this as a rendezvous operation avoids all annoying transition
1455 * states.
1456 *
1457 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1458 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1459 *
1460 * @param pVM The cross context VM structure.
1461 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1462 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1463 */
1464static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1465{
1466 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1467 bool *pfSuspended = (bool *)pvUser;
1468 NOREF(pVCpu);
1469
1470 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1471 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1472 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1473 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1474 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1475 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1476 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1477 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1478 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1479 if (rc == 1)
1480 rc = VERR_SSM_LIVE_POWERED_OFF;
1481 else if (rc == 2)
1482 rc = VERR_SSM_LIVE_FATAL_ERROR;
1483 else if (rc == 3)
1484 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1485 else if (rc == 4)
1486 {
1487 *pfSuspended = true;
1488 rc = VINF_SUCCESS;
1489 }
1490 else if (rc > 0)
1491 rc = VINF_SUCCESS;
1492 return rc;
1493}
1494
1495
/**
 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
 *
 * @returns VBox status code.
 * @retval  VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        The handle of saved state operation.
 *
 * @thread  EMT(0)
 */
static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
    VM_ASSERT_EMT0(pVM);

    /*
     * Advance the state and mark if VMR3Suspend was called.
     */
    int rc = VINF_SUCCESS;
    VMSTATE enmVMState = VMR3GetState(pVM);
    if (enmVMState == VMSTATE_SUSPENDED_LS)
        vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
    else
    {
        /* Any other state means someone suspended us externally (or we are
           already in SAVING); carry VINF_SSM_LIVE_SUSPENDED to the caller. */
        if (enmVMState != VMSTATE_SAVING)
            vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
        rc = VINF_SSM_LIVE_SUSPENDED;
    }

    /*
     * Finish up and release the handle. Careful with the status codes.
     * An rc2 failure overrides rc, but the informational
     * VINF_SSM_LIVE_SUSPENDED is preserved when rc2 succeeds.
     */
    int rc2 = SSMR3LiveDoStep2(pSSM);
    if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
        rc = rc2;

    rc2 = SSMR3LiveDone(pSSM);
    if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
        rc = rc2;

    /*
     * Advance to the final state and return.
     */
    vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
    /* The result must not be mistaken for an EM status code by the caller. */
    Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
    return rc;
}
1544
1545
/**
 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
 * SSMR3LiveSave.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   cMsMaxDowntime  The maximum downtime given as milliseconds.
 * @param   pszFilename     The name of the file. NULL if pStreamOps is used.
 * @param   pStreamOps      The stream methods. NULL if pszFilename is used.
 * @param   pvStreamOpsUser The user argument to the stream methods.
 * @param   enmAfter        What to do afterwards.
 * @param   pfnProgress     Progress callback. Optional.
 * @param   pvProgressUser  User argument for the progress callback.
 * @param   ppSSM           Where to return the saved state handle in case of a
 *                          live snapshot scenario.
 *
 * @thread  EMT
 */
static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
                                  SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM)
{
    int rc = VINF_SUCCESS;

    LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
             pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));

    /*
     * Validate input.
     */
    AssertPtrNull(pszFilename);
    AssertPtrNull(pStreamOps);
    AssertPtr(pVM);
    Assert(   enmAfter == SSMAFTER_DESTROY
           || enmAfter == SSMAFTER_CONTINUE
           || enmAfter == SSMAFTER_TELEPORT);
    AssertPtr(ppSSM);
    *ppSSM = NULL;

    /*
     * Change the state and perform/start the saving.
     *
     * vmR3TrySetState returns the 1-based index of the transition pair that
     * was applied: 1 = Suspended -> Saving, 2 = Running -> RunningLS.
     */
    rc = vmR3TrySetState(pVM, "VMR3Save", 2,
                         VMSTATE_SAVING, VMSTATE_SUSPENDED,
                         VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
    if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
    {
        /* Suspended, non-teleport: plain (non-live) save. */
        rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
        vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
    }
    else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
    {
        /* Running (or teleporting): start a live save; the handle is
           returned via ppSSM for the remaining steps.
           NOTE(review): when enmAfter == SSMAFTER_TELEPORT this branch is
           also taken if the state change above failed (rc < 0) - verify
           that is intentional. */
        if (enmAfter == SSMAFTER_TELEPORT)
            pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
        rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
                           enmAfter, pfnProgress, pvProgressUser, ppSSM);
        /* (We're not subject to cancellation just yet.) */
    }
    else
        Assert(RT_FAILURE(rc));
    return rc;
}
1608
1609
/**
 * Common worker for VMR3Save and VMR3Teleport.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   cMsMaxDowntime  The maximum downtime given as milliseconds.
 * @param   pszFilename     The name of the file. NULL if pStreamOps is used.
 * @param   pStreamOps      The stream methods. NULL if pszFilename is used.
 * @param   pvStreamOpsUser The user argument to the stream methods.
 * @param   enmAfter        What to do afterwards.
 * @param   pfnProgress     Progress callback. Optional.
 * @param   pvProgressUser  User argument for the progress callback.
 * @param   pfSuspended     Set if we suspended the VM.
 *
 * @thread  Non-EMT
 */
static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
                            const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
                            SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
{
    /*
     * Request the operation in EMT(0).  For a plain save vmR3Save does the
     * whole job and leaves pSSM NULL; for a live save it returns the handle.
     */
    PSSMHANDLE pSSM;
    int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/,
                             (PFNRT)vmR3Save, 9, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
                             enmAfter, pfnProgress, pvProgressUser, &pSSM);
    if (   RT_SUCCESS(rc)
        && pSSM)
    {
        /*
         * Live snapshot.
         *
         * The state handling here is kind of tricky, doing it on EMT(0) helps
         * a bit. See the VMSTATE diagram for details.
         */
        rc = SSMR3LiveDoStep1(pSSM);
        if (RT_SUCCESS(rc))
        {
            if (VMR3GetState(pVM) != VMSTATE_SAVING)
                for (;;)
                {
                    /* Try suspend the VM. */
                    rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
                                            vmR3LiveDoSuspend, pfSuspended);
                    if (rc != VERR_TRY_AGAIN)
                        break;

                    /* Wait for the state to change. */
                    RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
                }
            if (RT_SUCCESS(rc))
                rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
            else
            {
                /* Suspend failed: just release the saved state handle on EMT(0). */
                int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
                AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc)); NOREF(rc2);
            }
        }
        else
        {
            /* Step 1 failed: release the handle, then clean up the VM state
               via a rendezvous (avoids annoying transition states). */
            int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
            AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));

            rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
            if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
                rc = rc2;
        }
    }

    return rc;
}
1683
1684
1685/**
1686 * Save current VM state.
1687 *
1688 * Can be used for both saving the state and creating snapshots.
1689 *
1690 * When called for a VM in the Running state, the saved state is created live
1691 * and the VM is only suspended when the final part of the saving is preformed.
1692 * The VM state will not be restored to Running in this case and it's up to the
1693 * caller to call VMR3Resume if this is desirable. (The rational is that the
1694 * caller probably wish to reconfigure the disks before resuming the VM.)
1695 *
1696 * @returns VBox status code.
1697 *
1698 * @param pUVM The VM which state should be saved.
1699 * @param pszFilename The name of the save state file.
1700 * @param fContinueAfterwards Whether continue execution afterwards or not.
1701 * When in doubt, set this to true.
1702 * @param pfnProgress Progress callback. Optional.
1703 * @param pvUser User argument for the progress callback.
1704 * @param pfSuspended Set if we suspended the VM.
1705 *
1706 * @thread Non-EMT.
1707 * @vmstate Suspended or Running
1708 * @vmstateto Saving+Suspended or
1709 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1710 */
1711VMMR3DECL(int) VMR3Save(PUVM pUVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser,
1712 bool *pfSuspended)
1713{
1714 LogFlow(("VMR3Save: pUVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1715 pUVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1716
1717 /*
1718 * Validate input.
1719 */
1720 AssertPtr(pfSuspended);
1721 *pfSuspended = false;
1722 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1723 PVM pVM = pUVM->pVM;
1724 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1725 VM_ASSERT_OTHER_THREAD(pVM);
1726 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1727 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1728 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1729
1730 /*
1731 * Join paths with VMR3Teleport.
1732 */
1733 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1734 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1735 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1736 enmAfter, pfnProgress, pvUser, pfSuspended);
1737 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1738 return rc;
1739}
1740
1741
1742/**
1743 * Teleport the VM (aka live migration).
1744 *
1745 * @returns VBox status code.
1746 *
1747 * @param pUVM The VM which state should be saved.
1748 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1749 * @param pStreamOps The stream methods.
1750 * @param pvStreamOpsUser The user argument to the stream methods.
1751 * @param pfnProgress Progress callback. Optional.
1752 * @param pvProgressUser User argument for the progress callback.
1753 * @param pfSuspended Set if we suspended the VM.
1754 *
1755 * @thread Non-EMT.
1756 * @vmstate Suspended or Running
1757 * @vmstateto Saving+Suspended or
1758 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1759 */
1760VMMR3DECL(int) VMR3Teleport(PUVM pUVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1761 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
1762{
1763 LogFlow(("VMR3Teleport: pUVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
1764 pUVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1765
1766 /*
1767 * Validate input.
1768 */
1769 AssertPtr(pfSuspended);
1770 *pfSuspended = false;
1771 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1772 PVM pVM = pUVM->pVM;
1773 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1774 VM_ASSERT_OTHER_THREAD(pVM);
1775 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1776 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1777
1778 /*
1779 * Join paths with VMR3Save.
1780 */
1781 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime, NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
1782 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended);
1783 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1784 return rc;
1785}
1786
1787
1788
/**
 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
 *
 * @returns VBox status code.
 *
 * @param   pUVM            Pointer to the VM.
 * @param   pszFilename     The name of the file. NULL if pStreamOps is used.
 * @param   pStreamOps      The stream methods. NULL if pszFilename is used.
 * @param   pvStreamOpsUser The user argument to the stream methods.
 * @param   pfnProgress     Progress callback. Optional.
 * @param   pvProgressUser  User argument for the progress callback.
 * @param   fTeleporting    Indicates whether we're teleporting or not.
 *
 * @thread  EMT.
 */
static DECLCALLBACK(int) vmR3Load(PUVM pUVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
                                  PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting)
{
    LogFlow(("vmR3Load: pUVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
             pUVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));

    /*
     * Validate input (paranoia).
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertPtrNull(pszFilename);
    AssertPtrNull(pStreamOps);
    AssertPtrNull(pfnProgress);

    /*
     * Change the state and perform the load.
     *
     * Always perform a relocation round afterwards to make sure hypervisor
     * selectors and such are correct.
     */
    int rc = vmR3TrySetState(pVM, "VMR3Load", 2,
                             VMSTATE_LOADING, VMSTATE_CREATED,
                             VMSTATE_LOADING, VMSTATE_SUSPENDED);
    if (RT_FAILURE(rc))
        return rc;

    /* Set before loading so the flag is correct while restoring; cleared
       again below on failure. */
    pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;

    /* Remember the error count so we can tell below whether SSMR3Load
       already set a meaningful error message. */
    uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pUVM);
    rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
    if (RT_SUCCESS(rc))
    {
        VMR3Relocate(pVM, 0 /*offDelta*/);
        vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
    }
    else
    {
        pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
        vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);

        /* Only set a generic error if nobody set a specific one during the load.
           NOTE(review): pszFilename may be NULL when pStreamOps is used - the
           '%s' below would then format a NULL pointer; verify the formatter
           handles this as intended. */
        if (cErrorsPriorToSave == VMR3GetErrorCount(pUVM))
            rc = VMSetError(pVM, rc, RT_SRC_POS,
                            N_("Unable to restore the virtual machine's saved state from '%s'. "
                               "It may be damaged or from an older version of VirtualBox. "
                               "Please discard the saved state before starting the virtual machine"),
                            pszFilename);
    }

    return rc;
}
1856
1857
1858/**
1859 * Loads a VM state into a newly created VM or a one that is suspended.
1860 *
1861 * To restore a saved state on VM startup, call this function and then resume
1862 * the VM instead of powering it on.
1863 *
1864 * @returns VBox status code.
1865 *
1866 * @param pUVM The user mode VM structure.
1867 * @param pszFilename The name of the save state file.
1868 * @param pfnProgress Progress callback. Optional.
1869 * @param pvUser User argument for the progress callback.
1870 *
1871 * @thread Any thread.
1872 * @vmstate Created, Suspended
1873 * @vmstateto Loading+Suspended
1874 */
1875VMMR3DECL(int) VMR3LoadFromFile(PUVM pUVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
1876{
1877 LogFlow(("VMR3LoadFromFile: pUVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
1878 pUVM, pszFilename, pszFilename, pfnProgress, pvUser));
1879
1880 /*
1881 * Validate input.
1882 */
1883 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1884 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
1885
1886 /*
1887 * Forward the request to EMT(0). No need to setup a rendezvous here
1888 * since there is no execution taking place when this call is allowed.
1889 */
1890 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
1891 pUVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/,
1892 pfnProgress, pvUser, false /*fTeleporting*/);
1893 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
1894 return rc;
1895}
1896
1897
1898/**
1899 * VMR3LoadFromFile for arbitrary file streams.
1900 *
1901 * @returns VBox status code.
1902 *
1903 * @param pUVM Pointer to the VM.
1904 * @param pStreamOps The stream methods.
1905 * @param pvStreamOpsUser The user argument to the stream methods.
1906 * @param pfnProgress Progress callback. Optional.
1907 * @param pvProgressUser User argument for the progress callback.
1908 *
1909 * @thread Any thread.
1910 * @vmstate Created, Suspended
1911 * @vmstateto Loading+Suspended
1912 */
1913VMMR3DECL(int) VMR3LoadFromStream(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1914 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
1915{
1916 LogFlow(("VMR3LoadFromStream: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
1917 pUVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
1918
1919 /*
1920 * Validate input.
1921 */
1922 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1923 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
1924
1925 /*
1926 * Forward the request to EMT(0). No need to setup a rendezvous here
1927 * since there is no execution taking place when this call is allowed.
1928 */
1929 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 7,
1930 pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress,
1931 pvProgressUser, true /*fTeleporting*/);
1932 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
1933 return rc;
1934}
1935
1936
/**
 * EMT rendezvous worker for VMR3PowerOff.
 *
 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
 *          return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser  Ignored.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT thru here will change the state to PoweringOff.
     *
     * (This is a descending rendezvous, see VMR3PowerOff, so the EMT with the
     * highest CPU id is the first one through.  If the state change fails the
     * rendezvous is aborted and the other EMTs never get here.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
                                 VMSTATE_POWERING_OFF,    VMSTATE_RUNNING,           /* 1 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_SUSPENDED,         /* 2 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_DEBUGGING,         /* 3 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_LOAD_FAILURE,      /* 4 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_GURU_MEDITATION,   /* 5 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_FATAL_ERROR,       /* 6 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_CREATED,           /* 7 */  /** @todo update the diagram! */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS,        /* 8 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS,      /* 9 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS);   /* 11 */
        if (RT_FAILURE(rc))
            return rc;
        /* For pair 7 (Created) and the live-save pairs 8 thru 11, cancel any
           pending SSM operation before tearing things down. */
        if (rc >= 7)
            SSMR3Cancel(pVM->pUVM);
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertMsgReturn(   enmVMState == VMSTATE_POWERING_OFF
                    || enmVMState == VMSTATE_POWERING_OFF_LS,
                    ("%s\n", VMR3GetStateName(enmVMState)),
                    VERR_VM_INVALID_VM_STATE);

    /*
     * EMT(0) does the actual power off work here *after* all the other EMTs
     * have been thru and entered the STOPPED state.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
    if (pVCpu->idCpu == 0)
    {
        /*
         * For debugging purposes, we will log a summary of the guest state at this point.
         */
        if (enmVMState != VMSTATE_GURU_MEDITATION)
        {
            /** @todo make the state dumping at VMR3PowerOff optional. */
            bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
            RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
            DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            /* The VM-wide items (timers, GDT) are only dumped once, by EMT(0). */
            DBGFR3Info(pVM->pUVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM->pUVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
            /** @todo dump guest call stack. */
            RTLogRelSetBuffering(fOldBuffered);
            RTLogRelPrintf("************** End of Guest state at power off ***************\n");
        }

        /*
         * Perform the power off notifications and advance the state to
         * Off or OffLS.
         */
        PDMR3PowerOff(pVM);
        DBGFR3PowerOff(pVM);

        /* Re-read the state under the lock in case it changed since the
           check above (e.g. live-save cancellation). */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_POWERING_OFF_LS)
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS, false /*fSetRatherThanClearFF*/);
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF,    VMSTATE_POWERING_OFF,    false /*fSetRatherThanClearFF*/);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }
    else if (enmVMState != VMSTATE_GURU_MEDITATION)
    {
        /* Non-zero EMTs dump only their own per-CPU state (no VM-wide items). */
        /** @todo make the state dumping at VMR3PowerOff optional. */
        bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
        RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
        DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
        RTLogRelPrintf("***\n");
        DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
        RTLogRelPrintf("***\n");
        DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
        RTLogRelPrintf("***\n");
        RTLogRelSetBuffering(fOldBuffered);
        RTLogRelPrintf("************** End of Guest state at power off for VCpu %u ***************\n", pVCpu->idCpu);
    }

    return VINF_EM_OFF;
}
2046
2047
2048/**
2049 * Power off the VM.
2050 *
2051 * @returns VBox status code. When called on EMT, this will be a strict status
2052 * code that has to be propagated up the call stack.
2053 *
2054 * @param pUVM The handle of the VM to be powered off.
2055 *
2056 * @thread Any thread.
2057 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2058 * @vmstateto Off or OffLS
2059 */
2060VMMR3DECL(int) VMR3PowerOff(PUVM pUVM)
2061{
2062 LogFlow(("VMR3PowerOff: pUVM=%p\n", pUVM));
2063 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2064 PVM pVM = pUVM->pVM;
2065 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2066
2067 /*
2068 * Gather all the EMTs to make sure there are no races before
2069 * changing the VM state.
2070 */
2071 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2072 vmR3PowerOff, NULL);
2073 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2074 return rc;
2075}
2076
2077
2078/**
2079 * Destroys the VM.
2080 *
2081 * The VM must be powered off (or never really powered on) to call this
2082 * function. The VM handle is destroyed and can no longer be used up successful
2083 * return.
2084 *
2085 * @returns VBox status code.
2086 *
2087 * @param pUVM The user mode VM handle.
2088 *
2089 * @thread Any none emulation thread.
2090 * @vmstate Off, Created
2091 * @vmstateto N/A
2092 */
2093VMMR3DECL(int) VMR3Destroy(PUVM pUVM)
2094{
2095 LogFlow(("VMR3Destroy: pUVM=%p\n", pUVM));
2096
2097 /*
2098 * Validate input.
2099 */
2100 if (!pUVM)
2101 return VERR_INVALID_VM_HANDLE;
2102 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2103 PVM pVM = pUVM->pVM;
2104 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2105 AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);
2106
2107 /*
2108 * Change VM state to destroying and aall vmR3Destroy on each of the EMTs
2109 * ending with EMT(0) doing the bulk of the cleanup.
2110 */
2111 int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
2112 if (RT_FAILURE(rc))
2113 return rc;
2114
2115 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
2116 AssertLogRelRC(rc);
2117
2118 /*
2119 * Wait for EMTs to quit and destroy the UVM.
2120 */
2121 vmR3DestroyUVM(pUVM, 30000);
2122
2123 LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
2124 return VINF_SUCCESS;
2125}
2126
2127
/**
 * Internal destruction worker.
 *
 * This is either called from VMR3Destroy via VMR3ReqCallU or from
 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
 * VMR3Destroy().
 *
 * When called on EMT(0), it will perform the great bulk of the destruction.
 * When called on the other EMTs, they will do nothing and the whole purpose is
 * to return VINF_EM_TERMINATE so they break out of their run loops.
 *
 * @returns VINF_EM_TERMINATE.
 * @param   pVM     The cross context VM structure.
 */
DECLCALLBACK(int) vmR3Destroy(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu);
    LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));

    /*
     * Only VCPU 0 does the full cleanup (last).
     */
    if (pVCpu->idCpu == 0)
    {
        /*
         * Dump statistics to the log.
         */
#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
        RTLogFlags(NULL, "nodisabled nobuffered");
#endif
//#ifdef VBOX_WITH_STATISTICS
//        STAMR3Dump(pUVM, "*");
//#else
        LogRel(("************************* Statistics *************************\n"));
        STAMR3DumpToReleaseLog(pUVM, "*");
        LogRel(("********************* End of statistics **********************\n"));
//#endif

        /*
         * Destroy the VM components.
         *
         * NOTE(review): the termination order below looks deliberate (e.g.
         * VMMR3Term tears down the ring-0 side before CPUM/MM go) - do not
         * reorder without checking the individual components.
         */
        int rc = TMR3Term(pVM);
        AssertRC(rc);
#ifdef VBOX_WITH_DEBUGGER
        rc = DBGCIoTerminate(pUVM, pUVM->vm.s.pvDBGC);
        pUVM->vm.s.pvDBGC = NULL;
#endif
        AssertRC(rc);
        rc = PDMR3Term(pVM);
        AssertRC(rc);
        rc = GIMR3Term(pVM);
        AssertRC(rc);
        rc = DBGFR3Term(pVM);
        AssertRC(rc);
        rc = IEMR3Term(pVM);
        AssertRC(rc);
        rc = EMR3Term(pVM);
        AssertRC(rc);
        rc = IOMR3Term(pVM);
        AssertRC(rc);
        rc = TRPMR3Term(pVM);
        AssertRC(rc);
        rc = SELMR3Term(pVM);
        AssertRC(rc);
        rc = HMR3Term(pVM);
        AssertRC(rc);
        rc = NEMR3Term(pVM);
        AssertRC(rc);
        rc = PGMR3Term(pVM);
        AssertRC(rc);
        rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
        AssertRC(rc);
        rc = CPUMR3Term(pVM);
        AssertRC(rc);
        SSMR3Term(pVM);
        rc = PDMR3CritSectBothTerm(pVM);
        AssertRC(rc);
        rc = MMR3Term(pVM);
        AssertRC(rc);

        /*
         * We're done, tell the other EMTs to quit.
         */
        ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
        ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
        LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
    }

    /*
     * Decrement the active EMT count here.
     */
    /* Each EMT only decrements once; fBeenThruVmDestroy guards against a
       double decrement should this worker somehow run twice on one CPU. */
    PUVMCPU pUVCpu = &pUVM->aCpus[pVCpu->idCpu];
    if (!pUVCpu->vm.s.fBeenThruVmDestroy)
    {
        pUVCpu->vm.s.fBeenThruVmDestroy = true;
        ASMAtomicDecU32(&pUVM->vm.s.cActiveEmts);
    }
    else
        AssertFailed();

    return VINF_EM_TERMINATE;
}
2232
2233
/**
 * Destroys the UVM portion.
 *
 * This is called as the final step in the VM destruction or as the cleanup
 * in case of a creation failure.
 *
 * @param   pUVM            The user mode VM structure.
 * @param   cMilliesEMTWait The number of milliseconds to wait for the emulation
 *                          threads.
 */
static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
{
    /*
     * Signal termination of each the emulation threads and
     * wait for them to complete.
     */
    /* Signal them - in reverse order since EMT(0) waits for the others. */
    ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
    if (pUVM->pVM)
        VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
    VMCPUID iCpu = pUVM->cCpus;
    while (iCpu-- > 0)
    {
        VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
        RTSemEventSignal(pUVM->aCpus[iCpu].vm.s.EventSemWait);
    }

    /* Wait for EMT(0), it in turn waits for the rest. */
    ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);

    RTTHREAD const hSelf = RTThreadSelf();
    RTTHREAD hThread = pUVM->aCpus[0].vm.s.ThreadEMT;
    if (   hThread != NIL_RTTHREAD
        && hThread != hSelf)
    {
        /* At least 2 seconds; retry once on timeout to dodge the assertion
           when sitting in a debugger. */
        int rc2 = RTThreadWait(hThread, RT_MAX(cMilliesEMTWait, 2000), NULL);
        if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
            rc2 = RTThreadWait(hThread, 1000, NULL);
        AssertLogRelMsgRC(rc2, ("iCpu=0 rc=%Rrc\n", rc2));
        if (RT_SUCCESS(rc2))
            pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
    }

    /* Just in case we're in a weird failure situation w/o EMT(0) to do the
       waiting, wait the other EMTs too. */
    for (iCpu = 1; iCpu < pUVM->cCpus; iCpu++)
    {
        ASMAtomicXchgHandle(&pUVM->aCpus[iCpu].vm.s.ThreadEMT, NIL_RTTHREAD, &hThread);
        if (hThread != NIL_RTTHREAD)
        {
            if (hThread != hSelf)
            {
                int rc2 = RTThreadWait(hThread, 250 /*ms*/, NULL);
                AssertLogRelMsgRC(rc2, ("iCpu=%u rc=%Rrc\n", iCpu, rc2));
                if (RT_SUCCESS(rc2))
                    continue;
            }
            /* Wait failed (or it's ourselves): put the handle back. */
            pUVM->aCpus[iCpu].vm.s.ThreadEMT = hThread;
        }
    }

    /* Cleanup the semaphores. */
    iCpu = pUVM->cCpus;
    while (iCpu-- > 0)
    {
        RTSemEventDestroy(pUVM->aCpus[iCpu].vm.s.EventSemWait);
        pUVM->aCpus[iCpu].vm.s.EventSemWait = NIL_RTSEMEVENT;
    }

    /*
     * Free the event semaphores associated with the request packets.
     */
    unsigned cReqs = 0;
    for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
    {
        PVMREQ pReq = pUVM->vm.s.apReqFree[i];
        pUVM->vm.s.apReqFree[i] = NULL;
        for (; pReq; pReq = pReq->pNext, cReqs++)
        {
            pReq->enmState = VMREQSTATE_INVALID;
            RTSemEventDestroy(pReq->EventSem);
        }
    }
    Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);

    /*
     * Kill all queued requests. (There really shouldn't be any!)
     */
    /* Up to 10 passes: killing a request may wake a thread that queues more. */
    for (unsigned i = 0; i < 10; i++)
    {
        PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pPriorityReqs, NULL, PVMREQ);
        if (!pReqHead)
        {
            pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pNormalReqs, NULL, PVMREQ);
            if (!pReqHead)
                break;
        }
        AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));

        for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
        {
            ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
            ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
            RTSemEventSignal(pReq->EventSem);
            RTThreadSleep(2);
            RTSemEventDestroy(pReq->EventSem);
        }
        /* give them a chance to respond before we free the request memory. */
        RTThreadSleep(32);
    }

    /*
     * Now all queued VCPU requests (again, there shouldn't be any).
     */
    for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
    {
        PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];

        for (unsigned i = 0; i < 10; i++)
        {
            PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pPriorityReqs, NULL, PVMREQ);
            if (!pReqHead)
            {
                pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pNormalReqs, NULL, PVMREQ);
                if (!pReqHead)
                    break;
            }
            AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));

            for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
            {
                ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
                ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
                RTSemEventSignal(pReq->EventSem);
                RTThreadSleep(2);
                RTSemEventDestroy(pReq->EventSem);
            }
            /* give them a chance to respond before we free the request memory. */
            RTThreadSleep(32);
        }
    }

    /*
     * Make sure the VMMR0.r0 module and whatever else is unloaded.
     */
    PDMR3TermUVM(pUVM);

    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
    RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);

    /*
     * Terminate the support library if initialized.
     */
    if (pUVM->vm.s.pSession)
    {
        int rc = SUPR3Term(false /*fForced*/);
        AssertRC(rc);
        pUVM->vm.s.pSession = NIL_RTR0PTR;
    }

    /*
     * Release the UVM structure reference.
     */
    /* May free pUVM right here if this was the last reference. */
    VMR3ReleaseUVM(pUVM);

    /*
     * Clean up and flush logs.
     */
    RTLogFlush(NULL);
}
2404
2405
2406/**
2407 * Worker which checks integrity of some internal structures.
2408 * This is yet another attempt to track down that AVL tree crash.
2409 */
2410static void vmR3CheckIntegrity(PVM pVM)
2411{
2412#ifdef VBOX_STRICT
2413 int rc = PGMR3CheckIntegrity(pVM);
2414 AssertReleaseRC(rc);
2415#else
2416 RT_NOREF_PV(pVM);
2417#endif
2418}
2419
2420
/**
 * EMT rendezvous worker for VMR3ResetFF for doing soft/warm reset.
 *
 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESCHEDULE.
 *          (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser  The reset flags.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3SoftReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /* pvUser carries the PDMVMRESET_F_XXX flags from vmR3ResetCommon. */
    uint32_t fResetFlags = *(uint32_t *)pvUser;


    /*
     * The first EMT will try change the state to resetting.  If this fails,
     * we won't get called for the other EMTs.
     *
     * (Descending rendezvous: the EMT with the highest id goes first.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "vmR3ResetSoft", 3,
                                 VMSTATE_SOFT_RESETTING,    VMSTATE_RUNNING,
                                 VMSTATE_SOFT_RESETTING,    VMSTATE_SUSPENDED,
                                 VMSTATE_SOFT_RESETTING_LS, VMSTATE_RUNNING_LS);
        if (RT_FAILURE(rc))
            return rc;
        pVM->vm.s.cResets++;
        pVM->vm.s.cSoftResets++;
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertLogRelMsgReturn(   enmVMState == VMSTATE_SOFT_RESETTING
                          || enmVMState == VMSTATE_SOFT_RESETTING_LS,
                          ("%s\n", VMR3GetStateName(enmVMState)),
                          VERR_VM_UNEXPECTED_UNSTABLE_STATE);

    /*
     * EMT(0) does the full cleanup *after* all the other EMTs has been
     * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
     *
     * Because there are per-cpu reset routines and order may/is important,
     * the following sequence looks a bit ugly...
     */

    /* Reset the VCpu state. */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);

    /*
     * Soft reset the VM components.
     */
    if (pVCpu->idCpu == 0)
    {
        PDMR3SoftReset(pVM, fResetFlags);
        TRPMR3Reset(pVM);
        CPUMR3Reset(pVM);               /* This must come *after* PDM (due to APIC base MSR caching). */
        EMR3Reset(pVM);
        HMR3Reset(pVM);                 /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
        NEMR3Reset(pVM);

        /*
         * Since EMT(0) is the last to go thru here, it will advance the state.
         * (Unlike vmR3HardReset we won't be doing any suspending of live
         * migration VMs here since memory is unchanged.)
         */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_SOFT_RESETTING)
        {
            /* Return to whichever stable state (Suspended/Running) we came from. */
            if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
            else
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING,   VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
        }
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING_LS, VMSTATE_SOFT_RESETTING_LS, false /*fSetRatherThanClearFF*/);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }

    return VINF_EM_RESCHEDULE;
}
2506
2507
/**
 * EMT rendezvous worker for VMR3Reset and VMR3ResetFF.
 *
 * This is called by the emulation threads as a response to the reset request
 * issued by VMR3Reset().
 *
 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
 *          is a strict return code, see FNVMMEMTRENDEZVOUS.)
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser  Ignored.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3HardReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT will try change the state to resetting.  If this fails,
     * we won't get called for the other EMTs.
     *
     * (Descending rendezvous: the EMT with the highest id goes first.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "vmR3HardReset", 3,
                                 VMSTATE_RESETTING,    VMSTATE_RUNNING,
                                 VMSTATE_RESETTING,    VMSTATE_SUSPENDED,
                                 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
        if (RT_FAILURE(rc))
            return rc;
        pVM->vm.s.cResets++;
        pVM->vm.s.cHardResets++;
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertLogRelMsgReturn(   enmVMState == VMSTATE_RESETTING
                          || enmVMState == VMSTATE_RESETTING_LS,
                          ("%s\n", VMR3GetStateName(enmVMState)),
                          VERR_VM_UNEXPECTED_UNSTABLE_STATE);

    /*
     * EMT(0) does the full cleanup *after* all the other EMTs has been
     * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
     *
     * Because there are per-cpu reset routines and order may/is important,
     * the following sequence looks a bit ugly...
     */
    if (pVCpu->idCpu == 0)
        vmR3CheckIntegrity(pVM);

    /* Reset the VCpu state. */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);

    /* Clear all pending forced actions. */
    VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);

    /*
     * Reset the VM components.
     */
    if (pVCpu->idCpu == 0)
    {
        GIMR3Reset(pVM);                /* This must come *before* PDM and TM. */
        PDMR3Reset(pVM);
        PGMR3Reset(pVM);
        SELMR3Reset(pVM);
        TRPMR3Reset(pVM);
        IOMR3Reset(pVM);
        CPUMR3Reset(pVM);               /* This must come *after* PDM (due to APIC base MSR caching). */
        TMR3Reset(pVM);
        EMR3Reset(pVM);
        HMR3Reset(pVM);                 /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
        NEMR3Reset(pVM);

        /*
         * Do memory setup.
         */
        PGMR3MemSetup(pVM, true /*fAtReset*/);
        PDMR3MemSetup(pVM, true /*fAtReset*/);

        /*
         * Since EMT(0) is the last to go thru here, it will advance the state.
         * When a live save is active, we will move on to SuspendingLS but
         * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
         */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;   /* Note: reread under the lock; also decides the return code below. */
        if (enmVMState == VMSTATE_RESETTING)
        {
            /* Return to whichever stable state (Suspended/Running) we came from. */
            if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
            else
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING,   VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
        }
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS, false /*fSetRatherThanClearFF*/);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);

        vmR3CheckIntegrity(pVM);

        /*
         * Do the suspend bit as well.
         * It only requires some EMT(0) work at present.
         */
        if (enmVMState != VMSTATE_RESETTING)
        {
            vmR3SuspendDoWork(pVM);
            vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
        }
    }

    return enmVMState == VMSTATE_RESETTING
         ? VINF_EM_RESET
         : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
}
2625
2626
2627/**
2628 * Internal worker for VMR3Reset, VMR3ResetFF, VMR3TripleFault.
2629 *
2630 * @returns VBox status code.
2631 * @param pVM The cross context VM structure.
2632 * @param fHardReset Whether it's a hard reset or not.
2633 * @param fResetFlags The reset flags (PDMVMRESET_F_XXX).
2634 */
2635static VBOXSTRICTRC vmR3ResetCommon(PVM pVM, bool fHardReset, uint32_t fResetFlags)
2636{
2637 LogFlow(("vmR3ResetCommon: fHardReset=%RTbool fResetFlags=%#x\n", fHardReset, fResetFlags));
2638 int rc;
2639 if (fHardReset)
2640 {
2641 /*
2642 * Hard reset.
2643 */
2644 /* Check whether we're supposed to power off instead of resetting. */
2645 if (pVM->vm.s.fPowerOffInsteadOfReset)
2646 {
2647 PUVM pUVM = pVM->pUVM;
2648 if ( pUVM->pVmm2UserMethods
2649 && pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff)
2650 pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff(pUVM->pVmm2UserMethods, pUVM);
2651 return VMR3PowerOff(pUVM);
2652 }
2653
2654 /* Gather all the EMTs to make sure there are no races before changing
2655 the VM state. */
2656 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2657 vmR3HardReset, NULL);
2658 }
2659 else
2660 {
2661 /*
2662 * Soft reset. Since we only support this with a single CPU active,
2663 * we must be on EMT #0 here.
2664 */
2665 VM_ASSERT_EMT0(pVM);
2666 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2667 vmR3SoftReset, &fResetFlags);
2668 }
2669
2670 LogFlow(("vmR3ResetCommon: returns %Rrc\n", rc));
2671 return rc;
2672}
2673
2674
2675
2676/**
2677 * Reset the current VM.
2678 *
2679 * @returns VBox status code.
2680 * @param pUVM The VM to reset.
2681 */
2682VMMR3DECL(int) VMR3Reset(PUVM pUVM)
2683{
2684 LogFlow(("VMR3Reset:\n"));
2685 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2686 PVM pVM = pUVM->pVM;
2687 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2688
2689 return VBOXSTRICTRC_VAL(vmR3ResetCommon(pVM, true, 0));
2690}
2691
2692
2693/**
2694 * Handle the reset force flag or triple fault.
2695 *
2696 * This handles both soft and hard resets (see PDMVMRESET_F_XXX).
2697 *
2698 * @returns VBox status code.
2699 * @param pVM The cross context VM structure.
2700 * @thread EMT
2701 *
2702 * @remarks Caller is expected to clear the VM_FF_RESET force flag.
2703 */
2704VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetFF(PVM pVM)
2705{
2706 LogFlow(("VMR3ResetFF:\n"));
2707
2708 /*
2709 * First consult the firmware on whether this is a hard or soft reset.
2710 */
2711 uint32_t fResetFlags;
2712 bool fHardReset = PDMR3GetResetInfo(pVM, 0 /*fOverride*/, &fResetFlags);
2713 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
2714}
2715
2716
2717/**
2718 * For handling a CPU reset on triple fault.
2719 *
2720 * According to one mainboard manual, a CPU triple fault causes the 286 CPU to
2721 * send a SHUTDOWN signal to the chipset. The chipset responds by sending a
2722 * RESET signal to the CPU. So, it should be very similar to a soft/warm reset.
2723 *
2724 * @returns VBox status code.
2725 * @param pVM The cross context VM structure.
2726 * @thread EMT
2727 */
2728VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetTripleFault(PVM pVM)
2729{
2730 LogFlow(("VMR3ResetTripleFault:\n"));
2731
2732 /*
2733 * First consult the firmware on whether this is a hard or soft reset.
2734 */
2735 uint32_t fResetFlags;
2736 bool fHardReset = PDMR3GetResetInfo(pVM, PDMVMRESET_F_TRIPLE_FAULT, &fResetFlags);
2737 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
2738}
2739
2740
2741/**
2742 * Gets the user mode VM structure pointer given Pointer to the VM.
2743 *
2744 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
2745 * invalid (asserted).
2746 * @param pVM The cross context VM structure.
2747 * @sa VMR3GetVM, VMR3RetainUVM
2748 */
2749VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
2750{
2751 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
2752 return pVM->pUVM;
2753}
2754
2755
2756/**
2757 * Gets the shared VM structure pointer given the pointer to the user mode VM
2758 * structure.
2759 *
2760 * @returns Pointer to the VM.
2761 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
2762 * is currently associated with it.
2763 * @param pUVM The user mode VM handle.
2764 * @sa VMR3GetUVM
2765 */
2766VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
2767{
2768 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2769 return pUVM->pVM;
2770}
2771
2772
2773/**
2774 * Retain the user mode VM handle.
2775 *
2776 * @returns Reference count.
2777 * UINT32_MAX if @a pUVM is invalid.
2778 *
2779 * @param pUVM The user mode VM handle.
2780 * @sa VMR3ReleaseUVM
2781 */
2782VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
2783{
2784 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
2785 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
2786 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
2787 return cRefs;
2788}
2789
2790
/**
 * Does the final release of the UVM structure.
 *
 * Called by VMR3ReleaseUVM when the reference count reaches zero.  The order
 * of operations below matters: subsystems are terminated before the UVM
 * allocation they live in is freed.
 *
 * @param   pUVM    The user mode VM handle.
 */
static void vmR3DoReleaseUVM(PUVM pUVM)
{
    /*
     * Free the UVM.
     */
    Assert(!pUVM->pVM); /* the shared VM structure must already be gone */

    /* Free the VM name string first; it lives on the UVM MM heap. */
    MMR3HeapFree(pUVM->vm.s.pszName);
    pUVM->vm.s.pszName = NULL;

    /* Terminate the per-UVM memory and statistics subsystems. */
    MMR3TermUVM(pUVM);
    STAMR3TermUVM(pUVM);

    /* Invalidate the magic so stale handles are caught, release the TLS slot,
       and free the page-based UVM allocation (header + per-VCpu array). */
    ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
    RTTlsFree(pUVM->vm.s.idxTLS);
    RTMemPageFree(pUVM, RT_UOFFSETOF_DYN(UVM, aCpus[pUVM->cCpus]));
}
2813
2814
/**
 * Releases a reference to the user mode VM handle.
 *
 * When the count reaches zero the UVM structure is destroyed (see
 * vmR3DoReleaseUVM).
 *
 * @returns The new reference count, 0 if destroyed.
 *          UINT32_MAX if @a pUVM is invalid.
 *
 * @param   pUVM    The user mode VM handle.
 * @sa      VMR3RetainUVM
 */
VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
{
    /* NULL is tolerated here to simplify cleanup paths in callers. */
    if (!pUVM)
        return 0;
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
    uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
    if (!cRefs)
        vmR3DoReleaseUVM(pUVM);
    else
        AssertMsg(cRefs < _64K, ("%u\n", cRefs)); /* catch underflow/corruption */
    return cRefs;
}
2836
2837
2838/**
2839 * Gets the VM name.
2840 *
2841 * @returns Pointer to a read-only string containing the name. NULL if called
2842 * too early.
2843 * @param pUVM The user mode VM handle.
2844 */
2845VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
2846{
2847 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2848 return pUVM->vm.s.pszName;
2849}
2850
2851
2852/**
2853 * Gets the VM UUID.
2854 *
2855 * @returns pUuid on success, NULL on failure.
2856 * @param pUVM The user mode VM handle.
2857 * @param pUuid Where to store the UUID.
2858 */
2859VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
2860{
2861 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
2862 AssertPtrReturn(pUuid, NULL);
2863
2864 *pUuid = pUVM->vm.s.Uuid;
2865 return pUuid;
2866}
2867
2868
2869/**
2870 * Gets the current VM state.
2871 *
2872 * @returns The current VM state.
2873 * @param pVM The cross context VM structure.
2874 * @thread Any
2875 */
2876VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
2877{
2878 AssertMsgReturn(RT_VALID_ALIGNED_PTR(pVM, HOST_PAGE_SIZE), ("%p\n", pVM), VMSTATE_TERMINATED);
2879 VMSTATE enmVMState = pVM->enmVMState;
2880 return enmVMState >= VMSTATE_CREATING && enmVMState <= VMSTATE_TERMINATED ? enmVMState : VMSTATE_TERMINATED;
2881}
2882
2883
2884/**
2885 * Gets the current VM state.
2886 *
2887 * @returns The current VM state.
2888 * @param pUVM The user-mode VM handle.
2889 * @thread Any
2890 */
2891VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
2892{
2893 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
2894 if (RT_UNLIKELY(!pUVM->pVM))
2895 return VMSTATE_TERMINATED;
2896 return pUVM->pVM->enmVMState;
2897}
2898
2899
2900/**
2901 * Gets the state name string for a VM state.
2902 *
2903 * @returns Pointer to the state name. (readonly)
2904 * @param enmState The state.
2905 */
2906VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
2907{
2908 switch (enmState)
2909 {
2910 case VMSTATE_CREATING: return "CREATING";
2911 case VMSTATE_CREATED: return "CREATED";
2912 case VMSTATE_LOADING: return "LOADING";
2913 case VMSTATE_POWERING_ON: return "POWERING_ON";
2914 case VMSTATE_RESUMING: return "RESUMING";
2915 case VMSTATE_RUNNING: return "RUNNING";
2916 case VMSTATE_RUNNING_LS: return "RUNNING_LS";
2917 case VMSTATE_RESETTING: return "RESETTING";
2918 case VMSTATE_RESETTING_LS: return "RESETTING_LS";
2919 case VMSTATE_SOFT_RESETTING: return "SOFT_RESETTING";
2920 case VMSTATE_SOFT_RESETTING_LS: return "SOFT_RESETTING_LS";
2921 case VMSTATE_SUSPENDED: return "SUSPENDED";
2922 case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
2923 case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
2924 case VMSTATE_SUSPENDING: return "SUSPENDING";
2925 case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
2926 case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
2927 case VMSTATE_SAVING: return "SAVING";
2928 case VMSTATE_DEBUGGING: return "DEBUGGING";
2929 case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
2930 case VMSTATE_POWERING_OFF: return "POWERING_OFF";
2931 case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
2932 case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
2933 case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
2934 case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
2935 case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
2936 case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
2937 case VMSTATE_OFF: return "OFF";
2938 case VMSTATE_OFF_LS: return "OFF_LS";
2939 case VMSTATE_DESTROYING: return "DESTROYING";
2940 case VMSTATE_TERMINATED: return "TERMINATED";
2941
2942 default:
2943 AssertMsgFailed(("Unknown state %d\n", enmState));
2944 return "Unknown!\n";
2945 }
2946}
2947
2948
2949/**
2950 * Validates the state transition in strict builds.
2951 *
2952 * @returns true if valid, false if not.
2953 *
2954 * @param enmStateOld The old (current) state.
2955 * @param enmStateNew The proposed new state.
2956 *
2957 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
2958 * diagram (under State Machine Diagram).
2959 */
2960static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
2961{
2962#ifndef VBOX_STRICT
2963 RT_NOREF2(enmStateOld, enmStateNew);
2964#else
2965 switch (enmStateOld)
2966 {
2967 case VMSTATE_CREATING:
2968 AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2969 break;
2970
2971 case VMSTATE_CREATED:
2972 AssertMsgReturn( enmStateNew == VMSTATE_LOADING
2973 || enmStateNew == VMSTATE_POWERING_ON
2974 || enmStateNew == VMSTATE_POWERING_OFF
2975 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2976 break;
2977
2978 case VMSTATE_LOADING:
2979 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
2980 || enmStateNew == VMSTATE_LOAD_FAILURE
2981 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2982 break;
2983
2984 case VMSTATE_POWERING_ON:
2985 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2986 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
2987 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2988 break;
2989
2990 case VMSTATE_RESUMING:
2991 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
2992 /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
2993 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
2994 break;
2995
2996 case VMSTATE_RUNNING:
2997 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
2998 || enmStateNew == VMSTATE_SUSPENDING
2999 || enmStateNew == VMSTATE_RESETTING
3000 || enmStateNew == VMSTATE_SOFT_RESETTING
3001 || enmStateNew == VMSTATE_RUNNING_LS
3002 || enmStateNew == VMSTATE_DEBUGGING
3003 || enmStateNew == VMSTATE_FATAL_ERROR
3004 || enmStateNew == VMSTATE_GURU_MEDITATION
3005 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3006 break;
3007
3008 case VMSTATE_RUNNING_LS:
3009 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF_LS
3010 || enmStateNew == VMSTATE_SUSPENDING_LS
3011 || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
3012 || enmStateNew == VMSTATE_RESETTING_LS
3013 || enmStateNew == VMSTATE_SOFT_RESETTING_LS
3014 || enmStateNew == VMSTATE_RUNNING
3015 || enmStateNew == VMSTATE_DEBUGGING_LS
3016 || enmStateNew == VMSTATE_FATAL_ERROR_LS
3017 || enmStateNew == VMSTATE_GURU_MEDITATION_LS
3018 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3019 break;
3020
3021 case VMSTATE_RESETTING:
3022 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3023 break;
3024
3025 case VMSTATE_SOFT_RESETTING:
3026 AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3027 break;
3028
3029 case VMSTATE_RESETTING_LS:
3030 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING_LS
3031 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3032 break;
3033
3034 case VMSTATE_SOFT_RESETTING_LS:
3035 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING_LS
3036 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3037 break;
3038
3039 case VMSTATE_SUSPENDING:
3040 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3041 break;
3042
3043 case VMSTATE_SUSPENDING_LS:
3044 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3045 || enmStateNew == VMSTATE_SUSPENDED_LS
3046 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3047 break;
3048
3049 case VMSTATE_SUSPENDING_EXT_LS:
3050 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDING
3051 || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
3052 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3053 break;
3054
3055 case VMSTATE_SUSPENDED:
3056 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3057 || enmStateNew == VMSTATE_SAVING
3058 || enmStateNew == VMSTATE_RESETTING
3059 || enmStateNew == VMSTATE_SOFT_RESETTING
3060 || enmStateNew == VMSTATE_RESUMING
3061 || enmStateNew == VMSTATE_LOADING
3062 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3063 break;
3064
3065 case VMSTATE_SUSPENDED_LS:
3066 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3067 || enmStateNew == VMSTATE_SAVING
3068 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3069 break;
3070
3071 case VMSTATE_SUSPENDED_EXT_LS:
3072 AssertMsgReturn( enmStateNew == VMSTATE_SUSPENDED
3073 || enmStateNew == VMSTATE_SAVING
3074 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3075 break;
3076
3077 case VMSTATE_SAVING:
3078 AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3079 break;
3080
3081 case VMSTATE_DEBUGGING:
3082 AssertMsgReturn( enmStateNew == VMSTATE_RUNNING
3083 || enmStateNew == VMSTATE_POWERING_OFF
3084 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3085 break;
3086
3087 case VMSTATE_DEBUGGING_LS:
3088 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3089 || enmStateNew == VMSTATE_RUNNING_LS
3090 || enmStateNew == VMSTATE_POWERING_OFF_LS
3091 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3092 break;
3093
3094 case VMSTATE_POWERING_OFF:
3095 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3096 break;
3097
3098 case VMSTATE_POWERING_OFF_LS:
3099 AssertMsgReturn( enmStateNew == VMSTATE_POWERING_OFF
3100 || enmStateNew == VMSTATE_OFF_LS
3101 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3102 break;
3103
3104 case VMSTATE_OFF:
3105 AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3106 break;
3107
3108 case VMSTATE_OFF_LS:
3109 AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3110 break;
3111
3112 case VMSTATE_FATAL_ERROR:
3113 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3114 break;
3115
3116 case VMSTATE_FATAL_ERROR_LS:
3117 AssertMsgReturn( enmStateNew == VMSTATE_FATAL_ERROR
3118 || enmStateNew == VMSTATE_POWERING_OFF_LS
3119 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3120 break;
3121
3122 case VMSTATE_GURU_MEDITATION:
3123 AssertMsgReturn( enmStateNew == VMSTATE_DEBUGGING
3124 || enmStateNew == VMSTATE_POWERING_OFF
3125 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3126 break;
3127
3128 case VMSTATE_GURU_MEDITATION_LS:
3129 AssertMsgReturn( enmStateNew == VMSTATE_GURU_MEDITATION
3130 || enmStateNew == VMSTATE_DEBUGGING_LS
3131 || enmStateNew == VMSTATE_POWERING_OFF_LS
3132 , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3133 break;
3134
3135 case VMSTATE_LOAD_FAILURE:
3136 AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3137 break;
3138
3139 case VMSTATE_DESTROYING:
3140 AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3141 break;
3142
3143 case VMSTATE_TERMINATED:
3144 default:
3145 AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
3146 break;
3147 }
3148#endif /* VBOX_STRICT */
3149 return true;
3150}
3151
3152
3153/**
3154 * Does the state change callouts.
3155 *
3156 * The caller owns the AtStateCritSect.
3157 *
3158 * @param pVM The cross context VM structure.
3159 * @param pUVM The UVM handle.
3160 * @param enmStateNew The New state.
3161 * @param enmStateOld The old state.
3162 */
3163static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3164{
3165 LogRel(("Changing the VM state from '%s' to '%s'\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3166
3167 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3168 {
3169 pCur->pfnAtState(pUVM, VMMR3GetVTable(), enmStateNew, enmStateOld, pCur->pvUser);
3170 if ( enmStateNew != VMSTATE_DESTROYING
3171 && pVM->enmVMState == VMSTATE_DESTROYING)
3172 break;
3173 AssertMsg(pVM->enmVMState == enmStateNew,
3174 ("You are not allowed to change the state while in the change callback, except "
3175 "from destroying the VM. There are restrictions in the way the state changes "
3176 "are propagated up to the EM execution loop and it makes the program flow very "
3177 "difficult to follow. (%s, expected %s, old %s)\n",
3178 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3179 VMR3GetStateName(enmStateOld)));
3180 }
3181}
3182
3183
3184/**
3185 * Sets the current VM state, with the AtStatCritSect already entered.
3186 *
3187 * @param pVM The cross context VM structure.
3188 * @param pUVM The UVM handle.
3189 * @param enmStateNew The new state.
3190 * @param enmStateOld The old state.
3191 * @param fSetRatherThanClearFF The usual behavior is to clear the
3192 * VM_FF_CHECK_VM_STATE force flag, but for
3193 * some transitions (-> guru) we need to kick
3194 * the other EMTs to stop what they're doing.
3195 */
3196static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF)
3197{
3198 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3199
3200 AssertMsg(pVM->enmVMState == enmStateOld,
3201 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3202
3203 pUVM->vm.s.enmPrevVMState = enmStateOld;
3204 pVM->enmVMState = enmStateNew;
3205
3206 if (!fSetRatherThanClearFF)
3207 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3208 else if (pVM->cCpus > 0)
3209 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3210
3211 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3212}
3213
3214
3215/**
3216 * Sets the current VM state.
3217 *
3218 * @param pVM The cross context VM structure.
3219 * @param enmStateNew The new state.
3220 * @param enmStateOld The old state (for asserting only).
3221 */
3222static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3223{
3224 PUVM pUVM = pVM->pUVM;
3225 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3226
3227 RT_NOREF_PV(enmStateOld);
3228 AssertMsg(pVM->enmVMState == enmStateOld,
3229 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3230 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState, false /*fSetRatherThanClearFF*/);
3231
3232 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3233}
3234
3235
/**
 * Tries to perform a state transition.
 *
 * The transition pairs are tried in order against the current state; the
 * first pair whose old state matches wins and the transition is performed.
 *
 * @returns The 1-based ordinal of the succeeding transition.
 *          VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pszWho          Who is trying to change it.
 * @param   cTransitions    The number of transitions in the ellipsis.
 * @param   ...             Transition pairs; new, old.
 */
static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
{
    va_list va;
    VMSTATE enmStateNew = VMSTATE_CREATED;
    VMSTATE enmStateOld = VMSTATE_CREATED;

#ifdef VBOX_STRICT
    /*
     * Validate the input first.
     */
    va_start(va, cTransitions);
    for (unsigned i = 0; i < cTransitions; i++)
    {
        /* Enums are promoted to int when passed through '...'. */
        enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
        enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
        vmR3ValidateStateTransition(enmStateOld, enmStateNew);
    }
    va_end(va);
#endif

    /*
     * Grab the lock and see if any of the proposed transitions works out.
     */
    va_start(va, cTransitions);
    int rc = VERR_VM_INVALID_VM_STATE;
    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);

    VMSTATE enmStateCur = pVM->enmVMState;

    for (unsigned i = 0; i < cTransitions; i++)
    {
        enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
        enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
        if (enmStateCur == enmStateOld)
        {
            vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld, false /*fSetRatherThanClearFF*/);
            rc = i + 1; /* 1-based ordinal of the matching pair */
            break;
        }
    }

    if (RT_FAILURE(rc))
    {
        /*
         * Complain about it.
         */
        const char * const pszStateCur = VMR3GetStateName(enmStateCur);
        if (cTransitions == 1)
        {
            LogRel(("%s: %s -> %s failed, because the VM state is actually %s!\n",
                    pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), pszStateCur));
            VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS, N_("%s failed because the VM state is %s instead of %s"),
                       pszWho, pszStateCur, VMR3GetStateName(enmStateOld));
            AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
                             pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), pszStateCur));
        }
        else
        {
            /* Build a textual list of all attempted transitions for the
               error message; the va_list must be restarted to re-walk it. */
            char     szTransitions[4096];
            size_t   cchTransitions = 0;
            szTransitions[0] = '\0';
            va_end(va);
            va_start(va, cTransitions);
            for (unsigned i = 0; i < cTransitions; i++)
            {
                enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
                enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
                const char * const pszStateNew = VMR3GetStateName(enmStateNew);
                const char * const pszStateOld = VMR3GetStateName(enmStateOld);
                LogRel(("%s%s -> %s", i ? ", " : " ", pszStateOld, pszStateNew));
                cchTransitions += RTStrPrintf(&szTransitions[cchTransitions], sizeof(szTransitions) - cchTransitions,
                                              "%s%s -> %s", i ? ", " : " ", pszStateOld, pszStateNew);
            }
            Assert(cchTransitions < sizeof(szTransitions) - 64);

            LogRel(("%s: %s failed, because the VM state is actually %s!\n", pszWho, szTransitions, pszStateCur));
            VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
                       N_("%s failed because the current VM state, %s, was not found in the state transition table (%s)"),
                       pszWho, pszStateCur, szTransitions);
            AssertMsgFailed(("%s - state=%s, transitions: %s. Check the cTransitions passed us.\n",
                             pszWho, pszStateCur, szTransitions));
        }
    }

    RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    va_end(va);
    Assert(rc > 0 || rc < 0); /* rc is either a 1-based ordinal or a VERR_ code, never 0 */
    return rc;
}
3337
3338
3339/**
3340 * Interface used by EM to signal that it's entering the guru meditation state.
3341 *
3342 * This will notifying other threads.
3343 *
3344 * @returns true if the state changed to Guru, false if no state change.
3345 * @param pVM The cross context VM structure.
3346 */
3347VMMR3_INT_DECL(bool) VMR3SetGuruMeditation(PVM pVM)
3348{
3349 PUVM pUVM = pVM->pUVM;
3350 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3351
3352 VMSTATE enmStateCur = pVM->enmVMState;
3353 bool fRc = true;
3354 if (enmStateCur == VMSTATE_RUNNING)
3355 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING, true /*fSetRatherThanClearFF*/);
3356 else if (enmStateCur == VMSTATE_RUNNING_LS)
3357 {
3358 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS, true /*fSetRatherThanClearFF*/);
3359 SSMR3Cancel(pUVM);
3360 }
3361 else
3362 fRc = false;
3363
3364 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3365 return fRc;
3366}
3367
3368
/**
 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
 *
 * Performs the final DESTROYING -> TERMINATED state transition.
 *
 * @param   pVM     The cross context VM structure.
 */
void vmR3SetTerminated(PVM pVM)
{
    vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
}
3378
3379
3380/**
3381 * Checks if the VM was teleported and hasn't been fully resumed yet.
3382 *
3383 * This applies to both sides of the teleportation since we may leave a working
3384 * clone behind and the user is allowed to resume this...
3385 *
3386 * @returns true / false.
3387 * @param pVM The cross context VM structure.
3388 * @thread Any thread.
3389 */
3390VMMR3_INT_DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3391{
3392 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3393 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3394}
3395
3396
3397/**
3398 * Registers a VM state change callback.
3399 *
3400 * You are not allowed to call any function which changes the VM state from a
3401 * state callback.
3402 *
3403 * @returns VBox status code.
3404 * @param pUVM The VM handle.
3405 * @param pfnAtState Pointer to callback.
3406 * @param pvUser User argument.
3407 * @thread Any.
3408 */
3409VMMR3DECL(int) VMR3AtStateRegister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3410{
3411 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3412
3413 /*
3414 * Validate input.
3415 */
3416 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3417 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3418
3419 /*
3420 * Allocate a new record.
3421 */
3422 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3423 if (!pNew)
3424 return VERR_NO_MEMORY;
3425
3426 /* fill */
3427 pNew->pfnAtState = pfnAtState;
3428 pNew->pvUser = pvUser;
3429
3430 /* insert */
3431 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3432 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3433 *pUVM->vm.s.ppAtStateNext = pNew;
3434 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3435 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3436
3437 return VINF_SUCCESS;
3438}
3439
3440
3441/**
3442 * Deregisters a VM state change callback.
3443 *
3444 * @returns VBox status code.
3445 * @param pUVM The VM handle.
3446 * @param pfnAtState Pointer to callback.
3447 * @param pvUser User argument.
3448 * @thread Any.
3449 */
3450VMMR3DECL(int) VMR3AtStateDeregister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3451{
3452 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3453
3454 /*
3455 * Validate input.
3456 */
3457 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3458 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3459
3460 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3461
3462 /*
3463 * Search the list for the entry.
3464 */
3465 PVMATSTATE pPrev = NULL;
3466 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3467 while ( pCur
3468 && ( pCur->pfnAtState != pfnAtState
3469 || pCur->pvUser != pvUser))
3470 {
3471 pPrev = pCur;
3472 pCur = pCur->pNext;
3473 }
3474 if (!pCur)
3475 {
3476 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3477 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3478 return VERR_FILE_NOT_FOUND;
3479 }
3480
3481 /*
3482 * Unlink it.
3483 */
3484 if (pPrev)
3485 {
3486 pPrev->pNext = pCur->pNext;
3487 if (!pCur->pNext)
3488 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3489 }
3490 else
3491 {
3492 pUVM->vm.s.pAtState = pCur->pNext;
3493 if (!pCur->pNext)
3494 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3495 }
3496
3497 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3498
3499 /*
3500 * Free it.
3501 */
3502 pCur->pfnAtState = NULL;
3503 pCur->pNext = NULL;
3504 MMR3HeapFree(pCur);
3505
3506 return VINF_SUCCESS;
3507}
3508
3509
3510/**
3511 * Registers a VM error callback.
3512 *
3513 * @returns VBox status code.
3514 * @param pUVM The VM handle.
3515 * @param pfnAtError Pointer to callback.
3516 * @param pvUser User argument.
3517 * @thread Any.
3518 */
3519VMMR3DECL(int) VMR3AtErrorRegister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3520{
3521 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3522
3523 /*
3524 * Validate input.
3525 */
3526 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3527 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3528
3529 /*
3530 * Allocate a new record.
3531 */
3532 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3533 if (!pNew)
3534 return VERR_NO_MEMORY;
3535
3536 /* fill */
3537 pNew->pfnAtError = pfnAtError;
3538 pNew->pvUser = pvUser;
3539
3540 /* insert */
3541 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3542 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3543 *pUVM->vm.s.ppAtErrorNext = pNew;
3544 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3545 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3546
3547 return VINF_SUCCESS;
3548}
3549
3550
3551/**
3552 * Deregisters a VM error callback.
3553 *
3554 * @returns VBox status code.
3555 * @param pUVM The VM handle.
3556 * @param pfnAtError Pointer to callback.
3557 * @param pvUser User argument.
3558 * @thread Any.
3559 */
3560VMMR3DECL(int) VMR3AtErrorDeregister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3561{
3562 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3563
3564 /*
3565 * Validate input.
3566 */
3567 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3568 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3569
3570 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3571
3572 /*
3573 * Search the list for the entry.
3574 */
3575 PVMATERROR pPrev = NULL;
3576 PVMATERROR pCur = pUVM->vm.s.pAtError;
3577 while ( pCur
3578 && ( pCur->pfnAtError != pfnAtError
3579 || pCur->pvUser != pvUser))
3580 {
3581 pPrev = pCur;
3582 pCur = pCur->pNext;
3583 }
3584 if (!pCur)
3585 {
3586 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3587 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3588 return VERR_FILE_NOT_FOUND;
3589 }
3590
3591 /*
3592 * Unlink it.
3593 */
3594 if (pPrev)
3595 {
3596 pPrev->pNext = pCur->pNext;
3597 if (!pCur->pNext)
3598 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3599 }
3600 else
3601 {
3602 pUVM->vm.s.pAtError = pCur->pNext;
3603 if (!pCur->pNext)
3604 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3605 }
3606
3607 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3608
3609 /*
3610 * Free it.
3611 */
3612 pCur->pfnAtError = NULL;
3613 pCur->pNext = NULL;
3614 MMR3HeapFree(pCur);
3615
3616 return VINF_SUCCESS;
3617}
3618
3619
/**
 * Ellipsis to va_list wrapper for calling pfnAtError.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The at-error callback record to invoke.
 * @param   rc          The VBox status code to report.
 * @param   SRC_POS     The source position of the error.
 * @param   pszFormat   Error message format string.
 * @param   ...         Format arguments.
 */
static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    pCur->pfnAtError(pVM->pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
    va_end(va);
}
3630
3631
/**
 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
 * The message is found in VMINT.
 *
 * @param   pVM     The cross context VM structure.
 * @thread  EMT.
 */
VMMR3_INT_DECL(void) VMR3SetErrorWorker(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    /* The ring-0/GC side of VMSetErrorV is not implemented; this always trips. */
    AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Congrats!\n"));

    /*
     * Unpack the error (if we managed to format one).
     */
    PVMERROR pErr = pVM->vm.s.pErrorR3;
    const char *pszFile = NULL;
    const char *pszFunction = NULL;
    uint32_t iLine = 0;
    const char *pszMessage;
    int32_t rc = VERR_MM_HYPER_NO_MEMORY;
    if (pErr)
    {
        AssertCompile(sizeof(const char) == sizeof(uint8_t));
        /* The strings are stored as byte offsets relative to the VMERROR block. */
        if (pErr->offFile)
            pszFile = (const char *)pErr + pErr->offFile;
        iLine = pErr->iLine;
        if (pErr->offFunction)
            pszFunction = (const char *)pErr + pErr->offFunction;
        if (pErr->offMessage)
            pszMessage = (const char *)pErr + pErr->offMessage;
        else
            pszMessage = "No message!";
    }
    else
        pszMessage = "No message! (Failed to allocate memory to put the error message in!)";

    /*
     * Call the at error callbacks.
     */
    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
    ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
    for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
        vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
}
3679
3680
3681/**
3682 * Gets the number of errors raised via VMSetError.
3683 *
3684 * This can be used avoid double error messages.
3685 *
3686 * @returns The error count.
3687 * @param pUVM The VM handle.
3688 */
3689VMMR3_INT_DECL(uint32_t) VMR3GetErrorCount(PUVM pUVM)
3690{
3691 AssertPtrReturn(pUVM, 0);
3692 AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
3693 return pUVM->vm.s.cErrors;
3694}
3695
3696
/**
 * Creation time wrapper for vmR3SetErrorUV.
 *
 * Converts the ellipsis into a va_list and forwards to vmR3SetErrorUV,
 * returning @a rc so callers can use it as a return expression.
 *
 * @returns rc.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   SRC_POS         The source position of this error.
 * @param   pszFormat       Format string.
 * @param   ...             The arguments.
 * @thread  Any thread.
 */
static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
    va_end(va);
    return rc;
}
3716
3717
/**
 * Worker which calls everyone listening to the VM error messages.
 *
 * Logs the error, copies the message into the shared VM structure (when
 * available), and invokes every registered at-error callback.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   SRC_POS         The source position of this error.
 * @param   pszFormat       Format string.
 * @param   pArgs           Pointer to the format arguments.
 * @thread  EMT
 */
DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
{
    /*
     * Log the error.
     */
    /* Each consumer gets its own va_copy; a va_list may only be walked once. */
    va_list va3;
    va_copy(va3, *pArgs);
    RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
                   "VMSetError: %N\n",
                   pszFile, iLine, pszFunction, rc,
                   pszFormat, &va3);
    va_end(va3);

#ifdef LOG_ENABLED
    va_copy(va3, *pArgs);
    RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
                "%N\n",
                pszFile, iLine, pszFunction, rc,
                pszFormat, &va3);
    va_end(va3);
#endif

    /*
     * Make a copy of the message.
     */
    if (pUVM->pVM)
        vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);

    /*
     * Call the at error callbacks.
     */
    bool fCalledSomeone = false;
    RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
    ASMAtomicIncU32(&pUVM->vm.s.cErrors);
    for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
    {
        va_list va2;
        va_copy(va2, *pArgs);
        pCur->pfnAtError(pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
        va_end(va2);
        fCalledSomeone = true;
    }
    RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
}
3772
3773
3774/**
3775 * Sets the error message.
3776 *
3777 * @returns rc. Meaning you can do:
3778 * @code
3779 * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
3780 * @endcode
3781 * @param pUVM The user mode VM handle.
3782 * @param rc VBox status code.
3783 * @param SRC_POS Use RT_SRC_POS.
3784 * @param pszFormat Error message format string.
3785 * @param ... Error message arguments.
3786 * @thread Any
3787 */
3788VMMR3DECL(int) VMR3SetError(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
3789{
3790 va_list va;
3791 va_start(va, pszFormat);
3792 int rcRet = VMR3SetErrorV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
3793 va_end(va);
3794 return rcRet;
3795}
3796
3797
3798/**
3799 * Sets the error message.
3800 *
3801 * @returns rc. Meaning you can do:
3802 * @code
3803 * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
3804 * @endcode
3805 * @param pUVM The user mode VM handle.
3806 * @param rc VBox status code.
3807 * @param SRC_POS Use RT_SRC_POS.
3808 * @param pszFormat Error message format string.
3809 * @param va Error message arguments.
3810 * @thread Any
3811 */
3812VMMR3DECL(int) VMR3SetErrorV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
3813{
3814 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3815
3816 /* Take shortcut when called on EMT, skipping VM handle requirement + validation. */
3817 if (VMR3GetVMCPUThread(pUVM) != NIL_RTTHREAD)
3818 {
3819 va_list vaCopy;
3820 va_copy(vaCopy, va);
3821 vmR3SetErrorUV(pUVM, rc, RT_SRC_POS_ARGS, pszFormat, &vaCopy);
3822 va_end(vaCopy);
3823 return rc;
3824 }
3825
3826 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
3827 return VMSetErrorV(pUVM->pVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
3828}
3829
3830
3831
3832/**
3833 * Registers a VM runtime error callback.
3834 *
3835 * @returns VBox status code.
3836 * @param pUVM The user mode VM structure.
3837 * @param pfnAtRuntimeError Pointer to callback.
3838 * @param pvUser User argument.
3839 * @thread Any.
3840 */
3841VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3842{
3843 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3844
3845 /*
3846 * Validate input.
3847 */
3848 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3849 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3850
3851 /*
3852 * Allocate a new record.
3853 */
3854 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3855 if (!pNew)
3856 return VERR_NO_MEMORY;
3857
3858 /* fill */
3859 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
3860 pNew->pvUser = pvUser;
3861
3862 /* insert */
3863 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3864 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
3865 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
3866 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
3867 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3868
3869 return VINF_SUCCESS;
3870}
3871
3872
3873/**
3874 * Deregisters a VM runtime error callback.
3875 *
3876 * @returns VBox status code.
3877 * @param pUVM The user mode VM handle.
3878 * @param pfnAtRuntimeError Pointer to callback.
3879 * @param pvUser User argument.
3880 * @thread Any.
3881 */
3882VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
3883{
3884 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
3885
3886 /*
3887 * Validate input.
3888 */
3889 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
3890 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3891
3892 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3893
3894 /*
3895 * Search the list for the entry.
3896 */
3897 PVMATRUNTIMEERROR pPrev = NULL;
3898 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
3899 while ( pCur
3900 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
3901 || pCur->pvUser != pvUser))
3902 {
3903 pPrev = pCur;
3904 pCur = pCur->pNext;
3905 }
3906 if (!pCur)
3907 {
3908 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
3909 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3910 return VERR_FILE_NOT_FOUND;
3911 }
3912
3913 /*
3914 * Unlink it.
3915 */
3916 if (pPrev)
3917 {
3918 pPrev->pNext = pCur->pNext;
3919 if (!pCur->pNext)
3920 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
3921 }
3922 else
3923 {
3924 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
3925 if (!pCur->pNext)
3926 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
3927 }
3928
3929 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3930
3931 /*
3932 * Free it.
3933 */
3934 pCur->pfnAtRuntimeError = NULL;
3935 pCur->pNext = NULL;
3936 MMR3HeapFree(pCur);
3937
3938 return VINF_SUCCESS;
3939}
3940
3941
3942/**
3943 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
3944 * the state to FatalError(LS).
3945 *
3946 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
3947 * return code, see FNVMMEMTRENDEZVOUS.)
3948 *
3949 * @param pVM The cross context VM structure.
3950 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3951 * @param pvUser Ignored.
3952 */
3953static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
3954{
3955 NOREF(pVCpu);
3956 Assert(!pvUser); NOREF(pvUser);
3957
3958 /*
3959 * The first EMT thru here changes the state.
3960 */
3961 if (pVCpu->idCpu == pVM->cCpus - 1)
3962 {
3963 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
3964 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
3965 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
3966 if (RT_FAILURE(rc))
3967 return rc;
3968 if (rc == 2)
3969 SSMR3Cancel(pVM->pUVM);
3970
3971 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3972 }
3973
3974 /* This'll make sure we get out of whereever we are (e.g. REM). */
3975 return VINF_EM_SUSPEND;
3976}
3977
3978
3979/**
3980 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
3981 *
3982 * This does the common parts after the error has been saved / retrieved.
3983 *
3984 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
3985 *
3986 * @param pVM The cross context VM structure.
3987 * @param fFlags The error flags.
3988 * @param pszErrorId Error ID string.
3989 * @param pszFormat Format string.
3990 * @param pVa Pointer to the format arguments.
3991 */
3992static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
3993{
3994 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
3995 PUVM pUVM = pVM->pUVM;
3996
3997 /*
3998 * Take actions before the call.
3999 */
4000 int rc;
4001 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4002 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4003 vmR3SetRuntimeErrorChangeState, NULL);
4004 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4005 rc = VMR3Suspend(pUVM, VMSUSPENDREASON_RUNTIME_ERROR);
4006 else
4007 rc = VINF_SUCCESS;
4008
4009 /*
4010 * Do the callback round.
4011 */
4012 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4013 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4014 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4015 {
4016 va_list va;
4017 va_copy(va, *pVa);
4018 pCur->pfnAtRuntimeError(pUVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4019 va_end(va);
4020 }
4021 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4022
4023 return rc;
4024}
4025
4026
4027/**
4028 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
4029 */
4030static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
4031{
4032 va_list va;
4033 va_start(va, pszFormat);
4034 int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
4035 va_end(va);
4036 return rc;
4037}
4038
4039
4040/**
4041 * This is a worker function for RC and Ring-0 calls to VMSetError and
4042 * VMSetErrorV.
4043 *
4044 * The message is found in VMINT.
4045 *
4046 * @returns VBox status code, see VMSetRuntimeError.
4047 * @param pVM The cross context VM structure.
4048 * @thread EMT.
4049 */
4050VMMR3_INT_DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4051{
4052 VM_ASSERT_EMT(pVM);
4053 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4054
4055 /*
4056 * Unpack the error (if we managed to format one).
4057 */
4058 const char *pszErrorId = "SetRuntimeError";
4059 const char *pszMessage = "No message!";
4060 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4061 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4062 if (pErr)
4063 {
4064 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4065 if (pErr->offErrorId)
4066 pszErrorId = (const char *)pErr + pErr->offErrorId;
4067 if (pErr->offMessage)
4068 pszMessage = (const char *)pErr + pErr->offMessage;
4069 fFlags = pErr->fFlags;
4070 }
4071
4072 /*
4073 * Join cause with vmR3SetRuntimeErrorV.
4074 */
4075 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4076}
4077
4078
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszMessage      The error message residing the MM heap.  Ownership
 *                          is transferred to this function - the string is
 *                          freed before returning, on all paths.
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
{
#if 0 /** @todo make copy of the error msg. */
    /*
     * Make a copy of the message.
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);
#endif

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
    MMR3HeapFree(pszMessage); /* we own the heap message; release it now that the callbacks ran */
    return rc;
}
4110
4111
4112/**
4113 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
4114 *
4115 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4116 *
4117 * @param pVM The cross context VM structure.
4118 * @param fFlags The error flags.
4119 * @param pszErrorId Error ID string.
4120 * @param pszFormat Format string.
4121 * @param pVa Pointer to the format arguments.
4122 *
4123 * @thread EMT
4124 */
4125DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4126{
4127 /*
4128 * Make a copy of the message.
4129 */
4130 va_list va2;
4131 va_copy(va2, *pVa);
4132 vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
4133 va_end(va2);
4134
4135 /*
4136 * Join paths with VMR3SetRuntimeErrorWorker.
4137 */
4138 return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
4139}
4140
4141
4142/**
4143 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4144 *
4145 * This can be used avoid double error messages.
4146 *
4147 * @returns The runtime error count.
4148 * @param pUVM The user mode VM handle.
4149 */
4150VMMR3_INT_DECL(uint32_t) VMR3GetRuntimeErrorCount(PUVM pUVM)
4151{
4152 return pUVM->vm.s.cRuntimeErrors;
4153}
4154
4155
/**
 * Gets the ID of the virtual CPU associated with the calling thread.
 *
 * @returns The CPU ID.  NIL_VMCPUID if the thread isn't an EMT.
 *
 * @param   pVM             The cross context VM structure.
 */
VMMR3_INT_DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
{
    /* Each EMT has its per-CPU structure registered in TLS; other threads get NULL here. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
    return pUVCpu
         ? pUVCpu->idCpu
         : NIL_VMCPUID;
}
4170
4171
4172/**
4173 * Checks if the VM is long-mode (64-bit) capable or not.
4174 *
4175 * @returns true if VM can operate in long-mode, false otherwise.
4176 * @param pVM The cross context VM structure.
4177 */
4178VMMR3_INT_DECL(bool) VMR3IsLongModeAllowed(PVM pVM)
4179{
4180 switch (pVM->bMainExecutionEngine)
4181 {
4182 case VM_EXEC_ENGINE_HW_VIRT:
4183 return HMIsLongModeAllowed(pVM);
4184
4185 case VM_EXEC_ENGINE_NATIVE_API:
4186 return NEMHCIsLongModeAllowed(pVM);
4187
4188 case VM_EXEC_ENGINE_NOT_SET:
4189 AssertFailed();
4190 RT_FALL_THRU();
4191 default:
4192 return false;
4193 }
4194}
4195
4196
4197/**
4198 * Returns the native ID of the current EMT VMCPU thread.
4199 *
4200 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4201 * @param pVM The cross context VM structure.
4202 * @thread EMT
4203 */
4204VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4205{
4206 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4207
4208 if (!pUVCpu)
4209 return NIL_RTNATIVETHREAD;
4210
4211 return pUVCpu->vm.s.NativeThreadEMT;
4212}
4213
4214
4215/**
4216 * Returns the native ID of the current EMT VMCPU thread.
4217 *
4218 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4219 * @param pUVM The user mode VM structure.
4220 * @thread EMT
4221 */
4222VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4223{
4224 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4225
4226 if (!pUVCpu)
4227 return NIL_RTNATIVETHREAD;
4228
4229 return pUVCpu->vm.s.NativeThreadEMT;
4230}
4231
4232
4233/**
4234 * Returns the handle of the current EMT VMCPU thread.
4235 *
4236 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4237 * @param pUVM The user mode VM handle.
4238 * @thread EMT
4239 */
4240VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PUVM pUVM)
4241{
4242 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4243
4244 if (!pUVCpu)
4245 return NIL_RTTHREAD;
4246
4247 return pUVCpu->vm.s.ThreadEMT;
4248}
4249
4250
4251/**
4252 * Returns the handle of the current EMT VMCPU thread.
4253 *
4254 * @returns The IPRT thread handle.
4255 * @param pUVCpu The user mode CPU handle.
4256 * @thread EMT
4257 */
4258VMMR3_INT_DECL(RTTHREAD) VMR3GetThreadHandle(PUVMCPU pUVCpu)
4259{
4260 return pUVCpu->vm.s.ThreadEMT;
4261}
4262
4263
/**
 * Return the package and core ID of a CPU.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   idCpu           Virtual CPU to get the ID from.
 * @param   pidCpuCore      Where to store the core ID of the virtual CPU.
 * @param   pidCpuPackage   Where to store the package ID of the virtual CPU.
 *
 */
VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PUVM pUVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
{
    /*
     * Validate input.
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
    AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
    if (idCpu >= pVM->cCpus)
        return VERR_INVALID_CPU_ID;

    /*
     * Set return values.
     */
#ifdef VBOX_WITH_MULTI_CORE
    /* Multi-core topology: every vCPU is a core of package 0. */
    *pidCpuCore    = idCpu;
    *pidCpuPackage = 0;
#else
    /* Single-core topology: every vCPU is core 0 of its own package. */
    *pidCpuCore    = 0;
    *pidCpuPackage = idCpu;
#endif

    return VINF_SUCCESS;
}
4300
4301
/**
 * Worker for VMR3HotUnplugCpu.
 *
 * @returns VINF_EM_WAIT_SIPI (strict status code).
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The current CPU.
 */
static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
{
    PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Reset per CPU resources.
     *
     * Actually only needed for VT-x because the CPU seems to be still in some
     * paged mode and startup fails after a new hot plug event. SVM works fine
     * even without this.
     */
    Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
    PGMR3ResetCpu(pVM, pVCpu);
    PDMR3ResetCpu(pVCpu);
    TRPMR3ResetCpu(pVCpu);
    CPUMR3ResetCpu(pVM, pVCpu);
    EMR3ResetCpu(pVCpu);
    HMR3ResetCpu(pVCpu);
    NEMR3ResetCpu(pVCpu, false /*fInitIpi*/);
    /* Send the EMT into the wait-for-SIPI loop. */
    return VINF_EM_WAIT_SIPI;
}
4331
4332
/**
 * Hot-unplugs a CPU from the guest.
 *
 * @returns VBox status code.
 * @param   pUVM    The user mode VM handle.
 * @param   idCpu   Virtual CPU to perform the hot unplugging operation on.
 */
VMMR3DECL(int) VMR3HotUnplugCpu(PUVM pUVM, VMCPUID idCpu)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);

    /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
     *        broadcast requests.  Just note down somewhere that the CPU is
     *        offline and send it to SIPI wait.  Maybe modify VMCPUSTATE and push
     *        it out of the EM loops when offline. */
    return VMR3ReqCallNoWaitU(pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
}
4353
4354
/**
 * Hot-plugs a CPU on the guest.
 *
 * @returns VBox status code.
 * @param   pUVM    The user mode VM handle.
 * @param   idCpu   Virtual CPU to perform the hot plugging operation on.
 */
VMMR3DECL(int) VMR3HotPlugCpu(PUVM pUVM, VMCPUID idCpu)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);

    /** @todo r=bird: Just mark it online and make sure it waits on SIPI. */
    return VINF_SUCCESS;
}
4372
4373
/**
 * Changes the VMM execution cap.
 *
 * @returns VBox status code.
 * @param   pUVM                The user mode VM structure.
 * @param   uCpuExecutionCap    New CPU execution cap in percent, 1-100. Where
 *                              100 is max performance (default).
 */
VMMR3DECL(int) VMR3SetCpuExecutionCap(PUVM pUVM, uint32_t uCpuExecutionCap)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);

    Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
    /* Note: not called from EMT. */
    pVM->uCpuExecutionCap = uCpuExecutionCap;
    return VINF_SUCCESS;
}
4394
4395
4396/**
4397 * Control whether the VM should power off when resetting.
4398 *
4399 * @returns VBox status code.
4400 * @param pUVM The user mode VM handle.
4401 * @param fPowerOffInsteadOfReset Flag whether the VM should power off when
4402 * resetting.
4403 */
4404VMMR3DECL(int) VMR3SetPowerOffInsteadOfReset(PUVM pUVM, bool fPowerOffInsteadOfReset)
4405{
4406 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4407 PVM pVM = pUVM->pVM;
4408 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4409
4410 /* Note: not called from EMT. */
4411 pVM->vm.s.fPowerOffInsteadOfReset = fPowerOffInsteadOfReset;
4412 return VINF_SUCCESS;
4413}
4414
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette