VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp@ 13785

Last change on this file since 13785 was 13785, checked in by vboxsync, 16 years ago

More updates

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 38.0 KB
Line 
1/* $Id: VMEmt.cpp 13785 2008-11-04 13:11:37Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VM
27#include <VBox/tm.h>
28#include <VBox/dbgf.h>
29#include <VBox/em.h>
30#include <VBox/pdmapi.h>
31#include <VBox/rem.h>
32#include <VBox/tm.h>
33#include "VMInternal.h"
34#include <VBox/vm.h>
35#include <VBox/uvm.h>
36
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/semaphore.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44#include <iprt/time.h>
45
46
/**
 * The emulation thread main loop.
 *
 * Services execute-in-EMT requests, debugger forced actions and delayed
 * resets, and enters EMR3ExecuteVM() whenever servicing a request moves the
 * VM into the RUNNING state.  On exit it performs (or completes) the VM
 * destruction work for this thread.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pvArgs      Pointer to the user mode per-VCPU structure (UVMCPU),
 *                      not the UVM itself; the UVM is reached via pUVMCPU->pUVM.
 */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
{
    PUVMCPU pUVMCPU = (PUVMCPU)pvArgs;
    PUVM    pUVM    = pUVMCPU->pUVM;
    RTCPUID idCPU   = pUVMCPU->idCPU;   /* NOTE(review): not used below yet. */
    int     rc = VINF_SUCCESS;

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    /* Publish the per-VCPU structure via TLS so VMR3WaitForResume & friends can find it. */
    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVMCPU);
    AssertReleaseMsgReturn(RT_SUCCESS(rc), ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    /*
     * Init the native thread member.
     */
    pUVM->vm.s.NativeThreadEMT = RTThreadGetNative(ThreadSelf); /* @todo should go away */
    pUVMCPU->vm.s.NativeThreadEMT = RTThreadGetNative(ThreadSelf);

    /*
     * The request loop.
     */
    volatile VMSTATE enmBefore = VMSTATE_CREATING; /* volatile because of setjmp */
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    for (;;)
    {
        /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume) */
        if (setjmp(pUVMCPU->vm.s.emtJumpEnv) != 0)
        {
            rc = VINF_SUCCESS;
            break;
        }

        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMREQDEST_ANY);
                Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVM);
                if (VBOX_FAILURE(rc))
                    break;
            }
        }
        else
        {

            /*
             * Pending requests which needs servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMREQDEST_ANY);
                Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVM);
                if (VBOX_FAILURE(rc))
                    break;
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    VBOX_SUCCESS(rc)
            &&  pUVM->pVM
            &&  enmBefore != pUVM->pVM->enmVMState
            &&  pUVM->pVM->enmVMState == VMSTATE_RUNNING)
        {
            PVM pVM = pUVM->pVM;
            /* Run guest code until something (FF, error, state change) kicks us out. */
            rc = EMR3ExecuteVM(pVM);
            Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Vrc, enmVMState=%d\n", rc, pVM->enmVMState));
            if (    EMGetState(pVM) == EMSTATE_GURU_MEDITATION
                &&  pVM->enmVMState == VMSTATE_RUNNING)
                vmR3SetState(pVM, VMSTATE_GURU_MEDITATION);
        }

    } /* forever */


    /*
     * Exiting.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Vrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (pUVM->vm.s.fEMTDoesTheCleanup)
    {
        /* The EMT was told to do the final VM destruction itself (delayed destroy). */
        Log(("vmR3EmulationThread: executing delayed Destroy\n"));
        Assert(pUVM->pVM);
        vmR3Destroy(pUVM->pVM);
        vmR3DestroyFinalBitFromEMT(pUVM);
    }
    else
    {
        vmR3DestroyFinalBitFromEMT(pUVM);

        /* This thread is no longer the EMT. */
        pUVM->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    }
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
222
223
224/**
225 * Wait for VM to be resumed. Handle events like vmR3EmulationThread does.
226 * In case the VM is stopped, clean up and long jump to the main EMT loop.
227 *
228 * @returns VINF_SUCCESS or doesn't return
229 * @param pVM VM handle.
230 */
231VMMR3DECL(int) VMR3WaitForResume(PVM pVM)
232{
233 /*
234 * The request loop.
235 */
236 PUVMCPU pUVMCPU;
237 PUVM pUVM = pVM->pUVM;
238 VMSTATE enmBefore;
239 int rc;
240
241 pUVMCPU = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
242 AssertReturn(pUVMCPU, VERR_INTERNAL_ERROR);
243
244 for (;;)
245 {
246
247 /*
248 * Pending requests which needs servicing?
249 *
250 * We check for state changes in addition to status codes when
251 * servicing requests. (Look after the ifs.)
252 */
253 enmBefore = pVM->enmVMState;
254 if ( VM_FF_ISSET(pVM, VM_FF_TERMINATE)
255 || pUVM->vm.s.fTerminateEMT)
256 {
257 rc = VINF_EM_TERMINATE;
258 break;
259 }
260 else if (pUVM->vm.s.pReqs)
261 {
262 /*
263 * Service execute in EMT request.
264 */
265 rc = VMR3ReqProcessU(pUVM, VMREQDEST_ANY);
266 Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
267 }
268 else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
269 {
270 /*
271 * Service the debugger request.
272 */
273 rc = DBGFR3VMMForcedAction(pVM);
274 Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
275 }
276 else if (VM_FF_ISSET(pVM, VM_FF_RESET))
277 {
278 /*
279 * Service a delay reset request.
280 */
281 rc = VMR3Reset(pVM);
282 VM_FF_CLEAR(pVM, VM_FF_RESET);
283 Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
284 }
285 else
286 {
287 /*
288 * Nothing important is pending, so wait for something.
289 */
290 rc = VMR3WaitU(pUVM);
291 if (VBOX_FAILURE(rc))
292 break;
293 }
294
295 /*
296 * Check for termination requests, these are extremely high priority.
297 */
298 if ( rc == VINF_EM_TERMINATE
299 || VM_FF_ISSET(pVM, VM_FF_TERMINATE)
300 || pUVM->vm.s.fTerminateEMT)
301 break;
302
303 /*
304 * Some requests (both VMR3Req* and the DBGF) can potentially
305 * resume or start the VM, in that case we'll get a change in
306 * VM status indicating that we're now running.
307 */
308 if ( VBOX_SUCCESS(rc)
309 && enmBefore != pVM->enmVMState
310 && pVM->enmVMState == VMSTATE_RUNNING)
311 {
312 /* Only valid exit reason. */
313 return VINF_SUCCESS;
314 }
315
316 } /* forever */
317
318 /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
319 longjmp(pUVMCPU->vm.s.emtJumpEnv, 1);
320}
321
322
323/**
324 * Gets the name of a halt method.
325 *
326 * @returns Pointer to a read only string.
327 * @param enmMethod The method.
328 */
329static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
330{
331 switch (enmMethod)
332 {
333 case VMHALTMETHOD_BOOTSTRAP: return "bootstrap";
334 case VMHALTMETHOD_DEFAULT: return "default";
335 case VMHALTMETHOD_OLD: return "old";
336 case VMHALTMETHOD_1: return "method1";
337 //case VMHALTMETHOD_2: return "method2";
338 case VMHALTMETHOD_GLOBAL_1: return "global1";
339 default: return "unknown";
340 }
341}
342
343
344/**
345 * The old halt loop.
346 *
347 * @param pUVM Pointer to the user mode VM structure.
348 */
349static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVM pUVM, const uint32_t fMask, uint64_t /* u64Now*/)
350{
351 /*
352 * Halt loop.
353 */
354 PVM pVM = pUVM->pVM;
355 int rc = VINF_SUCCESS;
356 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
357 //unsigned cLoops = 0;
358 for (;;)
359 {
360 /*
361 * Work the timers and check if we can exit.
362 * The poll call gives us the ticks left to the next event in
363 * addition to perhaps set an FF.
364 */
365 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
366 PDMR3Poll(pVM);
367 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
368 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
369 TMR3TimerQueuesDo(pVM);
370 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
371 if (VM_FF_ISPENDING(pVM, fMask))
372 break;
373 uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
374 if (VM_FF_ISPENDING(pVM, fMask))
375 break;
376
377 /*
378 * Wait for a while. Someone will wake us up or interrupt the call if
379 * anything needs our attention.
380 */
381 if (u64NanoTS < 50000)
382 {
383 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
384 /* spin */;
385 }
386 else
387 {
388 VMMR3YieldStop(pVM);
389 //uint64_t u64Start = RTTimeNanoTS();
390 if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
391 {
392 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
393 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, a);
394 RTThreadYield(); /* this is the best we can do here */
395 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, a);
396 }
397 else if (u64NanoTS < 2000000)
398 {
399 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
400 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
401 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1);
402 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
403 }
404 else
405 {
406 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
407 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
408 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
409 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
410 }
411 //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
412 //RTLogPrintf(" -> rc=%Vrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
413 }
414 if (rc == VERR_TIMEOUT)
415 rc = VINF_SUCCESS;
416 else if (VBOX_FAILURE(rc))
417 {
418 AssertRC(rc != VERR_INTERRUPTED);
419 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
420 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
421 VM_FF_SET(pVM, VM_FF_TERMINATE);
422 rc = VERR_INTERNAL_ERROR;
423 break;
424 }
425 }
426
427 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
428 return rc;
429}
430
431
432/**
433 * Initialize the configuration of halt method 1 & 2.
434 *
435 * @return VBox status code. Failure on invalid CFGM data.
436 * @param pVM The VM handle.
437 */
438static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
439{
440 /*
441 * The defaults.
442 */
443#if 1 /* DEBUGGING STUFF - REMOVE LATER */
444 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
445 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 2*1000000;
446 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 75*1000000;
447 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 30*1000000;
448 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 20*1000000;
449#else
450 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
451 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000;
452 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
453 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000;
454 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000;
455#endif
456
457 /*
458 * Query overrides.
459 *
460 * I don't have time to bother with niceities such as invalid value checks
461 * here right now. sorry.
462 */
463 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
464 if (pCfg)
465 {
466 uint32_t u32;
467 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
468 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
469 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
470 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
471 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
472 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
473 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
474 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
475 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
476 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
477 LogRel(("HaltedMethod1 config: %d/%d/%d/%d/%d\n",
478 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
479 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
480 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
481 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
482 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
483 }
484
485 return VINF_SUCCESS;
486}
487
488
489/**
490 * Initialize halt method 1.
491 *
492 * @return VBox status code.
493 * @param pUVM Pointer to the user mode VM structure.
494 */
495static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
496{
497 return vmR3HaltMethod12ReadConfigU(pUVM);
498}
499
500
501/**
502 * Method 1 - Block whenever possible, and when lagging behind
503 * switch to spinning for 10-30ms with occational blocking until
504 * the lag has been eliminated.
505 */
506static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
507{
508 PVM pVM = pUVM->pVM;
509
510 /*
511 * To simplify things, we decide up-front whether we should switch to spinning or
512 * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
513 * and that it will generate interrupts or other events that will cause us to exit
514 * the halt loop.
515 */
516 bool fBlockOnce = false;
517 bool fSpinning = false;
518 uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
519 if (u32CatchUpPct /* non-zero if catching up */)
520 {
521 if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
522 {
523 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
524 if (fSpinning)
525 {
526 uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
527 fBlockOnce = u64Now - pUVM->vm.s.Halt.Method12.u64LastBlockTS
528 > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
529 RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
530 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
531 }
532 else
533 {
534 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
535 pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
536 }
537 }
538 else
539 {
540 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
541 if (fSpinning)
542 pUVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
543 }
544 }
545 else if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
546 {
547 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
548 pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
549 }
550
551 /*
552 * Halt loop.
553 */
554 int rc = VINF_SUCCESS;
555 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
556 unsigned cLoops = 0;
557 for (;; cLoops++)
558 {
559 /*
560 * Work the timers and check if we can exit.
561 */
562 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
563 PDMR3Poll(pVM);
564 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
565 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
566 TMR3TimerQueuesDo(pVM);
567 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
568 if (VM_FF_ISPENDING(pVM, fMask))
569 break;
570
571 /*
572 * Estimate time left to the next event.
573 */
574 uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
575 if (VM_FF_ISPENDING(pVM, fMask))
576 break;
577
578 /*
579 * Block if we're not spinning and the interval isn't all that small.
580 */
581 if ( ( !fSpinning
582 || fBlockOnce)
583#if 1 /* DEBUGGING STUFF - REMOVE LATER */
584 && u64NanoTS >= 100000) /* 0.100 ms */
585#else
586 && u64NanoTS >= 250000) /* 0.250 ms */
587#endif
588 {
589 const uint64_t Start = pUVM->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
590 VMMR3YieldStop(pVM);
591
592 uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
593 if (cMilliSecs <= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
594 cMilliSecs = 1;
595 else
596 cMilliSecs -= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
597 //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
598 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
599 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, cMilliSecs);
600 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
601 if (rc == VERR_TIMEOUT)
602 rc = VINF_SUCCESS;
603 else if (VBOX_FAILURE(rc))
604 {
605 AssertRC(rc != VERR_INTERRUPTED);
606 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
607 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
608 VM_FF_SET(pVM, VM_FF_TERMINATE);
609 rc = VERR_INTERNAL_ERROR;
610 break;
611 }
612
613 /*
614 * Calc the statistics.
615 * Update averages every 16th time, and flush parts of the history every 64th time.
616 */
617 const uint64_t Elapsed = RTTimeNanoTS() - Start;
618 pUVM->vm.s.Halt.Method12.cNSBlocked += Elapsed;
619 if (Elapsed > u64NanoTS)
620 pUVM->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
621 pUVM->vm.s.Halt.Method12.cBlocks++;
622 if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0xf))
623 {
624 pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVM->vm.s.Halt.Method12.cNSBlockedTooLong / pUVM->vm.s.Halt.Method12.cBlocks;
625 if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0x3f))
626 {
627 pUVM->vm.s.Halt.Method12.cNSBlockedTooLong = pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
628 pUVM->vm.s.Halt.Method12.cBlocks = 0x40;
629 }
630 }
631 //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");
632
633 /*
634 * Clear the block once flag if we actually blocked.
635 */
636 if ( fBlockOnce
637 && Elapsed > 100000 /* 0.1 ms */)
638 fBlockOnce = false;
639 }
640 }
641 //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
642
643 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
644 return rc;
645}
646
647
648/**
649 * Initialize the global 1 halt method.
650 *
651 * @return VBox status code.
652 * @param pUVM Pointer to the user mode VM structure.
653 */
654static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
655{
656 return VINF_SUCCESS;
657}
658
659
/**
 * The global 1 halt method - Block in GMM (ring-0) and let it
 * try take care of the global scheduling of EMT threads.
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_INTERNAL_ERROR after a
 *          failed ring-0 halt call, in which case termination is flagged).
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fMask   The force-action mask that ends the halt when pending.
 * @param   u64Now  Timestamp (ns) taken by the caller; currently unused here.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
{
    PVM pVM = pUVM->pVM;

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Estimate time left to the next event.
         * (TMTimerPollGIP returns the wakeup deadline in GIP time.)
         */
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, &u64Delta);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta > 50000 /* 0.050ms */)
        {
            VMMR3YieldStop(pVM);
            if (VM_FF_ISPENDING(pVM, fMask))
                break;

            /* Hand the actual blocking over to GVMM in ring-0. */
            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, c);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, c);
            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS;
            else if (VBOX_FAILURE(rc))
            {
                AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Vrc\n", rc));
                ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }
        }
        /*
         * When spinning call upon the GVMM and do some wakups once
         * in a while, it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, d);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, d);
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
736
737
/**
 * The global 1 halt method - VMR3Wait() worker.
 *
 * Blocks in ring-0 (GVMM) in up-to-one-second chunks until an external
 * suspended-state force action becomes pending.
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_INTERNAL_ERROR after a
 *          failed ring-0 halt call, in which case termination is flagged).
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVM pUVM)
{
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);

    PVM pVM = pUVM->pVM;
    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
        if (rc == VERR_INTERRUPTED)
            rc = VINF_SUCCESS;
        else if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
779
780
781/**
782 * The global 1 halt method - VMR3NotifyFF() worker.
783 *
784 * @param pUVM Pointer to the user mode VM structure.
785 * @param fNotifiedREM See VMR3NotifyFF().
786 */
787static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVM pUVM, bool fNotifiedREM)
788{
789 if (pUVM->vm.s.fWait)
790 {
791 int rc = SUPCallVMMR0Ex(pUVM->pVM->pVMR0, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
792 AssertRC(rc);
793 }
794 else if (!fNotifiedREM)
795 REMR3NotifyFF(pUVM->pVM);
796}
797
798
/**
 * Bootstrap VMR3Wait() worker.
 *
 * Used before/while there may be no shared VM structure: waits on the user
 * mode event semaphore until a request arrives, a suspended-state force
 * action becomes pending (if pVM exists), or termination is flagged.
 *
 * @returns VBox status code (VINF_SUCCESS, or VERR_INTERNAL_ERROR after a
 *          failed semaphore wait, in which case termination is flagged).
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3BootstrapWait(PUVM pUVM)
{
    ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (pUVM->vm.s.pReqs)
            break;
        if (    pUVM->pVM
            &&  VM_FF_ISPENDING(pUVM->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
            break;
        if (pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
            ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
            /* pVM may not exist yet during bootstrap, so guard the FF set. */
            if (pUVM->pVM)
                VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
845
846
847/**
848 * Bootstrap VMR3NotifyFF() worker.
849 *
850 * @param pUVM Pointer to the user mode VM structure.
851 * @param fNotifiedREM See VMR3NotifyFF().
852 */
853static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVM pUVM, bool fNotifiedREM)
854{
855 if (pUVM->vm.s.fWait)
856 {
857 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
858 AssertRC(rc);
859 }
860}
861
862
863/**
864 * Default VMR3Wait() worker.
865 *
866 * @returns VBox status code.
867 * @param pUVM Pointer to the user mode VM structure.
868 */
869static DECLCALLBACK(int) vmR3DefaultWait(PUVM pUVM)
870{
871 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
872
873 PVM pVM = pUVM->pVM;
874 int rc = VINF_SUCCESS;
875 for (;;)
876 {
877 /*
878 * Check Relevant FFs.
879 */
880 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
881 break;
882
883 /*
884 * Wait for a while. Someone will wake us up or interrupt the call if
885 * anything needs our attention.
886 */
887 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
888 if (rc == VERR_TIMEOUT)
889 rc = VINF_SUCCESS;
890 else if (VBOX_FAILURE(rc))
891 {
892 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
893 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
894 VM_FF_SET(pVM, VM_FF_TERMINATE);
895 rc = VERR_INTERNAL_ERROR;
896 break;
897 }
898
899 }
900
901 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
902 return rc;
903}
904
905
906/**
907 * Default VMR3NotifyFF() worker.
908 *
909 * @param pUVM Pointer to the user mode VM structure.
910 * @param fNotifiedREM See VMR3NotifyFF().
911 */
912static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVM pUVM, bool fNotifiedREM)
913{
914 if (pUVM->vm.s.fWait)
915 {
916 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
917 AssertRC(rc);
918 }
919 else if (!fNotifiedREM)
920 REMR3NotifyFF(pUVM->pVM);
921}
922
923
/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 *
 * Each entry bundles the callbacks for one halt strategy; NULL entries mean
 * "no action required" for that stage (checked by the callers).
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method id. */
    VMHALTMETHOD enmHaltMethod;
    /** The init function for loading config and initialize variables. */
    DECLR3CALLBACKMEMBER(int,  pfnInit,(PUVM pUVM));
    /** The term function. */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    /** The halt function. */
    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVM pUVM, const uint32_t fMask, uint64_t u64Now));
    /** The wait function. */
    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVM pUVM));
    /** The notifyFF function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVM pUVM, bool fNotifiedREM));
} g_aHaltMethods[] =
{
    { VMHALTMETHOD_BOOTSTRAP, NULL,                NULL,   NULL,                vmR3BootstrapWait,   vmR3BootstrapNotifyFF },
    { VMHALTMETHOD_OLD,       NULL,                NULL,   vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyFF },
    { VMHALTMETHOD_1,         vmR3HaltMethod1Init, NULL,   vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyFF },
  //{ VMHALTMETHOD_2,         vmR3HaltMethod2Init, vmR3HaltMethod2Term, vmR3HaltMethod2DoHalt, vmR3HaltMethod2Wait, vmR3HaltMethod2NotifyFF },
    { VMHALTMETHOD_GLOBAL_1,  vmR3HaltGlobal1Init, NULL,   vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyFF },
};
950
951
952/**
953 * Notify the emulation thread (EMT) about pending Forced Action (FF).
954 *
955 * This function is called by thread other than EMT to make
956 * sure EMT wakes up and promptly service an FF request.
957 *
958 * @param pVM VM handle.
959 * @param fNotifiedREM Set if REM have already been notified. If clear the
960 * generic REMR3NotifyFF() method is called.
961 */
962VMMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)
963{
964 LogFlow(("VMR3NotifyFF:\n"));
965 PUVM pUVM = pVM->pUVM;
966 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
967}
968
969
970/**
971 * Notify the emulation thread (EMT) about pending Forced Action (FF).
972 *
973 * This function is called by thread other than EMT to make
974 * sure EMT wakes up and promptly service an FF request.
975 *
976 * @param pUVM Pointer to the user mode VM structure.
977 * @param fNotifiedREM Set if REM have already been notified. If clear the
978 * generic REMR3NotifyFF() method is called.
979 */
980VMMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM)
981{
982 LogFlow(("VMR3NotifyFF:\n"));
983 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
984}
985
986
/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * Also maintains per-second halt frequency/interval statistics used by the
 * halt methods, and suspends the yielder / notifies TM around the halt.
 *
 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM                 VM handle.
 * @param   fIgnoreInterrupts   If set the VM_FF_INTERRUPT flags is ignored.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) VMR3WaitHalted(PVM pVM, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    const uint32_t fMask = !fIgnoreInterrupts
        ? VM_FF_EXTERNAL_HALTED_MASK
        : VM_FF_EXTERNAL_HALTED_MASK & ~(VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC);
    if (VM_FF_ISPENDING(pVM, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting, while TM might have clock(s) running
     * only at certain times and need to be notified..
     */
    VMMR3YieldSuspend(pVM);
    TMNotifyStartOfHalt(pVM);

    /*
     * Record halt averages for the last second.
     */
    PUVM pUVM = pVM->pUVM;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pUVM->vm.s.u64HaltsStartTS;
    if (off > 1000000000 /* 1 sec */)
    {
        /* Restart the measurement window; fall back to 1 Hz on clock jumps (> 4s) or no samples. */
        if (off > _4G || !pUVM->vm.s.cHalts)
        {
            pUVM->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pUVM->vm.s.HaltFrequency = 1;
        }
        else
        {
            pUVM->vm.s.HaltInterval = (uint32_t)off / pUVM->vm.s.cHalts;
            pUVM->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVM->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVM->vm.s.u64HaltsStartTS = u64Now;
        pUVM->vm.s.cHalts = 0;
    }
    pUVM->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVM, fMask, u64Now);

    /*
     * Notify TM and resume the yielder
     */
    TMNotifyEndOfHalt(pVM);
    VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions));
    return rc;
}
1057
1058
1059/**
1060 * Suspended VM Wait.
1061 * Only a handful of forced actions will cause the function to
1062 * return to the caller.
1063 *
1064 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
1065 * case an appropriate status code is returned.
1066 * @param pUVM Pointer to the user mode VM structure.
1067 * @thread The emulation thread.
1068 */
1069VMMR3DECL(int) VMR3WaitU(PUVM pUVM)
1070{
1071 LogFlow(("VMR3WaitU:\n"));
1072
1073 /*
1074 * Check Relevant FFs.
1075 */
1076 PVM pVM = pUVM->pVM;
1077 if ( pVM
1078 && VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
1079 {
1080 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
1081 return VINF_SUCCESS;
1082 }
1083
1084 /*
1085 * Do waiting according to the halt method (so VMR3NotifyFF
1086 * doesn't have to special case anything).
1087 */
1088 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVM);
1089 LogFlow(("VMR3WaitU: returns %Vrc (FF %#x)\n", rc, pVM ? pVM->fForcedActions : 0));
1090 return rc;
1091}
1092
1093
/**
 * Changes the halt method.
 *
 * Resolves VMHALTMETHOD_DEFAULT via the VM/HaltMethod CFGM value (falling
 * back to GLOBAL_1), terminates the previously active method if any, and
 * initializes and installs the new one.
 *
 * @returns VBox status code.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   enmHaltMethod   The new halt method.
 * @thread  EMT.
 */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
{
    PVM pVM = pUVM->pVM; Assert(pVM);
    VM_ASSERT_EMT(pVM);
    AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);

    /*
     * Resolve default (can be overridden in the configuration).
     */
    if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
        if (VBOX_SUCCESS(rc))
        {
            enmHaltMethod = (VMHALTMETHOD)u32;
            if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
                return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
        }
        else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
            return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
        else
            /* No config override present: the compiled-in default. */
            enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
            //enmHaltMethod = VMHALTMETHOD_1;
            //enmHaltMethod = VMHALTMETHOD_OLD;
    }
    LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));

    /*
     * Find the descriptor.
     */
    unsigned i = 0;
    while (     i < RT_ELEMENTS(g_aHaltMethods)
           &&   g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
        i++;
    AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);

    /*
     * Terminate the old one.
     */
    if (    pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
        &&  g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
    {
        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
        pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
    }

    /*
     * Init the new one.
     */
    memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
    if (g_aHaltMethods[i].pfnInit)
    {
        int rc = g_aHaltMethods[i].pfnInit(pUVM);
        AssertRCReturn(rc, rc);
    }
    pUVM->vm.s.enmHaltMethod = enmHaltMethod;

    /* Publish the index atomically; readers (e.g. VMR3NotifyFF) use it lock-free. */
    ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
    return VINF_SUCCESS;
}
1163
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette