VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp@ 11945

Last change on this file since 11945 was 8795, checked in by vboxsync, 17 years ago

Fixed bad OFF -> GURU state transition.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 37.3 KB
Line 
1/* $Id: VMEmt.cpp 8795 2008-05-13 17:44:24Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VM
27#include <VBox/tm.h>
28#include <VBox/dbgf.h>
29#include <VBox/em.h>
30#include <VBox/pdmapi.h>
31#include <VBox/rem.h>
32#include "VMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/uvm.h>
35
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <iprt/assert.h>
39#include <iprt/asm.h>
40#include <iprt/semaphore.h>
41#include <iprt/string.h>
42#include <iprt/thread.h>
43#include <iprt/time.h>
44
45
46
47
48/**
49 * The emulation thread.
50 *
51 * @returns Thread exit code.
52 * @param ThreadSelf The handle to the executing thread.
53 * @param pvArgs Pointer to the user mode VM structure (UVM).
54 */
55DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
56{
57 PUVM pUVM = (PUVM)pvArgs;
58 AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
59 ("Invalid arguments to the emulation thread!\n"));
60
61 /*
62 * Init the native thread member.
63 */
64 pUVM->vm.s.NativeThreadEMT = RTThreadGetNative(ThreadSelf);
65
66 /*
67 * The request loop.
68 */
69 int rc = VINF_SUCCESS;
70 VMSTATE enmBefore = VMSTATE_CREATING;
71 Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
72 for (;;)
73 {
74 /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume) */
75 if (setjmp(pUVM->vm.s.emtJumpEnv) != 0)
76 {
77 rc = VINF_SUCCESS;
78 break;
79 }
80
81 /*
82 * During early init there is no pVM, so make a special path
83 * for that to keep things clearly separate.
84 */
85 if (!pUVM->pVM)
86 {
87 /*
88 * Check for termination first.
89 */
90 if (pUVM->vm.s.fTerminateEMT)
91 {
92 rc = VINF_EM_TERMINATE;
93 break;
94 }
95 if (pUVM->vm.s.pReqs)
96 {
97 /*
98 * Service execute in EMT request.
99 */
100 rc = VMR3ReqProcessU(pUVM);
101 Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
102 }
103 else
104 {
105 /*
106 * Nothing important is pending, so wait for something.
107 */
108 rc = VMR3WaitU(pUVM);
109 if (VBOX_FAILURE(rc))
110 break;
111 }
112 }
113 else
114 {
115
116 /*
117 * Pending requests which needs servicing?
118 *
119 * We check for state changes in addition to status codes when
120 * servicing requests. (Look after the ifs.)
121 */
122 PVM pVM = pUVM->pVM;
123 enmBefore = pVM->enmVMState;
124 if ( VM_FF_ISSET(pVM, VM_FF_TERMINATE)
125 || pUVM->vm.s.fTerminateEMT)
126 {
127 rc = VINF_EM_TERMINATE;
128 break;
129 }
130 if (pUVM->vm.s.pReqs)
131 {
132 /*
133 * Service execute in EMT request.
134 */
135 rc = VMR3ReqProcessU(pUVM);
136 Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
137 }
138 else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
139 {
140 /*
141 * Service the debugger request.
142 */
143 rc = DBGFR3VMMForcedAction(pVM);
144 Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
145 }
146 else if (VM_FF_ISSET(pVM, VM_FF_RESET))
147 {
148 /*
149 * Service a delayed reset request.
150 */
151 rc = VMR3Reset(pVM);
152 VM_FF_CLEAR(pVM, VM_FF_RESET);
153 Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
154 }
155 else
156 {
157 /*
158 * Nothing important is pending, so wait for something.
159 */
160 rc = VMR3WaitU(pUVM);
161 if (VBOX_FAILURE(rc))
162 break;
163 }
164
165 /*
166 * Check for termination requests, these have extremely high priority.
167 */
168 if ( rc == VINF_EM_TERMINATE
169 || VM_FF_ISSET(pVM, VM_FF_TERMINATE)
170 || pUVM->vm.s.fTerminateEMT)
171 break;
172 }
173
174 /*
175 * Some requests (both VMR3Req* and the DBGF) can potentially
176 * resume or start the VM, in that case we'll get a change in
177 * VM status indicating that we're now running.
178 */
179 if ( VBOX_SUCCESS(rc)
180 && pUVM->pVM
181 && enmBefore != pUVM->pVM->enmVMState
182 && pUVM->pVM->enmVMState == VMSTATE_RUNNING)
183 {
184 PVM pVM = pUVM->pVM;
185 rc = EMR3ExecuteVM(pVM);
186 Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Vrc, enmVMState=%d\n", rc, pVM->enmVMState));
187 if ( EMGetState(pVM) == EMSTATE_GURU_MEDITATION
188 && pVM->enmVMState == VMSTATE_RUNNING)
189 vmR3SetState(pVM, VMSTATE_GURU_MEDITATION);
190 }
191
192 } /* forever */
193
194
195 /*
196 * Exiting.
197 */
198 Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Vrc enmBefore=%d enmVMState=%d\n",
199 ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
200 if (pUVM->vm.s.fEMTDoesTheCleanup)
201 {
202 Log(("vmR3EmulationThread: executing delayed Destroy\n"));
203 Assert(pUVM->pVM);
204 vmR3Destroy(pUVM->pVM);
205 vmR3DestroyFinalBitFromEMT(pUVM);
206 }
207 else
208 {
209 vmR3DestroyFinalBitFromEMT(pUVM);
210
211 /* we don't reset ThreadEMT here because it's used in waiting. */
212 pUVM->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
213 }
214 Log(("vmR3EmulationThread: EMT is terminated.\n"));
215 return rc;
216}
217
218
219/**
220 * Wait for VM to be resumed. Handle events like vmR3EmulationThread does.
221 * In case the VM is stopped, clean up and long jump to the main EMT loop.
222 *
223 * @returns VINF_SUCCESS or doesn't return
224 * @param pVM VM handle.
225 */
226VMR3DECL(int) VMR3WaitForResume(PVM pVM)
227{
228 /*
229 * The request loop.
230 */
231 PUVM pUVM = pVM->pUVM;
232 VMSTATE enmBefore;
233 int rc;
234 for (;;)
235 {
236
237 /*
238 * Pending requests which needs servicing?
239 *
240 * We check for state changes in addition to status codes when
241 * servicing requests. (Look after the ifs.)
242 */
243 enmBefore = pVM->enmVMState;
244 if ( VM_FF_ISSET(pVM, VM_FF_TERMINATE)
245 || pUVM->vm.s.fTerminateEMT)
246 {
247 rc = VINF_EM_TERMINATE;
248 break;
249 }
250 else if (pUVM->vm.s.pReqs)
251 {
252 /*
253 * Service execute in EMT request.
254 */
255 rc = VMR3ReqProcessU(pUVM);
256 Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
257 }
258 else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
259 {
260 /*
261 * Service the debugger request.
262 */
263 rc = DBGFR3VMMForcedAction(pVM);
264 Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
265 }
266 else if (VM_FF_ISSET(pVM, VM_FF_RESET))
267 {
268 /*
269 * Service a delay reset request.
270 */
271 rc = VMR3Reset(pVM);
272 VM_FF_CLEAR(pVM, VM_FF_RESET);
273 Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
274 }
275 else
276 {
277 /*
278 * Nothing important is pending, so wait for something.
279 */
280 rc = VMR3WaitU(pUVM);
281 if (VBOX_FAILURE(rc))
282 break;
283 }
284
285 /*
286 * Check for termination requests, these are extremely high priority.
287 */
288 if ( rc == VINF_EM_TERMINATE
289 || VM_FF_ISSET(pVM, VM_FF_TERMINATE)
290 || pUVM->vm.s.fTerminateEMT)
291 break;
292
293 /*
294 * Some requests (both VMR3Req* and the DBGF) can potentially
295 * resume or start the VM, in that case we'll get a change in
296 * VM status indicating that we're now running.
297 */
298 if ( VBOX_SUCCESS(rc)
299 && enmBefore != pVM->enmVMState
300 && pVM->enmVMState == VMSTATE_RUNNING)
301 {
302 /* Only valid exit reason. */
303 return VINF_SUCCESS;
304 }
305
306 } /* forever */
307
308 /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
309 longjmp(pUVM->vm.s.emtJumpEnv, 1);
310}
311
312
313/**
314 * Gets the name of a halt method.
315 *
316 * @returns Pointer to a read only string.
317 * @param enmMethod The method.
318 */
319static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
320{
321 switch (enmMethod)
322 {
323 case VMHALTMETHOD_BOOTSTRAP: return "bootstrap";
324 case VMHALTMETHOD_DEFAULT: return "default";
325 case VMHALTMETHOD_OLD: return "old";
326 case VMHALTMETHOD_1: return "method1";
327 //case VMHALTMETHOD_2: return "method2";
328 case VMHALTMETHOD_GLOBAL_1: return "global1";
329 default: return "unknown";
330 }
331}
332
333
334/**
335 * The old halt loop.
336 *
337 * @param pUVM Pointer to the user mode VM structure.
338 */
339static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVM pUVM, const uint32_t fMask, uint64_t /* u64Now*/)
340{
341 /*
342 * Halt loop.
343 */
344 PVM pVM = pUVM->pVM;
345 int rc = VINF_SUCCESS;
346 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
347 //unsigned cLoops = 0;
348 for (;;)
349 {
350 /*
351 * Work the timers and check if we can exit.
352 * The poll call gives us the ticks left to the next event in
353 * addition to perhaps set an FF.
354 */
355 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
356 PDMR3Poll(pVM);
357 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
358 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
359 TMR3TimerQueuesDo(pVM);
360 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
361 if (VM_FF_ISPENDING(pVM, fMask))
362 break;
363 uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
364 if (VM_FF_ISPENDING(pVM, fMask))
365 break;
366
367 /*
368 * Wait for a while. Someone will wake us up or interrupt the call if
369 * anything needs our attention.
370 */
371 if (u64NanoTS < 50000)
372 {
373 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
374 /* spin */;
375 }
376 else
377 {
378 VMMR3YieldStop(pVM);
379 //uint64_t u64Start = RTTimeNanoTS();
380 if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
381 {
382 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
383 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, a);
384 RTThreadYield(); /* this is the best we can do here */
385 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, a);
386 }
387 else if (u64NanoTS < 2000000)
388 {
389 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
390 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
391 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1);
392 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
393 }
394 else
395 {
396 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
397 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
398 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
399 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
400 }
401 //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
402 //RTLogPrintf(" -> rc=%Vrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
403 }
404 if (rc == VERR_TIMEOUT)
405 rc = VINF_SUCCESS;
406 else if (VBOX_FAILURE(rc))
407 {
408 AssertRC(rc != VERR_INTERRUPTED);
409 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
410 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
411 VM_FF_SET(pVM, VM_FF_TERMINATE);
412 rc = VERR_INTERNAL_ERROR;
413 break;
414 }
415 }
416
417 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
418 return rc;
419}
420
421
422/**
423 * Initialize the configuration of halt method 1 & 2.
424 *
425 * @return VBox status code. Failure on invalid CFGM data.
426 * @param pVM The VM handle.
427 */
428static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
429{
430 /*
431 * The defaults.
432 */
433#if 1 /* DEBUGGING STUFF - REMOVE LATER */
434 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
435 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 2*1000000;
436 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 75*1000000;
437 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 30*1000000;
438 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 20*1000000;
439#else
440 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
441 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000;
442 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
443 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000;
444 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000;
445#endif
446
447 /*
448 * Query overrides.
449 *
450 * I don't have time to bother with niceities such as invalid value checks
451 * here right now. sorry.
452 */
453 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
454 if (pCfg)
455 {
456 uint32_t u32;
457 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
458 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
459 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
460 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
461 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
462 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
463 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
464 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
465 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
466 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
467 LogRel(("HaltedMethod1 config: %d/%d/%d/%d/%d\n",
468 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
469 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
470 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
471 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
472 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
473 }
474
475 return VINF_SUCCESS;
476}
477
478
479/**
480 * Initialize halt method 1.
481 *
482 * @return VBox status code.
483 * @param pUVM Pointer to the user mode VM structure.
484 */
485static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
486{
487 return vmR3HaltMethod12ReadConfigU(pUVM);
488}
489
490
491/**
492 * Method 1 - Block whenever possible, and when lagging behind
493 * switch to spinning for 10-30ms with occational blocking until
494 * the lag has been eliminated.
495 */
496static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
497{
498 PVM pVM = pUVM->pVM;
499
500 /*
501 * To simplify things, we decide up-front whether we should switch to spinning or
502 * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
503 * and that it will generate interrupts or other events that will cause us to exit
504 * the halt loop.
505 */
506 bool fBlockOnce = false;
507 bool fSpinning = false;
508 uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
509 if (u32CatchUpPct /* non-zero if catching up */)
510 {
511 if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
512 {
513 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
514 if (fSpinning)
515 {
516 uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
517 fBlockOnce = u64Now - pUVM->vm.s.Halt.Method12.u64LastBlockTS
518 > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
519 RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
520 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
521 }
522 else
523 {
524 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
525 pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
526 }
527 }
528 else
529 {
530 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
531 if (fSpinning)
532 pUVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
533 }
534 }
535 else if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
536 {
537 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
538 pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
539 }
540
541 /*
542 * Halt loop.
543 */
544 int rc = VINF_SUCCESS;
545 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
546 unsigned cLoops = 0;
547 for (;; cLoops++)
548 {
549 /*
550 * Work the timers and check if we can exit.
551 */
552 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
553 PDMR3Poll(pVM);
554 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
555 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
556 TMR3TimerQueuesDo(pVM);
557 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
558 if (VM_FF_ISPENDING(pVM, fMask))
559 break;
560
561 /*
562 * Estimate time left to the next event.
563 */
564 uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
565 if (VM_FF_ISPENDING(pVM, fMask))
566 break;
567
568 /*
569 * Block if we're not spinning and the interval isn't all that small.
570 */
571 if ( ( !fSpinning
572 || fBlockOnce)
573#if 1 /* DEBUGGING STUFF - REMOVE LATER */
574 && u64NanoTS >= 100000) /* 0.100 ms */
575#else
576 && u64NanoTS >= 250000) /* 0.250 ms */
577#endif
578 {
579 const uint64_t Start = pUVM->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
580 VMMR3YieldStop(pVM);
581
582 uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
583 if (cMilliSecs <= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
584 cMilliSecs = 1;
585 else
586 cMilliSecs -= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
587 //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
588 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
589 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, cMilliSecs);
590 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
591 if (rc == VERR_TIMEOUT)
592 rc = VINF_SUCCESS;
593 else if (VBOX_FAILURE(rc))
594 {
595 AssertRC(rc != VERR_INTERRUPTED);
596 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
597 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
598 VM_FF_SET(pVM, VM_FF_TERMINATE);
599 rc = VERR_INTERNAL_ERROR;
600 break;
601 }
602
603 /*
604 * Calc the statistics.
605 * Update averages every 16th time, and flush parts of the history every 64th time.
606 */
607 const uint64_t Elapsed = RTTimeNanoTS() - Start;
608 pUVM->vm.s.Halt.Method12.cNSBlocked += Elapsed;
609 if (Elapsed > u64NanoTS)
610 pUVM->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
611 pUVM->vm.s.Halt.Method12.cBlocks++;
612 if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0xf))
613 {
614 pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVM->vm.s.Halt.Method12.cNSBlockedTooLong / pUVM->vm.s.Halt.Method12.cBlocks;
615 if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0x3f))
616 {
617 pUVM->vm.s.Halt.Method12.cNSBlockedTooLong = pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
618 pUVM->vm.s.Halt.Method12.cBlocks = 0x40;
619 }
620 }
621 //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");
622
623 /*
624 * Clear the block once flag if we actually blocked.
625 */
626 if ( fBlockOnce
627 && Elapsed > 100000 /* 0.1 ms */)
628 fBlockOnce = false;
629 }
630 }
631 //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
632
633 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
634 return rc;
635}
636
637
638/**
639 * Initialize the global 1 halt method.
640 *
641 * @return VBox status code.
642 * @param pUVM Pointer to the user mode VM structure.
643 */
644static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
645{
646 return VINF_SUCCESS;
647}
648
649
650/**
651 * The global 1 halt method - Block in GMM (ring-0) and let it
652 * try take care of the global scheduling of EMT threads.
653 */
654static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
655{
656 PVM pVM = pUVM->pVM;
657
658 /*
659 * Halt loop.
660 */
661 int rc = VINF_SUCCESS;
662 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
663 unsigned cLoops = 0;
664 for (;; cLoops++)
665 {
666 /*
667 * Work the timers and check if we can exit.
668 */
669 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
670 PDMR3Poll(pVM);
671 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
672 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
673 TMR3TimerQueuesDo(pVM);
674 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
675 if (VM_FF_ISPENDING(pVM, fMask))
676 break;
677
678 /*
679 * Estimate time left to the next event.
680 */
681 uint64_t u64Delta;
682 uint64_t u64GipTime = TMTimerPollGIP(pVM, &u64Delta);
683 if (VM_FF_ISPENDING(pVM, fMask))
684 break;
685
686 /*
687 * Block if we're not spinning and the interval isn't all that small.
688 */
689 if (u64Delta > 50000 /* 0.050ms */)
690 {
691 VMMR3YieldStop(pVM);
692 if (VM_FF_ISPENDING(pVM, fMask))
693 break;
694
695 //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
696 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, c);
697 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
698 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, c);
699 if (rc == VERR_INTERRUPTED)
700 rc = VINF_SUCCESS;
701 else if (VBOX_FAILURE(rc))
702 {
703 AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Vrc\n", rc));
704 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
705 VM_FF_SET(pVM, VM_FF_TERMINATE);
706 rc = VERR_INTERNAL_ERROR;
707 break;
708 }
709 }
710 /*
711 * When spinning call upon the GVMM and do some wakups once
712 * in a while, it's not like we're actually busy or anything.
713 */
714 else if (!(cLoops & 0x1fff))
715 {
716 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, d);
717 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
718 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, d);
719 }
720 }
721 //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
722
723 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
724 return rc;
725}
726
727
728/**
729 * The global 1 halt method - VMR3Wait() worker.
730 *
731 * @returns VBox status code.
732 * @param pUVM Pointer to the user mode VM structure.
733 */
734static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVM pUVM)
735{
736 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
737
738 PVM pVM = pUVM->pVM;
739 int rc = VINF_SUCCESS;
740 for (;;)
741 {
742 /*
743 * Check Relevant FFs.
744 */
745 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
746 break;
747
748 /*
749 * Wait for a while. Someone will wake us up or interrupt the call if
750 * anything needs our attention.
751 */
752 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
753 if (rc == VERR_INTERRUPTED)
754 rc = VINF_SUCCESS;
755 else if (VBOX_FAILURE(rc))
756 {
757 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
758 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
759 VM_FF_SET(pVM, VM_FF_TERMINATE);
760 rc = VERR_INTERNAL_ERROR;
761 break;
762 }
763
764 }
765
766 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
767 return rc;
768}
769
770
771/**
772 * The global 1 halt method - VMR3NotifyFF() worker.
773 *
774 * @param pUVM Pointer to the user mode VM structure.
775 * @param fNotifiedREM See VMR3NotifyFF().
776 */
777static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVM pUVM, bool fNotifiedREM)
778{
779 if (pUVM->vm.s.fWait)
780 {
781 int rc = SUPCallVMMR0Ex(pUVM->pVM->pVMR0, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
782 AssertRC(rc);
783 }
784 else if (!fNotifiedREM)
785 REMR3NotifyFF(pUVM->pVM);
786}
787
788
789/**
790 * Bootstrap VMR3Wait() worker.
791 *
792 * @returns VBox status code.
793 * @param pUVM Pointer to the user mode VM structure.
794 */
795static DECLCALLBACK(int) vmR3BootstrapWait(PUVM pUVM)
796{
797 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
798
799 int rc = VINF_SUCCESS;
800 for (;;)
801 {
802 /*
803 * Check Relevant FFs.
804 */
805 if (pUVM->vm.s.pReqs)
806 break;
807 if ( pUVM->pVM
808 && VM_FF_ISPENDING(pUVM->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
809 break;
810 if (pUVM->vm.s.fTerminateEMT)
811 break;
812
813 /*
814 * Wait for a while. Someone will wake us up or interrupt the call if
815 * anything needs our attention.
816 */
817 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
818 if (rc == VERR_TIMEOUT)
819 rc = VINF_SUCCESS;
820 else if (VBOX_FAILURE(rc))
821 {
822 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
823 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
824 if (pUVM->pVM)
825 VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
826 rc = VERR_INTERNAL_ERROR;
827 break;
828 }
829
830 }
831
832 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
833 return rc;
834}
835
836
837/**
838 * Bootstrap VMR3NotifyFF() worker.
839 *
840 * @param pUVM Pointer to the user mode VM structure.
841 * @param fNotifiedREM See VMR3NotifyFF().
842 */
843static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVM pUVM, bool fNotifiedREM)
844{
845 if (pUVM->vm.s.fWait)
846 {
847 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
848 AssertRC(rc);
849 }
850}
851
852
853
854/**
855 * Default VMR3Wait() worker.
856 *
857 * @returns VBox status code.
858 * @param pUVM Pointer to the user mode VM structure.
859 */
860static DECLCALLBACK(int) vmR3DefaultWait(PUVM pUVM)
861{
862 ASMAtomicWriteBool(&pUVM->vm.s.fWait, true);
863
864 PVM pVM = pUVM->pVM;
865 int rc = VINF_SUCCESS;
866 for (;;)
867 {
868 /*
869 * Check Relevant FFs.
870 */
871 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
872 break;
873
874 /*
875 * Wait for a while. Someone will wake us up or interrupt the call if
876 * anything needs our attention.
877 */
878 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
879 if (rc == VERR_TIMEOUT)
880 rc = VINF_SUCCESS;
881 else if (VBOX_FAILURE(rc))
882 {
883 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
884 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
885 VM_FF_SET(pVM, VM_FF_TERMINATE);
886 rc = VERR_INTERNAL_ERROR;
887 break;
888 }
889
890 }
891
892 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
893 return rc;
894}
895
896
897/**
898 * Default VMR3NotifyFF() worker.
899 *
900 * @param pUVM Pointer to the user mode VM structure.
901 * @param fNotifiedREM See VMR3NotifyFF().
902 */
903static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVM pUVM, bool fNotifiedREM)
904{
905 if (pUVM->vm.s.fWait)
906 {
907 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
908 AssertRC(rc);
909 }
910 else if (!fNotifiedREM)
911 REMR3NotifyFF(pUVM->pVM);
912}
913
914
915/**
916 * Array with halt method descriptors.
917 * VMINT::iHaltMethod contains an index into this array.
918 */
919static const struct VMHALTMETHODDESC
920{
921 /** The halt method id. */
922 VMHALTMETHOD enmHaltMethod;
923 /** The init function for loading config and initialize variables. */
924 DECLR3CALLBACKMEMBER(int, pfnInit,(PUVM pUVM));
925 /** The term function. */
926 DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
927 /** The halt function. */
928 DECLR3CALLBACKMEMBER(int, pfnHalt,(PUVM pUVM, const uint32_t fMask, uint64_t u64Now));
929 /** The wait function. */
930 DECLR3CALLBACKMEMBER(int, pfnWait,(PUVM pUVM));
931 /** The notifyFF function. */
932 DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVM pUVM, bool fNotifiedREM));
933} g_aHaltMethods[] =
934{
935 { VMHALTMETHOD_BOOTSTRAP, NULL, NULL, NULL, vmR3BootstrapWait, vmR3BootstrapNotifyFF },
936 { VMHALTMETHOD_OLD, NULL, NULL, vmR3HaltOldDoHalt, vmR3DefaultWait, vmR3DefaultNotifyFF },
937 { VMHALTMETHOD_1, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait, vmR3DefaultNotifyFF },
938 //{ VMHALTMETHOD_2, vmR3HaltMethod2Init, vmR3HaltMethod2Term, vmR3HaltMethod2DoHalt, vmR3HaltMethod2Wait, vmR3HaltMethod2NotifyFF },
939 { VMHALTMETHOD_GLOBAL_1,vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyFF },
940};
941
942
943/**
944 * Notify the emulation thread (EMT) about pending Forced Action (FF).
945 *
946 * This function is called by thread other than EMT to make
947 * sure EMT wakes up and promptly service an FF request.
948 *
949 * @param pVM VM handle.
950 * @param fNotifiedREM Set if REM have already been notified. If clear the
951 * generic REMR3NotifyFF() method is called.
952 */
953VMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)
954{
955 LogFlow(("VMR3NotifyFF:\n"));
956 PUVM pUVM = pVM->pUVM;
957 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
958}
959
960
961/**
962 * Notify the emulation thread (EMT) about pending Forced Action (FF).
963 *
964 * This function is called by thread other than EMT to make
965 * sure EMT wakes up and promptly service an FF request.
966 *
967 * @param pUVM Pointer to the user mode VM structure.
968 * @param fNotifiedREM Set if REM have already been notified. If clear the
969 * generic REMR3NotifyFF() method is called.
970 */
971VMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM)
972{
973 LogFlow(("VMR3NotifyFF:\n"));
974 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
975}
976
977
978/**
979 * Halted VM Wait.
980 * Any external event will unblock the thread.
981 *
982 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
983 * case an appropriate status code is returned.
984 * @param pVM VM handle.
985 * @param fIgnoreInterrupts If set the VM_FF_INTERRUPT flags is ignored.
986 * @thread The emulation thread.
987 */
988VMR3DECL(int) VMR3WaitHalted(PVM pVM, bool fIgnoreInterrupts)
989{
990 LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));
991
992 /*
993 * Check Relevant FFs.
994 */
995 const uint32_t fMask = !fIgnoreInterrupts
996 ? VM_FF_EXTERNAL_HALTED_MASK
997 : VM_FF_EXTERNAL_HALTED_MASK & ~(VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC);
998 if (VM_FF_ISPENDING(pVM, fMask))
999 {
1000 LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
1001 return VINF_SUCCESS;
1002 }
1003
1004 /*
1005 * The yielder is suspended while we're halting.
1006 */
1007 VMMR3YieldSuspend(pVM);
1008
1009 /*
1010 * Record halt averages for the last second.
1011 */
1012 PUVM pUVM = pVM->pUVM;
1013 uint64_t u64Now = RTTimeNanoTS();
1014 int64_t off = u64Now - pUVM->vm.s.u64HaltsStartTS;
1015 if (off > 1000000000)
1016 {
1017 if (off > _4G || !pUVM->vm.s.cHalts)
1018 {
1019 pUVM->vm.s.HaltInterval = 1000000000 /* 1 sec */;
1020 pUVM->vm.s.HaltFrequency = 1;
1021 }
1022 else
1023 {
1024 pUVM->vm.s.HaltInterval = (uint32_t)off / pUVM->vm.s.cHalts;
1025 pUVM->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVM->vm.s.cHalts, 1000000000, (uint32_t)off);
1026 }
1027 pUVM->vm.s.u64HaltsStartTS = u64Now;
1028 pUVM->vm.s.cHalts = 0;
1029 }
1030 pUVM->vm.s.cHalts++;
1031
1032 /*
1033 * Do the halt.
1034 */
1035 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVM, fMask, u64Now);
1036
1037 /*
1038 * Resume the yielder.
1039 */
1040 VMMR3YieldResume(pVM);
1041
1042 LogFlow(("VMR3WaitHalted: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions));
1043 return rc;
1044}
1045
1046
1047/**
1048 * Suspended VM Wait.
1049 * Only a handful of forced actions will cause the function to
1050 * return to the caller.
1051 *
1052 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
1053 * case an appropriate status code is returned.
1054 * @param pUVM Pointer to the user mode VM structure.
1055 * @thread The emulation thread.
1056 */
1057VMR3DECL(int) VMR3WaitU(PUVM pUVM)
1058{
1059 LogFlow(("VMR3WaitU:\n"));
1060
1061 /*
1062 * Check Relevant FFs.
1063 */
1064 PVM pVM = pUVM->pVM;
1065 if ( pVM
1066 && VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
1067 {
1068 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
1069 return VINF_SUCCESS;
1070 }
1071
1072 /*
1073 * Do waiting according to the halt method (so VMR3NotifyFF
1074 * doesn't have to special case anything).
1075 */
1076 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVM);
1077 LogFlow(("VMR3WaitU: returns %Vrc (FF %#x)\n", rc, pVM ? pVM->fForcedActions : 0));
1078 return rc;
1079}
1080
1081
1082/**
1083 * Changes the halt method.
1084 *
1085 * @returns VBox status code.
1086 * @param pUVM Pointer to the user mode VM structure.
1087 * @param enmHaltMethod The new halt method.
1088 * @thread EMT.
1089 */
1090int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
1091{
1092 PVM pVM = pUVM->pVM; Assert(pVM);
1093 VM_ASSERT_EMT(pVM);
1094 AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);
1095
1096 /*
1097 * Resolve default (can be overridden in the configuration).
1098 */
1099 if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
1100 {
1101 uint32_t u32;
1102 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
1103 if (VBOX_SUCCESS(rc))
1104 {
1105 enmHaltMethod = (VMHALTMETHOD)u32;
1106 if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
1107 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
1108 }
1109 else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
1110 return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
1111 else
1112 enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
1113 //enmHaltMethod = VMHALTMETHOD_1;
1114 //enmHaltMethod = VMHALTMETHOD_OLD;
1115 }
1116 LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));
1117
1118 /*
1119 * Find the descriptor.
1120 */
1121 unsigned i = 0;
1122 while ( i < RT_ELEMENTS(g_aHaltMethods)
1123 && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
1124 i++;
1125 AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);
1126
1127 /*
1128 * Terminate the old one.
1129 */
1130 if ( pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
1131 && g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
1132 {
1133 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
1134 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
1135 }
1136
1137 /*
1138 * Init the new one.
1139 */
1140 memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
1141 if (g_aHaltMethods[i].pfnInit)
1142 {
1143 int rc = g_aHaltMethods[i].pfnInit(pUVM);
1144 AssertRCReturn(rc, rc);
1145 }
1146 pUVM->vm.s.enmHaltMethod = enmHaltMethod;
1147
1148 ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
1149 return VINF_SUCCESS;
1150}
1151
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette