VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp@ 2833

Last change on this file since 2833 was 2830, checked in by vboxsync, 18 years ago

Halt loop - work in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 19.5 KB
Line 
1/* $Id: VMEmt.cpp 2830 2007-05-23 19:53:38Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VM
27#include <VBox/tm.h>
28#include <VBox/dbgf.h>
29#include <VBox/em.h>
30#include <VBox/pdm.h>
31#include <VBox/rem.h>
32#include "VMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <iprt/assert.h>
38#include <iprt/asm.h>
39#include <iprt/semaphore.h>
40#include <iprt/thread.h>
41#include <iprt/time.h>
42
43
44
45/**
46 * The emulation thread.
47 *
48 * @returns Thread exit code.
49 * @param ThreadSelf The handle to the executing thread.
50 * @param pvArgs Pointer to a VMEMULATIONTHREADARGS structure.
51 */
52DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
53{
54 PVMEMULATIONTHREADARGS pArgs = (PVMEMULATIONTHREADARGS)pvArgs;
55 AssertReleaseMsg(pArgs && pArgs->pVM, ("Invalid arguments to the emulation thread!\n"));
56
57 /*
58 * Init the native thread member.
59 */
60 PVM pVM = pArgs->pVM;
61 pVM->NativeThreadEMT = RTThreadGetNative(ThreadSelf);
62
63 /*
64 * The request loop.
65 */
66 VMSTATE enmBefore;
67 int rc;
68 Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pVM=%p\n", ThreadSelf, pVM));
69 for (;;)
70 {
71 /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume) */
72 if (setjmp(pVM->vm.s.emtJumpEnv) != 0)
73 {
74 rc = VINF_SUCCESS;
75 break;
76 }
77
78 /*
79 * Pending requests which needs servicing?
80 *
81 * We check for state changes in addition to status codes when
82 * servicing requests. (Look after the ifs.)
83 */
84 enmBefore = pVM->enmVMState;
85 if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
86 {
87 rc = VINF_EM_TERMINATE;
88 break;
89 }
90 else if (pVM->vm.s.pReqs)
91 {
92 /*
93 * Service execute in EMT request.
94 */
95 rc = VMR3ReqProcess(pVM);
96 Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
97 }
98 else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
99 {
100 /*
101 * Service the debugger request.
102 */
103 rc = DBGFR3VMMForcedAction(pVM);
104 Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
105 }
106 else if (VM_FF_ISSET(pVM, VM_FF_RESET))
107 {
108 /*
109 * Service a delay reset request.
110 */
111 rc = VMR3Reset(pVM);
112 VM_FF_CLEAR(pVM, VM_FF_RESET);
113 Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
114 }
115 else
116 {
117 /*
118 * Nothing important is pending, so wait for something.
119 */
120 rc = VMR3Wait(pVM);
121 if (VBOX_FAILURE(rc))
122 break;
123 }
124
125 /*
126 * Check for termination requests, these are extremely high priority.
127 */
128 if ( rc == VINF_EM_TERMINATE
129 || VM_FF_ISSET(pVM, VM_FF_TERMINATE))
130 break;
131
132 /*
133 * Some requests (both VMR3Req* and the DBGF) can potentially
134 * resume or start the VM, in that case we'll get a change in
135 * VM status indicating that we're now running.
136 */
137 if ( VBOX_SUCCESS(rc)
138 && enmBefore != pVM->enmVMState
139 && (pVM->enmVMState == VMSTATE_RUNNING))
140 {
141 rc = EMR3ExecuteVM(pVM);
142 Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Vrc, enmVMState=%d\n", rc, pVM->enmVMState));
143 }
144
145 } /* forever */
146
147
148 /*
149 * Exiting.
150 */
151 Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pVM=%p rc=%Vrc enmBefore=%d enmVMState=%d\n",
152 ThreadSelf, pVM, rc, enmBefore, pVM->enmVMState));
153 if (pVM->vm.s.fEMTDoesTheCleanup)
154 {
155 Log(("vmR3EmulationThread: executing delayed Destroy\n"));
156 vmR3Destroy(pVM);
157 vmR3DestroyFinalBit(pVM);
158 Log(("vmR3EmulationThread: EMT is terminated.\n"));
159 }
160 else
161 {
162 /* we don't reset ThreadEMT here because it's used in waiting. */
163 pVM->NativeThreadEMT = NIL_RTNATIVETHREAD;
164 }
165 return rc;
166}
167
168/**
169 * Wait for VM to be resumed. Handle events like vmR3EmulationThread does.
170 * In case the VM is stopped, clean up and long jump to the main EMT loop.
171 *
172 * @returns VINF_SUCCESS or doesn't return
173 * @param pVM VM handle.
174 */
VMR3DECL(int) VMR3WaitForResume(PVM pVM)
{
    /*
     * The request loop.
     *
     * Mirrors the main loop in vmR3EmulationThread(): service whatever is
     * pending and wait otherwise, until either the VM is running again
     * (normal return) or termination is requested (longjmp back to the
     * EMT main loop, which performs the cleanup).
     */
    VMSTATE enmBefore;  /* state snapshot taken before servicing, used to detect transitions */
    int rc;
    for (;;)
    {

        /*
         * Pending requests which need servicing?
         *
         * We check for state changes in addition to status codes when
         * servicing requests. (Look after the ifs.)
         */
        enmBefore = pVM->enmVMState;
        if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
        {
            /* Termination is the highest priority request. */
            rc = VINF_EM_TERMINATE;
            break;
        }
        else if (pVM->vm.s.pReqs)
        {
            /*
             * Service execute in EMT request.
             */
            rc = VMR3ReqProcess(pVM);
            Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            /*
             * Service the debugger request.
             */
            rc = DBGFR3VMMForcedAction(pVM);
            Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_RESET))
        {
            /*
             * Service a delayed reset request.
             */
            rc = VMR3Reset(pVM);
            VM_FF_CLEAR(pVM, VM_FF_RESET);
            Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else
        {
            /*
             * Nothing important is pending, so wait for something.
             */
            rc = VMR3Wait(pVM);
            if (VBOX_FAILURE(rc))
                break;
        }

        /*
         * Check for termination requests, these are extremely high priority.
         */
        if (    rc == VINF_EM_TERMINATE
            ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE))
            break;

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    VBOX_SUCCESS(rc)
            &&  enmBefore != pVM->enmVMState
            &&  (pVM->enmVMState == VMSTATE_RUNNING))
        {
            /* Only valid exit reason. */
            return VINF_SUCCESS;
        }

    } /* forever */

    /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
    longjmp(pVM->vm.s.emtJumpEnv, 1);
}
257
258/**
259 * Notify the emulation thread (EMT) about pending Forced Action (FF).
260 *
 * This function is called by threads other than EMT to make
262 * sure EMT wakes up and promptly service an FF request.
263 *
264 * @param pVM VM handle.
265 * @param fNotifiedREM Set if REM have already been notified. If clear the
266 * generic REMR3NotifyFF() method is called.
267 */
268VMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)
269{
270 LogFlow(("VMR3NotifyFF:\n"));
271 if (pVM->vm.s.fWait)
272 {
273 int rc = RTSemEventSignal(pVM->vm.s.EventSemWait);
274 AssertRC(rc);
275 }
276 else if (!fNotifiedREM)
277 REMR3NotifyFF(pVM);
278}
279
280
281/**
282 * The old halt loop.
283 */
284DECLCALLBACK(int) vmR3WaitHaltedOld(PVM pVM, const uint32_t fMask)
285{
286 /*
287 * Halt loop.
288 */
289 int rc = VINF_SUCCESS;
290 ASMAtomicXchgU32(&pVM->vm.s.fWait, 1);
291 //unsigned cLoops = 0;
292 for (;;)
293 {
294 /*
295 * Work the timers and check if we can exit.
296 * The poll call gives us the ticks left to the next event in
297 * addition to perhaps set an FF.
298 */
299 STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltPoll, a);
300 PDMR3Poll(pVM);
301 STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltPoll, a);
302 STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltTimers, b);
303 TMR3TimerQueuesDo(pVM);
304 STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltTimers, b);
305 if (VM_FF_ISPENDING(pVM, fMask))
306 break;
307 uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
308 if (VM_FF_ISPENDING(pVM, fMask))
309 break;
310
311 /*
312 * Wait for a while. Someone will wake us up or interrupt the call if
313 * anything needs our attention.
314 */
315 if (u64NanoTS < 50000)
316 {
317 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
318 /* spin */;
319 }
320 else
321 {
322 VMMR3YieldStop(pVM);
323 //uint64_t u64Start = RTTimeNanoTS();
324 if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
325 {
326 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
327 STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltYield, a);
328 RTThreadYield(); /* this is the best we can do here */
329 STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltYield, a);
330 }
331 else if (u64NanoTS < 2000000)
332 {
333 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
334 STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltBlock, a);
335 rc = RTSemEventWait(pVM->vm.s.EventSemWait, 1);
336 STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltBlock, a);
337 }
338 else
339 {
340 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
341 STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltBlock, a);
342 rc = RTSemEventWait(pVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
343 STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltBlock, a);
344 }
345 //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
346 //RTLogPrintf(" -> rc=%Vrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
347 }
348 if (rc == VERR_TIMEOUT)
349 rc = VINF_SUCCESS;
350 else if (VBOX_FAILURE(rc))
351 {
352 AssertRC(rc != VERR_INTERRUPTED);
353 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
354 VM_FF_SET(pVM, VM_FF_TERMINATE);
355 rc = VERR_INTERNAL_ERROR;
356 break;
357 }
358 }
359
360 return rc;
361}
362
363
364/**
365 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occasional blocking until
367 * the lag has been eliminated.
368 */
369DECLCALLBACK(int) vmR3WaitHaltedMethod1(PVM pVM, const uint32_t fMask, uint64_t u64Now)
370{
371 /*
372 * To simplify things, we decide up-front whether we should switch
373 * to spinning or not. This makes some assumptions about the cause
374 * of the spinning (PIT/RTC/PCNet) and that it will generate interrupts
375 * or other events that means we should exit the halt loop.
376 */
377 bool fBlockOnce = false;
378 bool fSpinning = false;
379 if (TMVirtualSyncGetCatchUpPct(pVM) /* non-zero if catching up */)
380 {
381 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
382 fSpinning = u64Lag > 25000000 /* 25ms */;
383 if (fSpinning)
384 {
385 if (!pVM->vm.s.Halt.Method12.u64StartSpinTS)
386 pVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
387 else if (u64Now - pVM->vm.s.Halt.Method12.u64LastBlockTS > 32000000 /* 32ms */)
388 fBlockOnce = true;
389 }
390 else if (pVM->vm.s.Halt.Method12.u64StartSpinTS)
391 pVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
392 }
393 else if (pVM->vm.s.Halt.Method12.u64StartSpinTS)
394 pVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
395
396 /*
397 * Halt loop.
398 */
399 int rc = VINF_SUCCESS;
400 ASMAtomicXchgU32(&pVM->vm.s.fWait, 1);
401 unsigned cLoops = 0;
402 for (;; cLoops++)
403 {
404 /*
405 * Work the timers and check if we can exit.
406 */
407 STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltPoll, a);
408 PDMR3Poll(pVM);
409 STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltPoll, a);
410 STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltTimers, b);
411 TMR3TimerQueuesDo(pVM);
412 STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltTimers, b);
413 if (VM_FF_ISPENDING(pVM, fMask))
414 break;
415
416 /*
417 * Estimate time left to the next event.
418 */
419 uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
420 if (VM_FF_ISPENDING(pVM, fMask))
421 break;
422
423 /*
424 * Block if we're not spinning and the interval isn't all that small.
425 */
426 if ( ( !fSpinning
427 || fBlockOnce)
428 && u64NanoTS >= 250000) /* 0.250 ms */
429 {
430 const uint64_t Start = pVM->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
431 VMMR3YieldStop(pVM);
432
433 uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
434 if (cMilliSecs <= pVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
435 cMilliSecs = 1;
436 else
437 cMilliSecs -= pVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
438//RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
439 STAM_REL_PROFILE_START(&pVM->vm.s.StatHaltBlock, a);
440 rc = RTSemEventWait(pVM->vm.s.EventSemWait, cMilliSecs);
441 STAM_REL_PROFILE_STOP(&pVM->vm.s.StatHaltBlock, a);
442 if (rc == VERR_TIMEOUT)
443 rc = VINF_SUCCESS;
444 else if (VBOX_FAILURE(rc))
445 {
446 AssertRC(rc != VERR_INTERRUPTED);
447 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
448 VM_FF_SET(pVM, VM_FF_TERMINATE);
449 rc = VERR_INTERNAL_ERROR;
450 break;
451 }
452
453 /*
454 * Calc the statistics.
455 * Update averages every 16th time, and flush parts of the history every 64th time.
456 */
457 const uint64_t Elapsed = RTTimeNanoTS() - Start;
458 pVM->vm.s.Halt.Method12.cNSBlocked += Elapsed;
459 if (Elapsed > u64NanoTS)
460 pVM->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
461 pVM->vm.s.Halt.Method12.cBlocks++;
462 if (!(pVM->vm.s.Halt.Method12.cBlocks & 0xf))
463 {
464 pVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pVM->vm.s.Halt.Method12.cNSBlockedTooLong / pVM->vm.s.Halt.Method12.cBlocks;
465 if (!(pVM->vm.s.Halt.Method12.cBlocks & 0x3f))
466 {
467 pVM->vm.s.Halt.Method12.cNSBlockedTooLong = pVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
468 pVM->vm.s.Halt.Method12.cBlocks = 0x40;
469 }
470 }
471//RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");
472
473 /*
474 * Clear the block once flag if we actually blocked.
475 */
476 if ( fBlockOnce
477 && Elapsed > 100000 /* 0.1 ms */)
478 fBlockOnce = false;
479 }
480 }
481
482 return rc;
483}
484
485
486/**
487 * Halted VM Wait.
488 * Any external event will unblock the thread.
489 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
491 * case an appropriate status code is returned.
492 * @param pVM VM handle.
493 * @param fIgnoreInterrupts If set the VM_FF_INTERRUPT flags is ignored.
494 * @thread The emulation thread.
495 */
VMR3DECL(int) VMR3WaitHalted(PVM pVM, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     * Strip the APIC/PIC interrupt flags from the mask when the caller
     * asked us to ignore pending interrupts.
     */
    const uint32_t fMask = !fIgnoreInterrupts
        ? VM_FF_EXTERNAL_HALTED_MASK
        : VM_FF_EXTERNAL_HALTED_MASK & ~(VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC);
    if (VM_FF_ISPENDING(pVM, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting.
     */
    VMMR3YieldSuspend(pVM);

    /*
     * Record halt averages for the last second.
     * Roll the interval/frequency stats over once more than a second has
     * passed since the window started.
     */
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pVM->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        /* Fall back to defaults if the window is too wide for the 32-bit
           cast below or no halts were recorded at all. */
        if (off > _4G || !pVM->vm.s.cHalts)
        {
            pVM->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pVM->vm.s.HaltFrequency = 1;
        }
        else
        {
            pVM->vm.s.HaltInterval = (uint32_t)off / pVM->vm.s.cHalts;
            pVM->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pVM->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pVM->vm.s.u64HaltsStartTS = u64Now;
        pVM->vm.s.cHalts = 0;
    }
    pVM->vm.s.cHalts++;

    /*
     * Do the halt.
     * The old loop is the active one; method 1 is still work in progress.
     */
#if 1
    int rc = vmR3WaitHaltedOld(pVM, fMask);
#elif 0 /* work in progress */
    int rc = vmR3WaitHaltedMethod1(pVM, fMask, u64Now);
#else
# error "misconfigured halt"
#endif

    /*
     * Resume the yielder and tell the world we're not blocking.
     */
    ASMAtomicXchgU32(&pVM->vm.s.fWait, 0);
    VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions));
    return rc;
}
559
560
561/**
562 * Suspended VM Wait.
563 * Only a handful of forced actions will cause the function to
564 * return to the caller.
565 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
567 * case an appropriate status code is returned.
568 * @param pVM VM handle.
569 * @thread The emulation thread.
570 */
571VMR3DECL(int) VMR3Wait(PVM pVM)
572{
573 LogFlow(("VMR3Wait:\n"));
574
575 /*
576 * Check Relevant FFs.
577 */
578 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
579 {
580 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
581 return VINF_SUCCESS;
582 }
583
584 int rc = VINF_SUCCESS;
585 ASMAtomicXchgU32(&pVM->vm.s.fWait, 1);
586 for (;;)
587 {
588 /*
589 * Check Relevant FFs.
590 */
591 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
592 break;
593
594 /*
595 * Wait for a while. Someone will wake us up or interrupt the call if
596 * anything needs our attention.
597 */
598 rc = RTSemEventWait(pVM->vm.s.EventSemWait, 1000);
599 if (rc == VERR_TIMEOUT)
600 rc = VINF_SUCCESS;
601 else if (VBOX_FAILURE(rc))
602 {
603 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
604 VM_FF_SET(pVM, VM_FF_TERMINATE);
605 rc = VERR_INTERNAL_ERROR;
606 break;
607 }
608
609 }
610 ASMAtomicXchgU32(&pVM->vm.s.fWait, 0);
611
612 LogFlow(("VMR3Wait: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions));
613 return rc;
614}
615
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette