VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp @ 19686

Last change on this file since 19686 was 19682, checked in by vboxsync, 16 years ago

Try to clean up after one VCPU goes into guru meditation mode. Release all owned locks so the other VCPUs will be unblocked.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.0 KB
/* $Id: VMEmt.cpp 19682 2009-05-14 10:15:44Z vboxsync $ */
/** @file
 * VM - Virtual Machine, The Emulation Thread.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VM
#include <VBox/tm.h>
#include <VBox/dbgf.h>
#include <VBox/em.h>
#include <VBox/pdmapi.h>
#include <VBox/rem.h>
#include "VMInternal.h"
#include <VBox/vm.h>
#include <VBox/uvm.h>
#include <VBox/mm.h>
#include <VBox/pgm.h>
#include <VBox/iom.h>
#include <VBox/pdm.h>

#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/time.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu);


/**
 * The emulation thread main function.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pvArgs      Pointer to the user mode per-VCpu structure (UVMCPU).
 */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
{
    PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
    return vmR3EmulationThreadWithId(ThreadSelf, pUVCpu, pUVCpu->idCpu);
}


/**
 * The emulation thread main function, with Virtual CPU ID for debugging.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
 */
int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
{
    PUVM pUVM = pUVCpu->pUVM;
    int  rc;

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    /*
     * The request loop.
     */
    rc = VINF_SUCCESS;
    Log(("vmR3EmulationThread: Emulation thread starting the day's work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
    for (;;)
    {
        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            /*
             * Only the first VCPU may initialize the VM during early init
             * and must therefore service all VMCPUID_ANY requests.
             * See also VMR3Create.
             */
            if (    pUVM->vm.s.pReqs
                &&  pUVCpu->idCpu == 0)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else if (pUVCpu->vm.s.pReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                    break;
            }
        }
        else
        {
            /*
             * Pending requests which need servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (pUVCpu->vm.s.pReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET_BIT))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                    break;
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially resume
         * or start the VM, in that case we'll get a change in VM status
         * indicating that we're now running.
         */
        if (    RT_SUCCESS(rc)
            &&  pUVM->pVM)
        {
            PVM    pVM   = pUVM->pVM;
            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            if (    pVM->enmVMState == VMSTATE_RUNNING
                &&  VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
            {
                rc = EMR3ExecuteVM(pVM, pVCpu);
                Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
                if (    EMGetState(pVCpu) == EMSTATE_GURU_MEDITATION
                    &&  pVM->enmVMState == VMSTATE_RUNNING)
                {
                    /* Release owned locks to make sure other VCPUs can continue in case they were waiting for one. */
                    MMR3ReleaseOwnedLocks(pVM);
                    PGMR3ReleaseOwnedLocks(pVM);
                    PDMR3ReleaseOwnedLocks(pVM);
                    IOMR3ReleaseOwnedLocks(pVM);
                    EMR3ReleaseOwnedLocks(pVM);

                    vmR3SetState(pVM, VMSTATE_GURU_MEDITATION);
                }
            }
        }

    } /* forever */


    /*
     * Exiting.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (pUVM->vm.s.fEMTDoesTheCleanup)
    {
        Log(("vmR3EmulationThread: executing delayed Destroy\n"));
        Assert(pUVM->pVM);
        vmR3Destroy(pUVM->pVM);
        vmR3DestroyFinalBitFromEMT(pUVM);
    }
    else
    {
        vmR3DestroyFinalBitFromEMT(pUVM);

        pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    }
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
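
/*
 * Note on the loop above: work is serviced in strict priority order --
 * termination first, then execute-in-any-EMT requests (VCPU 0 only during
 * early init), then per-VCPU requests, then the DBGF and delayed-reset
 * force action flags, and only then does the thread block in VMR3WaitU().
 * Once the VM reaches the RUNNING state the loop enters EMR3ExecuteVM(),
 * which does not return until execution halts, suspends or terminates.
 */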


/**
 * Gets the name of a halt method.
 *
 * @returns Pointer to a read only string.
 * @param   enmMethod   The method.
 */
static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
{
    switch (enmMethod)
    {
        case VMHALTMETHOD_BOOTSTRAP:    return "bootstrap";
        case VMHALTMETHOD_DEFAULT:      return "default";
        case VMHALTMETHOD_OLD:          return "old";
        case VMHALTMETHOD_1:            return "method1";
        //case VMHALTMETHOD_2:            return "method2";
        case VMHALTMETHOD_GLOBAL_1:     return "global1";
        default:                        return "unknown";
    }
}


/**
 * The old halt loop.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now */)
{
    /*
     * Halt loop.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps setting an FF.
         */
        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM, pVCpu));
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, a);
                RTThreadYield(); /* this is the best we can do here */
                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, a);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
            }
            else
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            AssertRC(rc != VERR_INTERRUPTED);
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
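
/*
 * The ladder above picks a wait strategy from the estimated time to the next
 * timer event: under 50 us it simply spins, under roughly 870 us it yields
 * the thread, under 2 ms it sleeps for 1 ms on the per-VCPU event semaphore,
 * and otherwise it sleeps for RT_MIN((u64NanoTS - 1ms) / 1ms, 15) milliseconds.
 */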


/**
 * Initialize the configuration of halt method 1 & 2.
 *
 * @return VBox status code. Failure on invalid CFGM data.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
{
    /*
     * The defaults.
     */
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg        =   2*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg        =  75*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg           =  30*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg            =  20*1000000;
#else
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg        =   5*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg        = 200*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg           =  20*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg            =   2*1000000;
#endif

    /*
     * Query overrides.
     *
     * I don't have time to bother with niceties such as invalid value checks
     * here right now. Sorry.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
    if (pCfg)
    {
        uint32_t u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
            pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
        LogRel(("HaltedMethod1 config: %d/%d/%d/%d/%d\n",
                pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
                pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
    }

    return VINF_SUCCESS;
}
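
/*
 * The overrides above live under the CFGM node "/VMM/HaltedMethod1"; the
 * interval and spinning threshold values are in nanoseconds (hence the
 * *1000000 defaults). A hedged sketch of setting one from the host side,
 * assuming the usual "VBoxInternal/" extradata-to-CFGM mapping applies to
 * this node ("MyVM" is a placeholder VM name, not verified against this
 * revision):
 *
 *   VBoxManage setextradata "MyVM" "VBoxInternal/VMM/HaltedMethod1/MinBlockInterval" 5000000
 */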


/**
 * Initialize halt method 1.
 *
 * @return VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
{
    return vmR3HaltMethod12ReadConfigU(pUVM);
}


/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occasional blocking until
 * the lag has been eliminated.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM   pUVM  = pUVCpu->pUVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    PVM    pVM   = pUVCpu->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;
    bool fSpinning = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
    {
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM, pVCpu));
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            &&  u64NanoTS >= 100000) /* 0.100 ms */
#else
            &&  u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                AssertRC(rc != VERR_INTERRUPTED);
                AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVCpu->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (    fBlockOnce
                &&  Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
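
/*
 * The block-once interval computed before the loop above is
 * RT_MAX(MinBlockInterval, RT_MIN(lag / LagBlockIntervalDivisor, MaxBlockInterval)),
 * so the spinning EMT still blocks briefly now and then, at intervals that
 * scale with the virtual sync lag, clamped to the configured bounds.
 */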


/**
 * Initialize the global 1 halt method.
 *
 * @return VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
{
    return VINF_SUCCESS;
}


/**
 * The global 1 halt method - Block in GVMM (ring-0) and let it
 * try to take care of the global scheduling of EMT threads.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM   pUVM  = pUVCpu->pUVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    PVM    pVM   = pUVCpu->pVM;
    Assert(VMMGetCpu(pVM) == pVCpu);

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta > 50000 /* 0.050ms */)
        {
            VMMR3YieldStop(pVM);
            if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
                ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
                break;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, c);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, c);
            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc));
                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }
        }
        /*
         * When spinning, call upon the GVMM and do some wakeups once
         * in a while, it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, d);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, d);
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
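
/*
 * Both paths above go through ring-0: VMMR0_DO_GVMM_SCHED_HALT parks the EMT
 * in the global VM manager (GVMM) until the GIP timestamp u64GipTime or an
 * explicit wakeup, while VMMR0_DO_GVMM_SCHED_POLL, issued every 0x2000 (8192)
 * spin iterations, lets the GVMM do its wakeup processing without yielding
 * this thread.
 */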


/**
 * The global 1 halt method - VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pUVM->pVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu->idCpu == pUVCpu->idCpu);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = SUPCallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
        if (rc == VERR_INTERRUPTED)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * The global 1 halt method - VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3HaltGlobal1NotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = SUPCallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
        AssertRC(rc);
    }
    else if (   (   (fFlags & VMNOTIFYFF_FLAGS_POKE)
                 || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
             && pUVCpu->pVCpu)
    {
        VMCPUSTATE enmState = VMCPU_GET_STATE(pUVCpu->pVCpu);
        if (enmState == VMCPUSTATE_STARTED_EXEC)
        {
            if (fFlags & VMNOTIFYFF_FLAGS_POKE)
            {
                int rc = SUPCallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POKE, 0, NULL);
                AssertRC(rc);
            }
        }
        else if (enmState == VMCPUSTATE_STARTED_EXEC_REM)
        {
            if (!(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
                REMR3NotifyFF(pUVCpu->pVM);
        }
    }
}


/**
 * Bootstrap VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
{
    PUVM pUVM = pUVCpu->pUVM;

    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (pUVM->vm.s.pReqs)   /* global requests pending? */
            break;
        if (pUVCpu->vm.s.pReqs) /* local requests pending? */
            break;

        if (    pUVCpu->pVM
            &&  (   VM_FF_ISPENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
                 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)))
            break;
        if (pUVCpu->vm.s.fTerminateEMT)
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            if (pUVCpu->pVM)
                VM_FF_SET(pUVCpu->pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Bootstrap VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3BootstrapNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
        AssertRC(rc);
    }
    NOREF(fFlags);
}


/**
 * Default VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    int    rc    = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Default VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3DefaultNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
        AssertRC(rc);
    }
    else if (   !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM)
             && pUVCpu->pVCpu
             && pUVCpu->pVCpu->enmState == VMCPUSTATE_STARTED_EXEC_REM)
        REMR3NotifyFF(pUVCpu->pVM);
}


/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method id. */
    VMHALTMETHOD enmHaltMethod;
    /** The init function for loading config and initialize variables. */
    DECLR3CALLBACKMEMBER(int,  pfnInit,(PUVM pUVM));
    /** The term function. */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    /** The VMR3WaitHaltedU function. */
    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
    /** The VMR3WaitU function. */
    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVMCPU pUVCpu));
    /** The VMR3NotifyCpuFFU function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyCpuFF,(PUVMCPU pUVCpu, uint32_t fFlags));
    /** The VMR3NotifyGlobalFFU function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyGlobalFF,(PUVM pUVM, uint32_t fFlags));
} g_aHaltMethods[] =
{
    { VMHALTMETHOD_BOOTSTRAP, NULL,                NULL, NULL,                vmR3BootstrapWait,   vmR3BootstrapNotifyCpuFF,   NULL },
    { VMHALTMETHOD_OLD,       NULL,                NULL, vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyCpuFF,     NULL },
    { VMHALTMETHOD_1,         vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyCpuFF,     NULL },
    { VMHALTMETHOD_GLOBAL_1,  vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyCpuFF, NULL },
};
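
/*
 * VMINT::iHaltMethod is only ever set to an index into this array (see
 * vmR3SetHaltMethodU at the bottom of the file, which publishes it with
 * ASMAtomicWriteU32), so the dispatchers below can index g_aHaltMethods
 * without further validation or locking.
 */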


/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure EMT wakes up and promptly services an FF request.
 *
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
{
    LogFlow(("VMR3NotifyGlobalFFU:\n"));
    uint32_t iHaltMethod = pUVM->vm.s.iHaltMethod;

    if (g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF) /** @todo make mandatory. */
        g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF(pUVM, fFlags);
    else
        for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
            g_aHaltMethods[iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[iCpu], fFlags);
}


/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure EMT wakes up and promptly services an FF request.
 *
 * @param   pUVCpu  Pointer to the user mode per-VCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
{
    PUVM pUVM = pUVCpu->pUVM;

    LogFlow(("VMR3NotifyCpuFFU:\n"));
    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(pUVCpu, fFlags);
}


/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM                 VM handle.
 * @param   pVCpu               VMCPU handle.
 * @param   fIgnoreInterrupts   If set, the VMCPU_FF_INTERRUPT_APIC and
 *                              VMCPU_FF_INTERRUPT_PIC flags are ignored.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    const uint32_t fMask = !fIgnoreInterrupts
                         ? VMCPU_FF_EXTERNAL_HALTED_MASK
                         : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
    if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
        ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting; TM might have clock(s)
     * running only at certain times and needs to be notified.
     */
    if (pVCpu->idCpu == 0)
        VMMR3YieldSuspend(pVM);
    TMNotifyStartOfHalt(pVCpu);

    /*
     * Record halt averages for the last second.
     */
    PUVMCPU pUVCpu = pVCpu->pUVCpu;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        if (off > _4G || !pUVCpu->vm.s.cHalts)
        {
            pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pUVCpu->vm.s.HaltFrequency = 1;
        }
        else
        {
            pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
            pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVCpu->vm.s.u64HaltsStartTS = u64Now;
        pUVCpu->vm.s.cHalts = 0;
    }
    pUVCpu->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    Assert(VMCPU_GET_STATE(pVCpu) == VMCPUSTATE_STARTED);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

    /*
     * Notify TM and resume the yielder.
     */
    TMNotifyEndOfHalt(pVCpu);
    if (pVCpu->idCpu == 0)
        VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
    return rc;
}


/**
 * Suspended VM Wait.
 * Only a handful of forced actions will cause the function to
 * return to the caller.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
{
    LogFlow(("VMR3WaitU:\n"));

    /*
     * Check Relevant FFs.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    if (    pVM
        &&  (   VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
             || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)))
    {
        LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fGlobalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * Do waiting according to the halt method (so VMR3NotifyFF
     * doesn't have to special case anything).
     */
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
    LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pVM ? pVM->fGlobalForcedActions : 0));
    return rc;
}


/**
 * Changes the halt method.
 *
 * @returns VBox status code.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   enmHaltMethod   The new halt method.
 * @thread  EMT.
 */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
{
    PVM pVM = pUVM->pVM; Assert(pVM);
    VM_ASSERT_EMT(pVM);
    AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);

    /*
     * Resolve default (can be overridden in the configuration).
     */
    if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
        if (RT_SUCCESS(rc))
        {
            enmHaltMethod = (VMHALTMETHOD)u32;
            if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
                return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
        }
        else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
            return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
        else
            enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
            //enmHaltMethod = VMHALTMETHOD_1;
            //enmHaltMethod = VMHALTMETHOD_OLD;
    }
    LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));

    /*
     * Find the descriptor.
     */
    unsigned i = 0;
    while (    i < RT_ELEMENTS(g_aHaltMethods)
           &&  g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
        i++;
    AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);

    /*
     * Terminate the old one.
     */
    if (    pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
        &&  g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
    {
        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
        pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
    }

/** @todo SMP: Need rendezvous thing here, the other EMTs must not be
 *        sleeping when we switch the notification method or we'll never
 *        manage to wake them up properly and end up relying on timeouts... */

    /*
     * Init the new one.
     */
    memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
    if (g_aHaltMethods[i].pfnInit)
    {
        int rc = g_aHaltMethods[i].pfnInit(pUVM);
        AssertRCReturn(rc, rc);
    }
    pUVM->vm.s.enmHaltMethod = enmHaltMethod;

    ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
    return VINF_SUCCESS;
}

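/*
 * A specific halt method can be forced instead of relying on the
 * VMHALTMETHOD_DEFAULT resolution above, which reads the unsigned value
 * "VM/HaltMethod" from CFGM. A hedged sketch from the host side, assuming
 * the usual "VBoxInternal/" extradata-to-CFGM mapping applies ("MyVM" and
 * the numeric value are placeholders; the number must be a valid
 * VMHALTMETHOD enum value for this revision):
 *
 *   VBoxManage setextradata "MyVM" "VBoxInternal/VM/HaltMethod" <VMHALTMETHOD value>
 */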