VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp@ 20855

Last change on this file since 20855 was 20855, checked in by vboxsync, 16 years ago

Extra assertion

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.7 KB
Line 
1/* $Id: VMEmt.cpp 20855 2009-06-23 16:33:25Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VM
27#include <VBox/tm.h>
28#include <VBox/dbgf.h>
29#include <VBox/em.h>
30#include <VBox/pdmapi.h>
31#include <VBox/rem.h>
32#include <VBox/tm.h>
33#include "VMInternal.h"
34#include <VBox/vm.h>
35#include <VBox/uvm.h>
36
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/semaphore.h>
42#include <iprt/string.h>
43#include <iprt/thread.h>
44#include <iprt/time.h>
45
46
47/*******************************************************************************
48* Internal Functions *
49*******************************************************************************/
50int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu);
51
52
53/**
54 * The emulation thread main function.
55 *
56 * @returns Thread exit code.
57 * @param ThreadSelf The handle to the executing thread.
58 * @param pvArgs Pointer to the user mode per-VCpu structure (UVMPCU).
59 */
60DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
61{
62 PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
63 return vmR3EmulationThreadWithId(ThreadSelf, pUVCpu, pUVCpu->idCpu);
64}
65
66
/**
 * The emulation thread main function, with Virtual CPU ID for debugging.
 *
 * Runs the per-VCPU request/execution loop until termination is requested or
 * a fatal error occurs, then performs the final destruction bits (which may
 * include the whole VM destruction when fEMTDoesTheCleanup is set).
 *
 * @returns Thread exit code (normally VINF_EM_TERMINATE, otherwise the
 *          status that broke the loop).
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
 */
int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
{
    PUVM pUVM = pUVCpu->pUVM;
    int  rc;

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    /* Associate this native thread with its UVMCPU via TLS so per-EMT
       lookups work from here on. */
    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    /*
     * The request loop.
     */
    rc = VINF_SUCCESS;
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    VMSTATE enmBefore = VMSTATE_CREATED;       /* (only used for logging atm.) */
    for (;;)
    {
        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            /*
             * Only the first VCPU may initialize the VM during early init
             * and must therefore service all VMCPUID_ANY requests.
             * See also VMR3Create
             */
            if (    pUVM->vm.s.pReqs
                &&  pUVCpu->idCpu == 0)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else if (pUVCpu->vm.s.pReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertFailed();
                    break;
                }
            }
        }
        else
        {
            /*
             * Pending requests which needs servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            /* Rendezvous has priority over ordinary requests. */
            if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                VMMR3EmtRendezvousFF(pVM, &pVM->aCpus[idCpu]);
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (pUVCpu->vm.s.pReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %d -> %d\n", pUVCpu->idCpu, rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET_BIT))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                    break;
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  pUVM->vm.s.fTerminateEMT
                ||  (   pUVM->pVM /* pVM may have become invalid by now. */
                     && VM_FF_ISSET(pUVM->pVM, VM_FF_TERMINATE)))
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially resume
         * or start the VM, in that case we'll get a change in VM status
         * indicating that we're now running.
         */
        if (    RT_SUCCESS(rc)
            &&  pUVM->pVM)
        {
            PVM    pVM   = pUVM->pVM;
            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            if (    pVM->enmVMState == VMSTATE_RUNNING
                &&  VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
            {
                rc = EMR3ExecuteVM(pVM, pVCpu);
                Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
                /* A guru meditation inside EM while the VM still claims to be
                   running forces the VM state over as well. */
                if (    EMGetState(pVCpu) == EMSTATE_GURU_MEDITATION
                    &&  pVM->enmVMState == VMSTATE_RUNNING)
                    vmR3SetState(pVM, VMSTATE_GURU_MEDITATION);
            }
        }

    } /* forever */


    /*
     * Exiting.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (pUVM->vm.s.fEMTDoesTheCleanup)
    {
        /* Delayed destruction: this EMT itself tears down the VM. */
        Log(("vmR3EmulationThread: executing delayed Destroy\n"));
        Assert(pUVM->pVM);
        vmR3Destroy(pUVM->pVM);
        vmR3DestroyFinalBitFromEMT(pUVM);
    }
    else
    {
        vmR3DestroyFinalBitFromEMT(pUVM);

        /* Mark the EMT as gone so nobody tries to wake a dead thread. */
        pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    }
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
260
261
262/**
263 * Gets the name of a halt method.
264 *
265 * @returns Pointer to a read only string.
266 * @param enmMethod The method.
267 */
268static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
269{
270 switch (enmMethod)
271 {
272 case VMHALTMETHOD_BOOTSTRAP: return "bootstrap";
273 case VMHALTMETHOD_DEFAULT: return "default";
274 case VMHALTMETHOD_OLD: return "old";
275 case VMHALTMETHOD_1: return "method1";
276 //case VMHALTMETHOD_2: return "method2";
277 case VMHALTMETHOD_GLOBAL_1: return "global1";
278 default: return "unknown";
279 }
280}
281
282
/**
 * The old halt loop.
 *
 * Works the timer queues and then either spins, yields or sleeps on the
 * per-VCPU event semaphore depending on how far away the next timer event
 * is, until a relevant force-action flag gets pending.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on a fatal wait failure,
 *          in which case EMT termination has also been requested).
 * @param   pUVCpu  Pointer to the user mode per-VCPU structure.
 * @param   fMask   The per-VCPU force-action flags to wake up on.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    int rc = VINF_SUCCESS;
    /* While fWait is set the notify workers will signal EventSemWait. */
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps set an FF.
         */
        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        /* Re-check the FFs: the poll may have set one. */
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, a);
                RTThreadYield(); /* this is the best we can do here */
                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, a);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
            }
            else
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        /* Timeouts are the expected way out of the waits above. */
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            /* Fatal wait failure: request EMT termination and bail out. */
            AssertRC(rc != VERR_INTERRUPTED); /* NOTE(review): AssertRC on a boolean looks odd; presumably Assert() was intended — confirm. */
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
369
370
371/**
372 * Initialize the configuration of halt method 1 & 2.
373 *
374 * @return VBox status code. Failure on invalid CFGM data.
375 * @param pVM The VM handle.
376 */
377static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
378{
379 /*
380 * The defaults.
381 */
382#if 1 /* DEBUGGING STUFF - REMOVE LATER */
383 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
384 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 2*1000000;
385 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 75*1000000;
386 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 30*1000000;
387 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 20*1000000;
388#else
389 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
390 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000;
391 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
392 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000;
393 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000;
394#endif
395
396 /*
397 * Query overrides.
398 *
399 * I don't have time to bother with niceities such as invalid value checks
400 * here right now. sorry.
401 */
402 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
403 if (pCfg)
404 {
405 uint32_t u32;
406 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
407 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
408 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
409 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
410 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
411 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
412 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
413 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
414 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
415 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
416 LogRel(("HaltedMethod1 config: %d/%d/%d/%d/%d\n",
417 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
418 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
419 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
420 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
421 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
422 }
423
424 return VINF_SUCCESS;
425}
426
427
428/**
429 * Initialize halt method 1.
430 *
431 * @return VBox status code.
432 * @param pUVM Pointer to the user mode VM structure.
433 */
434static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
435{
436 return vmR3HaltMethod12ReadConfigU(pUVM);
437}
438
439
/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occasional blocking until
 * the lag has been eliminated.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on a fatal wait failure,
 *          in which case EMT termination has also been requested).
 * @param   pUVCpu  Pointer to the user mode per-VCPU structure.
 * @param   fMask   The per-VCPU force-action flags to wake up on.
 * @param   u64Now  Current virtual sync timestamp (used for the spin
 *                  bookkeeping below).
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM  = pUVCpu->pUVM;
    PVMCPU  pVCpu = pUVCpu->pVCpu;
    PVM     pVM   = pUVCpu->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;
    bool fSpinning = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
        {
            /* Already spinning: keep going while the lag is above the stop
               threshold, and allow an occasional block based on the
               configured intervals and the current lag. */
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            /* Not spinning yet: start once the lag exceeds the start threshold. */
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
    {
        /* No longer catching up; clear the spin start timestamp. */
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);   /* notifiers will signal our event sem while set */
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            &&  u64NanoTS >= 100000) /* 0.100 ms */
#else
            &&  u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            /* Wait time is capped at 15 ms and shortened by the average
               oversleep observed so far. */
            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;   /* timeouts are the expected outcome */
            else if (RT_FAILURE(rc))
            {
                /* Fatal wait failure: request EMT termination and bail out. */
                AssertRC(rc != VERR_INTERRUPTED); /* NOTE(review): AssertRC on a boolean looks odd; presumably Assert() was intended — confirm. */
                AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVCpu->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (    fBlockOnce
                &&  Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
587
588
589/**
590 * Initialize the global 1 halt method.
591 *
592 * @return VBox status code.
593 * @param pUVM Pointer to the user mode VM structure.
594 */
595static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
596{
597 return VINF_SUCCESS;
598}
599
600
/**
 * The global 1 halt method - Block in GMM (ring-0) and let it
 * try take care of the global scheduling of EMT threads.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on a fatal ring-0 halt
 *          failure, in which case EMT termination has also been requested).
 * @param   pUVCpu  Pointer to the user mode per-VCPU structure.
 * @param   fMask   The per-VCPU force-action flags to wake up on.
 * @param   u64Now  Current timestamp (only used by the disabled logging).
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM  = pUVCpu->pUVM;
    PVMCPU  pVCpu = pUVCpu->pVCpu;
    PVM     pVM   = pUVCpu->pVM;
    Assert(VMMGetCpu(pVM) == pVCpu);   /* must be called on the owning EMT */

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);   /* notifiers will use ring-0 wake-up while set */
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta > 50000 /* 0.050ms */)
        {
            VMMR3YieldStop(pVM);
            /* Final FF check before committing to the ring-0 halt. */
            if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
                ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
                break;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, c);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, c);
            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS;   /* being woken up early is fine */
            else if (RT_FAILURE(rc))
            {
                /* Fatal halt failure: request EMT termination and bail out. */
                AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc));
                ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }
        }
        /*
         * When spinning call upon the GVMM and do some wakups once
         * in a while, it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, d);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, d);
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
680
681
/**
 * The global 1 halt method - VMR3Wait() worker.
 *
 * Blocks in ring-0 (GVMM halt, 1s slices) until a suspend-relevant
 * force-action flag becomes pending.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on a fatal halt failure,
 *          in which case EMT termination has also been requested).
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pUVM->pVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu->idCpu == pUVCpu->idCpu);   /* must be called on the owning EMT */

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = SUPCallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
        if (rc == VERR_INTERRUPTED)
            rc = VINF_SUCCESS;   /* an early wake-up just means re-check the FFs */
        else if (RT_FAILURE(rc))
        {
            /* Fatal halt failure: request EMT termination and bail out. */
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
727
728
/**
 * The global 1 halt method - VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3HaltGlobal1NotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    if (pUVCpu->vm.s.fWait)
    {
        /* The EMT is blocked in the ring-0 GVMM halt; wake it up there. */
        int rc = SUPCallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
        AssertRC(rc);
    }
    else if (   (   (fFlags & VMNOTIFYFF_FLAGS_POKE)
                 || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
             && pUVCpu->pVCpu)
    {
        /* Not halted: depending on what the VCPU is doing, either poke it
           out of ring-0 guest execution or notify the recompiler. */
        VMCPUSTATE enmState = VMCPU_GET_STATE(pUVCpu->pVCpu);
        if (enmState == VMCPUSTATE_STARTED_EXEC)
        {
            if (fFlags & VMNOTIFYFF_FLAGS_POKE)
            {
                int rc = SUPCallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POKE, 0, NULL);
                AssertRC(rc);
            }
        }
        else if (enmState == VMCPUSTATE_STARTED_EXEC_REM)
        {
            /* Skip the REM notification when the caller already did it. */
            if (!(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
                REMR3NotifyFF(pUVCpu->pVM);
        }
    }
}
762
763
/**
 * Bootstrap VMR3Wait() worker.
 *
 * Used before the VM is fully created: waits on the per-VCPU event
 * semaphore until a request, a suspend-relevant FF, or termination shows up.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on a fatal wait failure,
 *          in which case EMT termination has also been requested).
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
{
    PUVM pUVM = pUVCpu->pUVM;

    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (pUVM->vm.s.pReqs)   /* global requests pending? */
            break;
        if (pUVCpu->vm.s.pReqs) /* local requests pending? */
            break;

        /* pVM may not exist yet during bootstrap, so guard the FF checks. */
        if (    pUVCpu->pVM
            &&  (   VM_FF_ISPENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
                 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
                )
           )
            break;
        if (pUVCpu->vm.s.fTerminateEMT)
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;   /* periodic timeout just re-runs the checks */
        else if (RT_FAILURE(rc))
        {
            /* Fatal wait failure: request EMT termination and bail out. */
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            if (pUVCpu->pVM)
                VM_FF_SET(pUVCpu->pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
818
819
820/**
821 * Bootstrap VMR3NotifyFF() worker.
822 *
823 * @param pUVCpu Pointer to the user mode VMCPU structure.
824 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
825 */
826static DECLCALLBACK(void) vmR3BootstrapNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
827{
828 if (pUVCpu->vm.s.fWait)
829 {
830 int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
831 AssertRC(rc);
832 }
833 NOREF(fFlags);
834}
835
836
/**
 * Default VMR3Wait() worker.
 *
 * Waits on the per-VCPU event semaphore (1s slices) until a
 * suspend-relevant force-action flag becomes pending.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on a fatal wait failure,
 *          in which case EMT termination has also been requested).
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;   /* periodic timeout just re-runs the checks */
        else if (RT_FAILURE(rc))
        {
            /* Fatal wait failure: request EMT termination and bail out. */
            AssertMsgFailed(("RTSemEventWait->%Rrc\n", rc));
            ASMAtomicUoWriteBool(&pUVCpu->vm.s.fTerminateEMT, true);
            VM_FF_SET(pVM, VM_FF_TERMINATE);
            rc = VERR_INTERNAL_ERROR;
            break;
        }

    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
880
881
882/**
883 * Default VMR3NotifyFF() worker.
884 *
885 * @param pUVCpu Pointer to the user mode VMCPU structure.
886 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
887 */
888static DECLCALLBACK(void) vmR3DefaultNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
889{
890 if (pUVCpu->vm.s.fWait)
891 {
892 int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
893 AssertRC(rc);
894 }
895 else if ( !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM)
896 && pUVCpu->pVCpu
897 && pUVCpu->pVCpu->enmState == VMCPUSTATE_STARTED_EXEC_REM)
898 REMR3NotifyFF(pUVCpu->pVM);
899}
900
901
/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method id. */
    VMHALTMETHOD enmHaltMethod;
    /** The init function for loading config and initialize variables. */
    DECLR3CALLBACKMEMBER(int,  pfnInit,(PUVM pUVM));
    /** The term function. */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    /** The VMR3WaitHaltedU function. */
    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
    /** The VMR3WaitU function. */
    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVMCPU pUVCpu));
    /** The VMR3NotifyCpuFFU function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyCpuFF,(PUVMCPU pUVCpu, uint32_t fFlags));
    /** The VMR3NotifyGlobalFFU function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyGlobalFF,(PUVM pUVM, uint32_t fFlags));
} g_aHaltMethods[] =
{
    /*  enmHaltMethod,           pfnInit,             pfnTerm, pfnHalt,             pfnWait,             pfnNotifyCpuFF,             pfnNotifyGlobalFF */
    { VMHALTMETHOD_BOOTSTRAP, NULL,                NULL,   NULL,                vmR3BootstrapWait,   vmR3BootstrapNotifyCpuFF,   NULL },
    { VMHALTMETHOD_OLD,       NULL,                NULL,   vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyCpuFF,     NULL },
    { VMHALTMETHOD_1,         vmR3HaltMethod1Init, NULL,   vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyCpuFF,     NULL },
    { VMHALTMETHOD_GLOBAL_1,  vmR3HaltGlobal1Init, NULL,   vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyCpuFF, NULL },
};
929
930
931/**
932 * Notify the emulation thread (EMT) about pending Forced Action (FF).
933 *
934 * This function is called by thread other than EMT to make
935 * sure EMT wakes up and promptly service an FF request.
936 *
937 * @param pUVM Pointer to the user mode VM structure.
938 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
939 */
940VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
941{
942 LogFlow(("VMR3NotifyGlobalFFU:\n"));
943 uint32_t iHaldMethod = pUVM->vm.s.iHaltMethod;
944
945 if (g_aHaltMethods[iHaldMethod].pfnNotifyGlobalFF) /** @todo make mandatory. */
946 g_aHaltMethods[iHaldMethod].pfnNotifyGlobalFF(pUVM, fFlags);
947 else
948 for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
949 g_aHaltMethods[iHaldMethod].pfnNotifyCpuFF(&pUVM->aCpus[iCpu], fFlags);
950}
951
952
953/**
954 * Notify the emulation thread (EMT) about pending Forced Action (FF).
955 *
956 * This function is called by thread other than EMT to make
957 * sure EMT wakes up and promptly service an FF request.
958 *
959 * @param pUVM Pointer to the user mode VM structure.
960 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
961 */
962VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
963{
964 PUVM pUVM = pUVCpu->pUVM;
965
966 LogFlow(("VMR3NotifyCpuFFU:\n"));
967 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(pUVCpu, fFlags);
968}
969
970
/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM                 VM handle.
 * @param   pVCpu               VMCPU handle.
 * @param   fIgnoreInterrupts   If set the VM_FF_INTERRUPT flags is ignored.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    /* Optionally mask out the APIC/PIC interrupt flags from the wake-up set. */
    const uint32_t fMask = !fIgnoreInterrupts
        ? VMCPU_FF_EXTERNAL_HALTED_MASK
        : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
    if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
        ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting, while TM might have clock(s) running
     * only at certain times and need to be notified..
     */
    if (pVCpu->idCpu == 0)
        VMMR3YieldSuspend(pVM);
    TMNotifyStartOfHalt(pVCpu);

    /*
     * Record halt averages for the last second.
     */
    PUVMCPU pUVCpu = pVCpu->pUVCpu;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        /* Window elapsed: recompute interval/frequency and restart. Very
           stale windows (> ~4s) or empty ones fall back to 1 Hz defaults. */
        if (off > _4G || !pUVCpu->vm.s.cHalts)
        {
            pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pUVCpu->vm.s.HaltFrequency = 1;
        }
        else
        {
            pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
            pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVCpu->vm.s.u64HaltsStartTS = u64Now;
        pUVCpu->vm.s.cHalts = 0;
    }
    pUVCpu->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    Assert(VMCPU_GET_STATE(pVCpu) == VMCPUSTATE_STARTED);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

    /*
     * Notify TM and resume the yielder
     */
    TMNotifyEndOfHalt(pVCpu);
    if (pVCpu->idCpu == 0)
        VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
    return rc;
}
1049
1050
1051/**
1052 * Suspended VM Wait.
1053 * Only a handful of forced actions will cause the function to
1054 * return to the caller.
1055 *
1056 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
1057 * case an appropriate status code is returned.
1058 * @param pUVCpu Pointer to the user mode VMCPU structure.
1059 * @thread The emulation thread.
1060 */
1061VMMR3DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
1062{
1063 LogFlow(("VMR3WaitU:\n"));
1064
1065 /*
1066 * Check Relevant FFs.
1067 */
1068 PVM pVM = pUVCpu->pVM;
1069 PVMCPU pVCpu = pUVCpu->pVCpu;
1070
1071 if ( pVM
1072 && ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
1073 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
1074 )
1075 )
1076 {
1077 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fGlobalForcedActions));
1078 return VINF_SUCCESS;
1079 }
1080
1081 /*
1082 * Do waiting according to the halt method (so VMR3NotifyFF
1083 * doesn't have to special case anything).
1084 */
1085 PUVM pUVM = pUVCpu->pUVM;
1086 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
1087 LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pVM ? pVM->fGlobalForcedActions : 0));
1088 return rc;
1089}
1090
1091
1092/**
1093 * Rendezvous callback that will be called once.
1094 *
1095 * @returns VBox status code.
1096 * @param pVM VM handle.
1097 * @param pVCpu The VMCPU handle for the calling EMT.
1098 * @param pvUser The new g_aHaltMethods index.
1099 */
1100static DECLCALLBACK(int) vmR3SetHaltMethodCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
1101{
1102 PUVM pUVM = pVM->pUVM;
1103 uintptr_t i = (uintptr_t)pvUser;
1104 Assert(i < RT_ELEMENTS(g_aHaltMethods));
1105 NOREF(pVCpu);
1106
1107 /*
1108 * Terminate the old one.
1109 */
1110 if ( pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
1111 && g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
1112 {
1113 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
1114 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
1115 }
1116
1117 /* Assert that the failure fallback is where we expect. */
1118 Assert(g_aHaltMethods[0].enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
1119 Assert(!g_aHaltMethods[0].pfnTerm && !g_aHaltMethods[0].pfnInit);
1120
1121 /*
1122 * Init the new one.
1123 */
1124 int rc = VINF_SUCCESS;
1125 memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
1126 if (g_aHaltMethods[i].pfnInit)
1127 {
1128 rc = g_aHaltMethods[i].pfnInit(pUVM);
1129 if (RT_FAILURE(rc))
1130 {
1131 /* Fall back on the bootstrap method. This requires no
1132 init/term (see assertion above), and will always work. */
1133 AssertLogRelRC(rc);
1134 i = 0;
1135 }
1136 }
1137
1138 /*
1139 * Commit it.
1140 */
1141 pUVM->vm.s.enmHaltMethod = g_aHaltMethods[i].enmHaltMethod;
1142 ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
1143
1144 return rc;
1145}
1146
1147
1148/**
1149 * Changes the halt method.
1150 *
1151 * @returns VBox status code.
1152 * @param pUVM Pointer to the user mode VM structure.
1153 * @param enmHaltMethod The new halt method.
1154 * @thread EMT.
1155 */
1156int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
1157{
1158 PVM pVM = pUVM->pVM; Assert(pVM);
1159 VM_ASSERT_EMT(pVM);
1160 AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);
1161
1162 /*
1163 * Resolve default (can be overridden in the configuration).
1164 */
1165 if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
1166 {
1167 uint32_t u32;
1168 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
1169 if (RT_SUCCESS(rc))
1170 {
1171 enmHaltMethod = (VMHALTMETHOD)u32;
1172 if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
1173 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
1174 }
1175 else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
1176 return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
1177 else
1178 enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
1179 //enmHaltMethod = VMHALTMETHOD_1;
1180 //enmHaltMethod = VMHALTMETHOD_OLD;
1181 }
1182 LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));
1183
1184 /*
1185 * Find the descriptor.
1186 */
1187 unsigned i = 0;
1188 while ( i < RT_ELEMENTS(g_aHaltMethods)
1189 && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
1190 i++;
1191 AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);
1192
1193 /*
1194 * This needs to be done while the other EMTs are not sleeping or otherwise messing around.
1195 */
1196 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3SetHaltMethodCallback, (void *)(uintptr_t)i);
1197}
1198
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette