VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMEmt.cpp@39025

Last change on this file since 39025 was 38838, checked in by vboxsync, 14 years ago

VMM,++: Try to fix the async reset, suspend and power-off problems in PDM wrt conflicting VMM requests. Split them into priority requests and normal requests. The priority requests can safely be processed while PDM is doing async state change waits; the normal ones cannot. (The problem I bumped into was an unmap-chunk request from PGM being processed during PDMR3Reset, causing a recursive VMMR3EmtRendezvous deadlock.)
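A minimal sketch of the split in practice, reusing the VMR3ReqProcessU signature visible in the listing below; the second call site is illustrative of the PDM async-wait case, not the actual PDM code:

  /* Normal EMT servicing drains both queues, as in vmR3EmulationThreadWithId(): */
  rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);

  /* During an async PDM state-change wait only the priority queue may be
     drained; a normal request (e.g. PGM's unmap-chunk) processed here could
     recurse into a pending VMMR3EmtRendezvous and deadlock: */
  rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, true /*fPriorityOnly*/);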

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 45.1 KB
1/* $Id: VMEmt.cpp 38838 2011-09-23 11:21:55Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_VM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/dbgf.h>
25#include <VBox/vmm/em.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/rem.h>
28#include <VBox/vmm/tm.h>
29#include "VMInternal.h"
30#include <VBox/vmm/vm.h>
31#include <VBox/vmm/uvm.h>
32
33#include <VBox/err.h>
34#include <VBox/log.h>
35#include <iprt/assert.h>
36#include <iprt/asm.h>
37#include <iprt/asm-math.h>
38#include <iprt/semaphore.h>
39#include <iprt/string.h>
40#include <iprt/thread.h>
41#include <iprt/time.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu);
48
49
50/**
51 * The emulation thread main function.
52 *
53 * @returns Thread exit code.
54 * @param ThreadSelf The handle to the executing thread.
55 * @param pvArgs Pointer to the user mode per-VCpu structure (UVMCPU).
56 */
57DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
58{
59 PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
60 return vmR3EmulationThreadWithId(ThreadSelf, pUVCpu, pUVCpu->idCpu);
61}
62
63
64/**
65 * The emulation thread main function, with Virtual CPU ID for debugging.
66 *
67 * @returns Thread exit code.
68 * @param ThreadSelf The handle to the executing thread.
69 * @param pUVCpu Pointer to the user mode per-VCpu structure.
70 * @param idCpu The virtual CPU ID, for backtrace purposes.
71 */
72int vmR3EmulationThreadWithId(RTTHREAD ThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
73{
74 PUVM pUVM = pUVCpu->pUVM;
75 int rc;
76
77 AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
78 ("Invalid arguments to the emulation thread!\n"));
79
80 rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
81 AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);
82
83 if ( pUVM->pVmm2UserMethods
84 && pUVM->pVmm2UserMethods->pfnNotifyEmtInit)
85 pUVM->pVmm2UserMethods->pfnNotifyEmtInit(pUVM->pVmm2UserMethods, pUVM, pUVCpu);
86
87 /*
88 * The request loop.
89 */
90 rc = VINF_SUCCESS;
91 Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
92 VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
93 for (;;)
94 {
95 /*
96 * During early init there is no pVM, so make a special path
97 * for that to keep things clearly separate.
98 */
99 if (!pUVM->pVM)
100 {
101 /*
102 * Check for termination first.
103 */
104 if (pUVM->vm.s.fTerminateEMT)
105 {
106 rc = VINF_EM_TERMINATE;
107 break;
108 }
109
110 /*
111 * Only the first VCPU may initialize the VM during early init
112 * and must therefore service all VMCPUID_ANY requests.
113 * See also VMR3Create.
114 */
115 if ( (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
116 && pUVCpu->idCpu == 0)
117 {
118 /*
119 * Service execute in any EMT request.
120 */
121 rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
122 Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
123 }
124 else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
125 {
126 /*
127 * Service execute in specific EMT request.
128 */
129 rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
130 Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
131 }
132 else
133 {
134 /*
135 * Nothing important is pending, so wait for something.
136 */
137 rc = VMR3WaitU(pUVCpu);
138 if (RT_FAILURE(rc))
139 {
140 AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
141 break;
142 }
143 }
144 }
145 else
146 {
147 /*
148 * Pending requests that need servicing?
149 *
150 * We check for state changes in addition to status codes when
151 * servicing requests. (See the ifs below.)
152 */
153 PVM pVM = pUVM->pVM;
154 enmBefore = pVM->enmVMState;
155 if (pUVM->vm.s.fTerminateEMT)
156 {
157 rc = VINF_EM_TERMINATE;
158 break;
159 }
160
161 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
162 {
163 rc = VMMR3EmtRendezvousFF(pVM, &pVM->aCpus[idCpu]);
164 Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
165 }
166 else if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
167 {
168 /*
169 * Service execute in any EMT request.
170 */
171 rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
172 Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
173 }
174 else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
175 {
176 /*
177 * Service execute in specific EMT request.
178 */
179 rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
180 Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
181 }
182 else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
183 {
184 /*
185 * Service the debugger request.
186 */
187 rc = DBGFR3VMMForcedAction(pVM);
188 Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
189 }
190 else if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
191 {
192 /*
193 * Service a delayed reset request.
194 */
195 rc = VMR3Reset(pVM);
196 VM_FF_CLEAR(pVM, VM_FF_RESET);
197 Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
198 }
199 else
200 {
201 /*
202 * Nothing important is pending, so wait for something.
203 */
204 rc = VMR3WaitU(pUVCpu);
205 if (RT_FAILURE(rc))
206 {
207 AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
208 break;
209 }
210 }
211
212 /*
213 * Check for termination requests; these have extremely high priority.
214 */
215 if ( rc == VINF_EM_TERMINATE
216 || pUVM->vm.s.fTerminateEMT)
217 break;
218 }
219
220 /*
221 * Some requests (both VMR3Req* and the DBGF) can potentially resume
222 * or start the VM; in that case we'll get a change in VM status
223 * indicating that we're now running.
224 */
225 if ( RT_SUCCESS(rc)
226 && pUVM->pVM)
227 {
228 PVM pVM = pUVM->pVM;
229 PVMCPU pVCpu = &pVM->aCpus[idCpu];
230 if ( pVM->enmVMState == VMSTATE_RUNNING
231 && VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
232 {
233 rc = EMR3ExecuteVM(pVM, pVCpu);
234 Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
235 if (EMGetState(pVCpu) == EMSTATE_GURU_MEDITATION)
236 vmR3SetGuruMeditation(pVM);
237 }
238 }
239
240 } /* forever */
241
242
243 /*
244 * Cleanup and exit.
245 */
246 Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
247 ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
248 if ( idCpu == 0
249 && pUVM->pVM)
250 {
251 PVM pVM = pUVM->pVM;
252 vmR3SetTerminated(pVM);
253 pUVM->pVM = NULL;
254
255 /** @todo SMP: This isn't 100% safe. We should wait for the other
256 * threads to finish before destroying the VM. */
257 int rc2 = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
258 AssertLogRelRC(rc2);
259 }
260
261 if ( pUVM->pVmm2UserMethods
262 && pUVM->pVmm2UserMethods->pfnNotifyEmtTerm)
263 pUVM->pVmm2UserMethods->pfnNotifyEmtTerm(pUVM->pVmm2UserMethods, pUVM, pUVCpu);
264
265 pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
266 Log(("vmR3EmulationThread: EMT is terminated.\n"));
267 return rc;
268}
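/* For reference, a minimal sketch of how such a thread might be created,
 * loosely modeled on VMR3Create; the stack size, thread type and name format
 * are illustrative assumptions, not the actual values used:
 *
 *   rc = RTThreadCreateF(&pUVCpu->vm.s.ThreadEMT, vmR3EmulationThread, pUVCpu,
 *                        _1M, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
 *                        "EMT-%u", idCpu);
 */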
269
270
271/**
272 * Gets the name of a halt method.
273 *
274 * @returns Pointer to a read only string.
275 * @param enmMethod The method.
276 */
277static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
278{
279 switch (enmMethod)
280 {
281 case VMHALTMETHOD_BOOTSTRAP: return "bootstrap";
282 case VMHALTMETHOD_DEFAULT: return "default";
283 case VMHALTMETHOD_OLD: return "old";
284 case VMHALTMETHOD_1: return "method1";
285 //case VMHALTMETHOD_2: return "method2";
286 case VMHALTMETHOD_GLOBAL_1: return "global1";
287 default: return "unknown";
288 }
289}
290
291
292/**
293 * Signal a fatal wait error.
294 *
295 * @returns Fatal error code to be propagated up the call stack.
296 * @param pUVCpu The user mode per CPU structure of the calling
297 * EMT.
298 * @param pszFmt The error format with a single %Rrc in it.
299 * @param rcFmt The status code to format.
300 */
301static int vmR3FatalWaitError(PUVMCPU pUVCpu, const char *pszFmt, int rcFmt)
302{
303 /** @todo This is wrong ... raise a fatal error / guru meditation
304 * instead. */
305 AssertLogRelMsgFailed((pszFmt, rcFmt));
306 ASMAtomicUoWriteBool(&pUVCpu->pUVM->vm.s.fTerminateEMT, true);
307 if (pUVCpu->pVM)
308 VM_FF_SET(pUVCpu->pVM, VM_FF_CHECK_VM_STATE);
309 return VERR_INTERNAL_ERROR;
310}
311
312
313/**
314 * The old halt loop.
315 */
316static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
317{
318 /*
319 * Halt loop.
320 */
321 PVM pVM = pUVCpu->pVM;
322 PVMCPU pVCpu = pUVCpu->pVCpu;
323
324 int rc = VINF_SUCCESS;
325 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
326 //unsigned cLoops = 0;
327 for (;;)
328 {
329 /*
330 * Work the timers and check if we can exit.
331 * The poll call gives us the ticks left to the next event in
332 * addition to perhaps set an FF.
333 */
334 uint64_t const u64StartTimers = RTTimeNanoTS();
335 TMR3TimerQueuesDo(pVM);
336 uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
337 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
338 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
339 || VMCPU_FF_ISPENDING(pVCpu, fMask))
340 break;
341 uint64_t u64NanoTS;
342 TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
343 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
344 || VMCPU_FF_ISPENDING(pVCpu, fMask))
345 break;
346
347 /*
348 * Wait for a while. Someone will wake us up or interrupt the call if
349 * anything needs our attention.
350 */
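/* The tiers below are keyed on the estimated time to the next event
 * (u64NanoTS): under 50 us we just spin; under 870 us we yield the thread;
 * under 2 ms we block on the event semaphore for 1 ms; anything longer
 * blocks for up to 15 ms. */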
351 if (u64NanoTS < 50000)
352 {
353 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
354 /* spin */;
355 }
356 else
357 {
358 VMMR3YieldStop(pVM);
359 //uint64_t u64Start = RTTimeNanoTS();
360 if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
361 {
362 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
363 uint64_t const u64StartSchedYield = RTTimeNanoTS();
364 RTThreadYield(); /* this is the best we can do here */
365 uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
366 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
367 }
368 else if (u64NanoTS < 2000000)
369 {
370 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
371 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
372 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
373 uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
374 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
375 }
376 else
377 {
378 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
379 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
380 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
381 uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
382 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
383 }
384 //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
385 //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
386 }
387 if (rc == VERR_TIMEOUT)
388 rc = VINF_SUCCESS;
389 else if (RT_FAILURE(rc))
390 {
391 rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
392 break;
393 }
394 }
395
396 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
397 return rc;
398}
399
400
401/**
402 * Initialize the configuration of halt method 1 & 2.
403 *
404 * @return VBox status code. Failure on invalid CFGM data.
405 * @param pUVM Pointer to the user mode VM structure.
406 */
407static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
408{
409 /*
410 * The defaults.
411 */
412#if 1 /* DEBUGGING STUFF - REMOVE LATER */
413 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
414 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 2*1000000;
415 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 75*1000000;
416 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 30*1000000;
417 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 20*1000000;
418#else
419 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
420 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000;
421 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
422 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000;
423 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000;
424#endif
425
426 /*
427 * Query overrides.
428 *
429 * I don't have time to bother with niceties such as invalid value checks
430 * here right now, sorry.
431 */
432 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
433 if (pCfg)
434 {
435 uint32_t u32;
436 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
437 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
438 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
439 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
440 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
441 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
442 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
443 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
444 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
445 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
446 LogRel(("HaltedMethod1 config: %d/%d/%d/%d/%d\n",
447 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
448 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
449 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
450 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
451 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
452 }
453
454 return VINF_SUCCESS;
455}
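/* The overrides above live in the CFGM tree. A hypothetical way to set one
 * before this code runs, mirroring the node path queried above (the node
 * creation details here are an assumption, not actual setup code):
 *
 *   PCFGMNODE pHM1;
 *   CFGMR3InsertNode(CFGMR3GetRoot(pVM), "VMM/HaltedMethod1", &pHM1);
 *   CFGMR3InsertInteger(pHM1, "StartSpinning", 50*1000000);   - 50 ms of lag
 */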
456
457
458/**
459 * Initialize halt method 1.
460 *
461 * @return VBox status code.
462 * @param pUVM Pointer to the user mode VM structure.
463 */
464static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
465{
466 return vmR3HaltMethod12ReadConfigU(pUVM);
467}
468
469
470/**
471 * Method 1 - Block whenever possible, and when lagging behind
472 * switch to spinning for 10-30ms with occasional blocking until
473 * the lag has been eliminated.
474 */
475static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
476{
477 PUVM pUVM = pUVCpu->pUVM;
478 PVMCPU pVCpu = pUVCpu->pVCpu;
479 PVM pVM = pUVCpu->pVM;
480
481 /*
482 * To simplify things, we decide up-front whether we should switch to spinning or
483 * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
484 * and that it will generate interrupts or other events that will cause us to exit
485 * the halt loop.
486 */
487 bool fBlockOnce = false;
488 bool fSpinning = false;
489 uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
490 if (u32CatchUpPct /* non-zero if catching up */)
491 {
492 if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
493 {
494 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
495 if (fSpinning)
496 {
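/* Forced-block interval while spinning: clamp(lag / divisor, min, max).
 * With the debug defaults above (divisor 4, min 2 ms, max 75 ms) a 100 ms
 * lag yields 25 ms, so we block once if the last block was over 25 ms ago. */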
497 uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
498 fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
499 > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
500 RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
501 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
502 }
503 else
504 {
505 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
506 pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
507 }
508 }
509 else
510 {
511 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
512 if (fSpinning)
513 pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
514 }
515 }
516 else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
517 {
518 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
519 pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
520 }
521
522 /*
523 * Halt loop.
524 */
525 int rc = VINF_SUCCESS;
526 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
527 unsigned cLoops = 0;
528 for (;; cLoops++)
529 {
530 /*
531 * Work the timers and check if we can exit.
532 */
533 uint64_t const u64StartTimers = RTTimeNanoTS();
534 TMR3TimerQueuesDo(pVM);
535 uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
536 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
537 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
538 || VMCPU_FF_ISPENDING(pVCpu, fMask))
539 break;
540
541 /*
542 * Estimate time left to the next event.
543 */
544 uint64_t u64NanoTS;
545 TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
546 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
547 || VMCPU_FF_ISPENDING(pVCpu, fMask))
548 break;
549
550 /*
551 * Block if we're not spinning and the interval isn't all that small.
552 */
553 if ( ( !fSpinning
554 || fBlockOnce)
555#if 1 /* DEBUGGING STUFF - REMOVE LATER */
556 && u64NanoTS >= 100000) /* 0.100 ms */
557#else
558 && u64NanoTS >= 250000) /* 0.250 ms */
559#endif
560 {
561 const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
562 VMMR3YieldStop(pVM);
563
564 uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
565 if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
566 cMilliSecs = 1;
567 else
568 cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
569
570 //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
571 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
572 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
573 uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
574 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
575
576 if (rc == VERR_TIMEOUT)
577 rc = VINF_SUCCESS;
578 else if (RT_FAILURE(rc))
579 {
580 rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
581 break;
582 }
583
584 /*
585 * Calc the statistics.
586 * Update averages every 16th time, and flush parts of the history every 64th time.
587 */
588 const uint64_t Elapsed = RTTimeNanoTS() - Start;
589 pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
590 if (Elapsed > u64NanoTS)
591 pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
592 pUVCpu->vm.s.Halt.Method12.cBlocks++;
593 if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
594 {
595 pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
596 if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
597 {
598 pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
599 pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
600 }
601 }
602 //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");
603
604 /*
605 * Clear the block once flag if we actually blocked.
606 */
607 if ( fBlockOnce
608 && Elapsed > 100000 /* 0.1 ms */)
609 fBlockOnce = false;
610 }
611 }
612 //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
613
614 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
615 return rc;
616}
617
618
619/**
620 * Initialize the global 1 halt method.
621 *
622 * @return VBox status code.
623 * @param pUVM Pointer to the user mode VM structure.
624 */
625static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
626{
627 /*
628 * The defaults.
629 */
630 uint32_t cNsResolution = SUPSemEventMultiGetResolution(pUVM->vm.s.pSession);
631 if (cNsResolution > 5*RT_NS_100US)
632 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 50000;
633 else if (cNsResolution > RT_NS_100US)
634 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = cNsResolution / 4;
635 else
636 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 2000;
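/* For example: a coarse 1 ms (1000000 ns) resolution exceeds 5*RT_NS_100US
 * and selects the 50000 ns threshold; a 250000 ns resolution selects
 * 250000/4 = 62500 ns; anything at or below RT_NS_100US gets the 2000 ns
 * floor. */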
637
638 /*
639 * Query overrides.
640 *
641 * I don't have time to bother with niceties such as invalid value checks
642 * here right now, sorry.
643 */
644 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedGlobal1");
645 if (pCfg)
646 {
647 uint32_t u32;
648 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "SpinBlockThreshold", &u32)))
649 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = u32;
650 }
651 LogRel(("HaltedGlobal1 config: cNsSpinBlockThresholdCfg=%u\n",
652 pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg));
653 return VINF_SUCCESS;
654}
655
656
657/**
658 * The global 1 halt method - Block in GVMM (ring-0) and let it
659 * try to take care of the global scheduling of EMT threads.
660 */
661static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
662{
663 PUVM pUVM = pUVCpu->pUVM;
664 PVMCPU pVCpu = pUVCpu->pVCpu;
665 PVM pVM = pUVCpu->pVM;
666 Assert(VMMGetCpu(pVM) == pVCpu);
667
668 /*
669 * Halt loop.
670 */
671 //uint64_t u64NowLog, u64Start;
672 //u64Start = u64NowLog = RTTimeNanoTS();
673 int rc = VINF_SUCCESS;
674 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
675 unsigned cLoops = 0;
676 for (;; cLoops++)
677 {
678 /*
679 * Work the timers and check if we can exit.
680 */
681 uint64_t const u64StartTimers = RTTimeNanoTS();
682 TMR3TimerQueuesDo(pVM);
683 uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
684 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
685 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
686 || VMCPU_FF_ISPENDING(pVCpu, fMask))
687 break;
688
689 /*
690 * Estimate time left to the next event.
691 */
692 //u64NowLog = RTTimeNanoTS();
693 uint64_t u64Delta;
694 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
695 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
696 || VMCPU_FF_ISPENDING(pVCpu, fMask))
697 break;
698
699 /*
700 * Block if we're not spinning and the interval isn't all that small.
701 */
702 if (u64Delta >= pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg)
703 {
704 VMMR3YieldStop(pVM);
705 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
706 || VMCPU_FF_ISPENDING(pVCpu, fMask))
707 break;
708
709 //RTLogPrintf("loop=%-3d u64GipTime=%'llu / %'llu now=%'llu / %'llu\n", cLoops, u64GipTime, u64Delta, u64NowLog, u64GipTime - u64NowLog);
710 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
711 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
712 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
713 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
714 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
715
716 if (rc == VERR_INTERRUPTED)
717 rc = VINF_SUCCESS;
718 else if (RT_FAILURE(rc))
719 {
720 rc = vmR3FatalWaitError(pUVCpu, "VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
721 break;
722 }
723 else
724 {
725 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
726 if (cNsOverslept > 50000)
727 STAM_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOverslept, cNsOverslept);
728 else if (cNsOverslept < -50000)
729 STAM_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockInsomnia, cNsElapsedSchedHalt);
730 else
731 STAM_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOnTime, cNsElapsedSchedHalt);
732 }
733 }
734 /*
735 * When spinning, call upon the GVMM and do some wakeups once
736 * in a while; it's not like we're actually busy or anything.
737 */
738 else if (!(cLoops & 0x1fff))
739 {
740 uint64_t const u64StartSchedYield = RTTimeNanoTS();
741 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
742 uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
743 STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
744 }
745 }
746 //RTLogPrintf("*** %u loops %'llu; lag=%RU64\n", cLoops, u64NowLog - u64Start, TMVirtualSyncGetLag(pVM));
747
748 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
749 return rc;
750}
751
752
753/**
754 * The global 1 halt method - VMR3Wait() worker.
755 *
756 * @returns VBox status code.
757 * @param pUVCpu Pointer to the user mode VMCPU structure.
758 */
759static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
760{
761 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
762
763 PVM pVM = pUVCpu->pUVM->pVM;
764 PVMCPU pVCpu = VMMGetCpu(pVM);
765 Assert(pVCpu->idCpu == pUVCpu->idCpu);
766
767 int rc = VINF_SUCCESS;
768 for (;;)
769 {
770 /*
771 * Check Relevant FFs.
772 */
773 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
774 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
775 break;
776
777 /*
778 * Wait for a while. Someone will wake us up or interrupt the call if
779 * anything needs our attention.
780 */
781 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
782 if (rc == VERR_INTERRUPTED)
783 rc = VINF_SUCCESS;
784 else if (RT_FAILURE(rc))
785 {
786 rc = vmR3FatalWaitError(pUVCpu, "VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
787 break;
788 }
789 }
790
791 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
792 return rc;
793}
794
795
796/**
797 * The global 1 halt method - VMR3NotifyFF() worker.
798 *
799 * @param pUVCpu Pointer to the user mode VMCPU structure.
800 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
801 */
802static DECLCALLBACK(void) vmR3HaltGlobal1NotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
803{
804 if (pUVCpu->vm.s.fWait)
805 {
806 int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
807 AssertRC(rc);
808 }
809 else if ( ( (fFlags & VMNOTIFYFF_FLAGS_POKE)
810 || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
811 && pUVCpu->pVCpu)
812 {
813 VMCPUSTATE enmState = VMCPU_GET_STATE(pUVCpu->pVCpu);
814 if (enmState == VMCPUSTATE_STARTED_EXEC)
815 {
816 if (fFlags & VMNOTIFYFF_FLAGS_POKE)
817 {
818 int rc = SUPR3CallVMMR0Ex(pUVCpu->pVM->pVMR0, pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POKE, 0, NULL);
819 AssertRC(rc);
820 }
821 }
822 else if (enmState == VMCPUSTATE_STARTED_EXEC_REM)
823 {
824 if (!(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
825 REMR3NotifyFF(pUVCpu->pVM);
826 }
827 }
828}
829
830
831/**
832 * Bootstrap VMR3Wait() worker.
833 *
834 * @returns VBox status code.
835 * @param pUVCpu Pointer to the user mode VMCPU structure.
836 */
837static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
838{
839 PUVM pUVM = pUVCpu->pUVM;
840
841 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
842
843 int rc = VINF_SUCCESS;
844 for (;;)
845 {
846 /*
847 * Check Relevant FFs.
848 */
849 if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs) /* global requests pending? */
850 break;
851 if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs) /* local requests pending? */
852 break;
853
854 if ( pUVCpu->pVM
855 && ( VM_FF_ISPENDING(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
856 || VMCPU_FF_ISPENDING(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
857 )
858 )
859 break;
860 if (pUVM->vm.s.fTerminateEMT)
861 break;
862
863 /*
864 * Wait for a while. Someone will wake us up or interrupt the call if
865 * anything needs our attention.
866 */
867 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
868 if (rc == VERR_TIMEOUT)
869 rc = VINF_SUCCESS;
870 else if (RT_FAILURE(rc))
871 {
872 rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
873 break;
874 }
875 }
876
877 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
878 return rc;
879}
880
881
882/**
883 * Bootstrap VMR3NotifyFF() worker.
884 *
885 * @param pUVCpu Pointer to the user mode VMCPU structure.
886 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
887 */
888static DECLCALLBACK(void) vmR3BootstrapNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
889{
890 if (pUVCpu->vm.s.fWait)
891 {
892 int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
893 AssertRC(rc);
894 }
895 NOREF(fFlags);
896}
897
898
899/**
900 * Default VMR3Wait() worker.
901 *
902 * @returns VBox status code.
903 * @param pUVCpu Pointer to the user mode VMCPU structure.
904 */
905static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
906{
907 ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
908
909 PVM pVM = pUVCpu->pVM;
910 PVMCPU pVCpu = pUVCpu->pVCpu;
911 int rc = VINF_SUCCESS;
912 for (;;)
913 {
914 /*
915 * Check Relevant FFs.
916 */
917 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
918 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
919 break;
920
921 /*
922 * Wait for a while. Someone will wake us up or interrupt the call if
923 * anything needs our attention.
924 */
925 rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
926 if (rc == VERR_TIMEOUT)
927 rc = VINF_SUCCESS;
928 else if (RT_FAILURE(rc))
929 {
930 rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc", rc);
931 break;
932 }
933 }
934
935 ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
936 return rc;
937}
938
939
940/**
941 * Default VMR3NotifyFF() worker.
942 *
943 * @param pUVCpu Pointer to the user mode VMCPU structure.
944 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
945 */
946static DECLCALLBACK(void) vmR3DefaultNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
947{
948 if (pUVCpu->vm.s.fWait)
949 {
950 int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
951 AssertRC(rc);
952 }
953 else if ( !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM)
954 && pUVCpu->pVCpu
955 && pUVCpu->pVCpu->enmState == VMCPUSTATE_STARTED_EXEC_REM)
956 REMR3NotifyFF(pUVCpu->pVM);
957}
958
959
960/**
961 * Array with halt method descriptors.
962 * VMINT::iHaltMethod contains an index into this array.
963 */
964static const struct VMHALTMETHODDESC
965{
966 /** The halt method id. */
967 VMHALTMETHOD enmHaltMethod;
968 /** The init function for loading config and initialize variables. */
969 DECLR3CALLBACKMEMBER(int, pfnInit,(PUVM pUVM));
970 /** The term function. */
971 DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
972 /** The VMR3WaitHaltedU function. */
973 DECLR3CALLBACKMEMBER(int, pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
974 /** The VMR3WaitU function. */
975 DECLR3CALLBACKMEMBER(int, pfnWait,(PUVMCPU pUVCpu));
976 /** The VMR3NotifyCpuFFU function. */
977 DECLR3CALLBACKMEMBER(void, pfnNotifyCpuFF,(PUVMCPU pUVCpu, uint32_t fFlags));
978 /** The VMR3NotifyGlobalFFU function. */
979 DECLR3CALLBACKMEMBER(void, pfnNotifyGlobalFF,(PUVM pUVM, uint32_t fFlags));
980} g_aHaltMethods[] =
981{
982 { VMHALTMETHOD_BOOTSTRAP, NULL, NULL, NULL, vmR3BootstrapWait, vmR3BootstrapNotifyCpuFF, NULL },
983 { VMHALTMETHOD_OLD, NULL, NULL, vmR3HaltOldDoHalt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL },
984 { VMHALTMETHOD_1, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait, vmR3DefaultNotifyCpuFF, NULL },
985 { VMHALTMETHOD_GLOBAL_1, vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyCpuFF, NULL },
986};
987
988
989/**
990 * Notify the emulation thread (EMT) about pending Forced Action (FF).
991 *
992 * This function is called by threads other than EMT to make
993 * sure EMT wakes up and promptly services an FF request.
994 *
995 * @param pUVM Pointer to the user mode VM structure.
996 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
997 */
998VMMR3DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
999{
1000 LogFlow(("VMR3NotifyGlobalFFU:\n"));
1001 uint32_t iHaltMethod = pUVM->vm.s.iHaltMethod;
1002
1003 if (g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF) /** @todo make mandatory. */
1004 g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF(pUVM, fFlags);
1005 else
1006 for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
1007 g_aHaltMethods[iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[iCpu], fFlags);
1008}
1009
1010
1011/**
1012 * Notify the emulation thread (EMT) about pending Forced Action (FF).
1013 *
1014 * This function is called by threads other than EMT to make
1015 * sure EMT wakes up and promptly services an FF request.
1016 *
1017 * @param pUVCpu Pointer to the user mode per-VCpu structure.
1018 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_*.
1019 */
1020VMMR3DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
1021{
1022 PUVM pUVM = pUVCpu->pUVM;
1023
1024 LogFlow(("VMR3NotifyCpuFFU:\n"));
1025 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(pUVCpu, fFlags);
1026}
1027
1028
1029/**
1030 * Halted VM Wait.
1031 * Any external event will unblock the thread.
1032 *
1033 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
1034 * case an appropriate status code is returned.
1035 * @param pVM VM handle.
1036 * @param pVCpu VMCPU handle.
1037 * @param fIgnoreInterrupts If set, the VMCPU_FF_INTERRUPT_APIC/PIC flags are ignored.
1038 * @thread The emulation thread.
1039 */
1040VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
1041{
1042 LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));
1043
1044 /*
1045 * Check Relevant FFs.
1046 */
1047 const uint32_t fMask = !fIgnoreInterrupts
1048 ? VMCPU_FF_EXTERNAL_HALTED_MASK
1049 : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
1050 if ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
1051 || VMCPU_FF_ISPENDING(pVCpu, fMask))
1052 {
1053 LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
1054 return VINF_SUCCESS;
1055 }
1056
1057 /*
1058 * The yielder is suspended while we're halting; TM might have clock(s) running
1059 * only at certain times and needs to be notified.
1060 */
1061 if (pVCpu->idCpu == 0)
1062 VMMR3YieldSuspend(pVM);
1063 TMNotifyStartOfHalt(pVCpu);
1064
1065 /*
1066 * Record halt averages for the last second.
1067 */
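/* Example: 500 halts over the last two seconds (off = 2000000000 ns) gives
 * HaltInterval = 2000000000 / 500 = 4000000 ns between halts, and
 * HaltFrequency = ASMMultU64ByU32DivByU32(500, 1000000000, off) = 250 halts/sec. */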
1068 PUVMCPU pUVCpu = pVCpu->pUVCpu;
1069 uint64_t u64Now = RTTimeNanoTS();
1070 int64_t off = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
1071 if (off > 1000000000)
1072 {
1073 if (off > _4G || !pUVCpu->vm.s.cHalts)
1074 {
1075 pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
1076 pUVCpu->vm.s.HaltFrequency = 1;
1077 }
1078 else
1079 {
1080 pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
1081 pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
1082 }
1083 pUVCpu->vm.s.u64HaltsStartTS = u64Now;
1084 pUVCpu->vm.s.cHalts = 0;
1085 }
1086 pUVCpu->vm.s.cHalts++;
1087
1088 /*
1089 * Do the halt.
1090 */
1091 Assert(VMCPU_GET_STATE(pVCpu) == VMCPUSTATE_STARTED);
1092 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
1093 PUVM pUVM = pUVCpu->pUVM;
1094 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
1095 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1096
1097 /*
1098 * Notify TM and resume the yielder
1099 */
1100 TMNotifyEndOfHalt(pVCpu);
1101 if (pVCpu->idCpu == 0)
1102 VMMR3YieldResume(pVM);
1103
1104 LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
1105 return rc;
1106}
1107
1108
1109/**
1110 * Suspended VM Wait.
1111 * Only a handful of forced actions will cause the function to
1112 * return to the caller.
1113 *
1114 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
1115 * case an appropriate status code is returned.
1116 * @param pUVCpu Pointer to the user mode VMCPU structure.
1117 * @thread The emulation thread.
1118 */
1119VMMR3DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
1120{
1121 LogFlow(("VMR3WaitU:\n"));
1122
1123 /*
1124 * Check Relevant FFs.
1125 */
1126 PVM pVM = pUVCpu->pVM;
1127 PVMCPU pVCpu = pUVCpu->pVCpu;
1128
1129 if ( pVM
1130 && ( VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
1131 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
1132 )
1133 )
1134 {
1135 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fGlobalForcedActions));
1136 return VINF_SUCCESS;
1137 }
1138
1139 /*
1140 * Do waiting according to the halt method (so VMR3NotifyFF
1141 * doesn't have to special case anything).
1142 */
1143 PUVM pUVM = pUVCpu->pUVM;
1144 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
1145 LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pUVM->pVM ? pUVM->pVM->fGlobalForcedActions : 0));
1146 return rc;
1147}
1148
1149
1150/**
1151 * Interface that PDMR3Suspend, PDMR3PowerOff and PDMR3Reset use when they wait
1152 * for the handling of asynchronous notifications to complete.
1153 *
1154 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
1155 * case an appropriate status code is returned.
1156 * @param pUVCpu Pointer to the user mode VMCPU structure.
1157 * @thread The emulation thread.
1158 */
1159VMMR3_INT_DECL(int) VMR3AsyncPdmNotificationWaitU(PUVMCPU pUVCpu)
1160{
1161 LogFlow(("VMR3AsyncPdmNotificationWaitU:\n"));
1162 return VMR3WaitU(pUVCpu);
1163}
1164
1165
1166/**
1167 * Interface that the PDM asynchronous-notification-completed helper methods
1168 * use for EMT0 when it is waiting inside VMR3AsyncPdmNotificationWaitU().
1169 *
1170 * @param pUVM Pointer to the user mode VM structure.
1171 */
1172VMMR3_INT_DECL(void) VMR3AsyncPdmNotificationWakeupU(PUVM pUVM)
1173{
1174 LogFlow(("VMR3AsyncPdmNotificationWakeupU:\n"));
1175 VM_FF_SET(pUVM->pVM, VM_FF_REQUEST); /* this will have to do for now. */
1176 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[0], 0 /*fFlags*/);
1177}
1178
1179
1180/**
1181 * Rendezvous callback that will be called once.
1182 *
1183 * @returns VBox strict status code.
1184 * @param pVM VM handle.
1185 * @param pVCpu The VMCPU handle for the calling EMT.
1186 * @param pvUser The new g_aHaltMethods index.
1187 */
1188static DECLCALLBACK(VBOXSTRICTRC) vmR3SetHaltMethodCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
1189{
1190 PUVM pUVM = pVM->pUVM;
1191 uintptr_t i = (uintptr_t)pvUser;
1192 Assert(i < RT_ELEMENTS(g_aHaltMethods));
1193 NOREF(pVCpu);
1194
1195 /*
1196 * Terminate the old one.
1197 */
1198 if ( pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
1199 && g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
1200 {
1201 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
1202 pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
1203 }
1204
1205 /* Assert that the failure fallback is where we expect. */
1206 Assert(g_aHaltMethods[0].enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
1207 Assert(!g_aHaltMethods[0].pfnTerm && !g_aHaltMethods[0].pfnInit);
1208
1209 /*
1210 * Init the new one.
1211 */
1212 int rc = VINF_SUCCESS;
1213 memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
1214 if (g_aHaltMethods[i].pfnInit)
1215 {
1216 rc = g_aHaltMethods[i].pfnInit(pUVM);
1217 if (RT_FAILURE(rc))
1218 {
1219 /* Fall back on the bootstrap method. This requires no
1220 init/term (see assertion above), and will always work. */
1221 AssertLogRelRC(rc);
1222 i = 0;
1223 }
1224 }
1225
1226 /*
1227 * Commit it.
1228 */
1229 pUVM->vm.s.enmHaltMethod = g_aHaltMethods[i].enmHaltMethod;
1230 ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
1231
1232 return rc;
1233}
1234
1235
1236/**
1237 * Changes the halt method.
1238 *
1239 * @returns VBox status code.
1240 * @param pUVM Pointer to the user mode VM structure.
1241 * @param enmHaltMethod The new halt method.
1242 * @thread EMT.
1243 */
1244int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
1245{
1246 PVM pVM = pUVM->pVM; Assert(pVM);
1247 VM_ASSERT_EMT(pVM);
1248 AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);
1249
1250 /*
1251 * Resolve default (can be overridden in the configuration).
1252 */
1253 if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
1254 {
1255 uint32_t u32;
1256 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
1257 if (RT_SUCCESS(rc))
1258 {
1259 enmHaltMethod = (VMHALTMETHOD)u32;
1260 if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
1261 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
1262 }
1263 else if (rc != VERR_CFGM_VALUE_NOT_FOUND && rc != VERR_CFGM_CHILD_NOT_FOUND)
1264 return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
1265 else
1266 enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
1267 //enmHaltMethod = VMHALTMETHOD_1;
1268 //enmHaltMethod = VMHALTMETHOD_OLD;
1269 }
1270 LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));
1271
1272 /*
1273 * Find the descriptor.
1274 */
1275 unsigned i = 0;
1276 while ( i < RT_ELEMENTS(g_aHaltMethods)
1277 && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
1278 i++;
1279 AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);
1280
1281 /*
1282 * This needs to be done while the other EMTs are not sleeping or otherwise messing around.
1283 */
1284 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3SetHaltMethodCallback, (void *)(uintptr_t)i);
1285}
1286