VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMEmt.cpp@80775

Last change on this file since 80775 was 80333, checked in by vboxsync, 6 years ago

VMM: Eliminating the VBOX_BUGREF_9217_PART_I preprocessor macro. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 51.0 KB
/* $Id: VMEmt.cpp 80333 2019-08-16 20:28:38Z vboxsync $ */
/** @file
 * VM - Virtual Machine, The Emulation Thread.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_VM
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/pdmapi.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include "VMInternal.h"
#include <VBox/vmm/vmcc.h>

#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/time.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
int vmR3EmulationThreadWithId(RTTHREAD hThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu);


/**
 * The emulation thread main function.
 *
 * @returns Thread exit code.
 * @param   hThreadSelf The handle to the executing thread.
 * @param   pvArgs      Pointer to the user mode per-VCpu structure (UVMCPU).
 */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD hThreadSelf, void *pvArgs)
{
    PUVMCPU pUVCpu = (PUVMCPU)pvArgs;
    return vmR3EmulationThreadWithId(hThreadSelf, pUVCpu, pUVCpu->idCpu);
}


/**
 * The emulation thread main function, with Virtual CPU ID for debugging.
 *
 * @returns Thread exit code.
 * @param   hThreadSelf The handle to the executing thread.
 * @param   pUVCpu      Pointer to the user mode per-VCpu structure.
 * @param   idCpu       The virtual CPU ID, for backtrace purposes.
 */
int vmR3EmulationThreadWithId(RTTHREAD hThreadSelf, PUVMCPU pUVCpu, VMCPUID idCpu)
{
    PUVM pUVM = pUVCpu->pUVM;
    int  rc;
    RT_NOREF_PV(hThreadSelf);

    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVCpu);
    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtInit)
        pUVM->pVmm2UserMethods->pfnNotifyEmtInit(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    /*
     * The request loop.
     */
    rc = VINF_SUCCESS;
    Log(("vmR3EmulationThread: Emulation thread starting the day's work... Thread=%#x pUVM=%p\n", hThreadSelf, pUVM));
    VMSTATE enmBefore = VMSTATE_CREATED; /* (only used for logging atm.) */
    ASMAtomicIncU32(&pUVM->vm.s.cActiveEmts);
    for (;;)
    {
        /*
         * During early init there is no pVM and/or pVCpu, so make a special path
         * for that to keep things clearly separate.
         */
        PVM    pVM   = pUVM->pVM;
        PVMCPU pVCpu = pUVCpu->pVCpu;
        if (!pVCpu || !pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            /*
             * Only the first VCPU may initialize the VM during early init
             * and must therefore service all VMCPUID_ANY requests.
             * See also VMR3Create.
             */
            if (   (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
                && pUVCpu->idCpu == 0)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), pUVM->pVM ? VMR3GetStateName(pUVM->pVM->enmVMState) : "CREATING"));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }
        }
        else
        {
            /*
             * Pending requests that need servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            enmBefore = pVM->enmVMState;
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }

            if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
            {
                rc = VMMR3EmtRendezvousFF(pVM, pVM->apCpusR3[idCpu]);
                Log(("vmR3EmulationThread: Rendezvous rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVM->vm.s.pNormalReqs || pUVM->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in any EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs)
            {
                /*
                 * Service execute in specific EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM, pUVCpu->idCpu, false /*fPriorityOnly*/);
                Log(("vmR3EmulationThread: Req (cpu=%u) rc=%Rrc, VM state %s -> %s\n", pUVCpu->idCpu, rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (   VM_FF_IS_SET(pVM, VM_FF_DBGF)
                     || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM, pVCpu);
                Log(("vmR3EmulationThread: Dbg rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VBOXSTRICTRC_VAL(VMR3ResetFF(pVM));
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Rrc, VM state %s -> %s\n", rc, VMR3GetStateName(enmBefore), VMR3GetStateName(pVM->enmVMState)));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVCpu);
                if (RT_FAILURE(rc))
                {
                    AssertLogRelMsgFailed(("VMR3WaitU failed with %Rrc\n", rc));
                    break;
                }
            }

            /*
             * Check for termination requests; these have extremely high priority.
             */
            if (   rc == VINF_EM_TERMINATE
                || pUVM->vm.s.fTerminateEMT)
                break;
        }

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially resume
         * or start the VM, in that case we'll get a change in VM status
         * indicating that we're now running.
         */
        if (RT_SUCCESS(rc))
        {
            pVM = pUVM->pVM;
            if (pVM)
            {
                pVCpu = pVM->apCpusR3[idCpu];
                if (   pVM->enmVMState == VMSTATE_RUNNING
                    && VMCPUSTATE_IS_STARTED(VMCPU_GET_STATE(pVCpu)))
                {
                    rc = EMR3ExecuteVM(pVM, pVCpu);
                    Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Rrc, enmVMState=%d\n", rc, pVM->enmVMState));
                }
            }
        }

    } /* forever */


    /*
     * Decrement the active EMT count if we haven't done it yet in vmR3Destroy.
     */
    if (!pUVCpu->vm.s.fBeenThruVmDestroy)
        ASMAtomicDecU32(&pUVM->vm.s.cActiveEmts);


    /*
     * Cleanup and exit.
     * EMT0 does the VM destruction after all other EMTs have deregistered and terminated.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Rrc enmBefore=%d enmVMState=%d\n",
         hThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    PVM pVM;
    if (   idCpu == 0
        && (pVM = pUVM->pVM) != NULL)
    {
        /* Wait for any other EMTs to terminate before we destroy the VM (see vmR3DestroyVM). */
        for (VMCPUID iCpu = 1; iCpu < pUVM->cCpus; iCpu++)
        {
            RTTHREAD hThread;
            ASMAtomicXchgHandle(&pUVM->aCpus[iCpu].vm.s.ThreadEMT, NIL_RTTHREAD, &hThread);
            if (hThread != NIL_RTTHREAD)
            {
                int rc2 = RTThreadWait(hThread, 5 * RT_MS_1SEC, NULL);
                AssertLogRelMsgRC(rc2, ("iCpu=%u rc=%Rrc\n", iCpu, rc2));
                if (RT_FAILURE(rc2))
                    pUVM->aCpus[iCpu].vm.s.ThreadEMT = hThread;
            }
        }

        /* Switch to the terminated state, clearing the VM pointer and finally destroy the VM. */
        vmR3SetTerminated(pVM);

        pUVM->pVM = NULL;
        for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
        {
            pUVM->aCpus[iCpu].pVM   = NULL;
            pUVM->aCpus[iCpu].pVCpu = NULL;
        }

        int rc2 = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertLogRelRC(rc2);
    }
    /* Deregister the EMT with VMMR0. */
    else if (   idCpu != 0
             && (pVM = pUVM->pVM) != NULL)
    {
        int rc2 = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), idCpu, VMMR0_DO_GVMM_DEREGISTER_VMCPU, 0, NULL);
        AssertLogRelRC(rc2);
    }

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyEmtTerm)
        pUVM->pVmm2UserMethods->pfnNotifyEmtTerm(pUVM->pVmm2UserMethods, pUVM, pUVCpu);

    pUVCpu->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}

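/*
 * Illustrative sketch (not part of the original file): how a non-EMT thread
 * typically hands work to the request loop above. VMR3ReqCallWaitU (from
 * VMReq.cpp) queues the request on the UVM and wakes an EMT, which picks it
 * up via VMR3ReqProcessU in vmR3EmulationThreadWithId. The worker callback
 * and its argument are made up for the example.
 */
#if 0 /* example only, not compiled */
static DECLCALLBACK(int) exampleWorker(PUVM pUVM, uint32_t uValue)
{
    LogRel(("exampleWorker: running on an EMT, uValue=%u\n", uValue));
    RT_NOREF(pUVM);
    return VINF_SUCCESS;
}

static int exampleQueueOnAnyEmt(PUVM pUVM)
{
    /* Blocks the calling thread until some EMT has run exampleWorker(pUVM, 42). */
    return VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)exampleWorker, 2, pUVM, UINT32_C(42));
}
#endif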

/**
 * Gets the name of a halt method.
 *
 * @returns Pointer to a read only string.
 * @param   enmMethod   The method.
 */
static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
{
    switch (enmMethod)
    {
        case VMHALTMETHOD_BOOTSTRAP:    return "bootstrap";
        case VMHALTMETHOD_DEFAULT:      return "default";
        case VMHALTMETHOD_OLD:          return "old";
        case VMHALTMETHOD_1:            return "method1";
        //case VMHALTMETHOD_2:            return "method2";
        case VMHALTMETHOD_GLOBAL_1:     return "global1";
        default:                        return "unknown";
    }
}


/**
 * Signal a fatal wait error.
 *
 * @returns Fatal error code to be propagated up the call stack.
 * @param   pUVCpu  The user mode per CPU structure of the calling
 *                  EMT.
 * @param   pszFmt  The error format with a single %Rrc in it.
 * @param   rcFmt   The status code to format.
 */
static int vmR3FatalWaitError(PUVMCPU pUVCpu, const char *pszFmt, int rcFmt)
{
    /** @todo This is wrong ... raise a fatal error / guru meditation
     *        instead. */
    AssertLogRelMsgFailed((pszFmt, rcFmt));
    ASMAtomicUoWriteBool(&pUVCpu->pUVM->vm.s.fTerminateEMT, true);
    if (pUVCpu->pVM)
        VM_FF_SET(pUVCpu->pVM, VM_FF_CHECK_VM_STATE);
    return VERR_VM_FATAL_WAIT_ERROR;
}


/**
 * The old halt loop.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps setting an FF.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedYield   = RTTimeNanoTS();
                RTThreadYield(); /* this is the best we can do here */
                uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            else
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}

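/*
 * Summary sketch (not part of the original file) of the wait tiers used by
 * vmR3HaltOldDoHalt above, pulled into a helper for readability. The
 * thresholds mirror the constants in the function; the helper itself and its
 * names are made up.
 */
#if 0 /* example only, not compiled */
typedef enum EXAMPLEHALTACTION
{
    EXAMPLEHALTACTION_SPIN,       /**< Busy wait; blocking would cost more than the wait. */
    EXAMPLEHALTACTION_YIELD,      /**< RTThreadYield(). */
    EXAMPLEHALTACTION_SLEEP_1MS,  /**< RTSemEventWait for 1 ms. */
    EXAMPLEHALTACTION_SLEEP_LONG  /**< RTSemEventWait for RT_MIN((cNs - 1000000) / 1000000, 15) ms. */
} EXAMPLEHALTACTION;

static EXAMPLEHALTACTION exampleOldHaltAction(uint64_t cNsToNextEvent)
{
    if (cNsToNextEvent <   50000)   /* <  0.05 ms */
        return EXAMPLEHALTACTION_SPIN;
    if (cNsToNextEvent <  870000)   /* <  0.87 ms, "a bit speculative" per the code */
        return EXAMPLEHALTACTION_YIELD;
    if (cNsToNextEvent < 2000000)   /* <  2.00 ms */
        return EXAMPLEHALTACTION_SLEEP_1MS;
    return EXAMPLEHALTACTION_SLEEP_LONG;
}
#endif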

/**
 * Initialize the configuration of halt method 1 & 2.
 *
 * @return VBox status code. Failure on invalid CFGM data.
 * @param   pUVM    The user mode VM structure.
 */
static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
{
    /*
     * The defaults.
     */
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg        =   2*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg        =  75*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg           =  30*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg            =  20*1000000;
#else
    pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
    pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg        =   5*1000000;
    pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg        = 200*1000000;
    pUVM->vm.s.Halt.Method12.u32StartSpinningCfg           =  20*1000000;
    pUVM->vm.s.Halt.Method12.u32StopSpinningCfg            =   2*1000000;
#endif

    /*
     * Query overrides.
     *
     * I don't have time to bother with niceties such as invalid value checks
     * here right now. Sorry.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
    if (pCfg)
    {
        uint32_t u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
            pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
            pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
            pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
        LogRel(("VMEmt: HaltedMethod1 config: %d/%d/%d/%d/%d\n",
                pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
                pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
                pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
    }

    return VINF_SUCCESS;
}

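/*
 * Example (assuming the usual frontend mapping of "VBoxInternal/" extradata
 * keys onto the CFGM tree root): overriding the halt method 1 tunables read
 * above for a VM called "MyVM". All values are nanoseconds except the divisor.
 *
 *   VBoxManage setextradata "MyVM" "VBoxInternal/VMM/HaltedMethod1/MinBlockInterval"         5000000
 *   VBoxManage setextradata "MyVM" "VBoxInternal/VMM/HaltedMethod1/StartSpinning"           20000000
 *   VBoxManage setextradata "MyVM" "VBoxInternal/VMM/HaltedMethod1/LagBlockIntervalDivisor"        4
 */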

/**
 * Initialize halt method 1.
 *
 * @return VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
{
    return vmR3HaltMethod12ReadConfigU(pUVM);
}


/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occasional blocking until
 * the lag has been eliminated.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM  = pUVCpu->pUVM;
    PVMCPU  pVCpu = pUVCpu->pVCpu;
    PVM     pVM   = pUVCpu->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;
    bool fSpinning = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
    {
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            &&  u64NanoTS >= 100000) /* 0.100 ms */
#else
            &&  u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
            uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);

            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVCpu->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (   fBlockOnce
                && Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}

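/*
 * Worked example of the block-once interval computed by vmR3HaltMethod1Halt
 * above, using the debug defaults from vmR3HaltMethod12ReadConfigU (divisor 4,
 * min 2 ms, max 75 ms). With a virtual sync lag of 40 ms:
 *
 *   lag / divisor         = 40 ms / 4 = 10 ms
 *   clamped to [min, max] = RT_MAX(2 ms, RT_MIN(10 ms, 75 ms)) = 10 ms
 *
 * So while spinning the EMT still blocks roughly once every 10 ms; the larger
 * the lag, the longer it spins between those mandatory blocks.
 */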

/**
 * Initialize the global 1 halt method.
 *
 * @return VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
{
    /*
     * The defaults.
     */
    uint32_t cNsResolution = SUPSemEventMultiGetResolution(pUVM->vm.s.pSession);
    if (cNsResolution > 5*RT_NS_100US)
        pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 50000;
    else if (cNsResolution > RT_NS_100US)
        pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = cNsResolution / 4;
    else
        pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 2000;

    /*
     * Query overrides.
     *
     * I don't have time to bother with niceties such as invalid value checks
     * here right now. Sorry.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedGlobal1");
    if (pCfg)
    {
        uint32_t u32;
        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "SpinBlockThreshold", &u32)))
            pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = u32;
    }
    LogRel(("VMEmt: HaltedGlobal1 config: cNsSpinBlockThresholdCfg=%u\n",
            pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg));
    return VINF_SUCCESS;
}

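/*
 * Example of the spin/block threshold chosen by vmR3HaltGlobal1Init above for
 * a few host wait resolutions (SUPSemEventMultiGetResolution):
 *
 *   resolution > 500 us (coarse, e.g. 1 ms timer tick)  -> threshold 50 us
 *   100 us < resolution <= 500 us                       -> threshold resolution / 4
 *   resolution <= 100 us (high resolution host)         -> threshold  2 us
 *
 * Intervals below the threshold are handled in ring-3 (spin/poll); longer ones
 * block in ring-0 via VMMR0_DO_GVMM_SCHED_HALT, see vmR3HaltGlobal1Halt below.
 */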

/**
 * The global 1 halt method - Block in GVMM (ring-0) and let it
 * try to take care of the global scheduling of EMT threads.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM  = pUVCpu->pUVM;
    PVMCPU  pVCpu = pUVCpu->pVCpu;
    PVM     pVM   = pUVCpu->pVM;
    Assert(VMMGetCpu(pVM) == pVCpu);
    NOREF(u64Now);

    /*
     * Halt loop.
     */
    //uint64_t u64NowLog, u64Start;
    //u64Start = u64NowLog = RTTimeNanoTS();
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        //u64NowLog = RTTimeNanoTS();
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta >= pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg)
        {
            VMMR3YieldStop(pVM);
            if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
                || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
                break;

            //RTLogPrintf("loop=%-3d u64GipTime=%'llu / %'llu  now=%'llu / %'llu\n", cLoops, u64GipTime, u64Delta, u64NowLog, u64GipTime - u64NowLog);
            uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
            rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            uint64_t const u64EndSchedHalt     = RTTimeNanoTS();
            uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);

            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                rc = vmR3FatalWaitError(pUVCpu, "vmR3HaltGlobal1Halt: VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
                break;
            }
            else
            {
                int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
                if (cNsOverslept > 50000)
                    STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOverslept, cNsOverslept);
                else if (cNsOverslept < -50000)
                    STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockInsomnia, cNsElapsedSchedHalt);
                else
                    STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOnTime, cNsElapsedSchedHalt);
            }
        }
        /*
         * When spinning call upon the GVMM and do some wakeups once
         * in a while; it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            uint64_t const u64StartSchedYield   = RTTimeNanoTS();
            rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
        }
    }
    //RTLogPrintf("*** %u loops %'llu;  lag=%RU64\n", cLoops, u64NowLog - u64Start, TMVirtualSyncGetLag(pVM));

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * The global 1 halt method - VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pUVM->pVM;
    PVMCPU pVCpu = VMMGetCpu(pVM);
    Assert(pVCpu->idCpu == pUVCpu->idCpu);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
        if (rc == VERR_INTERRUPTED)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "vmR3HaltGlobal1Wait: VMMR0_DO_GVMM_SCHED_HALT->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * The global 1 halt method - VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3HaltGlobal1NotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    /*
     * With ring-0 halting, the fWait flag isn't set, so we have to check the
     * CPU state to figure out whether to do a wakeup call.
     */
    PVMCPU pVCpu = pUVCpu->pVCpu;
    if (pVCpu)
    {
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
        if (enmState == VMCPUSTATE_STARTED_HALTED || pUVCpu->vm.s.fWait)
        {
            int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pUVCpu->pVM), pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
            AssertRC(rc);
        }
        else if (   (fFlags & VMNOTIFYFF_FLAGS_POKE)
                 || !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
        {
            if (enmState == VMCPUSTATE_STARTED_EXEC)
            {
                if (fFlags & VMNOTIFYFF_FLAGS_POKE)
                {
                    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pUVCpu->pVM), pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POKE, 0, NULL);
                    AssertRC(rc);
                }
            }
            else if (   enmState == VMCPUSTATE_STARTED_EXEC_NEM
                     || enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
                NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
#ifdef VBOX_WITH_REM
            else if (enmState == VMCPUSTATE_STARTED_EXEC_REM)
            {
                if (!(fFlags & VMNOTIFYFF_FLAGS_DONE_REM))
                    REMR3NotifyFF(pUVCpu->pVM);
            }
#endif
        }
    }
    /* This probably makes little sense: */
    else if (pUVCpu->vm.s.fWait)
    {
        int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pUVCpu->pVM), pUVCpu->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
        AssertRC(rc);
    }
}


/**
 * Bootstrap VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3BootstrapWait(PUVMCPU pUVCpu)
{
    PUVM pUVM = pUVCpu->pUVM;

    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (pUVM->vm.s.pNormalReqs   || pUVM->vm.s.pPriorityReqs)   /* global requests pending? */
            break;
        if (pUVCpu->vm.s.pNormalReqs || pUVCpu->vm.s.pPriorityReqs) /* local requests pending? */
            break;

        if (   pUVCpu->pVM
            && (   VM_FF_IS_ANY_SET(pUVCpu->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
                || VMCPU_FF_IS_ANY_SET(VMMGetCpu(pUVCpu->pVM), VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
               )
           )
            break;
        if (pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Bootstrap VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3BootstrapNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
        AssertRC(rc);
    }
    NOREF(fFlags);
}


/**
 * Default VMR3Wait() worker.
 *
 * @returns VBox status code.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 */
static DECLCALLBACK(int) vmR3DefaultWait(PUVMCPU pUVCpu)
{
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);

    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;
    int    rc    = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Check Relevant FFs.
         */
        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1000);
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}


/**
 * Default VMR3NotifyFF() worker.
 *
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 */
static DECLCALLBACK(void) vmR3DefaultNotifyCpuFF(PUVMCPU pUVCpu, uint32_t fFlags)
{
    if (pUVCpu->vm.s.fWait)
    {
        int rc = RTSemEventSignal(pUVCpu->vm.s.EventSemWait);
        AssertRC(rc);
    }
    else
    {
        PVMCPU pVCpu = pUVCpu->pVCpu;
        if (pVCpu)
        {
            VMCPUSTATE enmState = pVCpu->enmState;
            if (   enmState == VMCPUSTATE_STARTED_EXEC_NEM
                || enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
                NEMR3NotifyFF(pUVCpu->pVM, pVCpu, fFlags);
#ifdef VBOX_WITH_REM
            else if (   !(fFlags & VMNOTIFYFF_FLAGS_DONE_REM)
                     && enmState == VMCPUSTATE_STARTED_EXEC_REM)
                REMR3NotifyFF(pUVCpu->pVM);
#endif
        }
    }
}


/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method ID. */
    VMHALTMETHOD enmHaltMethod;
    /** Set if the method supports halting directly in ring-0. */
    bool         fMayHaltInRing0;
    /** The init function for loading config and initializing variables. */
    DECLR3CALLBACKMEMBER(int,  pfnInit,(PUVM pUVM));
    /** The term function. */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    /** The VMR3WaitHaltedU function. */
    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now));
    /** The VMR3WaitU function. */
    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVMCPU pUVCpu));
    /** The VMR3NotifyCpuFFU function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyCpuFF,(PUVMCPU pUVCpu, uint32_t fFlags));
    /** The VMR3NotifyGlobalFFU function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyGlobalFF,(PUVM pUVM, uint32_t fFlags));
} g_aHaltMethods[] =
{
    { VMHALTMETHOD_BOOTSTRAP, false, NULL,                NULL, NULL,                vmR3BootstrapWait,   vmR3BootstrapNotifyCpuFF,   NULL },
    { VMHALTMETHOD_OLD,       false, NULL,                NULL, vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyCpuFF,     NULL },
    { VMHALTMETHOD_1,         false, vmR3HaltMethod1Init, NULL, vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyCpuFF,     NULL },
    { VMHALTMETHOD_GLOBAL_1,  true,  vmR3HaltGlobal1Init, NULL, vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyCpuFF, NULL },
};


/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure the EMT wakes up and promptly services an FF request.
 *
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 * @internal
 */
VMMR3_INT_DECL(void) VMR3NotifyGlobalFFU(PUVM pUVM, uint32_t fFlags)
{
    LogFlow(("VMR3NotifyGlobalFFU:\n"));
    uint32_t iHaltMethod = pUVM->vm.s.iHaltMethod;

    if (g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF) /** @todo make mandatory. */
        g_aHaltMethods[iHaltMethod].pfnNotifyGlobalFF(pUVM, fFlags);
    else
        for (VMCPUID iCpu = 0; iCpu < pUVM->cCpus; iCpu++)
            g_aHaltMethods[iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[iCpu], fFlags);
}


/**
 * Notify the emulation thread (EMT) about pending Forced Action (FF).
 *
 * This function is called by threads other than EMT to make
 * sure the EMT wakes up and promptly services an FF request.
 *
 * @param   pUVCpu  Pointer to the user mode per CPU VM structure.
 * @param   fFlags  Notification flags, VMNOTIFYFF_FLAGS_*.
 * @internal
 */
VMMR3_INT_DECL(void) VMR3NotifyCpuFFU(PUVMCPU pUVCpu, uint32_t fFlags)
{
    PUVM pUVM = pUVCpu->pUVM;

    LogFlow(("VMR3NotifyCpuFFU:\n"));
    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(pUVCpu, fFlags);
}


/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fIgnoreInterrupts   If set, the VM_FF_INTERRUPT flags are ignored.
 * @thread  The emulation thread.
 * @remarks Made visible for implementing vmsvga sync register.
 * @internal
 */
VMMR3_INT_DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    const uint32_t fMask = !fIgnoreInterrupts
                         ? VMCPU_FF_EXTERNAL_HALTED_MASK
                         : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
    if (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK)
        || VMCPU_FF_IS_ANY_SET(pVCpu, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#RX64)\n", pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting; TM might have clock(s) running
     * only at certain times and needs to be notified.
     */
    if (pVCpu->idCpu == 0)
        VMMR3YieldSuspend(pVM);
    TMNotifyStartOfHalt(pVCpu);

    /*
     * Record halt averages for the last second.
     */
    PUVMCPU  pUVCpu = pVCpu->pUVCpu;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t  off    = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        if (off > _4G || !pUVCpu->vm.s.cHalts)
        {
            pUVCpu->vm.s.HaltInterval  = 1000000000 /* 1 sec */;
            pUVCpu->vm.s.HaltFrequency = 1;
        }
        else
        {
            pUVCpu->vm.s.HaltInterval  = (uint32_t)off / pUVCpu->vm.s.cHalts;
            pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVCpu->vm.s.u64HaltsStartTS = u64Now;
        pUVCpu->vm.s.cHalts          = 0;
    }
    pUVCpu->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

    /*
     * Notify TM and resume the yielder.
     */
    TMNotifyEndOfHalt(pVCpu);
    if (pVCpu->idCpu == 0)
        VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
    return rc;
}

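/*
 * Worked example of the halt statistics kept by VMR3WaitHalted above: if 500
 * halts were recorded when the one-second window expires at off = 2e9 ns, then
 *
 *   HaltInterval  = off / cHalts = 2000000000 / 500 = 4000000 ns (4 ms between halts)
 *   HaltFrequency = ASMMultU64ByU32DivByU32(500, 1000000000, 2000000000) = 250 halts/sec
 */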

/**
 * Suspended VM Wait.
 * Only a handful of forced actions will cause the function to
 * return to the caller.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @thread  The emulation thread.
 * @internal
 */
VMMR3_INT_DECL(int) VMR3WaitU(PUVMCPU pUVCpu)
{
    LogFlow(("VMR3WaitU:\n"));

    /*
     * Check Relevant FFs.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    if (   pVM
        && (   VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK)
            || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_EXTERNAL_SUSPENDED_MASK)
           )
       )
    {
        LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fGlobalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * Do waiting according to the halt method (so VMR3NotifyFF
     * doesn't have to special case anything).
     */
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVCpu);
    LogFlow(("VMR3WaitU: returns %Rrc (FF %#x)\n", rc, pUVM->pVM ? pUVM->pVM->fGlobalForcedActions : 0));
    return rc;
}


/**
 * Interface that PDMR3Suspend, PDMR3PowerOff and PDMR3Reset use when they wait
 * for the handling of asynchronous notifications to complete.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @thread  The emulation thread.
 */
VMMR3_INT_DECL(int) VMR3AsyncPdmNotificationWaitU(PUVMCPU pUVCpu)
{
    LogFlow(("VMR3AsyncPdmNotificationWaitU:\n"));
    return VMR3WaitU(pUVCpu);
}


/**
 * Interface that the PDM asynchronous-notification-completed helper methods
 * use for EMT0 when it is waiting inside VMR3AsyncPdmNotificationWaitU().
 *
 * @param   pUVM    Pointer to the user mode VM structure.
 */
VMMR3_INT_DECL(void) VMR3AsyncPdmNotificationWakeupU(PUVM pUVM)
{
    LogFlow(("VMR3AsyncPdmNotificationWakeupU:\n"));
    VM_FF_SET(pUVM->pVM, VM_FF_REQUEST); /* this will have to do for now. */
    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[0], 0 /*fFlags*/);
}


/**
 * Rendezvous callback that will be called once.
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser  The new g_aHaltMethods index.
 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3SetHaltMethodCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    PUVM      pUVM = pVM->pUVM;
    uintptr_t i    = (uintptr_t)pvUser;
    Assert(i < RT_ELEMENTS(g_aHaltMethods));
    NOREF(pVCpu);

    /*
     * Terminate the old one.
     */
    if (   pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
        && g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
    {
        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
        pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
    }

    /* Assert that the failure fallback is where we expect. */
    Assert(g_aHaltMethods[0].enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);
    Assert(!g_aHaltMethods[0].pfnTerm && !g_aHaltMethods[0].pfnInit);

    /*
     * Init the new one.
     */
    int rc = VINF_SUCCESS;
    memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
    if (g_aHaltMethods[i].pfnInit)
    {
        rc = g_aHaltMethods[i].pfnInit(pUVM);
        if (RT_FAILURE(rc))
        {
            /* Fall back on the bootstrap method. This requires no
               init/term (see assertion above), and will always work. */
            AssertLogRelRC(rc);
            i = 0;
        }
    }

    /*
     * Commit it.
     */
    pUVM->vm.s.enmHaltMethod = g_aHaltMethods[i].enmHaltMethod;
    ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);

    VMMR3SetMayHaltInRing0(pVCpu, g_aHaltMethods[i].fMayHaltInRing0,
                           g_aHaltMethods[i].enmHaltMethod == VMHALTMETHOD_GLOBAL_1
                           ? pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg : 0);

    return rc;
}


/**
 * Changes the halt method.
 *
 * @returns VBox status code.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   enmHaltMethod   The new halt method.
 * @thread  EMT.
 */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
{
    PVM pVM = pUVM->pVM; Assert(pVM);
    VM_ASSERT_EMT(pVM);
    AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);

    /*
     * Resolve default (can be overridden in the configuration).
     */
    if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
        if (RT_SUCCESS(rc))
        {
            enmHaltMethod = (VMHALTMETHOD)u32;
            if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
                return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
        }
        else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
            return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
        else
            enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
            //enmHaltMethod = VMHALTMETHOD_1;
            //enmHaltMethod = VMHALTMETHOD_OLD;
    }
    LogRel(("VMEmt: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));

    /*
     * Find the descriptor.
     */
    unsigned i = 0;
    while (   i < RT_ELEMENTS(g_aHaltMethods)
           && g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
        i++;
    AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);

    /*
     * This needs to be done while the other EMTs are not sleeping or otherwise messing around.
     */
    return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3SetHaltMethodCallback, (void *)(uintptr_t)i);
}

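/*
 * Example (assuming the usual "VBoxInternal/" extradata-to-CFGM mapping):
 * forcing the halt method resolved by vmR3SetHaltMethodU above through the
 * VM/HaltMethod key. The value is the numeric VMHALTMETHOD enum value; look
 * the numbers up in the VMHALTMETHOD declaration rather than hardcoding them.
 *
 *   VBoxManage setextradata "MyVM" "VBoxInternal/VM/HaltMethod" <VMHALTMETHOD value>
 */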

/**
 * Special interface for implementing a HLT-like port on a device.
 *
 * This can be called directly from device code, provided the device is trusted
 * to access the VMM directly. Since we may not have an accurate register set
 * and the caller certainly shouldn't (device code does not access CPU
 * registers), this function will return when interrupts are pending regardless
 * of the actual EFLAGS.IF state.
 *
 * @returns VBox error status (never informational statuses).
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The id of the calling EMT.
 */
VMMR3DECL(int) VMR3WaitForDeviceReady(PVM pVM, VMCPUID idCpu)
{
    /*
     * Validate caller and resolve the CPU ID.
     */
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
    PVMCPU pVCpu = pVM->apCpusR3[idCpu];
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);

    /*
     * Tag along with the HLT mechanics for now.
     */
    int rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;
    return rc;
}


/**
 * Wakes up a CPU that has called VMR3WaitForDeviceReady.
 *
 * @returns VBox error status (never informational statuses).
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The id of the calling EMT.
 */
VMMR3DECL(int) VMR3NotifyCpuDeviceReady(PVM pVM, VMCPUID idCpu)
{
    /*
     * Validate caller and resolve the CPU ID.
     */
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
    PVMCPU pVCpu = pVM->apCpusR3[idCpu];

    /*
     * Pretend it was an FF that got set since we've got logic for that already.
     */
    VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
    return VINF_SUCCESS;
}

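/*
 * Illustrative sketch (hypothetical device, not from this file): a HLT-like
 * port built on the two interfaces above. The EMT blocks inside the device's
 * I/O handler until some other thread, e.g. an interrupt source, kicks it.
 */
#if 0 /* example only, not compiled */
/* EMT side: called from the device's I/O-port handler on the EMT. */
static int exampleDevHaltPort(PVM pVM, VMCPUID idCpu)
{
    /* Also returns when interrupts/FFs are pending, see VMR3WaitForDeviceReady. */
    return VMR3WaitForDeviceReady(pVM, idCpu);
}

/* Producer side: wake the halted EMT from any other thread. */
static void exampleDevKick(PVM pVM, VMCPUID idCpu)
{
    int rc = VMR3NotifyCpuDeviceReady(pVM, idCpu);
    AssertRC(rc);
}
#endif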

/**
 * Returns the number of active EMTs.
 *
 * This is used by the rendezvous code during VM destruction to avoid waiting
 * for EMTs that aren't around any more.
 *
 * @returns Number of active EMTs. 0 if invalid parameter.
 * @param   pUVM    The user mode VM structure.
 */
VMMR3_INT_DECL(uint32_t) VMR3GetActiveEmts(PUVM pUVM)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, 0);
    return pUVM->vm.s.cActiveEmts;
}
