VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMMInternal.h@ 90920

Last change on this file since 90920 was 90862, checked in by vboxsync, 4 years ago

IPRT,SUPDrv,VMM,++: Bumped major support driver version. Added RTLogSetR0ProgramStart and made the VMM use it when configuring the ring-0 loggers. Removed pfnFlush from the parameter list of RTLogCreateEx[V]. bugref:10086

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.2 KB
Line 
1/* $Id: VMMInternal.h 90862 2021-08-25 00:37:59Z vboxsync $ */
2/** @file
3 * VMM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_VMMInternal_h
19#define VMM_INCLUDED_SRC_include_VMMInternal_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/sup.h>
26#include <VBox/vmm/stam.h>
27#include <VBox/vmm/vmm.h>
28#include <VBox/log.h>
29#include <iprt/critsect.h>
30
31#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
32# error "Not in VMM! This is an internal header!"
33#endif
34#if HC_ARCH_BITS == 32
35# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
36#endif
37
38
39
40/** @defgroup grp_vmm_int Internals
41 * @ingroup grp_vmm
42 * @internal
43 * @{
44 */
45
46/** @def VBOX_WITH_RC_RELEASE_LOGGING
47 * Enables RC release logging. */
48#define VBOX_WITH_RC_RELEASE_LOGGING
49
50/** @def VBOX_WITH_R0_LOGGING
51 * Enables Ring-0 logging (non-release).
52 *
53 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
54 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
55 * \#if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
56 */
57#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
58# define VBOX_WITH_R0_LOGGING
59#endif
60
61/** @def VBOX_STRICT_VMM_STACK
62 * Enables VMM stack guard pages to catch stack over- and underruns. */
63#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
64# define VBOX_STRICT_VMM_STACK
65#endif
66
67
68/**
69 * R0 logger data (ring-0 only data).
70 */
71typedef struct VMMR0PERVCPULOGGER
72{
73 /** Pointer to the logger instance.
74 * The RTLOGGER::u32UserValue1 member is used for flags and magic, while the
75 * RTLOGGER::u64UserValue2 member is the corresponding PGVMCPU value.
76 * RTLOGGER::u64UserValue3 is currently and set to the PGVMCPU value too. */
77 R0PTRTYPE(PRTLOGGER) pLogger;
78 /** Log buffer descriptor.
79 * The buffer is allocated in a common block for all VCpus, see VMMR0PERVM. */
80 RTLOGBUFFERDESC BufDesc;
81 /** Flag indicating whether we've registered the instance already. */
82 bool fRegistered;
83 bool afPadding[7];
84} VMMR0PERVCPULOGGER;
85/** Pointer to the R0 logger data (ring-0 only). */
86typedef VMMR0PERVCPULOGGER *PVMMR0PERVCPULOGGER;
87
88
89/**
90 * R0 logger data shared with ring-3 (per CPU).
91 */
92typedef struct VMMR3CPULOGGER
93{
94 /** Auxiliary buffer descriptor. */
95 RTLOGBUFFERAUXDESC AuxDesc;
96 /** Ring-3 mapping of the logging buffer. */
97 R3PTRTYPE(char *) pchBufR3;
98 /** The buffer size. */
99 uint32_t cbBuf;
100 uint32_t uReserved;
101} VMMR3CPULOGGER;
102/** Pointer to r0 logger data shared with ring-3. */
103typedef VMMR3CPULOGGER *PVMMR3CPULOGGER;
104
105
106/**
107 * Jump buffer for the setjmp/longjmp like constructs used to
108 * quickly 'call' back into Ring-3.
109 */
110typedef struct VMMR0JMPBUF
111{
112 /** Traditional jmp_buf stuff
113 * @{ */
114#if HC_ARCH_BITS == 32
115 uint32_t ebx;
116 uint32_t esi;
117 uint32_t edi;
118 uint32_t ebp;
119 uint32_t esp;
120 uint32_t eip;
121 uint32_t eflags;
122#endif
123#if HC_ARCH_BITS == 64
124 uint64_t rbx;
125# ifdef RT_OS_WINDOWS
126 uint64_t rsi;
127 uint64_t rdi;
128# endif
129 uint64_t rbp;
130 uint64_t r12;
131 uint64_t r13;
132 uint64_t r14;
133 uint64_t r15;
134 uint64_t rsp;
135 uint64_t rip;
136# ifdef RT_OS_WINDOWS
137 uint128_t xmm6;
138 uint128_t xmm7;
139 uint128_t xmm8;
140 uint128_t xmm9;
141 uint128_t xmm10;
142 uint128_t xmm11;
143 uint128_t xmm12;
144 uint128_t xmm13;
145 uint128_t xmm14;
146 uint128_t xmm15;
147# endif
148 uint64_t rflags;
149#endif
150 /** @} */
151
152 /** Flag that indicates that we've done a ring-3 call. */
153 bool fInRing3Call;
154 /** The number of bytes we've saved. */
155 uint32_t cbSavedStack;
156 /** Pointer to the buffer used to save the stack.
157 * This is assumed to be 8KB. */
158 RTR0PTR pvSavedStack;
159 /** Esp we we match against esp on resume to make sure the stack wasn't relocated. */
160 RTHCUINTREG SpCheck;
161 /** The esp we should resume execution with after the restore. */
162 RTHCUINTREG SpResume;
163 /** ESP/RSP at the time of the jump to ring 3. */
164 RTHCUINTREG SavedEsp;
165 /** EBP/RBP at the time of the jump to ring 3. */
166 RTHCUINTREG SavedEbp;
167 /** EIP/RIP within vmmR0CallRing3LongJmp for assisting unwinding. */
168 RTHCUINTREG SavedEipForUnwind;
169 /** Unwind: The vmmR0CallRing3SetJmp return address value. */
170 RTHCUINTREG UnwindRetPcValue;
171 /** Unwind: The vmmR0CallRing3SetJmp return address stack location. */
172 RTHCUINTREG UnwindRetPcLocation;
173
174 /** The function last being executed here. */
175 RTHCUINTREG pfn;
176 /** The first argument to the function. */
177 RTHCUINTREG pvUser1;
178 /** The second argument to the function. */
179 RTHCUINTREG pvUser2;
180
181#if HC_ARCH_BITS == 32
182 /** Alignment padding. */
183 uint32_t uPadding;
184#endif
185
186 /** Stats: Max amount of stack used. */
187 uint32_t cbUsedMax;
188 /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */
189 uint32_t cbUsedAvg;
190 /** Stats: Total amount of stack used. */
191 uint64_t cbUsedTotal;
192 /** Stats: Number of stack usages. */
193 uint64_t cUsedTotal;
194} VMMR0JMPBUF;
195/** Pointer to a ring-0 jump buffer. */
196typedef VMMR0JMPBUF *PVMMR0JMPBUF;
197
198
199/**
200 * VMM Data (part of VM)
201 */
202typedef struct VMM
203{
204 /** Whether we should use the periodic preemption timers. */
205 bool fUsePeriodicPreemptionTimers;
206 /** Alignment padding. */
207 bool afPadding0[7];
208
209#if 0 /* pointless when timers doesn't run on EMT */
210 /** The EMT yield timer. */
211 TMTIMERHANDLE hYieldTimer;
212 /** The period to the next timeout when suspended or stopped.
213 * This is 0 when running. */
214 uint32_t cYieldResumeMillies;
215 /** The EMT yield timer interval (milliseconds). */
216 uint32_t cYieldEveryMillies;
217 /** The timestamp of the previous yield. (nano) */
218 uint64_t u64LastYield;
219#endif
220
221 /** @name EMT Rendezvous
222 * @{ */
223 /** Semaphore to wait on upon entering ordered execution. */
224 R3PTRTYPE(PRTSEMEVENT) pahEvtRendezvousEnterOrdered;
225 /** Semaphore to wait on upon entering for one-by-one execution. */
226 RTSEMEVENT hEvtRendezvousEnterOneByOne;
227 /** Semaphore to wait on upon entering for all-at-once execution. */
228 RTSEMEVENTMULTI hEvtMulRendezvousEnterAllAtOnce;
229 /** Semaphore to wait on when done. */
230 RTSEMEVENTMULTI hEvtMulRendezvousDone;
231 /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
232 RTSEMEVENT hEvtRendezvousDoneCaller;
233 /** Semaphore to wait on upon recursing. */
234 RTSEMEVENTMULTI hEvtMulRendezvousRecursionPush;
235 /** Semaphore to wait on after done with recursion (caller restoring state). */
236 RTSEMEVENTMULTI hEvtMulRendezvousRecursionPop;
237 /** Semaphore the initiator waits on while the EMTs are getting into position
238 * on hEvtMulRendezvousRecursionPush. */
239 RTSEMEVENT hEvtRendezvousRecursionPushCaller;
240 /** Semaphore the initiator waits on while the EMTs sitting on
241 * hEvtMulRendezvousRecursionPop wakes up and leave. */
242 RTSEMEVENT hEvtRendezvousRecursionPopCaller;
243 /** Callback. */
244 R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
245 /** The user argument for the callback. */
246 RTR3PTR volatile pvRendezvousUser;
247 /** Flags. */
248 volatile uint32_t fRendezvousFlags;
249 /** The number of EMTs that has entered. */
250 volatile uint32_t cRendezvousEmtsEntered;
251 /** The number of EMTs that has done their job. */
252 volatile uint32_t cRendezvousEmtsDone;
253 /** The number of EMTs that has returned. */
254 volatile uint32_t cRendezvousEmtsReturned;
255 /** The status code. */
256 volatile int32_t i32RendezvousStatus;
257 /** Spin lock. */
258 volatile uint32_t u32RendezvousLock;
259 /** The recursion depth. */
260 volatile uint32_t cRendezvousRecursions;
261 /** The number of EMTs that have entered the recursion routine. */
262 volatile uint32_t cRendezvousEmtsRecursingPush;
263 /** The number of EMTs that have leaft the recursion routine. */
264 volatile uint32_t cRendezvousEmtsRecursingPop;
265 /** Triggers rendezvous recursion in the other threads. */
266 volatile bool fRendezvousRecursion;
267
268 /** @} */
269
270 /** RTThreadPreemptIsPendingTrusty() result, set by vmmR0InitVM() for
271 * release logging purposes. */
272 bool fIsPreemptPendingApiTrusty : 1;
273 /** The RTThreadPreemptIsPossible() result, set by vmmR0InitVM() for
274 * release logging purposes. */
275 bool fIsPreemptPossible : 1;
276 /** Set if ring-0 uses context hooks. */
277 bool fIsUsingContextHooks : 1;
278
279 bool afAlignment2[2]; /**< Alignment padding. */
280
281 /** Buffer for storing the standard assertion message for a ring-0 assertion.
282 * Used for saving the assertion message text for the release log and guru
283 * meditation dump. */
284 char szRing0AssertMsg1[512];
285 /** Buffer for storing the custom message for a ring-0 assertion. */
286 char szRing0AssertMsg2[256];
287
288 /** Used when setting up ring-0 logger. */
289 uint64_t nsProgramStart;
290
291 /** Number of VMMR0_DO_HM_RUN or VMMR0_DO_NEM_RUN calls. */
292 STAMCOUNTER StatRunGC;
293
294 /** Statistics for each of the RC/R0 return codes.
295 * @{ */
296 STAMCOUNTER StatRZRetNormal;
297 STAMCOUNTER StatRZRetInterrupt;
298 STAMCOUNTER StatRZRetInterruptHyper;
299 STAMCOUNTER StatRZRetGuestTrap;
300 STAMCOUNTER StatRZRetRingSwitch;
301 STAMCOUNTER StatRZRetRingSwitchInt;
302 STAMCOUNTER StatRZRetStaleSelector;
303 STAMCOUNTER StatRZRetIRETTrap;
304 STAMCOUNTER StatRZRetEmulate;
305 STAMCOUNTER StatRZRetPatchEmulate;
306 STAMCOUNTER StatRZRetIORead;
307 STAMCOUNTER StatRZRetIOWrite;
308 STAMCOUNTER StatRZRetIOCommitWrite;
309 STAMCOUNTER StatRZRetMMIORead;
310 STAMCOUNTER StatRZRetMMIOWrite;
311 STAMCOUNTER StatRZRetMMIOCommitWrite;
312 STAMCOUNTER StatRZRetMMIOPatchRead;
313 STAMCOUNTER StatRZRetMMIOPatchWrite;
314 STAMCOUNTER StatRZRetMMIOReadWrite;
315 STAMCOUNTER StatRZRetMSRRead;
316 STAMCOUNTER StatRZRetMSRWrite;
317 STAMCOUNTER StatRZRetLDTFault;
318 STAMCOUNTER StatRZRetGDTFault;
319 STAMCOUNTER StatRZRetIDTFault;
320 STAMCOUNTER StatRZRetTSSFault;
321 STAMCOUNTER StatRZRetCSAMTask;
322 STAMCOUNTER StatRZRetSyncCR3;
323 STAMCOUNTER StatRZRetMisc;
324 STAMCOUNTER StatRZRetPatchInt3;
325 STAMCOUNTER StatRZRetPatchPF;
326 STAMCOUNTER StatRZRetPatchGP;
327 STAMCOUNTER StatRZRetPatchIretIRQ;
328 STAMCOUNTER StatRZRetRescheduleREM;
329 STAMCOUNTER StatRZRetToR3Total;
330 STAMCOUNTER StatRZRetToR3FF;
331 STAMCOUNTER StatRZRetToR3Unknown;
332 STAMCOUNTER StatRZRetToR3TMVirt;
333 STAMCOUNTER StatRZRetToR3HandyPages;
334 STAMCOUNTER StatRZRetToR3PDMQueues;
335 STAMCOUNTER StatRZRetToR3Rendezvous;
336 STAMCOUNTER StatRZRetToR3Timer;
337 STAMCOUNTER StatRZRetToR3DMA;
338 STAMCOUNTER StatRZRetToR3CritSect;
339 STAMCOUNTER StatRZRetToR3Iem;
340 STAMCOUNTER StatRZRetToR3Iom;
341 STAMCOUNTER StatRZRetTimerPending;
342 STAMCOUNTER StatRZRetInterruptPending;
343 STAMCOUNTER StatRZRetCallRing3;
344 STAMCOUNTER StatRZRetPATMDuplicateFn;
345 STAMCOUNTER StatRZRetPGMChangeMode;
346 STAMCOUNTER StatRZRetPendingRequest;
347 STAMCOUNTER StatRZRetPGMFlushPending;
348 STAMCOUNTER StatRZRetPatchTPR;
349 STAMCOUNTER StatRZCallPDMCritSectEnter;
350 STAMCOUNTER StatRZCallPDMLock;
351 STAMCOUNTER StatRZCallLogFlush;
352 STAMCOUNTER StatRZCallPGMPoolGrow;
353 STAMCOUNTER StatRZCallPGMMapChunk;
354 STAMCOUNTER StatRZCallPGMAllocHandy;
355 STAMCOUNTER StatRZCallVMSetError;
356 STAMCOUNTER StatRZCallVMSetRuntimeError;
357 STAMCOUNTER StatRZCallPGMLock;
358 /** @} */
359} VMM;
360/** Pointer to VMM. */
361typedef VMM *PVMM;
362
363
364/**
365 * VMMCPU Data (part of VMCPU)
366 */
367typedef struct VMMCPU
368{
369 /** The last RC/R0 return code. */
370 int32_t iLastGZRc;
371 /** Alignment padding. */
372 uint32_t u32Padding0;
373
374 /** VMM stack, pointer to the top of the stack in R3.
375 * Stack is allocated from the hypervisor heap and is page aligned
376 * and always writable in RC. */
377 R3PTRTYPE(uint8_t *) pbEMTStackR3;
378
379 /** @name Rendezvous
380 * @{ */
381 /** Whether the EMT is executing a rendezvous right now. For detecting
382 * attempts at recursive rendezvous. */
383 bool volatile fInRendezvous;
384 bool afPadding1[2];
385 /** @} */
386
387 /** Whether we can HLT in VMMR0 rather than having to return to EM.
388 * Updated by vmR3SetHaltMethodU(). */
389 bool fMayHaltInRing0;
390 /** The minimum delta for which we can HLT in ring-0 for.
391 * The deadlines we can calculate are from TM, so, if it's too close
392 * we should just return to ring-3 and run the timer wheel, no point
393 * in spinning in ring-0.
394 * Updated by vmR3SetHaltMethodU(). */
395 uint32_t cNsSpinBlockThreshold;
396 /** Number of ring-0 halts (used for depreciating following values). */
397 uint32_t cR0Halts;
398 /** Number of ring-0 halts succeeding (VINF_SUCCESS) recently. */
399 uint32_t cR0HaltsSucceeded;
400 /** Number of ring-0 halts failing (VINF_EM_HALT) recently. */
401 uint32_t cR0HaltsToRing3;
402 /** Padding */
403 uint32_t u32Padding2;
404
405 /** @name Raw-mode context tracing data.
406 * @{ */
407 SUPDRVTRACERUSRCTX TracerCtx;
408 /** @} */
409
410 /** Alignment padding, making sure u64CallRing3Arg and CallRing3JmpBufR0 are nicely aligned. */
411 uint32_t au32Padding3[1];
412
413 /** @name Call Ring-3
414 * Formerly known as host calls.
415 * @{ */
416 /** The disable counter. */
417 uint32_t cCallRing3Disabled;
418 /** The pending operation. */
419 VMMCALLRING3 enmCallRing3Operation;
420 /** The result of the last operation. */
421 int32_t rcCallRing3;
422 /** The argument to the operation. */
423 uint64_t u64CallRing3Arg;
424 /** The Ring-0 notification callback. */
425 R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallRing3CallbackR0;
426 /** The Ring-0 notification callback user argument. */
427 R0PTRTYPE(void *) pvCallRing3CallbackUserR0;
428 /** The Ring-0 jmp buffer.
429 * @remarks The size of this type isn't stable in assembly, so don't put
430 * anything that needs to be accessed from assembly after it. */
431 VMMR0JMPBUF CallRing3JmpBufR0;
432 /** @} */
433
434 /** @name Logging
435 * @{ */
436 /** The R0 logger data shared with ring-3. */
437 VMMR3CPULOGGER Logger;
438 /** The R0 release logger data shared with ring-3. */
439 VMMR3CPULOGGER RelLogger;
440 /** @} */
441
442 STAMPROFILE StatR0HaltBlock;
443 STAMPROFILE StatR0HaltBlockOnTime;
444 STAMPROFILE StatR0HaltBlockOverslept;
445 STAMPROFILE StatR0HaltBlockInsomnia;
446 STAMCOUNTER StatR0HaltExec;
447 STAMCOUNTER StatR0HaltExecFromBlock;
448 STAMCOUNTER StatR0HaltExecFromSpin;
449 STAMCOUNTER StatR0HaltToR3;
450 STAMCOUNTER StatR0HaltToR3FromSpin;
451 STAMCOUNTER StatR0HaltToR3Other;
452 STAMCOUNTER StatR0HaltToR3PendingFF;
453 STAMCOUNTER StatR0HaltToR3SmallDelta;
454 STAMCOUNTER StatR0HaltToR3PostNoInt;
455 STAMCOUNTER StatR0HaltToR3PostPendingFF;
456} VMMCPU;
457AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
458/** Pointer to VMMCPU. */
459typedef VMMCPU *PVMMCPU;
460
461/**
462 * VMM per-VCpu ring-0 only instance data.
463 */
464typedef struct VMMR0PERVCPU
465{
466 /** Set if we've entered HM context. */
467 bool volatile fInHmContext;
468 /** Flag indicating whether we've disabled flushing (world switch) or not. */
469 bool fLogFlushingDisabled;
470 /** The EMT hash table index. */
471 uint16_t idxEmtHash;
472 /** Pointer to the VMMR0EntryFast preemption state structure.
473 * This is used to temporarily restore preemption before blocking. */
474 R0PTRTYPE(PRTTHREADPREEMPTSTATE) pPreemptState;
475 /** Thread context switching hook (ring-0). */
476 RTTHREADCTXHOOK hCtxHook;
477
478 /** @name Arguments passed by VMMR0EntryEx via vmmR0CallRing3SetJmpEx.
479 * @note Cannot be put on the stack as the location may change and upset the
480 * validation of resume-after-ring-3-call logic.
481 * @{ */
482 PGVM pGVM;
483 VMCPUID idCpu;
484 VMMR0OPERATION enmOperation;
485 PSUPVMMR0REQHDR pReq;
486 uint64_t u64Arg;
487 PSUPDRVSESSION pSession;
488 /** @} */
489
490 /** @name Loggers
491 * @{ */
492 /** The R0 logger data. */
493 VMMR0PERVCPULOGGER Logger;
494 /** The R0 release logger data. */
495 VMMR0PERVCPULOGGER RelLogger;
496 /** @} */
497} VMMR0PERVCPU;
498/** Pointer to VMM ring-0 VMCPU instance data. */
499typedef VMMR0PERVCPU *PVMMR0PERVCPU;
500
501/** @name RTLOGGER::u32UserValue1 Flags
502 * @{ */
503/** The magic value. */
504#define VMMR0_LOGGER_FLAGS_MAGIC_VALUE UINT32_C(0x7d297f05)
505/** Part of the flags value used for the magic. */
506#define VMMR0_LOGGER_FLAGS_MAGIC_MASK UINT32_C(0xffffff0f)
507/** Set if flushing is disabled (copy of fLogFlushingDisabled). */
508#define VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED UINT32_C(0x00000010)
509/** @} */
510
511
512/**
513 * VMM data kept in the ring-0 GVM.
514 */
515typedef struct VMMR0PERVM
516{
517 /** Logger (debug) buffer allocation.
518 * This covers all CPUs. */
519 RTR0MEMOBJ hMemObjLogger;
520 /** The ring-3 mapping object for hMemObjLogger. */
521 RTR0MEMOBJ hMapObjLogger;
522
523 /** Release logger buffer allocation.
524 * This covers all CPUs. */
525 RTR0MEMOBJ hMemObjReleaseLogger;
526 /** The ring-3 mapping object for hMemObjReleaseLogger. */
527 RTR0MEMOBJ hMapObjReleaseLogger;
528
529 /** Set if vmmR0InitVM has been called. */
530 bool fCalledInitVm;
531} VMMR0PERVM;
532
533RT_C_DECLS_BEGIN
534
535int vmmInitFormatTypes(void);
536void vmmTermFormatTypes(void);
537uint32_t vmmGetBuildType(void);
538
539#ifdef IN_RING3
540int vmmR3SwitcherInit(PVM pVM);
541void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
542#endif /* IN_RING3 */
543
544#ifdef IN_RING0
545
546/**
547 * World switcher assembly routine.
548 * It will call VMMRCEntry().
549 *
550 * @returns return code from VMMRCEntry().
551 * @param pVM The cross context VM structure.
552 * @param uArg See VMMRCEntry().
553 * @internal
554 */
555DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);
556
557/**
558 * Callback function for vmmR0CallRing3SetJmp.
559 *
560 * @returns VBox status code.
561 * @param pVM The cross context VM structure.
562 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
563 */
564typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP,(PVMCC pVM, PVMCPUCC pVCpu));
565/** Pointer to FNVMMR0SETJMP(). */
566typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;
567
568/**
569 * The setjmp variant used for calling Ring-3.
570 *
571 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
572 * in the middle of a ring-3 call. Another differences is the function pointer and
573 * argument. This has to do with resuming code and the stack frame of the caller.
574 *
575 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
576 * @param pJmpBuf The jmp_buf to set.
577 * @param pfn The function to be called when not resuming.
578 * @param pVM The cross context VM structure.
579 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
580 */
581DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);
582
583
584/**
585 * Callback function for vmmR0CallRing3SetJmp2.
586 *
587 * @returns VBox status code.
588 * @param pGVM The ring-0 VM structure.
589 * @param idCpu The ID of the calling EMT.
590 */
591typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP2,(PGVM pGVM, VMCPUID idCpu));
592/** Pointer to FNVMMR0SETJMP2(). */
593typedef FNVMMR0SETJMP2 *PFNVMMR0SETJMP2;
594
595/**
596 * Same as vmmR0CallRing3SetJmp except for the function signature.
597 *
598 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
599 * @param pJmpBuf The jmp_buf to set.
600 * @param pfn The function to be called when not resuming.
601 * @param pGVM The ring-0 VM structure.
602 * @param idCpu The ID of the calling EMT.
603 */
604DECLASM(int) vmmR0CallRing3SetJmp2(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP2 pfn, PGVM pGVM, VMCPUID idCpu);
605
606
607/**
608 * Callback function for vmmR0CallRing3SetJmpEx.
609 *
610 * @returns VBox status code.
611 * @param pvUser The user argument.
612 */
613typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMPEX,(void *pvUser));
614/** Pointer to FNVMMR0SETJMPEX(). */
615typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;
616
617/**
618 * Same as vmmR0CallRing3SetJmp except for the function signature.
619 *
620 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
621 * @param pJmpBuf The jmp_buf to set.
622 * @param pfn The function to be called when not resuming.
623 * @param pvUser The argument of that function.
624 * @param uCallKey Unused call parameter that should be used to help
625 * uniquely identify the call.
626 */
627DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser, uintptr_t uCallKey);
628
629
630/**
631 * Worker for VMMRZCallRing3.
632 * This will save the stack and registers.
633 *
634 * @returns rc.
635 * @param pJmpBuf Pointer to the jump buffer.
636 * @param rc The return code.
637 */
638DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);
639
640# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
641int vmmR0TripleFaultHackInit(void);
642void vmmR0TripleFaultHackTerm(void);
643# endif
644
645#endif /* IN_RING0 */
646
647RT_C_DECLS_END
648
649/** @} */
650
651#endif /* !VMM_INCLUDED_SRC_include_VMMInternal_h */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette