VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMMInternal.h@ 90829

Last change on this file since 90829 was 90829, checked in by vboxsync, 4 years ago

IPRT,VMM,SUPDrv,++: Reworked the IPRT logger structure and how the VMM ring-0 uses it. bugref:10086

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.0 KB
Line 
1/* $Id: VMMInternal.h 90829 2021-08-24 10:26:07Z vboxsync $ */
2/** @file
3 * VMM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_VMMInternal_h
19#define VMM_INCLUDED_SRC_include_VMMInternal_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/sup.h>
26#include <VBox/vmm/stam.h>
27#include <VBox/vmm/vmm.h>
28#include <VBox/log.h>
29#include <iprt/critsect.h>
30
31#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
32# error "Not in VMM! This is an internal header!"
33#endif
34#if HC_ARCH_BITS == 32
35# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
36#endif
37
38
39
40/** @defgroup grp_vmm_int Internals
41 * @ingroup grp_vmm
42 * @internal
43 * @{
44 */
45
/** @def VBOX_WITH_RC_RELEASE_LOGGING
 * Enables RC release logging. */
#define VBOX_WITH_RC_RELEASE_LOGGING

/** @def VBOX_WITH_R0_LOGGING
 * Enables Ring-0 logging (non-release).
 *
 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
 * \#if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
 */
#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
# define VBOX_WITH_R0_LOGGING
#endif

/** @def VBOX_STRICT_VMM_STACK
 * Enables VMM stack guard pages to catch stack over- and underruns. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define VBOX_STRICT_VMM_STACK
#endif
66
67
/**
 * R0 logger data (ring-0 only data).
 */
typedef struct VMMR0PERVCPULOGGER
{
    /** Pointer to the logger instance.
     * The RTLOGGER::u32UserValue1 member is used for flags and magic, while the
     * RTLOGGER::u64UserValue2 member is the corresponding PGVMCPU value.
     * RTLOGGER::u64UserValue3 is currently also set to the PGVMCPU value. */
    R0PTRTYPE(PRTLOGGER)    pLogger;
    /** Log buffer descriptor.
     * The buffer is allocated in a common block for all VCpus, see VMMR0PERVM. */
    RTLOGBUFFERDESC         BufDesc;
    /** Flag indicating whether we've registered the instance already. */
    bool                    fRegistered;
    /** Explicit padding up to an 8 byte multiple. */
    bool                    afPadding[7];
} VMMR0PERVCPULOGGER;
/** Pointer to the R0 logger data (ring-0 only). */
typedef VMMR0PERVCPULOGGER *PVMMR0PERVCPULOGGER;
87
88
/**
 * R0 logger data shared with ring-3 (per CPU).
 */
typedef struct VMMR3CPULOGGER
{
    /** Auxiliary buffer descriptor. */
    RTLOGBUFFERAUXDESC      AuxDesc;
    /** Ring-3 mapping of the logging buffer. */
    R3PTRTYPE(char *)       pchBufR3;
    /** The buffer size. */
    uint32_t                cbBuf;
    /** Reserved; keeps the structure size an 8 byte multiple. */
    uint32_t                uReserved;
} VMMR3CPULOGGER;
/** Pointer to r0 logger data shared with ring-3. */
typedef VMMR3CPULOGGER *PVMMR3CPULOGGER;
104
105
/**
 * Jump buffer for the setjmp/longjmp like constructs used to
 * quickly 'call' back into Ring-3.
 */
typedef struct VMMR0JMPBUF
{
    /** Traditional jmp_buf stuff
     * @{ */
#if HC_ARCH_BITS == 32
    /* NOTE(review): dead branch — 32-bit hosts are rejected by the #error at
     * the top of this header; kept for historical/layout reference. */
    uint32_t    ebx;
    uint32_t    esi;
    uint32_t    edi;
    uint32_t    ebp;
    uint32_t    esp;
    uint32_t    eip;
    uint32_t    eflags;
#endif
#if HC_ARCH_BITS == 64
    uint64_t    rbx;
# ifdef RT_OS_WINDOWS
    /* rsi/rdi are callee-saved in the Microsoft x64 calling convention only. */
    uint64_t    rsi;
    uint64_t    rdi;
# endif
    uint64_t    rbp;
    uint64_t    r12;
    uint64_t    r13;
    uint64_t    r14;
    uint64_t    r15;
    uint64_t    rsp;
    uint64_t    rip;
# ifdef RT_OS_WINDOWS
    /* xmm6-xmm15 are callee-saved on Windows x64 and must be preserved too. */
    uint128_t   xmm6;
    uint128_t   xmm7;
    uint128_t   xmm8;
    uint128_t   xmm9;
    uint128_t   xmm10;
    uint128_t   xmm11;
    uint128_t   xmm12;
    uint128_t   xmm13;
    uint128_t   xmm14;
    uint128_t   xmm15;
# endif
    uint64_t    rflags;
#endif
    /** @} */

    /** Flag that indicates that we've done a ring-3 call. */
    bool        fInRing3Call;
    /** The number of bytes we've saved. */
    uint32_t    cbSavedStack;
    /** Pointer to the buffer used to save the stack.
     * This is assumed to be 8KB. */
    RTR0PTR     pvSavedStack;
    /** The SP we match against the SP on resume to make sure the stack
     * wasn't relocated. */
    RTHCUINTREG SpCheck;
    /** The SP we should resume execution with after the restore. */
    RTHCUINTREG SpResume;
    /** ESP/RSP at the time of the jump to ring 3. */
    RTHCUINTREG SavedEsp;
    /** EBP/RBP at the time of the jump to ring 3. */
    RTHCUINTREG SavedEbp;
    /** EIP/RIP within vmmR0CallRing3LongJmp for assisting unwinding. */
    RTHCUINTREG SavedEipForUnwind;
    /** Unwind: The vmmR0CallRing3SetJmp return address value. */
    RTHCUINTREG UnwindRetPcValue;
    /** Unwind: The vmmR0CallRing3SetJmp return address stack location. */
    RTHCUINTREG UnwindRetPcLocation;

    /** The function last being executed here. */
    RTHCUINTREG pfn;
    /** The first argument to the function. */
    RTHCUINTREG pvUser1;
    /** The second argument to the function. */
    RTHCUINTREG pvUser2;

#if HC_ARCH_BITS == 32
    /** Alignment padding. */
    uint32_t    uPadding;
#endif

    /** Stats: Max amount of stack used. */
    uint32_t    cbUsedMax;
    /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */
    uint32_t    cbUsedAvg;
    /** Stats: Total amount of stack used. */
    uint64_t    cbUsedTotal;
    /** Stats: Number of stack usages. */
    uint64_t    cUsedTotal;
} VMMR0JMPBUF;
/** Pointer to a ring-0 jump buffer. */
typedef VMMR0JMPBUF *PVMMR0JMPBUF;
197
198
/**
 * VMM Data (part of VM)
 */
typedef struct VMM
{
    /** Whether we should use the periodic preemption timers. */
    bool                        fUsePeriodicPreemptionTimers;
    /** Alignment padding. */
    bool                        afPadding0[7];

#if 0 /* pointless when timers don't run on EMT */
    /** The EMT yield timer. */
    TMTIMERHANDLE               hYieldTimer;
    /** The period to the next timeout when suspended or stopped.
     * This is 0 when running. */
    uint32_t                    cYieldResumeMillies;
    /** The EMT yield timer interval (milliseconds). */
    uint32_t                    cYieldEveryMillies;
    /** The timestamp of the previous yield. (nano) */
    uint64_t                    u64LastYield;
#endif

    /** @name EMT Rendezvous
     * @{ */
    /** Semaphore to wait on upon entering ordered execution. */
    R3PTRTYPE(PRTSEMEVENT)      pahEvtRendezvousEnterOrdered;
    /** Semaphore to wait on upon entering for one-by-one execution. */
    RTSEMEVENT                  hEvtRendezvousEnterOneByOne;
    /** Semaphore to wait on upon entering for all-at-once execution. */
    RTSEMEVENTMULTI             hEvtMulRendezvousEnterAllAtOnce;
    /** Semaphore to wait on when done. */
    RTSEMEVENTMULTI             hEvtMulRendezvousDone;
    /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
    RTSEMEVENT                  hEvtRendezvousDoneCaller;
    /** Semaphore to wait on upon recursing. */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPush;
    /** Semaphore to wait on after done with recursion (caller restoring state). */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPop;
    /** Semaphore the initiator waits on while the EMTs are getting into position
     * on hEvtMulRendezvousRecursionPush. */
    RTSEMEVENT                  hEvtRendezvousRecursionPushCaller;
    /** Semaphore the initiator waits on while the EMTs sitting on
     * hEvtMulRendezvousRecursionPop wakes up and leave. */
    RTSEMEVENT                  hEvtRendezvousRecursionPopCaller;
    /** Callback. */
    R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
    /** The user argument for the callback. */
    RTR3PTR volatile            pvRendezvousUser;
    /** Flags. */
    volatile uint32_t           fRendezvousFlags;
    /** The number of EMTs that have entered. */
    volatile uint32_t           cRendezvousEmtsEntered;
    /** The number of EMTs that have done their job. */
    volatile uint32_t           cRendezvousEmtsDone;
    /** The number of EMTs that have returned. */
    volatile uint32_t           cRendezvousEmtsReturned;
    /** The status code. */
    volatile int32_t            i32RendezvousStatus;
    /** Spin lock. */
    volatile uint32_t           u32RendezvousLock;
    /** The recursion depth. */
    volatile uint32_t           cRendezvousRecursions;
    /** The number of EMTs that have entered the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPush;
    /** The number of EMTs that have left the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPop;
    /** Triggers rendezvous recursion in the other threads. */
    volatile bool               fRendezvousRecursion;

    /** @} */

    /** RTThreadPreemptIsPendingTrusty() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPendingApiTrusty : 1;
    /** The RTThreadPreemptIsPossible() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPossible : 1;
    /** Set if ring-0 uses context hooks.  */
    bool                        fIsUsingContextHooks : 1;

    bool                        afAlignment2[2]; /**< Alignment padding. */

    /** Buffer for storing the standard assertion message for a ring-0 assertion.
     * Used for saving the assertion message text for the release log and guru
     * meditation dump. */
    char                        szRing0AssertMsg1[512];
    /** Buffer for storing the custom message for a ring-0 assertion. */
    char                        szRing0AssertMsg2[256];

    /** Number of VMMR0_DO_HM_RUN or VMMR0_DO_NEM_RUN calls. */
    STAMCOUNTER                 StatRunGC;

    /** Statistics for each of the RC/R0 return codes.
     * @{ */
    STAMCOUNTER                 StatRZRetNormal;
    STAMCOUNTER                 StatRZRetInterrupt;
    STAMCOUNTER                 StatRZRetInterruptHyper;
    STAMCOUNTER                 StatRZRetGuestTrap;
    STAMCOUNTER                 StatRZRetRingSwitch;
    STAMCOUNTER                 StatRZRetRingSwitchInt;
    STAMCOUNTER                 StatRZRetStaleSelector;
    STAMCOUNTER                 StatRZRetIRETTrap;
    STAMCOUNTER                 StatRZRetEmulate;
    STAMCOUNTER                 StatRZRetPatchEmulate;
    STAMCOUNTER                 StatRZRetIORead;
    STAMCOUNTER                 StatRZRetIOWrite;
    STAMCOUNTER                 StatRZRetIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIORead;
    STAMCOUNTER                 StatRZRetMMIOWrite;
    STAMCOUNTER                 StatRZRetMMIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIOPatchRead;
    STAMCOUNTER                 StatRZRetMMIOPatchWrite;
    STAMCOUNTER                 StatRZRetMMIOReadWrite;
    STAMCOUNTER                 StatRZRetMSRRead;
    STAMCOUNTER                 StatRZRetMSRWrite;
    STAMCOUNTER                 StatRZRetLDTFault;
    STAMCOUNTER                 StatRZRetGDTFault;
    STAMCOUNTER                 StatRZRetIDTFault;
    STAMCOUNTER                 StatRZRetTSSFault;
    STAMCOUNTER                 StatRZRetCSAMTask;
    STAMCOUNTER                 StatRZRetSyncCR3;
    STAMCOUNTER                 StatRZRetMisc;
    STAMCOUNTER                 StatRZRetPatchInt3;
    STAMCOUNTER                 StatRZRetPatchPF;
    STAMCOUNTER                 StatRZRetPatchGP;
    STAMCOUNTER                 StatRZRetPatchIretIRQ;
    STAMCOUNTER                 StatRZRetRescheduleREM;
    STAMCOUNTER                 StatRZRetToR3Total;
    STAMCOUNTER                 StatRZRetToR3FF;
    STAMCOUNTER                 StatRZRetToR3Unknown;
    STAMCOUNTER                 StatRZRetToR3TMVirt;
    STAMCOUNTER                 StatRZRetToR3HandyPages;
    STAMCOUNTER                 StatRZRetToR3PDMQueues;
    STAMCOUNTER                 StatRZRetToR3Rendezvous;
    STAMCOUNTER                 StatRZRetToR3Timer;
    STAMCOUNTER                 StatRZRetToR3DMA;
    STAMCOUNTER                 StatRZRetToR3CritSect;
    STAMCOUNTER                 StatRZRetToR3Iem;
    STAMCOUNTER                 StatRZRetToR3Iom;
    STAMCOUNTER                 StatRZRetTimerPending;
    STAMCOUNTER                 StatRZRetInterruptPending;
    STAMCOUNTER                 StatRZRetCallRing3;
    STAMCOUNTER                 StatRZRetPATMDuplicateFn;
    STAMCOUNTER                 StatRZRetPGMChangeMode;
    STAMCOUNTER                 StatRZRetPendingRequest;
    STAMCOUNTER                 StatRZRetPGMFlushPending;
    STAMCOUNTER                 StatRZRetPatchTPR;
    STAMCOUNTER                 StatRZCallPDMCritSectEnter;
    STAMCOUNTER                 StatRZCallPDMLock;
    STAMCOUNTER                 StatRZCallLogFlush;
    STAMCOUNTER                 StatRZCallPGMPoolGrow;
    STAMCOUNTER                 StatRZCallPGMMapChunk;
    STAMCOUNTER                 StatRZCallPGMAllocHandy;
    STAMCOUNTER                 StatRZCallVMSetError;
    STAMCOUNTER                 StatRZCallVMSetRuntimeError;
    STAMCOUNTER                 StatRZCallPGMLock;
    /** @} */
} VMM;
/** Pointer to VMM. */
typedef VMM *PVMM;
359
360
/**
 * VMMCPU Data (part of VMCPU)
 */
typedef struct VMMCPU
{
    /** The last RC/R0 return code. */
    int32_t                     iLastGZRc;
    /** Alignment padding. */
    uint32_t                    u32Padding0;

    /** VMM stack, pointer to the top of the stack in R3.
     * Stack is allocated from the hypervisor heap and is page aligned
     * and always writable in RC. */
    R3PTRTYPE(uint8_t *)        pbEMTStackR3;

    /** @name Rendezvous
     * @{ */
    /** Whether the EMT is executing a rendezvous right now.  For detecting
     * attempts at recursive rendezvous. */
    bool volatile               fInRendezvous;
    /** Alignment padding. */
    bool                        afPadding1[2];
    /** @} */

    /** Whether we can HLT in VMMR0 rather than having to return to EM.
     * Updated by vmR3SetHaltMethodU(). */
    bool                        fMayHaltInRing0;
    /** The minimum delta for which we can HLT in ring-0 for.
     * The deadlines we can calculate are from TM, so, if it's too close
     * we should just return to ring-3 and run the timer wheel, no point
     * in spinning in ring-0.
     * Updated by vmR3SetHaltMethodU(). */
    uint32_t                    cNsSpinBlockThreshold;
    /** Number of ring-0 halts (used for depreciating the following values). */
    uint32_t                    cR0Halts;
    /** Number of ring-0 halts succeeding (VINF_SUCCESS) recently. */
    uint32_t                    cR0HaltsSucceeded;
    /** Number of ring-0 halts failing (VINF_EM_HALT) recently. */
    uint32_t                    cR0HaltsToRing3;
    /** Padding. */
    uint32_t                    u32Padding2;

    /** @name Raw-mode context tracing data.
     * @{ */
    SUPDRVTRACERUSRCTX          TracerCtx;
    /** @} */

    /** Alignment padding, making sure u64CallRing3Arg and CallRing3JmpBufR0 are nicely aligned. */
    uint32_t                    au32Padding3[1];

    /** @name Call Ring-3
     * Formerly known as host calls.
     * @{ */
    /** The disable counter. */
    uint32_t                    cCallRing3Disabled;
    /** The pending operation. */
    VMMCALLRING3                enmCallRing3Operation;
    /** The result of the last operation. */
    int32_t                     rcCallRing3;
    /** The argument to the operation. */
    uint64_t                    u64CallRing3Arg;
    /** The Ring-0 notification callback. */
    R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallRing3CallbackR0;
    /** The Ring-0 notification callback user argument. */
    R0PTRTYPE(void *)           pvCallRing3CallbackUserR0;
    /** The Ring-0 jmp buffer.
     * @remarks The size of this type isn't stable in assembly, so don't put
     *          anything that needs to be accessed from assembly after it. */
    VMMR0JMPBUF                 CallRing3JmpBufR0;
    /** @} */

    /** @name Logging
     * @{ */
    /** The R0 logger data shared with ring-3. */
    VMMR3CPULOGGER              Logger;
    /** The R0 release logger data shared with ring-3. */
    VMMR3CPULOGGER              RelLogger;
    /** @} */

    /** @name Ring-0 halt statistics.
     * @{ */
    STAMPROFILE                 StatR0HaltBlock;
    STAMPROFILE                 StatR0HaltBlockOnTime;
    STAMPROFILE                 StatR0HaltBlockOverslept;
    STAMPROFILE                 StatR0HaltBlockInsomnia;
    STAMCOUNTER                 StatR0HaltExec;
    STAMCOUNTER                 StatR0HaltExecFromBlock;
    STAMCOUNTER                 StatR0HaltExecFromSpin;
    STAMCOUNTER                 StatR0HaltToR3;
    STAMCOUNTER                 StatR0HaltToR3FromSpin;
    STAMCOUNTER                 StatR0HaltToR3Other;
    STAMCOUNTER                 StatR0HaltToR3PendingFF;
    STAMCOUNTER                 StatR0HaltToR3SmallDelta;
    STAMCOUNTER                 StatR0HaltToR3PostNoInt;
    STAMCOUNTER                 StatR0HaltToR3PostPendingFF;
    /** @} */
} VMMCPU;
AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
/** Pointer to VMMCPU. */
typedef VMMCPU *PVMMCPU;
457
/**
 * VMM per-VCpu ring-0 only instance data.
 */
typedef struct VMMR0PERVCPU
{
    /** Set if we've entered HM context. */
    bool volatile                   fInHmContext;
    /** Flag indicating whether we've disabled flushing (world switch) or not. */
    bool                            fLogFlushingDisabled;
    /** The EMT hash table index. */
    uint16_t                        idxEmtHash;
    /** Pointer to the VMMR0EntryFast preemption state structure.
     * This is used to temporarily restore preemption before blocking.  */
    R0PTRTYPE(PRTTHREADPREEMPTSTATE) pPreemptState;
    /** Thread context switching hook (ring-0). */
    RTTHREADCTXHOOK                 hCtxHook;

    /** @name Arguments passed by VMMR0EntryEx via vmmR0CallRing3SetJmpEx.
     * @note Cannot be put on the stack as the location may change and upset the
     *       validation of resume-after-ring-3-call logic.
     * @{ */
    PGVM                            pGVM;
    VMCPUID                         idCpu;
    VMMR0OPERATION                  enmOperation;
    PSUPVMMR0REQHDR                 pReq;
    uint64_t                        u64Arg;
    PSUPDRVSESSION                  pSession;
    /** @} */

    /** @name Loggers
     * @{ */
    /** The R0 logger data. */
    VMMR0PERVCPULOGGER              Logger;
    /** The R0 release logger data. */
    VMMR0PERVCPULOGGER              RelLogger;
    /** @} */
} VMMR0PERVCPU;
/** Pointer to VMM ring-0 VMCPU instance data. */
typedef VMMR0PERVCPU *PVMMR0PERVCPU;
497
/** @name RTLOGGER::u32UserValue1 Flags
 * @{ */
/** The magic value. */
#define VMMR0_LOGGER_FLAGS_MAGIC_VALUE          UINT32_C(0x7d297f05)
/** Part of the flags value used for the magic.
 * The bits outside this mask carry the non-magic flags below. */
#define VMMR0_LOGGER_FLAGS_MAGIC_MASK           UINT32_C(0xffffff0f)
/** Set if flushing is disabled (copy of fLogFlushingDisabled). */
#define VMMR0_LOGGER_FLAGS_FLUSHING_DISABLED    UINT32_C(0x00000010)
/** @} */
507
508
/**
 * VMM data kept in the ring-0 GVM.
 */
typedef struct VMMR0PERVM
{
    /** Logger (debug) buffer allocation.
     * This covers all CPUs.  */
    RTR0MEMOBJ          hMemObjLogger;
    /** The ring-3 mapping object for hMemObjLogger. */
    RTR0MEMOBJ          hMapObjLogger;

    /** Release logger buffer allocation.
     * This covers all CPUs.  */
    RTR0MEMOBJ          hMemObjReleaseLogger;
    /** The ring-3 mapping object for hMemObjReleaseLogger. */
    RTR0MEMOBJ          hMapObjReleaseLogger;

    /** Set if vmmR0InitVM has been called. */
    bool                fCalledInitVm;
} VMMR0PERVM;
529
RT_C_DECLS_BEGIN

/** Registers the VMM's custom format types with IPRT (presumably log/string
 *  formatters; confirm against the definition in VMM.cpp).
 *  @returns VBox status code. */
int      vmmInitFormatTypes(void);
/** Counterpart to vmmInitFormatTypes(), deregistering the format types. */
void     vmmTermFormatTypes(void);
/** Returns a value identifying the build type (NOTE(review): exact encoding
 *  not visible here — see the definition). */
uint32_t vmmGetBuildType(void);

#ifdef IN_RING3
/** Ring-3: Initializes the world switcher (raw-mode leftover).
 *  @returns VBox status code. */
int  vmmR3SwitcherInit(PVM pVM);
/** Ring-3: Applies a relocation delta to switcher data after the hypervisor
 *  area moved. */
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
#endif /* IN_RING3 */
540
#ifdef IN_RING0

/**
 * World switcher assembly routine.
 * It will call VMMRCEntry().
 *
 * @returns return code from VMMRCEntry().
 * @param   pVM     The cross context VM structure.
 * @param   uArg    See VMMRCEntry().
 * @internal
 */
DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);

/**
 * Callback function for vmmR0CallRing3SetJmp.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP,(PVMCC pVM, PVMCPUCC pVCpu));
/** Pointer to FNVMMR0SETJMP(). */
typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;

/**
 * The setjmp variant used for calling Ring-3.
 *
 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if
 * we're in the middle of a ring-3 call.  Another difference is the function
 * pointer and argument.  This has to do with resuming code and the stack frame
 * of the caller.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 */
DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);


/**
 * Callback function for vmmR0CallRing3SetJmp2.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   idCpu   The ID of the calling EMT.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP2,(PGVM pGVM, VMCPUID idCpu));
/** Pointer to FNVMMR0SETJMP2(). */
typedef FNVMMR0SETJMP2 *PFNVMMR0SETJMP2;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pGVM        The ring-0 VM structure.
 * @param   idCpu       The ID of the calling EMT.
 */
DECLASM(int) vmmR0CallRing3SetJmp2(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP2 pfn, PGVM pGVM, VMCPUID idCpu);


/**
 * Callback function for vmmR0CallRing3SetJmpEx.
 *
 * @returns VBox status code.
 * @param   pvUser  The user argument.
 */
typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMPEX,(void *pvUser));
/** Pointer to FNVMMR0SETJMPEX(). */
typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pvUser      The argument of that function.
 * @param   uCallKey    Unused call parameter that should be used to help
 *                      uniquely identify the call.
 */
DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser, uintptr_t uCallKey);


/**
 * Worker for VMMRZCallRing3.
 * This will save the stack and registers.
 *
 * @returns rc.
 * @param   pJmpBuf     Pointer to the jump buffer.
 * @param   rc          The return code.
 */
DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);

# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
/** Initializes the triple fault hack (debugging aid).
 *  @returns VBox status code. */
int  vmmR0TripleFaultHackInit(void);
/** Counterpart to vmmR0TripleFaultHackInit(). */
void vmmR0TripleFaultHackTerm(void);
# endif

#endif /* IN_RING0 */
641
642RT_C_DECLS_END
643
644/** @} */
645
646#endif /* !VMM_INCLUDED_SRC_include_VMMInternal_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette