VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMMInternal.h@ 92489

Last change on this file since 92489 was 92408, checked in by vboxsync, 3 years ago

VMM: Reworked most of the call-ring-3 stuff into setjmp-longjmp-on-assert and removed the stack switching/copying/resume code. bugref:10093 bugref:10124

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 27.6 KB
Line 
1/* $Id: VMMInternal.h 92408 2021-11-12 21:49:06Z vboxsync $ */
2/** @file
3 * VMM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_VMMInternal_h
19#define VMM_INCLUDED_SRC_include_VMMInternal_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/sup.h>
26#include <VBox/vmm/stam.h>
27#include <VBox/vmm/vmm.h>
28#include <VBox/param.h>
29#include <VBox/log.h>
30#include <iprt/critsect.h>
31
32#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
33# error "Not in VMM! This is an internal header!"
34#endif
35#if HC_ARCH_BITS == 32
36# error "32-bit hosts are no longer supported. Go back to 6.0 or earlier!"
37#endif
38
39
40
41/** @defgroup grp_vmm_int Internals
42 * @ingroup grp_vmm
43 * @internal
44 * @{
45 */
46
47/** @def VBOX_WITH_RC_RELEASE_LOGGING
48 * Enables RC release logging. */
49#define VBOX_WITH_RC_RELEASE_LOGGING
50
51/** @def VBOX_WITH_R0_LOGGING
52 * Enables Ring-0 logging (non-release).
53 *
54 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
55 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
56 * \#if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
57 */
58#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
59# define VBOX_WITH_R0_LOGGING
60#endif
61
62/** @def VBOX_STRICT_VMM_STACK
63 * Enables VMM stack guard pages to catch stack over- and underruns. */
64#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
65# define VBOX_STRICT_VMM_STACK
66#endif
67
68
69/** Number of buffers per logger. */
70#define VMMLOGGER_BUFFER_COUNT 4
71
72/**
73 * R0 logger data (ring-0 only data).
74 */
75typedef struct VMMR0PERVCPULOGGER
76{
77 /** Pointer to the logger instance.
78 * The RTLOGGER::u32UserValue1 member is used for flags and magic, while the
79 * RTLOGGER::u64UserValue2 member is the corresponding PGVMCPU value.
80 * RTLOGGER::u64UserValue3 is currently set to the PGVMCPU value too. */
81 R0PTRTYPE(PRTLOGGER) pLogger;
82 /** Log buffer descriptor.
83 * The buffer is allocated in a common block for all VCpus, see VMMR0PERVM. */
84 RTLOGBUFFERDESC aBufDescs[VMMLOGGER_BUFFER_COUNT];
85 /** Flag indicating whether we've registered the instance already. */
86 bool fRegistered;
87 /** Set if the EMT is waiting on hEventFlushWait. */
88 bool fEmtWaiting;
89 /** Set while we're inside vmmR0LoggerFlushCommon to prevent recursion. */
90 bool fFlushing;
91 bool afPadding[1]; /**< Explicit alignment padding. */
92 /** Number of buffers currently queued for flushing. */
93 uint32_t volatile cFlushing;
94 /** The event semaphore the EMT waits on while the buffer is being flushed. */
95 RTSEMEVENT hEventFlushWait;
96} VMMR0PERVCPULOGGER;
97/** Pointer to the R0 logger data (ring-0 only). */
98typedef VMMR0PERVCPULOGGER *PVMMR0PERVCPULOGGER;
99
100
101/**
102 * R0 logger data shared with ring-3 (per CPU).
103 */
104typedef struct VMMR3CPULOGGER
105{
106 /** Buffer info. */
107 struct
108 {
109 /** Auxiliary buffer descriptor. */
110 RTLOGBUFFERAUXDESC AuxDesc;
111 /** Ring-3 mapping of the logging buffer. */
112 R3PTRTYPE(char *) pchBufR3;
113 } aBufs[VMMLOGGER_BUFFER_COUNT];
114 /** The current buffer. */
115 uint32_t idxBuf;
116 /** Number of buffers currently queued for flushing (copy of
117 * VMMR0PERVCPULOGGER::cFlushing). */
118 uint32_t volatile cFlushing;
119 /** The buffer size. */
120 uint32_t cbBuf;
121 /** Number of bytes dropped because the flush context didn't allow waiting. */
122 uint32_t cbDropped;
123 STAMCOUNTER StatFlushes; /**< Statistics: number of buffer flushes. */
124 STAMCOUNTER StatCannotBlock; /**< Statistics: flushes in contexts that didn't allow waiting. */
125 STAMPROFILE StatWait; /**< Statistics: profile of waiting for buffer flushing. */
126 STAMPROFILE StatRaces; /**< Statistics: profile of flush races. */
127 STAMCOUNTER StatRacesToR0; /**< Statistics: flush races involving ring-0 (per name — confirm against STAM registration). */
128} VMMR3CPULOGGER;
129/** Pointer to r0 logger data shared with ring-3. */
130typedef VMMR3CPULOGGER *PVMMR3CPULOGGER;
131
132/** @name Logger indexes for VMMR0PERVCPU::u.aLoggers and VMMCPU::u.aLoggers.
133 * @{ */
134#define VMMLOGGER_IDX_REGULAR 0
135#define VMMLOGGER_IDX_RELEASE 1
136#define VMMLOGGER_IDX_MAX 2
137/** @} */
138
139
140/** Pointer to a ring-0 jump buffer. */
141typedef struct VMMR0JMPBUF *PVMMR0JMPBUF;
142/**
143 * Jump buffer for the setjmp/longjmp like constructs used to
144 * quickly 'call' back into Ring-3.
145 */
146typedef struct VMMR0JMPBUF
147{
148 /** Traditional jmp_buf stuff
149 * @{ */
/* Note: the 32-bit branch below is dead in practice — 32-bit hosts are
 * rejected by the HC_ARCH_BITS == 32 #error at the top of this header. */
150#if HC_ARCH_BITS == 32
151 uint32_t ebx;
152 uint32_t esi;
153 uint32_t edi;
154 uint32_t ebp;
155 uint32_t esp;
156 uint32_t eip;
157 uint32_t eflags;
158#endif
159#if HC_ARCH_BITS == 64
160 uint64_t rbx;
161# ifdef RT_OS_WINDOWS
162 uint64_t rsi;
163 uint64_t rdi;
164# endif
165 uint64_t rbp;
166 uint64_t r12;
167 uint64_t r13;
168 uint64_t r14;
169 uint64_t r15;
170 uint64_t rsp;
171 uint64_t rip;
172# ifdef RT_OS_WINDOWS
173 uint128_t xmm6;
174 uint128_t xmm7;
175 uint128_t xmm8;
176 uint128_t xmm9;
177 uint128_t xmm10;
178 uint128_t xmm11;
179 uint128_t xmm12;
180 uint128_t xmm13;
181 uint128_t xmm14;
182 uint128_t xmm15;
183# endif
184 uint64_t rflags;
185#endif
186 /** @} */
187
188 /** RSP/ESP at the time of the stack mirroring (what pvStackBuf starts with). */
189 RTHCUINTREG UnwindSp;
190 /** RSP/ESP at the time of the long jump call. */
191 RTHCUINTREG UnwindRetSp;
192 /** RBP/EBP inside the vmmR0CallRing3LongJmp frame. */
193 RTHCUINTREG UnwindBp;
194 /** RIP/EIP within vmmR0CallRing3LongJmp for assisting unwinding. */
195 RTHCUINTREG UnwindPc;
196 /** Unwind: The vmmR0CallRing3SetJmp return address value. */
197 RTHCUINTREG UnwindRetPcValue;
198 /** Unwind: The vmmR0CallRing3SetJmp return address stack location. */
199 RTHCUINTREG UnwindRetPcLocation;
200
201 /** The function last being executed here. */
202 RTHCUINTREG pfn;
203 /** The first argument to the function. */
204 RTHCUINTREG pvUser1;
205 /** The second argument to the function. */
206 RTHCUINTREG pvUser2;
207
208 /** Number of valid bytes in pvStackBuf. */
209 uint32_t cbStackValid;
210 /** Size of buffer pvStackBuf points to. */
211 uint32_t cbStackBuf;
212 /** Pointer to buffer for mirroring the stack. Optional. */
213 RTR0PTR pvStackBuf;
214 /** Pointer to a ring-3 accessible jump buffer structure for automatic
215 * mirroring on longjmp. Optional. */
216 R0PTRTYPE(PVMMR0JMPBUF) pMirrorBuf;
217} VMMR0JMPBUF;
218
219
220/**
221 * Log flusher job.
222 *
223 * There is a ring buffer of these in ring-0 (VMMR0PERVM::LogFlusher::aRing) and
224 * a copy of the current one in the shared VM structure (VMM::LogFlusherItem).
225 */
226typedef union VMMLOGFLUSHERENTRY
227{
228 struct
229 {
230 /** The virtual CPU ID. */
231 uint32_t idCpu : 16;
232 /** The logger index (VMMLOGGER_IDX_REGULAR or VMMLOGGER_IDX_RELEASE).
233 * NOTE(review): the old text said "0 for release, 1 for debug", which
234 * contradicts the VMMLOGGER_IDX_* defines above (regular=0, release=1)
235 * — confirm against the flusher code. */
233 uint32_t idxLogger : 8;
234 /** The buffer to be flushed. */
235 uint32_t idxBuffer : 7;
236 /** Set by the flusher thread once it fetched the entry and started
237 * processing it. */
238 uint32_t fProcessing : 1;
239 } s;
240 /** The raw 32-bit view for accessing the whole entry at once. */
240 uint32_t u32;
241} VMMLOGFLUSHERENTRY;
242
243
244/**
245 * VMM Data (part of VM)
246 */
247typedef struct VMM
248{
249 /** Whether we should use the periodic preemption timers. */
250 bool fUsePeriodicPreemptionTimers;
251 /** Alignment padding. */
252 bool afPadding0[7];
253
254#if 0 /* pointless when timers don't run on EMT */
255 /** The EMT yield timer. */
256 TMTIMERHANDLE hYieldTimer;
257 /** The period to the next timeout when suspended or stopped.
258 * This is 0 when running. */
259 uint32_t cYieldResumeMillies;
260 /** The EMT yield timer interval (milliseconds). */
261 uint32_t cYieldEveryMillies;
262 /** The timestamp of the previous yield. (nano) */
263 uint64_t u64LastYield;
264#endif
265
266 /** @name EMT Rendezvous
267 * @{ */
268 /** Semaphore to wait on upon entering ordered execution. */
269 R3PTRTYPE(PRTSEMEVENT) pahEvtRendezvousEnterOrdered;
270 /** Semaphore to wait on upon entering for one-by-one execution. */
271 RTSEMEVENT hEvtRendezvousEnterOneByOne;
272 /** Semaphore to wait on upon entering for all-at-once execution. */
273 RTSEMEVENTMULTI hEvtMulRendezvousEnterAllAtOnce;
274 /** Semaphore to wait on when done. */
275 RTSEMEVENTMULTI hEvtMulRendezvousDone;
276 /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
277 RTSEMEVENT hEvtRendezvousDoneCaller;
278 /** Semaphore to wait on upon recursing. */
279 RTSEMEVENTMULTI hEvtMulRendezvousRecursionPush;
280 /** Semaphore to wait on after done with recursion (caller restoring state). */
281 RTSEMEVENTMULTI hEvtMulRendezvousRecursionPop;
282 /** Semaphore the initiator waits on while the EMTs are getting into position
283 * on hEvtMulRendezvousRecursionPush. */
284 RTSEMEVENT hEvtRendezvousRecursionPushCaller;
285 /** Semaphore the initiator waits on while the EMTs sitting on
286 * hEvtMulRendezvousRecursionPop wake up and leave. */
287 RTSEMEVENT hEvtRendezvousRecursionPopCaller;
288 /** Callback. */
289 R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
290 /** The user argument for the callback. */
291 RTR3PTR volatile pvRendezvousUser;
292 /** Flags. */
293 volatile uint32_t fRendezvousFlags;
294 /** The number of EMTs that have entered. */
295 volatile uint32_t cRendezvousEmtsEntered;
296 /** The number of EMTs that have done their job. */
297 volatile uint32_t cRendezvousEmtsDone;
298 /** The number of EMTs that have returned. */
299 volatile uint32_t cRendezvousEmtsReturned;
300 /** The status code. */
301 volatile int32_t i32RendezvousStatus;
302 /** Spin lock. */
303 volatile uint32_t u32RendezvousLock;
304 /** The recursion depth. */
305 volatile uint32_t cRendezvousRecursions;
306 /** The number of EMTs that have entered the recursion routine. */
307 volatile uint32_t cRendezvousEmtsRecursingPush;
308 /** The number of EMTs that have left the recursion routine. */
309 volatile uint32_t cRendezvousEmtsRecursingPop;
310 /** Triggers rendezvous recursion in the other threads. */
311 volatile bool fRendezvousRecursion;
312
313 /** @} */
314
315 /** RTThreadPreemptIsPendingTrusty() result, set by vmmR0InitVM() for
316 * release logging purposes. */
317 bool fIsPreemptPendingApiTrusty : 1;
318 /** The RTThreadPreemptIsPossible() result, set by vmmR0InitVM() for
319 * release logging purposes. */
320 bool fIsPreemptPossible : 1;
321 /** Set if ring-0 uses context hooks. */
322 bool fIsUsingContextHooks : 1;
323
324 bool afAlignment2[2]; /**< Alignment padding. */
325
326 /** Buffer for storing the standard assertion message for a ring-0 assertion.
327 * Used for saving the assertion message text for the release log and guru
328 * meditation dump. */
329 char szRing0AssertMsg1[512];
330 /** Buffer for storing the custom message for a ring-0 assertion. */
331 char szRing0AssertMsg2[256];
332
333 /** @name Logging
334 * @{ */
335 /** Used when setting up ring-0 logger. */
336 uint64_t nsProgramStart;
337 /** Log flusher thread. */
338 RTTHREAD hLogFlusherThread;
339 /** Copy of the current log flusher work item. */
340 VMMLOGFLUSHERENTRY volatile LogFlusherItem;
341 STAMCOUNTER StatLogFlusherFlushes; /**< Statistics: log flusher flushes. */
342 STAMCOUNTER StatLogFlusherNoWakeUp; /**< Statistics: log flusher no-wake-up cases (see VMMR0PERVM::LogFlusher). */
343 /** @} */
344
345 /** Number of VMMR0_DO_HM_RUN or VMMR0_DO_NEM_RUN calls. */
346 STAMCOUNTER StatRunGC;
347
348 /** @name Statistics for each of the RC/R0 return codes.
349 * @{ */
350 STAMCOUNTER StatRZRetNormal;
351 STAMCOUNTER StatRZRetInterrupt;
352 STAMCOUNTER StatRZRetInterruptHyper;
353 STAMCOUNTER StatRZRetGuestTrap;
354 STAMCOUNTER StatRZRetRingSwitch;
355 STAMCOUNTER StatRZRetRingSwitchInt;
356 STAMCOUNTER StatRZRetStaleSelector;
357 STAMCOUNTER StatRZRetIRETTrap;
358 STAMCOUNTER StatRZRetEmulate;
359 STAMCOUNTER StatRZRetPatchEmulate;
360 STAMCOUNTER StatRZRetIORead;
361 STAMCOUNTER StatRZRetIOWrite;
362 STAMCOUNTER StatRZRetIOCommitWrite;
363 STAMCOUNTER StatRZRetMMIORead;
364 STAMCOUNTER StatRZRetMMIOWrite;
365 STAMCOUNTER StatRZRetMMIOCommitWrite;
366 STAMCOUNTER StatRZRetMMIOPatchRead;
367 STAMCOUNTER StatRZRetMMIOPatchWrite;
368 STAMCOUNTER StatRZRetMMIOReadWrite;
369 STAMCOUNTER StatRZRetMSRRead;
370 STAMCOUNTER StatRZRetMSRWrite;
371 STAMCOUNTER StatRZRetLDTFault;
372 STAMCOUNTER StatRZRetGDTFault;
373 STAMCOUNTER StatRZRetIDTFault;
374 STAMCOUNTER StatRZRetTSSFault;
375 STAMCOUNTER StatRZRetCSAMTask;
376 STAMCOUNTER StatRZRetSyncCR3;
377 STAMCOUNTER StatRZRetMisc;
378 STAMCOUNTER StatRZRetPatchInt3;
379 STAMCOUNTER StatRZRetPatchPF;
380 STAMCOUNTER StatRZRetPatchGP;
381 STAMCOUNTER StatRZRetPatchIretIRQ;
382 STAMCOUNTER StatRZRetRescheduleREM;
383 STAMCOUNTER StatRZRetToR3Total;
384 STAMCOUNTER StatRZRetToR3FF;
385 STAMCOUNTER StatRZRetToR3Unknown;
386 STAMCOUNTER StatRZRetToR3TMVirt;
387 STAMCOUNTER StatRZRetToR3HandyPages;
388 STAMCOUNTER StatRZRetToR3PDMQueues;
389 STAMCOUNTER StatRZRetToR3Rendezvous;
390 STAMCOUNTER StatRZRetToR3Timer;
391 STAMCOUNTER StatRZRetToR3DMA;
392 STAMCOUNTER StatRZRetToR3CritSect;
393 STAMCOUNTER StatRZRetToR3Iem;
394 STAMCOUNTER StatRZRetToR3Iom;
395 STAMCOUNTER StatRZRetTimerPending;
396 STAMCOUNTER StatRZRetInterruptPending;
397 STAMCOUNTER StatRZRetPATMDuplicateFn;
398 STAMCOUNTER StatRZRetPGMChangeMode;
399 STAMCOUNTER StatRZRetPendingRequest;
400 STAMCOUNTER StatRZRetPGMFlushPending;
401 STAMCOUNTER StatRZRetPatchTPR;
402 /** @} */
403} VMM;
404/** Pointer to VMM. */
405typedef VMM *PVMM;
406
407
408/**
409 * VMMCPU Data (part of VMCPU)
410 */
411typedef struct VMMCPU
412{
413 /** The last RC/R0 return code. */
414 int32_t iLastGZRc;
415 /** Alignment padding. */
416 uint32_t u32Padding0;
417
418 /** @name Rendezvous
419 * @{ */
420 /** Whether the EMT is executing a rendezvous right now. For detecting
421 * attempts at recursive rendezvous. */
422 bool volatile fInRendezvous;
423 bool afPadding1[2]; /**< Alignment padding. */
424 /** @} */
425
426 /** Whether we can HLT in VMMR0 rather than having to return to EM.
427 * Updated by vmR3SetHaltMethodU(). */
428 bool fMayHaltInRing0;
429 /** The minimum delta for which we can HLT in ring-0 for.
430 * The deadlines we can calculate are from TM, so, if it's too close
431 * we should just return to ring-3 and run the timer wheel, no point
432 * in spinning in ring-0.
433 * Updated by vmR3SetHaltMethodU(). */
434 uint32_t cNsSpinBlockThreshold;
435 /** Number of ring-0 halts (used for depreciating following values). */
436 uint32_t cR0Halts;
437 /** Number of ring-0 halts succeeding (VINF_SUCCESS) recently. */
438 uint32_t cR0HaltsSucceeded;
439 /** Number of ring-0 halts failing (VINF_EM_HALT) recently. */
440 uint32_t cR0HaltsToRing3;
441 /** Padding */
442 uint32_t u32Padding2;
443
444 /** @name Raw-mode context tracing data.
445 * @{ */
446 SUPDRVTRACERUSRCTX TracerCtx;
447 /** @} */
448
449 /** @name Ring-0 assertion info for this EMT.
450 * @{ */
451 /** Copy of the ring-0 jmp buffer after an assertion. */
452 VMMR0JMPBUF AssertJmpBuf;
453 /** Copy of the assertion stack. */
454 uint8_t abAssertStack[8192];
455 /** @} */
456
457 /**
458 * Loggers.
459 */
460 union
461 {
462 struct
463 {
464 /** The R0 logger data shared with ring-3. */
465 VMMR3CPULOGGER Logger;
466 /** The R0 release logger data shared with ring-3. */
467 VMMR3CPULOGGER RelLogger;
468 } s;
469 /** Array view. */
470 VMMR3CPULOGGER aLoggers[VMMLOGGER_IDX_MAX];
471 } u;
472
/* Ring-0 halt statistics. The comments below expand the member names;
 * confirm exact semantics against the STAM registration in VMMR3. */
473 STAMPROFILE StatR0HaltBlock; /**< Profile: ring-0 halt blocking. */
474 STAMPROFILE StatR0HaltBlockOnTime; /**< Profile: blocks waking up on time. */
475 STAMPROFILE StatR0HaltBlockOverslept; /**< Profile: blocks that overslept. */
476 STAMPROFILE StatR0HaltBlockInsomnia; /**< Profile: blocks suffering insomnia. */
477 STAMCOUNTER StatR0HaltExec; /**< Count: resumed execution after ring-0 halt. */
478 STAMCOUNTER StatR0HaltExecFromBlock; /**< Count: resumed execution from blocking. */
479 STAMCOUNTER StatR0HaltExecFromSpin; /**< Count: resumed execution from spinning. */
480 STAMCOUNTER StatR0HaltToR3; /**< Count: halts going to ring-3. */
481 STAMCOUNTER StatR0HaltToR3FromSpin; /**< Count: to ring-3 from the spin phase. */
482 STAMCOUNTER StatR0HaltToR3Other; /**< Count: to ring-3 for other reasons. */
483 STAMCOUNTER StatR0HaltToR3PendingFF; /**< Count: to ring-3 on pending force flags. */
484 STAMCOUNTER StatR0HaltToR3SmallDelta; /**< Count: to ring-3 because the delta was too small. */
485 STAMCOUNTER StatR0HaltToR3PostNoInt; /**< Count: to ring-3 post-block, no interrupt. */
486 STAMCOUNTER StatR0HaltToR3PostPendingFF; /**< Count: to ring-3 post-block, pending force flags. */
487} VMMCPU;
488AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
489AssertCompile( RTASSERT_OFFSET_OF(VMMCPU, u.s.Logger)
490 == RTASSERT_OFFSET_OF(VMMCPU, u.aLoggers) + sizeof(VMMR3CPULOGGER) * VMMLOGGER_IDX_REGULAR);
491AssertCompile(RTASSERT_OFFSET_OF(VMMCPU, u.s.RelLogger)
492 == RTASSERT_OFFSET_OF(VMMCPU, u.aLoggers) + sizeof(VMMR3CPULOGGER) * VMMLOGGER_IDX_RELEASE);
493
494/** Pointer to VMMCPU. */
495typedef VMMCPU *PVMMCPU;
496
497/**
498 * VMM per-VCpu ring-0 only instance data.
499 */
500typedef struct VMMR0PERVCPU
501{
502 /** The EMT hash table index. */
503 uint16_t idxEmtHash;
504 /** Flag indicating whether we've disabled flushing (world switch) or not. */
505 bool fLogFlushingDisabled;
506 bool afPadding1[5]; /**< Alignment padding. */
507 /** Pointer to the VMMR0EntryFast preemption state structure.
508 * This is used to temporarily restore preemption before blocking. */
509 R0PTRTYPE(PRTTHREADPREEMPTSTATE) pPreemptState;
510 /** Thread context switching hook (ring-0). */
511 RTTHREADCTXHOOK hCtxHook;
512
513 /** @name Arguments passed by VMMR0EntryEx via vmmR0CallRing3SetJmpEx.
514 * @note Cannot be put on the stack as the location may change and upset the
515 * validation of resume-after-ring-3-call logic.
516 * @todo This no longer needs to be here now that we don't call ring-3 and mess
517 * around with stack restoring/switching.
518 * @{ */
519 PGVM pGVM;
520 VMCPUID idCpu;
521 VMMR0OPERATION enmOperation;
522 PSUPVMMR0REQHDR pReq;
523 uint64_t u64Arg;
524 PSUPDRVSESSION pSession;
525 /** @} */
526
527 /** @name Ring-0 setjmp / assertion handling.
528 * @{ */
529 /** The ring-0 setjmp buffer. */
530 VMMR0JMPBUF AssertJmpBuf;
531 /** The disable counter. */
532 uint32_t cCallRing3Disabled;
533 uint32_t u32Padding3; /**< Alignment padding. */
534 /** Ring-0 assertion notification callback. */
535 R0PTRTYPE(PFNVMMR0ASSERTIONNOTIFICATION) pfnAssertCallback;
536 /** Argument for pfnAssertCallback. */
537 R0PTRTYPE(void *) pvAssertCallbackUser;
538 /** @} */
539
540 /**
541 * Loggers
542 */
543 union
544 {
545 struct
546 {
547 /** The R0 logger data. */
548 VMMR0PERVCPULOGGER Logger;
549 /** The R0 release logger data. */
550 VMMR0PERVCPULOGGER RelLogger;
551 } s;
552 /** Array view. */
553 VMMR0PERVCPULOGGER aLoggers[VMMLOGGER_IDX_MAX];
554 } u;
555} VMMR0PERVCPU;
556AssertCompile( RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.Logger)
557 == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_REGULAR);
558AssertCompile(RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.s.RelLogger)
559 == RTASSERT_OFFSET_OF(VMMR0PERVCPU, u.aLoggers) + sizeof(VMMR0PERVCPULOGGER) * VMMLOGGER_IDX_RELEASE);
560AssertCompileMemberAlignment(VMMR0PERVCPU, AssertJmpBuf, 64);
561/** Pointer to VMM ring-0 VMCPU instance data. */
562typedef VMMR0PERVCPU *PVMMR0PERVCPU;
563
564/** @name RTLOGGER::u32UserValue1 Flags
565 * @{ */
566/** The magic value. */
567#define VMMR0_LOGGER_FLAGS_MAGIC_VALUE UINT32_C(0x7d297f05)
568/** Part of the flags value used for the magic. */
569#define VMMR0_LOGGER_FLAGS_MAGIC_MASK UINT32_C(0xffffff0f)
570/** @} */
571
572
573/**
574 * VMM data kept in the ring-0 GVM.
575 */
576typedef struct VMMR0PERVM
577{
578 /** Set if vmmR0InitVM has been called. */
579 bool fCalledInitVm;
580 bool afPadding1[7]; /**< Alignment padding. */
581
582 /** @name Logging
583 * @{ */
584 /** Logger (debug) buffer allocation.
585 * This covers all CPUs. */
586 RTR0MEMOBJ hMemObjLogger;
587 /** The ring-3 mapping object for hMemObjLogger. */
588 RTR0MEMOBJ hMapObjLogger;
589
590 /** Release logger buffer allocation.
591 * This covers all CPUs. */
592 RTR0MEMOBJ hMemObjReleaseLogger;
593 /** The ring-3 mapping object for hMemObjReleaseLogger. */
594 RTR0MEMOBJ hMapObjReleaseLogger;
595
/** Log flusher thread state: work queue ring buffer plus thread bookkeeping. */
596 struct
597 {
598 /** Spinlock protecting the logger ring buffer and associated variables. */
599 R0PTRTYPE(RTSPINLOCK) hSpinlock;
600 /** The log flusher thread handle to make sure there is only one. */
601 RTNATIVETHREAD hThread;
602 /** The handle to the event semaphore the log flusher waits on. */
603 RTSEMEVENT hEvent;
604 /** The index of the log flusher queue head (flusher thread side). */
605 uint32_t volatile idxRingHead;
606 /** The index of the log flusher queue tail (EMT side). */
607 uint32_t volatile idxRingTail;
608 /** Set if the log flusher thread is waiting for work and needs poking. */
609 bool volatile fThreadWaiting;
610 /** Set when the log flusher thread should shut down. */
611 bool volatile fThreadShutdown;
612 /** Indicates that the log flusher thread is running. */
613 bool volatile fThreadRunning;
614 bool afPadding2[5]; /**< Alignment padding. */
615 STAMCOUNTER StatFlushes; /**< Statistics: number of flushes. */
616 STAMCOUNTER StatNoWakeUp; /**< Statistics: enqueues not requiring a wake-up (fThreadWaiting clear). */
617 /** Logger ring buffer.
618 * This is for communicating with the log flusher thread. */
619 VMMLOGFLUSHERENTRY aRing[VMM_MAX_CPU_COUNT * 2 /*loggers*/ * 1 /*buffer*/ + 16 /*fudge*/];
620 } LogFlusher;
621 /** @} */
622} VMMR0PERVM;
623
624RT_C_DECLS_BEGIN
625
/* Format-type registration and build-type query helpers (implemented
 * elsewhere in the VMM; see their call sites for semantics). */
626int vmmInitFormatTypes(void);
627void vmmTermFormatTypes(void);
628uint32_t vmmGetBuildType(void);
629
630#ifdef IN_RING3
631int vmmR3SwitcherInit(PVM pVM);
632void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
633#endif /* IN_RING3 */
634
635#ifdef IN_RING0
636
637/**
638 * World switcher assembly routine.
639 * It will call VMMRCEntry().
640 *
641 * @returns return code from VMMRCEntry().
642 * @param pVM The cross context VM structure.
643 * @param uArg See VMMRCEntry().
644 * @internal
645 */
646DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);
647
648/**
649 * Callback function for vmmR0CallRing3SetJmp.
650 *
651 * @returns VBox status code.
652 * @param pVM The cross context VM structure.
653 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
654 */
655typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP,(PVMCC pVM, PVMCPUCC pVCpu));
656/** Pointer to FNVMMR0SETJMP(). */
657typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;
658
659/**
660 * The setjmp variant used for calling Ring-3.
661 *
662 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
663 * in the middle of a ring-3 call. Another difference is the function pointer and
664 * argument. This has to do with resuming code and the stack frame of the caller.
665 *
666 * NOTE(review): the resume-after-ring-3-call behaviour described above looks
667 * stale — the call-ring-3 machinery was reworked into longjmp-on-assert (see
668 * the r92408 change note) — confirm against the assembly implementation.
669 *
670 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
671 * @param pJmpBuf The jmp_buf to set.
672 * @param pfn The function to be called when not resuming.
673 * @param pVM The cross context VM structure.
674 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
675 */
672DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);
673
674
675/**
676 * Callback function for vmmR0CallRing3SetJmp2.
677 *
678 * @returns VBox status code.
679 * @param pGVM The ring-0 VM structure.
680 * @param idCpu The ID of the calling EMT.
681 */
682typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMP2,(PGVM pGVM, VMCPUID idCpu));
683/** Pointer to FNVMMR0SETJMP2(). */
684typedef FNVMMR0SETJMP2 *PFNVMMR0SETJMP2;
685
686/**
687 * Same as vmmR0CallRing3SetJmp except for the function signature.
688 *
689 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
690 * @param pJmpBuf The jmp_buf to set.
691 * @param pfn The function to be called when not resuming.
692 * @param pGVM The ring-0 VM structure.
693 * @param idCpu The ID of the calling EMT.
694 */
695DECLASM(int) vmmR0CallRing3SetJmp2(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP2 pfn, PGVM pGVM, VMCPUID idCpu);
696
697
698/**
699 * Callback function for vmmR0CallRing3SetJmpEx.
700 *
701 * @returns VBox status code.
702 * @param pvUser The user argument.
703 */
704typedef DECLCALLBACKTYPE(int, FNVMMR0SETJMPEX,(void *pvUser));
705/** Pointer to FNVMMR0SETJMPEX(). */
706typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;
707
708/**
709 * Same as vmmR0CallRing3SetJmp except for the function signature.
710 *
711 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
712 * @param pJmpBuf The jmp_buf to set.
713 * @param pfn The function to be called when not resuming.
714 * @param pvUser The argument of that function.
715 * @param uCallKey Unused call parameter that should be used to help
716 * uniquely identify the call.
717 */
718DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser, uintptr_t uCallKey);
719
720
721/**
722 * Worker for VMMRZCallRing3.
723 * This will save the stack and registers.
724 *
725 * @returns rc.
726 * @param pJmpBuf Pointer to the jump buffer.
727 * @param rc The return code.
728 */
729DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);
730
731# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
732int vmmR0TripleFaultHackInit(void);
733void vmmR0TripleFaultHackTerm(void);
734# endif
735
736#endif /* IN_RING0 */
737
738RT_C_DECLS_END
739
740/** @} */
741
742#endif /* !VMM_INCLUDED_SRC_include_VMMInternal_h */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette