VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMMInternal.h@76066

Last change on this file since 76066 was 75646, checked in by vboxsync, 6 years ago

VMM: HLT/MWAIT optimizations for busy guests: don't go back to ring-3 just to call GVMMR0SchedHalt(), do the first call in ring-0. This reduces interrupt latency for some workloads. bugref:9172

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 28.7 KB
/* $Id: VMMInternal.h 75646 2018-11-21 15:38:10Z vboxsync $ */
/** @file
 * VMM - Internal header file.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___VMMInternal_h
#define ___VMMInternal_h

#include <VBox/cdefs.h>
#include <VBox/sup.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/vmm.h>
#include <VBox/log.h>
#include <iprt/critsect.h>

#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
# error "Not in VMM! This is an internal header!"
#endif
#if defined(RT_OS_DARWIN) && HC_ARCH_BITS == 32
# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
#endif



/** @defgroup grp_vmm_int Internals
 * @ingroup grp_vmm
 * @internal
 * @{
 */

/** @def VBOX_WITH_RC_RELEASE_LOGGING
 * Enables RC release logging. */
#define VBOX_WITH_RC_RELEASE_LOGGING

/** @def VBOX_WITH_R0_LOGGING
 * Enables Ring-0 logging (non-release).
 *
 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
 * \#if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
 */
#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
# define VBOX_WITH_R0_LOGGING
#endif

/** @def VBOX_STRICT_VMM_STACK
 * Enables VMM stack guard pages to catch stack over- and underruns. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define VBOX_STRICT_VMM_STACK
#endif


/**
 * Converts a VMM pointer into a VM pointer.
 * @returns Pointer to the VM structure the VMM is part of.
 * @param   pVMM    Pointer to VMM instance data.
 */
#define VMM2VM(pVMM)  ( (PVM)((char*)pVMM - pVMM->offVM) )
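
/* Usage sketch (illustrative, not part of the original header): VMM2VM()
 * relies on offVM, the byte offset from the embedded VMM data back to the
 * owning VM structure, which is set up when the VM is created.  Assuming a
 * valid pVMM obtained from within a VM:
 *
 *      PVM pVM = VMM2VM(pVMM);     // recover the owning VM
 *      AssertPtr(pVM);
 */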


/**
 * Switcher function, HC to RC.
 *
 * @param   pVM     The cross context VM structure.
 * @returns Return code indicating the action to take.
 */
typedef DECLASMTYPE(int) FNVMMSWITCHERHC(PVM pVM);
/** Pointer to switcher function. */
typedef FNVMMSWITCHERHC *PFNVMMSWITCHERHC;

/**
 * Switcher function, RC to HC.
 *
 * @param   rc      VBox status code.
 */
typedef DECLASMTYPE(void) FNVMMSWITCHERRC(int rc);
/** Pointer to switcher function. */
typedef FNVMMSWITCHERRC *PFNVMMSWITCHERRC;


/**
 * The ring-0 logger instance wrapper.
 *
 * We need to be able to find the VM handle from the logger instance, so we wrap
 * it in this structure.
 */
typedef struct VMMR0LOGGER
{
    /** Pointer to the VM. */
    R0PTRTYPE(PVM)              pVM;
    /** Size of the allocated logger instance (Logger). */
    uint32_t                    cbLogger;
    /** Flag indicating whether we've created the Ring-0 logger instance yet. */
    bool                        fCreated;
    /** Flag indicating whether we've disabled flushing (world switch) or not. */
    bool                        fFlushingDisabled;
    /** Flag indicating whether we've registered the instance already. */
    bool                        fRegistered;
    bool                        a8Alignment;
    /** The CPU ID. */
    VMCPUID                     idCpu;
#if HC_ARCH_BITS == 64
    uint32_t                    u32Alignment;
#endif
    /** The ring-0 logger instance. This extends beyond the declared size of this structure. */
    RTLOGGER                    Logger;
} VMMR0LOGGER;
/** Pointer to a ring-0 logger instance wrapper. */
typedef VMMR0LOGGER *PVMMR0LOGGER;
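
/* Illustrative sketch (not from the original source): the wrapper exists so
 * callbacks that only receive the RTLOGGER pointer can get back to the VM.
 * Assuming IPRT's RT_FROM_MEMBER() container-of macro:
 *
 *      PVMMR0LOGGER pR0Logger = RT_FROM_MEMBER(pLogger, VMMR0LOGGER, Logger);
 *      PVM          pVM       = pR0Logger->pVM;
 */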


/**
 * Jump buffer for the setjmp/longjmp like constructs used to
 * quickly 'call' back into Ring-3.
 */
typedef struct VMMR0JMPBUF
{
    /** Traditional jmp_buf stuff
     * @{ */
#if HC_ARCH_BITS == 32
    uint32_t                    ebx;
    uint32_t                    esi;
    uint32_t                    edi;
    uint32_t                    ebp;
    uint32_t                    esp;
    uint32_t                    eip;
    uint32_t                    eflags;
#endif
#if HC_ARCH_BITS == 64
    uint64_t                    rbx;
# ifdef RT_OS_WINDOWS
    uint64_t                    rsi;
    uint64_t                    rdi;
# endif
    uint64_t                    rbp;
    uint64_t                    r12;
    uint64_t                    r13;
    uint64_t                    r14;
    uint64_t                    r15;
    uint64_t                    rsp;
    uint64_t                    rip;
# ifdef RT_OS_WINDOWS
    uint128_t                   xmm6;
    uint128_t                   xmm7;
    uint128_t                   xmm8;
    uint128_t                   xmm9;
    uint128_t                   xmm10;
    uint128_t                   xmm11;
    uint128_t                   xmm12;
    uint128_t                   xmm13;
    uint128_t                   xmm14;
    uint128_t                   xmm15;
# endif
    uint64_t                    rflags;
#endif
    /** @} */

    /** Flag that indicates that we've done a ring-3 call. */
    bool                        fInRing3Call;
    /** The number of bytes we've saved. */
    uint32_t                    cbSavedStack;
    /** Pointer to the buffer used to save the stack.
     * This is assumed to be 8KB. */
    RTR0PTR                     pvSavedStack;
    /** The esp we match against esp on resume to make sure the stack wasn't relocated. */
    RTHCUINTREG                 SpCheck;
    /** The esp we should resume execution with after the restore. */
    RTHCUINTREG                 SpResume;
    /** ESP/RSP at the time of the jump to ring 3. */
    RTHCUINTREG                 SavedEsp;
    /** EBP/RBP at the time of the jump to ring 3. */
    RTHCUINTREG                 SavedEbp;
    /** EIP/RIP within vmmR0CallRing3LongJmp for assisting unwinding. */
    RTHCUINTREG                 SavedEipForUnwind;
    /** Unwind: The vmmR0CallRing3SetJmp return address value. */
    RTHCUINTREG                 UnwindRetPcValue;
    /** Unwind: The vmmR0CallRing3SetJmp return address stack location. */
    RTHCUINTREG                 UnwindRetPcLocation;
#if HC_ARCH_BITS == 32
    /** Alignment padding. */
    uint32_t                    uPadding;
#endif

    /** Stats: Max amount of stack used. */
    uint32_t                    cbUsedMax;
    /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */
    uint32_t                    cbUsedAvg;
    /** Stats: Total amount of stack used. */
    uint64_t                    cbUsedTotal;
    /** Stats: Number of stack usages. */
    uint64_t                    cUsedTotal;
} VMMR0JMPBUF;
/** Pointer to a ring-0 jump buffer. */
typedef VMMR0JMPBUF *PVMMR0JMPBUF;
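
/* Illustrative sketch (not from the original source): when ring-0 code needs a
 * ring-3 service, vmmR0CallRing3LongJmp() (declared further down) copies the
 * live kernel stack into pvSavedStack (assumed to be 8KB), sets fInRing3Call and
 * unwinds using the registers saved above; on resume, SpCheck guards against
 * the stack having been relocated, e.g.:
 *
 *      return vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
 */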


/**
 * VMM Data (part of VM)
 */
typedef struct VMM
{
    /** Offset to the VM structure.
     * See VMM2VM(). */
    RTINT                       offVM;

    /** @name World Switcher and Related
     * @{
     */
    /** Size of the core code. */
    RTUINT                      cbCoreCode;
    /** Physical address of core code. */
    RTHCPHYS                    HCPhysCoreCode;
    /** Pointer to core code ring-3 mapping - contiguous memory.
     * At present this only means the context switcher code. */
    RTR3PTR                     pvCoreCodeR3;
    /** Pointer to core code ring-0 mapping - contiguous memory.
     * At present this only means the context switcher code. */
    RTR0PTR                     pvCoreCodeR0;
    /** Pointer to core code guest context mapping. */
    RTRCPTR                     pvCoreCodeRC;
    RTRCPTR                     pRCPadding0; /**< Alignment padding. */
#ifdef VBOX_WITH_NMI
    /** The guest context address of the APIC (host) mapping. */
    RTRCPTR                     GCPtrApicBase;
    RTRCPTR                     pRCPadding1; /**< Alignment padding. */
#endif
    /** The current switcher.
     * This will be set before the VMM is fully initialized. */
    VMMSWITCHER                 enmSwitcher;
    /** Array of offsets to the different switchers within the core code. */
    uint32_t                    aoffSwitchers[VMMSWITCHER_MAX];
    uint32_t                    u32Padding2; /**< Alignment padding. */

    /** Resume Guest Execution. See CPUMGCResumeGuest(). */
    RTRCPTR                     pfnCPUMRCResumeGuest;
    /** Resume Guest Execution in V86 mode. See CPUMGCResumeGuestV86(). */
    RTRCPTR                     pfnCPUMRCResumeGuestV86;
    /** Call Trampoline. See vmmGCCallTrampoline(). */
    RTRCPTR                     pfnCallTrampolineRC;
    /** Guest to host switcher entry point. */
    RCPTRTYPE(PFNVMMSWITCHERRC) pfnRCToHost;
    /** Host to guest switcher entry point. */
    R0PTRTYPE(PFNVMMSWITCHERHC) pfnR0ToRawMode;
    /** @} */

    /** @name Logging
     * @{
     */
    /** Size of the allocated logger instance (pRCLoggerRC/pRCLoggerR3). */
    uint32_t                    cbRCLogger;
    /** Pointer to the RC logger instance - RC Ptr.
     * This is NULL if logging is disabled. */
    RCPTRTYPE(PRTLOGGERRC)      pRCLoggerRC;
    /** Pointer to the GC logger instance - R3 Ptr.
     * This is NULL if logging is disabled. */
    R3PTRTYPE(PRTLOGGERRC)      pRCLoggerR3;
    /** Pointer to the GC release logger instance - R3 Ptr. */
    R3PTRTYPE(PRTLOGGERRC)      pRCRelLoggerR3;
    /** Pointer to the GC release logger instance - RC Ptr. */
    RCPTRTYPE(PRTLOGGERRC)      pRCRelLoggerRC;
    /** Size of the allocated release logger instance (pRCRelLoggerRC/pRCRelLoggerR3).
     * This may differ from cbRCLogger. */
    uint32_t                    cbRCRelLogger;
    /** Whether log flushing has been disabled or not. */
    bool                        fRCLoggerFlushingDisabled;
    bool                        afAlignment1[5]; /**< Alignment padding. */
    /** @} */

    /** Whether the stack guard pages have been stationed or not. */
    bool                        fStackGuardsStationed;
    /** Whether we should use the periodic preemption timers. */
    bool                        fUsePeriodicPreemptionTimers;

    /** The EMT yield timer. */
    PTMTIMERR3                  pYieldTimer;
    /** The period to the next timeout when suspended or stopped.
     * This is 0 when running. */
    uint32_t                    cYieldResumeMillies;
    /** The EMT yield timer interval (milliseconds). */
    uint32_t                    cYieldEveryMillies;
    /** The timestamp of the previous yield. (nano) */
    uint64_t                    u64LastYield;

    /** @name EMT Rendezvous
     * @{ */
    /** Semaphore to wait on upon entering ordered execution. */
    R3PTRTYPE(PRTSEMEVENT)      pahEvtRendezvousEnterOrdered;
    /** Semaphore to wait on upon entering for one-by-one execution. */
    RTSEMEVENT                  hEvtRendezvousEnterOneByOne;
    /** Semaphore to wait on upon entering for all-at-once execution. */
    RTSEMEVENTMULTI             hEvtMulRendezvousEnterAllAtOnce;
    /** Semaphore to wait on when done. */
    RTSEMEVENTMULTI             hEvtMulRendezvousDone;
    /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
    RTSEMEVENT                  hEvtRendezvousDoneCaller;
    /** Semaphore to wait on upon recursing. */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPush;
    /** Semaphore to wait on after done with recursion (caller restoring state). */
    RTSEMEVENTMULTI             hEvtMulRendezvousRecursionPop;
    /** Semaphore the initiator waits on while the EMTs are getting into position
     * on hEvtMulRendezvousRecursionPush. */
    RTSEMEVENT                  hEvtRendezvousRecursionPushCaller;
    /** Semaphore the initiator waits on while the EMTs sitting on
     * hEvtMulRendezvousRecursionPop wake up and leave. */
    RTSEMEVENT                  hEvtRendezvousRecursionPopCaller;
    /** Callback. */
    R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
    /** The user argument for the callback. */
    RTR3PTR volatile            pvRendezvousUser;
    /** Flags. */
    volatile uint32_t           fRendezvousFlags;
    /** The number of EMTs that have entered. */
    volatile uint32_t           cRendezvousEmtsEntered;
    /** The number of EMTs that have done their job. */
    volatile uint32_t           cRendezvousEmtsDone;
    /** The number of EMTs that have returned. */
    volatile uint32_t           cRendezvousEmtsReturned;
    /** The status code. */
    volatile int32_t            i32RendezvousStatus;
    /** Spin lock. */
    volatile uint32_t           u32RendezvousLock;
    /** The recursion depth. */
    volatile uint32_t           cRendezvousRecursions;
    /** The number of EMTs that have entered the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPush;
    /** The number of EMTs that have left the recursion routine. */
    volatile uint32_t           cRendezvousEmtsRecursingPop;
    /** Triggers rendezvous recursion in the other threads. */
    volatile bool               fRendezvousRecursion;

    /** @} */
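
    /* Illustrative sketch (not from the original source): these fields back
     * VMMR3EmtRendezvous(), which brings every EMT to a synchronization point
     * and runs a callback on each of them; the callback shape is taken from the
     * PFNVMMEMTRENDEZVOUS typedef used above, the rest is assumed:
     *
     *      static DECLCALLBACK(VBOXSTRICTRC) myWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
     *      {
     *          // runs on each EMT according to the flags given below
     *          return VINF_SUCCESS;
     *      }
     *      ...
     *      rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, myWorker, NULL);
     */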

    /** RTThreadPreemptIsPendingTrusty() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPendingApiTrusty : 1;
    /** The RTThreadPreemptIsPossible() result, set by vmmR0InitVM() for
     * release logging purposes. */
    bool                        fIsPreemptPossible : 1;

    bool                        afAlignment2[HC_ARCH_BITS == 32 ? 6 : 2]; /**< Alignment padding. */

    /** Buffer for storing the standard assertion message for a ring-0 assertion.
     * Used for saving the assertion message text for the release log and guru
     * meditation dump. */
    char                        szRing0AssertMsg1[512];
    /** Buffer for storing the custom message for a ring-0 assertion. */
    char                        szRing0AssertMsg2[256];

    /** Number of VMMR0_DO_RUN_GC calls. */
    STAMCOUNTER                 StatRunRC;

    /** Statistics for each of the RC/R0 return codes.
     * @{ */
    STAMCOUNTER                 StatRZRetNormal;
    STAMCOUNTER                 StatRZRetInterrupt;
    STAMCOUNTER                 StatRZRetInterruptHyper;
    STAMCOUNTER                 StatRZRetGuestTrap;
    STAMCOUNTER                 StatRZRetRingSwitch;
    STAMCOUNTER                 StatRZRetRingSwitchInt;
    STAMCOUNTER                 StatRZRetStaleSelector;
    STAMCOUNTER                 StatRZRetIRETTrap;
    STAMCOUNTER                 StatRZRetEmulate;
    STAMCOUNTER                 StatRZRetPatchEmulate;
    STAMCOUNTER                 StatRZRetIORead;
    STAMCOUNTER                 StatRZRetIOWrite;
    STAMCOUNTER                 StatRZRetIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIORead;
    STAMCOUNTER                 StatRZRetMMIOWrite;
    STAMCOUNTER                 StatRZRetMMIOCommitWrite;
    STAMCOUNTER                 StatRZRetMMIOPatchRead;
    STAMCOUNTER                 StatRZRetMMIOPatchWrite;
    STAMCOUNTER                 StatRZRetMMIOReadWrite;
    STAMCOUNTER                 StatRZRetMSRRead;
    STAMCOUNTER                 StatRZRetMSRWrite;
    STAMCOUNTER                 StatRZRetLDTFault;
    STAMCOUNTER                 StatRZRetGDTFault;
    STAMCOUNTER                 StatRZRetIDTFault;
    STAMCOUNTER                 StatRZRetTSSFault;
    STAMCOUNTER                 StatRZRetCSAMTask;
    STAMCOUNTER                 StatRZRetSyncCR3;
    STAMCOUNTER                 StatRZRetMisc;
    STAMCOUNTER                 StatRZRetPatchInt3;
    STAMCOUNTER                 StatRZRetPatchPF;
    STAMCOUNTER                 StatRZRetPatchGP;
    STAMCOUNTER                 StatRZRetPatchIretIRQ;
    STAMCOUNTER                 StatRZRetRescheduleREM;
    STAMCOUNTER                 StatRZRetToR3Total;
    STAMCOUNTER                 StatRZRetToR3FF;
    STAMCOUNTER                 StatRZRetToR3Unknown;
    STAMCOUNTER                 StatRZRetToR3TMVirt;
    STAMCOUNTER                 StatRZRetToR3HandyPages;
    STAMCOUNTER                 StatRZRetToR3PDMQueues;
    STAMCOUNTER                 StatRZRetToR3Rendezvous;
    STAMCOUNTER                 StatRZRetToR3Timer;
    STAMCOUNTER                 StatRZRetToR3DMA;
    STAMCOUNTER                 StatRZRetToR3CritSect;
    STAMCOUNTER                 StatRZRetToR3Iem;
    STAMCOUNTER                 StatRZRetToR3Iom;
    STAMCOUNTER                 StatRZRetTimerPending;
    STAMCOUNTER                 StatRZRetInterruptPending;
    STAMCOUNTER                 StatRZRetCallRing3;
    STAMCOUNTER                 StatRZRetPATMDuplicateFn;
    STAMCOUNTER                 StatRZRetPGMChangeMode;
    STAMCOUNTER                 StatRZRetPendingRequest;
    STAMCOUNTER                 StatRZRetPGMFlushPending;
    STAMCOUNTER                 StatRZRetPatchTPR;
    STAMCOUNTER                 StatRZCallPDMCritSectEnter;
    STAMCOUNTER                 StatRZCallPDMLock;
    STAMCOUNTER                 StatRZCallLogFlush;
    STAMCOUNTER                 StatRZCallPGMPoolGrow;
    STAMCOUNTER                 StatRZCallPGMMapChunk;
    STAMCOUNTER                 StatRZCallPGMAllocHandy;
    STAMCOUNTER                 StatRZCallRemReplay;
    STAMCOUNTER                 StatRZCallVMSetError;
    STAMCOUNTER                 StatRZCallVMSetRuntimeError;
    STAMCOUNTER                 StatRZCallPGMLock;
    /** @} */
} VMM;
/** Pointer to VMM. */
typedef VMM *PVMM;


/**
 * VMMCPU Data (part of VMCPU)
 */
typedef struct VMMCPU
{
    /** Offset to the VMCPU structure.
     * See VMM2VMCPU(). */
    int32_t                     offVMCPU;

    /** The last RC/R0 return code. */
    int32_t                     iLastGZRc;

    /** VMM stack, pointer to the top of the stack in R3.
     * Stack is allocated from the hypervisor heap and is page aligned
     * and always writable in RC. */
    R3PTRTYPE(uint8_t *)        pbEMTStackR3;
    /** Pointer to the bottom of the stack - needed for doing relocations. */
    RCPTRTYPE(uint8_t *)        pbEMTStackRC;
    /** Pointer to the bottom of the stack - needed for doing relocations. */
    RCPTRTYPE(uint8_t *)        pbEMTStackBottomRC;

    /** Pointer to the R0 logger instance - R3 Ptr.
     * This is NULL if logging is disabled. */
    R3PTRTYPE(PVMMR0LOGGER)     pR0LoggerR3;
    /** Pointer to the R0 logger instance - R0 Ptr.
     * This is NULL if logging is disabled. */
    R0PTRTYPE(PVMMR0LOGGER)     pR0LoggerR0;

    /** Pointer to the R0 release logger instance - R3 Ptr.
     * This is NULL if logging is disabled. */
    R3PTRTYPE(PVMMR0LOGGER)     pR0RelLoggerR3;
    /** Pointer to the R0 release logger instance - R0 Ptr.
     * This is NULL if logging is disabled. */
    R0PTRTYPE(PVMMR0LOGGER)     pR0RelLoggerR0;

    /** Thread context switching hook (ring-0). */
    RTTHREADCTXHOOK             hCtxHook;

    /** @name Rendezvous
     * @{ */
    /** Whether the EMT is executing a rendezvous right now. For detecting
     * attempts at recursive rendezvous. */
    bool volatile               fInRendezvous;
    bool                        afPadding[HC_ARCH_BITS == 32 ? 2 : 6+4];
    /** @} */

    /** Whether we can HLT in VMMR0 rather than having to return to EM.
     * Updated by vmR3SetHaltMethodU(). */
    bool                        fMayHaltInRing0;
    /** The minimum delta for which we can HLT in ring-0.
     * The deadlines we can calculate are from TM, so, if it's too close
     * we should just return to ring-3 and run the timer wheel, no point
     * in spinning in ring-0.
     * Updated by vmR3SetHaltMethodU(). */
    uint32_t                    cNsSpinBlockThreshold;
    /** Number of ring-0 halts (used for depreciating the following values). */
    uint32_t                    cR0Halts;
    /** Number of ring-0 halts succeeding (VINF_SUCCESS) recently. */
    uint32_t                    cR0HaltsSucceeded;
    /** Number of ring-0 halts failing (VINF_EM_HALT) recently. */
    uint32_t                    cR0HaltsToRing3;
    /** Padding */
    uint32_t                    u32Padding0;
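
    /* Illustrative sketch (not from the original source): the halt fields above
     * let ring-0 decide whether blocking there is worthwhile, which is the
     * optimization described in r75646.  Assuming a hypothetical helper around
     * the real GVMMR0SchedHalt() path, the decision is roughly:
     *
     *      if (   !pVCpu->vmm.s.fMayHaltInRing0
     *          || cNsDeltaToDeadline < pVCpu->vmm.s.cNsSpinBlockThreshold)
     *          return VINF_EM_HALT;                          // let ring-3/EM handle it
     *      rc = vmmR0DoHaltBlock(pGVM, pVCpu, cNsDeltaToDeadline);  // hypothetical helper
     *      return RT_SUCCESS(rc) ? VINF_SUCCESS : VINF_EM_HALT;
     */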

    /** @name Raw-mode context tracing data.
     * @{ */
    SUPDRVTRACERUSRCTX          TracerCtx;
    /** @} */

    /** Alignment padding, making sure u64CallRing3Arg is nicely aligned. */
    uint32_t                    au32Padding1[3];

    /** @name Call Ring-3
     * Formerly known as host calls.
     * @{ */
    /** The disable counter. */
    uint32_t                    cCallRing3Disabled;
    /** The pending operation. */
    VMMCALLRING3                enmCallRing3Operation;
    /** The result of the last operation. */
    int32_t                     rcCallRing3;
    /** The argument to the operation. */
    uint64_t                    u64CallRing3Arg;
    /** The Ring-0 notification callback. */
    R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallRing3CallbackR0;
    /** The Ring-0 notification callback user argument. */
    R0PTRTYPE(void *)           pvCallRing3CallbackUserR0;
    /** The Ring-0 jmp buffer.
     * @remarks The size of this type isn't stable in assembly, so don't put
     *          anything that needs to be accessed from assembly after it. */
    VMMR0JMPBUF                 CallRing3JmpBufR0;
    /** @} */
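
    /* Illustrative sketch (not from the original source): ring-0/raw-mode code
     * requests a ring-3 service by stashing the operation and argument here and
     * long-jumping out, typically via VMMRZCallRing3(); the exact call shown is
     * assumed:
     *
     *      rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_SET_ERROR, 0);
     *      // EM services enmCallRing3Operation in ring-3 and resumes the jump
     *      // buffer, after which rcCallRing3 holds the result.
     */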

    STAMPROFILE                 StatR0HaltBlock;
    STAMPROFILE                 StatR0HaltBlockOnTime;
    STAMPROFILE                 StatR0HaltBlockOverslept;
    STAMPROFILE                 StatR0HaltBlockInsomnia;
    STAMCOUNTER                 StatR0HaltExec;
    STAMCOUNTER                 StatR0HaltExecFromBlock;
    STAMCOUNTER                 StatR0HaltExecFromSpin;
    STAMCOUNTER                 StatR0HaltToR3FromSpin;
} VMMCPU;
AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
/** Pointer to VMMCPU. */
typedef VMMCPU *PVMMCPU;


/**
 * The VMMRCEntry() codes.
 */
typedef enum VMMRCOPERATION
{
    /** Do GC module init. */
    VMMRC_DO_VMMRC_INIT = 1,

    /** The first Trap testcase. */
    VMMRC_DO_TESTCASE_TRAP_FIRST = 0x0dead000,
    /** Trap 0 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0 = VMMRC_DO_TESTCASE_TRAP_FIRST,
    /** Trap 1 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_1,
    /** Trap 2 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_2,
    /** Trap 3 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_3,
    /** Trap 4 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_4,
    /** Trap 5 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_5,
    /** Trap 6 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_6,
    /** Trap 7 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_7,
    /** Trap 8 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_8,
    /** Trap 9 testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_9,
    /** Trap 0a testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0A,
    /** Trap 0b testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0B,
    /** Trap 0c testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0C,
    /** Trap 0d testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0D,
    /** Trap 0e testcases, uArg selects the variation. */
    VMMRC_DO_TESTCASE_TRAP_0E,
    /** The last trap testcase (exclusive). */
    VMMRC_DO_TESTCASE_TRAP_LAST,
    /** Testcase for checking interrupt forwarding. */
    VMMRC_DO_TESTCASE_HYPER_INTERRUPT,
    /** Switching testing and profiling stub. */
    VMMRC_DO_TESTCASE_NOP,
    /** Testcase for checking interrupt masking. */
    VMMRC_DO_TESTCASE_INTERRUPT_MASKING,
    /** Switching testing and profiling stub. */
    VMMRC_DO_TESTCASE_HM_NOP,

    /** The usual 32-bit hack. */
    VMMRC_DO_32_BIT_HACK = 0x7fffffff
} VMMRCOPERATION;



/**
 * MSR test result entry.
 */
typedef struct VMMTESTMSRENTRY
{
    /** The MSR number, including padding.
     * Set to UINT64_MAX if invalid MSR. */
    uint64_t                    uMsr;
    /** The register value. */
    uint64_t                    uValue;
} VMMTESTMSRENTRY;
/** Pointer to an MSR test result entry. */
typedef VMMTESTMSRENTRY *PVMMTESTMSRENTRY;



RT_C_DECLS_BEGIN

int      vmmInitFormatTypes(void);
void     vmmTermFormatTypes(void);
uint32_t vmmGetBuildType(void);

#ifdef IN_RING3
int      vmmR3SwitcherInit(PVM pVM);
void     vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
#endif /* IN_RING3 */

#ifdef IN_RING0
/**
 * World switcher assembly routine.
 * It will call VMMRCEntry().
 *
 * @returns return code from VMMRCEntry().
 * @param   pVM     The cross context VM structure.
 * @param   uArg    See VMMRCEntry().
 * @internal
 */
DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);

/**
 * Callback function for vmmR0CallRing3SetJmp.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMP(PVM pVM, PVMCPU pVCpu);
/** Pointer to FNVMMR0SETJMP(). */
typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;

/**
 * The setjmp variant used for calling Ring-3.
 *
 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
 * in the middle of a ring-3 call. Another difference is the function pointer and
 * argument. This has to do with resuming code and the stack frame of the caller.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf The jmp_buf to set.
 * @param   pfn     The function to be called when not resuming.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);
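
/* Illustrative sketch (not from the original source): a typical ring-0 entry
 * point wraps its worker in the setjmp so a nested VMMRZCallRing3 can unwind
 * back to this point; the worker name below is hypothetical:
 *
 *      static DECLCALLBACK(int) vmmR0DoSomethingWorker(PVM pVM, PVMCPU pVCpu)
 *      {
 *          // ... may long-jump to ring-3 via VMMRZCallRing3() ...
 *          return VINF_SUCCESS;
 *      }
 *      ...
 *      int rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0,
 *                                    vmmR0DoSomethingWorker, pVM, pVCpu);
 */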


/**
 * Callback function for vmmR0CallRing3SetJmp2.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   idCpu   The ID of the calling EMT.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMP2(PGVM pGVM, VMCPUID idCpu);
/** Pointer to FNVMMR0SETJMP2(). */
typedef FNVMMR0SETJMP2 *PFNVMMR0SETJMP2;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf The jmp_buf to set.
 * @param   pfn     The function to be called when not resuming.
 * @param   pGVM    The ring-0 VM structure.
 * @param   idCpu   The ID of the calling EMT.
 */
DECLASM(int) vmmR0CallRing3SetJmp2(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP2 pfn, PGVM pGVM, VMCPUID idCpu);


/**
 * Callback function for vmmR0CallRing3SetJmpEx.
 *
 * @returns VBox status code.
 * @param   pvUser  The user argument.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMPEX(void *pvUser);
/** Pointer to FNVMMR0SETJMPEX(). */
typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf The jmp_buf to set.
 * @param   pfn     The function to be called when not resuming.
 * @param   pvUser  The argument of that function.
 */
DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser);


/**
 * Worker for VMMRZCallRing3.
 * This will save the stack and registers.
 *
 * @returns rc.
 * @param   pJmpBuf Pointer to the jump buffer.
 * @param   rc      The return code.
 */
DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);

/**
 * Internal R0 logger worker: Logger wrapper.
 */
VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...);

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger);

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger The logger instance.
 * @param   pchBuf  The output buffer.
 * @param   cchBuf  The size of the buffer.
 * @param   pvUser  User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);

# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
int  vmmR0TripleFaultHackInit(void);
void vmmR0TripleFaultHackTerm(void);
# endif

#endif /* IN_RING0 */
#ifdef IN_RC

/**
 * Internal GC logger worker: Logger wrapper.
 */
VMMRCDECL(void) vmmGCLoggerWrapper(const char *pszFormat, ...);

/**
 * Internal GC release logger worker: Logger wrapper.
 */
VMMRCDECL(void) vmmGCRelLoggerWrapper(const char *pszFormat, ...);

/**
 * Internal GC logger worker: Flush logger.
 *
 * @returns VINF_SUCCESS.
 * @param   pLogger The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMRCDECL(int) vmmGCLoggerFlush(PRTLOGGERRC pLogger);

/** @name Trap testcases and related labels.
 * @{ */
DECLASM(void) vmmGCEnableWP(void);
DECLASM(void) vmmGCDisableWP(void);
DECLASM(int)  vmmGCTestTrap3(void);
DECLASM(int)  vmmGCTestTrap8(void);
DECLASM(int)  vmmGCTestTrap0d(void);
DECLASM(int)  vmmGCTestTrap0e(void);
DECLASM(int)  vmmGCTestTrap0e_FaultEIP(void);  /**< a label */
DECLASM(int)  vmmGCTestTrap0e_ResumeEIP(void); /**< a label */
/** @} */

#endif /* IN_RC */

RT_C_DECLS_END

/** @} */

#endif