VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMMInternal.h@ 38683

Last change on this file since 38683 was 37452, checked in by vboxsync, 14 years ago

IOM,PDMCritSect: Extended PDMCritSectEnter to handle rcBusy=VINF_SUCCESS as a request to call ring-3 to acquire a busy lock. Implemented device level locking in the MMIO code.
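For illustration, a minimal sketch of what this enables in a device's ring-0/raw-mode MMIO handler (the pThis device state and its CritSect member are assumed for the example, not taken from this changeset): passing VINF_SUCCESS as rcBusy asks PDMCritSectEnter to acquire a contended section by calling back into ring-3 instead of returning the rcBusy status to the caller.

    int rc = PDMCritSectEnter(&pThis->CritSect, VINF_SUCCESS /* rcBusy */);
    if (RT_SUCCESS(rc))
    {
        /* ... access the device state ... */
        PDMCritSectLeave(&pThis->CritSect);
    }
    return rc;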

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.6 KB
/* $Id: VMMInternal.h 37452 2011-06-14 18:13:48Z vboxsync $ */
/** @file
 * VMM - Internal header file.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifndef ___VMMInternal_h
#define ___VMMInternal_h

#include <VBox/cdefs.h>
#include <VBox/vmm/stam.h>
#include <VBox/log.h>
#include <iprt/critsect.h>


#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
# error "Not in VMM! This is an internal header!"
#endif


/** @defgroup grp_vmm_int Internals
 * @ingroup grp_vmm
 * @internal
 * @{
 */

/** @def VBOX_WITH_RC_RELEASE_LOGGING
 * Enables RC release logging. */
#define VBOX_WITH_RC_RELEASE_LOGGING

/** @def VBOX_WITH_R0_LOGGING
 * Enables Ring-0 logging (non-release).
 *
 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
 * so you have to sign up here by adding your defined(DEBUG_<userid>) to the
 * #if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
 */
#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DOXYGEN_RUNNING)
# define VBOX_WITH_R0_LOGGING
#endif

/** @def VBOX_STRICT_VMM_STACK
 * Enables VMM stack guard pages to catch stack over- and underruns. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define VBOX_STRICT_VMM_STACK
#endif


/**
 * Converts a VMM pointer into a VM pointer.
 * @returns Pointer to the VM structure the VMM is part of.
 * @param   pVMM    Pointer to VMM instance data.
 */
#define VMM2VM(pVMM)  ( (PVM)((char *)pVMM - pVMM->offVM) )

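/* Usage sketch (illustrative, not part of the original header):
 *     PVMM pVMM = ...;          // pointer to the VMM data embedded in the VM structure
 *     PVM  pVM  = VMM2VM(pVMM); // recover the owning VM via the offVM member
 */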

/**
 * Switcher function, HC to RC.
 *
 * @param   pVM     The VM handle.
 * @returns Return code indicating the action to take.
 */
typedef DECLASMTYPE(int) FNVMMSWITCHERHC(PVM pVM);
/** Pointer to switcher function. */
typedef FNVMMSWITCHERHC *PFNVMMSWITCHERHC;

/**
 * Switcher function, RC to HC.
 *
 * @param   rc      VBox status code.
 */
typedef DECLASMTYPE(void) FNVMMSWITCHERRC(int rc);
/** Pointer to switcher function. */
typedef FNVMMSWITCHERRC *PFNVMMSWITCHERRC;


/**
 * The ring-0 logger instance wrapper.
 *
 * We need to be able to find the VM handle from the logger instance, so we wrap
 * it in this structure.
 */
typedef struct VMMR0LOGGER
{
    /** Pointer to the VM handle. */
    R0PTRTYPE(PVM)              pVM;
    /** Size of the allocated logger instance (Logger). */
    uint32_t                    cbLogger;
    /** Flag indicating whether we've created the logger Ring-0 instance yet. */
    bool                        fCreated;
    /** Flag indicating whether we've disabled flushing (world switch) or not. */
    bool                        fFlushingDisabled;
    /** Flag indicating whether we've registered the instance already. */
    bool                        fRegistered;
    bool                        a8Alignment;
    /** The CPU ID. */
    VMCPUID                     idCpu;
#if HC_ARCH_BITS == 64
    uint32_t                    u32Alignment;
#endif
    /** The ring-0 logger instance. This extends beyond the size. */
    RTLOGGER                    Logger;
} VMMR0LOGGER;
/** Pointer to a ring-0 logger instance wrapper. */
typedef VMMR0LOGGER *PVMMR0LOGGER;
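
/* Illustrative sketch (not part of the original header): recovering the wrapper,
 * and thereby the VM handle, from the embedded RTLOGGER member, e.g. inside a
 * flush callback that is only handed the logger instance:
 *
 *     PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uint8_t *)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
 *     PVM          pVM       = pR0Logger->pVM;
 */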


/**
 * Jump buffer for the setjmp/longjmp like constructs used to
 * quickly 'call' back into Ring-3.
 */
typedef struct VMMR0JMPBUF
{
    /** Traditional jmp_buf stuff
     * @{ */
#if HC_ARCH_BITS == 32
    uint32_t                    ebx;
    uint32_t                    esi;
    uint32_t                    edi;
    uint32_t                    ebp;
    uint32_t                    esp;
    uint32_t                    eip;
    uint32_t                    eflags;
#endif
#if HC_ARCH_BITS == 64
    uint64_t                    rbx;
# ifdef RT_OS_WINDOWS
    uint64_t                    rsi;
    uint64_t                    rdi;
# endif
    uint64_t                    rbp;
    uint64_t                    r12;
    uint64_t                    r13;
    uint64_t                    r14;
    uint64_t                    r15;
    uint64_t                    rsp;
    uint64_t                    rip;
# ifdef RT_OS_WINDOWS
    uint128_t                   xmm6;
    uint128_t                   xmm7;
    uint128_t                   xmm8;
    uint128_t                   xmm9;
    uint128_t                   xmm10;
    uint128_t                   xmm11;
    uint128_t                   xmm12;
    uint128_t                   xmm13;
    uint128_t                   xmm14;
    uint128_t                   xmm15;
# endif
    uint64_t                    rflags;
#endif
    /** @} */

    /** Flag that indicates that we've done a ring-3 call. */
    bool                        fInRing3Call;
    /** The number of bytes we've saved. */
    uint32_t                    cbSavedStack;
    /** Pointer to the buffer used to save the stack.
     * This is assumed to be 8KB. */
    RTR0PTR                     pvSavedStack;
    /** The esp we match against esp on resume to make sure the stack wasn't relocated. */
    RTHCUINTREG                 SpCheck;
    /** The esp we should resume execution with after the restore. */
    RTHCUINTREG                 SpResume;
    /** ESP/RSP at the time of the jump to ring 3. */
    RTHCUINTREG                 SavedEsp;
    /** EBP/RBP at the time of the jump to ring 3. */
    RTHCUINTREG                 SavedEbp;

    /** Stats: Max amount of stack used. */
    uint32_t                    cbUsedMax;
    /** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */
    uint32_t                    cbUsedAvg;
    /** Stats: Total amount of stack used. */
    uint64_t                    cbUsedTotal;
    /** Stats: Number of stack usages. */
    uint64_t                    cUsedTotal;
} VMMR0JMPBUF;
/** Pointer to a ring-0 jump buffer. */
typedef VMMR0JMPBUF *PVMMR0JMPBUF;
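
/* Illustrative sketch of how this buffer is used (simplified, not part of the
 * original header; see vmmR0CallRing3SetJmp and vmmR0CallRing3LongJmp below):
 *
 *     // On entry to ring-0: establish the resume point, then run the worker.
 *     rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, pfnWorker, pVM, pVCpu);
 *
 *     // Deep inside the worker: save the stack and registers and return to the
 *     // SetJmp caller with VINF_VMM_CALL_HOST, so ring-3 can service the request
 *     // and execution can later resume where the long jump was made.
 *     rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
 */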


/**
 * VMM Data (part of VM)
 */
typedef struct VMM
{
    /** Offset to the VM structure.
     * See VMM2VM(). */
    RTINT                       offVM;

    /** @name World Switcher and Related
     * @{
     */
    /** Size of the core code. */
    RTUINT                      cbCoreCode;
    /** Physical address of core code. */
    RTHCPHYS                    HCPhysCoreCode;
    /** Pointer to core code ring-3 mapping - contiguous memory.
     * At present this only means the context switcher code. */
    RTR3PTR                     pvCoreCodeR3;
    /** Pointer to core code ring-0 mapping - contiguous memory.
     * At present this only means the context switcher code. */
    RTR0PTR                     pvCoreCodeR0;
    /** Pointer to core code guest context mapping. */
    RTRCPTR                     pvCoreCodeRC;
    RTRCPTR                     pRCPadding0; /**< Alignment padding. */
#ifdef VBOX_WITH_NMI
    /** The guest context address of the APIC (host) mapping. */
    RTRCPTR                     GCPtrApicBase;
    RTRCPTR                     pRCPadding1; /**< Alignment padding. */
#endif
    /** The current switcher.
     * This will be set before the VMM is fully initialized. */
    VMMSWITCHER                 enmSwitcher;
    /** Flag to disable the switcher permanently (VMX) (boolean). */
    bool                        fSwitcherDisabled;
    /** Array of offsets to the different switchers within the core code. */
    RTUINT                      aoffSwitchers[VMMSWITCHER_MAX];

    /** Resume Guest Execution. See CPUMGCResumeGuest(). */
    RTRCPTR                     pfnCPUMRCResumeGuest;
    /** Resume Guest Execution in V86 mode. See CPUMGCResumeGuestV86(). */
    RTRCPTR                     pfnCPUMRCResumeGuestV86;
    /** Call Trampoline. See vmmGCCallTrampoline(). */
    RTRCPTR                     pfnCallTrampolineRC;
    /** Guest to host switcher entry point. */
    RCPTRTYPE(PFNVMMSWITCHERRC) pfnGuestToHostRC;
    /** Host to guest switcher entry point. */
    R0PTRTYPE(PFNVMMSWITCHERHC) pfnHostToGuestR0;
    /** @} */

    /** @name Logging
     * @{
     */
    /** Size of the allocated logger instance (pRCLoggerRC/pRCLoggerR3). */
    uint32_t                    cbRCLogger;
    /** Pointer to the RC logger instance - RC Ptr.
     * This is NULL if logging is disabled. */
    RCPTRTYPE(PRTLOGGERRC)      pRCLoggerRC;
    /** Pointer to the GC logger instance - R3 Ptr.
     * This is NULL if logging is disabled. */
    R3PTRTYPE(PRTLOGGERRC)      pRCLoggerR3;
    /** Pointer to the GC release logger instance - R3 Ptr. */
    R3PTRTYPE(PRTLOGGERRC)      pRCRelLoggerR3;
    /** Pointer to the GC release logger instance - RC Ptr. */
    RCPTRTYPE(PRTLOGGERRC)      pRCRelLoggerRC;
    /** Size of the allocated release logger instance (pRCRelLoggerRC/pRCRelLoggerR3).
     * This may differ from cbRCLogger. */
    uint32_t                    cbRCRelLogger;
    /** Whether log flushing has been disabled or not. */
    bool                        fRCLoggerFlushingDisabled;
    bool                        afAlignment[5]; /**< Alignment padding. */
    /** @} */

    /** Whether the stack guard pages have been stationed or not. */
    bool                        fStackGuardsStationed;
    /** Whether we should use the periodic preemption timers. */
    bool                        fUsePeriodicPreemptionTimers;

    /** The EMT yield timer. */
    PTMTIMERR3                  pYieldTimer;
    /** The period to the next timeout when suspended or stopped.
     * This is 0 when running. */
    uint32_t                    cYieldResumeMillies;
    /** The EMT yield timer interval (milliseconds). */
    uint32_t                    cYieldEveryMillies;
    /** The timestamp of the previous yield. (nano) */
    uint64_t                    u64LastYield;

    /** Critical section.
     * Used for synchronizing all VCPUs. */
    RTCRITSECT                  CritSectSync;

    /** @name EMT Rendezvous
     * A usage sketch follows this structure definition.
     * @{ */
    /** Semaphore to wait on upon entering ordered execution. */
    R3PTRTYPE(PRTSEMEVENT)      pahEvtRendezvousEnterOrdered;
    /** Semaphore to wait on upon entering for one-by-one execution. */
    RTSEMEVENT                  hEvtRendezvousEnterOneByOne;
    /** Semaphore to wait on upon entering for all-at-once execution. */
    RTSEMEVENTMULTI             hEvtMulRendezvousEnterAllAtOnce;
    /** Semaphore to wait on when done. */
    RTSEMEVENTMULTI             hEvtMulRendezvousDone;
    /** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
    RTSEMEVENT                  hEvtRendezvousDoneCaller;
    /** Callback. */
    R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
    /** The user argument for the callback. */
    RTR3PTR volatile            pvRendezvousUser;
    /** Flags. */
    volatile uint32_t           fRendezvousFlags;
    /** The number of EMTs that have entered. */
    volatile uint32_t           cRendezvousEmtsEntered;
    /** The number of EMTs that have done their job. */
    volatile uint32_t           cRendezvousEmtsDone;
    /** The number of EMTs that have returned. */
    volatile uint32_t           cRendezvousEmtsReturned;
    /** The status code. */
    volatile int32_t            i32RendezvousStatus;
    /** Spin lock. */
    volatile uint32_t           u32RendezvousLock;
    /** @} */

#if HC_ARCH_BITS == 32
    uint32_t                    u32Alignment; /**< Alignment padding. */
#endif

    /** Buffer for storing the standard assertion message for a ring-0 assertion.
     * Used for saving the assertion message text for the release log and guru
     * meditation dump. */
    char                        szRing0AssertMsg1[512];
    /** Buffer for storing the custom message for a ring-0 assertion. */
    char                        szRing0AssertMsg2[256];

    /** Number of VMMR0_DO_RUN_GC calls. */
    STAMCOUNTER                 StatRunRC;

    /** Statistics for each of the RC/R0 return codes.
     * @{ */
    STAMCOUNTER                 StatRZRetNormal;
    STAMCOUNTER                 StatRZRetInterrupt;
    STAMCOUNTER                 StatRZRetInterruptHyper;
    STAMCOUNTER                 StatRZRetGuestTrap;
    STAMCOUNTER                 StatRZRetRingSwitch;
    STAMCOUNTER                 StatRZRetRingSwitchInt;
    STAMCOUNTER                 StatRZRetStaleSelector;
    STAMCOUNTER                 StatRZRetIRETTrap;
    STAMCOUNTER                 StatRZRetEmulate;
    STAMCOUNTER                 StatRZRetIOBlockEmulate;
    STAMCOUNTER                 StatRZRetPatchEmulate;
    STAMCOUNTER                 StatRZRetIORead;
    STAMCOUNTER                 StatRZRetIOWrite;
    STAMCOUNTER                 StatRZRetMMIORead;
    STAMCOUNTER                 StatRZRetMMIOWrite;
    STAMCOUNTER                 StatRZRetMMIOPatchRead;
    STAMCOUNTER                 StatRZRetMMIOPatchWrite;
    STAMCOUNTER                 StatRZRetMMIOReadWrite;
    STAMCOUNTER                 StatRZRetLDTFault;
    STAMCOUNTER                 StatRZRetGDTFault;
    STAMCOUNTER                 StatRZRetIDTFault;
    STAMCOUNTER                 StatRZRetTSSFault;
    STAMCOUNTER                 StatRZRetPDFault;
    STAMCOUNTER                 StatRZRetCSAMTask;
    STAMCOUNTER                 StatRZRetSyncCR3;
    STAMCOUNTER                 StatRZRetMisc;
    STAMCOUNTER                 StatRZRetPatchInt3;
    STAMCOUNTER                 StatRZRetPatchPF;
    STAMCOUNTER                 StatRZRetPatchGP;
    STAMCOUNTER                 StatRZRetPatchIretIRQ;
    STAMCOUNTER                 StatRZRetRescheduleREM;
    STAMCOUNTER                 StatRZRetToR3;
    STAMCOUNTER                 StatRZRetToR3Unknown;
    STAMCOUNTER                 StatRZRetToR3TMVirt;
    STAMCOUNTER                 StatRZRetToR3HandyPages;
    STAMCOUNTER                 StatRZRetToR3PDMQueues;
    STAMCOUNTER                 StatRZRetToR3Rendezvous;
    STAMCOUNTER                 StatRZRetToR3Timer;
    STAMCOUNTER                 StatRZRetToR3DMA;
    STAMCOUNTER                 StatRZRetToR3CritSect;
    STAMCOUNTER                 StatRZRetTimerPending;
    STAMCOUNTER                 StatRZRetInterruptPending;
    STAMCOUNTER                 StatRZRetCallRing3;
    STAMCOUNTER                 StatRZRetPATMDuplicateFn;
    STAMCOUNTER                 StatRZRetPGMChangeMode;
    STAMCOUNTER                 StatRZRetPendingRequest;
    STAMCOUNTER                 StatRZRetPGMFlushPending;
    STAMCOUNTER                 StatRZRetPatchTPR;
    STAMCOUNTER                 StatRZCallPDMCritSectEnter;
    STAMCOUNTER                 StatRZCallPDMLock;
    STAMCOUNTER                 StatRZCallLogFlush;
    STAMCOUNTER                 StatRZCallPGMPoolGrow;
    STAMCOUNTER                 StatRZCallPGMMapChunk;
    STAMCOUNTER                 StatRZCallPGMAllocHandy;
    STAMCOUNTER                 StatRZCallRemReplay;
    STAMCOUNTER                 StatRZCallVMSetError;
    STAMCOUNTER                 StatRZCallVMSetRuntimeError;
    STAMCOUNTER                 StatRZCallPGMLock;
    /** @} */
} VMM;
/** Pointer to VMM. */
typedef VMM *PVMM;
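
/* Illustrative sketch (not part of the original header): a typical user of the
 * EMT rendezvous members above, via the VMMR3EmtRendezvous API declared in
 * VBox/vmm/vmm.h.  Every EMT runs the callback one at a time before any of
 * them resumes what it was doing.
 *
 *     static DECLCALLBACK(VBOXSTRICTRC) myRendezvousWorker(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *     {
 *         // per-VCPU work goes here
 *         return VINF_SUCCESS;
 *     }
 *     ...
 *     rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, myRendezvousWorker, NULL);
 */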


/**
 * VMMCPU Data (part of VMCPU)
 */
typedef struct VMMCPU
{
    /** Offset to the VMCPU structure.
     * See VMM2VMCPU(). */
    RTINT                       offVMCPU;

    /** The last RC/R0 return code. */
    int32_t                     iLastGZRc;

    /** VMM stack, pointer to the top of the stack in R3.
     * Stack is allocated from the hypervisor heap and is page aligned
     * and always writable in RC. */
    R3PTRTYPE(uint8_t *)        pbEMTStackR3;
    /** Pointer to the bottom of the stack - needed for doing relocations. */
    RCPTRTYPE(uint8_t *)        pbEMTStackRC;
    /** Pointer to the bottom of the stack - needed for doing relocations. */
    RCPTRTYPE(uint8_t *)        pbEMTStackBottomRC;

#ifdef LOG_ENABLED
    /** Pointer to the R0 logger instance - R3 Ptr.
     * This is NULL if logging is disabled. */
    R3PTRTYPE(PVMMR0LOGGER)     pR0LoggerR3;
    /** Pointer to the R0 logger instance - R0 Ptr.
     * This is NULL if logging is disabled. */
    R0PTRTYPE(PVMMR0LOGGER)     pR0LoggerR0;
#endif

    /** @name Call Ring-3
     * Formerly known as host calls.  A usage sketch follows this structure
     * definition.
     * @{ */
    /** The disable counter. */
    uint32_t                    cCallRing3Disabled;
    /** The pending operation. */
    VMMCALLRING3                enmCallRing3Operation;
    /** The result of the last operation. */
    int32_t                     rcCallRing3;
#if HC_ARCH_BITS == 64
    uint32_t                    padding;
#endif
    /** The argument to the operation. */
    uint64_t                    u64CallRing3Arg;
    /** The Ring-0 jmp buffer. */
    VMMR0JMPBUF                 CallRing3JmpBufR0;
    /** @} */

} VMMCPU;
/** Pointer to VMMCPU. */
typedef VMMCPU *PVMMCPU;
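
/* Illustrative sketch (not part of the original header): how ring-0/raw-mode
 * code typically uses the Call Ring-3 members above through the VMMRZCallRing3
 * API, here with the logger-flush operation as the example:
 *
 *     rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0 /*uArg*/);
 *
 * The operation and argument are recorded in enmCallRing3Operation and
 * u64CallRing3Arg, the jump buffer takes us back to ring-3, and rcCallRing3
 * holds the result when execution resumes here.
 */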


/**
 * The VMMGCEntry() codes.
 */
typedef enum VMMGCOPERATION
{
    /** Do GC module init. */
    VMMGC_DO_VMMGC_INIT = 1,

    /** The first Trap testcase. */
    VMMGC_DO_TESTCASE_TRAP_FIRST = 0x0dead000,
    /** Trap 0 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0 = VMMGC_DO_TESTCASE_TRAP_FIRST,
    /** Trap 1 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_1,
    /** Trap 2 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_2,
    /** Trap 3 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_3,
    /** Trap 4 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_4,
    /** Trap 5 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_5,
    /** Trap 6 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_6,
    /** Trap 7 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_7,
    /** Trap 8 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_8,
    /** Trap 9 testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_9,
    /** Trap 0a testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0A,
    /** Trap 0b testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0B,
    /** Trap 0c testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0C,
    /** Trap 0d testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0D,
    /** Trap 0e testcases, uArg selects the variation. */
    VMMGC_DO_TESTCASE_TRAP_0E,
    /** The last trap testcase (exclusive). */
    VMMGC_DO_TESTCASE_TRAP_LAST,
    /** Testcase for checking interrupt forwarding. */
    VMMGC_DO_TESTCASE_HYPER_INTERRUPT,
    /** Switching testing and profiling stub. */
    VMMGC_DO_TESTCASE_NOP,
    /** Testcase for checking interrupt masking. */
    VMMGC_DO_TESTCASE_INTERRUPT_MASKING,
    /** Switching testing and profiling stub. */
    VMMGC_DO_TESTCASE_HWACCM_NOP,

    /** The usual 32-bit hack. */
    VMMGC_DO_32_BIT_HACK = 0x7fffffff
} VMMGCOPERATION;


RT_C_DECLS_BEGIN

#ifdef IN_RING3
int  vmmR3SwitcherInit(PVM pVM);
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
#endif /* IN_RING3 */

#ifdef IN_RING0
/**
 * World switcher assembly routine.
 * It will call VMMGCEntry().
 *
 * @returns return code from VMMGCEntry().
 * @param   pVM     The VM in question.
 * @param   uArg    See VMMGCEntry().
 * @internal
 */
DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);

/**
 * Callback function for vmmR0CallRing3SetJmp.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMP(PVM pVM, PVMCPU pVCpu);
/** Pointer to FNVMMR0SETJMP(). */
typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;

/**
 * The setjmp variant used for calling Ring-3.
 *
 * This differs from the normal setjmp in that it will resume VMMRZCallRing3 if
 * we're in the middle of a ring-3 call.  Another difference is the function
 * pointer and argument.  This has to do with resuming code and the stack frame
 * of the caller.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pVM         The argument of that function.
 * @param   pVCpu       The VMCPU handle.
 */
DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);

/**
 * Callback function for vmmR0CallRing3SetJmpEx.
 *
 * @returns VBox status code.
 * @param   pvUser      The user argument.
 */
typedef DECLCALLBACK(int) FNVMMR0SETJMPEX(void *pvUser);
/** Pointer to FNVMMR0SETJMPEX(). */
typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;

/**
 * Same as vmmR0CallRing3SetJmp except for the function signature.
 *
 * @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
 * @param   pJmpBuf     The jmp_buf to set.
 * @param   pfn         The function to be called when not resuming.
 * @param   pvUser      The argument of that function.
 */
DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser);


/**
 * Worker for VMMRZCallRing3.
 * This will save the stack and registers.
 *
 * @returns rc.
 * @param   pJmpBuf     Pointer to the jump buffer.
 * @param   rc          The return code.
 */
DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);

/**
 * Internal R0 logger worker: Logger wrapper.
 */
VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...);

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger);

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);

#endif /* IN_RING0 */
#ifdef IN_RC

/**
 * Internal GC logger worker: Logger wrapper.
 */
VMMRCDECL(void) vmmGCLoggerWrapper(const char *pszFormat, ...);

/**
 * Internal GC release logger worker: Logger wrapper.
 */
VMMRCDECL(void) vmmGCRelLoggerWrapper(const char *pszFormat, ...);

/**
 * Internal GC logger worker: Flush logger.
 *
 * @returns VINF_SUCCESS.
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMRCDECL(int) vmmGCLoggerFlush(PRTLOGGERRC pLogger);

/** @name Trap testcases and related labels.
 * @{ */
DECLASM(void) vmmGCEnableWP(void);
DECLASM(void) vmmGCDisableWP(void);
DECLASM(int)  vmmGCTestTrap3(void);
DECLASM(int)  vmmGCTestTrap8(void);
DECLASM(int)  vmmGCTestTrap0d(void);
DECLASM(int)  vmmGCTestTrap0e(void);
DECLASM(int)  vmmGCTestTrap0e_FaultEIP(void); /**< a label */
DECLASM(int)  vmmGCTestTrap0e_ResumeEIP(void); /**< a label */
/** @} */

#endif /* IN_RC */

RT_C_DECLS_END

/** @} */

#endif