VirtualBox

source: vbox/trunk/include/VBox/vmm.h@34801

Last change on this file since 34801 was 34326, checked in by vboxsync, 14 years ago

VMM: Removed the XXXInitCPU and XXXTermCPU methods since all but the HWACCM ones were stubs and the XXXTermCPU bits were not called in all expected paths. The HWACCMR3InitCPU was hooked up as a VMINITCOMPLETED_RING3 hook, essentially leaving its position in the order of things unchanged, while the HWACCMR3TermCPU call was made static without changing its position at the end of HWACCMR3Term.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.7 KB
/** @file
 * VMM - The Virtual Machine Monitor. (VMM)
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef ___VBox_vmm_h
#define ___VBox_vmm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>
#include <VBox/log.h>
#include <iprt/stdarg.h>

RT_C_DECLS_BEGIN

/** @defgroup grp_vmm The Virtual Machine Monitor API
 * @{
 */

/**
 * World switcher identifiers.
 */
typedef enum VMMSWITCHER
{
    /** The usual invalid 0. */
    VMMSWITCHER_INVALID = 0,
    /** Switcher for 32-bit host to 32-bit shadow paging. */
    VMMSWITCHER_32_TO_32,
    /** Switcher for 32-bit host paging to PAE shadow paging. */
    VMMSWITCHER_32_TO_PAE,
    /** Switcher for 32-bit host paging to AMD64 shadow paging. */
    VMMSWITCHER_32_TO_AMD64,
    /** Switcher for PAE host to 32-bit shadow paging. */
    VMMSWITCHER_PAE_TO_32,
    /** Switcher for PAE host to PAE shadow paging. */
    VMMSWITCHER_PAE_TO_PAE,
    /** Switcher for PAE host paging to AMD64 shadow paging. */
    VMMSWITCHER_PAE_TO_AMD64,
    /** Switcher for AMD64 host paging to 32-bit shadow paging. */
    VMMSWITCHER_AMD64_TO_32,
    /** Switcher for AMD64 host paging to PAE shadow paging. */
    VMMSWITCHER_AMD64_TO_PAE,
    /** Switcher for AMD64 host paging to AMD64 shadow paging. */
    VMMSWITCHER_AMD64_TO_AMD64,
    /** Used to make a count for array declarations and suchlike. */
    VMMSWITCHER_MAX,
    /** The usual 32-bit paranoia. */
    VMMSWITCHER_32BIT_HACK = 0x7fffffff
} VMMSWITCHER;
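
/* A hypothetical usage sketch (names taken from this header, surrounding code
 * assumed): ring-3 code passes one of the identifiers above to
 * VMMR3SelectSwitcher(), declared in the ring-3 section below, to pick the
 * host-to-guest world switcher, e.g.:
 *
 *     int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_PAE_TO_PAE);
 */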


/**
 * VMMRZCallRing3 operations.
 */
typedef enum VMMCALLRING3
{
    /** Invalid operation. */
    VMMCALLRING3_INVALID = 0,
    /** Acquire the PDM lock. */
    VMMCALLRING3_PDM_LOCK,
    /** Acquire the PGM lock. */
    VMMCALLRING3_PGM_LOCK,
    /** Grow the PGM shadow page pool. */
    VMMCALLRING3_PGM_POOL_GROW,
    /** Maps a chunk into ring-3. */
    VMMCALLRING3_PGM_MAP_CHUNK,
    /** Allocates more handy pages. */
    VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES,
    /** Allocates a large (2MB) page. */
    VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE,
    /** Acquire the MM hypervisor heap lock. */
    VMMCALLRING3_MMHYPER_LOCK,
    /** Replay the REM handler notifications. */
    VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS,
    /** Flush the GC/R0 logger. */
    VMMCALLRING3_VMM_LOGGER_FLUSH,
    /** Set the VM error message. */
    VMMCALLRING3_VM_SET_ERROR,
    /** Set the VM runtime error message. */
    VMMCALLRING3_VM_SET_RUNTIME_ERROR,
    /** Signal a ring 0 assertion. */
    VMMCALLRING3_VM_R0_ASSERTION,
    /** Ring switch to force preemption. */
    VMMCALLRING3_VM_R0_PREEMPT,
    /** Sync the FTM state with the standby node. */
    VMMCALLRING3_FTM_SET_CHECKPOINT,
    /** The usual 32-bit hack. */
    VMMCALLRING3_32BIT_HACK = 0x7fffffff
} VMMCALLRING3;

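
/* A minimal usage sketch (hypothetical surrounding code): ring-0/raw-mode code
 * requests one of the operations above through VMMRZCallRing3(), declared in
 * the RZ section near the end of this header.
 *
 *     // On the EMT owning pVCpu, in ring-0 or raw-mode context:
 *     if (VMMRZCallRing3IsEnabled(pVCpu))
 *     {
 *         int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
 *         AssertRC(rc);
 *     }
 */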
/**
 * VMMR3AtomicExecuteHandler callback function.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pvUser  User specified argument.
 *
 * @todo missing prefix.
 */
typedef DECLCALLBACK(int) FNATOMICHANDLER(PVM pVM, void *pvUser);
/** Pointer to a FNATOMICHANDLER(). */
typedef FNATOMICHANDLER *PFNATOMICHANDLER;

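
/* Illustrative sketch of the callback type above (hypothetical handler and
 * variable names; assumes ring-3 / EMT context).  The handler is handed to
 * VMMR3AtomicExecuteHandler(), declared in the ring-3 section below.
 *
 *     static DECLCALLBACK(int) sampleAtomicHandler(PVM pVM, void *pvUser)
 *     {
 *         uint32_t *pcCalls = (uint32_t *)pvUser;
 *         *pcCalls += 1;              // runs while the VM is held in a safe state
 *         return VINF_SUCCESS;
 *     }
 *
 *     uint32_t cCalls = 0;
 *     int rc = VMMR3AtomicExecuteHandler(pVM, sampleAtomicHandler, &cCalls);
 */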
/**
 * Rendezvous callback.
 *
 * @returns VBox strict status code - EM scheduling. Do not return
 *          informational status codes other than the ones used by EM for
 *          scheduling.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The handle of the calling virtual CPU.
 * @param   pvUser  The user argument.
 */
typedef DECLCALLBACK(VBOXSTRICTRC) FNVMMEMTRENDEZVOUS(PVM pVM, PVMCPU pVCpu, void *pvUser);
/** Pointer to a rendezvous callback function. */
typedef FNVMMEMTRENDEZVOUS *PFNVMMEMTRENDEZVOUS;

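
/* Illustrative sketch of a rendezvous callback (hypothetical names).  Only EM
 * scheduling status codes may be returned, so plain VINF_SUCCESS is the common
 * case; VMMR3EmtRendezvous() and its flags follow in the ring-3 section below.
 *
 *     static DECLCALLBACK(VBOXSTRICTRC) sampleRendezvousCallback(PVM pVM, PVMCPU pVCpu, void *pvUser)
 *     {
 *         uint64_t *pau64PerCpu = (uint64_t *)pvUser;    // one slot per VCPU
 *         pau64PerCpu[pVCpu->idCpu] = ASMReadTSC();      // per-VCPU work goes here
 *         return VINF_SUCCESS;
 *     }
 */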
/**
 * Method table that the VMM uses to call back the user of the VMM.
 */
typedef struct VMM2USERMETHODS
{
    /** Magic value (VMM2USERMETHODS_MAGIC). */
    uint32_t u32Magic;
    /** Structure version (VMM2USERMETHODS_VERSION). */
    uint32_t u32Version;

    /**
     * Save the VM state.
     *
     * @returns VBox status code.
     * @param   pThis   Pointer to the callback method table.
     * @param   pVM     The VM handle.
     *
     * @remarks This member shall be set to NULL if the operation is not
     *          supported.
     */
    DECLR3CALLBACKMEMBER(int, pfnSaveState,(PCVMM2USERMETHODS pThis, PVM pVM));
    /** @todo Move pfnVMAtError and pfnCFGMConstructor here? */

    /** Magic value (VMM2USERMETHODS_MAGIC) marking the end of the structure. */
    uint32_t u32EndMagic;
} VMM2USERMETHODS;

/** Magic value of the VMM2USERMETHODS (Franz Kafka). */
#define VMM2USERMETHODS_MAGIC UINT32_C(0x18830703)
/** The VMM2USERMETHODS structure version. */
#define VMM2USERMETHODS_VERSION UINT32_C(0x00010000)

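
/* Illustrative sketch (hypothetical callback and variable names): a user of
 * the VMM fills in the method table along these lines before handing it to
 * whichever VM creation/management API it uses.
 *
 *     static DECLCALLBACK(int) sampleSaveState(PCVMM2USERMETHODS pThis, PVM pVM)
 *     {
 *         NOREF(pThis); NOREF(pVM);
 *         return VINF_SUCCESS;        // a real front-end would kick off a saved-state operation
 *     }
 *
 *     static const VMM2USERMETHODS s_Vmm2UserMethods =
 *     {
 *         VMM2USERMETHODS_MAGIC,
 *         VMM2USERMETHODS_VERSION,
 *         sampleSaveState,            // pfnSaveState, NULL if not supported
 *         VMM2USERMETHODS_MAGIC       // u32EndMagic
 *     };
 */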

VMMDECL(RTRCPTR) VMMGetStackRC(PVMCPU pVCpu);
VMMDECL(VMCPUID) VMMGetCpuId(PVM pVM);
VMMDECL(PVMCPU) VMMGetCpu(PVM pVM);
VMMDECL(PVMCPU) VMMGetCpu0(PVM pVM);
VMMDECL(PVMCPU) VMMGetCpuById(PVM pVM, VMCPUID idCpu);
VMMDECL(uint32_t) VMMGetSvnRev(void);
VMMDECL(VMMSWITCHER) VMMGetSwitcher(PVM pVM);
VMMDECL(void) VMMTrashVolatileXMMRegs(void);

/** @def VMMIsHwVirtExtForced
 * Checks if forced to use the hardware assisted virtualization extensions.
 *
 * This is intended for making setup decisions where we can save resources when
 * using hardware assisted virtualization.
 *
 * @returns true / false.
 * @param   pVM     Pointer to the shared VM structure.
 */
#define VMMIsHwVirtExtForced(pVM) ((pVM)->fHwVirtExtForced)

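
/* Hypothetical example of the setup-time decision described above (the
 * variable name is made up):
 *
 *     bool fSetupRawModeBits = !VMMIsHwVirtExtForced(pVM);
 */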

#ifdef IN_RING3
/** @defgroup grp_vmm_r3 The VMM Host Context Ring 3 API
 * @ingroup grp_vmm
 * @{
 */
VMMR3_INT_DECL(int) VMMR3Init(PVM pVM);
VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM);
VMMR3_INT_DECL(int) VMMR3InitRC(PVM pVM);
VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
VMMR3_INT_DECL(int) VMMR3Term(PVM pVM);
VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta);
VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM);
VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM);
VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM);
VMMR3_INT_DECL(int) VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue);
VMMR3_INT_DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher);
VMMR3_INT_DECL(int) VMMR3DisableSwitcher(PVM pVM);
VMMR3_INT_DECL(RTR0PTR) VMMR3GetHostToGuestSwitcher(PVM pVM, VMMSWITCHER enmSwitcher);
VMMR3_INT_DECL(int) VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(int) VMMR3HwAccRunGC(PVM pVM, PVMCPU pVCpu);
VMMR3DECL(int) VMMR3CallRC(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, ...);
VMMR3DECL(int) VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list args);
VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu);
VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr);
VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM);
VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM);
VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM);
VMMR3_INT_DECL(void) VMMR3SendSipi(PVM pVM, VMCPUID idCpu, uint32_t uVector);
VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu);
VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3DECL(int) VMMR3AtomicExecuteHandler(PVM pVM, PFNATOMICHANDLER pfnHandler, void *pvUser);
VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
/** @defgroup grp_VMMR3EmtRendezvous_fFlags VMMR3EmtRendezvous flags
 * @{ */
/** Execution type mask. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK UINT32_C(0x00000007)
/** Invalid execution type. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID UINT32_C(0)
/** Let the EMTs execute the callback one by one (in no particular order). */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE UINT32_C(1)
/** Let all the EMTs execute the callback at the same time. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE UINT32_C(2)
/** Only execute the callback on one EMT (no particular one). */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE UINT32_C(3)
/** Let the EMTs execute the callback one by one in ascending order. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING UINT32_C(4)
/** Let the EMTs execute the callback one by one in descending order. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING UINT32_C(5)
/** Stop after the first error.
 * This is not valid for any execution type where more than one EMT is active
 * at a time. */
#define VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR UINT32_C(0x00000008)
/** The valid flags. */
#define VMMEMTRENDEZVOUS_FLAGS_VALID_MASK UINT32_C(0x0000000f)
/** @} */
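
/* Illustrative sketch (hypothetical names): running the callback sketched
 * earlier on every EMT, one EMT at a time, stopping at the first error.
 * The ONE_BY_ONE type keeps a single EMT active at a time, so combining it
 * with STOP_ON_ERROR is valid.
 *
 *     int rc = VMMR3EmtRendezvous(pVM,
 *                                 VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE
 *                                 | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
 *                                 sampleRendezvousCallback, pau64PerCpu);
 */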
VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead);
/** @} */
#endif /* IN_RING3 */


/** @defgroup grp_vmm_r0 The VMM Host Context Ring 0 API
 * @ingroup grp_vmm
 * @{
 */

/**
 * The VMMR0Entry() codes.
 */
typedef enum VMMR0OPERATION
{
    /** Run guest context. */
    VMMR0_DO_RAW_RUN = SUP_VMMR0_DO_RAW_RUN,
    /** Run guest code using the available hardware acceleration technology. */
    VMMR0_DO_HWACC_RUN = SUP_VMMR0_DO_HWACC_RUN,
    /** Official NOP that we use for profiling. */
    VMMR0_DO_NOP = SUP_VMMR0_DO_NOP,
    /** Official slow ioctl NOP that we use for profiling. */
    VMMR0_DO_SLOW_NOP,

    /** Ask the GVMM to create a new VM. */
    VMMR0_DO_GVMM_CREATE_VM,
    /** Ask the GVMM to destroy the VM. */
    VMMR0_DO_GVMM_DESTROY_VM,
    /** Call GVMMR0SchedHalt(). */
    VMMR0_DO_GVMM_SCHED_HALT,
    /** Call GVMMR0SchedWakeUp(). */
    VMMR0_DO_GVMM_SCHED_WAKE_UP,
    /** Call GVMMR0SchedPoke(). */
    VMMR0_DO_GVMM_SCHED_POKE,
    /** Call GVMMR0SchedWakeUpAndPokeCpus(). */
    VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS,
    /** Call GVMMR0SchedPoll(). */
    VMMR0_DO_GVMM_SCHED_POLL,
    /** Call GVMMR0QueryStatistics(). */
    VMMR0_DO_GVMM_QUERY_STATISTICS,
    /** Call GVMMR0ResetStatistics(). */
    VMMR0_DO_GVMM_RESET_STATISTICS,
    /** Call GVMMR0RegisterVCpu(). */
    VMMR0_DO_GVMM_REGISTER_VMCPU,

    /** Call VMMR0 Per VM Init. */
    VMMR0_DO_VMMR0_INIT,
    /** Call VMMR0 Per VM Termination. */
    VMMR0_DO_VMMR0_TERM,
    /** Set up the hardware accelerated raw-mode session. */
    VMMR0_DO_HWACC_SETUP_VM,
    /** Attempt to enable or disable hardware accelerated raw-mode. */
    VMMR0_DO_HWACC_ENABLE,
    /** Calls a function in the hypervisor.
     * The caller must set up the hypervisor context so the call will be performed.
     * The difference between VMMR0_DO_RUN_GC and this one is the handling of
     * the return GC code. The return code will not be interpreted by this operation.
     */
    VMMR0_DO_CALL_HYPERVISOR,

    /** Call PGMR0PhysAllocateHandyPages(). */
    VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES,
    /** Call PGMR0AllocateLargePage(). */
    VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE,

    /** Call GMMR0InitialReservation(). */
    VMMR0_DO_GMM_INITIAL_RESERVATION,
    /** Call GMMR0UpdateReservation(). */
    VMMR0_DO_GMM_UPDATE_RESERVATION,
    /** Call GMMR0AllocatePages(). */
    VMMR0_DO_GMM_ALLOCATE_PAGES,
    /** Call GMMR0FreePages(). */
    VMMR0_DO_GMM_FREE_PAGES,
    /** Call GMMR0FreeLargePage(). */
    VMMR0_DO_GMM_FREE_LARGE_PAGE,
    /** Call GMMR0QueryHypervisorMemoryStatsReq(). */
    VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
    /** Call GMMR0QueryMemoryStatsReq(). */
    VMMR0_DO_GMM_QUERY_MEM_STATS,
    /** Call GMMR0BalloonedPages(). */
    VMMR0_DO_GMM_BALLOONED_PAGES,
    /** Call GMMR0MapUnmapChunk(). */
    VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
    /** Call GMMR0SeedChunk(). */
    VMMR0_DO_GMM_SEED_CHUNK,
    /** Call GMMR0RegisterSharedModule. */
    VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
    /** Call GMMR0UnregisterSharedModule. */
    VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE,
    /** Call GMMR0ResetSharedModules. */
    VMMR0_DO_GMM_RESET_SHARED_MODULES,
    /** Call GMMR0CheckSharedModules. */
    VMMR0_DO_GMM_CHECK_SHARED_MODULES,
    /** Call GMMR0FindDuplicatePage. */
    VMMR0_DO_GMM_FIND_DUPLICATE_PAGE,

    /** Set a GVMM or GMM configuration value. */
    VMMR0_DO_GCFGM_SET_VALUE,
    /** Query a GVMM or GMM configuration value. */
    VMMR0_DO_GCFGM_QUERY_VALUE,

    /** Call PDMR0DriverCallReqHandler. */
    VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER,
    /** Call PDMR0DeviceCallReqHandler. */
    VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER,

    /** The start of the R0 service operations. */
    VMMR0_DO_SRV_START,
    /** Call IntNetR0Open(). */
    VMMR0_DO_INTNET_OPEN,
    /** Call IntNetR0IfClose(). */
    VMMR0_DO_INTNET_IF_CLOSE,
    /** Call IntNetR0IfGetBufferPtrs(). */
    VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS,
    /** Call IntNetR0IfSetPromiscuousMode(). */
    VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE,
    /** Call IntNetR0IfSetMacAddress(). */
    VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS,
    /** Call IntNetR0IfSetActive(). */
    VMMR0_DO_INTNET_IF_SET_ACTIVE,
    /** Call IntNetR0IfSend(). */
    VMMR0_DO_INTNET_IF_SEND,
    /** Call IntNetR0IfWait(). */
    VMMR0_DO_INTNET_IF_WAIT,
    /** Call IntNetR0IfAbortWait(). */
    VMMR0_DO_INTNET_IF_ABORT_WAIT,
    /** The end of the R0 service operations. */
    VMMR0_DO_SRV_END,

    /** Official call we use for testing Ring-0 APIs. */
    VMMR0_DO_TESTS,
    /** Test the 32->64 bits switcher. */
    VMMR0_DO_TEST_SWITCHER3264,

    /** The usual 32-bit type blow up. */
    VMMR0_DO_32BIT_HACK = 0x7fffffff
} VMMR0OPERATION;


/**
 * Request buffer for VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE.
 * @todo Move to GCFGM.h when it's implemented.
 */
typedef struct GCFGMVALUEREQ
{
    /** The request header. */
    SUPVMMR0REQHDR Hdr;
    /** The support driver session handle. */
    PSUPDRVSESSION pSession;
    /** The value.
     * This is input for the set request and output for the query. */
    uint64_t u64Value;
    /** The variable name.
     * This is fixed size just to make things simple for the mock-up. */
    char szName[48];
} GCFGMVALUEREQ;
/** Pointer to a VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE request buffer.
 * @todo Move to GCFGM.h when it's implemented.
 */
typedef GCFGMVALUEREQ *PGCFGMVALUEREQ;

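
/* Illustrative sketch (hypothetical configuration value name; assumes ring-3
 * context): querying a GVMM/GMM value through VMMR3CallR0() using the request
 * buffer above.  The set of valid value names is defined by GVMM/GMM, not by
 * this header.
 *
 *     GCFGMVALUEREQ Req;
 *     RT_ZERO(Req);
 *     Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *     Req.Hdr.cbReq    = sizeof(Req);
 *     Req.pSession     = pSession;    // the caller's support driver session
 *     RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/MinSleepCompany");
 *     int rc = VMMR3CallR0(pVM, VMMR0_DO_GCFGM_QUERY_VALUE, 0, &Req.Hdr);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("Value: %RU64\n", Req.u64Value));
 */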
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg);
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM);

#ifdef LOG_ENABLED
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu);
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu);
#else
#define VMMR0LogFlushDisable(pVCpu) do { } while(0)
#define VMMR0LogFlushEnable(pVCpu) do { } while(0)
#endif

/** @} */


#ifdef IN_RC
/** @defgroup grp_vmm_rc The VMM Raw-Mode Context API
 * @ingroup grp_vmm
 * @{
 */
VMMRCDECL(int) VMMGCEntry(PVM pVM, unsigned uOperation, unsigned uArg, ...);
VMMRCDECL(void) VMMGCGuestToHost(PVM pVM, int rc);
VMMRCDECL(void) VMMGCLogFlushIfFull(PVM pVM);
/** @} */
#endif /* IN_RC */

#if defined(IN_RC) || defined(IN_RING0)
/** @defgroup grp_vmm_rz The VMM Raw-Mode and Ring-0 Context API
 * @ingroup grp_vmm
 * @{
 */
VMMRZDECL(int) VMMRZCallRing3(PVM pVM, PVMCPU pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg);
VMMRZDECL(int) VMMRZCallRing3NoCpu(PVM pVM, VMMCALLRING3 enmOperation, uint64_t uArg);
VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPU pVCpu);
VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPU pVCpu);
VMMRZDECL(bool) VMMRZCallRing3IsEnabled(PVMCPU pVCpu);
/** @} */
#endif


/** @} */
RT_C_DECLS_END

#endif
459