VirtualBox

source: vbox/trunk/include/VBox/vmm/vmm.h@ 45344

Last change on this file since 45344 was 45293, checked in by vboxsync, 12 years ago

PGMCritSectRw: Prep for ring-0 and raw-mode context operation.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.8 KB
Line 
1/** @file
2 * VMM - The Virtual Machine Monitor.
3 */
4
5/*
6 * Copyright (C) 2006-2013 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_vmm_h
27#define ___VBox_vmm_vmm_h
28
29#include <VBox/types.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/sup.h>
32#include <VBox/log.h>
33#include <iprt/stdarg.h>
34
35RT_C_DECLS_BEGIN
36
37/** @defgroup grp_vmm The Virtual Machine Monitor API
38 * @{
39 */
40
/**
 * World switcher identifiers.
 *
 * Each value identifies the switcher code for one combination of host paging
 * mode and shadow (guest) paging mode, e.g. 32-bit host with PAE shadow
 * paging.
 */
typedef enum VMMSWITCHER
{
    /** The usual invalid 0. */
    VMMSWITCHER_INVALID = 0,
    /** Switcher for 32-bit host to 32-bit shadow paging. */
    VMMSWITCHER_32_TO_32,
    /** Switcher for 32-bit host paging to PAE shadow paging. */
    VMMSWITCHER_32_TO_PAE,
    /** Switcher for 32-bit host paging to AMD64 shadow paging. */
    VMMSWITCHER_32_TO_AMD64,
    /** Switcher for PAE host to 32-bit shadow paging. */
    VMMSWITCHER_PAE_TO_32,
    /** Switcher for PAE host to PAE shadow paging. */
    VMMSWITCHER_PAE_TO_PAE,
    /** Switcher for PAE host paging to AMD64 shadow paging. */
    VMMSWITCHER_PAE_TO_AMD64,
    /** Switcher for AMD64 host paging to 32-bit shadow paging. */
    VMMSWITCHER_AMD64_TO_32,
    /** Switcher for AMD64 host paging to PAE shadow paging. */
    VMMSWITCHER_AMD64_TO_PAE,
    /** Switcher for AMD64 host paging to AMD64 shadow paging. */
    VMMSWITCHER_AMD64_TO_AMD64,
    /** Used to make a count for array declarations and suchlike. */
    VMMSWITCHER_MAX,
    /** The usual 32-bit paranoia (forces the enum to 32-bit storage). */
    VMMSWITCHER_32BIT_HACK = 0x7fffffff
} VMMSWITCHER;
71
72
/**
 * VMMRZCallRing3 operations.
 *
 * Identifies the operation that ring-0 / raw-mode context code asks ring-3 to
 * carry out when it jumps back via VMMRZCallRing3 / VMMRZCallRing3NoCpu.
 */
typedef enum VMMCALLRING3
{
    /** Invalid operation. */
    VMMCALLRING3_INVALID = 0,
    /** Acquire the PDM lock. */
    VMMCALLRING3_PDM_LOCK,
    /** Acquire the critical section specified as argument. */
    VMMCALLRING3_PDM_CRIT_SECT_ENTER,
    /** Enter the R/W critical section (in argument) exclusively. */
    VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL,
    /** Enter the R/W critical section (in argument) shared. */
    VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED,
    /** Acquire the PGM lock. */
    VMMCALLRING3_PGM_LOCK,
    /** Grow the PGM shadow page pool. */
    VMMCALLRING3_PGM_POOL_GROW,
    /** Maps a chunk into ring-3. */
    VMMCALLRING3_PGM_MAP_CHUNK,
    /** Allocates more handy pages. */
    VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES,
    /** Allocates a large (2MB) page. */
    VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE,
    /** Acquire the MM hypervisor heap lock. */
    VMMCALLRING3_MMHYPER_LOCK,
    /** Replay the REM handler notifications. */
    VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS,
    /** Flush the GC/R0 logger. */
    VMMCALLRING3_VMM_LOGGER_FLUSH,
    /** Set the VM error message. */
    VMMCALLRING3_VM_SET_ERROR,
    /** Set the VM runtime error message. */
    VMMCALLRING3_VM_SET_RUNTIME_ERROR,
    /** Signal a ring 0 assertion. */
    VMMCALLRING3_VM_R0_ASSERTION,
    /** Ring switch to force preemption. */
    VMMCALLRING3_VM_R0_PREEMPT,
    /** Sync the FTM state with the standby node. */
    VMMCALLRING3_FTM_SET_CHECKPOINT,
    /** The usual 32-bit hack (forces the enum to 32-bit storage). */
    VMMCALLRING3_32BIT_HACK = 0x7fffffff
} VMMCALLRING3;
117
/**
 * VMMRZCallRing3 notification callback.
 *
 * Registered via VMMRZCallRing3SetNotification and invoked around the
 * ring-3 jump.
 *
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   enmOperation    The operation causing the ring-3 jump.
 * @param   pvUser          The user argument.
 */
typedef DECLCALLBACK(void) FNVMMR0CALLRING3NOTIFICATION(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser);
/** Pointer to a FNVMMR0CALLRING3NOTIFICATION(). */
typedef FNVMMR0CALLRING3NOTIFICATION *PFNVMMR0CALLRING3NOTIFICATION;
128
/**
 * Rendezvous callback.
 *
 * Executed on one or more EMTs according to the VMMEMTRENDEZVOUS_FLAGS_TYPE_*
 * flag passed to VMMR3EmtRendezvous.
 *
 * @returns VBox strict status code - EM scheduling.  Do not return
 *          informational status code other than the ones used by EM for
 *          scheduling.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The handle of the calling virtual CPU.
 * @param   pvUser  The user argument.
 */
typedef DECLCALLBACK(VBOXSTRICTRC) FNVMMEMTRENDEZVOUS(PVM pVM, PVMCPU pVCpu, void *pvUser);
/** Pointer to a rendezvous callback function. */
typedef FNVMMEMTRENDEZVOUS *PFNVMMEMTRENDEZVOUS;
143
/**
 * Method table that the VMM uses to call back the user of the VMM.
 *
 * All optional members shall be set to NULL when not implemented; only
 * pfnSaveState is documented with that remark explicitly, the per-thread
 * notification members state it individually.
 */
typedef struct VMM2USERMETHODS
{
    /** Magic value (VMM2USERMETHODS_MAGIC). */
    uint32_t u32Magic;
    /** Structure version (VMM2USERMETHODS_VERSION). */
    uint32_t u32Version;

    /**
     * Save the VM state.
     *
     * @returns VBox status code.
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     *
     * @remarks This member shall be set to NULL if the operation is not
     *          supported.
     */
    DECLR3CALLBACKMEMBER(int, pfnSaveState,(PCVMM2USERMETHODS pThis, PUVM pUVM));
    /** @todo Move pfnVMAtError and pfnCFGMConstructor here? */

    /**
     * EMT initialization notification callback.
     *
     * This is intended for doing per-thread initialization for EMTs (like COM
     * init).
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     * @param   pUVCpu      The user mode virtual CPU handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyEmtInit,(PCVMM2USERMETHODS pThis, PUVM pUVM, PUVMCPU pUVCpu));

    /**
     * EMT termination notification callback.
     *
     * This is intended for doing per-thread cleanups for EMTs (like COM).
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     * @param   pUVCpu      The user mode virtual CPU handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyEmtTerm,(PCVMM2USERMETHODS pThis, PUVM pUVM, PUVMCPU pUVCpu));

    /**
     * PDM thread initialization notification callback.
     *
     * This is intended for doing per-thread initialization (like COM init).
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyPdmtInit,(PCVMM2USERMETHODS pThis, PUVM pUVM));

    /**
     * PDM thread termination notification callback.
     *
     * This is intended for doing per-thread cleanups (like COM uninit);
     * the counterpart of pfnNotifyPdmtInit.
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyPdmtTerm,(PCVMM2USERMETHODS pThis, PUVM pUVM));

    /** Magic value (VMM2USERMETHODS_MAGIC) marking the end of the structure. */
    uint32_t u32EndMagic;
} VMM2USERMETHODS;
221
/** Magic value of the VMM2USERMETHODS (Franz Kafka's birthday, 1883-07-03). */
#define VMM2USERMETHODS_MAGIC         UINT32_C(0x18830703)
/** The VMM2USERMETHODS structure version. */
#define VMM2USERMETHODS_VERSION       UINT32_C(0x00020000)
226
227
/* Context-agnostic VMM getters and utilities (available in R0, R3 and RC). */
VMM_INT_DECL(RTRCPTR)       VMMGetStackRC(PVMCPU pVCpu);
VMMDECL(VMCPUID)            VMMGetCpuId(PVM pVM);
VMMDECL(PVMCPU)             VMMGetCpu(PVM pVM);
VMMDECL(PVMCPU)             VMMGetCpu0(PVM pVM);
VMMDECL(PVMCPU)             VMMGetCpuById(PVM pVM, VMCPUID idCpu);
/* NOTE(review): parameter type is PUVM, so the conventional name would be
 * pUVM rather than pVM -- confirm before renaming. */
VMMR3DECL(PVMCPU)           VMMR3GetCpuByIdU(PUVM pVM, VMCPUID idCpu);
VMM_INT_DECL(uint32_t)      VMMGetSvnRev(void);
VMM_INT_DECL(VMMSWITCHER)   VMMGetSwitcher(PVM pVM);
VMM_INT_DECL(void)          VMMTrashVolatileXMMRegs(void);

/** @def VMMIsHwVirtExtForced
 * Checks if forced to use the hardware assisted virtualization extensions.
 *
 * This is intended for making setup decisions where we can save resources when
 * using hardware assisted virtualization.
 *
 * @returns true / false.
 * @param   pVM     Pointer to the shared VM structure.
 * @internal
 */
#define VMMIsHwVirtExtForced(pVM)   ((pVM)->fHwVirtExtForced)
249
250
#ifdef IN_RING3
/** @defgroup grp_vmm_r3    The VMM Host Context Ring 3 API
 * @ingroup grp_vmm
 * @{
 */
VMMR3_INT_DECL(int)     VMMR3Init(PVM pVM);
VMMR3_INT_DECL(int)     VMMR3InitR0(PVM pVM);
VMMR3_INT_DECL(int)     VMMR3InitRC(PVM pVM);
VMMR3_INT_DECL(int)     VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
VMMR3_INT_DECL(int)     VMMR3Term(PVM pVM);
VMMR3_INT_DECL(void)    VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta);
VMMR3_INT_DECL(int)     VMMR3UpdateLoggers(PVM pVM);
VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM);
VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM);
VMMR3_INT_DECL(int)     VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue);
VMMR3_INT_DECL(int)     VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher);
VMMR3_INT_DECL(int)     VMMR3DisableSwitcher(PVM pVM);
VMMR3_INT_DECL(RTR0PTR) VMMR3GetHostToGuestSwitcher(PVM pVM, VMMSWITCHER enmSwitcher);
VMMR3_INT_DECL(int)     VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(int)     VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu);
VMMR3DECL(int)          VMMR3CallRC(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, ...);
VMMR3DECL(int)          VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list args);
VMMR3DECL(int)          VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
VMMR3DECL(int)          VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu);
VMMR3DECL(void)         VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr);
VMMR3_INT_DECL(void)    VMMR3YieldSuspend(PVM pVM);
VMMR3_INT_DECL(void)    VMMR3YieldStop(PVM pVM);
VMMR3_INT_DECL(void)    VMMR3YieldResume(PVM pVM);
VMMR3_INT_DECL(void)    VMMR3SendSipi(PVM pVM, VMCPUID idCpu, uint32_t uVector);
VMMR3_INT_DECL(void)    VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu);
VMMR3DECL(int)          VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3DECL(int)          VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3DECL(int)          VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
VMMR3_INT_DECL(bool)    VMMR3EmtRendezvousSetDisabled(PVMCPU pVCpu, bool fDisabled);
/** @defgroup grp_VMMR3EmtRendezvous_fFlags     VMMR3EmtRendezvous flags
 * Flags for the fFlags parameter of VMMR3EmtRendezvous; exactly one
 * execution type plus optional modifier flags.
 * @{ */
/** Execution type mask. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK            UINT32_C(0x00000007)
/** Invalid execution type. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID         UINT32_C(0)
/** Let the EMTs execute the callback one by one (in no particular order). */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE      UINT32_C(1)
/** Let all the EMTs execute the callback at the same time. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE     UINT32_C(2)
/** Only execute the callback on one EMT (no particular one). */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE            UINT32_C(3)
/** Let the EMTs execute the callback one by one in ascending order. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING       UINT32_C(4)
/** Let the EMTs execute the callback one by one in descending order. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING      UINT32_C(5)
/** Stop after the first error.
 * This is not valid for any execution type where more than one EMT is active
 * at a time. */
#define VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR        UINT32_C(0x00000008)
/** The valid flags. */
#define VMMEMTRENDEZVOUS_FLAGS_VALID_MASK           UINT32_C(0x0000000f)
/** @} */
VMMR3_INT_DECL(int)     VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(int)     VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead);
/** @} */
#endif /* IN_RING3 */
312
313
314/** @defgroup grp_vmm_r0 The VMM Host Context Ring 0 API
315 * @ingroup grp_vmm
316 * @{
317 */
318
/**
 * The VMMR0Entry() codes.
 */
typedef enum VMMR0OPERATION
{
    /** Run guest context. */
    VMMR0_DO_RAW_RUN = SUP_VMMR0_DO_RAW_RUN,
    /** Run guest code using the available hardware acceleration technology. */
    VMMR0_DO_HM_RUN = SUP_VMMR0_DO_HM_RUN,
    /** Official NOP that we use for profiling. */
    VMMR0_DO_NOP = SUP_VMMR0_DO_NOP,
    /** Official slow ioctl NOP that we use for profiling. */
    VMMR0_DO_SLOW_NOP,

    /** Ask the GVMM to create a new VM. */
    VMMR0_DO_GVMM_CREATE_VM,
    /** Ask the GVMM to destroy the VM. */
    VMMR0_DO_GVMM_DESTROY_VM,
    /** Call GVMMR0SchedHalt(). */
    VMMR0_DO_GVMM_SCHED_HALT,
    /** Call GVMMR0SchedWakeUp(). */
    VMMR0_DO_GVMM_SCHED_WAKE_UP,
    /** Call GVMMR0SchedPoke(). */
    VMMR0_DO_GVMM_SCHED_POKE,
    /** Call GVMMR0SchedWakeUpAndPokeCpus(). */
    VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS,
    /** Call GVMMR0SchedPoll(). */
    VMMR0_DO_GVMM_SCHED_POLL,
    /** Call GVMMR0QueryStatistics(). */
    VMMR0_DO_GVMM_QUERY_STATISTICS,
    /** Call GVMMR0ResetStatistics(). */
    VMMR0_DO_GVMM_RESET_STATISTICS,
    /** Call GVMMR0RegisterVCpu(). */
    VMMR0_DO_GVMM_REGISTER_VMCPU,

    /** Call VMMR0 Per VM Init. */
    VMMR0_DO_VMMR0_INIT,
    /** Call VMMR0 Per VM Termination. */
    VMMR0_DO_VMMR0_TERM,
    /** Setup the hardware accelerated raw-mode session. */
    VMMR0_DO_HM_SETUP_VM,
    /** Attempt to enable or disable hardware accelerated raw-mode. */
    VMMR0_DO_HM_ENABLE,
    /** Calls function in the hypervisor.
     * The caller must setup the hypervisor context so the call will be performed.
     * The difference between VMMR0_DO_RAW_RUN and this one is the handling of
     * the return GC code.  The return code will not be interpreted by this
     * operation.
     */
    VMMR0_DO_CALL_HYPERVISOR,

    /** Call PGMR0PhysAllocateHandyPages(). */
    VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES,
    /** Call PGMR0PhysFlushHandyPages(). */
    VMMR0_DO_PGM_FLUSH_HANDY_PAGES,
    /** Call PGMR0AllocateLargePage(). */
    VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE,
    /** Call PGMR0PhysSetupIommu(). */
    VMMR0_DO_PGM_PHYS_SETUP_IOMMU,

    /** Call GMMR0InitialReservation(). */
    VMMR0_DO_GMM_INITIAL_RESERVATION,
    /** Call GMMR0UpdateReservation(). */
    VMMR0_DO_GMM_UPDATE_RESERVATION,
    /** Call GMMR0AllocatePages(). */
    VMMR0_DO_GMM_ALLOCATE_PAGES,
    /** Call GMMR0FreePages(). */
    VMMR0_DO_GMM_FREE_PAGES,
    /** Call GMMR0FreeLargePage(). */
    VMMR0_DO_GMM_FREE_LARGE_PAGE,
    /** Call GMMR0QueryHypervisorMemoryStatsReq(). */
    VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
    /** Call GMMR0QueryMemoryStatsReq(). */
    VMMR0_DO_GMM_QUERY_MEM_STATS,
    /** Call GMMR0BalloonedPages(). */
    VMMR0_DO_GMM_BALLOONED_PAGES,
    /** Call GMMR0MapUnmapChunk(). */
    VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
    /** Call GMMR0SeedChunk(). */
    VMMR0_DO_GMM_SEED_CHUNK,
    /** Call GMMR0RegisterSharedModule(). */
    VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
    /** Call GMMR0UnregisterSharedModule(). */
    VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE,
    /** Call GMMR0ResetSharedModules(). */
    VMMR0_DO_GMM_RESET_SHARED_MODULES,
    /** Call GMMR0CheckSharedModules(). */
    VMMR0_DO_GMM_CHECK_SHARED_MODULES,
    /** Call GMMR0FindDuplicatePage(). */
    VMMR0_DO_GMM_FIND_DUPLICATE_PAGE,
    /** Call GMMR0QueryStatistics(). */
    VMMR0_DO_GMM_QUERY_STATISTICS,
    /** Call GMMR0ResetStatistics(). */
    VMMR0_DO_GMM_RESET_STATISTICS,

    /** Set a GVMM or GMM configuration value. */
    VMMR0_DO_GCFGM_SET_VALUE,
    /** Query a GVMM or GMM configuration value. */
    VMMR0_DO_GCFGM_QUERY_VALUE,

    /** Call PDMR0DriverCallReqHandler(). */
    VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER,
    /** Call PDMR0DeviceCallReqHandler(). */
    VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER,

    /** The start of the R0 service operations. */
    VMMR0_DO_SRV_START,
    /** Call IntNetR0Open(). */
    VMMR0_DO_INTNET_OPEN,
    /** Call IntNetR0IfClose(). */
    VMMR0_DO_INTNET_IF_CLOSE,
    /** Call IntNetR0IfGetBufferPtrs(). */
    VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS,
    /** Call IntNetR0IfSetPromiscuousMode(). */
    VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE,
    /** Call IntNetR0IfSetMacAddress(). */
    VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS,
    /** Call IntNetR0IfSetActive(). */
    VMMR0_DO_INTNET_IF_SET_ACTIVE,
    /** Call IntNetR0IfSend(). */
    VMMR0_DO_INTNET_IF_SEND,
    /** Call IntNetR0IfWait(). */
    VMMR0_DO_INTNET_IF_WAIT,
    /** Call IntNetR0IfAbortWait(). */
    VMMR0_DO_INTNET_IF_ABORT_WAIT,

    /** Forward call to the PCI driver. */
    VMMR0_DO_PCIRAW_REQ,

    /** The end of the R0 service operations. */
    VMMR0_DO_SRV_END,

    /** Official call we use for testing Ring-0 APIs. */
    VMMR0_DO_TESTS,
    /** Test the 32->64 bits switcher. */
    VMMR0_DO_TEST_SWITCHER3264,

    /** The usual 32-bit type blow up (forces the enum to 32-bit storage). */
    VMMR0_DO_32BIT_HACK = 0x7fffffff
} VMMR0OPERATION;
458
459
/**
 * Request buffer for VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE.
 * @todo Move to GCFGM.h when it's implemented.
 */
typedef struct GCFGMVALUEREQ
{
    /** The request header. */
    SUPVMMR0REQHDR      Hdr;
    /** The support driver session handle. */
    PSUPDRVSESSION      pSession;
    /** The value.
     * This is input for the set request and output for the query. */
    uint64_t            u64Value;
    /** The variable name.
     * This is fixed sized just to make things simple for the mock-up. */
    char                szName[48];
} GCFGMVALUEREQ;
/** Pointer to a VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE request buffer.
 * @todo Move to GCFGM.h when it's implemented.
 */
typedef GCFGMVALUEREQ *PGCFGMVALUEREQ;
481
/* Ring-0 module entry points (dispatch on VMMR0OPERATION). */
VMMR0DECL(int)  VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg);
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
/* NOTE(review): the trailing PSUPDRVSESSION parameter is unnamed -- presumably
 * the support driver session handle; confirm against the R0 implementation. */
VMMR0DECL(int)  VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
VMMR0DECL(int)  VMMR0TermVM(PVM pVM, PGVM pGVM);

#ifdef LOG_ENABLED
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu);
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu);
VMMR0DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu);
#else
/* No-op stand-ins so callers need not check LOG_ENABLED themselves. */
#define VMMR0LogFlushDisable(pVCpu)     do { } while(0)
#define VMMR0LogFlushEnable(pVCpu)      do { } while(0)
#define VMMR0IsLogFlushDisabled(pVCpu)  (true)
#endif
496
497/** @} */
498
499
#ifdef IN_RC
/** @defgroup grp_vmm_rc    The VMM Raw-Mode Context API
 * @ingroup grp_vmm
 * @{
 */
VMMRCDECL(int)  VMMGCEntry(PVM pVM, unsigned uOperation, unsigned uArg, ...);
VMMRCDECL(void) VMMGCGuestToHost(PVM pVM, int rc);
VMMRCDECL(void) VMMGCLogFlushIfFull(PVM pVM);
/** @} */
#endif /* IN_RC */

#if defined(IN_RC) || defined(IN_RING0)
/** @defgroup grp_vmm_rz    The VMM Raw-Mode and Ring-0 Context API
 * @ingroup grp_vmm
 * @{
 */
VMMRZDECL(int)  VMMRZCallRing3(PVM pVM, PVMCPU pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg);
VMMRZDECL(int)  VMMRZCallRing3NoCpu(PVM pVM, VMMCALLRING3 enmOperation, uint64_t uArg);
VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPU pVCpu);
VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPU pVCpu);
VMMRZDECL(bool) VMMRZCallRing3IsEnabled(PVMCPU pVCpu);
VMMRZDECL(int)  VMMRZCallRing3SetNotification(PVMCPU pVCpu, R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallback, RTR0PTR pvUser);
VMMRZDECL(void) VMMRZCallRing3RemoveNotification(PVMCPU pVCpu);
/** @} */
#endif
525
526
527/** @} */
528RT_C_DECLS_END
529
530#endif
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette