VirtualBox

source: vbox/trunk/include/VBox/vmm/vmm.h@ 75565

Last change on this file since 75565 was 73474, checked in by vboxsync, 6 years ago

VMM,DBGF: Improved unwinding of ring-0 assertion stacks, making the new unwind info stuff deal correctly with ring-0 pointers and such. bugref:3897 [build fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.5 KB
Line 
1/** @file
2 * VMM - The Virtual Machine Monitor.
3 */
4
5/*
6 * Copyright (C) 2006-2017 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_vmm_h
27#define ___VBox_vmm_vmm_h
28
29#include <VBox/types.h>
30#include <VBox/vmm/vmapi.h>
31#include <VBox/sup.h>
32#include <VBox/log.h>
33#include <iprt/stdarg.h>
34#include <iprt/thread.h>
35
36RT_C_DECLS_BEGIN
37
38/** @defgroup grp_vmm The Virtual Machine Monitor
39 * @{
40 */
41
42/** @defgroup grp_vmm_api The Virtual Machine Monitor API
43 * @{
44 */
45
/**
 * World switcher identifiers.
 *
 * Each value identifies the switcher used to transition between a given host
 * paging mode and a given shadow paging mode (32-bit, PAE or AMD64), plus
 * stub variants for the two host bit-widths.
 */
typedef enum VMMSWITCHER
{
    /** The usual invalid 0. */
    VMMSWITCHER_INVALID = 0,
    /** Switcher for 32-bit host to 32-bit shadow paging. */
    VMMSWITCHER_32_TO_32,
    /** Switcher for 32-bit host paging to PAE shadow paging. */
    VMMSWITCHER_32_TO_PAE,
    /** Switcher for 32-bit host paging to AMD64 shadow paging. */
    VMMSWITCHER_32_TO_AMD64,
    /** Switcher for PAE host to 32-bit shadow paging. */
    VMMSWITCHER_PAE_TO_32,
    /** Switcher for PAE host to PAE shadow paging. */
    VMMSWITCHER_PAE_TO_PAE,
    /** Switcher for PAE host paging to AMD64 shadow paging. */
    VMMSWITCHER_PAE_TO_AMD64,
    /** Switcher for AMD64 host paging to 32-bit shadow paging. */
    VMMSWITCHER_AMD64_TO_32,
    /** Switcher for AMD64 host paging to PAE shadow paging. */
    VMMSWITCHER_AMD64_TO_PAE,
    /** Switcher for AMD64 host paging to AMD64 shadow paging. */
    VMMSWITCHER_AMD64_TO_AMD64,
    /** Stub switcher for 32-bit and PAE. */
    VMMSWITCHER_X86_STUB,
    /** Stub switcher for AMD64. */
    VMMSWITCHER_AMD64_STUB,
    /** Used to make a count for array declarations and suchlike. */
    VMMSWITCHER_MAX,
    /** The usual 32-bit paranoia.
     * Forces the compiler to use a 32-bit (or wider) underlying type. */
    VMMSWITCHER_32BIT_HACK = 0x7fffffff
} VMMSWITCHER;
80
81
/**
 * VMMRZCallRing3 operations.
 *
 * Identifies the operation a raw-mode / ring-0 caller wants performed in
 * ring-3 when it longjmps back via VMMRZCallRing3.
 */
typedef enum VMMCALLRING3
{
    /** Invalid operation. */
    VMMCALLRING3_INVALID = 0,
    /** Acquire the PDM lock. */
    VMMCALLRING3_PDM_LOCK,
    /** Acquire the critical section specified as argument. */
    VMMCALLRING3_PDM_CRIT_SECT_ENTER,
    /** Enter the R/W critical section (in argument) exclusively. */
    VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL,
    /** Enter the R/W critical section (in argument) shared. */
    VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED,
    /** Acquire the PGM lock. */
    VMMCALLRING3_PGM_LOCK,
    /** Grow the PGM shadow page pool. */
    VMMCALLRING3_PGM_POOL_GROW,
    /** Maps a chunk into ring-3. */
    VMMCALLRING3_PGM_MAP_CHUNK,
    /** Allocates more handy pages. */
    VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES,
    /** Allocates a large (2MB) page. */
    VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE,
    /** Acquire the MM hypervisor heap lock. */
    VMMCALLRING3_MMHYPER_LOCK,
    /** Replay the REM handler notifications. */
    VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS,
    /** Flush the GC/R0 logger. */
    VMMCALLRING3_VMM_LOGGER_FLUSH,
    /** Set the VM error message. */
    VMMCALLRING3_VM_SET_ERROR,
    /** Set the VM runtime error message. */
    VMMCALLRING3_VM_SET_RUNTIME_ERROR,
    /** Signal a ring 0 assertion. */
    VMMCALLRING3_VM_R0_ASSERTION,
    /** Ring switch to force preemption. This is also used by PDMCritSect to
     * handle VERR_INTERRUPTED in kernel context. */
    VMMCALLRING3_VM_R0_PREEMPT,
    /** Sync the FTM state with the standby node. */
    VMMCALLRING3_FTM_SET_CHECKPOINT,
    /** The usual 32-bit hack. */
    VMMCALLRING3_32BIT_HACK = 0x7fffffff
} VMMCALLRING3;
127
/**
 * VMMRZCallRing3 notification callback.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   enmOperation    The operation causing the ring-3 jump.
 * @param   pvUser          The user argument.
 */
typedef DECLCALLBACK(int) FNVMMR0CALLRING3NOTIFICATION(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser);
/** Pointer to a FNVMMR0CALLRING3NOTIFICATION(). */
typedef FNVMMR0CALLRING3NOTIFICATION *PFNVMMR0CALLRING3NOTIFICATION;
139
/**
 * Rendezvous callback.
 *
 * Invoked on EMT(s) by VMMR3EmtRendezvous according to the execution-type
 * flags (see VMMEMTRENDEZVOUS_FLAGS_TYPE_XXX).
 *
 * @returns VBox strict status code - EM scheduling.  Do not return
 *          informational status code other than the ones used by EM for
 *          scheduling.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pvUser      The user argument.
 */
typedef DECLCALLBACK(VBOXSTRICTRC) FNVMMEMTRENDEZVOUS(PVM pVM, PVMCPU pVCpu, void *pvUser);
/** Pointer to a rendezvous callback function. */
typedef FNVMMEMTRENDEZVOUS *PFNVMMEMTRENDEZVOUS;
154
/**
 * Method table that the VMM uses to call back the user of the VMM.
 *
 * All members except u32Magic, u32Version and u32EndMagic are optional
 * callbacks; unsupported ones shall be set to NULL (see the per-member
 * remarks).
 */
typedef struct VMM2USERMETHODS
{
    /** Magic value (VMM2USERMETHODS_MAGIC). */
    uint32_t    u32Magic;
    /** Structure version (VMM2USERMETHODS_VERSION). */
    uint32_t    u32Version;

    /**
     * Save the VM state.
     *
     * @returns VBox status code.
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     *
     * @remarks This member shall be set to NULL if the operation is not
     *          supported.
     */
    DECLR3CALLBACKMEMBER(int, pfnSaveState,(PCVMM2USERMETHODS pThis, PUVM pUVM));
    /** @todo Move pfnVMAtError and pfnCFGMConstructor here? */

    /**
     * EMT initialization notification callback.
     *
     * This is intended for doing per-thread initialization for EMTs (like COM
     * init).
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     * @param   pUVCpu      The user mode virtual CPU handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyEmtInit,(PCVMM2USERMETHODS pThis, PUVM pUVM, PUVMCPU pUVCpu));

    /**
     * EMT termination notification callback.
     *
     * This is intended for doing per-thread cleanups for EMTs (like COM).
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     * @param   pUVCpu      The user mode virtual CPU handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyEmtTerm,(PCVMM2USERMETHODS pThis, PUVM pUVM, PUVMCPU pUVCpu));

    /**
     * PDM thread initialization notification callback.
     *
     * This is intended for doing per-thread initialization (like COM init).
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyPdmtInit,(PCVMM2USERMETHODS pThis, PUVM pUVM));

    /**
     * PDM thread termination notification callback.
     *
     * This is intended for doing per-thread cleanups (like COM).
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyPdmtTerm,(PCVMM2USERMETHODS pThis, PUVM pUVM));

    /**
     * Notification callback that a VM reset will be turned into a power off.
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void, pfnNotifyResetTurnedIntoPowerOff,(PCVMM2USERMETHODS pThis, PUVM pUVM));

    /**
     * Generic object query by UUID.
     *
     * @returns pointer to the queried object on success, NULL if not found.
     *
     * @param   pThis       Pointer to the callback method table.
     * @param   pUVM        The user mode VM handle.
     * @param   pUuid       The UUID of what's being queried.  The UUIDs and the
     *                      usage conventions are defined by the user.
     *
     * @remarks This is optional and shall be set to NULL if not wanted.
     */
    DECLR3CALLBACKMEMBER(void *, pfnQueryGenericObject,(PCVMM2USERMETHODS pThis, PUVM pUVM, PCRTUUID pUuid));

    /** Magic value (VMM2USERMETHODS_MAGIC) marking the end of the structure. */
    uint32_t    u32EndMagic;
} VMM2USERMETHODS;

/** Magic value of the VMM2USERMETHODS (Franz Kafka). */
#define VMM2USERMETHODS_MAGIC         UINT32_C(0x18830703)
/** The VMM2USERMETHODS structure version. */
#define VMM2USERMETHODS_VERSION       UINT32_C(0x00030000)
261
262
/**
 * Checks whether we've armed the ring-0 long jump machinery.
 *
 * Expands to VMMR0IsLongJumpArmed() in ring-0 and to a constant @c false in
 * all other contexts (ring-3 / raw-mode has no ring-0 long jump to arm).
 *
 * @returns @c true / @c false
 * @param   a_pVCpu     The caller's cross context virtual CPU structure.
 * @thread  EMT
 * @sa      VMMR0IsLongJumpArmed
 */
#ifdef IN_RING0
# define VMMIsLongJumpArmed(a_pVCpu)                VMMR0IsLongJumpArmed(a_pVCpu)
#else
# define VMMIsLongJumpArmed(a_pVCpu)                (false)
#endif
276
277
/* Context-agnostic VMM API (available in all contexts; full documentation
   lives with the definitions in VMMAll.cpp / VMM.cpp). */
VMM_INT_DECL(RTRCPTR)       VMMGetStackRC(PVMCPU pVCpu);
VMMDECL(VMCPUID)            VMMGetCpuId(PVM pVM);
VMMDECL(PVMCPU)             VMMGetCpu(PVM pVM);
VMMDECL(PVMCPU)             VMMGetCpu0(PVM pVM);
VMMDECL(PVMCPU)             VMMGetCpuById(PVM pVM, VMCPUID idCpu);
VMMR3DECL(PVMCPU)           VMMR3GetCpuByIdU(PUVM pVM, VMCPUID idCpu);
VMM_INT_DECL(uint32_t)      VMMGetSvnRev(void);
VMM_INT_DECL(VMMSWITCHER)   VMMGetSwitcher(PVM pVM);
VMM_INT_DECL(bool)          VMMIsInRing3Call(PVMCPU pVCpu);
VMM_INT_DECL(void)          VMMTrashVolatileXMMRegs(void);
288
289
290/** @defgroup grp_vmm_api_r0 The VMM Host Context Ring 0 API
291 * @{
292 */
293
/**
 * The VMMR0Entry() codes.
 *
 * Operation codes dispatched by the ring-0 entry points (VMMR0EntryFast /
 * VMMR0EntryEx).  The values are grouped by subsystem in blocks of spare
 * numbering space so new operations can be added without renumbering.
 */
typedef enum VMMR0OPERATION
{
    /** Run guest context. */
    VMMR0_DO_RAW_RUN = SUP_VMMR0_DO_RAW_RUN,
    /** Run guest code using the available hardware acceleration technology. */
    VMMR0_DO_HM_RUN = SUP_VMMR0_DO_HM_RUN,
    /** Official NOP that we use for profiling. */
    VMMR0_DO_NOP = SUP_VMMR0_DO_NOP,
    /** Run guest code using a native (host specific) execution API (NEM). */
    VMMR0_DO_NEM_RUN = SUP_VMMR0_DO_NEM_RUN,
    /** Official slow ioctl NOP that we use for profiling. */
    VMMR0_DO_SLOW_NOP,

    /** Ask the GVMM to create a new VM. */
    VMMR0_DO_GVMM_CREATE_VM = 32,
    /** Ask the GVMM to destroy the VM. */
    VMMR0_DO_GVMM_DESTROY_VM,
    /** Call GVMMR0RegisterVCpu(). */
    VMMR0_DO_GVMM_REGISTER_VMCPU,
    /** Call GVMMR0DeregisterVCpu(). */
    VMMR0_DO_GVMM_DEREGISTER_VMCPU,
    /** Call GVMMR0SchedHalt(). */
    VMMR0_DO_GVMM_SCHED_HALT,
    /** Call GVMMR0SchedWakeUp(). */
    VMMR0_DO_GVMM_SCHED_WAKE_UP,
    /** Call GVMMR0SchedPoke(). */
    VMMR0_DO_GVMM_SCHED_POKE,
    /** Call GVMMR0SchedWakeUpAndPokeCpus(). */
    VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS,
    /** Call GVMMR0SchedPoll(). */
    VMMR0_DO_GVMM_SCHED_POLL,
    /** Call GVMMR0QueryStatistics(). */
    VMMR0_DO_GVMM_QUERY_STATISTICS,
    /** Call GVMMR0ResetStatistics(). */
    VMMR0_DO_GVMM_RESET_STATISTICS,

    /** Call VMMR0 Per VM Init. */
    VMMR0_DO_VMMR0_INIT = 64,
    /** Call VMMR0 Per VM EMT Init */
    VMMR0_DO_VMMR0_INIT_EMT,
    /** Call VMMR0 Per VM Termination. */
    VMMR0_DO_VMMR0_TERM,

    /** Setup the hardware accelerated raw-mode session. */
    VMMR0_DO_HM_SETUP_VM = 128,
    /** Attempt to enable or disable hardware accelerated raw-mode. */
    VMMR0_DO_HM_ENABLE,

    /** Call PGMR0PhysAllocateHandyPages(). */
    VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES = 192,
    /** Call PGMR0PhysFlushHandyPages(). */
    VMMR0_DO_PGM_FLUSH_HANDY_PAGES,
    /** Call PGMR0AllocateLargePage(). */
    VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE,
    /** Call PGMR0PhysSetupIommu(). */
    VMMR0_DO_PGM_PHYS_SETUP_IOMMU,

    /** Call GMMR0InitialReservation(). */
    VMMR0_DO_GMM_INITIAL_RESERVATION = 256,
    /** Call GMMR0UpdateReservation(). */
    VMMR0_DO_GMM_UPDATE_RESERVATION,
    /** Call GMMR0AllocatePages(). */
    VMMR0_DO_GMM_ALLOCATE_PAGES,
    /** Call GMMR0FreePages(). */
    VMMR0_DO_GMM_FREE_PAGES,
    /** Call GMMR0FreeLargePage(). */
    VMMR0_DO_GMM_FREE_LARGE_PAGE,
    /** Call GMMR0QueryHypervisorMemoryStatsReq(). */
    VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
    /** Call GMMR0QueryMemoryStatsReq(). */
    VMMR0_DO_GMM_QUERY_MEM_STATS,
    /** Call GMMR0BalloonedPages(). */
    VMMR0_DO_GMM_BALLOONED_PAGES,
    /** Call GMMR0MapUnmapChunk(). */
    VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
    /** Call GMMR0SeedChunk(). */
    VMMR0_DO_GMM_SEED_CHUNK,
    /** Call GMMR0RegisterSharedModule. */
    VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
    /** Call GMMR0UnregisterSharedModule. */
    VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE,
    /** Call GMMR0ResetSharedModules. */
    VMMR0_DO_GMM_RESET_SHARED_MODULES,
    /** Call GMMR0CheckSharedModules. */
    VMMR0_DO_GMM_CHECK_SHARED_MODULES,
    /** Call GMMR0FindDuplicatePage. */
    VMMR0_DO_GMM_FIND_DUPLICATE_PAGE,
    /** Call GMMR0QueryStatistics(). */
    VMMR0_DO_GMM_QUERY_STATISTICS,
    /** Call GMMR0ResetStatistics(). */
    VMMR0_DO_GMM_RESET_STATISTICS,

    /** Call PDMR0DriverCallReqHandler. */
    VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER = 320,
    /** Call PDMR0DeviceCallReqHandler. */
    VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER,

    /** Calls function in the hypervisor.
     * The caller must setup the hypervisor context so the call will be performed.
     * The difference between VMMR0_DO_RUN_GC and this one is the handling of
     * the return GC code. The return code will not be interpreted by this operation.
     */
    VMMR0_DO_CALL_HYPERVISOR = 384,

    /** Set a GVMM or GMM configuration value. */
    VMMR0_DO_GCFGM_SET_VALUE = 400,
    /** Query a GVMM or GMM configuration value. */
    VMMR0_DO_GCFGM_QUERY_VALUE,

    /** The start of the R0 service operations. */
    VMMR0_DO_SRV_START = 448,
    /** Call IntNetR0Open(). */
    VMMR0_DO_INTNET_OPEN,
    /** Call IntNetR0IfClose(). */
    VMMR0_DO_INTNET_IF_CLOSE,
    /** Call IntNetR0IfGetBufferPtrs(). */
    VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS,
    /** Call IntNetR0IfSetPromiscuousMode(). */
    VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE,
    /** Call IntNetR0IfSetMacAddress(). */
    VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS,
    /** Call IntNetR0IfSetActive(). */
    VMMR0_DO_INTNET_IF_SET_ACTIVE,
    /** Call IntNetR0IfSend(). */
    VMMR0_DO_INTNET_IF_SEND,
    /** Call IntNetR0IfWait(). */
    VMMR0_DO_INTNET_IF_WAIT,
    /** Call IntNetR0IfAbortWait(). */
    VMMR0_DO_INTNET_IF_ABORT_WAIT,

    /** Forward call to the PCI driver */
    VMMR0_DO_PCIRAW_REQ = 512,

    /** The end of the R0 service operations. */
    VMMR0_DO_SRV_END,

    /** Call NEMR0InitVM() (host specific). */
    VMMR0_DO_NEM_INIT_VM = 576,
    /** Call NEMR0InitVMPart2() (host specific). */
    VMMR0_DO_NEM_INIT_VM_PART_2,
    /** Call NEMR0MapPages() (host specific). */
    VMMR0_DO_NEM_MAP_PAGES,
    /** Call NEMR0UnmapPages() (host specific). */
    VMMR0_DO_NEM_UNMAP_PAGES,
    /** Call NEMR0ExportState() (host specific). */
    VMMR0_DO_NEM_EXPORT_STATE,
    /** Call NEMR0ImportState() (host specific). */
    VMMR0_DO_NEM_IMPORT_STATE,
    /** Call NEMR0QueryCpuTick() (host specific). */
    VMMR0_DO_NEM_QUERY_CPU_TICK,
    /** Call NEMR0ResumeCpuTickOnAll() (host specific). */
    VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL,
    /** Call NEMR0UpdateStatistics() (host specific). */
    VMMR0_DO_NEM_UPDATE_STATISTICS,
    /** Call NEMR0DoExperiment() (host specific, experimental, debug only). */
    VMMR0_DO_NEM_EXPERIMENT,

    /** Official call we use for testing Ring-0 APIs. */
    VMMR0_DO_TESTS = 640,
    /** Test the 32->64 bits switcher. */
    VMMR0_DO_TEST_SWITCHER3264,

    /** The usual 32-bit type blow up. */
    VMMR0_DO_32BIT_HACK = 0x7fffffff
} VMMR0OPERATION;
462
463
/**
 * Request buffer for VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE.
 * @todo Move to GCFGM.h when it's implemented.
 */
typedef struct GCFGMVALUEREQ
{
    /** The request header.*/
    SUPVMMR0REQHDR      Hdr;
    /** The support driver session handle. */
    PSUPDRVSESSION      pSession;
    /** The value.
     * This is input for the set request and output for the query. */
    uint64_t            u64Value;
    /** The variable name.
     * This is fixed sized just to make things simple for the mock-up. */
    char                szName[48];
} GCFGMVALUEREQ;
/** Pointer to a VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE request buffer.
 * @todo Move to GCFGM.h when it's implemented.
 */
typedef GCFGMVALUEREQ *PGCFGMVALUEREQ;
485
#if defined(IN_RING0) || defined(DOXYGEN_RUNNING)
/* Ring-0 entry points and EMT thread-context hook API (documented at the
   definitions in VMMR0.cpp). */
VMMR0DECL(void)      VMMR0EntryFast(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
VMMR0DECL(int)       VMMR0EntryEx(PGVM pGVM, PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
                                  PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
VMMR0_INT_DECL(int)  VMMR0TermVM(PGVM pGVM, PVM pVM, VMCPUID idCpu);
VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu);
VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu);
VMMR0_INT_DECL(int)  VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu);
VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu);
VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu);
VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu);

# ifdef LOG_ENABLED
VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu);
VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu);
VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu);
# else
/* Without logging the flush-disable state is a no-op that reads as "disabled". */
#  define            VMMR0LogFlushDisable(pVCpu)     do { } while(0)
#  define            VMMR0LogFlushEnable(pVCpu)      do { } while(0)
#  define            VMMR0IsLogFlushDisabled(pVCpu)  (true)
# endif /* LOG_ENABLED */
#endif /* IN_RING0 */
508
509/** @} */
510
511
#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
/** @defgroup grp_vmm_api_r3  The VMM Host Context Ring 3 API
 * @{
 */
/* Ring-3 VMM API (documented at the definitions in VMM.cpp / VMMR3.cpp). */
VMMR3_INT_DECL(int)     VMMR3Init(PVM pVM);
VMMR3_INT_DECL(int)     VMMR3InitR0(PVM pVM);
# ifdef VBOX_WITH_RAW_MODE
VMMR3_INT_DECL(int)     VMMR3InitRC(PVM pVM);
# endif
VMMR3_INT_DECL(int)     VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
VMMR3_INT_DECL(int)     VMMR3Term(PVM pVM);
VMMR3_INT_DECL(void)    VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta);
VMMR3_INT_DECL(int)     VMMR3UpdateLoggers(PVM pVM);
VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM);
VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM);
VMMR3_INT_DECL(int)     VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher);
VMMR3_INT_DECL(RTR0PTR) VMMR3GetHostToGuestSwitcher(PVM pVM, VMMSWITCHER enmSwitcher);
VMMR3_INT_DECL(int)     VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu);
# ifdef VBOX_WITH_RAW_MODE
VMMR3_INT_DECL(int)     VMMR3RawRunGC(PVM pVM, PVMCPU pVCpu);
VMMR3DECL(int)          VMMR3ResumeHyper(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(int)     VMMR3GetImportRC(PVM pVM, const char *pszSymbol, PRTRCPTR pRCPtrValue);
VMMR3DECL(int)          VMMR3CallRC(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, ...);
VMMR3DECL(int)          VMMR3CallRCV(PVM pVM, RTRCPTR RCPtrEntry, unsigned cArgs, va_list args);
# endif
VMMR3DECL(int)          VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
VMMR3_INT_DECL(int)     VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation);
VMMR3DECL(void)         VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr);
VMMR3_INT_DECL(void)    VMMR3YieldSuspend(PVM pVM);
VMMR3_INT_DECL(void)    VMMR3YieldStop(PVM pVM);
VMMR3_INT_DECL(void)    VMMR3YieldResume(PVM pVM);
VMMR3_INT_DECL(void)    VMMR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector);
VMMR3_INT_DECL(void)    VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu);
VMMR3DECL(int)          VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3DECL(int)          VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
VMMR3DECL(int)          VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
/** @defgroup grp_VMMR3EmtRendezvous_fFlags     VMMR3EmtRendezvous flags
 *  @{ */
/** Execution type mask. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK            UINT32_C(0x00000007)
/** Invalid execution type. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID         UINT32_C(0)
/** Let the EMTs execute the callback one by one (in no particular order).
 * Recursion from within the callback possible. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE      UINT32_C(1)
/** Let all the EMTs execute the callback at the same time.
 * Cannot recurse from the callback. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE     UINT32_C(2)
/** Only execute the callback on one EMT (no particular one).
 * Recursion from within the callback possible. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE            UINT32_C(3)
/** Let the EMTs execute the callback one by one in ascending order.
 * Recursion from within the callback possible. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING       UINT32_C(4)
/** Let the EMTs execute the callback one by one in descending order.
 * Recursion from within the callback possible. */
#define VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING      UINT32_C(5)
/** Stop after the first error.
 * This is not valid for any execution type where more than one EMT is active
 * at a time. */
#define VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR        UINT32_C(0x00000008)
/** Use VMREQFLAGS_PRIORITY when contacting the EMTs. */
#define VMMEMTRENDEZVOUS_FLAGS_PRIORITY             UINT32_C(0x00000010)
/** The valid flags. */
#define VMMEMTRENDEZVOUS_FLAGS_VALID_MASK           UINT32_C(0x0000001f)
/** @} */
VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu);
VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead);
VMMR3_INT_DECL(void) VMMR3InitR0StackUnwindState(PUVM pUVM, VMCPUID idCpu, PRTDBGUNWINDSTATE pState);
/** @} */
#endif /* IN_RING3 */
584
585
#if defined(IN_RC) || defined(DOXYGEN_RUNNING)
/** @defgroup grp_vmm_api_rc  The VMM Raw-Mode Context API
 * @{
 */
/* Raw-mode context entry/exit API (documented at the definitions). */
VMMRCDECL(int)      VMMRCEntry(PVM pVM, unsigned uOperation, unsigned uArg, ...);
VMMRCDECL(void)     VMMRCGuestToHost(PVM pVM, int rc);
VMMRCDECL(void)     VMMRCLogFlushIfFull(PVM pVM);
/** @} */
#endif /* IN_RC */
595
#if defined(IN_RC) || defined(IN_RING0) || defined(DOXYGEN_RUNNING)
/** @defgroup grp_vmm_api_rz  The VMM Raw-Mode and Ring-0 Context API
 * @{
 */
/* Ring-3 call-back API shared by raw-mode and ring-0 contexts (see the
   VMMCALLRING3 operation codes above; documented at the definitions). */
VMMRZDECL(int)      VMMRZCallRing3(PVM pVM, PVMCPU pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg);
VMMRZDECL(int)      VMMRZCallRing3NoCpu(PVM pVM, VMMCALLRING3 enmOperation, uint64_t uArg);
VMMRZDECL(void)     VMMRZCallRing3Disable(PVMCPU pVCpu);
VMMRZDECL(void)     VMMRZCallRing3Enable(PVMCPU pVCpu);
VMMRZDECL(bool)     VMMRZCallRing3IsEnabled(PVMCPU pVCpu);
VMMRZDECL(int)      VMMRZCallRing3SetNotification(PVMCPU pVCpu, R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallback, RTR0PTR pvUser);
VMMRZDECL(void)     VMMRZCallRing3RemoveNotification(PVMCPU pVCpu);
VMMRZDECL(bool)     VMMRZCallRing3IsNotificationSet(PVMCPU pVCpu);
/** @} */
#endif
610
611
612/** @} */
613
614/** @} */
615RT_C_DECLS_END
616
617#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette