VirtualBox

source: vbox/trunk/include/VBox/vmm/vmm.h@ 90959

Last change on this file since 90959 was 90948, checked in by vboxsync, 4 years ago

VMM: Organize the logger structures both named member and arrays for easier access by the log flusher and init/term code. bugref:10086

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 24.3 KB
Line 
1/** @file
2 * VMM - The Virtual Machine Monitor.
3 */
4
5/*
6 * Copyright (C) 2006-2020 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef VBOX_INCLUDED_vmm_vmm_h
27#define VBOX_INCLUDED_vmm_vmm_h
28#ifndef RT_WITHOUT_PRAGMA_ONCE
29# pragma once
30#endif
31
32#include <VBox/types.h>
33#include <VBox/vmm/vmapi.h>
34#include <VBox/sup.h>
35#include <VBox/log.h>
36#include <iprt/stdarg.h>
37#include <iprt/thread.h>
38
39RT_C_DECLS_BEGIN
40
41/** @defgroup grp_vmm The Virtual Machine Monitor
42 * @{
43 */
44
45/** @defgroup grp_vmm_api The Virtual Machine Monitor API
46 * @{
47 */
48
49
50/**
51 * VMMRZCallRing3 operations.
52 */
53typedef enum VMMCALLRING3
54{
55 /** Invalid operation. */
56 VMMCALLRING3_INVALID = 0,
57 /** Acquire the PDM lock. */
58 VMMCALLRING3_PDM_LOCK,
59 /** Acquire the critical section specified as argument. */
60 VMMCALLRING3_PDM_CRIT_SECT_ENTER,
61 /** Enter the R/W critical section (in argument) exclusively. */
62 VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL,
63 /** Enter the R/W critical section (in argument) shared. */
64 VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED,
65 /** Acquire the PGM lock. */
66 VMMCALLRING3_PGM_LOCK,
67 /** Grow the PGM shadow page pool. */
68 VMMCALLRING3_PGM_POOL_GROW,
69 /** Maps a chunk into ring-3. */
70 VMMCALLRING3_PGM_MAP_CHUNK,
71 /** Allocates more handy pages. */
72 VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES,
73 /** Allocates a large (2MB) page. */
74 VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE,
75 /** Acquire the MM hypervisor heap lock. */
76 VMMCALLRING3_MMHYPER_LOCK,
77 /** Set the VM error message. */
78 VMMCALLRING3_VM_SET_ERROR,
79 /** Set the VM runtime error message. */
80 VMMCALLRING3_VM_SET_RUNTIME_ERROR,
81 /** Signal a ring-0 assertion. */
82 VMMCALLRING3_VM_R0_ASSERTION,
83 /** Ring switch to force preemption. This is also used by PDMCritSect to
84 * handle VERR_INTERRUPTED in kernel context. */
85 VMMCALLRING3_VM_R0_PREEMPT,
86 /** The usual 32-bit hack. */
87 VMMCALLRING3_32BIT_HACK = 0x7fffffff
88} VMMCALLRING3;
89
90/**
91 * VMMRZCallRing3 notification callback.
92 *
93 * @returns VBox status code.
94 * @param pVCpu The cross context virtual CPU structure.
95 * @param enmOperation The operation causing the ring-3 jump.
96 * @param pvUser The user argument.
97 */
98typedef DECLCALLBACKTYPE(int, FNVMMR0CALLRING3NOTIFICATION,(PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, void *pvUser));
99/** Pointer to a FNVMMR0CALLRING3NOTIFICATION(). */
100typedef FNVMMR0CALLRING3NOTIFICATION *PFNVMMR0CALLRING3NOTIFICATION;
101
102/**
103 * Rendezvous callback.
104 *
105 * @returns VBox strict status code - EM scheduling. Do not return
106 * informational status codes other than the ones used by EM for
107 * scheduling.
108 *
109 * @param pVM The cross context VM structure.
110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
111 * @param pvUser The user argument.
112 */
113typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMMEMTRENDEZVOUS,(PVM pVM, PVMCPU pVCpu, void *pvUser));
114/** Pointer to a rendezvous callback function. */
115typedef FNVMMEMTRENDEZVOUS *PFNVMMEMTRENDEZVOUS;
116
117/**
118 * Method table that the VMM uses to call back the user of the VMM.
119 */
120typedef struct VMM2USERMETHODS
121{
122 /** Magic value (VMM2USERMETHODS_MAGIC). */
123 uint32_t u32Magic;
124 /** Structure version (VMM2USERMETHODS_VERSION). */
125 uint32_t u32Version;
126
127 /**
128 * Save the VM state.
129 *
130 * @returns VBox status code.
131 * @param pThis Pointer to the callback method table.
132 * @param pUVM The user mode VM handle.
133 *
134 * @remarks This member shall be set to NULL if the operation is not
135 * supported.
136 */
137 DECLR3CALLBACKMEMBER(int, pfnSaveState,(PCVMM2USERMETHODS pThis, PUVM pUVM));
138 /** @todo Move pfnVMAtError and pfnCFGMConstructor here? */
139
140 /**
141 * EMT initialization notification callback.
142 *
143 * This is intended for doing per-thread initialization for EMTs (like COM
144 * init).
145 *
146 * @param pThis Pointer to the callback method table.
147 * @param pUVM The user mode VM handle.
148 * @param pUVCpu The user mode virtual CPU handle.
149 *
150 * @remarks This is optional and shall be set to NULL if not wanted.
151 */
152 DECLR3CALLBACKMEMBER(void, pfnNotifyEmtInit,(PCVMM2USERMETHODS pThis, PUVM pUVM, PUVMCPU pUVCpu));
153
154 /**
155 * EMT termination notification callback.
156 *
157 * This is intended for doing per-thread cleanups for EMTs (like COM).
158 *
159 * @param pThis Pointer to the callback method table.
160 * @param pUVM The user mode VM handle.
161 * @param pUVCpu The user mode virtual CPU handle.
162 *
163 * @remarks This is optional and shall be set to NULL if not wanted.
164 */
165 DECLR3CALLBACKMEMBER(void, pfnNotifyEmtTerm,(PCVMM2USERMETHODS pThis, PUVM pUVM, PUVMCPU pUVCpu));
166
167 /**
168 * PDM thread initialization notification callback.
169 *
170 * This is intended for doing per-thread initialization (like COM init).
171 *
172 * @param pThis Pointer to the callback method table.
173 * @param pUVM The user mode VM handle.
174 *
175 * @remarks This is optional and shall be set to NULL if not wanted.
176 */
177 DECLR3CALLBACKMEMBER(void, pfnNotifyPdmtInit,(PCVMM2USERMETHODS pThis, PUVM pUVM));
178
179 /**
180 * PDM thread termination notification callback.
181 *
182 * This is intended for doing per-thread cleanups (like COM).
183 *
184 * @param pThis Pointer to the callback method table.
185 * @param pUVM The user mode VM handle.
186 *
187 * @remarks This is optional and shall be set to NULL if not wanted.
188 */
189 DECLR3CALLBACKMEMBER(void, pfnNotifyPdmtTerm,(PCVMM2USERMETHODS pThis, PUVM pUVM));
190
191 /**
192 * Notification callback that a VM reset will be turned into a power off.
193 *
194 * @param pThis Pointer to the callback method table.
195 * @param pUVM The user mode VM handle.
196 *
197 * @remarks This is optional and shall be set to NULL if not wanted.
198 */
199 DECLR3CALLBACKMEMBER(void, pfnNotifyResetTurnedIntoPowerOff,(PCVMM2USERMETHODS pThis, PUVM pUVM));
200
201 /**
202 * Generic object query by UUID.
203 *
204 * @returns pointer to the queried object on success, NULL if not found.
205 *
206 * @param pThis Pointer to the callback method table.
207 * @param pUVM The user mode VM handle.
208 * @param pUuid The UUID of what's being queried. The UUIDs and the
209 * usage conventions are defined by the user.
210 *
211 * @remarks This is optional and shall be set to NULL if not wanted.
212 */
213 DECLR3CALLBACKMEMBER(void *, pfnQueryGenericObject,(PCVMM2USERMETHODS pThis, PUVM pUVM, PCRTUUID pUuid));
214
215 /** Magic value (VMM2USERMETHODS_MAGIC) marking the end of the structure. */
216 uint32_t u32EndMagic;
217} VMM2USERMETHODS;
218
219/** Magic value of the VMM2USERMETHODS (Franz Kafka). */
220#define VMM2USERMETHODS_MAGIC UINT32_C(0x18830703)
221/** The VMM2USERMETHODS structure version. */
222#define VMM2USERMETHODS_VERSION UINT32_C(0x00030000)
223
224
225/**
226 * Checks whether we've armed the ring-0 long jump machinery.
227 *
228 * @returns @c true / @c false
229 * @param a_pVCpu The caller's cross context virtual CPU structure.
230 * @thread EMT
231 * @sa VMMR0IsLongJumpArmed
232 */
233#ifdef IN_RING0
234# define VMMIsLongJumpArmed(a_pVCpu) VMMR0IsLongJumpArmed(a_pVCpu)
235#else
/* The long jump machinery only exists in ring-0; in all other contexts it is never armed. */
236# define VMMIsLongJumpArmed(a_pVCpu) (false)
237#endif
238
239
240VMMDECL(VMCPUID) VMMGetCpuId(PVMCC pVM);
241VMMDECL(PVMCPUCC) VMMGetCpu(PVMCC pVM);
242VMMDECL(PVMCPUCC) VMMGetCpu0(PVMCC pVM);
243VMMDECL(PVMCPUCC) VMMGetCpuById(PVMCC pVM, VMCPUID idCpu);
244VMMR3DECL(PVMCPUCC) VMMR3GetCpuByIdU(PUVM pVM, VMCPUID idCpu);
245VMM_INT_DECL(uint32_t) VMMGetSvnRev(void);
246VMM_INT_DECL(bool) VMMIsInRing3Call(PVMCPUCC pVCpu);
247VMM_INT_DECL(void) VMMTrashVolatileXMMRegs(void);
248
249
250/** @defgroup grp_vmm_api_r0 The VMM Host Context Ring 0 API
251 * @{
252 */
253
254/**
255 * The VMMR0Entry() codes.
256 */
257typedef enum VMMR0OPERATION
258{
259 /** Run guest code using the available hardware acceleration technology. */
260 VMMR0_DO_HM_RUN = SUP_VMMR0_DO_HM_RUN,
261 /** Run guest code using the NEM (native execution manager) ring-0 backend
262 * where the host provides one (see the VMMR0_DO_NEM_* operations below). */
262 VMMR0_DO_NEM_RUN = SUP_VMMR0_DO_NEM_RUN,
263 /** Official NOP that we use for profiling. */
264 VMMR0_DO_NOP = SUP_VMMR0_DO_NOP,
265 /** Official slow ioctl NOP that we use for profiling. */
266 VMMR0_DO_SLOW_NOP,
267
268 /** Ask the GVMM to create a new VM. */
269 VMMR0_DO_GVMM_CREATE_VM = 32,
270 /** Ask the GVMM to destroy the VM. */
271 VMMR0_DO_GVMM_DESTROY_VM,
272 /** Call GVMMR0RegisterVCpu(). */
273 VMMR0_DO_GVMM_REGISTER_VMCPU,
274 /** Call GVMMR0DeregisterVCpu(). */
275 VMMR0_DO_GVMM_DEREGISTER_VMCPU,
276 /** Call GVMMR0SchedHalt(). */
277 VMMR0_DO_GVMM_SCHED_HALT,
278 /** Call GVMMR0SchedWakeUp(). */
279 VMMR0_DO_GVMM_SCHED_WAKE_UP,
280 /** Call GVMMR0SchedPoke(). */
281 VMMR0_DO_GVMM_SCHED_POKE,
282 /** Call GVMMR0SchedWakeUpAndPokeCpus(). */
283 VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS,
284 /** Call GVMMR0SchedPoll(). */
285 VMMR0_DO_GVMM_SCHED_POLL,
286 /** Call GVMMR0QueryStatistics(). */
287 VMMR0_DO_GVMM_QUERY_STATISTICS,
288 /** Call GVMMR0ResetStatistics(). */
289 VMMR0_DO_GVMM_RESET_STATISTICS,
290
291 /** Call VMMR0 Per VM Init. */
292 VMMR0_DO_VMMR0_INIT = 64,
293 /** Call VMMR0 Per VM EMT Init */
294 VMMR0_DO_VMMR0_INIT_EMT,
295 /** Call VMMR0 Per VM Termination. */
296 VMMR0_DO_VMMR0_TERM,
297 /** Copy logger settings from userland, VMMR0UpdateLoggersReq(). */
298 VMMR0_DO_VMMR0_UPDATE_LOGGERS,
299 /** Used by the log flusher, VMMR0LogFlusher. */
300 VMMR0_DO_VMMR0_LOG_FLUSHER,
301
302 /** Setup hardware-assisted VM session. */
303 VMMR0_DO_HM_SETUP_VM = 128,
304 /** Attempt to enable or disable hardware-assisted mode. */
305 VMMR0_DO_HM_ENABLE,
306
307 /** Call PGMR0PhysAllocateHandyPages(). */
308 VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES = 192,
309 /** Call PGMR0PhysFlushHandyPages(). */
310 VMMR0_DO_PGM_FLUSH_HANDY_PAGES,
311 /** Call PGMR0AllocateLargePage(). */
312 VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE,
313 /** Call PGMR0PhysSetupIommu(). */
314 VMMR0_DO_PGM_PHYS_SETUP_IOMMU,
315 /** Call PGMR0PoolGrow(). */
316 VMMR0_DO_PGM_POOL_GROW,
317
318 /** Call GMMR0InitialReservation(). */
319 VMMR0_DO_GMM_INITIAL_RESERVATION = 256,
320 /** Call GMMR0UpdateReservation(). */
321 VMMR0_DO_GMM_UPDATE_RESERVATION,
322 /** Call GMMR0AllocatePages(). */
323 VMMR0_DO_GMM_ALLOCATE_PAGES,
324 /** Call GMMR0FreePages(). */
325 VMMR0_DO_GMM_FREE_PAGES,
326 /** Call GMMR0FreeLargePage(). */
327 VMMR0_DO_GMM_FREE_LARGE_PAGE,
328 /** Call GMMR0QueryHypervisorMemoryStatsReq(). */
329 VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS,
330 /** Call GMMR0QueryMemoryStatsReq(). */
331 VMMR0_DO_GMM_QUERY_MEM_STATS,
332 /** Call GMMR0BalloonedPages(). */
333 VMMR0_DO_GMM_BALLOONED_PAGES,
334 /** Call GMMR0MapUnmapChunk(). */
335 VMMR0_DO_GMM_MAP_UNMAP_CHUNK,
336 /** Call GMMR0SeedChunk(). */
337 VMMR0_DO_GMM_SEED_CHUNK,
338 /** Call GMMR0RegisterSharedModule. */
339 VMMR0_DO_GMM_REGISTER_SHARED_MODULE,
340 /** Call GMMR0UnregisterSharedModule. */
341 VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE,
342 /** Call GMMR0ResetSharedModules. */
343 VMMR0_DO_GMM_RESET_SHARED_MODULES,
344 /** Call GMMR0CheckSharedModules. */
345 VMMR0_DO_GMM_CHECK_SHARED_MODULES,
346 /** Call GMMR0FindDuplicatePage. */
347 VMMR0_DO_GMM_FIND_DUPLICATE_PAGE,
348 /** Call GMMR0QueryStatistics(). */
349 VMMR0_DO_GMM_QUERY_STATISTICS,
350 /** Call GMMR0ResetStatistics(). */
351 VMMR0_DO_GMM_RESET_STATISTICS,
352
353 /** Call PDMR0DriverCallReqHandler. */
354 VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER = 320,
355 /** Call PDMR0DeviceCreateReqHandler. */
356 VMMR0_DO_PDM_DEVICE_CREATE,
357 /** Call PDMR0DeviceGenCallReqHandler. */
358 VMMR0_DO_PDM_DEVICE_GEN_CALL,
359 /** Old style device compat: Set ring-0 critical section. */
360 VMMR0_DO_PDM_DEVICE_COMPAT_SET_CRITSECT,
361
362 /** Set a GVMM or GMM configuration value. */
363 VMMR0_DO_GCFGM_SET_VALUE = 400,
364 /** Query a GVMM or GMM configuration value. */
365 VMMR0_DO_GCFGM_QUERY_VALUE,
366
367 /** The start of the R0 service operations. */
368 VMMR0_DO_SRV_START = 448,
369 /** Call IntNetR0Open(). */
370 VMMR0_DO_INTNET_OPEN,
371 /** Call IntNetR0IfClose(). */
372 VMMR0_DO_INTNET_IF_CLOSE,
373 /** Call IntNetR0IfGetBufferPtrs(). */
374 VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS,
375 /** Call IntNetR0IfSetPromiscuousMode(). */
376 VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE,
377 /** Call IntNetR0IfSetMacAddress(). */
378 VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS,
379 /** Call IntNetR0IfSetActive(). */
380 VMMR0_DO_INTNET_IF_SET_ACTIVE,
381 /** Call IntNetR0IfSend(). */
382 VMMR0_DO_INTNET_IF_SEND,
383 /** Call IntNetR0IfWait(). */
384 VMMR0_DO_INTNET_IF_WAIT,
385 /** Call IntNetR0IfAbortWait(). */
386 VMMR0_DO_INTNET_IF_ABORT_WAIT,
387
388#if 0
389 /** Forward call to the PCI driver */
390 VMMR0_DO_PCIRAW_REQ = 512,
391#endif
392
393 /** The end of the R0 service operations. */
394 VMMR0_DO_SRV_END,
395
396 /** Call NEMR0InitVM() (host specific). */
397 VMMR0_DO_NEM_INIT_VM = 576,
398 /** Call NEMR0InitVMPart2() (host specific). */
399 VMMR0_DO_NEM_INIT_VM_PART_2,
400 /** Call NEMR0MapPages() (host specific). */
401 VMMR0_DO_NEM_MAP_PAGES,
402 /** Call NEMR0UnmapPages() (host specific). */
403 VMMR0_DO_NEM_UNMAP_PAGES,
404 /** Call NEMR0ExportState() (host specific). */
405 VMMR0_DO_NEM_EXPORT_STATE,
406 /** Call NEMR0ImportState() (host specific). */
407 VMMR0_DO_NEM_IMPORT_STATE,
408 /** Call NEMR0QueryCpuTick() (host specific). */
409 VMMR0_DO_NEM_QUERY_CPU_TICK,
410 /** Call NEMR0ResumeCpuTickOnAll() (host specific). */
411 VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL,
412 /** Call NEMR0UpdateStatistics() (host specific). */
413 VMMR0_DO_NEM_UPDATE_STATISTICS,
414 /** Call NEMR0DoExperiment() (host specific, experimental, debug only). */
415 VMMR0_DO_NEM_EXPERIMENT,
416
417 /** Grow the I/O port registration tables. */
418 VMMR0_DO_IOM_GROW_IO_PORTS = 640,
419 /** Grow the I/O port statistics tables. */
420 VMMR0_DO_IOM_GROW_IO_PORT_STATS,
421 /** Grow the MMIO registration tables. */
422 VMMR0_DO_IOM_GROW_MMIO_REGS,
423 /** Grow the MMIO statistics tables. */
424 VMMR0_DO_IOM_GROW_MMIO_STATS,
425 /** Synchronize statistics indices for I/O ports and MMIO regions. */
426 VMMR0_DO_IOM_SYNC_STATS_INDICES,
427
428 /** Call DBGFR0TraceCreateReqHandler. */
429 VMMR0_DO_DBGF_TRACER_CREATE = 704,
430 /** Call DBGFR0TraceCallReqHandler. */
431 VMMR0_DO_DBGF_TRACER_CALL_REQ_HANDLER,
432 /** Call DBGFR0BpInitReqHandler(). */
433 VMMR0_DO_DBGF_BP_INIT,
434 /** Call DBGFR0BpChunkAllocReqHandler(). */
435 VMMR0_DO_DBGF_BP_CHUNK_ALLOC,
436 /** Call DBGFR0BpL2TblChunkAllocReqHandler(). */
437 VMMR0_DO_DBGF_BP_L2_TBL_CHUNK_ALLOC,
438 /** Call DBGFR0BpOwnerInitReqHandler(). */
439 VMMR0_DO_DBGF_BP_OWNER_INIT,
440 /** Call DBGFR0BpPortIoInitReqHandler(). */
441 VMMR0_DO_DBGF_BP_PORTIO_INIT,
442
443 /** Grow a timer queue. */
444 VMMR0_DO_TM_GROW_TIMER_QUEUE = 768,
445
446 /** Official call we use for testing Ring-0 APIs. */
447 VMMR0_DO_TESTS = 2048,
448
449 /** The usual 32-bit type blow up. */
450 VMMR0_DO_32BIT_HACK = 0x7fffffff
451} VMMR0OPERATION;
452
453
454/**
455 * Request buffer for VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE.
456 * @todo Move to GCFGM.h when it's implemented.
457 */
458typedef struct GCFGMVALUEREQ
459{
460 /** The request header. */
461 SUPVMMR0REQHDR Hdr;
462 /** The support driver session handle. */
463 PSUPDRVSESSION pSession;
464 /** The value.
465 * This is input for the set request and output for the query. */
466 uint64_t u64Value;
467 /** The variable name.
468 * This is fixed sized just to make things simple for the mock-up. */
469 char szName[48];
470} GCFGMVALUEREQ;
471/** Pointer to a VMMR0_DO_GCFGM_SET_VALUE and VMMR0_DO_GCFGM_QUERY_VALUE request buffer.
472 * @todo Move to GCFGM.h when it's implemented.
473 */
474typedef GCFGMVALUEREQ *PGCFGMVALUEREQ;
475
476
477/**
478 * Request package for VMMR0_DO_VMMR0_UPDATE_LOGGERS.
479 *
480 * In addition, the u64Arg selects the logger sets: @c false for debug, @c true
481 * for release.
482 */
483typedef struct VMMR0UPDATELOGGERSREQ
484{
485 /** The request header. */
486 SUPVMMR0REQHDR Hdr;
487 /** The current logger flags (RTLOGFLAGS). */
488 uint64_t fFlags;
489 /** Groups, assuming same group layout as ring-3. */
490 uint32_t cGroups;
491 /** CRC32 of the group names. */
492 uint32_t uGroupCrc32;
493 /** Per-group settings, variable size. */
494 RT_FLEXIBLE_ARRAY_EXTENSION
495 uint32_t afGroups[RT_FLEXIBLE_ARRAY];
496} VMMR0UPDATELOGGERSREQ;
497/** Pointer to a VMMR0_DO_VMMR0_UPDATE_LOGGERS request. */
498typedef VMMR0UPDATELOGGERSREQ *PVMMR0UPDATELOGGERSREQ;
499
500#if defined(IN_RING0) || defined(DOXYGEN_RUNNING)
501
502/**
503 * Structure VMMR0EmtPrepareToBlock uses to pass info to
504 * VMMR0EmtResumeAfterBlocking.
505 */
506typedef struct VMMR0EMTBLOCKCTX
507{
508 /** Magic value (VMMR0EMTBLOCKCTX_MAGIC). */
509 uint32_t uMagic;
510 /** Set if we were in HM context, clear if not. */
511 bool fWasInHmContext;
512} VMMR0EMTBLOCKCTX;
513/** Pointer to a VMMR0EmtPrepareToBlock context structure. */
514typedef VMMR0EMTBLOCKCTX *PVMMR0EMTBLOCKCTX;
515/** Magic value for VMMR0EMTBLOCKCTX::uMagic (Paul Desmond). */
516#define VMMR0EMTBLOCKCTX_MAGIC UINT32_C(0x19261125)
517/** Magic value for VMMR0EMTBLOCKCTX::uMagic when its out of context. */
518#define VMMR0EMTBLOCKCTX_MAGIC_DEAD UINT32_C(0x19770530)
519
520VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation);
521VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
522 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION);
523VMMR0_INT_DECL(int) VMMR0InitPerVMData(PGVM pGVM);
524VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu);
525VMMR0_INT_DECL(void) VMMR0CleanupVM(PGVM pGVM);
526VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu);
527VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu);
528VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu);
529VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu);
530VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu);
531VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu);
532VMMR0_INT_DECL(int) VMMR0EmtPrepareToBlock(PVMCPUCC pVCpu, int rcBusy, const char *pszCaller, void *pvLock,
533 PVMMR0EMTBLOCKCTX pCtx);
534VMMR0_INT_DECL(void) VMMR0EmtResumeAfterBlocking(PVMCPUCC pVCpu, PVMMR0EMTBLOCKCTX pCtx);
535VMMR0_INT_DECL(PRTLOGGER) VMMR0GetReleaseLogger(PVMCPUCC pVCpu);
536#endif /* IN_RING0 */
537
538/** @} */
539
540
541#if defined(IN_RING3) || defined(DOXYGEN_RUNNING)
542/** @defgroup grp_vmm_api_r3 The VMM Host Context Ring 3 API
543 * @{
544 */
545VMMR3_INT_DECL(int) VMMR3Init(PVM pVM);
546VMMR3_INT_DECL(int) VMMR3InitR0(PVM pVM);
547VMMR3_INT_DECL(int) VMMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
548VMMR3_INT_DECL(int) VMMR3Term(PVM pVM);
549VMMR3_INT_DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta);
550VMMR3_INT_DECL(int) VMMR3UpdateLoggers(PVM pVM);
551VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM);
552VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM);
553VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu);
554VMMR3DECL(int) VMMR3CallR0(PVM pVM, uint32_t uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
555VMMR3_INT_DECL(int) VMMR3CallR0Emt(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr);
556VMMR3_INT_DECL(VBOXSTRICTRC) VMMR3CallR0EmtFast(PVM pVM, PVMCPU pVCpu, VMMR0OPERATION enmOperation);
557VMMR3DECL(void) VMMR3FatalDump(PVM pVM, PVMCPU pVCpu, int rcErr);
558VMMR3_INT_DECL(void) VMMR3YieldSuspend(PVM pVM);
559VMMR3_INT_DECL(void) VMMR3YieldStop(PVM pVM);
560VMMR3_INT_DECL(void) VMMR3YieldResume(PVM pVM);
561VMMR3_INT_DECL(void) VMMR3SendStartupIpi(PVM pVM, VMCPUID idCpu, uint32_t uVector);
562VMMR3_INT_DECL(void) VMMR3SendInitIpi(PVM pVM, VMCPUID idCpu);
563VMMR3DECL(int) VMMR3RegisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
564VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
565VMMR3DECL(int) VMMR3EmtRendezvous(PVM pVM, uint32_t fFlags, PFNVMMEMTRENDEZVOUS pfnRendezvous, void *pvUser);
566/** @defgroup grp_VMMR3EmtRendezvous_fFlags VMMR3EmtRendezvous flags
567 * @{ */
568/** Execution type mask. */
569#define VMMEMTRENDEZVOUS_FLAGS_TYPE_MASK UINT32_C(0x00000007)
570/** Invalid execution type. */
571#define VMMEMTRENDEZVOUS_FLAGS_TYPE_INVALID UINT32_C(0)
572/** Let the EMTs execute the callback one by one (in no particular order).
573 * Recursion from within the callback possible. */
574#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE UINT32_C(1)
575/** Let all the EMTs execute the callback at the same time.
576 * Cannot recurse from the callback. */
577#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE UINT32_C(2)
578/** Only execute the callback on one EMT (no particular one).
579 * Recursion from within the callback possible. */
580#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE UINT32_C(3)
581/** Let the EMTs execute the callback one by one in ascending order.
582 * Recursion from within the callback possible. */
583#define VMMEMTRENDEZVOUS_FLAGS_TYPE_ASCENDING UINT32_C(4)
584/** Let the EMTs execute the callback one by one in descending order.
585 * Recursion from within the callback possible. */
586#define VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING UINT32_C(5)
587/** Stop after the first error.
588 * This is not valid for any execution type where more than one EMT is active
589 * at a time. */
590#define VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR UINT32_C(0x00000008)
591/** Use VMREQFLAGS_PRIORITY when contacting the EMTs. */
592#define VMMEMTRENDEZVOUS_FLAGS_PRIORITY UINT32_C(0x00000010)
593/** The valid flags. */
594#define VMMEMTRENDEZVOUS_FLAGS_VALID_MASK UINT32_C(0x0000001f)
595/** @} */
596VMMR3_INT_DECL(int) VMMR3EmtRendezvousFF(PVM pVM, PVMCPU pVCpu);
597VMMR3_INT_DECL(void) VMMR3SetMayHaltInRing0(PVMCPU pVCpu, bool fMayHaltInRing0, uint32_t cNsSpinBlockThreshold);
598VMMR3_INT_DECL(int) VMMR3ReadR0Stack(PVM pVM, VMCPUID idCpu, RTHCUINTPTR R0Addr, void *pvBuf, size_t cbRead);
599VMMR3_INT_DECL(void) VMMR3InitR0StackUnwindState(PUVM pUVM, VMCPUID idCpu, PRTDBGUNWINDSTATE pState);
600/** @} */
601#endif /* IN_RING3 */
602
603
604#if defined(IN_RC) || defined(IN_RING0) || defined(DOXYGEN_RUNNING)
605/** @defgroup grp_vmm_api_rz The VMM Raw-Mode and Ring-0 Context API
606 * @{
607 */
608VMMRZDECL(int) VMMRZCallRing3(PVMCC pVMCC, PVMCPUCC pVCpu, VMMCALLRING3 enmOperation, uint64_t uArg);
609VMMRZDECL(int) VMMRZCallRing3NoCpu(PVMCC pVM, VMMCALLRING3 enmOperation, uint64_t uArg);
610VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPUCC pVCpu);
611VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPUCC pVCpu);
612VMMRZDECL(bool) VMMRZCallRing3IsEnabled(PVMCPUCC pVCpu);
613VMMRZDECL(int) VMMRZCallRing3SetNotification(PVMCPUCC pVCpu, R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallback, RTR0PTR pvUser);
614VMMRZDECL(void) VMMRZCallRing3RemoveNotification(PVMCPUCC pVCpu);
615VMMRZDECL(bool) VMMRZCallRing3IsNotificationSet(PVMCPUCC pVCpu);
616/** @} */
617#endif
618
619
620/** Wrapper around AssertReleaseMsgReturn that avoids tripping up in the
621 * kernel when we don't have a setjmp in place. */
622#ifdef IN_RING0
623# define VMM_ASSERT_RELEASE_MSG_RETURN(a_pVM, a_Expr, a_Msg, a_rc) do { \
624 if (RT_LIKELY(a_Expr)) { /* likely */ } \
625 else \
626 { \
627 PVMCPUCC pVCpuAssert = VMMGetCpu(a_pVM); \
628 if (pVCpuAssert && VMMR0IsLongJumpArmed(pVCpuAssert)) \
629 AssertReleaseMsg(a_Expr, a_Msg); \
630 else \
631 AssertLogRelMsg(a_Expr, a_Msg); \
632 return (a_rc); \
633 } \
634 } while (0)
635#else
636# define VMM_ASSERT_RELEASE_MSG_RETURN(a_pVM, a_Expr, a_Msg, a_rc) AssertReleaseMsgReturn(a_Expr, a_Msg, a_rc)
637#endif
638
639/** @} */
640
641/** @} */
642RT_C_DECLS_END
643
644#endif /* !VBOX_INCLUDED_vmm_vmm_h */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette