VirtualBox

source: vbox/trunk/src/VBox/VMM/HWACCMInternal.h@ 13885

Last change on this file since 13885 was 13885, checked in by vboxsync, 16 years ago

fix 32-bit builds

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 20.6 KB
Line 
1/* $Id: HWACCMInternal.h 13885 2008-11-05 17:41:57Z vboxsync $ */
2/** @file
3 * HWACCM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#ifndef ___HWACCMInternal_h
23#define ___HWACCMInternal_h
24
25#include <VBox/cdefs.h>
26#include <VBox/types.h>
27#include <VBox/em.h>
28#include <VBox/stam.h>
29#include <VBox/dis.h>
30#include <VBox/hwaccm.h>
31#include <VBox/pgm.h>
32#include <VBox/cpum.h>
33#include <iprt/memobj.h>
34#include <iprt/cpuset.h>
35#include <iprt/mp.h>
36
#if HC_ARCH_BITS == 64
/* Enable 64 bits guest support (only possible on 64-bit hosts). */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif

/* Feature switches: VT-x real-mode emulation, EPT and VPID support. */
#define HWACCM_VMX_EMULATE_REALMODE
#define HWACCM_VTX_WITH_EPT
#define HWACCM_VTX_WITH_VPID
46__BEGIN_DECLS
47
48
49/** @defgroup grp_hwaccm_int Internal
50 * @ingroup grp_hwaccm
51 * @internal
52 * @{
53 */
54
55
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT 0x100
/** Mask for indexing the exit reason statistics table (MAX_EXITREASON_STAT - 1). */
#define MASK_EXITREASON_STAT 0xff
59
/** @name Changed flags
 * These flags are used to keep track of which important registers
 * have been changed since they were last reset.
 * @{
 */
#define HWACCM_CHANGED_GUEST_FPU              RT_BIT(0)
#define HWACCM_CHANGED_GUEST_CR0              RT_BIT(1)
#define HWACCM_CHANGED_GUEST_CR3              RT_BIT(2)
#define HWACCM_CHANGED_GUEST_CR4              RT_BIT(3)
#define HWACCM_CHANGED_GUEST_GDTR             RT_BIT(4)
#define HWACCM_CHANGED_GUEST_IDTR             RT_BIT(5)
#define HWACCM_CHANGED_GUEST_LDTR             RT_BIT(6)
#define HWACCM_CHANGED_GUEST_TR               RT_BIT(7)
#define HWACCM_CHANGED_GUEST_SYSENTER_MSR     RT_BIT(8)
#define HWACCM_CHANGED_GUEST_SEGMENT_REGS     RT_BIT(9)
#define HWACCM_CHANGED_GUEST_DEBUG            RT_BIT(10)
#define HWACCM_CHANGED_HOST_CONTEXT           RT_BIT(11)

/** All of the changed flags: every guest flag plus the host context flag. */
#define HWACCM_CHANGED_ALL      (  HWACCM_CHANGED_GUEST_SEGMENT_REGS \
                                 | HWACCM_CHANGED_GUEST_CR0          \
                                 | HWACCM_CHANGED_GUEST_CR3          \
                                 | HWACCM_CHANGED_GUEST_CR4          \
                                 | HWACCM_CHANGED_GUEST_GDTR         \
                                 | HWACCM_CHANGED_GUEST_IDTR         \
                                 | HWACCM_CHANGED_GUEST_LDTR         \
                                 | HWACCM_CHANGED_GUEST_TR           \
                                 | HWACCM_CHANGED_GUEST_SYSENTER_MSR \
                                 | HWACCM_CHANGED_GUEST_FPU          \
                                 | HWACCM_CHANGED_GUEST_DEBUG        \
                                 | HWACCM_CHANGED_HOST_CONTEXT)

/** All of the guest changed flags, i.e. HWACCM_CHANGED_ALL without
 *  HWACCM_CHANGED_HOST_CONTEXT. */
#define HWACCM_CHANGED_ALL_GUEST (  HWACCM_CHANGED_GUEST_SEGMENT_REGS \
                                  | HWACCM_CHANGED_GUEST_CR0          \
                                  | HWACCM_CHANGED_GUEST_CR3          \
                                  | HWACCM_CHANGED_GUEST_CR4          \
                                  | HWACCM_CHANGED_GUEST_GDTR         \
                                  | HWACCM_CHANGED_GUEST_IDTR         \
                                  | HWACCM_CHANGED_GUEST_LDTR         \
                                  | HWACCM_CHANGED_GUEST_TR           \
                                  | HWACCM_CHANGED_GUEST_SYSENTER_MSR \
                                  | HWACCM_CHANGED_GUEST_DEBUG        \
                                  | HWACCM_CHANGED_GUEST_FPU)

/** @} */
104
/** @name Intercepted traps
 * Traps that need to be intercepted so we can correctly dispatch them to the guest if required.
 * Currently #NM and #PF only.
 *
 * Note: each mask is fully parenthesized.  The previous unparenthesized
 * expansions were a precedence hazard: in an expression such as
 * (x & HWACCM_VMX_TRAP_MASK) the bitwise & would bind to the first
 * RT_BIT() term only, since & has higher precedence than |.
 * @{
 */
#ifdef VBOX_STRICT
#define HWACCM_VMX_TRAP_MASK            (  RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_PF) | RT_BIT(X86_XCPT_UD) \
                                         | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_MF))
#define HWACCM_SVM_TRAP_MASK            HWACCM_VMX_TRAP_MASK
#else
#define HWACCM_VMX_TRAP_MASK            (RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_PF))
#define HWACCM_SVM_TRAP_MASK            (RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_PF))
#endif
/* All exceptions have to be intercepted in emulated real-mode (minus #NM & #PF as they are always intercepted). */
#define HWACCM_VMX_TRAP_MASK_REALMODE   (  RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) | RT_BIT(X86_XCPT_BP) \
                                         | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) | RT_BIT(X86_XCPT_UD)  | RT_BIT(X86_XCPT_DF) \
                                         | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
                                         | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) \
                                         | RT_BIT(X86_XCPT_MC) | RT_BIT(X86_XCPT_XF))
/** @} */
119
120
/** Maximum resume loops allowed in ring 0 (safety precaution) */
#define HWACCM_MAX_RESUME_LOOPS         1024

/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
#define HWACCM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
#define HWACCM_VTX_TSS_SIZE             (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)
/** Total guest mapped memory needed (identity page table + TSS). */
#define HWACCM_VTX_TOTAL_DEVHEAP_MEM    (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)

/** HWACCM saved state (SSM) version. */
#define HWACCM_SSM_VERSION              3
134
/** Per-cpu information. (host) */
typedef struct
{
    /** Id of the host cpu this entry describes. */
    RTCPUID     idCpu;

    /** R0 memory object for this cpu.
     * NOTE(review): allocated/used by the ring-0 init code; the exact
     * contents (VMXON region / host save area) are not visible in this
     * header — confirm against HWACCMR0. */
    RTR0MEMOBJ  pMemObj;
    /* Current ASID (AMD-V)/VPID (Intel) */
    uint32_t    uCurrentASID;
    /* TLB flush count */
    uint32_t    cTLBFlushes;

    /* Set the first time a cpu is used to make sure we start with a clean TLB. */
    bool        fFlushTLB;

    /** Configured for VT-x or AMD-V. */
    bool        fConfigured;

    /** In use by our code. (for power suspend) */
    volatile bool fInUse;
} HWACCM_CPUINFO;
/** Pointer to per-cpu (host) information. */
typedef HWACCM_CPUINFO *PHWACCM_CPUINFO;
156
/** VT-x capability qword: a VMX control capability MSR viewed either as a
 *  single 64-bit value or as its two 32-bit halves. */
typedef union
{
    struct
    {
        uint32_t disallowed0;   /**< Low dword of the MSR. */
        uint32_t allowed1;      /**< High dword of the MSR. */
    } n;
    uint64_t u;                 /**< The MSR as one 64-bit value. */
} VMX_CAPABILITY;
167
/**
 * HWACCM VM Instance data.
 * Changes to this must be checked against the padding of the cfgm union in VM!
 */
typedef struct HWACCM
{
    /** Set when we've initialized VMX or SVM. */
    bool                        fInitialized;
    /** Set when we're using VMX/SVM at that moment. */
    bool                        fActive;

    /** Set when hardware acceleration is allowed. */
    bool                        fAllowed;

    /** Set if nested paging is enabled. */
    bool                        fNestedPaging;

    /** Set if nested paging is allowed. */
    bool                        fAllowNestedPaging;
    /** Set if VT-x VPID is allowed. */
    bool                        fAllowVPID;

    /** Explicit alignment padding to make 32-bit gcc align u64RegisterMask
     * naturally. */
    bool                        padding[1];

    /** And mask for copying register contents. */
    uint64_t                    u64RegisterMask;

    /** Maximum ASID allowed. */
    RTUINT                      uMaxASID;

#if HC_ARCH_BITS == 32
    /** Alignment padding so the layout matches 64-bit hosts. */
    uint32_t                    Alignment0;
#endif

    /** VT-x state. */
    struct
    {
        /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
        bool                        fSupported;

        /** Set when we've enabled VMX. */
        bool                        fEnabled;

        /** Set if VPID is supported. */
        bool                        fVPID;

        /** Virtual address of the TSS page used for real mode emulation. */
        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;

        /** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;

        /** R0 memory object for the virtual APIC mmio cache. */
        RTR0MEMOBJ                  pMemObjAPIC;
        /** Physical address of the virtual APIC mmio cache. */
        RTHCPHYS                    pAPICPhys;
        /** Virtual address of the virtual APIC mmio cache. */
        R0PTRTYPE(uint8_t *)        pAPIC;

        /** R0 memory object for the MSR bitmap (1 page). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Physical address of the MSR bitmap (1 page). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** Virtual address of the MSR bitmap (1 page). */
        R0PTRTYPE(uint8_t *)        pMSRBitmap;

        /** R0 memory object for the MSR entry load page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSREntryLoad;
        /** Physical address of the MSR entry load page (guest MSRs). */
        RTHCPHYS                    pMSREntryLoadPhys;
        /** Virtual address of the MSR entry load page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSREntryLoad;

        /** R0 memory object for the MSR exit store page (guest MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitStore;
        /** Physical address of the MSR exit store page (guest MSRs). */
        RTHCPHYS                    pMSRExitStorePhys;
        /** Virtual address of the MSR exit store page (guest MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitStore;

        /** R0 memory object for the MSR exit load page (host MSRs). */
        RTR0MEMOBJ                  pMemObjMSRExitLoad;
        /** Physical address of the MSR exit load page (host MSRs). */
        RTHCPHYS                    pMSRExitLoadPhys;
        /** Virtual address of the MSR exit load page (host MSRs). */
        R0PTRTYPE(uint8_t *)        pMSRExitLoad;

        /** Ring 0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));

        /** Host CR4 value (set by ring-0 VMX init) */
        uint64_t                    hostCR4;

        /** VMX MSR values (capability MSRs read during ring-0 init). */
        struct
        {
            uint64_t                feature_ctrl;       /**< Feature control MSR. */
            uint64_t                vmx_basic_info;     /**< Basic VMX information. */
            VMX_CAPABILITY          vmx_pin_ctls;       /**< Pin-based execution controls. */
            VMX_CAPABILITY          vmx_proc_ctls;      /**< Processor-based execution controls. */
            VMX_CAPABILITY          vmx_proc_ctls2;     /**< Secondary processor-based controls. */
            VMX_CAPABILITY          vmx_exit;           /**< VM-exit controls. */
            VMX_CAPABILITY          vmx_entry;          /**< VM-entry controls. */
            uint64_t                vmx_misc;           /**< Miscellaneous VMX data. */
            uint64_t                vmx_cr0_fixed0;     /**< CR0 bits fixed to 0. */
            uint64_t                vmx_cr0_fixed1;     /**< CR0 bits fixed to 1. */
            uint64_t                vmx_cr4_fixed0;     /**< CR4 bits fixed to 0. */
            uint64_t                vmx_cr4_fixed1;     /**< CR4 bits fixed to 1. */
            uint64_t                vmx_vmcs_enum;      /**< VMCS enumeration. */
            uint64_t                vmx_eptcaps;        /**< EPT capabilities. */
        } msr;

        /** Last VMX instruction error. */
        uint32_t                    ulLastInstrError;

        /** The last known guest paging mode. */
        PGMMODE                     enmCurrGuestMode;

        /** Flush types for invept & invvpid; they depend on capabilities. */
        VMX_FLUSH                   enmFlushPage;
        VMX_FLUSH                   enmFlushContext;
    } vmx;

    /** AMD-V (SVM) state. */
    struct
    {
        /** Set by the ring-0 driver to indicate SVM is supported by the CPU. */
        bool                        fSupported;
        /** Set when we've enabled SVM. */
        bool                        fEnabled;
        /** Set if erratum 170 affects the AMD cpu. */
        bool                        fAlwaysFlushTLB;
        /** Explicit alignment padding (32-bit gcc). */
        bool                        padding[1];

        /** R0 memory object for the host VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCBHost;
        /** Physical address of the host VM control block (VMCB). */
        RTHCPHYS                    pVMCBHostPhys;
        /** Virtual address of the host VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCBHost;

        /** R0 memory object for the IO bitmap (12kb). */
        RTR0MEMOBJ                  pMemObjIOBitmap;
        /** Physical address of the IO bitmap (12kb). */
        RTHCPHYS                    pIOBitmapPhys;
        /** Virtual address of the IO bitmap. */
        R0PTRTYPE(void *)           pIOBitmap;

        /** R0 memory object for the MSR bitmap (8kb). */
        RTR0MEMOBJ                  pMemObjMSRBitmap;
        /** Physical address of the MSR bitmap (8kb). */
        RTHCPHYS                    pMSRBitmapPhys;
        /** Virtual address of the MSR bitmap. */
        R0PTRTYPE(void *)           pMSRBitmap;

        /** SVM revision. */
        uint32_t                    u32Rev;

        /** SVM feature bits from cpuid 0x8000000a */
        uint32_t                    u32Features;
    } svm;

    /** AMD-specific CPUID feature bits. */
    struct
    {
        uint32_t                    u32AMDFeatureECX;   /**< AMD feature bits (ECX). */
        uint32_t                    u32AMDFeatureEDX;   /**< AMD feature bits (EDX). */
    } cpuid;

    /** Saved error from detection */
    int32_t                     lLastError;

    /** Set if HWACCMR0Init was run. */
    bool                        fHWACCMR0Init;

    /** Current shadow paging mode. */
    PGMMODE                     enmShadowMode;

    /** Explicit alignment padding of StatEntry (32-bit g++ again). */
    int32_t                     padding2;

#ifdef VBOX_STRICT
    /** The CPU ID of the CPU currently owning the VMCS. Set in
     * HWACCMR0Enter and cleared in HWACCMR0Leave. */
    RTCPUID                     idEnteredCpu;
#else
# if HC_ARCH_BITS == 32
    /** Alignment padding taking the place of idEnteredCpu. */
    RTCPUID                     Alignment2;
# endif
#endif

    /* Profiling counters: world-switch entry, exit and guest execution. */
    STAMPROFILEADV              StatEntry;
    STAMPROFILEADV              StatExit;
    STAMPROFILEADV              StatInGC;

    STAMCOUNTER                 StatIntInject;

    /* Exit reason counters. */
    STAMCOUNTER                 StatExitShadowNM;
    STAMCOUNTER                 StatExitGuestNM;
    STAMCOUNTER                 StatExitShadowPF;
    STAMCOUNTER                 StatExitGuestPF;
    STAMCOUNTER                 StatExitGuestUD;
    STAMCOUNTER                 StatExitGuestSS;
    STAMCOUNTER                 StatExitGuestNP;
    STAMCOUNTER                 StatExitGuestGP;
    STAMCOUNTER                 StatExitGuestDE;
    STAMCOUNTER                 StatExitGuestDB;
    STAMCOUNTER                 StatExitGuestMF;
    STAMCOUNTER                 StatExitInvpg;
    STAMCOUNTER                 StatExitInvd;
    STAMCOUNTER                 StatExitCpuid;
    STAMCOUNTER                 StatExitRdtsc;
    STAMCOUNTER                 StatExitCRxWrite;
    STAMCOUNTER                 StatExitCRxRead;
    STAMCOUNTER                 StatExitDRxWrite;
    STAMCOUNTER                 StatExitDRxRead;
    STAMCOUNTER                 StatExitCLTS;
    STAMCOUNTER                 StatExitLMSW;
    STAMCOUNTER                 StatExitIOWrite;
    STAMCOUNTER                 StatExitIORead;
    STAMCOUNTER                 StatExitIOStringWrite;
    STAMCOUNTER                 StatExitIOStringRead;
    STAMCOUNTER                 StatExitIrqWindow;
    STAMCOUNTER                 StatExitMaxResume;
    STAMCOUNTER                 StatIntReinject;
    STAMCOUNTER                 StatPendingHostIrq;

    /* TLB flush counters. */
    STAMCOUNTER                 StatFlushPageManual;
    STAMCOUNTER                 StatFlushPhysPageManual;
    STAMCOUNTER                 StatFlushTLBManual;
    STAMCOUNTER                 StatFlushPageInvlpg;
    STAMCOUNTER                 StatFlushTLBWorldSwitch;
    STAMCOUNTER                 StatNoFlushTLBWorldSwitch;
    STAMCOUNTER                 StatFlushTLBCRxChange;
    STAMCOUNTER                 StatFlushASID;
    STAMCOUNTER                 StatFlushTLBInvlpga;

    STAMCOUNTER                 StatSwitchGuestIrq;
    STAMCOUNTER                 StatSwitchToR3;

    /* TSC offsetting/interception counters. */
    STAMCOUNTER                 StatTSCOffset;
    STAMCOUNTER                 StatTSCIntercept;

    STAMCOUNTER                 StatExitReasonNPF;
    STAMCOUNTER                 StatDRxArmed;
    STAMCOUNTER                 StatDRxContextSwitch;
    STAMCOUNTER                 StatDRxIOCheck;


    /** Per-exit-reason statistics array (ring-3 pointer). */
    R3PTRTYPE(PSTAMCOUNTER)     paStatExitReason;
    /** Per-exit-reason statistics array (ring-0 pointer). */
    R0PTRTYPE(PSTAMCOUNTER)     paStatExitReasonR0;
} HWACCM;
/** Pointer to HWACCM VM instance data. */
typedef HWACCM *PHWACCM;
423
/**
 * HWACCM VMCPU Instance data.
 */
typedef struct HWACCMCPU
{
    /** Old style FPU reporting trap mask override performed (optimization) */
    bool                        fFPUOldStyleOverride;

    /** Set if we don't have to flush the TLB on VM entry. */
    bool                        fResumeVM;

    /** Set if we need to flush the TLB during the world switch. */
    bool                        fForceTLBFlush;

    /** Explicit alignment padding (32-bit gcc). */
    bool                        padding[1];

    /** HWACCM_CHANGED_* flags. */
    RTUINT                      fContextUseFlags;

    /* Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
    RTCPUID                     idLastCpu;

    /* TLB flush count */
    RTUINT                      cTLBFlushes;

    /* Current ASID in use by the VM */
    RTUINT                      uCurrentASID;

    /** VT-x per-VCPU state. */
    struct
    {
        /** R0 memory object for the VM control structure (VMCS). */
        RTR0MEMOBJ                  pMemObjVMCS;
        /** Physical address of the VM control structure (VMCS). */
        RTHCPHYS                    pVMCSPhys;
        /** Virtual address of the VM control structure (VMCS). */
        R0PTRTYPE(void *)           pVMCS;

        /** Ring 0 handlers for VT-x. */
        DECLR0CALLBACKMEMBER(int, pfnStartVM,(RTHCUINT fResume, PCPUMCTX pCtx));

        /** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
        uint64_t                    proc_ctls;

        /** Current CR0 mask. */
        uint64_t                    cr0_mask;
        /** Current CR4 mask. */
        uint64_t                    cr4_mask;

        /** Current EPTP. */
        RTHCPHYS                    GCPhysEPTP;

        /** Real-mode emulation state. */
        struct
        {
            /** Saved guest eflags.
             * NOTE(review): exact save/restore points are in the VT-x
             * ring-0 code, not visible in this header — confirm there. */
            X86EFLAGS               eflags;
            /** Non-zero when this real-mode state is valid. */
            uint32_t                fValid;
        } RealMode;

        /** Diagnostic state captured on the last error. */
        struct
        {
            uint64_t                u64VMCSPhys;        /**< Physical address of the VMCS. */
            uint32_t                ulVMCSRevision;     /**< VMCS revision. */
            uint32_t                ulLastInstrError;   /**< Last VMX instruction error. */
            uint32_t                ulLastExitReason;   /**< Last exit reason. */
            uint32_t                padding;            /**< Alignment padding. */
        } lasterror;

    } vmx;

    /** AMD-V per-VCPU state. */
    struct
    {
        /** R0 memory object for the VM control block (VMCB). */
        RTR0MEMOBJ                  pMemObjVMCB;
        /** Physical address of the VM control block (VMCB). */
        RTHCPHYS                    pVMCBPhys;
        /** Virtual address of the VM control block (VMCB). */
        R0PTRTYPE(void *)           pVMCB;

        /** Ring 0 handlers for AMD-V. */
        DECLR0CALLBACKMEMBER(int, pfnVMRun,(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx));

    } svm;

#if HC_ARCH_BITS == 32
    /** Alignment padding so the layout matches 64-bit hosts. */
    uint32_t                    Alignment;
#endif

    /** Event injection state. */
    struct
    {
        /** Non-zero when an event is pending (re)injection.
         * NOTE(review): inferred from the name; confirm against the
         * VT-x/AMD-V ring-0 code. */
        uint32_t                    fPending;
        /** Associated error code. */
        uint32_t                    errCode;
        /** Event/interruption information. */
        uint64_t                    intInfo;
    } Event;

} HWACCMCPU;
/** Pointer to HWACCM VMCPU instance data. */
typedef HWACCMCPU *PHWACCMCPU;
524
525
#ifdef IN_RING0

/** Returns the per-cpu information for the CPU the caller is running on.
 * Note: declared with (void) — an empty parameter list in C is an
 * unprototyped declaration and disables argument type checking. */
VMMR0DECL(PHWACCM_CPUINFO) HWACCMR0GetCurrentCpu(void);

#ifdef VBOX_STRICT
/** Debug helper: dumps the guest CPU context. */
VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PCPUMCTX pCtx);
/** Debug helper: dumps a host descriptor table entry. */
VMMR0DECL(void) HWACCMR0DumpDescriptor(PX86DESCHC Desc, RTSEL Sel, const char *pszMsg);
#else
/* No-op stubs in non-strict builds; do/while(0) keeps them statement-safe. */
#define HWACCMDumpRegs(a, b)                do { } while (0)
#define HWACCMR0DumpDescriptor(a, b, c)     do { } while (0)
#endif

/* Dummy callback handlers. */
VMMR0DECL(int) HWACCMR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHWACCM_CPUINFO pCpu);
VMMR0DECL(int) HWACCMR0DummyLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(int) HWACCMR0DummyEnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
VMMR0DECL(int) HWACCMR0DummyDisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
VMMR0DECL(int) HWACCMR0DummyInitVM(PVM pVM);
VMMR0DECL(int) HWACCMR0DummyTermVM(PVM pVM);
VMMR0DECL(int) HWACCMR0DummySetupVM(PVM pVM);
VMMR0DECL(int) HWACCMR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
VMMR0DECL(int) HWACCMR0DummySaveHostState(PVM pVM, PVMCPU pVCpu);
VMMR0DECL(int) HWACCMR0DummyLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);

#endif /* IN_RING0 */
551
552/** @} */
553
554__END_DECLS
555
556#endif
557
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette