VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 47742

Last change on this file since 47742 was 47645, checked in by vboxsync, 11 years ago

VMM: RTThreadCtxHooksAre[Registered|Created](). Moved it into IN_RING0 as we'll soon require PFNRTTHREADCTXHOOK.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 58.4 KB
/* $Id: VMMR0.cpp 47645 2013-08-09 13:47:38Z vboxsync $ */
/** @file
 * VMM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm/vmm.h>
#include <VBox/sup.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/tm.h>
#include "VMMInternal.h"
#include <VBox/vmm/vm.h>
#ifdef VBOX_WITH_PCI_PASSTHROUGH
# include <VBox/vmm/pdmpci.h>
#endif

#include <VBox/vmm/gvmm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/intnet.h>
#include <VBox/vmm/hm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/version.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/timer.h>

#include "dtrace/VBoxVMM.h"


#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
# pragma intrinsic(_AddressOfReturnAddress)
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
extern uint64_t __udivdi3(uint64_t, uint64_t);
extern uint64_t __umoddi3(uint64_t, uint64_t);
#endif
RT_C_DECLS_END


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce,
#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
    (PFNRT)__udivdi3,
    (PFNRT)__umoddi3,
#endif
    NULL
};
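
/*
 * [Editor's illustrative sketch; not part of the original file. It shows the
 * "dependency array" idiom used by g_VMMGCDeps above in miniature: taking the
 * address of each function forces the linker to keep the symbol in this image
 * so that modules linking against it can resolve it. All names below are
 * hypothetical.]
 */
#if 0 /* example only */
static int exampleHelperA(void) { return 0; }
static int exampleHelperB(void) { return 1; }

/* Referencing the helpers by address defeats dead-code stripping. */
PFNRT g_aExampleDeps[] =
{
    (PFNRT)exampleHelperA,
    (PFNRT)exampleHelperB,
    NULL
};
#endif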

#ifdef RT_OS_SOLARIS
/* Dependency information for the native solaris loader. */
extern "C" { char _depends_on[] = "vboxdrv"; }
#endif



/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 * @param   hMod        Image handle for use in APIs.
 */
DECLEXPORT(int) ModuleInit(void *hMod)
{
#ifdef VBOX_WITH_DTRACE_R0
    /*
     * The first thing to do is register the static tracepoints.
     * (Deregistration is automatic.)
     */
    int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
    if (RT_FAILURE(rc2))
        return rc2;
#endif
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
     */
    int rc = vmmInitFormatTypes();
    if (RT_SUCCESS(rc))
    {
        rc = GVMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = GMMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = HMR0Init();
                if (RT_SUCCESS(rc))
                {
                    rc = PGMRegisterStringFormatTypes();
                    if (RT_SUCCESS(rc))
                    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        rc = PGMR0DynMapInit();
#endif
                        if (RT_SUCCESS(rc))
                        {
                            rc = IntNetR0Init();
                            if (RT_SUCCESS(rc))
                            {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                rc = PciRawR0Init();
#endif
                                if (RT_SUCCESS(rc))
                                {
                                    rc = CPUMR0ModuleInit();
                                    if (RT_SUCCESS(rc))
                                    {
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                        rc = vmmR0TripleFaultHackInit();
                                        if (RT_SUCCESS(rc))
#endif
                                        {
                                            LogFlow(("ModuleInit: returns success.\n"));
                                            return VINF_SUCCESS;
                                        }

                                        /*
                                         * Bail out.
                                         */
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                        vmmR0TripleFaultHackTerm();
#endif
                                    }
                                    else
                                        LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                    PciRawR0Term();
#endif
                                }
                                else
                                    LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
                                IntNetR0Term();
                            }
                            else
                                LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                            PGMR0DynMapTerm();
#endif
                        }
                        else
                            LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
                        PGMDeregisterStringFormatTypes();
                    }
                    else
                        LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
                    HMR0Term();
                }
                else
                    LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
                GMMR0Term();
            }
            else
                LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
            GVMMR0Term();
        }
        else
            LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
        vmmTermFormatTypes();
    }
    else
        LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}
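
/*
 * [Editor's illustrative sketch; not part of the original file. ModuleInit
 * above unwinds failures through nested if/else blocks; an equivalent
 * flattened, goto-based form of the same pattern, shown for just the first
 * two services, would look roughly like this.]
 */
#if 0 /* example only */
static int exampleModuleInit(void)
{
    int rc = GVMMR0Init();
    if (RT_FAILURE(rc))
        return rc;

    rc = GMMR0Init();
    if (RT_FAILURE(rc))
        goto bail_gvmm;

    return VINF_SUCCESS;

bail_gvmm:
    GVMMR0Term();
    return rc;
}
#endif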


/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 *
 * @param   hMod        Image handle for use in APIs.
 */
DECLEXPORT(void) ModuleTerm(void *hMod)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the CPUM module (Local APIC cleanup).
     */
    CPUMR0ModuleTerm();

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin), HM and PciRaw global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0Term();
#endif
    PGMDeregisterStringFormatTypes();
    HMR0Term();
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
    vmmR0TripleFaultHackTerm();
#endif

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    vmmTermFormatTypes();

    LogFlow(("ModuleTerm: returns\n"));
}


/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the VM.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @param   uBuildType  Build type indicator.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
{
    /*
     * Match the SVN revisions and build type.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VMM_R0_VERSION_MISMATCH;
    }
    if (uBuildType != vmmGetBuildType())
    {
        LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
        SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
        return VERR_VMM_R0_VERSION_MISMATCH;
    }
    if (   !VALID_PTR(pVM)
        || pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;


#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Check if the host supports high resolution timers or not.
     */
    if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
        && !RTTimerCanDoHighResolution())
        pVM->vmm.s.fUsePeriodicPreemptionTimers = false;

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HM, CPUM and PGM (Darwin only).
         */
        rc = HMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                    rc = PciRawR0InitVM(pVM);
#endif
                    if (RT_SUCCESS(rc))
                    {
                        GVMMR0DoneInitVM(pVM);
                        return rc;
                    }
                }

                /* bail out */
            }
#ifdef VBOX_WITH_PCI_PASSTHROUGH
            PciRawR0TermVM(pVM);
#endif
            HMR0TermVM(pVM);
        }
    }


    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}


/**
 * Terminates the R0 bits for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0TermVM(pVM);
#endif


    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
        /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
         *        here to make sure we don't leak any shared pages if we crash... */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}


/**
 * Creates R0 thread-context hooks for the current EMT thread.
 *
 * @returns VBox status code.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @thread  EMT.
 */
VMMR0DECL(int) VMMR0ThreadCtxHooksCreate(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    Assert(pVCpu->vmm.s.hR0ThreadCtx == NIL_RTTHREADCTX);
    int rc = RTThreadCtxHooksCreate(&pVCpu->vmm.s.hR0ThreadCtx);
    if (   RT_SUCCESS(rc)
        || rc == VERR_NOT_SUPPORTED)
    {
        return VINF_SUCCESS;
    }

    Log(("RTThreadCtxHooksCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
    return rc;
}


/**
 * Releases the object reference for the thread-context hook.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @remarks Can be called from any thread.
 */
VMMR0DECL(void) VMMR0ThreadCtxHooksRelease(PVMCPU pVCpu)
{
    RTThreadCtxHooksRelease(pVCpu->vmm.s.hR0ThreadCtx);
}


/**
 * Whether thread-context hooks are created (implying they're supported) on this
 * platform.
 *
 * @returns true if the hooks are created, false otherwise.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Can be called from any thread.
 */
VMMR0DECL(bool) VMMR0ThreadCtxHooksAreCreated(PVMCPU pVCpu)
{
    return pVCpu->vmm.s.hR0ThreadCtx != NIL_RTTHREADCTX;
}


/**
 * Whether thread-context hooks are registered for this VCPU.
 *
 * @returns true if registered, false otherwise.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0DECL(bool) VMMR0ThreadCtxHooksAreRegistered(PVMCPU pVCpu)
{
    return RTThreadCtxHooksAreRegistered(pVCpu->vmm.s.hR0ThreadCtx);
}
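
/*
 * [Editor's illustrative sketch; not part of the original file. A plausible
 * EMT-side call sequence for the thread-context hook APIs above. Note that
 * VMMR0ThreadCtxHooksCreate() maps VERR_NOT_SUPPORTED to VINF_SUCCESS, so a
 * caller must use VMMR0ThreadCtxHooksAreCreated() to find out whether hooks
 * actually exist on this host.]
 */
#if 0 /* example only */
static void exampleThreadCtxHookUsage(PVMCPU pVCpu)
{
    int rc = VMMR0ThreadCtxHooksCreate(pVCpu);  /* VINF_SUCCESS even when unsupported. */
    if (   RT_SUCCESS(rc)
        && VMMR0ThreadCtxHooksAreCreated(pVCpu))
    {
        /* ... register the hooks and run guest code ... */
        bool fRegistered = VMMR0ThreadCtxHooksAreRegistered(pVCpu);
        NOREF(fRegistered);
    }
    VMMR0ThreadCtxHooksRelease(pVCpu);          /* Drop the object reference when done. */
}
#endif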


#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_R3_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_R3_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_R3_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_R3_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_R3_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            else
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
            break;

        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
                    break;
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something other
     * than -1 which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    NOREF(enmOperation); NOREF(pvArg); NOREF(pVM);
    return VERR_NOT_SUPPORTED;
}


/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param   pVM             Pointer to the VM.
 *                          The return code is stored in pVM->vmm.s.iLastGZRc.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            /* Some safety precautions first. */
            if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
            {
                pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
                break;
            }
#endif

            /* Disable preemption and update the periodic preemption timer. */
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
            RTCPUID idHostCpu = RTMpCpuId();
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
            CPUMR0SetLApic(pVM, idHostCpu);
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

            /* We might need to disable VT-x if the active switcher turns off paging. */
            bool fVTxDisabled;
            int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
            if (RT_SUCCESS(rc))
            {
                RTCCUINTREG uFlags = ASMIntDisableFlags();

                for (;;)
                {
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
                    TMNotifyStartOfExecution(pVCpu);

                    rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
                    pVCpu->vmm.s.iLastGZRc = rc;

                    TMNotifyEndOfExecution(pVCpu);
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

                    if (rc != VINF_VMM_CALL_TRACER)
                        break;
                    SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
                }

                /* Re-enable VT-x if previously turned off. */
                HMR0LeaveSwitcher(pVM, fVTxDisabled);

                if (   rc == VINF_EM_RAW_INTERRUPT
                    || rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);

                ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            }
            else
                pVCpu->vmm.s.iLastGZRc = rc;
            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
            RTThreadPreemptRestore(&PreemptState);
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us; this
         * way we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HM_RUN:
        {
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (   pR0Logger
                    && !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            int rc;
            if (!HMR0SuspendPending())
            {
                rc = HMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);

                    if (RT_UNLIKELY(   VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
                                    && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
                    {
                        /* Manual assert as normal assertions are going to crash in this case. */
                        pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
                        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
                                    "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
                        rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
                    }
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
                }
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}


/**
 * Validates a session or VM session argument.
 *
 * @returns true / false accordingly.
 * @param   pVM             Pointer to the VM. Optional.
 * @param   pClaimedSession The session claim to validate. Optional.
 * @param   pSession        The session argument.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one out of the two. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}
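
/*
 * [Editor's note; not part of the original file. The accept/reject cases of
 * vmmR0IsValidSession above, spelled out:
 *      pSession == NULL                        -> false (a session is mandatory)
 *      pVM != NULL && pClaimedSession != NULL  -> false (only one may claim it)
 *      pVM != NULL                             -> pVM->pSession == pSession
 *      otherwise                               -> pClaimedSession == pSession
 * ]
 */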


/**
 * VMMR0EntryEx worker function, either called directly or, whenever possible,
 * called through a longjmp so we can exit safely on failure.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        if (RT_UNLIKELY(   !VALID_PTR(pVM)
                        || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(   pVM->enmVMState < VMSTATE_CREATING
                        || pVM->enmVMState > VMSTATE_TERMINATED
                        || pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }

        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }


    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hm mode and check the current setting.
         */
        case VMMR0_DO_HM_ENABLE:
            return HMR0EnableAllCpus(pVM);

        /*
         * Setup the hardware accelerated session.
         */
        case VMMR0_DO_HM_SETUP_VM:
            return HMR0SetupVM(pVM);

        /*
         * Switch to RC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            int rc;
            bool fVTxDisabled;

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();

#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
            RTCPUID idHostCpu = RTMpCpuId();
            CPUMR0SetLApic(pVM, idHostCpu);
#endif

            /* We might need to disable VT-x if the active switcher turns off paging. */
            rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
            if (RT_FAILURE(rc))
                return rc;

            rc = pVM->vmm.s.pfnR0ToRawMode(pVM);

            /* Re-enable VT-x if previously turned off. */
            HMR0LeaveSwitcher(pVM, fVTxDisabled);

            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
            if (idCpu != 0)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysSetupIommu(pVM);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (   u64Arg
                || pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (   u64Arg
                || pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());

# ifdef DEBUG_sandervl
            /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
            /* Todo: this can have bad side effects for unexpected jumps back to r3. */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                Assert(    rc == VINF_SUCCESS
                       || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
                GMMR0CheckSharedModulesEnd(pVM);
            }
# else
            int rc = GMMR0CheckSharedModules(pVM, pVCpu);
# endif
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
#endif

        case VMMR0_DO_GMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }

        /*
         * PDM Wrappers.
         */
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }

        /*
         * Requests to the internal networking service.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);

#ifdef VBOX_WITH_PCI_PASSTHROUGH
        /*
         * Requests to host PCI driver service.
         */
        case VMMR0_DO_PCIRAW_REQ:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
#endif
        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something
             * other than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}


/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package
 */
static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}
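
/*
 * [Editor's illustrative sketch; not part of the original file. It shows,
 * using plain C setjmp/longjmp, the mechanism the vmmR0CallRing3SetJmpEx
 * path above is built around: arm a jump buffer before calling the worker,
 * and let code deep inside the worker unwind straight back to the arming
 * frame. The VMM variant additionally saves and switches stacks; this
 * sketch only demonstrates the control flow.]
 */
#if 0 /* example only; standard C */
# include <setjmp.h>
# include <stdio.h>

static jmp_buf g_ExampleJmpBuf;

static void exampleDeepWorker(void)
{
    /* Something several frames down decides to bail out... */
    longjmp(g_ExampleJmpBuf, 42);   /* ...and control returns to setjmp below. */
}

static int exampleEntry(void)
{
    int rc = setjmp(g_ExampleJmpBuf);
    if (rc == 0)        /* First return: buffer armed, run the worker. */
    {
        exampleDeepWorker();
        return 0;       /* Not reached in this example. */
    }
    printf("unwound with rc=%d\n", rc);  /* Second return: rc == 42. */
    return rc;
}
#endif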


/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            Pointer to the SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (   VALID_PTR(pVM)
        && pVM->pVMR0
        && idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.idCpu = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}


/**
 * Checks whether we've armed the ring-0 long jump machinery.
 *
 * @returns @c true / @c false
 * @param   pVCpu       The caller's cross context virtual CPU structure.
 * @thread  EMT
 * @sa      VMMIsLongJumpArmed
 */
VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
{
#ifdef RT_ARCH_X86
    return pVCpu->vmm.s.CallRing3JmpBufR0.eip
        && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
#else
    return pVCpu->vmm.s.CallRing3JmpBufR0.rip
        && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
#endif
}


/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This code is a bit paranoid.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (   !VALID_PTR(pR0Logger)
        || !VALID_PTR(pR0Logger + 1)
        || pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (   !VALID_PTR(pVM)
        || pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (   !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (   !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (   !VALID_PTR(pR0Logger)
        || !VALID_PTR(pR0Logger + 1)
        || pLogger->u32Magic != RTLOGGER_MAGIC
        || cchBuf < 2)
        return 0;

    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}
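
/*
 * [Editor's note; not part of the original file. The prefix callback above
 * emits the virtual CPU id as two lowercase hex digits at the start of each
 * log line, e.g. idCpu 0 -> "00", idCpu 10 -> "0a", idCpu 31 -> "1f".]
 */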

#ifdef LOG_ENABLED

/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}


/**
 * Checks if log flushing is disabled or not.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
    return true;
}
#endif /* LOG_ENABLED */

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
#ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    NOREF(pv);
    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the loggers.
     */
    PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }
    pLog = RTLogRelDefaultInstance();
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}

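/*
 * [Editor's illustrative sketch; not part of the original file.
 * RTAssertMsg2WeakV above hands the same va_list to several consumers; that
 * is only safe because each consumer gets a private va_copy, as this
 * standalone example demonstrates.]
 */
#if 0 /* example only; standard C */
# include <stdarg.h>
# include <stdio.h>

static void examplePrintTwice(const char *pszFormat, ...)
{
    va_list va;
    va_list vaCopy;

    va_start(va, pszFormat);
    va_copy(vaCopy, va);            /* Each consumer needs its own copy... */
    vprintf(pszFormat, va);         /* ...because vprintf consumes the list. */
    vprintf(pszFormat, vaCopy);
    va_end(vaCopy);
    va_end(va);
}
#endif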