VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 17212

Last change on this file since 17212 was 16790, checked in by vboxsync, 16 years ago

VBOX_WITH_PGMPOOL_PAGING_ONLY: paranoid check

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.1 KB
Line 
1/* $Id: VMMR0.cpp 16790 2009-02-16 14:05:10Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VMM
26#include <VBox/vmm.h>
27#include <VBox/sup.h>
28#include <VBox/trpm.h>
29#include <VBox/cpum.h>
30#include <VBox/pgm.h>
31#include <VBox/stam.h>
32#include <VBox/tm.h>
33#include "VMMInternal.h"
34#include <VBox/vm.h>
35#include <VBox/gvmm.h>
36#include <VBox/gmm.h>
37#include <VBox/intnet.h>
38#include <VBox/hwaccm.h>
39#include <VBox/param.h>
40
41#include <VBox/err.h>
42#include <VBox/version.h>
43#include <VBox/log.h>
44#include <iprt/assert.h>
45#include <iprt/stdarg.h>
46#include <iprt/mp.h>
47#include <iprt/string.h>
48
49#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
50# pragma intrinsic(_AddressOfReturnAddress)
51#endif
52
53
54/*******************************************************************************
55* Internal Functions *
56*******************************************************************************/
57__BEGIN_DECLS
58VMMR0DECL(int) ModuleInit(void);
59VMMR0DECL(void) ModuleTerm(void);
60__END_DECLS
61
62
63/*******************************************************************************
64* Global Variables *
65*******************************************************************************/
66/** Pointer to the internal networking service instance. */
67PINTNET g_pIntNet = 0;
68
69
70/**
71 * Initialize the module.
72 * This is called when we're first loaded.
73 *
74 * @returns 0 on success.
75 * @returns VBox status on failure.
76 */
77VMMR0DECL(int) ModuleInit(void)
78{
79 LogFlow(("ModuleInit:\n"));
80
81 /*
82 * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
83 */
84 int rc = GVMMR0Init();
85 if (RT_SUCCESS(rc))
86 {
87 rc = GMMR0Init();
88 if (RT_SUCCESS(rc))
89 {
90 rc = HWACCMR0Init();
91 if (RT_SUCCESS(rc))
92 {
93#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
94 rc = PGMR0DynMapInit();
95#endif
96 if (RT_SUCCESS(rc))
97 {
98 LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
99 g_pIntNet = NULL;
100 LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
101 rc = INTNETR0Create(&g_pIntNet);
102 if (RT_SUCCESS(rc))
103 {
104 LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
105 return VINF_SUCCESS;
106 }
107
108 /* bail out */
109 g_pIntNet = NULL;
110 LogFlow(("ModuleTerm: returns %Rrc\n", rc));
111#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
112 PGMR0DynMapTerm();
113#endif
114 }
115 HWACCMR0Term();
116 }
117 GMMR0Term();
118 }
119 GVMMR0Term();
120 }
121
122 LogFlow(("ModuleInit: failed %Rrc\n", rc));
123 return rc;
124}
125
126
/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 *
 * Tears down the global services in the reverse order of ModuleInit.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Destroy the internal networking instance.
     */
    if (g_pIntNet)
    {
        INTNETR0Destroy(g_pIntNet);
        g_pIntNet = NULL;
    }

    /*
     * PGM (Darwin) and HWACCM global cleanup.
     * Destroy the GMM and GVMM instances.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
    HWACCMR0Term();

    /* GMM was initialized after GVMM, so it goes down first. */
    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}
158
159
/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * Performs the SVN revision handshake with ring-3, registers the per-EMT
 * ring-0 logger, and initializes the per-VM data of GVMM, HWACCM, CPUM and
 * (on Darwin) the PGM dynamic mapping.  On failure the logger is
 * deregistered again before returning.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
        return VERR_VERSION_MISMATCH;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

    /*
     * Register the EMT R0 logger instance.
     */
    PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
#if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned succesfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned succesfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned succesfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
#endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
    }

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
                    /* Success: tell GVMM and keep the per-thread logger registered. */
                    GVMMR0DoneInitVM(pVM);
                    return rc;
                }

                /* bail out */
                /* NOTE(review): no CPUM per-VM undo is performed here — presumably
                   CPUMR0Init has nothing to roll back; confirm against CPUMR0. */
            }
            HWACCMR0TermVM(pVM);
        }
    }
    /* Failure path: drop the per-thread logger registration again. */
    RTLogSetDefaultInstanceThread(NULL, 0);
    return rc;
}
254
255
/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code (always VINF_SUCCESS).
 *
 * @param   pVM         The VM instance in question.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
    /*
     * Tell GVMM what we're up to and check that we only do this once.
     * Per-VM teardown only runs on the first call.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger (registered in vmmR0InitVM).
     */
    RTLogSetDefaultInstanceThread(NULL, 0);
    return VINF_SUCCESS;
}
288
289
290/**
291 * Calls the ring-3 host code.
292 *
293 * @returns VBox status code of the ring-3 call.
294 * @param pVM The VM handle.
295 * @param enmOperation The operation.
296 * @param uArg The argument to the operation.
297 */
298VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
299{
300/** @todo profile this! */
301 pVM->vmm.s.enmCallHostOperation = enmOperation;
302 pVM->vmm.s.u64CallHostArg = uArg;
303 pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
304 int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
305 if (rc == VINF_SUCCESS)
306 rc = pVM->vmm.s.rcCallHost;
307 return rc;
308}
309
310
#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * Maps a guest-context/ring-zero exit status code onto its STAM counter so
 * the distribution of exit reasons can be inspected; VINF_VMM_CALL_HOST is
 * broken down further by the requested call-host operation.
 *
 * @param   pVM     The VM handle.
 * @param   rc      The status code.
 */
static void vmmR0RecordRC(PVM pVM, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetExceptionPrivilege);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VERR_REM_FLUSHED_PAGES_OVERFLOW:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPageOverflow);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            /* Break the call-host exits down by the operation recorded
               in VMMR0CallHost. */
            switch (pVM->vmm.s.enmCallHostOperation)
            {
                case VMMCALLHOST_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLHOST_PDM_QUEUE_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMQueueFlush);
                    break;
                case VMMCALLHOST_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLHOST_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLHOST_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
#ifndef VBOX_WITH_NEW_PHYS_CODE
                case VMMCALLHOST_PGM_RAM_GROW_RANGE:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMGrowRAM);
                    break;
#endif
                case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLHOST_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLHOST_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLHOST_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallHost);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulHlt);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */
488
489
490/**
491 * Unused ring-0 entry point that used to be called from the interrupt gate.
492 *
493 * Will be removed one of the next times we do a major SUPDrv version bump.
494 *
495 * @returns VBox status code.
496 * @param pVM The VM to operate on.
497 * @param enmOperation Which operation to execute.
498 * @param pvArg Argument to the operation.
499 * @remarks Assume called with interrupts disabled.
500 */
501VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
502{
503 switch (enmOperation)
504 {
505 default:
506 /*
507 * We're returning VERR_NOT_SUPPORT here so we've got something else
508 * than -1 which the interrupt gate glue code might return.
509 */
510 Log(("operation %#x is not supported\n", enmOperation));
511 return VERR_NOT_SUPPORTED;
512 }
513}
514
515
/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * Runs guest code either via the raw-mode world switcher or via the
 * hardware-acceleration (HWACCM) path and records the result status.
 *
 * @param   pVM             The VM to operate on.
 *                          The return code is stored in pVM->vmm.s.iLastGZRc.
 * @param   idCpu           VMCPU id.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, unsigned idCpu, VMMR0OPERATION enmOperation)
{
    /* Reject out-of-range VCPU ids up front. */
    if (RT_UNLIKELY(idCpu >= pVM->cCPUs))
    {
        pVM->vmm.s.iLastGZRc = VERR_INVALID_PARAMETER;
        return;
    }

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as hwaccm disables the switcher. */
            if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
            {
                RTCCUINTREG uFlags = ASMIntDisableFlags();

#ifdef VBOX_STRICT
                /* NOTE(review): this early return does not restore uFlags, so
                   interrupts stay disabled on this path — verify intentional. */
                if (RT_UNLIKELY(!PGMGetHyperCR3(pVM)))
                {
                    pVM->vmm.s.iLastGZRc = VERR_ACCESS_DENIED;
                    return;
                }
#endif

                /* World switch: TM is told so virtual time accounting is right. */
                TMNotifyStartOfExecution(pVM);
                int rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                pVM->vmm.s.iLastGZRc = rc;
                TMNotifyEndOfExecution(pVM);

                /* Forward host interrupts that fired while in guest context. */
                if (    rc == VINF_EM_RAW_INTERRUPT
                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);

                ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                vmmR0RecordRC(pVM, rc);
#endif
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVM->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            int rc;
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);

#ifndef RT_OS_WINDOWS /** @todo check other hosts */
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    /* Run under a setjmp so ring-3 calls can long-jump out. */
                    rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVM->vmm.s.iLastGZRc = rc;
#ifndef RT_OS_WINDOWS /** @todo check other hosts */
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVM->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVM->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
638
639
640/**
641 * Validates a session or VM session argument.
642 *
643 * @returns true / false accordingly.
644 * @param pVM The VM argument.
645 * @param pSession The session argument.
646 */
647DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
648{
649 /* This must be set! */
650 if (!pSession)
651 return false;
652
653 /* Only one out of the two. */
654 if (pVM && pClaimedSession)
655 return false;
656 if (pVM)
657 pClaimedSession = pVM->pSession;
658 return pClaimedSession == pSession;
659}
660
661
/**
 * VMMR0EntryEx worker function, either called directly or when ever possible
 * called thru a longjmp so we can exit safely on failure.
 *
 * Validates the VM pointer (when present), then dispatches the operation to
 * the owning subsystem (GVMM, GMM, HWACCM, PGM, GCFGM, INTNET).  Each case
 * first checks that the unused argument slots are empty.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        /* Must be a valid, page-aligned pointer. */
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        /* State must be sane and the ring-0 self pointer must round-trip. */
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }
    }

    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         *
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM, (HWACCMSTATE)u64Arg);

        /*
         * Setup the hardware accelerated raw-mode session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
        {
            /* Interrupts are disabled around the setup. */
            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = HWACCMR0SetupVM(pVM);
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * Switch to GC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

#ifdef VBOX_STRICT
            if (RT_UNLIKELY(!PGMGetHyperCR3(pVM)))
                return VERR_NOT_SUPPORTED;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            return PGMR0PhysAllocateHandyPages(pVM);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, (PGMMINITIALRESERVATIONREQ)pReqHdr);
        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, (PGMMALLOCATEPAGESREQ)pReqHdr);
        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, (PGMMFREEPAGESREQ)pReqHdr);
        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, (PGMMBALLOONEDPAGESREQ)pReqHdr);
        case VMMR0_DO_GMM_DEFLATED_BALLOON:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0DeflatedBalloon(pVM, (uint32_t)u64Arg);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, (RTR3PTR)u64Arg);

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }


        /*
         * Requests to the internal networking service.
         * Each request validates its session claim and requires the service
         * instance created in ModuleInit.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0OpenReq(g_pIntNet, pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfCloseReq(g_pIntNet, pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETRING3BUFFERREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfGetRing3BufferReq(g_pIntNet, pSession, (PINTNETIFGETRING3BUFFERREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSetPromiscuousModeReq(g_pIntNet, pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSetMacAddressReq(g_pIntNet, pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSetActiveReq(g_pIntNet, pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSendReq(g_pIntNet, pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfWaitReq(g_pIntNet, pSession, (PINTNETIFWAITREQ)pReqHdr);

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORT here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}
953
954
/**
 * Argument packet for vmmR0EntryExWrapper, containing the arguments of
 * VMMR0EntryEx so they can cross the setjmp boundary as a single pointer.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM             pVM;            /**< The VM to operate on. */
    VMMR0OPERATION  enmOperation;   /**< Which operation to execute. */
    PSUPVMMR0REQHDR pReq;           /**< Request packet. Optional. */
    uint64_t        u64Arg;         /**< Some simple constant argument. */
    PSUPDRVSESSION  pSession;       /**< The session of the caller. */
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
968
969/**
970 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
971 *
972 * @returns VBox status code.
973 * @param pvArgs The argument package
974 */
975static int vmmR0EntryExWrapper(void *pvArgs)
976{
977 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
978 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
979 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
980 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
981 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
982}
983
984
/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * EMT-only operations are routed through a setjmp wrapper so asserts can
 * long-jump back safely; everything else goes straight to the worker.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            case VMMR0_DO_GMM_DEFLATED_BALLOON:
            case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            case VMMR0_DO_GMM_SEED_CHUNK:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                /* Jump buffer not armed yet? Fall through to the direct call below. */
                if (!pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallHostSetJmpEx(&pVM->vmm.s.CallHostR0JmpBuf, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, enmOperation, pReq, u64Arg, pSession);
}
1039
1040
/**
 * Internal R0 logger worker: Flush logger.
 *
 * Recovers the owning VMMR0LOGGER (and thus the VM) from the embedded logger
 * pointer and asks ring-3 to flush the buffer via VMMR0CallHost.  All
 * failure modes are handled by silently (or in DEBUG, noisily) returning.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
#endif
        return;
    }
    /* Flushing may be turned off around sensitive sections, see
       VMMR0LogFlushDisable/Enable. */
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
#endif
        return;
    }

    /*
     * Check that the jump buffer is armed.
     * Without it VMMR0CallHost cannot long-jump back to ring-3.
     */
#ifdef RT_ARCH_X86
    if (!pVM->vmm.s.CallHostR0JmpBuf.eip)
#else
    if (!pVM->vmm.s.CallHostR0JmpBuf.rip)
#endif
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
#endif
        return;
    }
    VMMR0CallHost(pVM, VMMCALLHOST_VMM_LOGGER_FLUSH, 0);
}
1092
1093
1094/**
1095 * Disables flushing of the ring-0 debug log.
1096 *
1097 * @param pVCpu The shared virtual cpu structure.
1098 */
1099VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
1100{
1101 PVM pVM = pVCpu->pVMR0;
1102 if (pVM->vmm.s.pR0LoggerR0)
1103 pVM->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
1104}
1105
1106
1107/**
1108 * Enables flushing of the ring-0 debug log.
1109 *
1110 * @param pVCpu The shared virtual cpu structure.
1111 */
1112VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
1113{
1114 PVM pVM = pVCpu->pVMR0;
1115 if (pVM->vmm.s.pR0LoggerR0)
1116 pVM->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
1117}
1118
1119
/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0 /* Flip to 1 to always hit the breakpoint (debug aid). */
    return true;
#else
    /* Find the VM owned by the calling EMT, if any.  NOTE(review): presumably
       NIL_RTNATIVETHREAD means "the current thread" - confirm against GVMMR0. */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        /* Only long jump when the buffer is armed (instruction pointer saved). */
#ifdef RT_ARCH_X86
        if (pVM->vmm.s.CallHostR0JmpBuf.eip)
#else
        if (pVM->vmm.s.CallHostR0JmpBuf.rip)
#endif
        {
            /* Push the assertion up to ring-3; panic only if the call failed. */
            int rc = VMMR0CallHost(pVM, VMMCALLHOST_VM_R0_ASSERTION, 0);
            return RT_FAILURE_NP(rc);
        }
    }
    /* No VM / unarmed buffer: the default differs by host OS - hit the
       breakpoint on Linux, ignore it elsewhere. */
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}
1150
1151
/**
 * Override this so we can push it up to ring-3.
 *
 * Emits the assertion location to the kernel log (SUPR0Printf), the always-on
 * logger, and - when the calling EMT owns a VM - into the VM structure
 * (szRing0AssertMsg1) so ring-3 can pick it up.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /* Skip the kernel log on darwin and for the DEBUG_sandervl build flavor. */
#if !defined(DEBUG_sandervl) && !defined(RT_OS_DARWIN)
    SUPR0Printf("\n!!R0-Assertion Failed!!\n"
                "Expression: %s\n"
                "Location : %s(%d) %s\n",
                pszExpr, pszFile, uLine, pszFunction);
#endif
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /* Stash the message in the VM structure for ring-3 consumption. */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);
    /* On darwin, also defer to the regular IPRT assertion worker. */
#ifdef RT_OS_DARWIN
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
#endif
}
1184
1185
1186/**
1187 * Callback for RTLogFormatV which writes to the ring-3 log port.
1188 * See PFNLOGOUTPUT() for details.
1189 */
1190static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
1191{
1192 for (size_t i = 0; i < cbChars; i++)
1193 {
1194#if !defined(DEBUG_sandervl) && !defined(RT_OS_DARWIN)
1195 SUPR0Printf("%c", pachChars[i]);
1196#endif
1197 LogAlways(("%c", pachChars[i]));
1198 }
1199
1200 return cbChars;
1201}
1202
1203
/**
 * Override of the assertion message worker so the custom message can be
 * pushed up to ring-3: the formatted text goes to the default logger (via
 * rtLogOutput) and, when the calling EMT owns a VM, into szRing0AssertMsg2.
 *
 * @param   pszFormat   printf style format string.
 * @param   ...         Format arguments.
 */
DECLEXPORT(void) RTCALL AssertMsg2(const char *pszFormat, ...)
{
    va_list va;

    PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
    if (pLog)
    {
        /* Format to the logger first; va is consumed and must be restarted
           before each subsequent use. */
        va_start(va, pszFormat);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, va);
        va_end(va);

        /* Stash the message in the VM structure for ring-3 consumption. */
        PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
        if (pVM)
        {
            va_start(va, pszFormat);
            RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, va);
            va_end(va);
        }
    }

    /* On darwin, also defer to the regular IPRT assertion worker. */
#ifdef RT_OS_DARWIN
    va_start(va, pszFormat);
    RTAssertMsg2V(pszFormat, va);
    va_end(va);
#endif
}
1230
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette