VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 13049

Last change on this file since 13049 was 12836, checked in by vboxsync, 16 years ago

VMMR0: Fixed va_list reuse bug in AssertMsg2.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 39.8 KB
Line 
1/* $Id: VMMR0.cpp 12836 2008-09-30 15:33:20Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VMM
27#include <VBox/vmm.h>
28#include <VBox/sup.h>
29#include <VBox/trpm.h>
30#include <VBox/cpum.h>
31#include <VBox/stam.h>
32#include <VBox/tm.h>
33#include "VMMInternal.h"
34#include <VBox/vm.h>
35#include <VBox/gvmm.h>
36#include <VBox/gmm.h>
37#include <VBox/intnet.h>
38#include <VBox/hwaccm.h>
39#include <VBox/param.h>
40
41#include <VBox/err.h>
42#include <VBox/version.h>
43#include <VBox/log.h>
44#include <iprt/assert.h>
45#include <iprt/stdarg.h>
46#include <iprt/mp.h>
47#include <iprt/string.h>
48
49#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
50# pragma intrinsic(_AddressOfReturnAddress)
51#endif
52
53
54/*******************************************************************************
55* Internal Functions *
56*******************************************************************************/
57static int VMMR0Init(PVM pVM, uint32_t uSvnRev);
58static int VMMR0Term(PVM pVM);
59__BEGIN_DECLS
60VMMR0DECL(int) ModuleInit(void);
61VMMR0DECL(void) ModuleTerm(void);
62__END_DECLS
63
64
65/*******************************************************************************
66* Global Variables *
67*******************************************************************************/
#ifdef VBOX_WITH_INTERNAL_NETWORKING
/** Pointer to the internal networking service instance.
 * Created by ModuleInit, destroyed by ModuleTerm. */
PINTNET g_pIntNet = NULL;   /* NULL rather than 0: this is a pointer. */
#endif
72
73
74/**
75 * Initialize the module.
76 * This is called when we're first loaded.
77 *
78 * @returns 0 on success.
79 * @returns VBox status on failure.
80 */
81VMMR0DECL(int) ModuleInit(void)
82{
83 LogFlow(("ModuleInit:\n"));
84
85 /*
86 * Initialize the GVMM, GMM.& HWACCM
87 */
88 int rc = GVMMR0Init();
89 if (RT_SUCCESS(rc))
90 {
91 rc = GMMR0Init();
92 if (RT_SUCCESS(rc))
93 {
94 rc = HWACCMR0Init();
95 if (RT_SUCCESS(rc))
96 {
97#ifdef VBOX_WITH_INTERNAL_NETWORKING
98 LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
99 g_pIntNet = NULL;
100 LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
101 rc = INTNETR0Create(&g_pIntNet);
102 if (VBOX_SUCCESS(rc))
103 {
104 LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
105 return VINF_SUCCESS;
106 }
107 g_pIntNet = NULL;
108 LogFlow(("ModuleTerm: returns %Vrc\n", rc));
109#else
110 LogFlow(("ModuleInit: returns success.\n"));
111 return VINF_SUCCESS;
112#endif
113 }
114 }
115 }
116
117 LogFlow(("ModuleInit: failed %Rrc\n", rc));
118 return rc;
119}
120
121
122/**
123 * Terminate the module.
124 * This is called when we're finally unloaded.
125 */
126VMMR0DECL(void) ModuleTerm(void)
127{
128 LogFlow(("ModuleTerm:\n"));
129
130#ifdef VBOX_WITH_INTERNAL_NETWORKING
131 /*
132 * Destroy the internal networking instance.
133 */
134 if (g_pIntNet)
135 {
136 INTNETR0Destroy(g_pIntNet);
137 g_pIntNet = NULL;
138 }
139#endif
140
141 /* Global HWACCM cleanup */
142 HWACCMR0Term();
143
144 /*
145 * Destroy the GMM and GVMM instances.
146 */
147 GMMR0Term();
148 GVMMR0Term();
149
150 LogFlow(("ModuleTerm: returns\n"));
151}
152
153
154/**
155 * Initaties the R0 driver for a particular VM instance.
156 *
157 * @returns VBox status code.
158 *
159 * @param pVM The VM instance in question.
160 * @param uSvnRev The SVN revision of the ring-3 part.
161 * @thread EMT.
162 */
163static int VMMR0Init(PVM pVM, uint32_t uSvnRev)
164{
165 /*
166 * Match the SVN revisions.
167 */
168 if (uSvnRev != VMMGetSvnRev())
169 return VERR_VERSION_MISMATCH;
170 if ( !VALID_PTR(pVM)
171 || pVM->pVMR0 != pVM)
172 return VERR_INVALID_PARAMETER;
173
174 /*
175 * Register the EMT R0 logger instance.
176 */
177 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
178 if (pR0Logger)
179 {
180#if 0 /* testing of the logger. */
181 LogCom(("VMMR0Init: before %p\n", RTLogDefaultInstance()));
182 LogCom(("VMMR0Init: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
183 LogCom(("VMMR0Init: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
184 LogCom(("VMMR0Init: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
185
186 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
187 LogCom(("VMMR0Init: after %p reg\n", RTLogDefaultInstance()));
188 RTLogSetDefaultInstanceThread(NULL, 0);
189 LogCom(("VMMR0Init: after %p dereg\n", RTLogDefaultInstance()));
190
191 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
192 LogCom(("VMMR0Init: returned succesfully from direct logger call.\n"));
193 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
194 LogCom(("VMMR0Init: returned succesfully from direct flush call.\n"));
195
196 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
197 LogCom(("VMMR0Init: after %p reg2\n", RTLogDefaultInstance()));
198 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
199 LogCom(("VMMR0Init: returned succesfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
200 RTLogSetDefaultInstanceThread(NULL, 0);
201 LogCom(("VMMR0Init: after %p dereg2\n", RTLogDefaultInstance()));
202
203 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
204 LogCom(("VMMR0Init: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
205
206 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
207 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
208 LogCom(("VMMR0Init: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
209#endif
210 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
211 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
212 }
213
214 /*
215 * Initialize the per VM data for GVMM and GMM.
216 */
217 int rc = GVMMR0InitVM(pVM);
218// if (RT_SUCCESS(rc))
219// rc = GMMR0InitPerVMData(pVM);
220 if (RT_SUCCESS(rc))
221 {
222 /*
223 * Init HWACCM.
224 */
225 rc = HWACCMR0InitVM(pVM);
226 if (RT_SUCCESS(rc))
227 {
228 /*
229 * Init CPUM.
230 */
231 rc = CPUMR0Init(pVM);
232 if (RT_SUCCESS(rc))
233 return rc;
234 }
235 }
236
237 /* failed */
238 RTLogSetDefaultInstanceThread(NULL, 0);
239 return rc;
240}
241
242
243/**
244 * Terminates the R0 driver for a particular VM instance.
245 *
246 * @returns VBox status code.
247 *
248 * @param pVM The VM instance in question.
249 * @thread EMT.
250 */
251static int VMMR0Term(PVM pVM)
252{
253 HWACCMR0TermVM(pVM);
254
255 /*
256 * Deregister the logger.
257 */
258 RTLogSetDefaultInstanceThread(NULL, 0);
259 return VINF_SUCCESS;
260}
261
262
263/**
264 * Calls the ring-3 host code.
265 *
266 * @returns VBox status code of the ring-3 call.
267 * @param pVM The VM handle.
268 * @param enmOperation The operation.
269 * @param uArg The argument to the operation.
270 */
271VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
272{
273/** @todo profile this! */
274 pVM->vmm.s.enmCallHostOperation = enmOperation;
275 pVM->vmm.s.u64CallHostArg = uArg;
276 pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
277 int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
278 if (rc == VINF_SUCCESS)
279 rc = pVM->vmm.s.rcCallHost;
280 return rc;
281}
282
283
#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * Bumps the STAM counter matching the given status code; for
 * VINF_VMM_CALL_HOST the pending call-host operation selects the counter.
 *
 * @param   pVM     The VM handle.
 * @param   rc      The status code.
 */
static void vmmR0RecordRC(PVM pVM, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:                          STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetNormal); break;
        case VINF_EM_RAW_INTERRUPT:                 STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterrupt); break;
        case VINF_EM_RAW_INTERRUPT_HYPER:           STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptHyper); break;
        case VINF_EM_RAW_GUEST_TRAP:                STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGuestTrap); break;
        case VINF_EM_RAW_RING_SWITCH:               STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitch); break;
        case VINF_EM_RAW_RING_SWITCH_INT:           STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitchInt); break;
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:      STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetExceptionPrivilege); break;
        case VINF_EM_RAW_STALE_SELECTOR:            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetStaleSelector); break;
        case VINF_EM_RAW_IRET_TRAP:                 STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIRETTrap); break;
        case VINF_IOM_HC_IOPORT_READ:               STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIORead); break;
        case VINF_IOM_HC_IOPORT_WRITE:              STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIOWrite); break;
        case VINF_IOM_HC_MMIO_READ:                 STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIORead); break;
        case VINF_IOM_HC_MMIO_WRITE:                STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOWrite); break;
        case VINF_IOM_HC_MMIO_READ_WRITE:           STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOReadWrite); break;
        case VINF_PATM_HC_MMIO_PATCH_READ:          STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchRead); break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:         STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchWrite); break;
        case VINF_EM_RAW_EMULATE_INSTR:             STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulate); break;
        case VINF_PATCH_EMULATE_INSTR:              STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchEmulate); break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:   STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLDTFault); break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:   STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGDTFault); break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:   STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIDTFault); break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:   STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTSSFault); break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDFault); break;
        case VINF_CSAM_PENDING_ACTION:              STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCSAMTask); break;
        case VINF_PGM_SYNC_CR3:                     STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetSyncCR3); break;
        case VINF_PATM_PATCH_INT3:                  STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchInt3); break;
        case VINF_PATM_PATCH_TRAP_PF:               STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchPF); break;
        case VINF_PATM_PATCH_TRAP_GP:               STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchGP); break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:      STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchIretIRQ); break;
        case VERR_REM_FLUSHED_PAGES_OVERFLOW:       STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPageOverflow); break;
        case VINF_EM_RESCHEDULE_REM:                STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRescheduleREM); break;
        case VINF_EM_RAW_TO_R3:                     STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetToR3); break;
        case VINF_EM_RAW_TIMER_PENDING:             STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTimerPending); break;
        case VINF_EM_RAW_INTERRUPT_PENDING:         STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptPending); break;

        case VINF_VMM_CALL_HOST:
            /* Attribute the exit to the specific call-host operation. */
            switch (pVM->vmm.s.enmCallHostOperation)
            {
                case VMMCALLHOST_PDM_LOCK:                          STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMLock); break;
                case VMMCALLHOST_PDM_QUEUE_FLUSH:                   STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMQueueFlush); break;
                case VMMCALLHOST_PGM_POOL_GROW:                     STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMPoolGrow); break;
                case VMMCALLHOST_PGM_LOCK:                          STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMLock); break;
                case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:  STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRemReplay); break;
                case VMMCALLHOST_PGM_RAM_GROW_RANGE:                STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMGrowRAM); break;
                case VMMCALLHOST_VMM_LOGGER_FLUSH:                  STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLogFlush); break;
                case VMMCALLHOST_VM_SET_ERROR:                      STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetError); break;
                case VMMCALLHOST_VM_SET_RUNTIME_ERROR:              STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetRuntimeError); break;
                case VMMCALLHOST_VM_R0_HYPER_ASSERTION:             STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetHyperAssertion); break;
                default:                                            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCallHost); break;
            }
            break;

        case VINF_PATM_DUPLICATE_FUNCTION:          STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPATMDuplicateFn); break;
        case VINF_PGM_CHANGE_MODE:                  STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMChangeMode); break;
        case VINF_EM_RAW_EMULATE_INSTR_HLT:         STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulHlt); break;
        case VINF_EM_PENDING_REQUEST:               STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPendingRequest); break;
        default:                                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMisc); break;
    }
}
#endif /* VBOX_WITH_STATISTICS */
455
456
457
458/**
459 * The Ring 0 entry point, called by the interrupt gate.
460 *
461 * @returns VBox status code.
462 * @param pVM The VM to operate on.
463 * @param enmOperation Which operation to execute.
464 * @param pvArg Argument to the operation.
465 * @remarks Assume called with interrupts disabled.
466 */
467VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
468{
469 switch (enmOperation)
470 {
471#ifdef VBOX_WITH_IDT_PATCHING
472 /*
473 * Switch to GC.
474 * These calls return whatever the GC returns.
475 */
476 case VMMR0_DO_RAW_RUN:
477 {
478 /* Safety precaution as VMX disables the switcher. */
479 Assert(!pVM->vmm.s.fSwitcherDisabled);
480 if (pVM->vmm.s.fSwitcherDisabled)
481 return VERR_NOT_SUPPORTED;
482
483 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
484 register int rc;
485 pVM->vmm.s.iLastGCRc = rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
486
487#ifdef VBOX_WITH_STATISTICS
488 vmmR0RecordRC(pVM, rc);
489#endif
490
491 /*
492 * We'll let TRPM change the stack frame so our return is different.
493 * Just keep in mind that after the call, things have changed!
494 */
495 if ( rc == VINF_EM_RAW_INTERRUPT
496 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
497 {
498 /*
499 * Don't trust the compiler to get this right.
500 * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
501 * mode too because we push the arguments on the stack in the IDT patch code.
502 */
503# if defined(__GNUC__)
504 void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
505# elif defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
506 void *pvRet = (uint8_t *)_AddressOfReturnAddress();
507# elif defined(RT_ARCH_X86)
508 void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
509# else
510# error "huh?"
511# endif
512 if ( ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
513 && ((uintptr_t *)pvRet)[2] == (uintptr_t)enmOperation
514 && ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
515 TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
516 else
517 {
518# if defined(DEBUG) || defined(LOG_ENABLED)
519 static bool s_fHaveWarned = false;
520 if (!s_fHaveWarned)
521 {
522 s_fHaveWarned = true;
523 RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
524 RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
525 }
526# endif
527 TRPMR0DispatchHostInterrupt(pVM);
528 }
529 }
530 return rc;
531 }
532
533 /*
534 * Switch to GC to execute Hypervisor function.
535 */
536 case VMMR0_DO_CALL_HYPERVISOR:
537 {
538 /* Safety precaution as VMX disables the switcher. */
539 Assert(!pVM->vmm.s.fSwitcherDisabled);
540 if (pVM->vmm.s.fSwitcherDisabled)
541 return VERR_NOT_SUPPORTED;
542
543 RTCCUINTREG fFlags = ASMIntDisableFlags();
544 int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
545 /** @todo dispatch interrupts? */
546 ASMSetFlags(fFlags);
547 return rc;
548 }
549
550 /*
551 * For profiling.
552 */
553 case VMMR0_DO_NOP:
554 return VINF_SUCCESS;
555#endif /* VBOX_WITH_IDT_PATCHING */
556
557 default:
558 /*
559 * We're returning VERR_NOT_SUPPORT here so we've got something else
560 * than -1 which the interrupt gate glue code might return.
561 */
562 Log(("operation %#x is not supported\n", enmOperation));
563 return VERR_NOT_SUPPORTED;
564 }
565}
566
567
568/**
569 * The Ring 0 entry point, called by the fast-ioctl path.
570 *
571 * @param pVM The VM to operate on.
572 * The return code is stored in pVM->vmm.s.iLastGCRc.
573 * @param enmOperation Which operation to execute.
574 * @remarks Assume called with interrupts _enabled_.
575 */
576VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMMR0OPERATION enmOperation)
577{
578 switch (enmOperation)
579 {
580 /*
581 * Switch to GC and run guest raw mode code.
582 * Disable interrupts before doing the world switch.
583 */
584 case VMMR0_DO_RAW_RUN:
585 {
586 /* Safety precaution as hwaccm disables the switcher. */
587 if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
588 {
589 RTCCUINTREG uFlags = ASMIntDisableFlags();
590
591 TMNotifyStartOfExecution(pVM);
592 int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
593 pVM->vmm.s.iLastGCRc = rc;
594 TMNotifyEndOfExecution(pVM);
595
596 if ( rc == VINF_EM_RAW_INTERRUPT
597 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
598 TRPMR0DispatchHostInterrupt(pVM);
599
600 ASMSetFlags(uFlags);
601
602#ifdef VBOX_WITH_STATISTICS
603 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
604 vmmR0RecordRC(pVM, rc);
605#endif
606 }
607 else
608 {
609 Assert(!pVM->vmm.s.fSwitcherDisabled);
610 pVM->vmm.s.iLastGCRc = VERR_NOT_SUPPORTED;
611 }
612 break;
613 }
614
615 /*
616 * Run guest code using the available hardware acceleration technology.
617 *
618 * Disable interrupts before we do anything interesting. On Windows we avoid
619 * this by having the support driver raise the IRQL before calling us, this way
620 * we hope to get away we page faults and later calling into the kernel.
621 */
622 case VMMR0_DO_HWACC_RUN:
623 {
624 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
625
626#ifndef RT_OS_WINDOWS /** @todo check other hosts */
627 RTCCUINTREG uFlags = ASMIntDisableFlags();
628#endif
629 int rc = HWACCMR0Enter(pVM);
630 if (VBOX_SUCCESS(rc))
631 {
632 rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM); /* this may resume code. */
633 int rc2 = HWACCMR0Leave(pVM);
634 AssertRC(rc2);
635 }
636 pVM->vmm.s.iLastGCRc = rc;
637#ifndef RT_OS_WINDOWS /** @todo check other hosts */
638 ASMSetFlags(uFlags);
639#endif
640
641#ifdef VBOX_WITH_STATISTICS
642 vmmR0RecordRC(pVM, rc);
643#endif
644 /* No special action required for external interrupts, just return. */
645 break;
646 }
647
648 /*
649 * For profiling.
650 */
651 case VMMR0_DO_NOP:
652 pVM->vmm.s.iLastGCRc = VINF_SUCCESS;
653 break;
654
655 /*
656 * Impossible.
657 */
658 default:
659 AssertMsgFailed(("%#x\n", enmOperation));
660 pVM->vmm.s.iLastGCRc = VERR_NOT_SUPPORTED;
661 break;
662 }
663}
664
665
666/**
667 * Validates a session or VM session argument.
668 *
669 * @returns true / false accordingly.
670 * @param pVM The VM argument.
671 * @param pSession The session argument.
672 */
673DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
674{
675 /* This must be set! */
676 if (!pSession)
677 return false;
678
679 /* Only one out of the two. */
680 if (pVM && pClaimedSession)
681 return false;
682 if (pVM)
683 pClaimedSession = pVM->pSession;
684 return pClaimedSession == pSession;
685}
686
687
688/**
689 * VMMR0EntryEx worker function, either called directly or when ever possible
690 * called thru a longjmp so we can exit safely on failure.
691 *
692 * @returns VBox status code.
693 * @param pVM The VM to operate on.
694 * @param enmOperation Which operation to execute.
695 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
696 * The support driver validates this if it's present.
697 * @param u64Arg Some simple constant argument.
698 * @param pSession The session of the caller.
699 * @remarks Assume called with interrupts _enabled_.
700 */
701static int vmmR0EntryExWorker(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
702{
703 /*
704 * Common VM pointer validation.
705 */
706 if (pVM)
707 {
708 if (RT_UNLIKELY( !VALID_PTR(pVM)
709 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
710 {
711 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
712 return VERR_INVALID_POINTER;
713 }
714 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
715 || pVM->enmVMState > VMSTATE_TERMINATED
716 || pVM->pVMR0 != pVM))
717 {
718 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
719 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
720 return VERR_INVALID_POINTER;
721 }
722 }
723
724 switch (enmOperation)
725 {
726 /*
727 * GVM requests
728 */
729 case VMMR0_DO_GVMM_CREATE_VM:
730 if (pVM || u64Arg)
731 return VERR_INVALID_PARAMETER;
732 return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
733
734 case VMMR0_DO_GVMM_DESTROY_VM:
735 if (pReqHdr || u64Arg)
736 return VERR_INVALID_PARAMETER;
737 return GVMMR0DestroyVM(pVM);
738
739 case VMMR0_DO_GVMM_SCHED_HALT:
740 if (pReqHdr)
741 return VERR_INVALID_PARAMETER;
742 return GVMMR0SchedHalt(pVM, u64Arg);
743
744 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
745 if (pReqHdr || u64Arg)
746 return VERR_INVALID_PARAMETER;
747 return GVMMR0SchedWakeUp(pVM);
748
749 case VMMR0_DO_GVMM_SCHED_POLL:
750 if (pReqHdr || u64Arg > 1)
751 return VERR_INVALID_PARAMETER;
752 return GVMMR0SchedPoll(pVM, !!u64Arg);
753
754 case VMMR0_DO_GVMM_QUERY_STATISTICS:
755 if (u64Arg)
756 return VERR_INVALID_PARAMETER;
757 return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
758
759 case VMMR0_DO_GVMM_RESET_STATISTICS:
760 if (u64Arg)
761 return VERR_INVALID_PARAMETER;
762 return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
763
764 /*
765 * Initialize the R0 part of a VM instance.
766 */
767 case VMMR0_DO_VMMR0_INIT:
768 return VMMR0Init(pVM, (uint32_t)u64Arg);
769
770 /*
771 * Terminate the R0 part of a VM instance.
772 */
773 case VMMR0_DO_VMMR0_TERM:
774 return VMMR0Term(pVM);
775
776 /*
777 * Attempt to enable hwacc mode and check the current setting.
778 *
779 */
780 case VMMR0_DO_HWACC_ENABLE:
781 return HWACCMR0EnableAllCpus(pVM, (HWACCMSTATE)u64Arg);
782
783 /*
784 * Setup the hardware accelerated raw-mode session.
785 */
786 case VMMR0_DO_HWACC_SETUP_VM:
787 {
788 RTCCUINTREG fFlags = ASMIntDisableFlags();
789 int rc = HWACCMR0SetupVM(pVM);
790 ASMSetFlags(fFlags);
791 return rc;
792 }
793
794 /*
795 * Switch to GC to execute Hypervisor function.
796 */
797 case VMMR0_DO_CALL_HYPERVISOR:
798 {
799 /* Safety precaution as HWACCM can disable the switcher. */
800 Assert(!pVM->vmm.s.fSwitcherDisabled);
801 if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
802 return VERR_NOT_SUPPORTED;
803
804 RTCCUINTREG fFlags = ASMIntDisableFlags();
805 int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
806 /** @todo dispatch interrupts? */
807 ASMSetFlags(fFlags);
808 return rc;
809 }
810
811 /*
812 * PGM wrappers.
813 */
814 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
815 return PGMR0PhysAllocateHandyPages(pVM);
816
817 /*
818 * GMM wrappers.
819 */
820 case VMMR0_DO_GMM_INITIAL_RESERVATION:
821 if (u64Arg)
822 return VERR_INVALID_PARAMETER;
823 return GMMR0InitialReservationReq(pVM, (PGMMINITIALRESERVATIONREQ)pReqHdr);
824 case VMMR0_DO_GMM_UPDATE_RESERVATION:
825 if (u64Arg)
826 return VERR_INVALID_PARAMETER;
827 return GMMR0UpdateReservationReq(pVM, (PGMMUPDATERESERVATIONREQ)pReqHdr);
828
829 case VMMR0_DO_GMM_ALLOCATE_PAGES:
830 if (u64Arg)
831 return VERR_INVALID_PARAMETER;
832 return GMMR0AllocatePagesReq(pVM, (PGMMALLOCATEPAGESREQ)pReqHdr);
833 case VMMR0_DO_GMM_FREE_PAGES:
834 if (u64Arg)
835 return VERR_INVALID_PARAMETER;
836 return GMMR0FreePagesReq(pVM, (PGMMFREEPAGESREQ)pReqHdr);
837 case VMMR0_DO_GMM_BALLOONED_PAGES:
838 if (u64Arg)
839 return VERR_INVALID_PARAMETER;
840 return GMMR0BalloonedPagesReq(pVM, (PGMMBALLOONEDPAGESREQ)pReqHdr);
841 case VMMR0_DO_GMM_DEFLATED_BALLOON:
842 if (pReqHdr)
843 return VERR_INVALID_PARAMETER;
844 return GMMR0DeflatedBalloon(pVM, (uint32_t)u64Arg);
845
846 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
847 if (u64Arg)
848 return VERR_INVALID_PARAMETER;
849 return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
850 case VMMR0_DO_GMM_SEED_CHUNK:
851 if (pReqHdr)
852 return VERR_INVALID_PARAMETER;
853 return GMMR0SeedChunk(pVM, (RTR3PTR)u64Arg);
854
855 /*
856 * A quick GCFGM mock-up.
857 */
858 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
859 case VMMR0_DO_GCFGM_SET_VALUE:
860 case VMMR0_DO_GCFGM_QUERY_VALUE:
861 {
862 if (pVM || !pReqHdr || u64Arg)
863 return VERR_INVALID_PARAMETER;
864 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
865 if (pReq->Hdr.cbReq != sizeof(*pReq))
866 return VERR_INVALID_PARAMETER;
867 int rc;
868 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
869 {
870 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
871 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
872 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
873 }
874 else
875 {
876 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
877 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
878 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
879 }
880 return rc;
881 }
882
883
884#ifdef VBOX_WITH_INTERNAL_NETWORKING
885 /*
886 * Requests to the internal networking service.
887 */
888 case VMMR0_DO_INTNET_OPEN:
889 {
890 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
891 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession))
892 return VERR_INVALID_PARAMETER;
893 if (!g_pIntNet)
894 return VERR_NOT_SUPPORTED;
895 return INTNETR0OpenReq(g_pIntNet, pSession, pReq);
896 }
897
898 case VMMR0_DO_INTNET_IF_CLOSE:
899 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession))
900 return VERR_INVALID_PARAMETER;
901 if (!g_pIntNet)
902 return VERR_NOT_SUPPORTED;
903 return INTNETR0IfCloseReq(g_pIntNet, pSession, (PINTNETIFCLOSEREQ)pReqHdr);
904
905 case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
906 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETRING3BUFFERREQ)pReqHdr)->pSession, pSession))
907 return VERR_INVALID_PARAMETER;
908 if (!g_pIntNet)
909 return VERR_NOT_SUPPORTED;
910 return INTNETR0IfGetRing3BufferReq(g_pIntNet, pSession, (PINTNETIFGETRING3BUFFERREQ)pReqHdr);
911
912 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
913 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession))
914 return VERR_INVALID_PARAMETER;
915 if (!g_pIntNet)
916 return VERR_NOT_SUPPORTED;
917 return INTNETR0IfSetPromiscuousModeReq(g_pIntNet, pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
918
919 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
920 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession))
921 return VERR_INVALID_PARAMETER;
922 if (!g_pIntNet)
923 return VERR_NOT_SUPPORTED;
924 return INTNETR0IfSetMacAddressReq(g_pIntNet, pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
925
926 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
927 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession))
928 return VERR_INVALID_PARAMETER;
929 if (!g_pIntNet)
930 return VERR_NOT_SUPPORTED;
931 return INTNETR0IfSetActiveReq(g_pIntNet, pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
932
933 case VMMR0_DO_INTNET_IF_SEND:
934 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession))
935 return VERR_INVALID_PARAMETER;
936 if (!g_pIntNet)
937 return VERR_NOT_SUPPORTED;
938 return INTNETR0IfSendReq(g_pIntNet, pSession, (PINTNETIFSENDREQ)pReqHdr);
939
940 case VMMR0_DO_INTNET_IF_WAIT:
941 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession))
942 return VERR_INVALID_PARAMETER;
943 if (!g_pIntNet)
944 return VERR_NOT_SUPPORTED;
945 return INTNETR0IfWaitReq(g_pIntNet, pSession, (PINTNETIFWAITREQ)pReqHdr);
946#endif /* VBOX_WITH_INTERNAL_NETWORKING */
947
948 /*
949 * For profiling.
950 */
951 case VMMR0_DO_NOP:
952 case VMMR0_DO_SLOW_NOP:
953 return VINF_SUCCESS;
954
955 /*
956 * For testing Ring-0 APIs invoked in this environment.
957 */
958 case VMMR0_DO_TESTS:
959 /** @todo make new test */
960 return VINF_SUCCESS;
961
962
963 default:
964 /*
965 * We're returning VERR_NOT_SUPPORT here so we've got something else
966 * than -1 which the interrupt gate glue code might return.
967 */
968 Log(("operation %#x is not supported\n", enmOperation));
969 return VERR_NOT_SUPPORTED;
970 }
971}
972
973
974/**
975 * Argument for vmmR0EntryExWrapper containing the argument s ofr VMMR0EntryEx.
976 */
977typedef struct VMMR0ENTRYEXARGS
978{
979 PVM pVM;
980 VMMR0OPERATION enmOperation;
981 PSUPVMMR0REQHDR pReq;
982 uint64_t u64Arg;
983 PSUPDRVSESSION pSession;
984} VMMR0ENTRYEXARGS;
985/** Pointer to a vmmR0EntryExWrapper argument package. */
986typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
987
988/**
989 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
990 *
991 * @returns VBox status code.
992 * @param pvArgs The argument package
993 */
994static int vmmR0EntryExWrapper(void *pvArgs)
995{
996 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
997 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
998 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
999 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1000 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1001}
1002
1003
1004/**
1005 * The Ring 0 entry point, called by the support library (SUP).
1006 *
1007 * @returns VBox status code.
1008 * @param pVM The VM to operate on.
1009 * @param enmOperation Which operation to execute.
1010 * @param pReq This points to a SUPVMMR0REQHDR packet. Optional.
1011 * @param u64Arg Some simple constant argument.
1012 * @param pSession The session of the caller.
1013 * @remarks Assume called with interrupts _enabled_.
1014 */
1015VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1016{
1017 /*
1018 * Requests that should only happen on the EMT thread will be
1019 * wrapped in a setjmp so we can assert without causing trouble.
1020 */
1021 if ( VALID_PTR(pVM)
1022 && pVM->pVMR0)
1023 {
1024 switch (enmOperation)
1025 {
1026 case VMMR0_DO_VMMR0_INIT:
1027 case VMMR0_DO_VMMR0_TERM:
1028 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1029 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1030 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1031 case VMMR0_DO_GMM_FREE_PAGES:
1032 case VMMR0_DO_GMM_BALLOONED_PAGES:
1033 case VMMR0_DO_GMM_DEFLATED_BALLOON:
1034 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1035 case VMMR0_DO_GMM_SEED_CHUNK:
1036 {
1037 /** @todo validate this EMT claim... GVM knows. */
1038 VMMR0ENTRYEXARGS Args;
1039 Args.pVM = pVM;
1040 Args.enmOperation = enmOperation;
1041 Args.pReq = pReq;
1042 Args.u64Arg = u64Arg;
1043 Args.pSession = pSession;
1044 return vmmR0CallHostSetJmpEx(&pVM->vmm.s.CallHostR0JmpBuf, vmmR0EntryExWrapper, &Args);
1045 }
1046
1047 default:
1048 break;
1049 }
1050 }
1051 return vmmR0EntryExWorker(pVM, enmOperation, pReq, u64Arg, pSession);
1052}
1053
1054
1055
/**
 * Internal R0 logger worker: Flush logger.
 *
 * Recovers the VM handle from the logger instance and requests ring-3 to
 * flush the buffered log data (VMMCALLHOST_VMM_LOGGER_FLUSH).
 *
 * @param pLogger The logger instance to flush.
 * @remark This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
    /* The RTLOGGER is embedded in a VMMR0LOGGER; step back to the container. */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  !VALID_PTR(pLogger)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
#endif
        return;
    }

    /* Validate the VM handle before touching it (pVMR0 must be the self pointer). */
    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
#endif
        return;
    }

    /*
     * Check that the jump buffer is armed.
     */
#ifdef RT_ARCH_X86
    if (!pVM->vmm.s.CallHostR0JmpBuf.eip)
#else
    if (!pVM->vmm.s.CallHostR0JmpBuf.rip)
#endif
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
#endif
        /* Can't reach ring-3; drop the buffered data so logging can continue. */
        pLogger->offScratch = 0;
        return;
    }
    VMMR0CallHost(pVM, VMMCALLHOST_VMM_LOGGER_FLUSH, 0);
}
1107
1108
1109
1110/**
1111 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
1112 *
1113 * @returns true if the breakpoint should be hit, false if it should be ignored.
1114 * @remark The RTDECL() makes this a bit difficult to override on windows. Sorry.
1115 */
1116DECLEXPORT(bool) RTCALL RTAssertDoBreakpoint(void)
1117{
1118 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1119 if (pVM)
1120 {
1121#ifdef RT_ARCH_X86
1122 if (pVM->vmm.s.CallHostR0JmpBuf.eip)
1123#else
1124 if (pVM->vmm.s.CallHostR0JmpBuf.rip)
1125#endif
1126 {
1127 int rc = VMMR0CallHost(pVM, VMMCALLHOST_VM_R0_HYPER_ASSERTION, 0);
1128 return RT_FAILURE_NP(rc);
1129 }
1130 }
1131#ifdef RT_OS_LINUX
1132 return true;
1133#else
1134 return false;
1135#endif
1136}
1137
1138
1139
1140/**
1141 * Override this so we can push it up to ring-3.
1142 *
1143 * @param pszExpr Expression. Can be NULL.
1144 * @param uLine Location line number.
1145 * @param pszFile Location file name.
1146 * @param pszFunction Location function name.
1147 */
1148DECLEXPORT(void) RTCALL AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
1149{
1150#ifndef DEBUG_sandervl
1151 SUPR0Printf("\n!!R0-Assertion Failed!!\n"
1152 "Expression: %s\n"
1153 "Location : %s(%d) %s\n",
1154 pszExpr, pszFile, uLine, pszFunction);
1155#endif
1156 LogAlways(("\n!!R0-Assertion Failed!!\n"
1157 "Expression: %s\n"
1158 "Location : %s(%d) %s\n",
1159 pszExpr, pszFile, uLine, pszFunction));
1160
1161 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1162 if (pVM)
1163 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
1164 "\n!!R0-Assertion Failed!!\n"
1165 "Expression: %s\n"
1166 "Location : %s(%d) %s\n",
1167 pszExpr, pszFile, uLine, pszFunction);
1168}
1169
1170
1171/**
1172 * Callback for RTLogFormatV which writes to the ring-3 log port.
1173 * See PFNLOGOUTPUT() for details.
1174 */
1175static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
1176{
1177 for (size_t i = 0; i < cbChars; i++)
1178 {
1179#ifndef DEBUG_sandervl
1180 SUPR0Printf("%c", pachChars[i]);
1181#endif
1182 LogAlways(("%c", pachChars[i]));
1183 }
1184
1185 return cbChars;
1186}
1187
1188
1189DECLEXPORT(void) RTCALL AssertMsg2(const char *pszFormat, ...)
1190{
1191 PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
1192 if (pLog)
1193 {
1194 va_list va;
1195 va_start(va, pszFormat);
1196 RTLogFormatV(rtLogOutput, pLog, pszFormat, va);
1197 va_end(va);
1198
1199 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1200 if (pVM)
1201 {
1202 va_start(va, pszFormat);
1203 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, va);
1204 va_end(va);
1205 }
1206 }
1207}
1208
1209
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette