VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 15144

Last change on this file since 15144 was 14899, checked in by vboxsync, 16 years ago

Wrote testcase for the new switcher.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 39.7 KB
Line 
1/* $Id: VMMR0.cpp 14899 2008-12-02 12:39:34Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VMM
26#include <VBox/vmm.h>
27#include <VBox/sup.h>
28#include <VBox/trpm.h>
29#include <VBox/cpum.h>
30#include <VBox/pgm.h>
31#include <VBox/stam.h>
32#include <VBox/tm.h>
33#include "VMMInternal.h"
34#include <VBox/vm.h>
35#include <VBox/gvmm.h>
36#include <VBox/gmm.h>
37#include <VBox/intnet.h>
38#include <VBox/hwaccm.h>
39#include <VBox/param.h>
40
41#include <VBox/err.h>
42#include <VBox/version.h>
43#include <VBox/log.h>
44#include <iprt/assert.h>
45#include <iprt/stdarg.h>
46#include <iprt/mp.h>
47#include <iprt/string.h>
48
49#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
50# pragma intrinsic(_AddressOfReturnAddress)
51#endif
52
53
54/*******************************************************************************
55* Internal Functions *
56*******************************************************************************/
57__BEGIN_DECLS
58VMMR0DECL(int) ModuleInit(void);
59VMMR0DECL(void) ModuleTerm(void);
60__END_DECLS
61
62
63/*******************************************************************************
64* Global Variables *
65*******************************************************************************/
66/** Pointer to the internal networking service instance. */
67PINTNET g_pIntNet = 0;
68
69
70/**
71 * Initialize the module.
72 * This is called when we're first loaded.
73 *
74 * @returns 0 on success.
75 * @returns VBox status on failure.
76 */
77VMMR0DECL(int) ModuleInit(void)
78{
79 LogFlow(("ModuleInit:\n"));
80
81 /*
82 * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
83 */
84 int rc = GVMMR0Init();
85 if (RT_SUCCESS(rc))
86 {
87 rc = GMMR0Init();
88 if (RT_SUCCESS(rc))
89 {
90 rc = HWACCMR0Init();
91 if (RT_SUCCESS(rc))
92 {
93#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
94 rc = PGMR0DynMapInit();
95#endif
96 if (RT_SUCCESS(rc))
97 {
98 LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
99 g_pIntNet = NULL;
100 LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
101 rc = INTNETR0Create(&g_pIntNet);
102 if (RT_SUCCESS(rc))
103 {
104 LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
105 return VINF_SUCCESS;
106 }
107
108 /* bail out */
109 g_pIntNet = NULL;
110 LogFlow(("ModuleTerm: returns %Rrc\n", rc));
111#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
112 PGMR0DynMapTerm();
113#endif
114 }
115 HWACCMR0Term();
116 }
117 GMMR0Term();
118 }
119 GVMMR0Term();
120 }
121
122 LogFlow(("ModuleInit: failed %Rrc\n", rc));
123 return rc;
124}
125
126
/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 *
 * Teardown happens in the reverse order of ModuleInit: INTNET first,
 * then PGM dynamic mapping (Darwin), HWACCM, GMM and finally GVMM.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Destroy the internal networking instance.
     */
    if (g_pIntNet)
    {
        INTNETR0Destroy(g_pIntNet);
        g_pIntNet = NULL;   /* prevent any use of the stale instance pointer */
    }

    /*
     * PGM (Darwin) and HWACCM global cleanup.
     * Destroy the GMM and GVMM instances.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
    HWACCMR0Term();

    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}
158
159
/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param pVM The VM instance in question.
 * @param uSvnRev The SVN revision of the ring-3 part.
 * @thread EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
        return VERR_VERSION_MISMATCH;
    /* Reject bad pointers and VM structures whose ring-0 self pointer
       doesn't point back at itself. */
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

    /*
     * Register the EMT R0 logger instance.
     */
    PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
#if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned succesfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned succesfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned succesfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
#endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
    }

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                /* Without the 2x4GB hack, rc keeps the CPUMR0Init success
                   value and the following check passes. */
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
                    GVMMR0DoneInitVM(pVM);
                    return rc;
                }

                /* bail out */
            }
            HWACCMR0TermVM(pVM);
        }
    }
    /* On any failure, deregister the per-thread logger installed above. */
    RTLogSetDefaultInstanceThread(NULL, 0);
    return rc;
}
254
255
/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param pVM The VM instance in question.
 * @param pGVM Pointer to the global VM structure. Optional.
 * @thread EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
    /*
     * Tell GVMM what we're up to and check that we only do this once.
     * GVMMR0DoingTermVM returns false on repeat calls, so the per-VM
     * teardown below runs at most once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.  Done unconditionally; safe even when no
     * per-thread logger was installed.
     */
    RTLogSetDefaultInstanceThread(NULL, 0);
    return VINF_SUCCESS;
}
288
289
290/**
291 * Calls the ring-3 host code.
292 *
293 * @returns VBox status code of the ring-3 call.
294 * @param pVM The VM handle.
295 * @param enmOperation The operation.
296 * @param uArg The argument to the operation.
297 */
298VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
299{
300/** @todo profile this! */
301 pVM->vmm.s.enmCallHostOperation = enmOperation;
302 pVM->vmm.s.u64CallHostArg = uArg;
303 pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
304 int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
305 if (rc == VINF_SUCCESS)
306 rc = pVM->vmm.s.rcCallHost;
307 return rc;
308}
309
310
#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * Maps each known GC/ring-0 exit status onto its dedicated STAM counter;
 * VINF_VMM_CALL_HOST is further broken down by the pending call-host
 * operation.  Unrecognized codes land in StatRZRetMisc.
 *
 * @param pVM The VM handle.
 * @param rc The status code.
 */
static void vmmR0RecordRC(PVM pVM, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetExceptionPrivilege);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VERR_REM_FLUSHED_PAGES_OVERFLOW:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPageOverflow);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            /* Attribute the exit to the specific ring-3 call being made. */
            switch (pVM->vmm.s.enmCallHostOperation)
            {
                case VMMCALLHOST_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLHOST_PDM_QUEUE_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMQueueFlush);
                    break;
                case VMMCALLHOST_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLHOST_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLHOST_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
#ifndef VBOX_WITH_NEW_PHYS_CODE
                case VMMCALLHOST_PGM_RAM_GROW_RANGE:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMGrowRAM);
                    break;
#endif
                case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLHOST_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLHOST_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLHOST_VM_R0_ASSERTION:
                default:
                    /* deliberate fallthrough: assertions and unknown ops share a counter */
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallHost);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulHlt);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */
488
489
490/**
491 * Unused ring-0 entry point that used to be called from the interrupt gate.
492 *
493 * Will be removed one of the next times we do a major SUPDrv version bump.
494 *
495 * @returns VBox status code.
496 * @param pVM The VM to operate on.
497 * @param enmOperation Which operation to execute.
498 * @param pvArg Argument to the operation.
499 * @remarks Assume called with interrupts disabled.
500 */
501VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
502{
503 switch (enmOperation)
504 {
505 default:
506 /*
507 * We're returning VERR_NOT_SUPPORT here so we've got something else
508 * than -1 which the interrupt gate glue code might return.
509 */
510 Log(("operation %#x is not supported\n", enmOperation));
511 return VERR_NOT_SUPPORTED;
512 }
513}
514
515
/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param pVM The VM to operate on.
 * The return code is stored in pVM->vmm.s.iLastGZRc.
 * @param idCpu VMCPU id.
 * @param enmOperation Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, unsigned idCpu, VMMR0OPERATION enmOperation)
{
    /* Reject out-of-range VCPU ids before touching per-CPU state. */
    if (RT_UNLIKELY(idCpu >= pVM->cCPUs))
    {
        pVM->vmm.s.iLastGZRc = VERR_INVALID_PARAMETER;
        return;
    }

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as hwaccm disables the switcher. */
            if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
            {
                RTCCUINTREG uFlags = ASMIntDisableFlags();

                TMNotifyStartOfExecution(pVM);
                int rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                pVM->vmm.s.iLastGZRc = rc;
                TMNotifyEndOfExecution(pVM);

                /* Re-deliver host interrupts that fired while in guest
                   context before re-enabling interrupts. */
                if (    rc == VINF_EM_RAW_INTERRUPT
                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);

                ASMSetFlags(uFlags);    /* restore the caller's interrupt state */

#ifdef VBOX_WITH_STATISTICS
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                vmmR0RecordRC(pVM, rc);
#endif
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVM->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            int rc;
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);

#ifndef RT_OS_WINDOWS /** @todo check other hosts */
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVM->vmm.s.iLastGZRc = rc;
#ifndef RT_OS_WINDOWS /** @todo check other hosts */
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVM->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVM->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
630
631
632/**
633 * Validates a session or VM session argument.
634 *
635 * @returns true / false accordingly.
636 * @param pVM The VM argument.
637 * @param pSession The session argument.
638 */
639DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
640{
641 /* This must be set! */
642 if (!pSession)
643 return false;
644
645 /* Only one out of the two. */
646 if (pVM && pClaimedSession)
647 return false;
648 if (pVM)
649 pClaimedSession = pVM->pSession;
650 return pClaimedSession == pSession;
651}
652
653
/**
 * VMMR0EntryEx worker function, either called directly or when ever possible
 * called thru a longjmp so we can exit safely on failure.
 *
 * Dispatches on enmOperation; each case validates the argument combination
 * (pVM / pReqHdr / u64Arg / session) it expects before calling the target
 * subsystem.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param enmOperation Which operation to execute.
 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
 * The support driver validates this if it's present.
 * @param u64Arg Some simple constant argument.
 * @param pSession The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        /* The VM structure must be page aligned. */
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        /* Sanity-check state and the ring-0 self pointer. */
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }
    }

    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, u64Arg);    /* u64Arg = expiry time */

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, !!u64Arg);  /* u64Arg = fYield flag */

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);  /* u64Arg = ring-3 SVN revision */

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         *
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM, (HWACCMSTATE)u64Arg);

        /*
         * Setup the hardware accelerated raw-mode session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
        {
            /* Interrupts must be off while fiddling with VT-x/AMD-V state. */
            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = HWACCMR0SetupVM(pVM);
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * Switch to GC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            return PGMR0PhysAllocateHandyPages(pVM);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, (PGMMINITIALRESERVATIONREQ)pReqHdr);
        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, (PGMMALLOCATEPAGESREQ)pReqHdr);
        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, (PGMMFREEPAGESREQ)pReqHdr);
        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, (PGMMBALLOONEDPAGESREQ)pReqHdr);
        case VMMR0_DO_GMM_DEFLATED_BALLOON:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0DeflatedBalloon(pVM, (uint32_t)u64Arg);     /* u64Arg = page count */

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, (RTR3PTR)u64Arg);            /* u64Arg = ring-3 chunk address */

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }


        /*
         * Requests to the internal networking service.
         * Each request's claimed session must match the caller's session,
         * and the service must have been created by ModuleInit.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0OpenReq(g_pIntNet, pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfCloseReq(g_pIntNet, pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETRING3BUFFERREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfGetRing3BufferReq(g_pIntNet, pSession, (PINTNETIFGETRING3BUFFERREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSetPromiscuousModeReq(g_pIntNet, pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSetMacAddressReq(g_pIntNet, pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSetActiveReq(g_pIntNet, pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSendReq(g_pIntNet, pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfWaitReq(g_pIntNet, pSession, (PINTNETIFWAITREQ)pReqHdr);

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if defined(DEBUG) && HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
        case VMMR0_DO_TEST_SWITCHER3264:
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}
940
941
/**
 * Argument package for vmmR0EntryExWrapper containing the arguments for
 * VMMR0EntryEx, marshalled through the setjmp wrapper's single void pointer.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM pVM;                        /**< The VM handle. */
    VMMR0OPERATION enmOperation;    /**< The operation to execute. */
    PSUPVMMR0REQHDR pReq;           /**< Optional request packet. */
    uint64_t u64Arg;                /**< Simple constant argument. */
    PSUPDRVSESSION pSession;        /**< The caller's session. */
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
955
956/**
957 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
958 *
959 * @returns VBox status code.
960 * @param pvArgs The argument package
961 */
962static int vmmR0EntryExWrapper(void *pvArgs)
963{
964 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
965 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
966 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
967 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
968 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
969}
970
971
/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param enmOperation Which operation to execute.
 * @param pReq This points to a SUPVMMR0REQHDR packet. Optional.
 * @param u64Arg Some simple constant argument.
 * @param pSession The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            case VMMR0_DO_GMM_DEFLATED_BALLOON:
            case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            case VMMR0_DO_GMM_SEED_CHUNK:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                /* No armed jump buffer yet -> fall through to the direct
                   (unwrapped) worker call below. */
                if (!pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                /* Package the arguments so the wrapper can forward them
                   through the single void pointer of the setjmp API. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallHostSetJmpEx(&pVM->vmm.s.CallHostR0JmpBuf, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    /* Everything else is dispatched directly without setjmp protection. */
    return vmmR0EntryExWorker(pVM, enmOperation, pReq, u64Arg, pSession);
}
1026
1027
/**
 * Internal R0 logger worker: Flush logger.
 *
 * Recovers the owning VM from the embedded logger instance and forwards the
 * flush to ring-3 via VMMR0CallHost, provided all sanity checks pass and the
 * long-jump buffer is armed.
 *
 * @param pLogger The logger instance to flush.
 * @remark This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
    /* The logger is embedded in a VMMR0LOGGER; step back to the wrapper. */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
#endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
#endif
        return;
    }

    /*
     * Check that the jump buffer is armed.  Flushing needs a long jump to
     * ring-3, which is only possible when the buffer has been set up.
     */
#ifdef RT_ARCH_X86
    if (!pVM->vmm.s.CallHostR0JmpBuf.eip)
#else
    if (!pVM->vmm.s.CallHostR0JmpBuf.rip)
#endif
    {
#ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
#endif
        return;
    }
    VMMR0CallHost(pVM, VMMCALLHOST_VMM_LOGGER_FLUSH, 0);
}
1079
1080
1081/**
1082 * Disables flushing of the ring-0 debug log.
1083 *
1084 * @param pVCpu The shared virtual cpu structure.
1085 */
1086VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
1087{
1088 PVM pVM = pVCpu->pVMR0;
1089 if (pVM->vmm.s.pR0LoggerR0)
1090 pVM->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
1091}
1092
1093
1094/**
1095 * Enables flushing of the ring-0 debug log.
1096 *
1097 * @param pVCpu The shared virtual cpu structure.
1098 */
1099VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
1100{
1101 PVM pVM = pVCpu->pVMR0;
1102 if (pVM->vmm.s.pR0LoggerR0)
1103 pVM->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
1104}
1105
1106
/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0 /* Flip to 1 to always hit the ring-0 breakpoint (debugging aid). */
    return true;
#else
    /* If the current thread is an EMT of some VM and its longjmp buffer is
       armed, route the assertion to ring-3 rather than panicking here. */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
#ifdef RT_ARCH_X86
        if (pVM->vmm.s.CallHostR0JmpBuf.eip)
#else
        if (pVM->vmm.s.CallHostR0JmpBuf.rip)
#endif
        {
            /* A ring-3 trip that returns success means "don't panic". */
            int rc = VMMR0CallHost(pVM, VMMCALLHOST_VM_R0_ASSERTION, 0);
            return RT_FAILURE_NP(rc);
        }
    }
    /* Not an EMT (or no armed jump buffer): hit the breakpoint on linux,
       ignore it on the other hosts. */
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}
1137
1138
/**
 * Override this so we can push it up to ring-3.
 *
 * Writes the assertion location to the kernel log and the release log, and
 * stashes a copy in the VM structure so ring-3 can display it after the
 * longjmp performed by RTAssertShouldPanic().
 *
 * @param   pszExpr         Expression. Can be NULL.
 * @param   uLine           Location line number.
 * @param   pszFile         Location file name.
 * @param   pszFunction     Location function name.
 */
DECLEXPORT(void) RTCALL AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /* SUPR0Printf is skipped for DEBUG_sandervl builds and on darwin (where
       RTAssertMsg1 below handles the console output instead). */
#if !defined(DEBUG_sandervl) && !defined(RT_OS_DARWIN)
    SUPR0Printf("\n!!R0-Assertion Failed!!\n"
                "Expression: %s\n"
                "Location : %s(%d) %s\n",
                pszExpr, pszFile, uLine, pszFunction);
#endif
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /* Save the message in the VM structure (if we can find the VM for this
       EMT) so ring-3 code can pick it up after the longjmp. */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);
#ifdef RT_OS_DARWIN
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
#endif
}
1171
1172
1173/**
1174 * Callback for RTLogFormatV which writes to the ring-3 log port.
1175 * See PFNLOGOUTPUT() for details.
1176 */
1177static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
1178{
1179 for (size_t i = 0; i < cbChars; i++)
1180 {
1181#if !defined(DEBUG_sandervl) && !defined(RT_OS_DARWIN)
1182 SUPR0Printf("%c", pachChars[i]);
1183#endif
1184 LogAlways(("%c", pachChars[i]));
1185 }
1186
1187 return cbChars;
1188}
1189
1190
1191DECLEXPORT(void) RTCALL AssertMsg2(const char *pszFormat, ...)
1192{
1193 va_list va;
1194
1195 PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
1196 if (pLog)
1197 {
1198 va_start(va, pszFormat);
1199 RTLogFormatV(rtLogOutput, pLog, pszFormat, va);
1200 va_end(va);
1201
1202 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1203 if (pVM)
1204 {
1205 va_start(va, pszFormat);
1206 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, va);
1207 va_end(va);
1208 }
1209 }
1210
1211#ifdef RT_OS_DARWIN
1212 va_start(va, pszFormat);
1213 RTAssertMsg2V(pszFormat, va);
1214 va_end(va);
1215#endif
1216}
1217
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette