VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 14446

Last change on this file since 14446 was 14114, checked in by vboxsync, 16 years ago

#1865: Some structures and external API for the ring-0 dynamic mapping cache (Darwin only). Some of these APIs will be / are shared with RC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 41.0 KB
Line 
1/* $Id: VMMR0.cpp 14114 2008-11-11 23:37:04Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VMM
26#include <VBox/vmm.h>
27#include <VBox/sup.h>
28#include <VBox/trpm.h>
29#include <VBox/cpum.h>
30#include <VBox/pgm.h>
31#include <VBox/stam.h>
32#include <VBox/tm.h>
33#include "VMMInternal.h"
34#include <VBox/vm.h>
35#include <VBox/gvmm.h>
36#include <VBox/gmm.h>
37#include <VBox/intnet.h>
38#include <VBox/hwaccm.h>
39#include <VBox/param.h>
40
41#include <VBox/err.h>
42#include <VBox/version.h>
43#include <VBox/log.h>
44#include <iprt/assert.h>
45#include <iprt/stdarg.h>
46#include <iprt/mp.h>
47#include <iprt/string.h>
48
49#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
50# pragma intrinsic(_AddressOfReturnAddress)
51#endif
52
53
54/*******************************************************************************
55* Internal Functions *
56*******************************************************************************/
57static int VMMR0Init(PVM pVM, uint32_t uSvnRev);
58static int VMMR0Term(PVM pVM);
59__BEGIN_DECLS
60VMMR0DECL(int) ModuleInit(void);
61VMMR0DECL(void) ModuleTerm(void);
62__END_DECLS
63
64
65/*******************************************************************************
66* Global Variables *
67*******************************************************************************/
68/** Pointer to the internal networking service instance. */
69PINTNET g_pIntNet = 0;
70
71
72/**
73 * Initialize the module.
74 * This is called when we're first loaded.
75 *
76 * @returns 0 on success.
77 * @returns VBox status on failure.
78 */
79VMMR0DECL(int) ModuleInit(void)
80{
81 LogFlow(("ModuleInit:\n"));
82
83 /*
84 * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
85 */
86 int rc = GVMMR0Init();
87 if (RT_SUCCESS(rc))
88 {
89 rc = GMMR0Init();
90 if (RT_SUCCESS(rc))
91 {
92 rc = HWACCMR0Init();
93 if (RT_SUCCESS(rc))
94 {
95#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
96 rc = PGMR0DynMapInit();
97#endif
98 if (RT_SUCCESS(rc))
99 {
100 LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
101 g_pIntNet = NULL;
102 LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
103 rc = INTNETR0Create(&g_pIntNet);
104 if (RT_SUCCESS(rc))
105 {
106 LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
107 return VINF_SUCCESS;
108 }
109
110 /* bail out */
111 g_pIntNet = NULL;
112 LogFlow(("ModuleTerm: returns %Rrc\n", rc));
113#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
114 PGMR0DynMapTerm();
115#endif
116 }
117 HWACCMR0Term();
118 }
119 GMMR0Term();
120 }
121 GVMMR0Term();
122 }
123
124 LogFlow(("ModuleInit: failed %Rrc\n", rc));
125 return rc;
126}
127
128
/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 *
 * Tears down the global services in the reverse order of their
 * initialization in ModuleInit: INTNET first, then the Darwin-only
 * dynamic mapping cache, HWACCM, GMM and finally GVMM.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Destroy the internal networking instance.
     */
    if (g_pIntNet)
    {
        INTNETR0Destroy(g_pIntNet);
        g_pIntNet = NULL;  /* prevent any further use of the destroyed instance */
    }

    /*
     * PGM (Darwin) and HWACCM global cleanup.
     * Destroy the GMM and GVMM instances.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
    HWACCMR0Term();

    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}
160
161
/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * Verifies that ring-3 and ring-0 come from the same SVN revision,
 * registers the per-EMT ring-0 logger, and initializes the per-VM data
 * of GVMM, HWACCM, CPUM and (on Darwin) the dynamic mapping cache.
 * On failure the already-initialized parts are undone and the logger is
 * deregistered again.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
        return VERR_VERSION_MISMATCH;
    /* The VM structure must be page aligned and self-referencing via pVMR0. */
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

    /*
     * Register the EMT R0 logger instance.
     */
    PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
#if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned succesfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned succesfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned succesfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
#endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        /* Keyed on the session so each VM's EMT gets its own logger. */
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
    }

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                    return rc;

                /* bail out */
            }
            /* Undo the HWACCM per-VM init; the logger is deregistered below. */
            HWACCMR0TermVM(pVM);
        }
    }
    RTLogSetDefaultInstanceThread(NULL, 0);
    return rc;
}
253
254
/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * Counterpart to vmmR0InitVM: tears down the per-VM dynamic mapping
 * state (Darwin only) and HWACCM data, then deregisters the EMT's
 * ring-0 logger instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @thread EMT.
 */
static int vmmR0TermVM(PVM pVM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTermVM(pVM);
#endif
    HWACCMR0TermVM(pVM);

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, 0);
    return VINF_SUCCESS;
}
276
277
278/**
279 * Calls the ring-3 host code.
280 *
281 * @returns VBox status code of the ring-3 call.
282 * @param pVM The VM handle.
283 * @param enmOperation The operation.
284 * @param uArg The argument to the operation.
285 */
286VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
287{
288/** @todo profile this! */
289 pVM->vmm.s.enmCallHostOperation = enmOperation;
290 pVM->vmm.s.u64CallHostArg = uArg;
291 pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
292 int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
293 if (rc == VINF_SUCCESS)
294 rc = pVM->vmm.s.rcCallHost;
295 return rc;
296}
297
298
299#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * Maps the status code returned from guest context to the matching STAM
 * counter and increments it; VINF_VMM_CALL_HOST is further broken down by
 * the pending call-host operation.  Unrecognized codes land in StatRZRetMisc.
 *
 * @param   pVM         The VM handle.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetExceptionPrivilege);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VERR_REM_FLUSHED_PAGES_OVERFLOW:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPageOverflow);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            /* Break the call-host returns down by the requested operation. */
            switch (pVM->vmm.s.enmCallHostOperation)
            {
                case VMMCALLHOST_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLHOST_PDM_QUEUE_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMQueueFlush);
                    break;
                case VMMCALLHOST_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLHOST_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLHOST_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
#ifndef VBOX_WITH_NEW_PHYS_CODE
                case VMMCALLHOST_PGM_RAM_GROW_RANGE:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMGrowRAM);
                    break;
#endif
                case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLHOST_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLHOST_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLHOST_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallHost);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulHlt);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
475#endif /* VBOX_WITH_STATISTICS */
476
477
/**
 * The Ring 0 entry point, called by the interrupt gate.
 *
 * Only does real work when VBOX_WITH_IDT_PATCHING is defined; otherwise
 * every operation falls through to the VERR_NOT_SUPPORTED default.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    switch (enmOperation)
    {
#ifdef VBOX_WITH_IDT_PATCHING
        /*
         * Switch to GC.
         * These calls return whatever the GC returns.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
            register int rc;
            pVM->vmm.s.iLastGZRc = rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif

            /*
             * We'll let TRPM change the stack frame so our return is different.
             * Just keep in mind that after the call, things have changed!
             */
            if (    rc == VINF_EM_RAW_INTERRUPT
                ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
            {
                /*
                 * Don't trust the compiler to get this right.
                 * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
                 * mode too because we push the arguments on the stack in the IDT patch code.
                 */
# if defined(__GNUC__)
                void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
# elif defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
                void *pvRet = (uint8_t *)_AddressOfReturnAddress();
# elif defined(RT_ARCH_X86)
                void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
# else
#  error "huh?"
# endif
                /* Sanity check: the three words above the return address must be
                   our own arguments, otherwise pvRet doesn't point at the frame
                   the IDT patch code built and we must not let TRPM rewrite it. */
                if (    ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
                    &&  ((uintptr_t *)pvRet)[2] == (uintptr_t)enmOperation
                    &&  ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
                    TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
                else
                {
# if defined(DEBUG) || defined(LOG_ENABLED)
                    static bool s_fHaveWarned = false;
                    if (!s_fHaveWarned)
                    {
                        s_fHaveWarned = true;
                        RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
                        RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
                    }
# endif
                    /* Fall back to dispatching the host interrupt directly. */
                    TRPMR0DispatchHostInterrupt(pVM);
                }
            }
            return rc;
        }

        /*
         * Switch to GC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            return VINF_SUCCESS;
#endif /* VBOX_WITH_IDT_PATCHING */

        default:
            /*
             * We're returning VERR_NOT_SUPPORT here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}
586
587
/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * Returns no status code directly; the result of the operation is stored
 * in pVM->vmm.s.iLastGZRc.
 *
 * @param   pVM             The VM to operate on.
 * @param   idCpu           VMCPU id.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, unsigned idCpu, VMMR0OPERATION enmOperation)
{
    /* Reject out-of-range CPU ids before touching aCpus. */
    if (RT_UNLIKELY(idCpu >= pVM->cCPUs))
    {
        pVM->vmm.s.iLastGZRc = VERR_INVALID_PARAMETER;
        return;
    }

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as hwaccm disables the switcher. */
            if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
            {
                RTCCUINTREG uFlags = ASMIntDisableFlags();

                TMNotifyStartOfExecution(pVM);
                int rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                pVM->vmm.s.iLastGZRc = rc;
                TMNotifyEndOfExecution(pVM);

                /* Forward any host interrupt we intercepted before re-enabling interrupts. */
                if (    rc == VINF_EM_RAW_INTERRUPT
                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);

                ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                vmmR0RecordRC(pVM, rc);
#endif
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVM->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            int rc;
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);

#ifndef RT_OS_WINDOWS /** @todo check other hosts */
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVM->vmm.s.iLastGZRc = rc;
#ifndef RT_OS_WINDOWS /** @todo check other hosts */
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVM->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVM->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
702
703
/**
 * Validates a session or VM session argument.
 *
 * Either pVM or pClaimedSession identifies the owner of the request; they
 * are mutually exclusive.  The request is valid when the (derived) claimed
 * session equals the actual caller session.
 *
 * @returns true / false accordingly.
 * @param   pVM             The VM argument, can be NULL.
 * @param   pClaimedSession The session the request claims to belong to,
 *                          must be NULL when pVM is given.
 * @param   pSession        The session of the actual caller.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one out of the two. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;  /* derive the claimed session from the VM */
    return pClaimedSession == pSession;
}
724
725
/**
 * VMMR0EntryEx worker function, either called directly or when ever possible
 * called thru a longjmp so we can exit safely on failure.
 *
 * Validates the VM pointer (when given) and then dispatches the operation
 * to the owning component: GVMM, VMM init/term, HWACCM, PGM, GMM, GCFGM
 * and the internal networking service.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        /* Must be a valid, page aligned pointer. */
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        /* Must be in a live state and self-referencing via pVMR0. */
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }
    }

    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)  /* u64Arg is a boolean here */
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return vmmR0TermVM(pVM);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         *
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM, (HWACCMSTATE)u64Arg);

        /*
         * Setup the hardware accelerated raw-mode session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
        {
            /* Interrupts must stay off during the setup. */
            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = HWACCMR0SetupVM(pVM);
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * Switch to GC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            return PGMR0PhysAllocateHandyPages(pVM);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, (PGMMINITIALRESERVATIONREQ)pReqHdr);
        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, (PGMMALLOCATEPAGESREQ)pReqHdr);
        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, (PGMMFREEPAGESREQ)pReqHdr);
        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, (PGMMBALLOONEDPAGESREQ)pReqHdr);
        case VMMR0_DO_GMM_DEFLATED_BALLOON:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0DeflatedBalloon(pVM, (uint32_t)u64Arg);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, (RTR3PTR)u64Arg);

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }


        /*
         * Requests to the internal networking service.
         * Each request validates the claimed session against the caller's
         * and requires the global INTNET instance to exist.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0OpenReq(g_pIntNet, pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfCloseReq(g_pIntNet, pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETRING3BUFFERREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfGetRing3BufferReq(g_pIntNet, pSession, (PINTNETIFGETRING3BUFFERREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSetPromiscuousModeReq(g_pIntNet, pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSetMacAddressReq(g_pIntNet, pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSetActiveReq(g_pIntNet, pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfSendReq(g_pIntNet, pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession))
                return VERR_INVALID_PARAMETER;
            if (!g_pIntNet)
                return VERR_NOT_SUPPORTED;
            return INTNETR0IfWaitReq(g_pIntNet, pSession, (PINTNETIFWAITREQ)pReqHdr);

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


        default:
            /*
             * We're returning VERR_NOT_SUPPORT here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}
1008
1009
/**
 * Argument package for vmmR0EntryExWrapper containing the arguments for
 * VMMR0EntryEx, so they can survive the longjmp-protected call.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM             pVM;            /**< The VM handle, can be NULL. */
    VMMR0OPERATION  enmOperation;   /**< The operation to execute. */
    PSUPVMMR0REQHDR pReq;           /**< Optional request packet. */
    uint64_t        u64Arg;         /**< Simple constant argument. */
    PSUPDRVSESSION  pSession;       /**< The session of the caller. */
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1023
1024/**
1025 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1026 *
1027 * @returns VBox status code.
1028 * @param pvArgs The argument package
1029 */
1030static int vmmR0EntryExWrapper(void *pvArgs)
1031{
1032 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1033 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1034 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1035 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1036 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1037}
1038
1039
/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * Operations expected to run on the EMT are routed through the setjmp
 * wrapper (so assertions can bail out safely to ring-3); everything else
 * goes straight to vmmR0EntryExWorker.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0)
    {
        switch (enmOperation)
        {
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            case VMMR0_DO_GMM_DEFLATED_BALLOON:
            case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            case VMMR0_DO_GMM_SEED_CHUNK:
            {
                /** @todo validate this EMT claim... GVM knows. */
                /* Pack the arguments so they survive the setjmp call. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallHostSetJmpEx(&pVM->vmm.s.CallHostR0JmpBuf, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    /* Everything else (and invalid pVM) is handled without the jump buffer. */
    return vmmR0EntryExWorker(pVM, enmOperation, pReq, u64Arg, pSession);
}
1089
1090
1091/**
1092 * Internal R0 logger worker: Flush logger.
1093 *
1094 * @param pLogger The logger instance to flush.
1095 * @remark This function must be exported!
1096 */
1097VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
1098{
1099 /*
1100 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
1101 * (This is a bit paranoid code.)
1102 */
1103 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
1104 if ( !VALID_PTR(pR0Logger)
1105 || !VALID_PTR(pR0Logger + 1)
1106 || !VALID_PTR(pLogger)
1107 || pLogger->u32Magic != RTLOGGER_MAGIC)
1108 {
1109#ifdef DEBUG
1110 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
1111#endif
1112 return;
1113 }
1114
1115 PVM pVM = pR0Logger->pVM;
1116 if ( !VALID_PTR(pVM)
1117 || pVM->pVMR0 != pVM)
1118 {
1119#ifdef DEBUG
1120 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
1121#endif
1122 return;
1123 }
1124
1125 /*
1126 * Check that the jump buffer is armed.
1127 */
1128#ifdef RT_ARCH_X86
1129 if (!pVM->vmm.s.CallHostR0JmpBuf.eip)
1130#else
1131 if (!pVM->vmm.s.CallHostR0JmpBuf.rip)
1132#endif
1133 {
1134#ifdef DEBUG
1135 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
1136#endif
1137 pLogger->offScratch = 0;
1138 return;
1139 }
1140 VMMR0CallHost(pVM, VMMCALLHOST_VMM_LOGGER_FLUSH, 0);
1141}
1142
1143
1144/**
1145 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
1146 *
1147 * @returns true if the breakpoint should be hit, false if it should be ignored.
1148 */
1149DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
1150{
1151 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1152 if (pVM)
1153 {
1154#ifdef RT_ARCH_X86
1155 if (pVM->vmm.s.CallHostR0JmpBuf.eip)
1156#else
1157 if (pVM->vmm.s.CallHostR0JmpBuf.rip)
1158#endif
1159 {
1160 int rc = VMMR0CallHost(pVM, VMMCALLHOST_VM_R0_ASSERTION, 0);
1161 return RT_FAILURE_NP(rc);
1162 }
1163 }
1164#ifdef RT_OS_LINUX
1165 return true;
1166#else
1167 return false;
1168#endif
1169}
1170
1171
1172/**
1173 * Override this so we can push it up to ring-3.
1174 *
1175 * @param pszExpr Expression. Can be NULL.
1176 * @param uLine Location line number.
1177 * @param pszFile Location file name.
1178 * @param pszFunction Location function name.
1179 */
1180DECLEXPORT(void) RTCALL AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
1181{
1182#ifndef DEBUG_sandervl
1183 SUPR0Printf("\n!!R0-Assertion Failed!!\n"
1184 "Expression: %s\n"
1185 "Location : %s(%d) %s\n",
1186 pszExpr, pszFile, uLine, pszFunction);
1187#endif
1188 LogAlways(("\n!!R0-Assertion Failed!!\n"
1189 "Expression: %s\n"
1190 "Location : %s(%d) %s\n",
1191 pszExpr, pszFile, uLine, pszFunction));
1192
1193 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1194 if (pVM)
1195 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
1196 "\n!!R0-Assertion Failed!!\n"
1197 "Expression: %s\n"
1198 "Location : %s(%d) %s\n",
1199 pszExpr, pszFile, uLine, pszFunction);
1200}
1201
1202
1203/**
1204 * Callback for RTLogFormatV which writes to the ring-3 log port.
1205 * See PFNLOGOUTPUT() for details.
1206 */
1207static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
1208{
1209 for (size_t i = 0; i < cbChars; i++)
1210 {
1211#ifndef DEBUG_sandervl
1212 SUPR0Printf("%c", pachChars[i]);
1213#endif
1214 LogAlways(("%c", pachChars[i]));
1215 }
1216
1217 return cbChars;
1218}
1219
1220
1221DECLEXPORT(void) RTCALL AssertMsg2(const char *pszFormat, ...)
1222{
1223 PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
1224 if (pLog)
1225 {
1226 va_list va;
1227 va_start(va, pszFormat);
1228 RTLogFormatV(rtLogOutput, pLog, pszFormat, va);
1229 va_end(va);
1230
1231 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1232 if (pVM)
1233 {
1234 va_start(va, pszFormat);
1235 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, va);
1236 va_end(va);
1237 }
1238 }
1239}
1240
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette