VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@30791

Last change on this file since 30791 was 30660, checked in by vboxsync, 15 years ago

Very annoying to return informational codes without hitting assertions, so turn it into an error.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 48.4 KB
/* $Id: VMMR0.cpp 30660 2010-07-06 12:08:21Z vboxsync $ */
/** @file
 * VMM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm.h>
#include <VBox/sup.h>
#include <VBox/trpm.h>
#include <VBox/cpum.h>
#include <VBox/pdmapi.h>
#include <VBox/pgm.h>
#include <VBox/stam.h>
#include <VBox/tm.h>
#include "VMMInternal.h"
#include <VBox/vm.h>

#include <VBox/gvmm.h>
#include <VBox/gmm.h>
#include <VBox/intnet.h>
#include <VBox/hwaccm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/version.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc32.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
# include <iprt/thread.h>
#endif

#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
# pragma intrinsic(_AddressOfReturnAddress)
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
VMMR0DECL(int) ModuleInit(void);
VMMR0DECL(void) ModuleTerm(void);
RT_C_DECLS_END


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce
};


/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 */
VMMR0DECL(int) ModuleInit(void)
{
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     */
    int rc = GVMMR0Init();
    if (RT_SUCCESS(rc))
    {
        rc = GMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = HWACCMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = PGMRegisterStringFormatTypes();
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                    rc = PGMR0DynMapInit();
#endif
                    if (RT_SUCCESS(rc))
                    {
                        rc = IntNetR0Init();
                        if (RT_SUCCESS(rc))
                        {
                            LogFlow(("ModuleInit: returns success.\n"));
                            return VINF_SUCCESS;
                        }

                        /* bail out */
                        LogFlow(("ModuleInit: returns %Rrc\n", rc));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        PGMR0DynMapTerm();
#endif
                    }
                    PGMDeregisterStringFormatTypes();
                }
                HWACCMR0Term();
            }
            GMMR0Term();
        }
        GVMMR0Term();
    }

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}
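
/*
 * Structure note: ModuleInit above uses the usual VMM multi-stage init
 * pattern.  Subsystems are initialized in order, and on failure only the
 * ones that already succeeded are torn down, in exact reverse order;
 * ModuleTerm below mirrors the same ordering.  A minimal sketch of the
 * pattern with hypothetical subsystem names (illustration only, not part
 * of this file):
 *
 *     int rc = SubsysAInit();
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = SubsysBInit();
 *         if (RT_SUCCESS(rc))
 *             return VINF_SUCCESS;
 *         SubsysATerm();   // unwind A because B failed
 *     }
 *     return rc;
 */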


/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin) and HWACCM global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
    PGMDeregisterStringFormatTypes();
    HWACCMR0Term();

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}


/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VERSION_MISMATCH;
    }
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
                    GVMMR0DoneInitVM(pVM);
                    return rc;
                }

                /* bail out */
            }
            HWACCMR0TermVM(pVM);
        }
    }
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}
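
/*
 * Note on the logger registration in vmmR0InitVM above:
 * RTLogSetDefaultInstanceThread() binds a logger instance to the calling
 * thread, keyed by the support driver session, so each EMT can log to its
 * own ring-0 logger.  The register/deregister pairing used throughout this
 * file (same calls as above) is:
 *
 *     RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
 *     ...
 *     RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
 */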


/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}


#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HWACCM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */
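
/*
 * Each case in vmmR0RecordRC above just bumps a STAM counter for the
 * corresponding ring-0 exit status.  The counters themselves are registered
 * and read from ring-3 through the STAM API; in this era they appear under
 * the /VMM/RZRet/ statistics branch (path given from memory, approximate).
 */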


/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something else
     * than -1 which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    return VERR_NOT_SUPPORTED;
}


/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param   pVM             The VM to operate on.
 *                          The return code is stored in pVM->vmm.s.iLastGZRc.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as hwaccm disables the switcher. */
            if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
            {
                RTCCUINTREG uFlags = ASMIntDisableFlags();
                int rc;
                bool fVTxDisabled;

                if (RT_UNLIKELY(pVM->cCpus > 1))
                {
                    pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
                    ASMSetFlags(uFlags);
                    return;
                }

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
                {
                    pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
                    ASMSetFlags(uFlags);
                    return;
                }
#endif

                /* We might need to disable VT-x if the active switcher turns off paging. */
                rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
                if (RT_FAILURE(rc))
                {
                    pVCpu->vmm.s.iLastGZRc = rc;
                    ASMSetFlags(uFlags);
                    return;
                }

                ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
                VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

                TMNotifyStartOfExecution(pVCpu);
                rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                pVCpu->vmm.s.iLastGZRc = rc;
                TMNotifyEndOfExecution(pVCpu);

                VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);

                /* Re-enable VT-x if previously turned off. */
                HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

                if (    rc == VINF_EM_RAW_INTERRUPT
                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);

                ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting.  On Windows we avoid
         * this by having the support driver raise the IRQL before calling us; this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            int rc;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);

#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (    pR0Logger
                    &&  !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
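
/*
 * VMMR0EntryFast above is reached through the support driver's fast ioctl
 * path: ring-3 issues a dedicated per-EMT ioctl and SUPDrv calls straight
 * into this function, with no request packet to validate.  A rough sketch
 * of the ring-3 side (SUPLib call name of this era; treat as approximate):
 *
 *     SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, idCpu);
 *     rc = pVCpu->vmm.s.iLastGZRc;   // the real status is stashed here
 */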


/**
 * Validates a session or VM session argument.
 *
 * @returns true / false accordingly.
 * @param   pVM                 The VM argument.
 * @param   pClaimedSession     The session claimed by the caller. Optional.
 * @param   pSession            The session argument.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one out of the two. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}
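
/*
 * The check above accepts exactly two shapes of caller: a VM request, where
 * the session must match pVM->pSession, and a VM-less request, where the
 * caller claims a session explicitly.  Derived from the code above:
 *
 *     vmmR0IsValidSession(pVM,  NULL,            pSession)  ->  pVM->pSession == pSession
 *     vmmR0IsValidSession(NULL, pClaimedSession, pSession)  ->  pClaimedSession == pSession
 *     vmmR0IsValidSession(pVM,  pClaimedSession, pSession)  ->  false (only one of the two)
 *     vmmR0IsValidSession(any,  any,             NULL)      ->  false (a session is required)
 */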


/**
 * VMMR0EntryEx worker function, either called directly or, whenever possible,
 * called thru a longjmp so we can exit safely on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }

        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }


    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM);

        /*
         * Setup the hardware accelerated session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
        {
            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = HWACCMR0SetupVM(pVM);
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * Switch to RC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            int rc;
            bool fVTxDisabled;

            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();

            /* We might need to disable VT-x if the active switcher turns off paging. */
            rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
            if (RT_FAILURE(rc))
                return rc;

            rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

            /* Re-enable VT-x if previously turned off. */
            HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            /* Select a valid VCPU context. */
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());

# ifdef DEBUG_sandervl
            /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                GMMR0CheckSharedModulesEnd(pVM);
            }
# else
            int rc = GMMR0CheckSharedModules(pVM, pVCpu);
# endif

            /* Clear the VCPU context. */
            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
        {
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
        }
#endif

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }

        /*
         * PDM Wrappers.
         */
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }

        /*
         * Requests to the internal networking service.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}


/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package
 */
static int vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}
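
/*
 * The wrapper above exists because vmmR0CallRing3SetJmpEx() takes a single
 * function pointer plus one context pointer, while the worker needs six
 * arguments.  VMMR0EntryEx below packs them into a VMMR0ENTRYEXARGS on the
 * stack and the wrapper unpacks them on the far side of the setjmp, so a
 * longjmp back to ring-3 (assertions, logger flushes) can unwind the worker
 * safely.  The call shape (same names as in VMMR0EntryEx below):
 *
 *     VMMR0ENTRYEXARGS Args;
 *     Args.pVM = pVM;  Args.idCpu = idCpu;  ...
 *     rc = vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0,
 *                                 vmmR0EntryExWrapper, &Args);
 */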


/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0
        &&  idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the Mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.idCpu = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This code is a bit paranoid.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC
        ||  cchBuf < 2)
        return 0;

    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}
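
/*
 * The prefix callback above stamps each ring-0 log line with the virtual
 * CPU id as two lowercase hex digits: for idCpu 0x00 it writes "00", for
 * idCpu 0x1f it writes "1f" and returns 2.  If the buffer can't hold two
 * characters it backs off and returns 0.
 */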


#ifdef LOG_ENABLED
/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}
#endif

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
#ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the logger.
     */
    PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}
