VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp @31359

Last change on this file since 31359 was 31359, checked in by vboxsync, 15 years ago:

Keep track of the native R0 thread handle for each EMT too. Use that to find the right VCPU.
/* $Id: VMMR0.cpp 31359 2010-08-04 15:31:04Z vboxsync $ */
/** @file
 * VMM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm.h>
#include <VBox/sup.h>
#include <VBox/trpm.h>
#include <VBox/cpum.h>
#include <VBox/pdmapi.h>
#include <VBox/pgm.h>
#include <VBox/stam.h>
#include <VBox/tm.h>
#include "VMMInternal.h"
#include <VBox/vm.h>

#include <VBox/gvmm.h>
#include <VBox/gmm.h>
#include <VBox/intnet.h>
#include <VBox/hwaccm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/version.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc32.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
# include <iprt/thread.h>
#endif
#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
# pragma intrinsic(_AddressOfReturnAddress)
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
VMMR0DECL(int) ModuleInit(void);
VMMR0DECL(void) ModuleTerm(void);
RT_C_DECLS_END


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce
};


/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 */
VMMR0DECL(int) ModuleInit(void)
{
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     */
    int rc = GVMMR0Init();
    if (RT_SUCCESS(rc))
    {
        rc = GMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = HWACCMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = PGMRegisterStringFormatTypes();
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                    rc = PGMR0DynMapInit();
#endif
                    if (RT_SUCCESS(rc))
                    {
                        rc = IntNetR0Init();
                        if (RT_SUCCESS(rc))
                        {
                            LogFlow(("ModuleInit: returns success.\n"));
                            return VINF_SUCCESS;
                        }

                        /* bail out */
                        LogFlow(("ModuleInit: returns %Rrc\n", rc));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        PGMR0DynMapTerm();
#endif
                    }
                    PGMDeregisterStringFormatTypes();
                }
                HWACCMR0Term();
            }
            GMMR0Term();
        }
        GVMMR0Term();
    }

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}


/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin) and HWACCM global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
    PGMDeregisterStringFormatTypes();
    HWACCMR0Term();

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}


/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VERSION_MISMATCH;
    }
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
                    GVMMR0DoneInitVM(pVM);
                    return rc;
                }

                /* bail out */
            }
            HWACCMR0TermVM(pVM);
        }
    }
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}
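
/* A minimal sketch of the matching ring-3 call (illustration only; the exact
 * call site lives in VMMR3 and the SUPR3CallVMMR0Ex usage here is an
 * assumption): ring-3 passes its own VMMGetSvnRev() as u64Arg so the revision
 * check above can reject mismatched R0/R3 modules.
 */
#if 0
    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /* idCpu */, VMMR0_DO_VMMR0_INIT,
                              VMMGetSvnRev(), NULL /* pReqHdr */);
    if (rc == VERR_VERSION_MISMATCH)
        LogRel(("Ring-0 and ring-3 VMM modules are out of sync!\n"));
#endif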


/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session cleanup thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}


#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HWACCM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something other
     * than -1, which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    return VERR_NOT_SUPPORTED;
}


/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param   pVM             The VM to operate on.
 *                          The return code is stored in pVM->vmm.s.iLastGZRc.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as hwaccm disables the switcher. */
            if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
            {
                RTCCUINTREG uFlags = ASMIntDisableFlags();
                int rc;
                bool fVTxDisabled;

                if (RT_UNLIKELY(pVM->cCpus > 1))
                {
                    pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
                    ASMSetFlags(uFlags);
                    return;
                }

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
                {
                    pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
                    ASMSetFlags(uFlags);
                    return;
                }
#endif

                /* We might need to disable VT-x if the active switcher turns off paging. */
                rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
                if (RT_FAILURE(rc))
                {
                    pVCpu->vmm.s.iLastGZRc = rc;
                    ASMSetFlags(uFlags);
                    return;
                }

                ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
                VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

                TMNotifyStartOfExecution(pVCpu);
                rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                pVCpu->vmm.s.iLastGZRc = rc;
                TMNotifyEndOfExecution(pVCpu);

                VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);

                /* Re-enable VT-x if previously turned off. */
                HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

                if (    rc == VINF_EM_RAW_INTERRUPT
                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);

                ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us; this
         * way we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            int rc;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);

#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (    pR0Logger
                    &&  !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
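
/* For reference, the fast-ioctl path that lands in VMMR0EntryFast is driven
 * from the ring-3 EMT loop through the support library, roughly like this
 * (sketch only; treat the exact SUPR3CallVMMR0Fast signature as an
 * assumption):
 */
#if 0
    int rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, idCpu);
    if (RT_SUCCESS(rc))
        rc = pVCpu->vmm.s.iLastGZRc; /* the real status is parked here, see above */
#endif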


/**
 * Validates a session or VM session argument.
 *
 * @returns true / false accordingly.
 * @param   pVM             The VM argument.
 * @param   pClaimedSession The session claimed by the API caller.
 * @param   pSession        The session argument.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one out of the two. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}
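
/* How the checks above combine (illustrative summary):
 *
 *   pVM     pClaimedSession   result
 *   ------  ----------------  -------------------------------------
 *   NULL    NULL              false (nothing to match pSession)
 *   NULL    pS                pS == pSession
 *   set     NULL              pVM->pSession == pSession
 *   set     pS                false (may not specify both)
 */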


/**
 * VMMR0EntryEx worker function, either called directly or, whenever possible,
 * called through a longjmp so we can exit safely on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }

        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }


    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM);

        /*
         * Setup the hardware accelerated session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
        {
            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = HWACCMR0SetupVM(pVM);
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * Switch to RC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            int rc;
            bool fVTxDisabled;

            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();

            /* We might need to disable VT-x if the active switcher turns off paging. */
            rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
            if (RT_FAILURE(rc))
                return rc;

            rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

            /* Re-enable VT-x if previously turned off. */
            HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (pVCpu->hNativeThreadR0 == NIL_RTNATIVETHREAD)
                pVCpu->hNativeThreadR0 = RTThreadNativeSelf();

            /* Make sure that log flushes can jump back to ring-3; it's annoying to get an incomplete log. (This is risky, though, as the code doesn't take it into account.) */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                Assert(    rc == VINF_SUCCESS
                       ||  (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
                GMMR0CheckSharedModulesEnd(pVM);
            }
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
        {
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
        }
#endif

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }

        /*
         * PDM Wrappers.
         */
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }

        /*
         * Requests to the internal networking service.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something other
             * than -1, which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}
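
/* To illustrate the request-header convention the worker above relies on,
 * here is roughly how ring-3 would package a GCFGM query (illustrative
 * sketch: the GCFGMVALUEREQ fields are those used by the case above, while
 * SUPVMMR0REQHDR_MAGIC, the config name and the SUPR3CallVMMR0Ex usage are
 * assumptions):
 */
#if 0
    GCFGMVALUEREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);            /* checked by the worker above */
    Req.pSession     = pSession;
    Req.u64Value     = 0;
    RTStrPrintf(Req.szName, sizeof(Req.szName), "/GVMM/MinSleep");
    int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE,
                              0 /* u64Arg */, &Req.Hdr);
    if (RT_SUCCESS(rc))
        LogRel(("GVMM/MinSleep=%llu\n", Req.u64Value));
#endif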


/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package
 */
static int vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}


/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0
        &&  idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.idCpu = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}
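
/* A trivial caller sketch: profiling the ring-0 round trip with the NOP
 * operation handled above (illustrative only; the SUPR3CallVMMR0Ex signature
 * is assumed from the SUPLib of this era):
 */
#if 0
    uint64_t const u64Start = RTTimeNanoTS();
    for (uint32_t i = 0; i < 100000; i++)
        SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /* idCpu */, VMMR0_DO_NOP, 0, NULL);
    LogRel(("VMMR0_DO_NOP: %llu ns/call\n", (RTTimeNanoTS() - u64Start) / 100000));
#endif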

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This code is a bit paranoid.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC
        ||  cchBuf < 2)
        return 0;

    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}
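
/* Worked example (illustrative note, not built code): for idCpu 0x1a the
 * prefix callback above writes the two hex characters "1a" in front of each
 * log line, so per-CPU output can be told apart in a shared ring-0 log.
 */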

#ifdef LOG_ENABLED

/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}

#endif /* LOG_ENABLED */

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
#ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the logger.
     */
    PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}