VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@32418

Last change on this file since 32418 was 32418, checked in by vboxsync, 15 years ago

Increase the size of the image to work around the refusal of Win64 to load images in the 0x80000 range.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 48.7 KB
/* $Id: VMMR0.cpp 32418 2010-09-10 15:39:47Z vboxsync $ */
/** @file
 * VMM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm.h>
#include <VBox/sup.h>
#include <VBox/trpm.h>
#include <VBox/cpum.h>
#include <VBox/pdmapi.h>
#include <VBox/pgm.h>
#include <VBox/stam.h>
#include <VBox/tm.h>
#include "VMMInternal.h"
#include <VBox/vm.h>

#include <VBox/gvmm.h>
#include <VBox/gmm.h>
#include <VBox/intnet.h>
#include <VBox/hwaccm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/version.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
# include <iprt/thread.h>
#endif

#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
# pragma intrinsic(_AddressOfReturnAddress)
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
VMMR0DECL(int) ModuleInit(void);
VMMR0DECL(void) ModuleTerm(void);
RT_C_DECLS_END


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce
};


#if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
/* Increase the size of the image to work around the refusal of Win64 to
 * load images in the 0x80000 range.
 */
static uint64_t u64BloatImage[8192] = {0};
#endif

/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 */
VMMR0DECL(int) ModuleInit(void)
{
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     */
    int rc = GVMMR0Init();
    if (RT_SUCCESS(rc))
    {
        rc = GMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = HWACCMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = PGMRegisterStringFormatTypes();
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                    rc = PGMR0DynMapInit();
#endif
                    if (RT_SUCCESS(rc))
                    {
                        rc = IntNetR0Init();
                        if (RT_SUCCESS(rc))
                        {
                            LogFlow(("ModuleInit: returns success.\n"));
                            return VINF_SUCCESS;
                        }

                        /* bail out */
                        LogFlow(("ModuleInit: returns %Rrc\n", rc));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        PGMR0DynMapTerm();
#endif
                    }
                    PGMDeregisterStringFormatTypes();
                }
                HWACCMR0Term();
            }
            GMMR0Term();
        }
        GVMMR0Term();
    }

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}
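
/*
 * Note the shape of the failure handling above: every subsystem that did
 * initialize is torn down again in strict reverse order, so a failing
 * ModuleInit leaves nothing behind. ModuleTerm below uses the same reverse
 * ordering for the fully initialized case.
 */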


/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin) and HWACCM global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
    PGMDeregisterStringFormatTypes();
    HWACCMR0Term();

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}


/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VERSION_MISMATCH;
    }
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//  if (RT_SUCCESS(rc))
//      rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
                    GVMMR0DoneInitVM(pVM);
                    return rc;
                }

                /* bail out */
            }
            HWACCMR0TermVM(pVM);
        }
    }
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}


/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}


#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HWACCM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something other
     * than -1 which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    return VERR_NOT_SUPPORTED;
}


/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param   pVM             The VM to operate on.
 *                          The return code is stored in pVCpu->vmm.s.iLastGZRc.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as hwaccm disables the switcher. */
            if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
            {
                RTCCUINTREG uFlags = ASMIntDisableFlags();
                int rc;
                bool fVTxDisabled;

                if (RT_UNLIKELY(pVM->cCpus > 1))
                {
                    pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
                    ASMSetFlags(uFlags);
                    return;
                }

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
                {
                    pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
                    ASMSetFlags(uFlags);
                    return;
                }
#endif

                /* We might need to disable VT-x if the active switcher turns off paging. */
                rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
                if (RT_FAILURE(rc))
                {
                    pVCpu->vmm.s.iLastGZRc = rc;
                    ASMSetFlags(uFlags);
                    return;
                }

                ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
                VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

                TMNotifyStartOfExecution(pVCpu);
                rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                pVCpu->vmm.s.iLastGZRc = rc;
                TMNotifyEndOfExecution(pVCpu);

                VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);

                /* Re-enable VT-x if previously turned off. */
                HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

                if (    rc == VINF_EM_RAW_INTERRUPT
                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);

                ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            int rc;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);

#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (    pR0Logger
                    &&  !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
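
/*
 * For orientation: ring-3 reaches VMMR0EntryFast through the support driver's
 * fast ioctl path and then picks the real status out of the shared VMCPU
 * structure. A minimal sketch of the ring-3 side (illustrative only, assuming
 * the SUPR3CallVMMR0Fast API of this era):
 *
 *     int rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, idCpu);
 *     if (RT_SUCCESS(rc))
 *         rc = pVCpu->vmm.s.iLastGZRc; // the status stored above
 */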


/**
 * Validates a session or VM session argument.
 *
 * @returns true / false accordingly.
 * @param   pVM                 The VM argument.
 * @param   pClaimedSession     The session claimed by the request packet;
 *                              must be NULL when pVM is given.
 * @param   pSession            The session argument.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one out of the two. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}
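
/*
 * A minimal usage sketch (illustrative only); this is the guard pattern the
 * VMMR0_DO_INTNET_* cases below apply to their request packets:
 *
 *     PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
 *     if (!pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession))
 *         return VERR_INVALID_PARAMETER;
 */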


/**
 * VMMR0EntryEx worker function, either called directly or, whenever possible,
 * called through a longjmp so we can exit safely on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }

        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }


    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM);

        /*
         * Setup the hardware accelerated session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
        {
            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = HWACCMR0SetupVM(pVM);
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * Switch to RC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            int rc;
            bool fVTxDisabled;

            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();

            /* We might need to disable VT-x if the active switcher turns off paging. */
            rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
            if (RT_FAILURE(rc))
                return rc;

            rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

            /* Re-enable VT-x if previously turned off. */
            HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());

# ifdef DEBUG_sandervl
            /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
            /** @todo this can have bad side effects for unexpected jumps back to r3. */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                Assert(     rc == VINF_SUCCESS
                       ||   (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
                GMMR0CheckSharedModulesEnd(pVM);
            }
# else
            int rc = GMMR0CheckSharedModules(pVM, pVCpu);
# endif
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
        {
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
        }
#endif

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }

        /*
         * PDM Wrappers.
         */
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }

        /*
         * Requests to the internal networking service.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something
             * other than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}


/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package
 */
static int vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}
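
/*
 * The wrapper above exists because vmmR0CallRing3SetJmpEx forwards only a
 * single pointer to its callback; VMMR0ENTRYEXARGS marshals the six
 * VMMR0EntryEx arguments through that pointer. See the setjmp case in
 * VMMR0EntryEx below for the other half of the pattern.
 */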


/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0
        &&  idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.idCpu = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (The code here is a bit paranoid.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC
        ||  cchBuf < 2)
        return 0;

    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}
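
/*
 * Example: with LOG_ENABLED, the EMT for VCPU 10 gets the two-character
 * hex prefix "0a" in front of every log line, which makes it easy to
 * separate the output of several EMTs logging into the same destination.
 */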

#ifdef LOG_ENABLED

/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}

#endif /* LOG_ENABLED */

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
#ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}
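
/*
 * Summarizing the logic above: on an EMT with an armed jump buffer the
 * assertion is first pushed to ring-3, and the ring-0 breakpoint only
 * fires if that call fails; outside that context the Linux build panics
 * by default while the other hosts ignore the breakpoint.
 */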


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the logger.
     */
    PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}