VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 32892

Last change on this file since 32892 was 32793, checked in by vboxsync, 14 years ago

One more for crit sections

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 50.7 KB
Line 
1/* $Id: VMMR0.cpp 32793 2010-09-28 13:57:28Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VMM
22#include <VBox/vmm.h>
23#include <VBox/sup.h>
24#include <VBox/trpm.h>
25#include <VBox/cpum.h>
26#include <VBox/pdmapi.h>
27#include <VBox/pgm.h>
28#include <VBox/stam.h>
29#include <VBox/tm.h>
30#include "VMMInternal.h"
31#include <VBox/vm.h>
32
33#include <VBox/gvmm.h>
34#include <VBox/gmm.h>
35#include <VBox/intnet.h>
36#include <VBox/hwaccm.h>
37#include <VBox/param.h>
38#include <VBox/err.h>
39#include <VBox/version.h>
40#include <VBox/log.h>
41
42#include <iprt/asm-amd64-x86.h>
43#include <iprt/assert.h>
44#include <iprt/crc.h>
45#include <iprt/mp.h>
46#include <iprt/once.h>
47#include <iprt/stdarg.h>
48#include <iprt/string.h>
49#include <iprt/thread.h>
50#include <iprt/timer.h>
51
52#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
53# pragma intrinsic(_AddressOfReturnAddress)
54#endif
55
56
57/*******************************************************************************
58* Internal Functions *
59*******************************************************************************/
60RT_C_DECLS_BEGIN
61VMMR0DECL(int) ModuleInit(void);
62VMMR0DECL(void) ModuleTerm(void);
63RT_C_DECLS_END
64
65
66/*******************************************************************************
67* Global Variables *
68*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us.
 * Taking the addresses of these runtime functions forces the linker to keep
 * them in this image so the dependent ring-0 modules can resolve them. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce
};
76
77
#if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
/* Increase the size of the image to work around the refusal of Win64 to
 * load images in the 0x80000 range.
 * (64 KiB of zero-initialized padding; never read or written at runtime.)
 */
static uint64_t u64BloatImage[8192] = {0};
#endif
84
85/**
86 * Initialize the module.
87 * This is called when we're first loaded.
88 *
89 * @returns 0 on success.
90 * @returns VBox status on failure.
91 */
92VMMR0DECL(int) ModuleInit(void)
93{
94 LogFlow(("ModuleInit:\n"));
95
96 /*
97 * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
98 */
99 int rc = GVMMR0Init();
100 if (RT_SUCCESS(rc))
101 {
102 rc = GMMR0Init();
103 if (RT_SUCCESS(rc))
104 {
105 rc = HWACCMR0Init();
106 if (RT_SUCCESS(rc))
107 {
108 rc = PGMRegisterStringFormatTypes();
109 if (RT_SUCCESS(rc))
110 {
111#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
112 rc = PGMR0DynMapInit();
113#endif
114 if (RT_SUCCESS(rc))
115 {
116 rc = IntNetR0Init();
117 if (RT_SUCCESS(rc))
118 {
119 LogFlow(("ModuleInit: returns success.\n"));
120 return VINF_SUCCESS;
121 }
122
123 /* bail out */
124 LogFlow(("ModuleTerm: returns %Rrc\n", rc));
125#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
126 PGMR0DynMapTerm();
127#endif
128 }
129 PGMDeregisterStringFormatTypes();
130 }
131 HWACCMR0Term();
132 }
133 GMMR0Term();
134 }
135 GVMMR0Term();
136 }
137
138 LogFlow(("ModuleInit: failed %Rrc\n", rc));
139 return rc;
140}
141
142
/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 *
 * Tears down the services in the exact reverse order of their
 * initialization in ModuleInit.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin) and HWACCM global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
    PGMDeregisterStringFormatTypes();
    HWACCMR0Term();

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}
173
174
/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * Validates the ring-3/ring-0 revision match and the VM pointer, registers
 * the per-thread ring-0 logger for VCPU 0 (when logging is enabled), and
 * initializes the per-VM state of GVMM, HWACCM, CPUM and (Darwin only) the
 * PGM dynamic mapping cache.  On failure everything already initialized is
 * unwound and the per-thread logger is deregistered again.
 *
 * @returns VBox status code.
 *
 * @param pVM The VM instance in question.
 * @param uSvnRev The SVN revision of the ring-3 part.
 * @thread EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VERSION_MISMATCH;
    }
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     * (The other VCPUs are registered lazily in VMMR0EntryFast.)
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned succesfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned succesfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned succesfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Check if the host supports high resolution timers or not.
     * Clear the flag if it doesn't, so later code won't try to use them.
     */
    if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
        && !RTTimerCanDoHighResolution())
        pVM->vmm.s.fUsePeriodicPreemptionTimers = false;

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
                    /* Tell GVMM the init completed so scheduling works. */
                    GVMMR0DoneInitVM(pVM);
                    return rc;
                }

                /* bail out */
            }
            HWACCMR0TermVM(pVM);
        }
    }
    /* Failure: deregister the per-thread logger registered above. */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}
285
286
/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param pVM The VM instance in question.
 * @param pGVM Pointer to the global VM structure. Optional.
 * @thread EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
    /*
     * Tell GVMM what we're up to and check that we only do this once.
     * (The actual teardown is skipped if GVMM says it's already been done.)
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.  Done unconditionally; this call is harmless
     * when no per-thread logger instance is registered.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}
319
320
#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics
 *
 * Maps the ring-0/raw-mode exit status code to the matching STAM counter
 * and increments it.  Pure bookkeeping; never alters the status code.
 *
 * @param pVM The VM handle.
 * @param pVCpu The VMCPU handle.
 * @param rc The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            /* Attribute the ring-3 return to the first pending force flag
               found; the order of these checks defines the priority and
               must not be rearranged. */
            if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
            else
            if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
            else
            if (VM_FF_ISPENDING(pVM, VM_FF_PDM_QUEUES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
            else
            if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
            else
            if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
            else
            if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
            else
            if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
            else
            if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            else
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
            break;

        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            /* Break the ring-3 call down by operation. */
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HWACCM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */
516
517
/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code (always VERR_NOT_SUPPORTED).
 * @param pVM The VM to operate on.
 * @param enmOperation Which operation to execute.
 * @param pvArg Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something else
     * than -1 which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    return VERR_NOT_SUPPORTED;
}
538
539
/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * Dispatches the raw-mode and hardware-assisted guest execution loops.
 * The return code is stored in pVCpu->vmm.s.iLastGZRc (per VCPU; the old
 * comment said pVM->vmm.s.iLastGZRc, but the assignments below are to the
 * VCPU member).
 *
 * @param pVM The VM to operate on.
 * @param idCpu The Virtual CPU ID of the calling EMT.
 * @param enmOperation Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    /* Silently refuse out-of-range CPU ids; no place to store a status. */
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Some safety precautions first. */
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hwaccm */
                          && pVM->cCpus == 1 /* !smp */
                          && PGMGetHyperCR3(pVCpu)))
#else
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled
                          && pVM->cCpus == 1))
#endif
            {
                /* Disable preemption and update the periodic preemption timer. */
                RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&PreemptState);
                ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

                /* We might need to disable VT-x if the active switcher turns off paging. */
                bool fVTxDisabled;
                int rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
                if (RT_SUCCESS(rc))
                {
                    /* Interrupts stay off across the entire world switch. */
                    RTCCUINTREG uFlags = ASMIntDisableFlags();
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

                    TMNotifyStartOfExecution(pVCpu);
                    rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                    pVCpu->vmm.s.iLastGZRc = rc;
                    TMNotifyEndOfExecution(pVCpu);

                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

                    /* Re-enable VT-x if previously turned off. */
                    HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

                    /* Forward a host interrupt that caused the guest exit. */
                    if (    rc == VINF_EM_RAW_INTERRUPT
                        ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                        TRPMR0DispatchHostInterrupt(pVM);

                    ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                    vmmR0RecordRC(pVM, pVCpu, rc);
#endif
                }
                else
                    pVCpu->vmm.s.iLastGZRc = rc;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
                RTThreadPreemptRestore(&PreemptState);
            }
            else
            {
                /* Report the most specific reason raw-mode cannot run. */
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
                if (pVM->cCpus != 1)
                    pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                if (!PGMGetHyperCR3(pVCpu))
                    pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers.
                   (VCPU 0 was registered in vmmR0InitVM already.) */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (    pR0Logger
                    &&  !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            int rc;
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
709
710
711/**
712 * Validates a session or VM session argument.
713 *
714 * @returns true / false accordingly.
715 * @param pVM The VM argument.
716 * @param pSession The session argument.
717 */
718DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
719{
720 /* This must be set! */
721 if (!pSession)
722 return false;
723
724 /* Only one out of the two. */
725 if (pVM && pClaimedSession)
726 return false;
727 if (pVM)
728 pClaimedSession = pVM->pSession;
729 return pClaimedSession == pSession;
730}
731
732
733/**
734 * VMMR0EntryEx worker function, either called directly or when ever possible
735 * called thru a longjmp so we can exit safely on failure.
736 *
737 * @returns VBox status code.
738 * @param pVM The VM to operate on.
739 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
740 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
741 * @param enmOperation Which operation to execute.
742 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
743 * The support driver validates this if it's present.
744 * @param u64Arg Some simple constant argument.
745 * @param pSession The session of the caller.
746 * @remarks Assume called with interrupts _enabled_.
747 */
748static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
749{
750 /*
751 * Common VM pointer validation.
752 */
753 if (pVM)
754 {
755 if (RT_UNLIKELY( !VALID_PTR(pVM)
756 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
757 {
758 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
759 return VERR_INVALID_POINTER;
760 }
761 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
762 || pVM->enmVMState > VMSTATE_TERMINATED
763 || pVM->pVMR0 != pVM))
764 {
765 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
766 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
767 return VERR_INVALID_POINTER;
768 }
769
770 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
771 {
772 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
773 return VERR_INVALID_PARAMETER;
774 }
775 }
776 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
777 {
778 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
779 return VERR_INVALID_PARAMETER;
780 }
781
782
783 switch (enmOperation)
784 {
785 /*
786 * GVM requests
787 */
788 case VMMR0_DO_GVMM_CREATE_VM:
789 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
790 return VERR_INVALID_PARAMETER;
791 return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
792
793 case VMMR0_DO_GVMM_DESTROY_VM:
794 if (pReqHdr || u64Arg)
795 return VERR_INVALID_PARAMETER;
796 return GVMMR0DestroyVM(pVM);
797
798 case VMMR0_DO_GVMM_REGISTER_VMCPU:
799 {
800 if (!pVM)
801 return VERR_INVALID_PARAMETER;
802 return GVMMR0RegisterVCpu(pVM, idCpu);
803 }
804
805 case VMMR0_DO_GVMM_SCHED_HALT:
806 if (pReqHdr)
807 return VERR_INVALID_PARAMETER;
808 return GVMMR0SchedHalt(pVM, idCpu, u64Arg);
809
810 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
811 if (pReqHdr || u64Arg)
812 return VERR_INVALID_PARAMETER;
813 return GVMMR0SchedWakeUp(pVM, idCpu);
814
815 case VMMR0_DO_GVMM_SCHED_POKE:
816 if (pReqHdr || u64Arg)
817 return VERR_INVALID_PARAMETER;
818 return GVMMR0SchedPoke(pVM, idCpu);
819
820 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
821 if (u64Arg)
822 return VERR_INVALID_PARAMETER;
823 return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
824
825 case VMMR0_DO_GVMM_SCHED_POLL:
826 if (pReqHdr || u64Arg > 1)
827 return VERR_INVALID_PARAMETER;
828 return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
829
830 case VMMR0_DO_GVMM_QUERY_STATISTICS:
831 if (u64Arg)
832 return VERR_INVALID_PARAMETER;
833 return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
834
835 case VMMR0_DO_GVMM_RESET_STATISTICS:
836 if (u64Arg)
837 return VERR_INVALID_PARAMETER;
838 return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
839
840 /*
841 * Initialize the R0 part of a VM instance.
842 */
843 case VMMR0_DO_VMMR0_INIT:
844 return vmmR0InitVM(pVM, (uint32_t)u64Arg);
845
846 /*
847 * Terminate the R0 part of a VM instance.
848 */
849 case VMMR0_DO_VMMR0_TERM:
850 return VMMR0TermVM(pVM, NULL);
851
852 /*
853 * Attempt to enable hwacc mode and check the current setting.
854 */
855 case VMMR0_DO_HWACC_ENABLE:
856 return HWACCMR0EnableAllCpus(pVM);
857
858 /*
859 * Setup the hardware accelerated session.
860 */
861 case VMMR0_DO_HWACC_SETUP_VM:
862 {
863 RTCCUINTREG fFlags = ASMIntDisableFlags();
864 int rc = HWACCMR0SetupVM(pVM);
865 ASMSetFlags(fFlags);
866 return rc;
867 }
868
869 /*
870 * Switch to RC to execute Hypervisor function.
871 */
872 case VMMR0_DO_CALL_HYPERVISOR:
873 {
874 int rc;
875 bool fVTxDisabled;
876
877 /* Safety precaution as HWACCM can disable the switcher. */
878 Assert(!pVM->vmm.s.fSwitcherDisabled);
879 if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
880 return VERR_NOT_SUPPORTED;
881
882#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
883 if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
884 return VERR_PGM_NO_CR3_SHADOW_ROOT;
885#endif
886
887 RTCCUINTREG fFlags = ASMIntDisableFlags();
888
889 /* We might need to disable VT-x if the active switcher turns off paging. */
890 rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
891 if (RT_FAILURE(rc))
892 return rc;
893
894 rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
895
896 /* Re-enable VT-x if previously turned off. */
897 HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);
898
899 /** @todo dispatch interrupts? */
900 ASMSetFlags(fFlags);
901 return rc;
902 }
903
904 /*
905 * PGM wrappers.
906 */
907 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
908 if (idCpu == NIL_VMCPUID)
909 return VERR_INVALID_CPU_ID;
910 return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
911
912 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
913 if (idCpu == NIL_VMCPUID)
914 return VERR_INVALID_CPU_ID;
915 return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
916
917 /*
918 * GMM wrappers.
919 */
920 case VMMR0_DO_GMM_INITIAL_RESERVATION:
921 if (u64Arg)
922 return VERR_INVALID_PARAMETER;
923 return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
924
925 case VMMR0_DO_GMM_UPDATE_RESERVATION:
926 if (u64Arg)
927 return VERR_INVALID_PARAMETER;
928 return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
929
930 case VMMR0_DO_GMM_ALLOCATE_PAGES:
931 if (u64Arg)
932 return VERR_INVALID_PARAMETER;
933 return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
934
935 case VMMR0_DO_GMM_FREE_PAGES:
936 if (u64Arg)
937 return VERR_INVALID_PARAMETER;
938 return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
939
940 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
941 if (u64Arg)
942 return VERR_INVALID_PARAMETER;
943 return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
944
945 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
946 if (u64Arg)
947 return VERR_INVALID_PARAMETER;
948 return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
949
950 case VMMR0_DO_GMM_QUERY_MEM_STATS:
951 if (idCpu == NIL_VMCPUID)
952 return VERR_INVALID_CPU_ID;
953 if (u64Arg)
954 return VERR_INVALID_PARAMETER;
955 return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
956
957 case VMMR0_DO_GMM_BALLOONED_PAGES:
958 if (u64Arg)
959 return VERR_INVALID_PARAMETER;
960 return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
961
962 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
963 if (u64Arg)
964 return VERR_INVALID_PARAMETER;
965 return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
966
967 case VMMR0_DO_GMM_SEED_CHUNK:
968 if (pReqHdr)
969 return VERR_INVALID_PARAMETER;
970 return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
971
972 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
973 if (idCpu == NIL_VMCPUID)
974 return VERR_INVALID_CPU_ID;
975 if (u64Arg)
976 return VERR_INVALID_PARAMETER;
977 return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
978
979 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
980 if (idCpu == NIL_VMCPUID)
981 return VERR_INVALID_CPU_ID;
982 if (u64Arg)
983 return VERR_INVALID_PARAMETER;
984 return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
985
986 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
987 if (idCpu == NIL_VMCPUID)
988 return VERR_INVALID_CPU_ID;
989 if ( u64Arg
990 || pReqHdr)
991 return VERR_INVALID_PARAMETER;
992 return GMMR0ResetSharedModules(pVM, idCpu);
993
994#ifdef VBOX_WITH_PAGE_SHARING
995 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
996 {
997 if (idCpu == NIL_VMCPUID)
998 return VERR_INVALID_CPU_ID;
999 if ( u64Arg
1000 || pReqHdr)
1001 return VERR_INVALID_PARAMETER;
1002
1003 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1004 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1005
1006# ifdef DEBUG_sandervl
1007 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1008 /* Todo: this can have bad side effects for unexpected jumps back to r3. */
1009 int rc = GMMR0CheckSharedModulesStart(pVM);
1010 if (rc == VINF_SUCCESS)
1011 {
1012 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1013 Assert( rc == VINF_SUCCESS
1014 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1015 GMMR0CheckSharedModulesEnd(pVM);
1016 }
1017# else
1018 int rc = GMMR0CheckSharedModules(pVM, pVCpu);
1019# endif
1020 return rc;
1021 }
1022#endif
1023
1024#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1025 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1026 {
1027 if (u64Arg)
1028 return VERR_INVALID_PARAMETER;
1029 return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1030 }
1031#endif
1032
1033 /*
1034 * A quick GCFGM mock-up.
1035 */
1036 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1037 case VMMR0_DO_GCFGM_SET_VALUE:
1038 case VMMR0_DO_GCFGM_QUERY_VALUE:
1039 {
1040 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1041 return VERR_INVALID_PARAMETER;
1042 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1043 if (pReq->Hdr.cbReq != sizeof(*pReq))
1044 return VERR_INVALID_PARAMETER;
1045 int rc;
1046 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1047 {
1048 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1049 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1050 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1051 }
1052 else
1053 {
1054 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1055 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1056 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1057 }
1058 return rc;
1059 }
1060
1061 /*
1062 * PDM Wrappers.
1063 */
1064 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1065 {
1066 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1067 return VERR_INVALID_PARAMETER;
1068 return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1069 }
1070
1071 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1072 {
1073 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1074 return VERR_INVALID_PARAMETER;
1075 return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1076 }
1077
1078 /*
1079 * Requests to the internal networking service.
1080 */
1081 case VMMR0_DO_INTNET_OPEN:
1082 {
1083 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1084 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1085 return VERR_INVALID_PARAMETER;
1086 return IntNetR0OpenReq(pSession, pReq);
1087 }
1088
1089 case VMMR0_DO_INTNET_IF_CLOSE:
1090 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1091 return VERR_INVALID_PARAMETER;
1092 return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1093
1094 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1095 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1096 return VERR_INVALID_PARAMETER;
1097 return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1098
1099 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1100 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1101 return VERR_INVALID_PARAMETER;
1102 return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1103
1104 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1105 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1106 return VERR_INVALID_PARAMETER;
1107 return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1108
1109 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1110 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1111 return VERR_INVALID_PARAMETER;
1112 return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1113
1114 case VMMR0_DO_INTNET_IF_SEND:
1115 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1116 return VERR_INVALID_PARAMETER;
1117 return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1118
1119 case VMMR0_DO_INTNET_IF_WAIT:
1120 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1121 return VERR_INVALID_PARAMETER;
1122 return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1123
1124 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1125 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1126 return VERR_INVALID_PARAMETER;
1127 return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1128
1129 /*
1130 * For profiling.
1131 */
1132 case VMMR0_DO_NOP:
1133 case VMMR0_DO_SLOW_NOP:
1134 return VINF_SUCCESS;
1135
1136 /*
1137 * For testing Ring-0 APIs invoked in this environment.
1138 */
1139 case VMMR0_DO_TESTS:
1140 /** @todo make new test */
1141 return VINF_SUCCESS;
1142
1143
1144#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1145 case VMMR0_DO_TEST_SWITCHER3264:
1146 if (idCpu == NIL_VMCPUID)
1147 return VERR_INVALID_CPU_ID;
1148 return HWACCMR0TestSwitcher3264(pVM);
1149#endif
1150 default:
1151 /*
1152 * We're returning VERR_NOT_SUPPORT here so we've got something else
1153 * than -1 which the interrupt gate glue code might return.
1154 */
1155 Log(("operation %#x is not supported\n", enmOperation));
1156 return VERR_NOT_SUPPORTED;
1157 }
1158}
1159
1160
/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 *
 * Packs all six VMMR0EntryEx parameters into one structure so they can be
 * passed through the single void pointer accepted by vmmR0CallRing3SetJmpEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;            /**< The VM handle. */
    VMCPUID             idCpu;          /**< The virtual CPU ID. */
    VMMR0OPERATION      enmOperation;   /**< The operation to perform. */
    PSUPVMMR0REQHDR     pReq;           /**< Optional request packet header. */
    uint64_t            u64Arg;         /**< Simple constant argument. */
    PSUPDRVSESSION      pSession;       /**< The session of the caller. */
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1175
1176/**
1177 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1178 *
1179 * @returns VBox status code.
1180 * @param pvArgs The argument package
1181 */
1182static int vmmR0EntryExWrapper(void *pvArgs)
1183{
1184 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1185 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1186 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1187 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1188 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1189 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1190}
1191
1192
1193/**
1194 * The Ring 0 entry point, called by the support library (SUP).
1195 *
1196 * @returns VBox status code.
1197 * @param pVM The VM to operate on.
1198 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1199 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1200 * @param enmOperation Which operation to execute.
1201 * @param pReq This points to a SUPVMMR0REQHDR packet. Optional.
1202 * @param u64Arg Some simple constant argument.
1203 * @param pSession The session of the caller.
1204 * @remarks Assume called with interrupts _enabled_.
1205 */
1206VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1207{
1208 /*
1209 * Requests that should only happen on the EMT thread will be
1210 * wrapped in a setjmp so we can assert without causing trouble.
1211 */
1212 if ( VALID_PTR(pVM)
1213 && pVM->pVMR0
1214 && idCpu < pVM->cCpus)
1215 {
1216 switch (enmOperation)
1217 {
1218 /* These might/will be called before VMMR3Init. */
1219 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1220 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1221 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1222 case VMMR0_DO_GMM_FREE_PAGES:
1223 case VMMR0_DO_GMM_BALLOONED_PAGES:
1224 /* On the mac we might not have a valid jmp buf, so check these as well. */
1225 case VMMR0_DO_VMMR0_INIT:
1226 case VMMR0_DO_VMMR0_TERM:
1227 {
1228 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1229
1230 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1231 break;
1232
1233 /** @todo validate this EMT claim... GVM knows. */
1234 VMMR0ENTRYEXARGS Args;
1235 Args.pVM = pVM;
1236 Args.idCpu = idCpu;
1237 Args.enmOperation = enmOperation;
1238 Args.pReq = pReq;
1239 Args.u64Arg = u64Arg;
1240 Args.pSession = pSession;
1241 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1242 }
1243
1244 default:
1245 break;
1246 }
1247 }
1248 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1249}
1250
/**
 * Internal R0 logger worker: Flush logger.
 *
 * Recovers the VM owning the logger instance and 'calls' back to ring-3
 * (VMMCALLRING3_VMM_LOGGER_FLUSH) to have the buffer flushed there.  All
 * validation is deliberately paranoid since this may run with a corrupt
 * logger or outside a usable EMT context.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
    /* The RTLOGGER is embedded inside a VMMR0LOGGER; step back to the wrapper. */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    /* Validate the VM handle: the ring-0 self pointer must match. */
    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
        /* The ring-3 call longjmps; bail out if the buffer isn't set up or
           we're already in the middle of a ring-3 call. */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}
1314
1315/**
1316 * Interal R0 logger worker: Custom prefix.
1317 *
1318 * @returns Number of chars written.
1319 *
1320 * @param pLogger The logger instance.
1321 * @param pchBuf The output buffer.
1322 * @param cchBuf The size of the buffer.
1323 * @param pvUser User argument (ignored).
1324 */
1325VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1326{
1327 NOREF(pvUser);
1328#ifdef LOG_ENABLED
1329 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
1330 if ( !VALID_PTR(pR0Logger)
1331 || !VALID_PTR(pR0Logger + 1)
1332 || pLogger->u32Magic != RTLOGGER_MAGIC
1333 || cchBuf < 2)
1334 return 0;
1335
1336 static const char s_szHex[17] = "0123456789abcdef";
1337 VMCPUID const idCpu = pR0Logger->idCpu;
1338 pchBuf[1] = s_szHex[ idCpu & 15];
1339 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1340
1341 return 2;
1342#else
1343 return 0;
1344#endif
1345}
1346
1347#ifdef LOG_ENABLED
1348
1349/**
1350 * Disables flushing of the ring-0 debug log.
1351 *
1352 * @param pVCpu The shared virtual cpu structure.
1353 */
1354VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
1355{
1356 PVM pVM = pVCpu->pVMR0;
1357 if (pVCpu->vmm.s.pR0LoggerR0)
1358 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
1359}
1360
1361
1362/**
1363 * Enables flushing of the ring-0 debug log.
1364 *
1365 * @param pVCpu The shared virtual cpu structure.
1366 */
1367VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
1368{
1369 PVM pVM = pVCpu->pVMR0;
1370 if (pVCpu->vmm.s.pR0LoggerR0)
1371 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
1372}
1373
1374#endif /* LOG_ENABLED */
1375
/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * Overrides the IPRT default so ring-0 assertions on an EMT are reported to
 * ring-3 via VMMRZCallRing3 instead of panicking in the kernel.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    /* Only act if the current thread is a known EMT of some VM. */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
            /* Jump buffer armed and not already in a ring-3 call? */
#ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
                /* Panic (hit the breakpoint) only if the ring-3 call failed. */
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
    /* NOTE(review): Linux defaults to panicking here while other hosts do
       not -- presumably a debugging aid on that platform; confirm intent. */
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}
1413
1414
/**
 * Override this so we can push it up to ring-3.
 *
 * Writes the assertion location to the log and mirrors it into the VM's
 * shared szRing0AssertMsg1 buffer (where ring-3 can pick it up), then
 * continues with the regular RTAssertMsg1 processing.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    /* NOTE(review): only works when the current thread is a registered EMT;
       otherwise GVMMR0GetVMByEMT yields no VM and the copy is skipped. */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}
1449
1450
1451/**
1452 * Callback for RTLogFormatV which writes to the ring-3 log port.
1453 * See PFNLOGOUTPUT() for details.
1454 */
1455static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
1456{
1457 for (size_t i = 0; i < cbChars; i++)
1458 LogAlways(("%c", pachChars[i]));
1459
1460 return cbChars;
1461}
1462
1463
1464/**
1465 * Override this so we can push it up to ring-3.
1466 *
1467 * @param pszFormat The format string.
1468 * @param va Arguments.
1469 */
1470DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
1471{
1472 va_list vaCopy;
1473
1474 /*
1475 * Push the message to the logger.
1476 */
1477 PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
1478 if (pLog)
1479 {
1480 va_copy(vaCopy, va);
1481 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
1482 va_end(vaCopy);
1483 }
1484
1485 /*
1486 * Push it to the global VMM buffer.
1487 */
1488 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
1489 if (pVM)
1490 {
1491 va_copy(vaCopy, va);
1492 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
1493 va_end(vaCopy);
1494 }
1495
1496 /*
1497 * Continue the normal way.
1498 */
1499 RTAssertMsg2V(pszFormat, va);
1500}
1501
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette