VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@37512

Last change on this file since 37512 was 37452, checked in by vboxsync, 14 years ago:

IOM,PDMCritSect: Extended PDMCritSectEnter to handle rcBusy=VINF_SUCCESS as a request to call ring-3 to acquire a busy lock. Implemented device-level locking in the MMIO code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 53.2 KB
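The change referenced above extends PDMCritSectEnter so that passing rcBusy=VINF_SUCCESS asks ring-0 to acquire a contended section via a ring-3 call instead of bouncing a busy status back to the caller. A minimal sketch of a ring-0 device callback using this (the device structure, register array and handler are hypothetical; only PDMCritSectEnter/PDMCritSectLeave are PDM APIs):

static int myDevMmioWrite(PMYDEV pThis, uint32_t offReg, uint32_t u32Value)
{
    /* rcBusy = VINF_SUCCESS: on contention, go to ring-3 to take the lock
       rather than returning a VINF_IOM_HC_* busy status to retry with. */
    int rc = PDMCritSectEnter(&pThis->CritSect, VINF_SUCCESS);
    if (RT_SUCCESS(rc))
    {
        pThis->aRegs[offReg >> 2] = u32Value;   /* guarded device state */
        PDMCritSectLeave(&pThis->CritSect);
    }
    return rc;
}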
/* $Id: VMMR0.cpp 37452 2011-06-14 18:13:48Z vboxsync $ */
/** @file
 * VMM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm/vmm.h>
#include <VBox/sup.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/tm.h>
#include "VMMInternal.h"
#include <VBox/vmm/vm.h>
#ifdef VBOX_WITH_PCI_PASSTHROUGH
# include <VBox/vmm/pdmpci.h>
#endif

#include <VBox/vmm/gvmm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/intnet.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/version.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/timer.h>

#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
# pragma intrinsic(_AddressOfReturnAddress)
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
VMMR0DECL(int) ModuleInit(void);
VMMR0DECL(void) ModuleTerm(void);

#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
extern uint64_t __udivdi3(uint64_t, uint64_t);
extern uint64_t __umoddi3(uint64_t, uint64_t);
#endif // RT_ARCH_X86 && (RT_OS_SOLARIS || RT_OS_FREEBSD)
RT_C_DECLS_END


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce,
#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
    (PFNRT)__udivdi3,
    (PFNRT)__umoddi3,
#endif // RT_ARCH_X86 && (RT_OS_SOLARIS || RT_OS_FREEBSD)
    NULL
};

#ifdef RT_OS_SOLARIS
/* Dependency information for the native solaris loader. */
extern "C" { char _depends_on[] = "vboxdrv"; }
#endif



#if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
/* Increase the size of the image to work around the refusal of Win64 to
 * load images in the 0x80000 range.
 */
static uint64_t u64BloatImage[8192] = {0};
#endif

/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 */
VMMR0DECL(int) ModuleInit(void)
{
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     */
    int rc = GVMMR0Init();
    if (RT_SUCCESS(rc))
    {
        rc = GMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = HWACCMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = PGMRegisterStringFormatTypes();
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                    rc = PGMR0DynMapInit();
#endif
                    if (RT_SUCCESS(rc))
                    {
                        rc = IntNetR0Init();
                        if (RT_SUCCESS(rc))
                        {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                            rc = PciRawR0Init();
#endif
                            if (RT_SUCCESS(rc))
                            {
                                rc = CPUMR0ModuleInit();
                                if (RT_SUCCESS(rc))
                                {
                                    LogFlow(("ModuleInit: returns success.\n"));
                                    return VINF_SUCCESS;
                                }

                                /*
                                 * Bail out.
                                 */
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                PciRawR0Term();
#endif
                            }
                            IntNetR0Term();
                        }
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        PGMR0DynMapTerm();
#endif
                    }
                    PGMDeregisterStringFormatTypes();
                }
                HWACCMR0Term();
            }
            GMMR0Term();
        }
        GVMMR0Term();
    }

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}
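
/* The init path above nests one success check per subsystem and unwinds in
   strictly reverse order on failure, so only what actually came up gets torn
   down. The same idiom in miniature, with hypothetical subsystems A and B: */
#if 0 /* illustration only */
int ModuleInitSketch(void)
{
    int rc = SubsysAInit();                 /* hypothetical */
    if (RT_SUCCESS(rc))
    {
        rc = SubsysBInit();                 /* hypothetical */
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;            /* everything is up */
        SubsysATerm();                      /* B failed: unwind A only */
    }
    return rc;                              /* A failed: nothing to unwind */
}
#endif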


/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the CPUM module (Local APIC cleanup).
     */
    CPUMR0ModuleTerm();

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin), HWACCM and PciRaw global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0Term();
#endif
    PGMDeregisterStringFormatTypes();
    HWACCMR0Term();

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}


/**
 * Initializes the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VMM_R0_VERSION_MISMATCH;
    }
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Check if the host supports high resolution timers or not.
     */
    if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
        && !RTTimerCanDoHighResolution())
        pVM->vmm.s.fUsePeriodicPreemptionTimers = false;

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                    rc = PciRawR0InitVM(pVM);
#endif
                    if (RT_SUCCESS(rc))
                    {
                        GVMMR0DoneInitVM(pVM);
                        return rc;
                    }
                }

                /* bail out */
            }
#ifdef VBOX_WITH_PCI_PASSTHROUGH
            PciRawR0TermVM(pVM);
#endif
            HWACCMR0TermVM(pVM);
        }
    }


    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}


/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0TermVM(pVM);
#endif

    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}


#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
            else
            if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
            else
            if (VM_FF_ISPENDING(pVM, VM_FF_PDM_QUEUES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
            else
            if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
            else
            if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
            else
            if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
            else
            if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
            else
            if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            else
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
            break;

        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
                    break;
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HWACCM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We return VERR_NOT_SUPPORTED here so that we have something other
     * than -1, which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    return VERR_NOT_SUPPORTED;
}


/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param   pVM             The VM to operate on.
 *                          The return code is stored in pVM->vmm.s.iLastGZRc.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Some safety precautions first. */
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hwaccm */
                          && pVM->cCpus == 1 /* !smp */
                          && PGMGetHyperCR3(pVCpu)))
#else
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled
                          && pVM->cCpus == 1))
#endif
            {
                /* Disable preemption and update the periodic preemption timer. */
                RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&PreemptState);
                RTCPUID idHostCpu = RTMpCpuId();
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
                CPUMR0SetLApic(pVM, idHostCpu);
#endif
                ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

                /* We might need to disable VT-x if the active switcher turns off paging. */
                bool fVTxDisabled;
                int rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
                if (RT_SUCCESS(rc))
                {
                    RTCCUINTREG uFlags = ASMIntDisableFlags();
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

                    TMNotifyStartOfExecution(pVCpu);
                    rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                    pVCpu->vmm.s.iLastGZRc = rc;
                    TMNotifyEndOfExecution(pVCpu);

                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

                    /* Re-enable VT-x if previously turned off. */
                    HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

                    if (    rc == VINF_EM_RAW_INTERRUPT
                        ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                        TRPMR0DispatchHostInterrupt(pVM);

                    ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                    vmmR0RecordRC(pVM, pVCpu, rc);
#endif
                }
                else
                    pVCpu->vmm.s.iLastGZRc = rc;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
                RTThreadPreemptRestore(&PreemptState);
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
                if (pVM->cCpus != 1)
                    pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                if (!PGMGetHyperCR3(pVCpu))
                    pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (    pR0Logger
                    &&  !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            int rc;
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
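
/* Ring-3 caller sketch: the fast path returns no status directly; the EMT
   reads pVCpu->vmm.s.iLastGZRc afterwards. SUPR3CallVMMR0Fast and the loop
   below are illustrative assumptions, not part of this file. */
#if 0 /* illustration only, ring-3 side */
    for (;;)
    {
        int rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, idCpu);
        if (RT_SUCCESS(rc))
            rc = pVM->aCpus[idCpu].vmm.s.iLastGZRc; /* the real exit reason */
        if (rc != VINF_SUCCESS)
            return rc;                              /* let ring-3 handle it */
    }
#endif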


/**
 * Validates a session or VM session argument.
 *
 * @returns true / false accordingly.
 * @param   pVM             The VM argument.
 * @param   pClaimedSession The session claimed by the request, if any.
 * @param   pSession        The session argument.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one out of the two. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}
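
/* Call-shape sketch: the helper accepts either a VM-less request whose
   claimed session must equal the caller's, or a VM-bound request that must
   not claim a session of its own. Illustrative calls (pReq is hypothetical):
     vmmR0IsValidSession(NULL, pReq->pSession, pSession)  - claimed == caller?
     vmmR0IsValidSession(pVM,  NULL,           pSession)  - pVM->pSession == caller?
     vmmR0IsValidSession(pVM,  pReq->pSession, pSession)  - false when both are set. */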


/**
 * VMMR0EntryEx worker function, either called directly or, whenever possible,
 * called through a longjmp so we can exit safely on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }

        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }


    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM);

        /*
         * Setup the hardware accelerated session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
            return HWACCMR0SetupVM(pVM);

        /*
         * Switch to RC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            int rc;
            bool fVTxDisabled;

            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();

#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
            RTCPUID idHostCpu = RTMpCpuId();
            CPUMR0SetLApic(pVM, idHostCpu);
#endif

            /* We might need to disable VT-x if the active switcher turns off paging. */
            rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
            if (RT_FAILURE(rc))
                return rc;

            rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

            /* Re-enable VT-x if previously turned off. */
            HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
            if (idCpu != 0)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysSetupIommu(pVM);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());

# ifdef DEBUG_sandervl
            /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
            /* Todo: this can have bad side effects for unexpected jumps back to r3. */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                Assert(     rc == VINF_SUCCESS
                       ||   (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
                GMMR0CheckSharedModulesEnd(pVM);
            }
# else
            int rc = GMMR0CheckSharedModules(pVM, pVCpu);
# endif
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
        {
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
        }
#endif

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }

        /*
         * PDM Wrappers.
         */
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }

        /*
         * Requests to the internal networking service.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);

#ifdef VBOX_WITH_PCI_PASSTHROUGH
        /*
         * Requests to host PCI driver service.
         */
        case VMMR0_DO_PCIRAW_REQ:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
#endif
        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We return VERR_NOT_SUPPORTED here so that we have something other
             * than -1, which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}
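
/* Request-packet sketch for the GCFGM mock-up handled above: the header must
   carry the exact structure size or the worker rejects it. The magic constant,
   SUPR3CallVMMR0Ex and the "/GVMM/MinSleep" key are assumptions for
   illustration, not taken from this file. */
#if 0 /* illustration only, ring-3 side */
    GCFGMVALUEREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);          /* checked by the worker above */
    Req.pSession     = pSession;
    Req.u64Value     = 0;
    RTStrCopy(Req.szName, sizeof(Req.szName), "/GVMM/MinSleep");
    int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE,
                              0 /*u64Arg*/, &Req.Hdr);
#endif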


/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package
 */
static int vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}


/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0
        &&  idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.idCpu = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This code is deliberately a bit paranoid.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC
        ||  cchBuf < 2)
        return 0;

    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}
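
/* Worked example: with idCpu = 0x1f the prefix callback writes '1' and 'f'
   and returns 2, so the log line renders as "1f <message>". Only the low
   eight bits of the id are encoded, so ids wrap past 0xff. */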

#ifdef LOG_ENABLED

/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}

#endif /* LOG_ENABLED */

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
#ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the logger.
     */
    PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}