VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp @ 39014

Last change on this file since 39014 was 38954, checked in by vboxsync, 13 years ago

VMMR0: Triple fault debugging hack proof of concept.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 53.5 KB
/* $Id: VMMR0.cpp 38954 2011-10-06 11:28:41Z vboxsync $ */
/** @file
 * VMM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm/vmm.h>
#include <VBox/sup.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/tm.h>
#include "VMMInternal.h"
#include <VBox/vmm/vm.h>
#ifdef VBOX_WITH_PCI_PASSTHROUGH
# include <VBox/vmm/pdmpci.h>
#endif

#include <VBox/vmm/gvmm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/intnet.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/version.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/timer.h>

#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
# pragma intrinsic(_AddressOfReturnAddress)
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
VMMR0DECL(int)  ModuleInit(void);
VMMR0DECL(void) ModuleTerm(void);

#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
extern uint64_t __udivdi3(uint64_t, uint64_t);
extern uint64_t __umoddi3(uint64_t, uint64_t);
#endif // RT_ARCH_X86 && (RT_OS_SOLARIS || RT_OS_FREEBSD)
RT_C_DECLS_END


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce,
#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
    (PFNRT)__udivdi3,
    (PFNRT)__umoddi3,
#endif // RT_ARCH_X86 && (RT_OS_SOLARIS || RT_OS_FREEBSD)
    NULL
};

#ifdef RT_OS_SOLARIS
/* Dependency information for the native Solaris loader. */
extern "C" { char _depends_on[] = "vboxdrv"; }
#endif
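
/* (Exposition: the _depends_on string is picked up by the Solaris kernel
   linker, which is expected to load the vboxdrv support driver before this
   module, so its SUPDrv services are available by the time ModuleInit runs.) */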


#if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
/* Increase the size of the image to work around the refusal of Win64 to
 * load images in the 0x80000 range.
 */
static uint64_t u64BloatImage[8192] = {0};
#endif

/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 */
VMMR0DECL(int) ModuleInit(void)
{
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     */
    int rc = GVMMR0Init();
    if (RT_SUCCESS(rc))
    {
        rc = GMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = HWACCMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = PGMRegisterStringFormatTypes();
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                    rc = PGMR0DynMapInit();
#endif
                    if (RT_SUCCESS(rc))
                    {
                        rc = IntNetR0Init();
                        if (RT_SUCCESS(rc))
                        {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                            rc = PciRawR0Init();
#endif
                            if (RT_SUCCESS(rc))
                            {
                                rc = CPUMR0ModuleInit();
                                if (RT_SUCCESS(rc))
                                {
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                    rc = vmmR0TripleFaultHackInit();
                                    if (RT_SUCCESS(rc))
#endif
                                    {
                                        LogFlow(("ModuleInit: returns success.\n"));
                                        return VINF_SUCCESS;
                                    }

                                    /*
                                     * Bail out.
                                     */
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                    vmmR0TripleFaultHackTerm();
#endif
                                }
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                PciRawR0Term();
#endif
                            }
                            IntNetR0Term();
                        }
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        PGMR0DynMapTerm();
#endif
                    }
                    PGMDeregisterStringFormatTypes();
                }
                HWACCMR0Term();
            }
            GMMR0Term();
        }
        GVMMR0Term();
    }

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}
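
/*
 * Note on the ladder above: every service that ModuleInit brought up is torn
 * down again in reverse order when a later step fails, so a failed module
 * init never leaks global state.  ModuleTerm below performs the same cleanup
 * for the fully initialized case.
 */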


/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the CPUM module (Local APIC cleanup).
     */
    CPUMR0ModuleTerm();

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin), HWACCM and PciRaw global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0Term();
#endif
    PGMDeregisterStringFormatTypes();
    HWACCMR0Term();
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
    vmmR0TripleFaultHackTerm();
#endif

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}


/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VMM_R0_VERSION_MISMATCH;
    }
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Check if the host supports high resolution timers or not.
     */
    if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
        && !RTTimerCanDoHighResolution())
        pVM->vmm.s.fUsePeriodicPreemptionTimers = false;

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                    rc = PciRawR0InitVM(pVM);
#endif
                    if (RT_SUCCESS(rc))
                    {
                        GVMMR0DoneInitVM(pVM);
                        return rc;
                    }
                }

                /* bail out */
            }
#ifdef VBOX_WITH_PCI_PASSTHROUGH
            PciRawR0TermVM(pVM);
#endif
            HWACCMR0TermVM(pVM);
        }
    }


    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}


/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0TermVM(pVM);
#endif

    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}


#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_QUEUES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
            else if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            else
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
            break;

        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
                    break;
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HWACCM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something else
     * than -1 which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    return VERR_NOT_SUPPORTED;
}


/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param   pVM             The VM to operate on.
 *                          The return code is stored in pVCpu->vmm.s.iLastGZRc.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Some safety precautions first. */
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hwaccm */
                          && pVM->cCpus == 1               /* !smp */
                          && PGMGetHyperCR3(pVCpu)))
#else
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled
                          && pVM->cCpus == 1))
#endif
            {
                /* Disable preemption and update the periodic preemption timer. */
                RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&PreemptState);
                RTCPUID idHostCpu = RTMpCpuId();
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
                CPUMR0SetLApic(pVM, idHostCpu);
#endif
                ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

                /* We might need to disable VT-x if the active switcher turns off paging. */
                bool fVTxDisabled;
                int rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
                if (RT_SUCCESS(rc))
                {
                    RTCCUINTREG uFlags = ASMIntDisableFlags();
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

                    TMNotifyStartOfExecution(pVCpu);
                    rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                    pVCpu->vmm.s.iLastGZRc = rc;
                    TMNotifyEndOfExecution(pVCpu);

                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

                    /* Re-enable VT-x if previously turned off. */
                    HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

                    if (    rc == VINF_EM_RAW_INTERRUPT
                        ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                        TRPMR0DispatchHostInterrupt(pVM);

                    ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                    vmmR0RecordRC(pVM, pVCpu, rc);
#endif
                }
                else
                    pVCpu->vmm.s.iLastGZRc = rc;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
                RTThreadPreemptRestore(&PreemptState);
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
                if (pVM->cCpus != 1)
                    pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                if (!PGMGetHyperCR3(pVCpu))
                    pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (    pR0Logger
                    &&  !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            int rc;
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
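
/*
 * Note on VMMR0EntryFast: it intentionally returns void.  The fast ioctl path
 * carries no status code, so the result of the world switch is parked in
 * pVCpu->vmm.s.iLastGZRc for the ring-3 side to fetch once the ioctl returns.
 */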


/**
 * Validates a session or VM session argument.
 *
 * @returns true / false accordingly.
 * @param   pVM                 The VM argument.
 * @param   pClaimedSession     The session claimed by the request.
 * @param   pSession            The session argument.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one out of the two. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}
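
/*
 * Usage sketch (illustrative only): a request carrying its own session is
 * checked as vmmR0IsValidSession(NULL, pReq->pSession, pSession), reducing to
 * pReq->pSession == pSession, while a VM-bound request is checked as
 * vmmR0IsValidSession(pVM, NULL, pSession), i.e. pVM->pSession == pSession.
 * Supplying both a VM and an explicit session claim always fails the check.
 */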


/**
 * VMMR0EntryEx worker function, either called directly or whenever possible
 * called through a longjmp so we can exit safely on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }

        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }


    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM);

        /*
         * Setup the hardware accelerated session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
            return HWACCMR0SetupVM(pVM);

        /*
         * Switch to RC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            int rc;
            bool fVTxDisabled;

            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();

#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
            RTCPUID idHostCpu = RTMpCpuId();
            CPUMR0SetLApic(pVM, idHostCpu);
#endif

            /* We might need to disable VT-x if the active switcher turns off paging. */
            rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
            if (RT_FAILURE(rc))
                return rc;

            rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

            /* Re-enable VT-x if previously turned off. */
            HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
            if (idCpu != 0)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysSetupIommu(pVM);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());

# ifdef DEBUG_sandervl
            /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
            /** @todo this can have bad side effects for unexpected jumps back to r3. */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                Assert(    rc == VINF_SUCCESS
                       ||  (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
                GMMR0CheckSharedModulesEnd(pVM);
            }
# else
            int rc = GMMR0CheckSharedModules(pVM, pVCpu);
# endif
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
        {
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
        }
#endif

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }

        /*
         * PDM Wrappers.
         */
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }

        /*
         * Requests to the internal networking service.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);

#ifdef VBOX_WITH_PCI_PASSTHROUGH
        /*
         * Requests to host PCI driver service.
         */
        case VMMR0_DO_PCIRAW_REQ:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
#endif
        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}


/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package
 */
static int vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}
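
/*
 * (Exposition: the wrapper above is the function vmmR0CallRing3SetJmpEx
 * invokes from VMMR0EntryEx after arming the jump buffer.  That same buffer
 * is what VMMRZCallRing3 and RTAssertShouldPanic later longjmp through when
 * ring-0 code must bail out to ring-3, e.g. for a logger flush or assertion.)
 */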


/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0
        &&  idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.idCpu = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC
        ||  cchBuf < 2)
        return 0;

    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}
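
/* Example (illustrative): for idCpu 0x12 the prefix callback above emits "12",
   tagging every ring-0 log line with the virtual CPU it came from. */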

#ifdef LOG_ENABLED

/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}

#endif /* LOG_ENABLED */

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
#ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the logger.
     */
    PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}