VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@35766

Last change on this file since 35766 was 35346, checked in by vboxsync, 14 years ago

VMM reorg: Moving the public include files from include/VBox to include/VBox/vmm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 51.7 KB
/* $Id: VMMR0.cpp 35346 2010-12-27 16:13:13Z vboxsync $ */
/** @file
 * VMM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm/vmm.h>
#include <VBox/sup.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/tm.h>
#include "VMMInternal.h"
#include <VBox/vmm/vm.h>

#include <VBox/vmm/gvmm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/intnet.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/version.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/timer.h>

#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
# pragma intrinsic(_AddressOfReturnAddress)
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
VMMR0DECL(int) ModuleInit(void);
VMMR0DECL(void) ModuleTerm(void);

#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
extern uint64_t __udivdi3(uint64_t, uint64_t);
extern uint64_t __umoddi3(uint64_t, uint64_t);
#endif // RT_ARCH_X86 && (RT_OS_SOLARIS || RT_OS_FREEBSD)
RT_C_DECLS_END


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce,
#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
    (PFNRT)__udivdi3,
    (PFNRT)__umoddi3,
#endif // RT_ARCH_X86 && (RT_OS_SOLARIS || RT_OS_FREEBSD)
    NULL
};


#if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
/* Increase the size of the image to work around the refusal of Win64 to
 * load images in the 0x80000 range.
 */
static uint64_t u64BloatImage[8192] = {0};
#endif

/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * @returns 0 on success.
 * @returns VBox status code on failure.
 */
VMMR0DECL(int) ModuleInit(void)
{
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     */
    int rc = GVMMR0Init();
    if (RT_SUCCESS(rc))
    {
        rc = GMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = HWACCMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = PGMRegisterStringFormatTypes();
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                    rc = PGMR0DynMapInit();
#endif
                    if (RT_SUCCESS(rc))
                    {
                        rc = IntNetR0Init();
                        if (RT_SUCCESS(rc))
                        {
                            rc = CPUMR0ModuleInit();
                            if (RT_SUCCESS(rc))
                            {
                                LogFlow(("ModuleInit: returns success.\n"));
                                return VINF_SUCCESS;
                            }
                        }

                        /* bail out */
                        LogFlow(("ModuleInit: returns %Rrc\n", rc));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        PGMR0DynMapTerm();
#endif
                    }
                    PGMDeregisterStringFormatTypes();
                }
                HWACCMR0Term();
            }
            GMMR0Term();
        }
        GVMMR0Term();
    }

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}


/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the CPUM module (Local APIC cleanup).
     */
    CPUMR0ModuleTerm();

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin) and HWACCM global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
    PGMDeregisterStringFormatTypes();
    HWACCMR0Term();

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}


/**
 * Initializes the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VERSION_MISMATCH;
    }
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Check if the host supports high resolution timers or not.
     */
    if (    pVM->vmm.s.fUsePeriodicPreemptionTimers
        &&  !RTTimerCanDoHighResolution())
        pVM->vmm.s.fUsePeriodicPreemptionTimers = false;

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
                    GVMMR0DoneInitVM(pVM);
                    return rc;
                }

                /* bail out */
            }
            HWACCMR0TermVM(pVM);
        }
    }
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}
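

/*
 * A minimal sketch of the ring-3 side of this call: VMMR3 issues the
 * VMMR0_DO_VMMR0_INIT operation with its own SVN revision as u64Arg so the
 * revision check above can reject mismatched ring-0/ring-3 builds. This
 * assumes the SUPR3CallVMMR0Ex prototype from VBox/sup.h; vmmR3SketchInitR0
 * is a hypothetical helper, not the actual VMMR3 code.
 */
#if 0 /* illustrative sketch only */
static int vmmR3SketchInitR0(PVM pVM)
{
    /* Must be issued on the EMT of virtual CPU 0. */
    return SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_VMMR0_INIT,
                            VMMGetSvnRev() /*u64Arg*/, NULL /*pReqHdr*/);
}
#endif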


/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}


#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_QUEUES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
            else if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            else
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
            break;

        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HWACCM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something else
     * than -1 which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    return VERR_NOT_SUPPORTED;
}


/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param   pVM             The VM to operate on.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_. The return code is stored
 *          in pVCpu->vmm.s.iLastGZRc rather than being returned directly.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Some safety precautions first. */
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hwaccm */
                          && pVM->cCpus == 1               /* !smp */
                          && PGMGetHyperCR3(pVCpu)))
#else
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled
                          && pVM->cCpus == 1))
#endif
            {
                /* Disable preemption and update the periodic preemption timer. */
                RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&PreemptState);
                RTCPUID idHostCpu = RTMpCpuId();
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
                CPUMR0SetLApic(pVM, idHostCpu);
#endif
                ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

                /* We might need to disable VT-x if the active switcher turns off paging. */
                bool fVTxDisabled;
                int rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
                if (RT_SUCCESS(rc))
                {
                    RTCCUINTREG uFlags = ASMIntDisableFlags();
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

                    TMNotifyStartOfExecution(pVCpu);
                    rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                    pVCpu->vmm.s.iLastGZRc = rc;
                    TMNotifyEndOfExecution(pVCpu);

                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

                    /* Re-enable VT-x if previously turned off. */
                    HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

                    if (    rc == VINF_EM_RAW_INTERRUPT
                        ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                        TRPMR0DispatchHostInterrupt(pVM);

                    ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                    vmmR0RecordRC(pVM, pVCpu, rc);
#endif
                }
                else
                    pVCpu->vmm.s.iLastGZRc = rc;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
                RTThreadPreemptRestore(&PreemptState);
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
                if (pVM->cCpus != 1)
                    pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                if (!PGMGetHyperCR3(pVCpu))
                    pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we avoid
         * this by having the support driver raise the IRQL before calling us, this way
         * we hope to get away with page faults and later calling into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (    pR0Logger
                    &&  !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            int rc;
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}
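

/*
 * A rough sketch of how ring-3 drives the fast path above. Since
 * VMMR0EntryFast returns void, the caller fetches the real status code from
 * iLastGZRc afterwards. This assumes the SUPR3CallVMMR0Fast prototype from
 * VBox/sup.h; vmmR3SketchRunHwAcc is a hypothetical helper, not the actual
 * VMMR3 code.
 */
#if 0 /* illustrative sketch only */
static int vmmR3SketchRunHwAcc(PVM pVM, PVMCPU pVCpu)
{
    int rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN, pVCpu->idCpu);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        rc = pVCpu->vmm.s.iLastGZRc; /* status code of the guest run */
    return rc;
}
#endif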


/**
 * Validates a session or VM session argument.
 *
 * @returns true / false accordingly.
 * @param   pVM                 The VM argument.
 * @param   pClaimedSession     The session claimed by the request, if any.
 * @param   pSession            The session argument.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one out of the two. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}
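

/*
 * For reference (not part of the original file): the request cases below pass
 * both the pVM argument and the session claimed in the request packet straight
 * through, e.g. vmmR0IsValidSession(pVM, pReq->pSession, pSession). The check
 * then accepts exactly two shapes: a VM-bound request (pVM set, claimed
 * session NULL, and the VM's own session must equal pSession), or a
 * session-bound request (pVM NULL, and the claimed session must equal
 * pSession). Anything else is rejected.
 */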


/**
 * VMMR0EntryEx worker function, either called directly or, whenever possible,
 * called through a longjmp so we can exit safely on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReqHdr         This points to a SUPVMMR0REQHDR packet. Optional.
 *                          The support driver validates this if it's present.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }

        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }


    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM);

        /*
         * Setup the hardware accelerated session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
        {
            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = HWACCMR0SetupVM(pVM);
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * Switch to RC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            int rc;
            bool fVTxDisabled;

            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();

#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
            RTCPUID idHostCpu = RTMpCpuId();
            CPUMR0SetLApic(pVM, idHostCpu);
#endif

            /* We might need to disable VT-x if the active switcher turns off paging. */
            rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
            if (RT_FAILURE(rc))
                return rc;

            rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

            /* Re-enable VT-x if previously turned off. */
            HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());

# ifdef DEBUG_sandervl
            /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
            /** @todo this can have bad side effects for unexpected jumps back to r3. */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                Assert(    rc == VINF_SUCCESS
                       ||  (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
                GMMR0CheckSharedModulesEnd(pVM);
            }
# else
            int rc = GMMR0CheckSharedModules(pVM, pVCpu);
# endif
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
        {
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
        }
#endif

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }

        /*
         * PDM Wrappers.
         */
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }

        /*
         * Requests to the internal networking service.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFABORTWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}


/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package.
 */
static int vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}


/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0
        &&  idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.idCpu = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}


/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}
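
/*
 * The pointer arithmetic above is the usual "container of" idiom, recovering
 * the wrapper structure from a pointer to one of its members. A generic
 * sketch with made-up names (Wrapper, u32Stuff, pSomeWrapper are illustrative
 * only):
 *
 *     struct Wrapper { uint32_t u32Stuff; RTLOGGER Logger; };
 *     RTLOGGER *pMember = &pSomeWrapper->Logger;
 *     struct Wrapper *pBack = (struct Wrapper *)(  (uintptr_t)pMember
 *                                                - RT_OFFSETOF(struct Wrapper, Logger));
 */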

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC
        ||  cchBuf < 2)
        return 0;

    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}

#ifdef LOG_ENABLED

/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}

#endif /* LOG_ENABLED */

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
# ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
# ifdef RT_OS_LINUX
    return true;
# else
    return false;
# endif
#endif
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the logger.
     */
    PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}