VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 32792

Last change on this file since 32792 was 32792, checked in by vboxsync, 15 years ago

And more

/* $Id: VMMR0.cpp 32792 2010-09-28 13:52:18Z vboxsync $ */
/** @file
 * VMM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VMM
#include <VBox/vmm.h>
#include <VBox/sup.h>
#include <VBox/trpm.h>
#include <VBox/cpum.h>
#include <VBox/pdmapi.h>
#include <VBox/pgm.h>
#include <VBox/stam.h>
#include <VBox/tm.h>
#include "VMMInternal.h"
#include <VBox/vm.h>

#include <VBox/gvmm.h>
#include <VBox/gmm.h>
#include <VBox/intnet.h>
#include <VBox/hwaccm.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/version.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/timer.h>

#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
# pragma intrinsic(_AddressOfReturnAddress)
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
VMMR0DECL(int) ModuleInit(void);
VMMR0DECL(void) ModuleTerm(void);
RT_C_DECLS_END


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Drag in necessary library bits.
 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMGCDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce
};
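/* Note: merely taking the addresses of RTCrc32 and RTOnce above should be
   enough to make the linker keep the corresponding IPRT code in this image. */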


#if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
/* Increase the size of the image to work around the refusal of Win64 to
 * load images in the 0x80000 range.
 */
static uint64_t u64BloatImage[8192] = {0};
#endif

/**
 * Initialize the module.
 * This is called when we're first loaded.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 */
VMMR0DECL(int) ModuleInit(void)
{
    LogFlow(("ModuleInit:\n"));

    /*
     * Initialize the GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     */
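    /* Note: if any step below fails, the components already initialized are
       torn down again in reverse order on the bail-out path. */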
    int rc = GVMMR0Init();
    if (RT_SUCCESS(rc))
    {
        rc = GMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = HWACCMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = PGMRegisterStringFormatTypes();
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                    rc = PGMR0DynMapInit();
#endif
                    if (RT_SUCCESS(rc))
                    {
                        rc = IntNetR0Init();
                        if (RT_SUCCESS(rc))
                        {
                            LogFlow(("ModuleInit: returns success.\n"));
                            return VINF_SUCCESS;
                        }

                        /* bail out */
                        LogFlow(("ModuleInit: returns %Rrc\n", rc));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        PGMR0DynMapTerm();
#endif
                    }
                    PGMDeregisterStringFormatTypes();
                }
                HWACCMR0Term();
            }
            GMMR0Term();
        }
        GVMMR0Term();
    }

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}


/**
 * Terminate the module.
 * This is called when we're finally unloaded.
 */
VMMR0DECL(void) ModuleTerm(void)
{
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin) and HWACCM global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
    PGMDeregisterStringFormatTypes();
    HWACCMR0Term();

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    LogFlow(("ModuleTerm: returns\n"));
}


/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uSvnRev     The SVN revision of the ring-3 part.
 * @thread  EMT.
 */
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev)
{
    /*
     * Match the SVN revisions.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VERSION_MISMATCH;
    }
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];

    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
        LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
        LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
        LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
# endif
        Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Check if the host supports high resolution timers or not.
     */
    if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
        && !RTTimerCanDoHighResolution())
        pVM->vmm.s.fUsePeriodicPreemptionTimers = false;

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HWACCM, CPUM and PGM (Darwin only).
         */
        rc = HWACCMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0Init(pVM); /** @todo rename to CPUMR0InitVM */
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
                    GVMMR0DoneInitVM(pVM);
                    return rc;
                }

                /* bail out */
            }
            HWACCMR0TermVM(pVM);
        }
    }
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}


/**
 * Terminates the R0 driver for a particular VM instance.
 *
 * This is normally called by ring-3 as part of the VM termination process, but
 * may alternatively be called during the support driver session cleanup when
 * the VM object is destroyed (see GVMM).
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   pGVM        Pointer to the global VM structure. Optional.
 * @thread  EMT or session clean up thread.
 */
VMMR0DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HWACCMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}


#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_QUEUES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
            else if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
            else if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
            else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TO_R3))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
            else
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
            break;

        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HWACCM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Unused ring-0 entry point that used to be called from the interrupt gate.
 *
 * Will be removed one of the next times we do a major SUPDrv version bump.
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   enmOperation    Which operation to execute.
 * @param   pvArg           Argument to the operation.
 * @remarks Assume called with interrupts disabled.
 */
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something else
     * than -1 which the interrupt gate glue code might return.
     */
    Log(("operation %#x is not supported\n", enmOperation));
    return VERR_NOT_SUPPORTED;
}


/**
 * The Ring 0 entry point, called by the fast-ioctl path.
 *
 * @param   pVM             The VM to operate on.
 *                          The return code is stored in pVM->vmm.s.iLastGZRc.
 * @param   idCpu           The Virtual CPU ID of the calling EMT.
 * @param   enmOperation    Which operation to execute.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];

    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Some safety precautions first. */
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hwaccm */
                          && pVM->cCpus == 1 /* !smp */
                          && PGMGetHyperCR3(pVCpu)))
#else
            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled
                          && pVM->cCpus == 1))
#endif
            {
                /* Disable preemption and update the periodic preemption timer. */
                RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&PreemptState);
                ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

                /* We might need to disable VT-x if the active switcher turns off paging. */
                bool fVTxDisabled;
                int rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
                if (RT_SUCCESS(rc))
                {
                    RTCCUINTREG uFlags = ASMIntDisableFlags();
                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

                    TMNotifyStartOfExecution(pVCpu);
                    rc = pVM->vmm.s.pfnHostToGuestR0(pVM);
                    pVCpu->vmm.s.iLastGZRc = rc;
                    TMNotifyEndOfExecution(pVCpu);

                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

                    /* Re-enable VT-x if previously turned off. */
                    HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

                    if (    rc == VINF_EM_RAW_INTERRUPT
                        ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                        TRPMR0DispatchHostInterrupt(pVM);

                    ASMSetFlags(uFlags);

#ifdef VBOX_WITH_STATISTICS
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                    vmmR0RecordRC(pVM, pVCpu, rc);
#endif
                }
                else
                    pVCpu->vmm.s.iLastGZRc = rc;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
                RTThreadPreemptRestore(&PreemptState);
            }
            else
            {
                Assert(!pVM->vmm.s.fSwitcherDisabled);
                pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
                if (pVM->cCpus != 1)
                    pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
                if (!PGMGetHyperCR3(pVCpu))
                    pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif
            }
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         *
         * Disable interrupts before we do anything interesting. On Windows we
         * avoid this by having the support driver raise the IRQL before calling
         * us; this way we hope to get away with page faults and later calling
         * into the kernel.
         */
        case VMMR0_DO_HWACC_RUN:
        {
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            RTCCUINTREG uFlags = ASMIntDisableFlags();
#endif
            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

#ifdef LOG_ENABLED
            if (pVCpu->idCpu > 0)
            {
                /* Lazy registration of ring 0 loggers. */
                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                if (    pR0Logger
                    &&  !pR0Logger->fRegistered)
                {
                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                    pR0Logger->fRegistered = true;
                }
            }
#endif
            int rc;
            if (!HWACCMR0SuspendPending())
            {
                rc = HWACCMR0Enter(pVM, pVCpu);
                if (RT_SUCCESS(rc))
                {
                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
                    int rc2 = HWACCMR0Leave(pVM, pVCpu);
                    AssertRC(rc2);
                }
                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
            }
            else
            {
                /* System is about to go into suspend mode; go back to ring 3. */
                rc = VINF_EM_RAW_INTERRUPT;
            }
            pVCpu->vmm.s.iLastGZRc = rc;

            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
            RTThreadPreemptRestore(&PreemptState);
#elif !defined(RT_OS_WINDOWS)
            ASMSetFlags(uFlags);
#endif

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            /* No special action required for external interrupts, just return. */
            break;
        }

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
    }
}


/**
 * Validates a session or VM session argument.
 *
 * @returns true / false accordingly.
 * @param   pVM             The VM argument.
 * @param   pClaimedSession The session claim.
 * @param   pSession        The session argument.
 */
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
    /* This must be set! */
    if (!pSession)
        return false;

    /* Only one of the two may be set. */
    if (pVM && pClaimedSession)
        return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}

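/* Note: the request cases in vmmR0EntryExWorker below use vmmR0IsValidSession
   to check that the session recorded in a request packet matches the support
   driver session of the actual caller. */
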
729
730/**
731 * VMMR0EntryEx worker function, either called directly or when ever possible
732 * called thru a longjmp so we can exit safely on failure.
733 *
734 * @returns VBox status code.
735 * @param pVM The VM to operate on.
736 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
737 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
738 * @param enmOperation Which operation to execute.
739 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
740 * The support driver validates this if it's present.
741 * @param u64Arg Some simple constant argument.
742 * @param pSession The session of the caller.
743 * @remarks Assume called with interrupts _enabled_.
744 */
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Common VM pointer validation.
     */
    if (pVM)
    {
        if (RT_UNLIKELY(    !VALID_PTR(pVM)
                        ||  ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(    pVM->enmVMState < VMSTATE_CREATING
                        ||  pVM->enmVMState > VMSTATE_TERMINATED
                        ||  pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }

        if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }


    switch (enmOperation)
    {
        /*
         * GVM requests
         */
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, (uint32_t)u64Arg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);

        /*
         * Attempt to enable hwacc mode and check the current setting.
         */
        case VMMR0_DO_HWACC_ENABLE:
            return HWACCMR0EnableAllCpus(pVM);

        /*
         * Setup the hardware accelerated session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
        {
            RTCCUINTREG fFlags = ASMIntDisableFlags();
            int rc = HWACCMR0SetupVM(pVM);
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * Switch to RC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            int rc;
            bool fVTxDisabled;

            /* Safety precaution as HWACCM can disable the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
                return VERR_NOT_SUPPORTED;

#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            RTCCUINTREG fFlags = ASMIntDisableFlags();

            /* We might need to disable VT-x if the active switcher turns off paging. */
            rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled);
            if (RT_FAILURE(rc))
                return rc;

            rc = pVM->vmm.s.pfnHostToGuestR0(pVM);

            /* Re-enable VT-x if previously turned off. */
            HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);

            /** @todo dispatch interrupts? */
            ASMSetFlags(fFlags);
            return rc;
        }

        /*
         * PGM wrappers.
         */
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        /*
         * GMM wrappers.
         */
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (    u64Arg
                ||  pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());

# ifdef DEBUG_sandervl
            /* Make sure that log flushes can jump back to ring-3; it is annoying
               to get an incomplete log (this is risky though, as the code doesn't
               take this into account). */
            /** @todo this can have bad side effects for unexpected jumps back to r3. */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                Assert(     rc == VINF_SUCCESS
                       ||   (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
                GMMR0CheckSharedModulesEnd(pVM);
            }
# else
            int rc = GMMR0CheckSharedModules(pVM, pVCpu);
# endif
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
        {
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
        }
#endif

        /*
         * A quick GCFGM mock-up.
         */
        /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
        case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
            return rc;
        }

        /*
         * PDM Wrappers.
         */
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }

        /*
         * Requests to the internal networking service.
         */
        case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
        case VMMR0_DO_SLOW_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HWACCMR0TestSwitcher3264(pVM);
#endif
        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
            return VERR_NOT_SUPPORTED;
    }
}


/**
 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
 */
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package
 */
static int vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}


/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * @returns VBox status code.
 * @param   pVM             The VM to operate on.
 * @param   idCpu           Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                          is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
 * @param   enmOperation    Which operation to execute.
 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
 * @param   u64Arg          Some simple constant argument.
 * @param   pSession        The session of the caller.
 * @remarks Assume called with interrupts _enabled_.
 */
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (    VALID_PTR(pVM)
        &&  pVM->pVMR0
        &&  idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM = pVM;
                Args.idCpu = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq = pReq;
                Args.u64Arg = u64Arg;
                Args.pSession = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}

/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
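    /* Recover the VMMR0LOGGER wrapper from its embedded RTLOGGER member
       (container_of-style pointer arithmetic). */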
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif
}

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC
        ||  cchBuf < 2)
        return 0;

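    /* The prefix is the virtual CPU id rendered as two lowercase hex digits;
       for example, VCPU 10 gets every log line prefixed with "0a". */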
    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}

#ifdef LOG_ENABLED

/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       The shared virtual cpu structure.
 */
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->pVMR0;
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}

#endif /* LOG_ENABLED */

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
#ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
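                /* Ask ring-3 to deal with the assertion; only hit the ring-0
                   breakpoint if that call fails. */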
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression. Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the logger.
     */
    PRTLOGGER pLog = RTLogDefaultInstance(); /** @todo we want this for release as well! */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}