VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 956

Last change on this file since 956 was 914, checked in by vboxsync, 18 years ago

PVMR0 changes for darwin.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 24.7 KB
Line 
1/* $Id: VMMR0.cpp 914 2007-02-14 23:23:08Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VMM
27#ifdef __AMD64__ /** @todo fix logging on __AMD64__ (swapgs) */
28# define LOG_DISABLED
29#endif
30#include <VBox/vmm.h>
31#include <VBox/sup.h>
32#include <VBox/trpm.h>
33#include <VBox/cpum.h>
34#include <VBox/stam.h>
35#include <VBox/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vm.h>
38#include <VBox/intnet.h>
39#include <VBox/hwaccm.h>
40
41#include <VBox/err.h>
42#include <VBox/version.h>
43#include <VBox/log.h>
44#include <iprt/assert.h>
45
46#if defined(_MSC_VER) && defined(__AMD64__) /** @todo check this with with VC7! */
47# pragma intrinsic(_AddressOfReturnAddress)
48#endif
49
50
51/*******************************************************************************
52* Internal Functions *
53*******************************************************************************/
54static int VMMR0Init(PVM pVM, unsigned uVersion);
55static int VMMR0Term(PVM pVM);
56__BEGIN_DECLS
57VMMR0DECL(int) ModuleInit(void);
58VMMR0DECL(void) ModuleTerm(void);
59__END_DECLS
60
61
//#define DEBUG_NO_RING0_ASSERTIONS
#ifdef DEBUG_NO_RING0_ASSERTIONS
/** The VM whose ring-0 assertions should be routed back to ring-3 via the
 * armed call-host longjmp instead of hitting a breakpoint.  Set around the
 * guest-code run in VMMR0_DO_HWACC_RUN and consumed by RTAssertDoBreakpoint. */
static PVM g_pVMAssert = 0;
#endif

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifndef __AMD64__ /* doesn't link here */
/** Pointer to the internal networking service instance.
 * Created in ModuleInit, destroyed in ModuleTerm. */
PINTNET g_pIntNet = 0;
#endif
74
75
76/**
77 * Initialize the module.
78 * This is called when we're first loaded.
79 *
80 * @returns 0 on success.
81 * @returns VBox status on failure.
82 */
83VMMR0DECL(int) ModuleInit(void)
84{
85#ifndef __AMD64__ /* doesn't link here */
86 LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
87 g_pIntNet = NULL;
88 LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
89 int rc = INTNETR0Create(&g_pIntNet);
90 if (VBOX_SUCCESS(rc))
91 {
92 LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
93 return 0;
94 }
95 g_pIntNet = NULL;
96 LogFlow(("ModuleTerm: returns %Vrc\n", rc));
97 return rc;
98#else
99 return 0;
100#endif
101}
102
103
104/**
105 * Terminate the module.
106 * This is called when we're finally unloaded.
107 */
108VMMR0DECL(void) ModuleTerm(void)
109{
110#ifndef __AMD64__ /* doesn't link here */
111 LogFlow(("ModuleTerm:\n"));
112 if (g_pIntNet)
113 {
114 INTNETR0Destroy(g_pIntNet);
115 g_pIntNet = NULL;
116 }
117 LogFlow(("ModuleTerm: returns\n"));
118#endif
119}
120
121
/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uVersion    The minimum module version required.
 */
static int VMMR0Init(PVM pVM, unsigned uVersion)
{
    /*
     * Check if compatible version.
     * Accept an exact match, or the same major version with an equal or
     * newer minor version.
     */
    if (    uVersion != VBOX_VERSION
        &&  (   VBOX_GET_VERSION_MAJOR(uVersion) != VBOX_VERSION_MAJOR
             || VBOX_GET_VERSION_MINOR(uVersion) < VBOX_VERSION_MINOR))
        return VERR_VERSION_MISMATCH;
    /* pVM must be a valid pointer and must point at its own R0 mapping. */
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

    /*
     * Register the EMT R0 logger instance for this thread (the EMT).
     */
    PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
    if (pR0Logger)
    {
#if 0 /* testing of the logger. */
        LogCom(("VMMR0Init: before %p\n", RTLogDefaultInstance()));
        LogCom(("VMMR0Init: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("VMMR0Init: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("VMMR0Init: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("VMMR0Init: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("VMMR0Init: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("VMMR0Init: returned succesfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("VMMR0Init: returned succesfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("VMMR0Init: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("VMMR0Init: returned succesfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("VMMR0Init: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("VMMR0Init: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
#endif
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
    }


    /*
     * Init VMXM.
     */
    HWACCMR0Init(pVM);

    /*
     * Init CPUM.
     */
    int rc = CPUMR0Init(pVM);

    /* On failure, deregister the logger again so this thread doesn't keep
       a logger for a VM that failed to initialize. */
    if (RT_FAILURE(rc))
        RTLogSetDefaultInstanceThread(NULL, 0);
    return rc;
}
193
194
195/**
196 * Terminates the R0 driver for a particular VM instance.
197 *
198 * @returns VBox status code.
199 *
200 * @param pVM The VM instance in question.
201 */
202static int VMMR0Term(PVM pVM)
203{
204 /*
205 * Deregister the logger.
206 */
207 RTLogSetDefaultInstanceThread(NULL, 0);
208 return VINF_SUCCESS;
209}
210
211
212/**
213 * Calls the ring-3 host code.
214 *
215 * @returns VBox status code of the ring-3 call.
216 * @param pVM The VM handle.
217 * @param enmOperation The operation.
218 * @param uArg The argument to the operation.
219 */
220VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
221{
222/** @todo profile this! */
223 pVM->vmm.s.enmCallHostOperation = enmOperation;
224 pVM->vmm.s.u64CallHostArg = uArg;
225 pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
226 int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
227 if (rc == VINF_SUCCESS)
228 rc = pVM->vmm.s.rcCallHost;
229 return rc;
230}
231
232
#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * Bumps the STAM counter matching the status code returned from GC/guest
 * execution; VINF_VMM_CALL_HOST is broken down further by the call-host
 * operation stored in the VM structure.
 *
 * @param   pVM     The VM handle.
 * @param   rc      The status code.
 */
static void vmmR0RecordRC(PVM pVM, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitchInt);
            break;
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetExceptionPrivilege);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIOWrite);
            break;
        case VINF_IOM_HC_IOPORT_READWRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIOReadWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchIretIRQ);
            break;
        case VERR_REM_FLUSHED_PAGES_OVERFLOW:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPageOverflow);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            /* Break the call-host returns down by the requested operation. */
            switch (pVM->vmm.s.enmCallHostOperation)
            {
                case VMMCALLHOST_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMLock);
                    break;
                case VMMCALLHOST_PDM_QUEUE_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMQueueFlush);
                    break;
                case VMMCALLHOST_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMPoolGrow);
                    break;
                case VMMCALLHOST_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMLock);
                    break;
                case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRemReplay);
                    break;
                case VMMCALLHOST_PGM_RAM_GROW_RANGE:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMGrowRAM);
                    break;
                case VMMCALLHOST_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLogFlush);
                    break;
                case VMMCALLHOST_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetError);
                    break;
                case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetRuntimeError);
                    break;
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCallHost);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMChangeMode);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulHlt);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPendingRequest);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */
404
405
/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * Dispatches on uOperation: guest execution (raw-mode or hardware
 * accelerated), VM init/term, hypervisor calls, and internal networking
 * service requests.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   uOperation  Which operation to execute. (VMMR0OPERATION)
 * @param   pvArg       Argument to the operation.
 */
VMMR0DECL(int) VMMR0Entry(PVM pVM, unsigned /* make me an enum */ uOperation, void *pvArg)
{
    switch (uOperation)
    {
        /*
         * Switch to GC.
         * These calls return whatever the GC returns.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
            register int rc;
            /* Keep the virtual TSC running only while the guest executes. */
            TMCpuTickResume(pVM);
            pVM->vmm.s.iLastGCRc = rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
            TMCpuTickPause(pVM);

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif

            /*
             * Check if there is an exit R0 action associated with the return code.
             */
            switch (rc)
            {
                /*
                 * Default - no action, just return.
                 */
                default:
#if HC_ARCH_BITS == 64 /* AMD64 debugging - to be removed */
                    /* Codes in the 0xc0caff00..0xc0caffff range fall through to
                       the interrupt dispatching below; everything else returns. */
                    if ((unsigned)rc - 0xc0caff00U > 0xff)
                        return rc;
                    /* fall thru */
#else
                    return rc;
#endif

                /*
                 * We'll let TRPM change the stack frame so our return is different.
                 * Just keep in mind that after the call, things have changed!
                 */
                case VINF_EM_RAW_INTERRUPT:
                case VINF_EM_RAW_INTERRUPT_HYPER:
                {
#ifdef VBOX_WITHOUT_IDT_PATCHING
                    TRPMR0DispatchHostInterrupt(pVM);
#else /* !VBOX_WITHOUT_IDT_PATCHING */
                    /*
                     * Don't trust the compiler to get this right.
                     * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
                     * mode too because we push the arguments on the stack in the IDT patch code.
                     */
# if defined(__GNUC__)
                    void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
# elif defined(_MSC_VER) && defined(__AMD64__) /** @todo check this with with VC7! */
                    void *pvRet = (uint8_t *)_AddressOfReturnAddress();
# elif defined(__X86__)
                    void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
# else
#  error "huh?"
# endif
                    /* Sanity check: the stack slots above the return address must hold
                       our own three arguments, otherwise the frame guess is wrong. */
                    if (    ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
                        &&  ((uintptr_t *)pvRet)[2] == (uintptr_t)uOperation
                        &&  ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
                        TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
                    else
                    {
# if defined(DEBUG) || defined(LOG_ENABLED)
                        /* Warn once, then fall back to direct dispatching. */
                        static bool s_fHaveWarned = false;
                        if (!s_fHaveWarned)
                        {
                            s_fHaveWarned = true;
                            //RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n"); -- @todo export me!
                            RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
                        }
# endif
                        TRPMR0DispatchHostInterrupt(pVM);
                    }
#endif /* !VBOX_WITHOUT_IDT_PATCHING */
                    return rc;
                }
            }
            /* Won't get here! */
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            int rc;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
            TMCpuTickResume(pVM);
            rc = HWACCMR0Enable(pVM);
            if (VBOX_SUCCESS(rc))
            {
#ifdef DEBUG_NO_RING0_ASSERTIONS
                /* Route assertions back to ring-3 while guest code runs. */
                g_pVMAssert = pVM;
#endif
                rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM); /* this may resume code. */
#ifdef DEBUG_NO_RING0_ASSERTIONS
                g_pVMAssert = 0;
#endif
                int rc2 = HWACCMR0Disable(pVM);
                AssertRC(rc2);
            }
            TMCpuTickPause(pVM);
            pVM->vmm.s.iLastGCRc = rc;

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif
            /* No special action required for external interrupts, just return. */
            return rc;
        }

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return VMMR0Init(pVM, (unsigned)(uintptr_t)pvArg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0Term(pVM);

        /*
         * Setup the hardware accelerated raw-mode session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
            return HWACCMR0SetupVMX(pVM);

        /*
         * Switch to GC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
            return rc;
        }

#if !defined(__L4__) && !defined(__AMD64__) /** @todo Port this to L4. */ /** @todo fix logging and other services problems on AMD64. */
        /*
         * Services.
         */
        case VMMR0_DO_INTNET_OPEN:
        case VMMR0_DO_INTNET_IF_CLOSE:
        case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
        case VMMR0_DO_INTNET_IF_SEND:
        case VMMR0_DO_INTNET_IF_WAIT:
        {
            /*
             * Validate arguments a bit first.
             */
            if (!VALID_PTR(pvArg))
                return VERR_INVALID_POINTER;
            if (!VALID_PTR(pVM))
                return VERR_INVALID_POINTER;
            if (pVM->pVMHC != pVM)
                return VERR_INVALID_POINTER;
            if (!VALID_PTR(pVM->pSession))
                return VERR_INVALID_POINTER;
            if (!g_pIntNet)
                return VERR_FILE_NOT_FOUND; ///@todo fix this status code!

            /*
             * Unpack the arguments and call the service.
             */
            switch (uOperation)
            {
                case VMMR0_DO_INTNET_OPEN:
                {
                    PINTNETOPENARGS pArgs = (PINTNETOPENARGS)pvArg;
                    return INTNETR0Open(g_pIntNet, pVM->pSession, &pArgs->szNetwork[0], pArgs->cbSend, pArgs->cbRecv, &pArgs->hIf);
                }

                case VMMR0_DO_INTNET_IF_CLOSE:
                {
                    PINTNETIFCLOSEARGS pArgs = (PINTNETIFCLOSEARGS)pvArg;
                    return INTNETR0IfClose(g_pIntNet, pArgs->hIf);
                }

                case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
                {
                    PINTNETIFGETRING3BUFFERARGS pArgs = (PINTNETIFGETRING3BUFFERARGS)pvArg;
                    return INTNETR0IfGetRing3Buffer(g_pIntNet, pArgs->hIf, &pArgs->pRing3Buf);
                }

                case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
                {
                    PINTNETIFSETPROMISCUOUSMODEARGS pArgs = (PINTNETIFSETPROMISCUOUSMODEARGS)pvArg;
                    return INTNETR0IfSetPromiscuousMode(g_pIntNet, pArgs->hIf, pArgs->fPromiscuous);
                }

                case VMMR0_DO_INTNET_IF_SEND:
                {
                    PINTNETIFSENDARGS pArgs = (PINTNETIFSENDARGS)pvArg;
                    return INTNETR0IfSend(g_pIntNet, pArgs->hIf, pArgs->pvFrame, pArgs->cbFrame);
                }

                case VMMR0_DO_INTNET_IF_WAIT:
                {
                    PINTNETIFWAITARGS pArgs = (PINTNETIFWAITARGS)pvArg;
                    return INTNETR0IfWait(g_pIntNet, pArgs->hIf, pArgs->cMillies);
                }

                default:
                    return VERR_NOT_SUPPORTED;
            }
        }
#endif /* !__L4__ */

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


        default:
            /*
             * We're returning VERR_NOT_SUPPORT here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", uOperation));
            return VERR_NOT_SUPPORTED;
    }
    /* NOTE(review): only reachable via the 'break' in VMMR0_DO_RAW_RUN and no
       value is returned on that path; the comment there claims it can't happen
       — confirm. */
}
664
665
/**
 * Internal R0 logger worker: Flush logger.
 *
 * Recovers the owning VM from the embedded logger instance and forwards the
 * flush request to ring-3 via VMMR0CallHost, provided the longjmp buffer is
 * armed.  Bails out silently (with a COM log note) on any validation failure.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
    /* pLogger is embedded in a VMMR0LOGGER; step back to the container. */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  !VALID_PTR(pLogger)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
        LogCom(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
        return;
    }

    /* The container must point back at a self-consistent VM structure. */
    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMHC != pVM)
    {
        LogCom(("vmmR0LoggerFlush: pVM=%p! pLogger=%p\n", pVM, pLogger));
        return;
    }

    /*
     * Check that the jump buffer is armed.
     */
#ifdef __X86__
    if (!pVM->vmm.s.CallHostR0JmpBuf.eip)
#else
    if (!pVM->vmm.s.CallHostR0JmpBuf.rip)
#endif
    {
        LogCom(("vmmR0LoggerFlush: Jump buffer isn't armed!\n"));
        /* Discard the buffered output; there is no way to deliver it. */
        pLogger->offScratch = 0;
        return;
    }

    VMMR0CallHost(pVM, VMMCALLHOST_VMM_LOGGER_FLUSH, 0);
}
712
713#ifdef DEBUG_NO_RING0_ASSERTIONS
/**
 * Check if we really want to hit a breakpoint.
 * Can jump back to ring-3 when the longjmp is armed.
 *
 * @returns true (always requests the breakpoint when we actually get here).
 */
DECLEXPORT(bool) RTCALL RTAssertDoBreakpoint()
{
    if (g_pVMAssert)
    {
        /* NOTE(review): reuses the VMMCALLHOST_VMM_LOGGER_FLUSH operation to
           transfer control to ring-3 — presumably any armed operation would
           do; confirm the intent. */
        g_pVMAssert->vmm.s.enmCallHostOperation = VMMCALLHOST_VMM_LOGGER_FLUSH;
        g_pVMAssert->vmm.s.u64CallHostArg = 0;
        g_pVMAssert->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
        int rc = vmmR0CallHostLongJmp(&g_pVMAssert->vmm.s.CallHostR0JmpBuf, VERR_INTERNAL_ERROR);
        if (rc == VINF_SUCCESS)
            rc = g_pVMAssert->vmm.s.rcCallHost;
        /* rc is intentionally unused after this point: if the longjmp was
           armed we never return here, otherwise we fall through. */
    }

    return true;
}
732
733
734#undef LOG_GROUP
735#define LOG_GROUP LOG_GROUP_EM
736
/** Runtime assert implementation for Native Win32 Ring-0.
 * Logs the assertion details instead of breaking into the debugger
 * (DEBUG_NO_RING0_ASSERTIONS builds only).
 *
 * @param   pszExpr      The expression that failed.
 * @param   uLine        The source line number.
 * @param   pszFile      The source file name.
 * @param   pszFunction  The function name.
 */
DECLEXPORT(void) RTCALL AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    Log(("\n!!Assertion Failed!!\n"
         "Expression: %s\n"
         "Location : %s(%d) %s\n",
         pszExpr, pszFile, uLine, pszFunction));
}
745
746#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette