VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 489

Last change on this file since 489 was 420, checked in by vboxsync, 18 years ago

Don't trust the compiler to give us the right return address for interrupt dispatching. (Hope this doesn't break on x86.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 24.3 KB
Line 
1/* $Id: VMMR0.cpp 420 2007-01-29 18:17:06Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VMM
27#ifdef __AMD64__ /** @todo fix logging on __AMD64__ (swapgs) */
28# define LOG_DISABLED
29#endif
30#include <VBox/vmm.h>
31#include <VBox/sup.h>
32#include <VBox/trpm.h>
33#include <VBox/cpum.h>
34#include <VBox/stam.h>
35#include <VBox/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vm.h>
38#include <VBox/intnet.h>
39#include <VBox/hwaccm.h>
40
41#include <VBox/err.h>
42#include <VBox/version.h>
43#include <VBox/log.h>
44#include <iprt/assert.h>
45
46
47/*******************************************************************************
48* Internal Functions *
49*******************************************************************************/
50static int VMMR0Init(PVM pVM, unsigned uVersion);
51static int VMMR0Term(PVM pVM);
52__BEGIN_DECLS
53VMMR0DECL(int) ModuleInit(void);
54VMMR0DECL(void) ModuleTerm(void);
55__END_DECLS
56
57
58//#define DEBUG_NO_RING0_ASSERTIONS
59#ifdef DEBUG_NO_RING0_ASSERTIONS
60static PVM g_pVMAssert = 0;
61#endif
62
63/*******************************************************************************
64* Global Variables *
65*******************************************************************************/
66#ifndef __AMD64__ /* doesn't link here */
67/** Pointer to the internal networking service instance. */
68PINTNET g_pIntNet = 0;
69#endif
70
71
72/**
73 * Initialize the module.
74 * This is called when we're first loaded.
75 *
76 * @returns 0 on success.
77 * @returns VBox status on failure.
78 */
79VMMR0DECL(int) ModuleInit(void)
80{
81#ifndef __AMD64__ /* doesn't link here */
82 LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
83 g_pIntNet = NULL;
84 LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
85 int rc = INTNETR0Create(&g_pIntNet);
86 if (VBOX_SUCCESS(rc))
87 {
88 LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
89 return 0;
90 }
91 g_pIntNet = NULL;
92 LogFlow(("ModuleTerm: returns %Vrc\n", rc));
93 return rc;
94#else
95 return 0;
96#endif
97}
98
99
100/**
101 * Terminate the module.
102 * This is called when we're finally unloaded.
103 */
104VMMR0DECL(void) ModuleTerm(void)
105{
106#ifndef __AMD64__ /* doesn't link here */
107 LogFlow(("ModuleTerm:\n"));
108 if (g_pIntNet)
109 {
110 INTNETR0Destroy(g_pIntNet);
111 g_pIntNet = NULL;
112 }
113 LogFlow(("ModuleTerm: returns\n"));
114#endif
115}
116
117
/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * Checks module version compatibility, registers the per-EMT ring-0 logger
 * (if ring-3 prepared one), then initializes the HWACCM and CPUM ring-0 bits.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uVersion    The minimum module version required.
 */
static int VMMR0Init(PVM pVM, unsigned uVersion)
{
    /*
     * Check if compatible version.
     * Accepted: the exact build version, or the same major version with an
     * equal or newer minor version.
     */
    if (    uVersion != VBOX_VERSION
        &&  (   VBOX_GET_VERSION_MAJOR(uVersion) != VBOX_VERSION_MAJOR
             || VBOX_GET_VERSION_MINOR(uVersion) < VBOX_VERSION_MINOR))
        return VERR_VERSION_MISMATCH;

    /*
     * Register the EMT R0 logger instance.
     */
    PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
    if (pR0Logger)
    {
#if 0 /* testing of the logger. */
        LogCom(("VMMR0Init: before %p\n", RTLogDefaultInstance()));
        LogCom(("VMMR0Init: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("VMMR0Init: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("VMMR0Init: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("VMMR0Init: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("VMMR0Init: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("VMMR0Init: returned succesfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("VMMR0Init: returned succesfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("VMMR0Init: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("VMMR0Init: returned succesfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("VMMR0Init: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("VMMR0Init: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
#endif
        /* Make this logger the default for the calling (EMT) thread. */
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
    }


    /*
     * Init HWACCM.  (Original comment said "VMXM"; the call is HWACCMR0Init.)
     * NOTE(review): the return value is ignored here — confirm that is intended.
     */
    HWACCMR0Init(pVM);

    /*
     * Init CPUM.
     */
    int rc = CPUMR0Init(pVM);

    /* On failure, unhook the thread logger again so it doesn't reference a
       VM instance that ring-3 is about to tear down. */
    if (RT_FAILURE(rc))
        RTLogSetDefaultInstanceThread(NULL, 0);
    return rc;
}
186
187
188/**
189 * Terminates the R0 driver for a particular VM instance.
190 *
191 * @returns VBox status code.
192 *
193 * @param pVM The VM instance in question.
194 */
195static int VMMR0Term(PVM pVM)
196{
197 /*
198 * Deregister the logger.
199 */
200 RTLogSetDefaultInstanceThread(NULL, 0);
201 return VINF_SUCCESS;
202}
203
204
205/**
206 * Calls the ring-3 host code.
207 *
208 * @returns VBox status code of the ring-3 call.
209 * @param pVM The VM handle.
210 * @param enmOperation The operation.
211 * @param uArg The argument to the operation.
212 */
213VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
214{
215/** @todo profile this! */
216 pVM->vmm.s.enmCallHostOperation = enmOperation;
217 pVM->vmm.s.u64CallHostArg = uArg;
218 pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
219 int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
220 if (rc == VINF_SUCCESS)
221 rc = pVM->vmm.s.rcCallHost;
222 return rc;
223}
224
225
226#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * Maps a guest-run status code onto the corresponding STAM counter; for
 * VINF_VMM_CALL_HOST the pending call-host operation selects the counter.
 *
 * @param   pVM     The VM handle.
 * @param   rc      The status code returned from running the guest.
 */
static void vmmR0RecordRC(PVM pVM, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitchInt);
            break;
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetExceptionPrivilege);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIRETTrap);
            break;
        /* IOM/PATM: I/O port and MMIO accesses handed back for handling. */
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIOWrite);
            break;
        case VINF_IOM_HC_IOPORT_READWRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIOReadWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchWrite);
            break;
        /* Instruction emulation requests. */
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchIretIRQ);
            break;
        case VERR_REM_FLUSHED_PAGES_OVERFLOW:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPageOverflow);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            /* Attribute the exit to the specific call-host operation. */
            switch (pVM->vmm.s.enmCallHostOperation)
            {
                case VMMCALLHOST_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMLock);
                    break;
                case VMMCALLHOST_PDM_QUEUE_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMQueueFlush);
                    break;
                case VMMCALLHOST_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMPoolGrow);
                    break;
                case VMMCALLHOST_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMLock);
                    break;
                case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRemReplay);
                    break;
                case VMMCALLHOST_PGM_RAM_GROW_RANGE:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMGrowRAM);
                    break;
                case VMMCALLHOST_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLogFlush);
                    break;
                case VMMCALLHOST_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetError);
                    break;
                case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetRuntimeError);
                    break;
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCallHost);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMChangeMode);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulHlt);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPendingRequest);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMisc);
            break;
    }
}
396#endif /* VBOX_WITH_STATISTICS */
397
398
/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * Dispatches on uOperation: runs guest code (raw-mode or hardware
 * accelerated), performs per-VM init/term, or services internal
 * networking requests.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   uOperation  Which operation to execute. (VMMR0OPERATION)
 * @param   pvArg       Argument to the operation.
 */
VMMR0DECL(int) VMMR0Entry(PVM pVM, unsigned /* make me an enum */ uOperation, void *pvArg)
{
    switch (uOperation)
    {
        /*
         * Switch to GC.
         * These calls return whatever the GC returns.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
            register int rc;
            /* Resume the virtual TSC around the guest run, pause it again after. */
            TMCpuTickResume(pVM);
            pVM->vmm.s.iLastGCRc = rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
            TMCpuTickPause(pVM);

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif

            /*
             * Check if there is an exit R0 action associated with the return code.
             */
            switch (rc)
            {
                /*
                 * Default - no action, just return.
                 */
                default:
                    return rc;

                /*
                 * We'll let TRPM change the stack frame so our return is different.
                 * Just keep in mind that after the call, things have changed!
                 */
                case VINF_EM_RAW_INTERRUPT:
                case VINF_EM_RAW_INTERRUPT_HYPER:
                {
#ifdef VBOX_WITHOUT_IDT_PATCHING
                    TRPMR0DispatchHostInterrupt(pVM);
#else /* !VBOX_WITHOUT_IDT_PATCHING */
                    /*
                     * Don't trust the compiler to get this right.
                     * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
                     * mode too because we push the arguments on the stack in the IDT patch code.
                     */
# if defined(__GNUC__)
                    /* One word above the frame pointer = our return address slot. */
                    void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
# elif defined(_MSC_VER) && defined(__AMD64__) /** @todo check this with with VC7! */
#  pragma intrinsic(_AddressOfReturnAddress)
                    /* NOTE(review): _AddressOfReturnAddress is not invoked here
                       (missing "()") — this takes the intrinsic's address instead
                       of the return-address slot.  Looks like a bug; verify. */
                    void *pvRet = (uint8_t *)_AddressOfReturnAddress;
# elif defined(__X86__)
                    /* Fall back to guessing from the first argument's address. */
                    void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
# else
#  error "huh?"
# endif
                    /* Sanity check: the three slots above the return address must
                       be our own arguments, otherwise we guessed the frame wrong. */
                    if (    ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
                        &&  ((uintptr_t *)pvRet)[2] == (uintptr_t)uOperation
                        &&  ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
                        TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
                    else
                    {
# if defined(DEBUG) || defined(LOG_ENABLED)
                        static bool s_fHaveWarned = false;
                        if (!s_fHaveWarned)
                        {
                            s_fHaveWarned = true;
                            //RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n"); -- @todo export me!
                            RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
                        }
# endif
                        /* Couldn't locate the frame; dispatch directly instead. */
                        TRPMR0DispatchHostInterrupt(pVM);
                    }
#endif /* !VBOX_WITHOUT_IDT_PATCHING */
                    return rc;
                }
            }
            /* Won't get here! */
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            int rc;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
            TMCpuTickResume(pVM);
            rc = HWACCMR0Enable(pVM);
            if (VBOX_SUCCESS(rc))
            {
#ifdef DEBUG_NO_RING0_ASSERTIONS
                g_pVMAssert = pVM;
#endif
                /* Run with the longjmp buffer armed so asserts/logging can bail to ring-3. */
                rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM); /* this may resume code. */
#ifdef DEBUG_NO_RING0_ASSERTIONS
                g_pVMAssert = 0;
#endif
                int rc2 = HWACCMR0Disable(pVM);
                AssertRC(rc2);
            }
            TMCpuTickPause(pVM);
            pVM->vmm.s.iLastGCRc = rc;

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif
            /* No special action required for external interrupts, just return. */
            return rc;
        }

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            /* pvArg carries the requested module version as an integer. */
            return VMMR0Init(pVM, (unsigned)(uintptr_t)pvArg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0Term(pVM);

        /*
         * Setup the hardware accelerated raw-mode session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
            return HWACCMR0SetupVMX(pVM);

        /*
         * Switch to GC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
            return rc;
        }

#if !defined(__L4__) && !defined(__AMD64__) /** @todo Port this to L4. */ /** @todo fix logging and other services problems on AMD64. */
        /*
         * Services.
         */
        case VMMR0_DO_INTNET_OPEN:
        case VMMR0_DO_INTNET_IF_CLOSE:
        case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
        case VMMR0_DO_INTNET_IF_SEND:
        case VMMR0_DO_INTNET_IF_WAIT:
        {
            /*
             * Validate arguments a bit first.
             */
            if (!VALID_PTR(pvArg))
                return VERR_INVALID_POINTER;
            if (!VALID_PTR(pVM))
                return VERR_INVALID_POINTER;
            if (pVM->pVMHC != pVM)
                return VERR_INVALID_POINTER;
            if (!VALID_PTR(pVM->pSession))
                return VERR_INVALID_POINTER;
            if (!g_pIntNet)
                return VERR_FILE_NOT_FOUND; ///@todo fix this status code!

            /*
             * Unpack the arguments and call the service.
             */
            switch (uOperation)
            {
                case VMMR0_DO_INTNET_OPEN:
                {
                    PINTNETOPENARGS pArgs = (PINTNETOPENARGS)pvArg;
                    return INTNETR0Open(g_pIntNet, pVM->pSession, &pArgs->szNetwork[0], pArgs->cbSend, pArgs->cbRecv, &pArgs->hIf);
                }

                case VMMR0_DO_INTNET_IF_CLOSE:
                {
                    PINTNETIFCLOSEARGS pArgs = (PINTNETIFCLOSEARGS)pvArg;
                    return INTNETR0IfClose(g_pIntNet, pArgs->hIf);
                }

                case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
                {
                    PINTNETIFGETRING3BUFFERARGS pArgs = (PINTNETIFGETRING3BUFFERARGS)pvArg;
                    return INTNETR0IfGetRing3Buffer(g_pIntNet, pArgs->hIf, &pArgs->pRing3Buf);
                }

                case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
                {
                    PINTNETIFSETPROMISCUOUSMODEARGS pArgs = (PINTNETIFSETPROMISCUOUSMODEARGS)pvArg;
                    return INTNETR0IfSetPromiscuousMode(g_pIntNet, pArgs->hIf, pArgs->fPromiscuous);
                }

                case VMMR0_DO_INTNET_IF_SEND:
                {
                    PINTNETIFSENDARGS pArgs = (PINTNETIFSENDARGS)pvArg;
                    return INTNETR0IfSend(g_pIntNet, pArgs->hIf, pArgs->pvFrame, pArgs->cbFrame);
                }

                case VMMR0_DO_INTNET_IF_WAIT:
                {
                    PINTNETIFWAITARGS pArgs = (PINTNETIFWAITARGS)pvArg;
                    return INTNETR0IfWait(g_pIntNet, pArgs->hIf, pArgs->cMillies);
                }

                default:
                    return VERR_NOT_SUPPORTED;
            }
        }
#endif /* !__L4__ */

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


        default:
            /*
             * We're returning VERR_NOT_SUPPORT here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", uOperation));
            return VERR_NOT_SUPPORTED;
    }
}
652
653
654/**
655 * Internal R0 logger worker: Flush logger.
656 *
657 * @param pLogger The logger instance to flush.
658 * @remark This function must be exported!
659 */
660VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
661{
662 /*
663 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
664 * (This is a bit paranoid code.)
665 */
666 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
667 if ( !VALID_PTR(pR0Logger)
668 || !VALID_PTR(pR0Logger + 1)
669 || !VALID_PTR(pLogger)
670 || pLogger->u32Magic != RTLOGGER_MAGIC)
671 {
672 LogCom(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
673 return;
674 }
675
676 PVM pVM = pR0Logger->pVM;
677 if ( !VALID_PTR(pVM)
678 || pVM->pVMHC != pVM)
679 {
680 LogCom(("vmmR0LoggerFlush: pVM=%p! pLogger=%p\n", pVM, pLogger));
681 return;
682 }
683
684 /*
685 * Check that the jump buffer is armed.
686 */
687#ifdef __X86__
688 if (!pVM->vmm.s.CallHostR0JmpBuf.eip)
689#else
690 if (!pVM->vmm.s.CallHostR0JmpBuf.rip)
691#endif
692 {
693 LogCom(("vmmR0LoggerFlush: Jump buffer isn't armed!\n"));
694 pLogger->offScratch = 0;
695 return;
696 }
697
698 VMMR0CallHost(pVM, VMMCALLHOST_VMM_LOGGER_FLUSH, 0);
699}
700
701#ifdef DEBUG_NO_RING0_ASSERTIONS
/**
 * Check if we really want to hit a breakpoint.
 * Can jump back to ring-3 when the longjmp is armed.
 */
DECLEXPORT(bool) RTCALL RTAssertDoBreakpoint()
{
    if (g_pVMAssert)
    {
        /* Fake a call-host request so control returns to ring-3 instead of
           breakpointing in ring-0.  NOTE(review): this reuses the
           VMM_LOGGER_FLUSH operation as the vehicle — presumably harmless
           since rcCallHost is ignored; confirm. */
        g_pVMAssert->vmm.s.enmCallHostOperation = VMMCALLHOST_VMM_LOGGER_FLUSH;
        g_pVMAssert->vmm.s.u64CallHostArg = 0;
        g_pVMAssert->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
        /* If the longjmp is taken we never get past this call. */
        int rc = vmmR0CallHostLongJmp(&g_pVMAssert->vmm.s.CallHostR0JmpBuf, VERR_INTERNAL_ERROR);
        if (rc == VINF_SUCCESS)
            rc = g_pVMAssert->vmm.s.rcCallHost; /* NOTE(review): rc is never used after this - dead store. */
    }

    /* Reached only when the longjmp wasn't armed/taken; presumably 'true'
       means "do hit the breakpoint" - confirm against the IPRT contract. */
    return true;
}
720
721
722#undef LOG_GROUP
723#define LOG_GROUP LOG_GROUP_EM
724
725/** Runtime assert implementation for Native Win32 Ring-0. */
726DECLEXPORT(void) RTCALL AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
727{
728 Log(("\n!!Assertion Failed!!\n"
729 "Expression: %s\n"
730 "Location : %s(%d) %s\n",
731 pszExpr, pszFile, uLine, pszFunction));
732}
733
734#endif
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette