VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 1132

Last change on this file since 1132 was 1057, checked in by vboxsync, 18 years ago

Trapping and virtualizing TSC (both disabled).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 24.6 KB
Line 
1/* $Id: VMMR0.cpp 1057 2007-02-23 20:38:37Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VMM
27#ifdef __AMD64__ /** @todo fix logging on __AMD64__ (swapgs) */
28# define LOG_DISABLED
29#endif
30#include <VBox/vmm.h>
31#include <VBox/sup.h>
32#include <VBox/trpm.h>
33#include <VBox/cpum.h>
34#include <VBox/stam.h>
35#include <VBox/tm.h>
36#include "VMMInternal.h"
37#include <VBox/vm.h>
38#include <VBox/intnet.h>
39#include <VBox/hwaccm.h>
40
41#include <VBox/err.h>
42#include <VBox/version.h>
43#include <VBox/log.h>
44#include <iprt/assert.h>
45
46#if defined(_MSC_VER) && defined(__AMD64__) /** @todo check this with with VC7! */
47# pragma intrinsic(_AddressOfReturnAddress)
48#endif
49
50
51/*******************************************************************************
52* Internal Functions *
53*******************************************************************************/
54static int VMMR0Init(PVM pVM, unsigned uVersion);
55static int VMMR0Term(PVM pVM);
56__BEGIN_DECLS
57VMMR0DECL(int) ModuleInit(void);
58VMMR0DECL(void) ModuleTerm(void);
59__END_DECLS
60
61
//#define DEBUG_NO_RING0_ASSERTIONS
#ifdef DEBUG_NO_RING0_ASSERTIONS
/** The VM whose EMT is currently executing ring-0 code; consulted by
 *  RTAssertDoBreakpoint so a failed assertion can long-jump back to
 *  ring-3 instead of breakpointing in ring-0. */
static PVM g_pVMAssert = 0;
#endif

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifndef __AMD64__ /* doesn't link here */
/** Pointer to the internal networking service instance.
 *  Created in ModuleInit, destroyed in ModuleTerm; NULL when unavailable. */
PINTNET g_pIntNet = 0;
#endif
74
75
76/**
77 * Initialize the module.
78 * This is called when we're first loaded.
79 *
80 * @returns 0 on success.
81 * @returns VBox status on failure.
82 */
83VMMR0DECL(int) ModuleInit(void)
84{
85#ifndef __AMD64__ /* doesn't link here */
86 LogFlow(("ModuleInit: g_pIntNet=%p\n", g_pIntNet));
87 g_pIntNet = NULL;
88 LogFlow(("ModuleInit: g_pIntNet=%p should be NULL now...\n", g_pIntNet));
89 int rc = INTNETR0Create(&g_pIntNet);
90 if (VBOX_SUCCESS(rc))
91 {
92 LogFlow(("ModuleInit: returns success. g_pIntNet=%p\n", g_pIntNet));
93 return 0;
94 }
95 g_pIntNet = NULL;
96 LogFlow(("ModuleTerm: returns %Vrc\n", rc));
97 return rc;
98#else
99 return 0;
100#endif
101}
102
103
104/**
105 * Terminate the module.
106 * This is called when we're finally unloaded.
107 */
108VMMR0DECL(void) ModuleTerm(void)
109{
110#ifndef __AMD64__ /* doesn't link here */
111 LogFlow(("ModuleTerm:\n"));
112 if (g_pIntNet)
113 {
114 INTNETR0Destroy(g_pIntNet);
115 g_pIntNet = NULL;
116 }
117 LogFlow(("ModuleTerm: returns\n"));
118#endif
119}
120
121
/**
 * Initiates the R0 driver for a particular VM instance.
 *
 * Checks the ring-3/ring-0 version match, validates the VM handle,
 * registers this EMT's ring-0 logger instance and initializes the
 * HWACCM and CPUM ring-0 bits.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM instance in question.
 * @param   uVersion    The minimum module version required.
 */
static int VMMR0Init(PVM pVM, unsigned uVersion)
{
    /*
     * Check if compatible version.
     * Exact match, or same major with at least the required minor, is OK.
     */
    if (    uVersion != VBOX_VERSION
        &&  (   VBOX_GET_VERSION_MAJOR(uVersion) != VBOX_VERSION_MAJOR
             || VBOX_GET_VERSION_MINOR(uVersion) < VBOX_VERSION_MINOR))
        return VERR_VERSION_MISMATCH;
    /* pVM must be the ring-0 mapping of itself (self-pointer check). */
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

    /*
     * Register the EMT R0 logger instance.
     */
    PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
    if (pR0Logger)
    {
#if 0 /* testing of the logger. */
        LogCom(("VMMR0Init: before %p\n", RTLogDefaultInstance()));
        LogCom(("VMMR0Init: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
        LogCom(("VMMR0Init: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
        LogCom(("VMMR0Init: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("VMMR0Init: after %p reg\n", RTLogDefaultInstance()));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("VMMR0Init: after %p dereg\n", RTLogDefaultInstance()));

        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("VMMR0Init: returned succesfully from direct logger call.\n"));
        pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
        LogCom(("VMMR0Init: returned succesfully from direct flush call.\n"));

        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        LogCom(("VMMR0Init: after %p reg2\n", RTLogDefaultInstance()));
        pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
        LogCom(("VMMR0Init: returned succesfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogSetDefaultInstanceThread(NULL, 0);
        LogCom(("VMMR0Init: after %p dereg2\n", RTLogDefaultInstance()));

        RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
        LogCom(("VMMR0Init: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
#endif
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
    }


    /*
     * Init HWACCM (comment said "VMXM"; the call is HWACCMR0Init).
     * NOTE(review): return value is ignored here — confirm HWACCMR0Init
     * cannot fail in a way that should abort initialization.
     */
    HWACCMR0Init(pVM);

    /*
     * Init CPUM.
     */
    int rc = CPUMR0Init(pVM);

    /* On failure, undo the logger registration done above. */
    if (RT_FAILURE(rc))
        RTLogSetDefaultInstanceThread(NULL, 0);
    return rc;
}
193
194
195/**
196 * Terminates the R0 driver for a particular VM instance.
197 *
198 * @returns VBox status code.
199 *
200 * @param pVM The VM instance in question.
201 */
202static int VMMR0Term(PVM pVM)
203{
204 /*
205 * Deregister the logger.
206 */
207 RTLogSetDefaultInstanceThread(NULL, 0);
208 return VINF_SUCCESS;
209}
210
211
212/**
213 * Calls the ring-3 host code.
214 *
215 * @returns VBox status code of the ring-3 call.
216 * @param pVM The VM handle.
217 * @param enmOperation The operation.
218 * @param uArg The argument to the operation.
219 */
220VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
221{
222/** @todo profile this! */
223 pVM->vmm.s.enmCallHostOperation = enmOperation;
224 pVM->vmm.s.u64CallHostArg = uArg;
225 pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
226 int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
227 if (rc == VINF_SUCCESS)
228 rc = pVM->vmm.s.rcCallHost;
229 return rc;
230}
231
232
233#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * Maps a guest-execution return code onto its dedicated STAM counter so
 * the frequency of each exit reason shows up in the statistics; codes
 * without a dedicated counter land in StatGCRetMisc.
 *
 * @param   pVM     The VM handle.
 * @param   rc      The status code returned from guest execution.
 */
static void vmmR0RecordRC(PVM pVM, int rc)
{
    /*
     * Collect statistics.
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRingSwitchInt);
            break;
        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetExceptionPrivilege);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIRETTrap);
            break;
        case VINF_IOM_HC_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIORead);
            break;
        case VINF_IOM_HC_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIOWrite);
            break;
        case VINF_IOM_HC_IOPORT_READWRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIOReadWrite);
            break;
        case VINF_IOM_HC_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIORead);
            break;
        case VINF_IOM_HC_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOWrite);
            break;
        case VINF_IOM_HC_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMMIOPatchWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetGDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetIDTFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTSSFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPatchIretIRQ);
            break;
        case VERR_REM_FLUSHED_PAGES_OVERFLOW:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPageOverflow);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetToR3);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            /* Ring-3 call-backs get a second-level breakdown by operation. */
            switch (pVM->vmm.s.enmCallHostOperation)
            {
                case VMMCALLHOST_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMLock);
                    break;
                case VMMCALLHOST_PDM_QUEUE_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPDMQueueFlush);
                    break;
                case VMMCALLHOST_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMPoolGrow);
                    break;
                case VMMCALLHOST_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMLock);
                    break;
                case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetRemReplay);
                    break;
                case VMMCALLHOST_PGM_RAM_GROW_RANGE:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMGrowRAM);
                    break;
                case VMMCALLHOST_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetLogFlush);
                    break;
                case VMMCALLHOST_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetError);
                    break;
                case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetVMSetRuntimeError);
                    break;
                default:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetCallHost);
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPGMChangeMode);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_HLT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetEmulHlt);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetPendingRequest);
            break;
        default:
            /* No dedicated counter for this code. */
            STAM_COUNTER_INC(&pVM->vmm.s.StatGCRetMisc);
            break;
    }
}
403#endif /* VBOX_WITH_STATISTICS */
404
405
/**
 * The Ring 0 entry point, called by the support library (SUP).
 *
 * Dispatches on uOperation: runs guest code raw-mode or via hardware
 * acceleration, initializes/terminates the per-VM ring-0 state, forwards
 * internal-networking service requests, and handles a couple of
 * profiling/testing operations.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   uOperation  Which operation to execute. (VMMR0OPERATION)
 * @param   pvArg       Argument to the operation.
 */
VMMR0DECL(int) VMMR0Entry(PVM pVM, unsigned /* make me an enum */ uOperation, void *pvArg)
{
    switch (uOperation)
    {
        /*
         * Switch to GC.
         * These calls return whatever the GC returns.
         */
        case VMMR0_DO_RAW_RUN:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
            register int rc;
            /* Run guest code; remember the result for ring-3. */
            pVM->vmm.s.iLastGCRc = rc = pVM->vmm.s.pfnR0HostToGuest(pVM);

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif

            /*
             * Check if there is an exit R0 action associated with the return code.
             */
            switch (rc)
            {
                /*
                 * Default - no action, just return.
                 */
                default:
#if HC_ARCH_BITS == 64 /* AMD64 debugging - to be removed */
                    /* Codes in the 0xc0caff00..0xc0caffff range fall through
                       to the interrupt dispatch below for debugging. */
                    if ((unsigned)rc - 0xc0caff00U > 0xff)
                        return rc;
                    /* fall thru */
#else
                    return rc;
#endif

                /*
                 * We'll let TRPM change the stack frame so our return is different.
                 * Just keep in mind that after the call, things have changed!
                 */
                case VINF_EM_RAW_INTERRUPT:
                case VINF_EM_RAW_INTERRUPT_HYPER:
                {
#ifdef VBOX_WITHOUT_IDT_PATCHING
                    TRPMR0DispatchHostInterrupt(pVM);
#else /* !VBOX_WITHOUT_IDT_PATCHING */
                    /*
                     * Don't trust the compiler to get this right.
                     * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
                     * mode too because we push the arguments on the stack in the IDT patch code.
                     */
# if defined(__GNUC__)
                    void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
# elif defined(_MSC_VER) && defined(__AMD64__) /** @todo check this with with VC7! */
                    void *pvRet = (uint8_t *)_AddressOfReturnAddress();
# elif defined(__X86__)
                    void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
# else
#  error "huh?"
# endif
                    /* Sanity check: the stack above the return address must hold
                       our own arguments in the order the IDT patch pushed them;
                       otherwise fall back to a direct dispatch. */
                    if (    ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
                        &&  ((uintptr_t *)pvRet)[2] == (uintptr_t)uOperation
                        &&  ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
                        TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
                    else
                    {
# if defined(DEBUG) || defined(LOG_ENABLED)
                        static bool s_fHaveWarned = false;
                        if (!s_fHaveWarned)
                        {
                            s_fHaveWarned = true;
                            //RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n"); -- @todo export me!
                            RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
                        }
# endif
                        TRPMR0DispatchHostInterrupt(pVM);
                    }
#endif /* !VBOX_WITHOUT_IDT_PATCHING */
                    return rc;
                }
            }
            /* Won't get here! */
            break;
        }

        /*
         * Run guest code using the available hardware acceleration technology.
         */
        case VMMR0_DO_HWACC_RUN:
        {
            int rc;

            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
            rc = HWACCMR0Enable(pVM);
            if (VBOX_SUCCESS(rc))
            {
#ifdef DEBUG_NO_RING0_ASSERTIONS
                g_pVMAssert = pVM;
#endif
                rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM); /* this may resume code. */
#ifdef DEBUG_NO_RING0_ASSERTIONS
                g_pVMAssert = 0;
#endif
                int rc2 = HWACCMR0Disable(pVM);
                AssertRC(rc2);
            }
            pVM->vmm.s.iLastGCRc = rc;

#ifdef VBOX_WITH_STATISTICS
            vmmR0RecordRC(pVM, rc);
#endif
            /* No special action required for external interrupts, just return. */
            return rc;
        }

        /*
         * Initialize the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_INIT:
            return VMMR0Init(pVM, (unsigned)(uintptr_t)pvArg);

        /*
         * Terminate the R0 part of a VM instance.
         */
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0Term(pVM);

        /*
         * Setup the hardware accelerated raw-mode session.
         */
        case VMMR0_DO_HWACC_SETUP_VM:
            return HWACCMR0SetupVMX(pVM);

        /*
         * Switch to GC to execute Hypervisor function.
         */
        case VMMR0_DO_CALL_HYPERVISOR:
        {
            /* Safety precaution as VMX disables the switcher. */
            Assert(!pVM->vmm.s.fSwitcherDisabled);
            if (pVM->vmm.s.fSwitcherDisabled)
                return VERR_NOT_SUPPORTED;

            int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
            return rc;
        }

#if !defined(__L4__) && !defined(__AMD64__) /** @todo Port this to L4. */ /** @todo fix logging and other services problems on AMD64. */
        /*
         * Services.
         */
        case VMMR0_DO_INTNET_OPEN:
        case VMMR0_DO_INTNET_IF_CLOSE:
        case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
        case VMMR0_DO_INTNET_IF_SEND:
        case VMMR0_DO_INTNET_IF_WAIT:
        {
            /*
             * Validate arguments a bit first.
             */
            if (!VALID_PTR(pvArg))
                return VERR_INVALID_POINTER;
            if (!VALID_PTR(pVM))
                return VERR_INVALID_POINTER;
            if (pVM->pVMR0 != pVM)
                return VERR_INVALID_POINTER;
            if (!VALID_PTR(pVM->pSession))
                return VERR_INVALID_POINTER;
            if (!g_pIntNet)
                return VERR_FILE_NOT_FOUND; ///@todo fix this status code!

            /*
             * Unpack the arguments and call the service.
             */
            switch (uOperation)
            {
                case VMMR0_DO_INTNET_OPEN:
                {
                    PINTNETOPENARGS pArgs = (PINTNETOPENARGS)pvArg;
                    return INTNETR0Open(g_pIntNet, pVM->pSession, &pArgs->szNetwork[0], pArgs->cbSend, pArgs->cbRecv, &pArgs->hIf);
                }

                case VMMR0_DO_INTNET_IF_CLOSE:
                {
                    PINTNETIFCLOSEARGS pArgs = (PINTNETIFCLOSEARGS)pvArg;
                    return INTNETR0IfClose(g_pIntNet, pArgs->hIf);
                }

                case VMMR0_DO_INTNET_IF_GET_RING3_BUFFER:
                {
                    PINTNETIFGETRING3BUFFERARGS pArgs = (PINTNETIFGETRING3BUFFERARGS)pvArg;
                    return INTNETR0IfGetRing3Buffer(g_pIntNet, pArgs->hIf, &pArgs->pRing3Buf);
                }

                case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
                {
                    PINTNETIFSETPROMISCUOUSMODEARGS pArgs = (PINTNETIFSETPROMISCUOUSMODEARGS)pvArg;
                    return INTNETR0IfSetPromiscuousMode(g_pIntNet, pArgs->hIf, pArgs->fPromiscuous);
                }

                case VMMR0_DO_INTNET_IF_SEND:
                {
                    PINTNETIFSENDARGS pArgs = (PINTNETIFSENDARGS)pvArg;
                    return INTNETR0IfSend(g_pIntNet, pArgs->hIf, pArgs->pvFrame, pArgs->cbFrame);
                }

                case VMMR0_DO_INTNET_IF_WAIT:
                {
                    PINTNETIFWAITARGS pArgs = (PINTNETIFWAITARGS)pvArg;
                    return INTNETR0IfWait(g_pIntNet, pArgs->hIf, pArgs->cMillies);
                }

                default:
                    return VERR_NOT_SUPPORTED;
            }
        }
#endif /* !__L4__ */

        /*
         * For profiling.
         */
        case VMMR0_DO_NOP:
            return VINF_SUCCESS;

        /*
         * For testing Ring-0 APIs invoked in this environment.
         */
        case VMMR0_DO_TESTS:
            /** @todo make new test */
            return VINF_SUCCESS;


        default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something else
             * than -1 which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", uOperation));
            return VERR_NOT_SUPPORTED;
    }
}
660
661
/**
 * Internal R0 logger worker: Flush logger.
 *
 * Recovers the VM handle from the embedding VMMR0LOGGER structure and
 * 'calls' back to ring-3 (VMMCALLHOST_VMM_LOGGER_FLUSH) to get the
 * scratch buffer written out. If the jump buffer isn't armed the buffer
 * content is simply discarded.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  !VALID_PTR(pLogger)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
        LogCom(("vmmR0LoggerFlush: pLogger=%p!\n", pLogger));
        return;
    }

    /* Validate the VM handle the logger points back at (self-pointer check). */
    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMHC != pVM)
    {
        LogCom(("vmmR0LoggerFlush: pVM=%p! pLogger=%p\n", pVM, pLogger));
        return;
    }

    /*
     * Check that the jump buffer is armed.
     * Without it there is no way back to ring-3; drop the scratch content.
     */
#ifdef __X86__
    if (!pVM->vmm.s.CallHostR0JmpBuf.eip)
#else
    if (!pVM->vmm.s.CallHostR0JmpBuf.rip)
#endif
    {
        LogCom(("vmmR0LoggerFlush: Jump buffer isn't armed!\n"));
        pLogger->offScratch = 0;
        return;
    }

    VMMR0CallHost(pVM, VMMCALLHOST_VMM_LOGGER_FLUSH, 0);
}
708
709#ifdef DEBUG_NO_RING0_ASSERTIONS
/**
 * Check if we really want to hit a breakpoint.
 * Can jump back to ring-3 when the longjmp is armed.
 *
 * @returns true — take the breakpoint (only reached when no VM is
 *          registered or the long jump came back).
 */
DECLEXPORT(bool) RTCALL RTAssertDoBreakpoint()
{
    if (g_pVMAssert)
    {
        /* NOTE(review): reuses VMMCALLHOST_VMM_LOGGER_FLUSH as the ring-3
           operation — presumably any armed operation gets us back to ring-3
           with the log flushed; confirm against the ring-3 handler. */
        g_pVMAssert->vmm.s.enmCallHostOperation = VMMCALLHOST_VMM_LOGGER_FLUSH;
        g_pVMAssert->vmm.s.u64CallHostArg = 0;
        g_pVMAssert->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
        int rc = vmmR0CallHostLongJmp(&g_pVMAssert->vmm.s.CallHostR0JmpBuf, VERR_INTERNAL_ERROR);
        if (rc == VINF_SUCCESS)
            rc = g_pVMAssert->vmm.s.rcCallHost;
    }

    return true;
}
728
729
730#undef LOG_GROUP
731#define LOG_GROUP LOG_GROUP_EM
732
/** Runtime assert implementation for Native Win32 Ring-0.
 * Only logs the assertion details; whether to actually breakpoint is
 * decided separately (see RTAssertDoBreakpoint). */
DECLEXPORT(void) RTCALL AssertMsg1(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    Log(("\n!!Assertion Failed!!\n"
         "Expression: %s\n"
         "Location : %s(%d) %s\n",
         pszExpr, pszFile, uLine, pszFunction));
}
741
742#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette